text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/opt/moose/miniconda/bin/python
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import pylab

# Apply the global font size before any artists are created so every label
# and tick picks it up (rcParams only affect artists made after the update).
mpl.rcParams.update({'font.size': 10})

# names=True makes the CSV header available as field names (data['time'], ...).
data = np.genfromtxt('./elas_plas_nl1_cycle_out.csv', delimiter=',', names=True)

fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_xlabel("Time")
ax1.set_ylabel("Stress (MPa)")
ax1.plot(data['time'], data['stress_yy'], label='Stress YY', color='k')
ax1.plot(data['time'], data['vonmises'], label='Vonmises', color='b')
ax1.plot(data['time'], data['pressure'], label='Pressure', color='r')
# Scientific notation on the stress axis, two decimal places.
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
leg = ax1.legend(loc='best')
plt.savefig('plot_cycled_stress.pdf')
# BUG FIX: plt.show() takes no positional figure argument; `plt.show(fig)`
# raises TypeError on current Matplotlib (the only parameter is the
# keyword-only `block` flag). The current figure is shown automatically.
plt.show()
|
nuclear-wizard/moose
|
modules/combined/test/tests/inelastic_strain/elas_plas/plot_cycled_stress.py
|
Python
|
lgpl-2.1
| 1,045
|
[
"MOOSE"
] |
4d0a33498029abc0a412505e88edc4ecf3a2279484961ee46278d3d47d459d0f
|
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
try:
    import colorama

    def _wrap(prefix):
        # Build a colorizer that brackets text with an ANSI prefix and a reset.
        def colorize(text):
            return "%s%s%s" % (prefix, text, colorama.Style.RESET_ALL)
        return colorize

    bright = _wrap(colorama.Style.BRIGHT)
    dim = _wrap(colorama.Style.DIM)
    white = _wrap(colorama.Fore.WHITE)
    blue = _wrap(colorama.Fore.BLUE)
    red = _wrap(colorama.Fore.RED)
    green = _wrap(colorama.Fore.GREEN)
    yellow = _wrap(colorama.Fore.YELLOW)
except ImportError:
    # Without colorama every helper degrades to a no-op passthrough.
    def _plain(text):
        return text

    bright = dim = white = blue = red = green = yellow = _plain
# Custom 'nightly' flag used by the nightly-build machinery: it switches the
# build to setuptools and pins the version from __conda_version__.txt,
# mirroring it into bokeh/__conda_version__.py so the installed package can
# report the same version.
if 'nightly' in sys.argv:
    from setuptools import setup

    sys.argv.remove('nightly')

    with open('__conda_version__.txt', 'r') as f:
        version = f.read().rstrip()

    vers_file = os.path.join('bokeh', '__conda_version__.py')
    with open(vers_file, 'w') as f:
        f.write("conda_version=" + "'" + version + "'")
else:
    # Regular builds go through plain distutils.
    from distutils.core import setup

# NOTE(review): dir_util is needed unconditionally (clean() uses it), so it is
# imported outside the if/else — confirm against upstream history.
from distutils import dir_util

# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------

# Locations of the BokehJS sources and build outputs that get copied into the
# bokeh/server package tree at install time.
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')

# Python 2 compatibility: interactive prompts below use the Py3-style input().
if sys.version_info[0] < 3:
    input = raw_input

# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------

# versioneer configuration: derives the package version from git tags.
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = ''  # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-'  # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
# Accumulates data-file paths (relative to the 'bokeh' package directory)
# for the package_data argument of setup().
package_data = []


def package_path(path, filters=()):
    """Record *path* (a file, or every file under a directory) in package_data.

    Paths are stored relative to the 'bokeh' package dir. When *filters* is a
    non-empty tuple of suffixes, only matching files under a directory are kept.
    Raises RuntimeError if *path* does not exist.
    """
    if not os.path.exists(path):
        raise RuntimeError("packaging non-existent path: %s" % path)

    if os.path.isfile(path):
        package_data.append(relpath(path, 'bokeh'))
        return

    for dirpath, _dirnames, filenames in os.walk(path):
        rel_dir = relpath(dirpath, 'bokeh')
        package_data.extend(
            join(rel_dir, name)
            for name in filenames
            if not filters or name.endswith(filters)
        )
# You can't install Bokeh in a virtualenv because the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
    """Returns a list containing all global site-packages directories
    (and possibly site-python).

    Re-implementation of site.getsitepackages() because virtualenv does not
    provide it (see the linked issues above). Builds candidate directories
    per-platform from sys.prefix/sys.exec_prefix and keeps only those that
    actually exist on disk.

    NOTE(review): `sys.version[:3]` yields e.g. "3.1" on Python 3.10+ —
    fragile on modern interpreters; confirm before reusing elsewhere.
    """
    # True on 64-bit interpreters; Py2 lacks maxsize-or falls back to maxint.
    _is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
    _is_pypy = hasattr(sys, 'pypy_version_info')
    _is_jython = sys.platform[:4] == 'java'

    prefixes = [sys.prefix, sys.exec_prefix]

    sitepackages = []
    seen = set()

    for prefix in prefixes:
        # Skip empty or duplicate prefixes (prefix == exec_prefix is common).
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)

        if sys.platform in ('os2emx', 'riscos') or _is_jython:
            sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
        elif _is_pypy:
            sitedirs = [os.path.join(prefix, 'site-packages')]
        elif sys.platform == 'darwin' and prefix == sys.prefix:
            if prefix.startswith("/System/Library/Frameworks/"):  # Apple's Python
                sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                            os.path.join(prefix, "Extras", "lib", "python")]
            else:  # any other Python distros on OSX work this way
                sitedirs = [os.path.join(prefix, "lib",
                                         "python" + sys.version[:3], "site-packages")]
        elif os.sep == '/':
            # Generic POSIX layout (Linux etc.).
            sitedirs = [os.path.join(prefix,
                                     "lib",
                                     "python" + sys.version[:3],
                                     "site-packages"),
                        os.path.join(prefix, "lib", "site-python"),
                        ]
            # lib64 variant on 64-bit distros; prepend or append depending on
            # whether this interpreter is 64-bit.
            lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
            if (os.path.exists(lib64_dir) and
                    os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                if _is_64bit:
                    sitedirs.insert(0, lib64_dir)
                else:
                    sitedirs.append(lib64_dir)
            try:
                # sys.getobjects only available in --with-pydebug build
                sys.getobjects
                sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
            except AttributeError:
                pass
            # Debian-specific dist-packages directories:
            sitedirs.append(os.path.join(prefix, "local/lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            if sys.version_info[0] >= 3:
                # sys.version[0] is the major-version character, e.g. "3".
                sitedirs.append(os.path.join(prefix, "lib",
                                             "python" + sys.version[0],
                                             "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
        else:
            # Windows-style fallback.
            sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]

        if sys.platform == 'darwin':
            # for framework builds *only* we add the standard Apple
            # locations. Currently only per-user, but /Library and
            # /Network/Library could be added too
            if 'Python.framework' in prefix:
                home = os.environ.get('HOME')
                if home:
                    sitedirs.append(
                        os.path.join(home,
                                     'Library',
                                     'Python',
                                     sys.version[:3],
                                     'site-packages'))

        for sitedir in sitedirs:
            sitepackages.append(os.path.abspath(sitedir))

    # Keep only directories that actually exist.
    sitepackages = [p for p in sitepackages if os.path.isdir(p)]
    return sitepackages
def check_remove_bokeh_install(site_packages):
    """Offer to delete an existing bokeh directory under *site_packages*.

    Returns None when nothing is installed or after a successful removal;
    exits the process if the user declines or the removal fails.
    """
    bokeh_path = join(site_packages, "bokeh")
    if not (exists(bokeh_path) and isdir(bokeh_path)):
        return

    answer = input("Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path)
    if answer != "y":
        print("Not removing old bokeh install")
        sys.exit(1)

    print("Removing old bokeh install...", end=" ")
    try:
        shutil.rmtree(bokeh_path)
        print("Done")
    except (IOError, OSError):
        print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
        sys.exit(-1)
def remove_bokeh_pth(path_file):
    """Delete a leftover bokeh.pth file, if present.

    Returns True when a file was removed, False when there was nothing to do.
    Exits the process if removal fails.
    """
    if not exists(path_file):
        return False

    try:
        os.remove(path_file)
    except (IOError, OSError):
        print("Unable to remove old path file at %s, exiting" % path_file)
        sys.exit(-1)
    return True
# Status/error templates for the BokehJS build step. Built once at import
# time; the color helpers degrade to plain text when colorama is unavailable.

# Shown when gulp could not be launched at all; %r is the command, %s the error.
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""

# Shown when 'gulp build' exits non-zero; %s is its (indented) stderr output.
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned error message:
%s
"""

# Shown when the built artifact sizes cannot be determined; %s is the error.
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""

# Shown on a successful build; %s is the indented, colorized gulp output.
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
    """Run 'gulp build' in the bokehjs subdirectory and report the outcome.

    Prints the build log, timing, and artifact sizes on success; prints an
    error and exits the process on failure. The working directory is always
    restored, even when launching gulp itself fails.
    """
    print("Building BokehJS... ", end="")
    sys.stdout.flush()
    os.chdir('bokehjs')

    # gulp's launcher has a different name on Windows.
    if sys.platform != "win32":
        cmd = [join('node_modules', '.bin', 'gulp'), 'build']
    else:
        cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']

    t0 = time.time()
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as e:
        print(BUILD_EXEC_FAIL_MSG % (cmd, e))
        sys.exit(1)
    finally:
        # Restore the working directory whether or not Popen succeeded.
        os.chdir('..')
    result = proc.wait()
    t1 = time.time()

    if result != 0:
        msg = proc.stderr.read().decode('ascii', errors='ignore')
        msg = "\n".join([" " + x for x in msg.split("\n")])
        print(BUILD_FAIL_MSG % red(msg))
        sys.exit(1)

    indented_msg = ""
    msg = proc.stdout.read().decode('ascii', errors='ignore')
    pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
    for line in msg.strip().split("\n"):
        m = pat.match(line)
        if m is None:
            # BUG FIX: gulp occasionally emits lines without a "[stamp]"
            # prefix; these used to crash with AttributeError on .groups().
            # Pass them through un-stamped instead.
            indented_msg += " " + dim(line) + "\n"
            continue
        stamp, txt = m.groups()
        indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
    print(BUILD_SUCCESS_MSG % indented_msg)
    print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1 - t0))))

    print()
    print("Build artifact sizes:")
    try:
        blddir = join("bokehjs", "build")
        bkjs_size = os.stat(join(blddir, "js", "bokeh.js")).st_size / 2**10
        bkjs_min_size = os.stat(join(blddir, "js", "bokeh.min.js")).st_size / 2**10
        bkcss_size = os.stat(join(blddir, "css", "bokeh.css")).st_size / 2**10
        bkcss_min_size = os.stat(join(blddir, "css", "bokeh.min.css")).st_size / 2**10
        print(" - bokeh.js : %6.1f KB" % bkjs_size)
        print(" - bokeh.css : %6.1f KB" % bkcss_size)
        print(" - bokeh.min.js : %6.1f KB" % bkjs_min_size)
        print(" - bokeh.min.css : %6.1f KB" % bkcss_min_size)
    except Exception as e:
        print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
    """Copy the built BokehJS js/css trees into bokeh/server/static.

    Verifies that all four build artifacts exist first; exits the process
    with an error message when any are missing.
    """
    target_jsdir = join(SERVER, 'static', 'js')
    target_cssdir = join(SERVER, 'static', 'css')

    STATIC_ASSETS = [
        join(JS, 'bokeh.js'),
        join(JS, 'bokeh.min.js'),
        join(CSS, 'bokeh.css'),
        join(CSS, 'bokeh.min.css'),
    ]
    if not all(exists(asset) for asset in STATIC_ASSETS):
        print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
        sys.exit(1)

    # Replace any previously-installed copies wholesale.
    for source_dir, target_dir in ((JS, target_jsdir), (CSS, target_cssdir)):
        if exists(target_dir):
            shutil.rmtree(target_dir)
        shutil.copytree(source_dir, target_dir)
def clean():
    """Remove the bokeh tree left under build/lib by a previous build."""
    print("Removing prior-built items...", end=" ")
    # NOTE(review): dir_util.remove_tree errors if the directory is absent —
    # 'clean' assumes a prior build happened.
    dir_util.remove_tree('build/lib/bokeh')
    print("Done")
def get_user_jsargs():
    """Interactively ask whether to build BokehJS fresh or reuse the last build.

    Returns True for "build fresh", False for "use previously built".
    Keeps prompting until the user enters a valid choice.
    """
    print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
    mapping = {"1": True, "2": False}
    while True:
        choice = input("Choice? ")
        if choice in mapping:
            return mapping[choice]
        print("Input '%s' not understood. Valid choices: 1, 2\n" % choice)
def parse_jsargs():
    """Decide whether BokehJS should be (re)built, consuming our custom flags.

    Removes '--build_js' / '--install_js' from sys.argv so distutils never
    sees them. Returns True when a fresh build is wanted, False otherwise;
    falls back to an interactive prompt when installing without either flag.
    """
    options = ('install', 'develop', 'sdist', 'egg_info', 'build')
    installing = any(arg in sys.argv for arg in options)

    if '--build_js' in sys.argv:
        if not installing:
            print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
            sys.exit(1)
        sys.argv.remove('--build_js')
        return True

    if '--install_js' in sys.argv:
        # --install_js is also valid on its own (without sdist/install/develop).
        sys.argv.remove('--install_js')
        return False

    return get_user_jsargs() if installing else False
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------

# Set up this checkout or source archive with the right BokehJS files.

if sys.version_info[:2] < (2, 6):
    raise RuntimeError("Bokeh requires python >= 2.6")

# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
    install_js()
    sys.exit(0)

# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
    if "--install_js" in sys.argv:
        print("Removing '--install_js' incompatible with 'sdist'")
        sys.argv.remove('--install_js')
    if "--build_js" not in sys.argv:
        print("Adding '--build_js' required for 'sdist'")
        sys.argv.append('--build_js')

# check for package install, set jsinstall to False to skip prompt.
# MANIFEST.in is only present in a full repo checkout, not in sdist packages,
# so its absence means BokehJS cannot be built/installed from source here.
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
    if "--build_js" in sys.argv or "--install_js" in sys.argv:
        print("BokehJS source code is not shipped in sdist packages; "
              "building/installing from the bokehjs source directory is disabled. "
              "To build or develop BokehJS yourself, you must clone the full "
              "Bokeh repository from https://github.com/bokeh/bokeh")
        if "--build_js" in sys.argv:
            sys.argv.remove('--build_js')
        if "--install_js" in sys.argv:
            sys.argv.remove('--install_js')
    jsbuild = False
    jsinstall = False
else:
    jsbuild = parse_jsargs()

if jsbuild:
    build_js()

if jsinstall:
    install_js()

# File suffixes shipped as sampledata package data.
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics')

# Register all static assets, templates, and test fixtures as package data.
package_path(join(SERVER, 'static'))
package_path(join(SERVER, 'templates'))
package_path(join(ROOT, 'bokeh', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)
package_path(join(ROOT, 'bokeh', 'server', 'redis.conf'))
package_path(join(SERVER, 'tests', 'config'))
package_path(join(SERVER, 'tests', 'data'))
scripts = ['bokeh-server', 'websocket_worker.py']

# Target site-packages: per-user with --user, otherwise the first global dir.
if '--user' in sys.argv:
    site_packages = site.USER_SITE
else:
    site_packages = getsitepackages()[0]

path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))

print()
if 'develop' in sys.argv:
    # Development install: write a .pth file pointing at this checkout
    # instead of copying the package, then exit before running setup().
    check_remove_bokeh_install(site_packages)
    with open(path_file, "w+") as f:
        f.write(path)
    print("Installing Bokeh for development:")
    print(" - writing path '%s' to %s" % (path, path_file))
    if jsinstall:
        print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
    sys.exit()
elif 'clean' in sys.argv:
    clean()
elif 'install' in sys.argv:
    # Real install: make sure no stale development .pth shadows the package.
    pth_removed = remove_bokeh_pth(path_file)
    print("Installing Bokeh:")
    if pth_removed:
        print(" - removed path file at %s" % path_file)
    if jsinstall:
        print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
    if jsinstall:
        print("Bokeh-specific options available with 'install' or 'develop':")
        print()
        print(" --build_js build and install a fresh BokehJS")
        print(" --install_js install only last previously built BokehJS")
    else:
        print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
        print()

print()
# Runtime dependencies, extended conditionally per interpreter/platform below.
REQUIRES = [
    'Flask>=0.10.1',
    'Jinja2>=2.7',
    'MarkupSafe>=0.18',
    'Werkzeug>=0.9.1',
    'greenlet>=0.4.1',
    'itsdangerous>=0.21',
    'python-dateutil>=2.1',
    'pytz==2013b',
    'requests>=1.2.3',
    'six>=1.5.2',
    'pygments>=1.6',
    'pystache>=0.5.3',
    'markdown>=2.3.1',
    'PyYAML>=3.10',
    'pyzmq>=14.3.1',
    'tornado>=4.0.1',
    # cli
    # 'click>=3.3',
    # tests
    # 'nose>=1.3.0',
    # 'mock>=1.0.1',
    'colorama>=0.2.7'
]

# argparse entered the stdlib in 2.7; backport needed on 2.6 only.
if sys.version_info[:2] == (2, 6):
    REQUIRES.append('argparse>=1.1')

# websocket client is only required on CPython 2.x.
if sys.version_info[0] != 3 and platform.python_implementation() != "PyPy":
    REQUIRES.extend([
        'websocket>=0.2.1'
    ])

# if sys.platform != "win32":
#     REQUIRES.append('redis>=2.7.6')

if platform.python_implementation() != "PyPy":
    # You need to install PyPy's fork of NumPy to make it work:
    # pip install git+https://bitbucket.org/pypy/numpy.git
    # Also pandas is not yet working with PyPy .
    REQUIRES.extend([
        'numpy>=1.7.1',
        'pandas>=0.11.0'
    ])
# Version and build commands come from versioneer (configured above).
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()

setup(
    name='bokeh',
    version=_version,
    cmdclass=_cmdclass,
    packages=[
        'bokeh',
        'bokeh.models',
        'bokeh.models.tests',
        'bokeh.models.widgets',
        'bokeh.charts',
        'bokeh.charts.builder',
        'bokeh.charts.builder.tests',
        'bokeh.charts.tests',
        'bokeh.crossfilter',
        'bokeh.mplexporter',
        'bokeh.mplexporter.renderers',
        'bokeh.sampledata',
        'bokeh.server',
        'bokeh.server.models',
        'bokeh.server.views',
        'bokeh.server.blaze',
        'bokeh.server.utils',
        'bokeh.server.tests',
        'bokeh.sphinxext',
        'bokeh.tests',
        'bokeh.transforms',
        'bokeh.util',
        'bokeh.util.tests',
    ],
    # package_data was accumulated by the package_path() calls above.
    package_data={'bokeh': package_data},
    author='Continuum Analytics',
    author_email='info@continuum.io',
    url='http://github.com/bokeh/bokeh',
    description='Statistical and novel interactive HTML plots for Python',
    license='New BSD',
    scripts=scripts,
    zip_safe=False,
    install_requires=REQUIRES
)
|
canavandl/bokeh
|
setup.py
|
Python
|
bsd-3-clause
| 19,428
|
[
"GULP"
] |
0bf8a29ed93a7aed6a6c332019a4663f14b733ee2260fb03af4e60b3c2c371f8
|
# Storage filtering classes
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from collections import namedtuple
from blivet import arch
from blivet.devices import DASDDevice, FcoeDiskDevice, iScsiDiskDevice, MultipathDevice, \
ZFCPDiskDevice, NVDIMMNamespaceDevice
from blivet.fcoe import has_fcoe
from blivet.iscsi import iscsi
from pyanaconda.flags import flags
from pyanaconda.core.i18n import CN_, CP_
from pyanaconda.storage_utils import try_populate_devicetree, on_disk_storage
from pyanaconda.modules.common.constants.objects import DISK_SELECTION
from pyanaconda.modules.common.constants.services import STORAGE
from pyanaconda.ui.lib.disks import getDisks, applyDiskSelection
from pyanaconda.ui.gui.utils import timed_action
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui.spokes.advstorage.fcoe import FCoEDialog
from pyanaconda.ui.gui.spokes.advstorage.iscsi import ISCSIDialog
from pyanaconda.ui.gui.spokes.advstorage.zfcp import ZFCPDialog
from pyanaconda.ui.gui.spokes.advstorage.dasd import DASDDialog
from pyanaconda.ui.gui.spokes.advstorage.nvdimm import NVDIMMDialog
from pyanaconda.ui.gui.spokes.lib.cart import SelectedDisksDialog
from pyanaconda.ui.categories.system import SystemCategory
__all__ = ["FilterSpoke"]

# Notebook page indices in the filter UI.
PAGE_SEARCH = 0
PAGE_MULTIPATH = 1
PAGE_OTHER = 2
PAGE_NVDIMM = 3
PAGE_Z = 4

# One row of the shared disk store; every page's tree model uses these columns
# (unused columns are filled with empty strings by the individual pages).
DiskStoreRow = namedtuple("DiskStoreRow", ["visible", "selected", "mutable",
                                           "name", "type", "model", "capacity",
                                           "vendor", "interconnect", "serial",
                                           "wwid", "paths", "port", "target",
                                           "lun", "ccw", "wwpn", "namespace", "mode"])
class FilterPage(object):
    """Base class for the logic behind one notebook tab of the filter spoke.

    Each page overlays its own filtered model on a common model holding all
    non-advanced disks. A page is created once when the spoke is initialized
    and set up again on every revisit, receiving the full list of disks that
    belong to it (so it can populate vendor combos and the like).

    Subclass this once per specialized tab; create only one instance of each
    subclass.
    """

    def __init__(self, storage, builder):
        """Create a new FilterPage instance.

        Instance attributes:

           builder      -- A reference to the Gtk.Builder instance containing
                           this page's UI elements.
           filterActive -- Whether the user has chosen to filter results down
                           on this page. If set, visible_func should take the
                           filter UI elements into account.
           storage      -- An instance of a blivet object.
        """
        self.builder = builder
        self.storage = storage
        self.model = None
        self.filterActive = False

    def ismember(self, device):
        """Return whether *device* belongs on this page at all, ignoring any
        filtering settings. The base implementation accepts everything.
        """
        return True

    def setup(self, store, selectedNames, disks):
        """Prepare the page UI before it is shown.

        Called on every revisit of the filter spoke with the master store,
        the names of currently-selected disks, and the disks that belong on
        this page per ismember(). Subclasses should at least add the disks
        to the store; the base implementation does nothing.
        """
        pass

    def clear(self):
        """Reset this page's filtering fields to their defaults (invoked by
        the Clear button). The base implementation does nothing.
        """
        pass

    def visible_func(self, model, itr, *args):
        """Decide whether the row at *itr* is shown on this page, taking
        filterActive and page-specific criteria into account. The base
        implementation shows every row.
        """
        return True

    def setupCombo(self, combo, items):
        """Repopulate a GtkComboBoxText with *items*.

        The combo is cleared first, an empty entry is prepended so the
        criterion can be unset, and the first non-empty entry is selected
        by default.
        """
        combo.remove_all()
        combo.append_text('')

        for entry in sorted(set(items)):
            combo.append_text(entry)

        if items:
            combo.set_active(1)

    def _long_identifier(self, disk):
        # Prefer the long ip-address:port-iscsi-tgtname-lun-XX style
        # identifier, which blivet does not expose directly; dig it out of
        # the device links and fall back to the plain name otherwise.
        by_path_links = [link for link in disk.device_links if "by-path" in link]
        if by_path_links:
            link = by_path_links[0]
            return link[link.rindex("/") + 1:]
        return disk.name
class SearchPage(FilterPage):
    """Free-form search tab: filter all disks by port/target/LUN or by WWID."""

    # Match these to searchTypeCombo ids in glade
    SEARCH_TYPE_NONE = 'None'
    SEARCH_TYPE_PORT_TARGET_LUN = 'PTL'
    SEARCH_TYPE_WWID = 'WWID'

    def __init__(self, storage, builder):
        super().__init__(storage, builder)
        self.model = self.builder.get_object("searchModel")
        self.model.set_visible_func(self.visible_func)

        self._lunEntry = self.builder.get_object("searchLUNEntry")
        self._wwidEntry = self.builder.get_object("searchWWIDEntry")
        self._combo = self.builder.get_object("searchTypeCombo")
        self._portCombo = self.builder.get_object("searchPortCombo")
        self._targetEntry = self.builder.get_object("searchTargetEntry")

    def setup(self, store, selectedNames, disks):
        # Reset the search type, then repopulate the port combo from the
        # iSCSI node ports of all disks that have one.
        self._combo.set_active_id(self.SEARCH_TYPE_NONE)
        self._combo.emit("changed")

        ports = []
        for disk in disks:
            if hasattr(disk, "node") and disk.node is not None:
                ports.append(str(disk.node.port))

        self.setupCombo(self._portCombo, ports)

    def clear(self):
        self._lunEntry.set_text("")
        self._portCombo.set_active(0)
        self._targetEntry.set_text("")
        self._wwidEntry.set_text("")

    def _port_equal(self, device):
        # Empty combo selection means "any port".
        active = self._portCombo.get_active_text()
        if active:
            if hasattr(device, "node"):
                return device.node.port == int(active)
            else:
                return False
        else:
            return True

    def _target_equal(self, device):
        # Substring match against the device's initiator name; empty matches all.
        active = self._targetEntry.get_text().strip()
        if active:
            return active in getattr(device, "initiator", "")
        else:
            return True

    def _lun_equal(self, device):
        active = self._lunEntry.get_text().strip()
        if active:
            if hasattr(device, "node"):
                try:
                    # NOTE(review): compares the LUN entry against node.tpgt
                    # (target portal group tag) — confirm that is intended.
                    return int(active) == device.node.tpgt
                except ValueError:
                    return False
            elif hasattr(device, "fcp_lun"):
                return active in device.fcp_lun
            # NOTE(review): a device with neither 'node' nor 'fcp_lun' falls
            # through here and returns None (falsy), hiding the row.
        else:
            return True

    def _filter_func(self, device):
        if not self.filterActive:
            return True

        filterBy = self._combo.get_active_id()
        if filterBy == self.SEARCH_TYPE_NONE:
            return True
        elif filterBy == self.SEARCH_TYPE_PORT_TARGET_LUN:
            return self._port_equal(device) and self._target_equal(device) and self._lun_equal(device)
        elif filterBy == self.SEARCH_TYPE_WWID:
            return self._wwidEntry.get_text() in getattr(device, "wwid", self._long_identifier(device))
        # NOTE(review): an unrecognized filter id implicitly returns None
        # (falsy), so the row is hidden.

    def visible_func(self, model, itr, *args):
        obj = DiskStoreRow(*model[itr])
        device = self.storage.devicetree.get_device_by_name(obj.name, hidden=True)
        return self._filter_func(device)
class MultipathPage(FilterPage):
    """Tab listing multipath devices, filterable by vendor, interconnect,
    or WWID."""

    # Match these to multipathTypeCombo ids in glade
    SEARCH_TYPE_NONE = 'None'
    SEARCH_TYPE_VENDOR = 'Vendor'
    SEARCH_TYPE_INTERCONNECT = 'Interconnect'
    SEARCH_TYPE_WWID = 'WWID'

    def __init__(self, storage, builder):
        super().__init__(storage, builder)
        self.model = self.builder.get_object("multipathModel")
        self.model.set_visible_func(self.visible_func)

        self._combo = self.builder.get_object("multipathTypeCombo")
        self._icCombo = self.builder.get_object("multipathInterconnectCombo")
        self._vendorCombo = self.builder.get_object("multipathVendorCombo")
        self._wwidEntry = self.builder.get_object("multipathWWIDEntry")

    def ismember(self, device):
        # Only multipath devices belong on this tab.
        return isinstance(device, MultipathDevice)

    def setup(self, store, selectedNames, disks):
        vendors = []
        interconnects = []

        for disk in disks:
            member_paths = "\n".join(parent.name for parent in disk.parents)
            store.append([True, disk.name in selectedNames, not disk.protected,
                          disk.name, "", disk.model, str(disk.size),
                          disk.vendor, disk.bus, disk.serial,
                          disk.wwid, member_paths, "", "",
                          "", "", "", "", ""])
            if disk.vendor not in vendors:
                vendors.append(disk.vendor)
            if disk.bus not in interconnects:
                interconnects.append(disk.bus)

        self._combo.set_active_id(self.SEARCH_TYPE_NONE)
        self._combo.emit("changed")
        self.setupCombo(self._vendorCombo, vendors)
        self.setupCombo(self._icCombo, interconnects)

    def clear(self):
        self._icCombo.set_active(0)
        self._vendorCombo.set_active(0)
        self._wwidEntry.set_text("")

    def _filter_func(self, device):
        if not self.filterActive:
            return True

        active_id = self._combo.get_active_id()
        if active_id == self.SEARCH_TYPE_NONE:
            return True
        if active_id == self.SEARCH_TYPE_VENDOR:
            return device.vendor == self._vendorCombo.get_active_text()
        if active_id == self.SEARCH_TYPE_INTERCONNECT:
            return device.bus == self._icCombo.get_active_text()
        if active_id == self.SEARCH_TYPE_WWID:
            return self._wwidEntry.get_text() in device.wwid
        # An unrecognized filter id falls through, returning None (falsy),
        # matching the historical behavior of hiding the row.

    def visible_func(self, model, itr, *args):
        # Multipath rows are only visible at all when multipath is enabled.
        if not flags.mpath:
            return False

        row = DiskStoreRow(*model[itr])
        device = self.storage.devicetree.get_device_by_name(row.name, hidden=True)
        return self.ismember(device) and self._filter_func(device)
class OtherPage(FilterPage):
    """Tab for 'other' advanced disks: iSCSI and FCoE devices."""

    # Match these to otherTypeCombo ids in glade
    SEARCH_TYPE_NONE = 'None'
    SEARCH_TYPE_VENDOR = 'Vendor'
    SEARCH_TYPE_INTERCONNECT = 'Interconnect'
    SEARCH_TYPE_ID = 'ID'

    def __init__(self, storage, builder):
        super().__init__(storage, builder)
        self.model = self.builder.get_object("otherModel")
        self.model.set_visible_func(self.visible_func)

        self._combo = self.builder.get_object("otherTypeCombo")
        self._icCombo = self.builder.get_object("otherInterconnectCombo")
        self._idEntry = self.builder.get_object("otherIDEntry")
        self._vendorCombo = self.builder.get_object("otherVendorCombo")

    def ismember(self, device):
        # iSCSI and FCoE disks belong on this tab.
        return isinstance(device, iScsiDiskDevice) or isinstance(device, FcoeDiskDevice)

    def setup(self, store, selectedNames, disks):
        vendors = []
        interconnects = []

        for disk in disks:
            paths = [d.name for d in disk.parents]
            selected = disk.name in selectedNames

            # iSCSI devices expose node info (port / target portal group tag).
            if hasattr(disk, "node") and disk.node is not None:
                port = str(disk.node.port)
                lun = str(disk.node.tpgt)
            else:
                port = ""
                lun = ""

            store.append([True, selected, not disk.protected,
                          disk.name, "", disk.model, str(disk.size),
                          disk.vendor, disk.bus, disk.serial,
                          self._long_identifier(disk), "\n".join(paths), port, getattr(disk, "initiator", ""),
                          lun, "", "", "", ""])
            if not disk.vendor in vendors:
                vendors.append(disk.vendor)
            if not disk.bus in interconnects:
                interconnects.append(disk.bus)

        self._combo.set_active_id(self.SEARCH_TYPE_NONE)
        self._combo.emit("changed")
        self.setupCombo(self._vendorCombo, vendors)
        self.setupCombo(self._icCombo, interconnects)

    def clear(self):
        self._icCombo.set_active(0)
        self._idEntry.set_text("")
        self._vendorCombo.set_active(0)

    def _filter_func(self, device):
        if not self.filterActive:
            return True

        filterBy = self._combo.get_active_id()
        if filterBy == self.SEARCH_TYPE_NONE:
            return True
        elif filterBy == self.SEARCH_TYPE_VENDOR:
            return device.vendor == self._vendorCombo.get_active_text()
        elif filterBy == self.SEARCH_TYPE_INTERCONNECT:
            return device.bus == self._icCombo.get_active_text()
        elif filterBy == self.SEARCH_TYPE_ID:
            # Substring match against the device's by-path link, if it has one.
            for link in device.device_links:
                if "by-path" in link:
                    return self._idEntry.get_text().strip() in link
            return False
        # NOTE(review): an unrecognized filter id implicitly returns None
        # (falsy), hiding the row.

    def visible_func(self, model, itr, *args):
        obj = DiskStoreRow(*model[itr])
        device = self.storage.devicetree.get_device_by_name(obj.name, hidden=True)
        return self.ismember(device) and self._filter_func(device)
class ZPage(FilterPage):
    """Tab for s390 storage: zFCP and DASD disks. Only populated on s390x."""

    # Match these to zTypeCombo ids in glade
    SEARCH_TYPE_NONE = 'None'
    SEARCH_TYPE_CCW = 'CCW'
    SEARCH_TYPE_WWPN = 'WWPN'
    SEARCH_TYPE_LUN = 'LUN'

    def __init__(self, storage, builder):
        super().__init__(storage, builder)
        self.model = self.builder.get_object("zModel")
        self.model.set_visible_func(self.visible_func)

        self._ccwEntry = self.builder.get_object("zCCWEntry")
        self._wwpnEntry = self.builder.get_object("zWWPNEntry")
        self._lunEntry = self.builder.get_object("zLUNEntry")
        self._combo = self.builder.get_object("zTypeCombo")

        # Cache the architecture check; setup() is a no-op off s390x.
        self._isS390 = arch.is_s390()

    def clear(self):
        self._lunEntry.set_text("")
        self._ccwEntry.set_text("")
        self._wwpnEntry.set_text("")

    def ismember(self, device):
        return isinstance(device, ZFCPDiskDevice) or isinstance(device, DASDDevice)

    def setup(self, store, selectedNames, disks):
        """ Set up our Z-page, but only if we're running on s390x. """
        if not self._isS390:
            return
        else:
            # NOTE(review): ccws/wwpns/luns are collected here but never used
            # to populate any combo — confirm whether that is intentional.
            ccws = []
            wwpns = []
            luns = []

            self._combo.set_active_id(self.SEARCH_TYPE_NONE)
            self._combo.emit("changed")

            for disk in disks:
                paths = [d.name for d in disk.parents]
                selected = disk.name in selectedNames
                if getattr(disk, "type") == "zfcp":
                    # remember to store all of the zfcp-related junk so we can
                    # see it in the UI
                    if not disk.fcp_lun in luns:
                        luns.append(disk.fcp_lun)
                    if not disk.wwpn in wwpns:
                        wwpns.append(disk.wwpn)
                    if not disk.hba_id in ccws:
                        ccws.append(disk.hba_id)

                    # now add it to our store
                    # NOTE(review): append placed inside the zfcp branch —
                    # DASD disks lack fcp_lun/wwpn, so appending outside it
                    # would raise AttributeError; confirm against upstream.
                    store.append([True, selected, not disk.protected,
                                  disk.name, "", disk.model, str(disk.size),
                                  disk.vendor, disk.bus, disk.serial, "", "\n".join(paths),
                                  "", "", disk.fcp_lun, disk.hba_id, disk.wwpn, "", ""])

    def _filter_func(self, device):
        if not self.filterActive:
            return True

        filterBy = self._combo.get_active_id()
        if filterBy == self.SEARCH_TYPE_NONE:
            return True
        elif filterBy == self.SEARCH_TYPE_CCW:
            return self._ccwEntry.get_text() in device.hba_id
        elif filterBy == self.SEARCH_TYPE_WWPN:
            return self._wwpnEntry.get_text() in device.wwpn
        elif filterBy == self.SEARCH_TYPE_LUN:
            return self._lunEntry.get_text() in device.fcp_lun

        return False

    def visible_func(self, model, itr, *args):
        obj = DiskStoreRow(*model[itr])
        device = self.storage.devicetree.get_device_by_name(obj.name, hidden=True)
        return self.ismember(device) and self._filter_func(device)
class NvdimmPage(FilterPage):
    """Filter page for NVDIMM namespace devices."""

    # Match these to nvdimmTypeCombo ids in glade
    SEARCH_TYPE_NONE = 'None'
    SEARCH_TYPE_NAMESPACE = 'Namespace'
    SEARCH_TYPE_MODE = 'Mode'

    def __init__(self, storage, builder):
        # consistency: use super() like the other FilterPage subclasses
        super().__init__(storage, builder)
        self.model = self.builder.get_object("nvdimmModel")
        self.treeview = self.builder.get_object("nvdimmTreeView")
        self.model.set_visible_func(self.visible_func)

        self._combo = self.builder.get_object("nvdimmTypeCombo")
        self._modeCombo = self.builder.get_object("nvdimmModeCombo")
        self._namespaceEntry = self.builder.get_object("nvdimmNamespaceEntry")

    def ismember(self, device):
        """Return True for devices shown on this page."""
        return isinstance(device, NVDIMMNamespaceDevice)

    def setup(self, store, selectedNames, disks):
        """Populate *store* with the NVDIMM disks and fill the mode combo."""
        modes = []

        for disk in disks:
            paths = [d.name for d in disk.parents]
            selected = disk.name in selectedNames
            mutable = not disk.protected

            # Only namespaces in "sector" mode are selectable/mutable here.
            if disk.mode != "sector":
                mutable = False
                selected = False

            store.append([True, selected, mutable,
                          disk.name, "", disk.model, str(disk.size),
                          disk.vendor, disk.bus, disk.serial,
                          self._long_identifier(disk), "\n".join(paths), "", "",
                          "", "", "", disk.devname, disk.mode])

            if disk.mode not in modes:
                modes.append(disk.mode)

        self._combo.set_active_id(self.SEARCH_TYPE_NONE)
        self._combo.emit("changed")

        self.setupCombo(self._modeCombo, modes)

    def clear(self):
        """Reset the NVDIMM filter widgets."""
        self._modeCombo.set_active(0)
        self._namespaceEntry.set_text("")

    def _filter_func(self, device):
        """Return True if *device* passes the active mode/namespace filter."""
        if not self.filterActive:
            return True

        filterBy = self._combo.get_active_id()

        if filterBy == self.SEARCH_TYPE_NONE:
            return True
        elif filterBy == self.SEARCH_TYPE_MODE:
            return device.mode == self._modeCombo.get_active_text()
        elif filterBy == self.SEARCH_TYPE_NAMESPACE:
            ns = self._namespaceEntry.get_text().strip()
            return device.devname == ns

        # bug fix: previously this fell through returning None for an unknown
        # combo id; return False explicitly, consistent with the other pages.
        return False

    def visible_func(self, model, itr, *args):
        """GtkTreeModelFilter visibility callback for the NVDIMM page."""
        obj = DiskStoreRow(*model[itr])
        device = self.storage.devicetree.get_device_by_name(obj.name, hidden=True)
        return self.ismember(device) and self._filter_func(device)

    def get_selected_namespaces(self):
        """Return the namespace names of the rows selected in the tree view."""
        namespaces = []
        selection = self.treeview.get_selection()
        store, pathlist = selection.get_selected_rows()
        for path in pathlist:
            store_row = DiskStoreRow(*store[store.get_iter(path)])
            namespaces.append(store_row.namespace)

        return namespaces
class FilterSpoke(NormalSpoke):
    """Advanced storage spoke: lets the user search/filter and select
    specialized and network disks (multipath, zFCP/DASD, NVDIMM, ...).

    .. inheritance-diagram:: FilterSpoke
       :parts: 3
    """
    builderObjects = ["diskStore", "filterWindow",
                      "searchModel", "multipathModel", "otherModel", "zModel", "nvdimmModel"]
    mainWidgetName = "filterWindow"
    uiFile = "spokes/advanced_storage.glade"
    helpFile = "FilterSpoke.xml"

    category = SystemCategory

    title = CN_("GUI|Spoke", "_INSTALLATION DESTINATION")

    def __init__(self, *args):
        super().__init__(*args)
        # Run apply() even when the spoke is skipped on the way back.
        self.applyOnSkip = True

        self.ancestors = []
        self.disks = []
        self.selected_disks = []

        self._reconfigureNVDIMMButton = self.builder.get_object("reconfigureNVDIMMButton")

    @property
    def indirect(self):
        # Entered only via the StorageSpoke, never directly from a hub.
        return True

    # This spoke has no status since it's not in a hub
    @property
    def status(self):
        return None

    def apply(self):
        """Push the current disk selection back into kickstart/storage."""
        applyDiskSelection(self.storage, self.data, self.selected_disks)

        # some disks may have been added in this spoke, we need to recreate the
        # snapshot of on-disk storage
        if on_disk_storage.created:
            on_disk_storage.dispose_snapshot()
        on_disk_storage.create_snapshot(self.storage)

    def initialize(self):
        """One-time UI setup: build the filter pages and prune widgets that
        are not applicable on this platform/configuration."""
        super().initialize()
        self.initialize_start()

        self.pages = {
            PAGE_SEARCH: SearchPage(self.storage, self.builder),
            PAGE_MULTIPATH: MultipathPage(self.storage, self.builder),
            PAGE_OTHER: OtherPage(self.storage, self.builder),
            PAGE_NVDIMM: NvdimmPage(self.storage, self.builder),
            PAGE_Z: ZPage(self.storage, self.builder),
        }

        self._notebook = self.builder.get_object("advancedNotebook")

        if not arch.is_s390():
            # Drop the last (Z) notebook page and its add buttons entirely.
            self._notebook.remove_page(-1)
            self.builder.get_object("addZFCPButton").destroy()
            self.builder.get_object("addDASDButton").destroy()

        if not has_fcoe():
            self.builder.get_object("addFCOEButton").destroy()

        if not iscsi.available:
            self.builder.get_object("addISCSIButton").destroy()

        self._store = self.builder.get_object("diskStore")
        self._addDisksButton = self.builder.get_object("addDisksButton")

        # The button is sensitive only on NVDIMM page
        self._reconfigureNVDIMMButton.set_sensitive(False)

        # report that we are done
        self.initialize_done()

    def _real_ancestors(self, disk):
        # Return a list of all the ancestors of a disk, but remove the disk
        # itself from this list.
        return [d for d in disk.ancestors if d.name != disk.name]

    def refresh(self):
        """Rebuild the disk store and dispatch each disk to its filter page."""
        super().refresh()

        self.disks = getDisks(self.storage.devicetree)

        disk_select_proxy = STORAGE.get_proxy(DISK_SELECTION)
        self.selected_disks = disk_select_proxy.SelectedDisks

        self.ancestors = [d.name for disk in self.disks for d in self._real_ancestors(disk)]

        self._store.clear()

        allDisks = []
        multipathDisks = []
        otherDisks = []
        nvdimmDisks = []
        zDisks = []

        # Now add all the non-local disks to the store. Everything has been set
        # up ahead of time, so there's no need to configure anything. We first
        # make these lists of disks, then call setup on each individual page.
        # This is because there could be page-specific setup to do that
        # requires a complete view of all the disks on that page.
        for disk in self.disks:
            if self.pages[PAGE_MULTIPATH].ismember(disk):
                multipathDisks.append(disk)
            elif self.pages[PAGE_OTHER].ismember(disk):
                otherDisks.append(disk)
            elif self.pages[PAGE_NVDIMM].ismember(disk):
                nvdimmDisks.append(disk)
            elif self.pages[PAGE_Z].ismember(disk):
                zDisks.append(disk)

            # The search page sees every disk regardless of classification.
            allDisks.append(disk)

        self.pages[PAGE_SEARCH].setup(self._store, self.selected_disks, allDisks)
        self.pages[PAGE_MULTIPATH].setup(self._store, self.selected_disks, multipathDisks)
        self.pages[PAGE_OTHER].setup(self._store, self.selected_disks, otherDisks)
        self.pages[PAGE_NVDIMM].setup(self._store, self.selected_disks, nvdimmDisks)
        self.pages[PAGE_Z].setup(self._store, self.selected_disks, zDisks)

        self._update_summary()

    def _update_summary(self):
        """Refresh the "N storage devices selected" summary button."""
        summaryButton = self.builder.get_object("summary_button")
        label = self.builder.get_object("summary_button_label")

        # We need to remove ancestor devices from the count. Otherwise, we'll
        # end up in a situation where selecting one multipath device could
        # potentially show three devices selected (mpatha, sda, sdb for instance).
        count = len([disk for disk in self.selected_disks if disk not in self.ancestors])

        summary = CP_("GUI|Installation Destination|Filter",
                      "%d _storage device selected",
                      "%d _storage devices selected",
                      count) % count

        label.set_text(summary)
        label.set_use_underline(True)

        summaryButton.set_visible(count > 0)
        label.set_sensitive(count > 0)

    def on_back_clicked(self, button):
        # Always return through the storage spoke, not directly to a hub.
        self.skipTo = "StorageSpoke"
        super().on_back_clicked(button)

    def on_summary_clicked(self, button):
        """Pop up the read-only selected-disks overview dialog."""
        dialog = SelectedDisksDialog(self.data)

        # Include any disks selected in the initial storage spoke, plus any
        # selected in this filter UI.
        disks = [disk for disk in self.disks if disk.name in self.selected_disks]
        free_space = self.storage.get_free_space(disks=disks)

        with self.main_window.enlightbox(dialog.window):
            dialog.refresh(disks, free_space, showRemove=False, setBoot=False)
            dialog.run()

    @timed_action(delay=1200, busy_cursor=False)
    def on_filter_changed(self, *args):
        # Debounced: re-filter the current page's model after typing stops.
        n = self._notebook.get_current_page()
        self.pages[n].filterActive = True
        self.pages[n].model.refilter()

    def on_clear_icon_clicked(self, entry, icon_pos, event):
        if icon_pos == Gtk.EntryIconPosition.SECONDARY:
            entry.set_text("")

    def on_page_switched(self, notebook, newPage, newPageNum, *args):
        self.pages[newPageNum].model.refilter()
        notebook.get_nth_page(newPageNum).show_all()
        # 3 is presumably the NVDIMM page index as laid out in the glade file
        # -- TODO confirm against PAGE_NVDIMM.
        self._reconfigureNVDIMMButton.set_sensitive(newPageNum == 3)

    def on_row_toggled(self, button, path):
        """Toggle a disk's selection checkbox; path is into the filter model."""
        if not path:
            return

        page_index = self._notebook.get_current_page()
        filter_model = self.pages[page_index].model
        model_itr = filter_model.get_iter(path)
        # Translate the filtered iterator back into the backing diskStore.
        itr = filter_model.convert_iter_to_child_iter(model_itr)
        self._store[itr][1] = not self._store[itr][1]

        # Column 1 is the "selected" flag, column 3 the disk name.
        if self._store[itr][1] and self._store[itr][3] not in self.selected_disks:
            self.selected_disks.append(self._store[itr][3])
        elif not self._store[itr][1] and self._store[itr][3] in self.selected_disks:
            self.selected_disks.remove(self._store[itr][3])

        self._update_summary()

    @timed_action(delay=50, threshold=100)
    def on_refresh_clicked(self, widget, *args):
        try_populate_devicetree(self.storage.devicetree)
        self.refresh()

    def on_add_iscsi_clicked(self, widget, *args):
        dialog = ISCSIDialog(self.data, self.storage)

        with self.main_window.enlightbox(dialog.window):
            dialog.refresh()
            dialog.run()

        # We now need to refresh so any new disks picked up by adding advanced
        # storage are displayed in the UI.
        self.refresh()

    def on_add_fcoe_clicked(self, widget, *args):
        dialog = FCoEDialog(self.data, self.storage)

        with self.main_window.enlightbox(dialog.window):
            dialog.refresh()
            dialog.run()

        # We now need to refresh so any new disks picked up by adding advanced
        # storage are displayed in the UI.
        self.refresh()

    def on_add_zfcp_clicked(self, widget, *args):
        dialog = ZFCPDialog(self.data, self.storage)

        with self.main_window.enlightbox(dialog.window):
            dialog.refresh()
            dialog.run()

        # We now need to refresh so any new disks picked up by adding advanced
        # storage are displayed in the UI.
        self.refresh()

    def on_add_dasd_clicked(self, widget, *args):
        dialog = DASDDialog(self.data, self.storage)

        with self.main_window.enlightbox(dialog.window):
            dialog.refresh()
            dialog.run()

        # We now need to refresh so any new disks picked up by adding advanced
        # storage are displayed in the UI.
        self.refresh()

    def on_reconfigure_nvdimm_clicked(self, widget, *args):
        namespaces = self.pages[PAGE_NVDIMM].get_selected_namespaces()
        dialog = NVDIMMDialog(self.data, self.storage, namespaces)

        with self.main_window.enlightbox(dialog.window):
            dialog.refresh()
            dialog.run()

        # We now need to refresh so any new disks picked up by adding advanced
        # storage are displayed in the UI.
        self.refresh()

    ##
    ## SEARCH TAB SIGNAL HANDLERS
    ##
    def on_search_type_changed(self, combo):
        ndx = combo.get_active()

        notebook = self.builder.get_object("searchTypeNotebook")

        notebook.set_current_page(ndx)
        self.on_filter_changed()

    ##
    ## MULTIPATH TAB SIGNAL HANDLERS
    ##
    def on_multipath_type_changed(self, combo):
        ndx = combo.get_active()

        notebook = self.builder.get_object("multipathTypeNotebook")

        notebook.set_current_page(ndx)
        self.on_filter_changed()

    ##
    ## OTHER TAB SIGNAL HANDLERS
    ##
    def on_other_type_combo_changed(self, combo):
        ndx = combo.get_active()

        notebook = self.builder.get_object("otherTypeNotebook")

        notebook.set_current_page(ndx)
        self.on_filter_changed()

    ##
    ## NVDIMM TAB SIGNAL HANDLERS
    ##
    def on_nvdimm_type_combo_changed(self, combo):
        ndx = combo.get_active()

        notebook = self.builder.get_object("nvdimmTypeNotebook")

        notebook.set_current_page(ndx)
        self.on_filter_changed()

    ##
    ## Z TAB SIGNAL HANDLERS
    ##
    def on_z_type_combo_changed(self, combo):
        ndx = combo.get_active()

        notebook = self.builder.get_object("zTypeNotebook")

        notebook.set_current_page(ndx)
        self.on_filter_changed()
|
vathpela/anaconda
|
pyanaconda/ui/gui/spokes/advanced_storage.py
|
Python
|
gpl-2.0
| 32,436
|
[
"VisIt"
] |
976f9a119406d466e5c60cef6998cece73e6af378dc8ddb2b505edcca080b432
|
# narrowcommands.py - command modifications for narrowhg extension
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import itertools
import os
from mercurial.i18n import _
from mercurial.node import (
hex,
short,
)
from mercurial import (
bundle2,
cmdutil,
commands,
discovery,
encoding,
error,
exchange,
extensions,
hg,
narrowspec,
pathutil,
pycompat,
registrar,
repair,
repoview,
requirements,
sparse,
util,
wireprototypes,
)
from mercurial.utils import (
urlutil,
)
table = {}
command = registrar.command(table)
def setup():
    """Wraps user-facing mercurial commands with narrow-aware versions."""

    entry = extensions.wrapcommand(commands.table, b'clone', clonenarrowcmd)
    # entry[1] is the wrapped command's option list; extend it with the
    # narrow-specific flags.
    entry[1].append(
        (b'', b'narrow', None, _(b"create a narrow clone of select files"))
    )
    entry[1].append(
        (
            b'',
            b'depth',
            b'',
            _(b"limit the history fetched by distance from heads"),
        )
    )
    entry[1].append((b'', b'narrowspec', b'', _(b"read narrowspecs from file")))
    # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
    if b'sparse' not in extensions.enabled():
        # sparse already defines --include/--exclude; avoid a duplicate flag.
        entry[1].append(
            (b'', b'include', [], _(b"specifically fetch this file/directory"))
        )
        entry[1].append(
            (
                b'',
                b'exclude',
                [],
                _(b"do not fetch this file/directory, even if included"),
            )
        )

    entry = extensions.wrapcommand(commands.table, b'pull', pullnarrowcmd)
    entry[1].append(
        (
            b'',
            b'depth',
            b'',
            _(b"limit the history fetched by distance from heads"),
        )
    )

    extensions.wrapcommand(commands.table, b'archive', archivenarrowcmd)
def clonenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps clone command, so 'hg clone' first wraps localrepo.clone().

    Reads an optional --narrowspec file into --include/--exclude, and wires a
    --depth option through to the bundle2 pull preparation.
    """
    opts = pycompat.byteskwargs(opts)
    wrappedextraprepare = util.nullcontextmanager()
    narrowspecfile = opts[b'narrowspec']

    if narrowspecfile:
        filepath = os.path.join(encoding.getcwd(), narrowspecfile)
        ui.status(_(b"reading narrowspec from '%s'\n") % filepath)
        try:
            fdata = util.readfile(filepath)
        except IOError as inst:
            raise error.Abort(
                _(b"cannot read narrowspecs from '%s': %s")
                % (filepath, encoding.strtolocal(inst.strerror))
            )

        includes, excludes, profiles = sparse.parseconfig(ui, fdata, b'narrow')
        if profiles:
            # %include directives are a sparse feature; narrowspecs must be
            # self-contained.
            raise error.ConfigError(
                _(
                    b"cannot specify other files using '%include' in"
                    b" narrowspec"
                )
            )

        narrowspec.validatepatterns(includes)
        narrowspec.validatepatterns(excludes)

        # narrowspec is passed so we should assume that user wants narrow clone
        opts[b'narrow'] = True
        opts[b'include'].extend(includes)
        opts[b'exclude'].extend(excludes)

    if opts[b'narrow']:

        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
            orig(pullop, kwargs)

            if opts.get(b'depth'):
                kwargs[b'depth'] = opts[b'depth']

        wrappedextraprepare = extensions.wrappedfunction(
            exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
        )

    with wrappedextraprepare:
        return orig(ui, repo, *args, **pycompat.strkwargs(opts))
def pullnarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps pull command to allow modifying narrow spec."""
    extraprepare_ctx = util.nullcontextmanager()

    if requirements.NARROW_REQUIREMENT in repo.requirements:
        # Only narrow repositories forward --depth into the bundle2 pull args.
        def _inject_depth(orig, pullop, kwargs):
            orig(pullop, kwargs)
            depth = opts.get('depth')
            if depth:
                kwargs[b'depth'] = depth

        extraprepare_ctx = extensions.wrappedfunction(
            exchange, b'_pullbundle2extraprepare', _inject_depth
        )

    with extraprepare_ctx:
        return orig(ui, repo, *args, **opts)
def archivenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps archive command to narrow the default includes."""
    if requirements.NARROW_REQUIREMENT in repo.requirements:
        repo_includes, repo_excludes = repo.narrowpats
        user_includes = set(opts.get('include', []))
        user_excludes = set(opts.get('exclude', []))
        # Restrict the user's patterns to what the repo's narrowspec allows.
        narrowed_inc, narrowed_exc, unused_invalid = narrowspec.restrictpatterns(
            user_includes, user_excludes, repo_includes, repo_excludes
        )
        if narrowed_inc:
            opts['include'] = narrowed_inc
        if narrowed_exc:
            opts['exclude'] = narrowed_exc
    return orig(ui, repo, *args, **opts)
def pullbundle2extraprepare(orig, pullop, kwargs):
    """Add the narrow-clone arguments to a bundle2 pull request.

    No-op for non-narrow repositories; aborts if the server lacks narrow
    support.
    """
    repo = pullop.repo
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return orig(pullop, kwargs)

    if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
        raise error.Abort(_(b"server does not support narrow clones"))
    orig(pullop, kwargs)
    kwargs[b'narrow'] = True
    include, exclude = repo.narrowpats
    kwargs[b'oldincludepats'] = include
    kwargs[b'oldexcludepats'] = exclude
    if include:
        kwargs[b'includepats'] = include
    if exclude:
        kwargs[b'excludepats'] = exclude
    # calculate known nodes only in ellipses cases because in non-ellipses cases
    # we have all the nodes
    if wireprototypes.ELLIPSESCAP1 in pullop.remote.capabilities():
        kwargs[b'known'] = [
            hex(ctx.node())
            for ctx in repo.set(b'::%ln', pullop.common)
            if ctx.node() != repo.nullid
        ]
        if not kwargs[b'known']:
            # Mercurial serializes an empty list as '' and deserializes it as
            # [''], so delete it instead to avoid handling the empty string on
            # the server.
            del kwargs[b'known']
# Install the narrow-aware bundle2 preparation module-wide so that every pull
# performed by this process sends the narrow arguments.
extensions.wrapfunction(
    exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare
)
def _narrow(
    ui,
    repo,
    remote,
    commoninc,
    oldincludes,
    oldexcludes,
    newincludes,
    newexcludes,
    force,
    backup,
):
    """Shrink the narrowspec.

    Strips local-only changesets that touch newly-excluded files (refusing
    unless *force* is set), deletes the revlogs that fall outside the new
    patterns, records the new narrowspec, and updates the working copy.
    """
    oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    # This is essentially doing "hg outgoing" to find all local-only
    # commits. We will then check that the local-only commits don't
    # have any changes to files that will be untracked.
    unfi = repo.unfiltered()
    outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
    ui.status(_(b'looking for local changes to affected paths\n'))
    progress = ui.makeprogress(
        topic=_(b'changesets'),
        unit=_(b'changesets'),
        total=len(outgoing.missing) + len(outgoing.excluded),
    )
    localnodes = []
    with progress:
        for n in itertools.chain(outgoing.missing, outgoing.excluded):
            progress.increment()
            # A node must be stripped if it changes a file that the new
            # narrowspec no longer matches.
            if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
                localnodes.append(n)
    revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
    hiddenrevs = repoview.filterrevs(repo, b'visible')
    visibletostrip = list(
        repo.changelog.node(r) for r in (revstostrip - hiddenrevs)
    )
    if visibletostrip:
        ui.status(
            _(
                b'The following changeset(s) or their ancestors have '
                b'local changes not on the remote:\n'
            )
        )
        maxnodes = 10
        if ui.verbose or len(visibletostrip) <= maxnodes:
            for n in visibletostrip:
                ui.status(b'%s\n' % short(n))
        else:
            for n in visibletostrip[:maxnodes]:
                ui.status(b'%s\n' % short(n))
            ui.status(
                _(b'...and %d more, use --verbose to list all\n')
                % (len(visibletostrip) - maxnodes)
            )
        if not force:
            raise error.StateError(
                _(b'local changes found'),
                hint=_(b'use --force-delete-local-changes to ignore'),
            )

    with ui.uninterruptible():
        if revstostrip:
            tostrip = [unfi.changelog.node(r) for r in revstostrip]
            if repo[b'.'].node() in tostrip:
                # stripping working copy, so move to a different commit first
                urev = max(
                    repo.revs(
                        b'(::%n) - %ln + null',
                        repo[b'.'].node(),
                        visibletostrip,
                    )
                )
                hg.clean(repo, urev)
            overrides = {(b'devel', b'strip-obsmarkers'): False}
            if backup:
                ui.status(_(b'moving unwanted changesets to backup\n'))
            else:
                ui.status(_(b'deleting unwanted changesets\n'))
            with ui.configoverride(overrides, b'narrow'):
                repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)

        todelete = []
        for t, f, f2, size in repo.store.datafiles():
            if f.startswith(b'data/'):
                # filelog: strip the 'data/' prefix and the '.i'/'.d' suffix.
                file = f[5:-2]
                if not newmatch(file):
                    todelete.append(f)
            elif f.startswith(b'meta/'):
                # tree manifest: strip 'meta/' prefix and '/00manifest.i'.
                dir = f[5:-13]
                dirs = sorted(pathutil.dirs({dir})) + [dir]
                include = True
                for d in dirs:
                    visit = newmatch.visitdir(d)
                    if not visit:
                        include = False
                        break
                    if visit == b'all':
                        break
                if not include:
                    todelete.append(f)

        repo.destroying()

        with repo.transaction(b'narrowing'):
            # Update narrowspec before removing revlogs, so repo won't be
            # corrupt in case of crash
            repo.setnarrowpats(newincludes, newexcludes)

            for f in todelete:
                ui.status(_(b'deleting %s\n') % f)
                util.unlinkpath(repo.svfs.join(f))
                repo.store.markremoved(f)

        ui.status(_(b'deleting unwanted files from working copy\n'))
        with repo.dirstate.parentchange():
            narrowspec.updateworkingcopy(repo, assumeclean=True)
            narrowspec.copytoworkingcopy(repo)

        repo.destroyed()
def _widen(
    ui,
    repo,
    remote,
    commoninc,
    oldincludes,
    oldexcludes,
    newincludes,
    newexcludes,
):
    """Grow the narrowspec and fetch the newly-included file history from
    *remote*, handling both old- and new-style ellipses servers."""
    # for now we assume that if a server has ellipses enabled, we will be
    # exchanging ellipses nodes. In future we should add ellipses as a client
    # side requirement (maybe) to distinguish a client is shallow or not and
    # then send that information to server whether we want ellipses or not.
    # Theoretically a non-ellipses repo should be able to use narrow
    # functionality from an ellipses enabled server
    remotecap = remote.capabilities()
    ellipsesremote = any(
        cap in remotecap for cap in wireprototypes.SUPPORTED_ELLIPSESCAP
    )

    # check whether we are talking to a server which supports old version of
    # ellipses capabilities
    isoldellipses = (
        ellipsesremote
        and wireprototypes.ELLIPSESCAP1 in remotecap
        and wireprototypes.ELLIPSESCAP not in remotecap
    )

    def pullbundle2extraprepare_widen(orig, pullop, kwargs):
        orig(pullop, kwargs)
        # The old{in,ex}cludepats have already been set by orig()
        kwargs[b'includepats'] = newincludes
        kwargs[b'excludepats'] = newexcludes

    wrappedextraprepare = extensions.wrappedfunction(
        exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
    )

    # define a function that narrowbundle2 can call after creating the
    # backup bundle, but before applying the bundle from the server
    def setnewnarrowpats():
        repo.setnarrowpats(newincludes, newexcludes)

    repo.setnewnarrowpats = setnewnarrowpats
    # silence the devel-warning of applying an empty changegroup
    overrides = {(b'devel', b'all-warnings'): False}

    common = commoninc[0]
    with ui.uninterruptible():
        if ellipsesremote:
            # Temporarily park the dirstate on null parents while the pulled
            # ellipses bundle is applied; restored below.
            ds = repo.dirstate
            p1, p2 = ds.p1(), ds.p2()
            with ds.parentchange():
                ds.setparents(repo.nullid, repo.nullid)
        if isoldellipses:
            with wrappedextraprepare:
                exchange.pull(repo, remote, heads=common)
        else:
            known = []
            if ellipsesremote:
                known = [
                    ctx.node()
                    for ctx in repo.set(b'::%ln', common)
                    if ctx.node() != repo.nullid
                ]
            with remote.commandexecutor() as e:
                bundle = e.callcommand(
                    b'narrow_widen',
                    {
                        b'oldincludes': oldincludes,
                        b'oldexcludes': oldexcludes,
                        b'newincludes': newincludes,
                        b'newexcludes': newexcludes,
                        b'cgversion': b'03',
                        b'commonheads': common,
                        b'known': known,
                        b'ellipses': ellipsesremote,
                    },
                ).result()

            trmanager = exchange.transactionmanager(
                repo, b'widen', remote.url()
            )
            with trmanager, repo.ui.configoverride(overrides, b'widen'):
                op = bundle2.bundleoperation(
                    repo, trmanager.transaction, source=b'widen'
                )
                # TODO: we should catch error.Abort here
                bundle2.processbundle(repo, bundle, op=op)

        if ellipsesremote:
            with ds.parentchange():
                ds.setparents(p1, p2)

        with repo.transaction(b'widening'), repo.dirstate.parentchange():
            repo.setnewnarrowpats()
            narrowspec.updateworkingcopy(repo)
            narrowspec.copytoworkingcopy(repo)
# TODO(rdamazio): Make new matcher format and update description
@command(
    b'tracked',
    [
        (b'', b'addinclude', [], _(b'new paths to include')),
        (b'', b'removeinclude', [], _(b'old paths to no longer include')),
        (
            b'',
            b'auto-remove-includes',
            False,
            _(b'automatically choose unused includes to remove'),
        ),
        (b'', b'addexclude', [], _(b'new paths to exclude')),
        (b'', b'import-rules', b'', _(b'import narrowspecs from a file')),
        (b'', b'removeexclude', [], _(b'old paths to no longer exclude')),
        (
            b'',
            b'clear',
            False,
            _(b'whether to replace the existing narrowspec'),
        ),
        (
            b'',
            b'force-delete-local-changes',
            False,
            _(b'forces deletion of local changes when narrowing'),
        ),
        (
            b'',
            b'backup',
            True,
            _(b'back up local changes when narrowing'),
        ),
        (
            b'',
            b'update-working-copy',
            False,
            _(b'update working copy when the store has changed'),
        ),
    ]
    + commands.remoteopts,
    _(b'[OPTIONS]... [REMOTE]'),
    inferrepo=True,
    helpcategory=command.CATEGORY_MAINTENANCE,
)
def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
    """show or change the current narrowspec

    With no argument, shows the current narrowspec entries, one per line. Each
    line will be prefixed with 'I' or 'X' for included or excluded patterns,
    respectively.

    The narrowspec is comprised of expressions to match remote files and/or
    directories that should be pulled into your client.

    The narrowspec has *include* and *exclude* expressions, with excludes always
    trumping includes: that is, if a file matches an exclude expression, it will
    be excluded even if it also matches an include expression.
    Excluding files that were never included has no effect.

    Each included or excluded entry is in the format described by
    'hg help patterns'.

    The options allow you to add or remove included and excluded expressions.

    If --clear is specified, then all previous includes and excludes are DROPPED
    and replaced by the new ones specified to --addinclude and --addexclude.
    If --clear is specified without any further options, the narrowspec will be
    empty and will not match any files.

    If --auto-remove-includes is specified, then those includes that don't match
    any files modified by currently visible local commits (those not shared by
    the remote) will be added to the set of explicitly specified includes to
    remove.

    --import-rules accepts a path to a file containing rules, allowing you to
    add --addinclude, --addexclude rules in bulk. Like the other include and
    exclude switches, the changes are applied immediately.
    """
    opts = pycompat.byteskwargs(opts)
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        raise error.InputError(
            _(
                b'the tracked command is only supported on '
                b'repositories cloned with --narrow'
            )
        )

    # Before supporting, decide whether it "hg tracked --clear" should mean
    # tracking no paths or all paths.
    if opts[b'clear']:
        raise error.InputError(_(b'the --clear option is not yet supported'))

    # import rules from a file
    newrules = opts.get(b'import_rules')
    if newrules:
        try:
            filepath = os.path.join(encoding.getcwd(), newrules)
            fdata = util.readfile(filepath)
        except IOError as inst:
            raise error.StorageError(
                _(b"cannot read narrowspecs from '%s': %s")
                % (filepath, encoding.strtolocal(inst.strerror))
            )
        includepats, excludepats, profiles = sparse.parseconfig(
            ui, fdata, b'narrow'
        )
        if profiles:
            raise error.InputError(
                _(
                    b"including other spec files using '%include' "
                    b"is not supported in narrowspec"
                )
            )
        opts[b'addinclude'].extend(includepats)
        opts[b'addexclude'].extend(excludepats)

    addedincludes = narrowspec.parsepatterns(opts[b'addinclude'])
    removedincludes = narrowspec.parsepatterns(opts[b'removeinclude'])
    addedexcludes = narrowspec.parsepatterns(opts[b'addexclude'])
    removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude'])
    autoremoveincludes = opts[b'auto_remove_includes']

    update_working_copy = opts[b'update_working_copy']
    only_show = not (
        addedincludes
        or removedincludes
        or addedexcludes
        or removedexcludes
        or newrules
        or autoremoveincludes
        or update_working_copy
    )

    oldincludes, oldexcludes = repo.narrowpats

    # filter the user passed additions and deletions into actual additions and
    # deletions of excludes and includes
    addedincludes -= oldincludes
    removedincludes &= oldincludes
    addedexcludes -= oldexcludes
    removedexcludes &= oldexcludes

    widening = addedincludes or removedexcludes
    narrowing = removedincludes or addedexcludes

    # Only print the current narrowspec.
    if only_show:
        ui.pager(b'tracked')
        fm = ui.formatter(b'narrow', opts)
        for i in sorted(oldincludes):
            fm.startitem()
            fm.write(b'status', b'%s ', b'I', label=b'narrow.included')
            fm.write(b'pat', b'%s\n', i, label=b'narrow.included')
        for i in sorted(oldexcludes):
            fm.startitem()
            fm.write(b'status', b'%s ', b'X', label=b'narrow.excluded')
            fm.write(b'pat', b'%s\n', i, label=b'narrow.excluded')
        fm.end()
        return 0

    if update_working_copy:
        with repo.wlock(), repo.lock(), repo.transaction(
            b'narrow-wc'
        ), repo.dirstate.parentchange():
            narrowspec.updateworkingcopy(repo)
            narrowspec.copytoworkingcopy(repo)
        return 0

    if not (widening or narrowing or autoremoveincludes):
        ui.status(_(b"nothing to widen or narrow\n"))
        return 0

    with repo.wlock(), repo.lock():
        cmdutil.bailifchanged(repo)

        # Find the revisions we have in common with the remote. These will
        # be used for finding local-only changes for narrowing. They will
        # also define the set of revisions to update for widening.
        r = urlutil.get_unique_pull_path(b'tracked', repo, ui, remotepath)
        url, branches = r
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        remote = hg.peer(repo, opts, url)

        try:
            # check narrow support before doing anything if widening needs to be
            # performed. In future we should also abort if client is ellipses and
            # server does not support ellipses
            if (
                widening
                and wireprototypes.NARROWCAP not in remote.capabilities()
            ):
                raise error.Abort(_(b"server does not support narrow clones"))

            commoninc = discovery.findcommonincoming(repo, remote)

            if autoremoveincludes:
                outgoing = discovery.findcommonoutgoing(
                    repo, remote, commoninc=commoninc
                )
                ui.status(_(b'looking for unused includes to remove\n'))
                localfiles = set()
                for n in itertools.chain(outgoing.missing, outgoing.excluded):
                    localfiles.update(repo[n].files())
                suggestedremovals = []
                for include in sorted(oldincludes):
                    match = narrowspec.match(repo.root, [include], oldexcludes)
                    if not any(match(f) for f in localfiles):
                        suggestedremovals.append(include)
                if suggestedremovals:
                    for s in suggestedremovals:
                        ui.status(b'%s\n' % s)
                    if (
                        ui.promptchoice(
                            _(
                                b'remove these unused includes (yn)?'
                                b'$$ &Yes $$ &No'
                            )
                        )
                        == 0
                    ):
                        removedincludes.update(suggestedremovals)
                        narrowing = True
                else:
                    ui.status(_(b'found no unused includes\n'))

            if narrowing:
                newincludes = oldincludes - removedincludes
                newexcludes = oldexcludes | addedexcludes
                _narrow(
                    ui,
                    repo,
                    remote,
                    commoninc,
                    oldincludes,
                    oldexcludes,
                    newincludes,
                    newexcludes,
                    opts[b'force_delete_local_changes'],
                    opts[b'backup'],
                )
                # _narrow() updated the narrowspec and _widen() below needs to
                # use the updated values as its base (otherwise removed includes
                # and addedexcludes will be lost in the resulting narrowspec)
                oldincludes = newincludes
                oldexcludes = newexcludes

            if widening:
                newincludes = oldincludes | addedincludes
                newexcludes = oldexcludes - removedexcludes
                _widen(
                    ui,
                    repo,
                    remote,
                    commoninc,
                    oldincludes,
                    oldexcludes,
                    newincludes,
                    newexcludes,
                )
        finally:
            remote.close()

    return 0
|
smmribeiro/intellij-community
|
plugins/hg4idea/testData/bin/hgext/narrow/narrowcommands.py
|
Python
|
apache-2.0
| 24,400
|
[
"VisIt"
] |
fa215b4cb45706991f416726c350483c910a6c9ae9e8b0ba64befc5d517e5cba
|
import errno
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import threading
import warnings
import inspect
import pickle
import weakref
from itertools import chain
from io import StringIO
import numpy as np
from numba import njit, jit, generated_jit, typeof
from numba.core import types, errors, codegen
from numba import _dispatcher
from numba.core.compiler import compile_isolated
from numba.core.errors import NumbaWarning
from numba.tests.support import (TestCase, temp_directory, import_dynamic,
override_env_config, capture_cache_log,
captured_stdout)
from numba.np.numpy_support import as_dtype
from numba.core.caching import _UserWideCacheLocator
from numba.core.dispatcher import Dispatcher
from numba.tests.support import (skip_parfors_unsupported, needs_lapack,
SerialMixin)
from numba.testing.main import _TIMEOUT as _RUNNER_TIMEOUT
import llvmlite.binding as ll
import unittest
from numba.parfors import parfor
# Per-test timeout, derived from the global runner timeout with a safety margin.
_TEST_TIMEOUT = _RUNNER_TIMEOUT - 60.
# Optional dependencies used only by the "pretty" inspect_types tests;
# those tests are skipped when either is missing.
try:
    import jinja2
except ImportError:
    jinja2 = None
try:
    import pygments
except ImportError:
    pygments = None
# armv7l cannot perform unaligned loads; several dispatch tests skip there.
_is_armv7l = platform.machine() == 'armv7l'
def dummy(x):
    """Identity helper: return *x* unchanged."""
    result = x
    return result
def add(x, y):
    """Return the sum ``x + y``."""
    total = x + y
    return total
def addsub(x, y, z):
    """Return ``x - y + z``."""
    partial = x - y
    return partial + z
def addsub_defaults(x, y=2, z=3):
    """Return ``x - y + z``, with defaults ``y=2`` and ``z=3``."""
    partial = x - y
    return partial + z
def star_defaults(x, y=2, *z):
    """Return the tuple ``(x, y, z)`` where *z* collects extra positionals."""
    collected = (x, y, z)
    return collected
def generated_usecase(x, y=5):
    """Implementation selector for @generated_jit tests.

    Returns an implementation that adds when *x* is a complex type and
    subtracts otherwise.  Both implementations share the parameter list
    ``(x, y)`` so they stay compatible with the outer signature.
    """
    def add_impl(x, y):
        return x + y

    def sub_impl(x, y):
        return x - y

    return add_impl if isinstance(x, types.Complex) else sub_impl
def bad_generated_usecase(x, y=5):
    """Selector whose implementations deliberately mismatch the outer
    signature ``(x, y=5)``: wrong arity for complex *x*, wrong default
    otherwise.  Used to test @generated_jit signature error reporting.
    """
    def complex_impl(x):
        return x

    def other_impl(x, y=6):
        return x - y

    if isinstance(x, types.Complex):
        return complex_impl
    return other_impl
def dtype_generated_usecase(a, b, dtype=None):
    """@generated_jit usecase dispatching on an optional ``dtype`` argument.

    When ``dtype`` is omitted (NoneType/Omitted), the output dtype is the
    NumPy promotion of the two array dtypes; when a DType/NumberClass is
    passed, it is used directly.  Any other type raises TypeError.
    """
    if isinstance(dtype, (types.misc.NoneType, types.misc.Omitted)):
        # Promote the element types of both input arrays.
        out_dtype = np.result_type(*(np.dtype(ary.dtype.name)
                                     for ary in (a, b)))
    elif isinstance(dtype, (types.DType, types.NumberClass)):
        out_dtype = as_dtype(dtype)
    else:
        raise TypeError("Unhandled Type %s" % type(dtype))

    def _fn(a, b, dtype=None):
        return np.ones(a.shape, dtype=out_dtype)

    return _fn
class BaseTest(TestCase):
    """Shared helper for dispatcher tests.

    Subclasses may override ``jit_args`` (e.g. ``forceobj=True``) to run
    the same test bodies under a different compilation mode.
    """
    # Keyword arguments forwarded to @jit when compiling test functions.
    jit_args = dict(nopython=True)

    def compile_func(self, pyfunc):
        """Jit-compile *pyfunc*; return ``(compiled, check)`` where
        ``check(*args)`` asserts the compiled result matches the pure
        Python result exactly."""
        def check(*args, **kwargs):
            expected = pyfunc(*args, **kwargs)
            result = f(*args, **kwargs)
            self.assertPreciseEqual(result, expected)
        f = jit(**self.jit_args)(pyfunc)
        return f, check
def check_access_is_preventable():
    """Probe whether ``chmod 500`` actually blocks writes for this user.

    Users with elevated rights (e.g. root) can usually write regardless of
    permissions, so tests that rely on working access prevention are
    skipped when this returns False.
    """
    base = temp_directory('test_cache')
    probe_dir = os.path.join(base, 'writable_test')
    os.mkdir(probe_dir)
    # Assume prevention is impossible until proven otherwise.
    prevented = False
    # Sanity-check that a write succeeds before access is revoked.
    with open(os.path.join(probe_dir, 'write_ok'), 'wt') as fh:
        fh.write('check1')
    # Revoke write access and see whether a write now fails.
    os.chmod(probe_dir, 0o500)
    try:
        with open(os.path.join(probe_dir, 'write_forbidden'), 'wt') as fh:
            fh.write('check2')
    except (OSError, IOError) as exc:
        # Only an access/permission errno proves that `chmod 500` is
        # effective for this user, as per
        # https://github.com/conda/conda/blob/4.5.0/conda/gateways/disk/permissions.py#L35-L37 # noqa: E501
        if getattr(exc, 'errno', None) in (errno.EACCES, errno.EPERM):
            prevented = True
    finally:
        # Restore permissions so the temporary directory can be removed.
        os.chmod(probe_dir, 0o775)
        shutil.rmtree(probe_dir)
    return prevented
# Whether this environment can actually forbid writes via chmod 500; tests
# that need working access prevention are skipped otherwise (e.g. when
# running as root).
_access_preventable = check_access_is_preventable()
_access_msg = "Cannot create a directory to which writes are preventable"
skip_bad_access = unittest.skipUnless(_access_preventable, _access_msg)
class TestDispatcher(BaseTest):
    """Core Dispatcher behaviour: equality, specialization and dispatch,
    compilation locking, serialization, and array-layout handling.

    Note: the deprecated ``assertRegexpMatches`` alias (removed in Python
    3.12) has been replaced with ``assertRegex`` below.
    """

    def test_equality(self):
        @jit
        def foo(x):
            return x

        @jit
        def bar(x):
            return x

        # Written this way to verify `==` returns a bool (gh-5838). Using
        # `assertTrue(foo == foo)` or `assertEqual(foo, foo)` would defeat the
        # purpose of this test.
        self.assertEqual(foo == foo, True)
        self.assertEqual(foo == bar, False)
        self.assertEqual(foo == None, False)  # noqa: E711

    def test_dyn_pyfunc(self):
        @jit
        def foo(x):
            return x

        foo(1)
        [cr] = foo.overloads.values()
        # __module__ must be match that of foo
        self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__)

    def test_no_argument(self):
        @jit
        def foo():
            return 1

        # Just make sure this doesn't crash
        foo()

    def test_coerce_input_types(self):
        # Issue #486: do not allow unsafe conversions if we can still
        # compile other specializations.
        c_add = jit(nopython=True)(add)
        self.assertPreciseEqual(c_add(123, 456), add(123, 456))
        self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
        self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
        self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))

        # Now force compilation of only a single specialization
        c_add = jit('(i4, i4)', nopython=True)(add)
        self.assertPreciseEqual(c_add(123, 456), add(123, 456))
        # Implicit (unsafe) conversion of float to int
        self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
        with self.assertRaises(TypeError):
            # Implicit conversion of complex to int disallowed
            c_add(12.3, 45.6j)

    def test_ambiguous_new_version(self):
        """Test compiling new version in an ambiguous case
        """
        @jit
        def foo(a, b):
            return a + b

        INT = 1
        FLT = 1.5
        self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
        self.assertEqual(len(foo.overloads), 1)
        self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
        self.assertEqual(len(foo.overloads), 2)
        self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
        self.assertEqual(len(foo.overloads), 3)
        # The following call is ambiguous because (int, int) can resolve
        # to (float, int) or (int, float) with equal weight.
        self.assertAlmostEqual(foo(1, 1), INT + INT)
        self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
                                                "version")

    def test_lock(self):
        """
        Test that (lazy) compiling from several threads at once doesn't
        produce errors (see issue #908).
        """
        errors = []

        @jit
        def foo(x):
            return x + 1

        def wrapper():
            try:
                self.assertEqual(foo(1), 2)
            except Exception as e:
                errors.append(e)

        threads = [threading.Thread(target=wrapper) for i in range(16)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        self.assertFalse(errors)

    def test_explicit_signatures(self):
        f = jit("(int64,int64)")(add)
        # Approximate match (unsafe conversion)
        self.assertPreciseEqual(f(1.5, 2.5), 3)
        self.assertEqual(len(f.overloads), 1, f.overloads)
        f = jit(["(int64,int64)", "(float64,float64)"])(add)
        # Exact signature matches
        self.assertPreciseEqual(f(1, 2), 3)
        self.assertPreciseEqual(f(1.5, 2.5), 4.0)
        # Approximate match (int32 -> float64 is a safe conversion)
        self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
        # No conversion
        with self.assertRaises(TypeError) as cm:
            f(1j, 1j)
        self.assertIn("No matching definition", str(cm.exception))
        self.assertEqual(len(f.overloads), 2, f.overloads)
        # A more interesting one...
        f = jit(["(float32,float32)", "(float64,float64)"])(add)
        self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
        self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
        # Fail to resolve ambiguity between the two best overloads
        f = jit(["(float32,float64)",
                 "(float64,float32)",
                 "(int64,int64)"])(add)
        with self.assertRaises(TypeError) as cm:
            f(1.0, 2.0)
        # The two best matches are output in the error message, as well
        # as the actual argument types.
        # NOTE: assertRegex replaces the assertRegexpMatches alias that was
        # removed in Python 3.12.
        self.assertRegex(
            str(cm.exception),
            r"Ambiguous overloading for <function add [^>]*> "
            r"\(float64, float64\):\n"
            r"\(float32, float64\) -> float64\n"
            r"\(float64, float32\) -> float64"
        )
        # The integer signature is not part of the best matches
        self.assertNotIn("int64", str(cm.exception))

    def test_signature_mismatch(self):
        tmpl = ("Signature mismatch: %d argument types given, but function "
                "takes 2 arguments")
        with self.assertRaises(TypeError) as cm:
            jit("()")(add)
        self.assertIn(tmpl % 0, str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            jit("(intc,)")(add)
        self.assertIn(tmpl % 1, str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            jit("(intc,intc,intc)")(add)
        self.assertIn(tmpl % 3, str(cm.exception))
        # With forceobj=True, an empty tuple is accepted
        jit("()", forceobj=True)(add)
        with self.assertRaises(TypeError) as cm:
            jit("(intc,)", forceobj=True)(add)
        self.assertIn(tmpl % 1, str(cm.exception))

    def test_matching_error_message(self):
        f = jit("(intc,intc)")(add)
        with self.assertRaises(TypeError) as cm:
            f(1j, 1j)
        self.assertEqual(str(cm.exception),
                         "No matching definition for argument type(s) "
                         "complex128, complex128")

    def test_disabled_compilation(self):
        @jit
        def foo(a):
            return a

        foo.compile("(float32,)")
        foo.disable_compile()
        with self.assertRaises(RuntimeError) as raises:
            foo.compile("(int32,)")
        self.assertEqual(str(raises.exception), "compilation disabled")
        self.assertEqual(len(foo.signatures), 1)

    def test_disabled_compilation_through_list(self):
        @jit(["(float32,)", "(int32,)"])
        def foo(a):
            return a

        with self.assertRaises(RuntimeError) as raises:
            foo.compile("(complex64,)")
        self.assertEqual(str(raises.exception), "compilation disabled")
        self.assertEqual(len(foo.signatures), 2)

    def test_disabled_compilation_nested_call(self):
        @jit(["(intp,)"])
        def foo(a):
            return a

        @jit
        def bar():
            foo(1)
            foo(np.ones(1))  # no matching definition

        with self.assertRaises(TypeError) as raises:
            bar()
        m = "No matching definition for argument type(s) array(float64, 1d, C)"
        self.assertEqual(str(raises.exception), m)

    def test_fingerprint_failure(self):
        """
        Failure in computing the fingerprint cannot affect a nopython=False
        function. On the other hand, with nopython=True, a ValueError should
        be raised to report the failure with fingerprint.
        """
        @jit
        def foo(x):
            return x

        # Empty list will trigger failure in compile_fingerprint
        errmsg = 'cannot compute fingerprint of empty list'
        with self.assertRaises(ValueError) as raises:
            _dispatcher.compute_fingerprint([])
        self.assertIn(errmsg, str(raises.exception))
        # It should work in fallback
        self.assertEqual(foo([]), [])
        # But, not in nopython=True
        strict_foo = jit(nopython=True)(foo.py_func)
        with self.assertRaises(ValueError) as raises:
            strict_foo([])
        self.assertIn(errmsg, str(raises.exception))

        # Test in loop lifting context
        @jit
        def bar():
            object()  # force looplifting
            x = []
            for i in range(10):
                x = foo(x)
            return x

        self.assertEqual(bar(), [])
        # Make sure it was looplifted
        [cr] = bar.overloads.values()
        self.assertEqual(len(cr.lifted), 1)

    def test_serialization(self):
        """
        Test serialization of Dispatcher objects
        """
        @jit(nopython=True)
        def foo(x):
            return x + 1

        self.assertEqual(foo(1), 2)

        # get serialization memo
        memo = Dispatcher._memo
        Dispatcher._recent.clear()
        memo_size = len(memo)

        # pickle foo and check memo size
        serialized_foo = pickle.dumps(foo)
        # increases the memo size
        self.assertEqual(memo_size + 1, len(memo))

        # unpickle
        foo_rebuilt = pickle.loads(serialized_foo)
        self.assertEqual(memo_size + 1, len(memo))

        self.assertIs(foo, foo_rebuilt)

        # do we get the same object even if we delete all the explicit
        # references?
        id_orig = id(foo_rebuilt)
        del foo
        del foo_rebuilt
        self.assertEqual(memo_size + 1, len(memo))
        new_foo = pickle.loads(serialized_foo)
        self.assertEqual(id_orig, id(new_foo))

        # now clear the recent cache
        ref = weakref.ref(new_foo)
        del new_foo
        Dispatcher._recent.clear()
        self.assertEqual(memo_size, len(memo))

        # show that deserializing creates a new object
        pickle.loads(serialized_foo)
        self.assertIs(ref(), None)

    @needs_lapack
    @unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
    def test_misaligned_array_dispatch(self):
        # for context see issue #2937
        def foo(a):
            return np.linalg.matrix_power(a, 1)

        jitfoo = jit(nopython=True)(foo)

        n = 64
        r = int(np.sqrt(n))
        dt = np.int8
        count = np.complex128().itemsize // dt().itemsize
        tmp = np.arange(n * count + 1, dtype=dt)

        # create some arrays as Cartesian production of:
        # [F/C] x [aligned/misaligned]
        C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
        C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
        F_contig_aligned = C_contig_aligned.T
        F_contig_misaligned = C_contig_misaligned.T

        # checking routine
        def check(name, a):
            a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
            expected = foo(a)
            got = jitfoo(a)
            np.testing.assert_allclose(expected, got)

        # The checks must be run in this order to create the dispatch key
        # sequence that causes invalid dispatch noted in #2937.
        # The first two should hit the cache as they are aligned, supported
        # order and under 5 dimensions. The second two should end up in the
        # fallback path as they are misaligned.
        check("C_contig_aligned", C_contig_aligned)
        check("F_contig_aligned", F_contig_aligned)
        check("C_contig_misaligned", C_contig_misaligned)
        check("F_contig_misaligned", F_contig_misaligned)

    @unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
    def test_immutability_in_array_dispatch(self):
        # RO operation in function
        def foo(a):
            return np.sum(a)

        jitfoo = jit(nopython=True)(foo)

        n = 64
        r = int(np.sqrt(n))
        dt = np.int8
        count = np.complex128().itemsize // dt().itemsize
        tmp = np.arange(n * count + 1, dtype=dt)

        # create some arrays as Cartesian production of:
        # [F/C] x [aligned/misaligned]
        C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
        C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
        F_contig_aligned = C_contig_aligned.T
        F_contig_misaligned = C_contig_misaligned.T

        # checking routine
        def check(name, a, disable_write_bit=False):
            a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
            if disable_write_bit:
                a.flags.writeable = False
            expected = foo(a)
            got = jitfoo(a)
            np.testing.assert_allclose(expected, got)

        # all of these should end up in the fallback path as they have no write
        # bit set
        check("C_contig_aligned", C_contig_aligned, disable_write_bit=True)
        check("F_contig_aligned", F_contig_aligned, disable_write_bit=True)
        check("C_contig_misaligned", C_contig_misaligned,
              disable_write_bit=True)
        check("F_contig_misaligned", F_contig_misaligned,
              disable_write_bit=True)

    @needs_lapack
    @unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
    def test_misaligned_high_dimension_array_dispatch(self):

        def foo(a):
            return np.linalg.matrix_power(a[0, 0, 0, 0, :, :], 1)

        jitfoo = jit(nopython=True)(foo)

        def check_properties(arr, layout, aligned):
            self.assertEqual(arr.flags.aligned, aligned)
            if layout == "C":
                self.assertEqual(arr.flags.c_contiguous, True)
            if layout == "F":
                self.assertEqual(arr.flags.f_contiguous, True)

        n = 729
        r = 3
        dt = np.int8
        count = np.complex128().itemsize // dt().itemsize
        tmp = np.arange(n * count + 1, dtype=dt)

        # create some arrays as Cartesian production of:
        # [F/C] x [aligned/misaligned]
        C_contig_aligned = tmp[:-1].view(np.complex128).\
            reshape(r, r, r, r, r, r)
        check_properties(C_contig_aligned, 'C', True)
        C_contig_misaligned = tmp[1:].view(np.complex128).\
            reshape(r, r, r, r, r, r)
        check_properties(C_contig_misaligned, 'C', False)
        F_contig_aligned = C_contig_aligned.T
        check_properties(F_contig_aligned, 'F', True)
        F_contig_misaligned = C_contig_misaligned.T
        check_properties(F_contig_misaligned, 'F', False)

        # checking routine
        def check(name, a):
            a[:, :] = np.arange(n, dtype=np.complex128).\
                reshape(r, r, r, r, r, r)
            expected = foo(a)
            got = jitfoo(a)
            np.testing.assert_allclose(expected, got)

        # these should all hit the fallback path as the cache is only for up to
        # 5 dimensions
        check("F_contig_misaligned", F_contig_misaligned)
        check("C_contig_aligned", C_contig_aligned)
        check("F_contig_aligned", F_contig_aligned)
        check("C_contig_misaligned", C_contig_misaligned)

    def test_dispatch_recompiles_for_scalars(self):
        # for context #3612, essentially, compiling a lambda x:x for a
        # numerically wide type (everything can be converted to a complex128)
        # and then calling again with e.g. an int32 would lead to the int32
        # being converted to a complex128 whereas it ought to compile an int32
        # specialization.
        def foo(x):
            return x

        # jit and compile on dispatch for 3 scalar types, expect 3 signatures
        jitfoo = jit(nopython=True)(foo)
        jitfoo(np.complex128(1 + 2j))
        jitfoo(np.int32(10))
        jitfoo(np.bool_(False))
        self.assertEqual(len(jitfoo.signatures), 3)
        expected_sigs = [(types.complex128,), (types.int32,), (types.bool_,)]
        self.assertEqual(jitfoo.signatures, expected_sigs)

        # now jit with signatures so recompilation is forbidden
        # expect 1 signature and type conversion
        jitfoo = jit([(types.complex128,)], nopython=True)(foo)
        jitfoo(np.complex128(1 + 2j))
        jitfoo(np.int32(10))
        jitfoo(np.bool_(False))
        self.assertEqual(len(jitfoo.signatures), 1)
        expected_sigs = [(types.complex128,)]
        self.assertEqual(jitfoo.signatures, expected_sigs)

    def test_dispatcher_raises_for_invalid_decoration(self):
        # For context see https://github.com/numba/numba/issues/4750.

        @jit(nopython=True)
        def foo(x):
            return x

        with self.assertRaises(TypeError) as raises:
            jit(foo)
        err_msg = str(raises.exception)
        self.assertIn(
            "A jit decorator was called on an already jitted function", err_msg)
        self.assertIn("foo", err_msg)
        self.assertIn(".py_func", err_msg)

        with self.assertRaises(TypeError) as raises:
            jit(BaseTest)
        err_msg = str(raises.exception)
        self.assertIn("The decorated object is not a function", err_msg)
        self.assertIn(f"{type(BaseTest)}", err_msg)
class TestSignatureHandling(BaseTest):
    """
    Test support for various parameter passing styles.
    """

    def test_named_args(self):
        """
        Test passing named arguments to a dispatcher.
        """
        f, check = self.compile_func(addsub)
        check(3, z=10, y=4)
        check(3, 4, 10)
        check(x=3, y=4, z=10)
        # All calls above fall under the same specialization
        self.assertEqual(len(f.overloads), 1)
        # Errors
        with self.assertRaises(TypeError) as cm:
            f(3, 4, y=6, z=7)
        self.assertIn("too many arguments: expected 3, got 4",
                      str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            f()
        self.assertIn("not enough arguments: expected 3, got 0",
                      str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            f(3, 4, y=6)
        self.assertIn("missing argument 'z'", str(cm.exception))

    def test_default_args(self):
        """
        Test omitting arguments with a default value.
        """
        f, check = self.compile_func(addsub_defaults)
        check(3, z=10, y=4)
        check(3, 4, 10)
        check(x=3, y=4, z=10)
        # Now omitting some values
        check(3, z=10)
        check(3, 4)
        check(x=3, y=4)
        check(3)
        check(x=3)
        # Errors
        with self.assertRaises(TypeError) as cm:
            f(3, 4, y=6, z=7)
        self.assertIn("too many arguments: expected 3, got 4",
                      str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            f()
        self.assertIn("not enough arguments: expected at least 1, got 0",
                      str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            f(y=6, z=7)
        self.assertIn("missing argument 'x'", str(cm.exception))

    def test_star_args(self):
        """
        Test a compiled function with starargs in the signature.
        """
        f, check = self.compile_func(star_defaults)
        check(4)
        check(4, 5)
        check(4, 5, 6)
        check(4, 5, 6, 7)
        check(4, 5, 6, 7, 8)
        check(x=4)
        check(x=4, y=5)
        check(4, y=5)
        # Mixing keyword args with starargs is rejected by the dispatcher.
        with self.assertRaises(TypeError) as cm:
            f(4, 5, y=6)
        self.assertIn("some keyword arguments unexpected", str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            f(4, 5, z=6)
        self.assertIn("some keyword arguments unexpected", str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            f(4, x=6)
        self.assertIn("some keyword arguments unexpected", str(cm.exception))
class TestSignatureHandlingObjectMode(TestSignatureHandling):
    """
    Same as TestSignatureHandling, but in object mode.
    """
    # forceobj=True makes BaseTest.compile_func compile in object mode.
    jit_args = dict(forceobj=True)
class TestGeneratedDispatcher(TestCase):
    """
    Tests for @generated_jit.
    """

    def test_generated(self):
        """The selected implementation (add for complex, sub otherwise)
        must be dispatched per argument type, including keyword calls."""
        f = generated_jit(nopython=True)(generated_usecase)
        self.assertEqual(f(8), 8 - 5)
        self.assertEqual(f(x=8), 8 - 5)
        self.assertEqual(f(x=8, y=4), 8 - 4)
        self.assertEqual(f(1j), 5 + 1j)
        self.assertEqual(f(1j, 42), 42 + 1j)
        self.assertEqual(f(x=1j, y=7), 7 + 1j)

    def test_generated_dtype(self):
        """dtype selection: promoted when omitted, honoured when passed."""
        f = generated_jit(nopython=True)(dtype_generated_usecase)
        a = np.ones((10,), dtype=np.float32)
        b = np.ones((10,), dtype=np.float64)
        self.assertEqual(f(a, b).dtype, np.float64)
        self.assertEqual(f(a, b, dtype=np.dtype('int32')).dtype, np.int32)
        self.assertEqual(f(a, b, dtype=np.int32).dtype, np.int32)

    def test_signature_errors(self):
        """
        Check error reporting when implementation signature doesn't match
        generating function signature.
        """
        f = generated_jit(nopython=True)(bad_generated_usecase)
        # Mismatching # of arguments
        with self.assertRaises(TypeError) as raises:
            f(1j)
        self.assertIn("should be compatible with signature '(x, y=5)', "
                      "but has signature '(x)'",
                      str(raises.exception))
        # Mismatching defaults
        with self.assertRaises(TypeError) as raises:
            f(1)
        self.assertIn("should be compatible with signature '(x, y=5)', "
                      "but has signature '(x, y=6)'",
                      str(raises.exception))
class TestDispatcherMethods(TestCase):
    """Tests for Dispatcher introspection methods: recompile(),
    inspect_llvm(), inspect_asm(), inspect_cfg(), inspect_types() and
    get_annotation_info().

    Note: the deprecated ``assertRegexpMatches`` alias (removed in Python
    3.12) has been replaced with ``assertRegex``.
    """

    def test_recompile(self):
        closure = 1

        @jit
        def foo(x):
            return x + closure

        self.assertPreciseEqual(foo(1), 2)
        self.assertPreciseEqual(foo(1.5), 2.5)
        self.assertEqual(len(foo.signatures), 2)
        closure = 2
        self.assertPreciseEqual(foo(1), 2)
        # Recompiling takes the new closure into account.
        foo.recompile()
        # Everything was recompiled
        self.assertEqual(len(foo.signatures), 2)
        self.assertPreciseEqual(foo(1), 3)
        self.assertPreciseEqual(foo(1.5), 3.5)

    def test_recompile_signatures(self):
        # Same as above, but with an explicit signature on @jit.
        closure = 1

        @jit("int32(int32)")
        def foo(x):
            return x + closure

        self.assertPreciseEqual(foo(1), 2)
        self.assertPreciseEqual(foo(1.5), 2)
        closure = 2
        self.assertPreciseEqual(foo(1), 2)
        # Recompiling takes the new closure into account.
        foo.recompile()
        self.assertPreciseEqual(foo(1), 3)
        self.assertPreciseEqual(foo(1.5), 3)

    def test_inspect_llvm(self):
        # Create a jited function
        @jit
        def foo(explicit_arg1, explicit_arg2):
            return explicit_arg1 + explicit_arg2

        # Call it in a way to create 3 signatures
        foo(1, 1)
        foo(1.0, 1)
        foo(1.0, 1.0)

        # base call to get all llvm in a dict
        llvms = foo.inspect_llvm()
        self.assertEqual(len(llvms), 3)

        # make sure the function name shows up in the llvm
        for llvm_bc in llvms.values():
            # Look for the function name
            self.assertIn("foo", llvm_bc)

            # Look for the argument names
            self.assertIn("explicit_arg1", llvm_bc)
            self.assertIn("explicit_arg2", llvm_bc)

    def test_inspect_asm(self):
        # Create a jited function
        @jit
        def foo(explicit_arg1, explicit_arg2):
            return explicit_arg1 + explicit_arg2

        # Call it in a way to create 3 signatures
        foo(1, 1)
        foo(1.0, 1)
        foo(1.0, 1.0)

        # base call to get all llvm in a dict
        asms = foo.inspect_asm()
        self.assertEqual(len(asms), 3)

        # make sure the function name shows up in the llvm
        for asm in asms.values():
            # Look for the function name (assertIn for consistency with
            # test_inspect_llvm above)
            self.assertIn("foo", asm)

    def _check_cfg_display(self, cfg, wrapper=''):
        # simple stringify test
        if wrapper:
            wrapper = "{}{}".format(len(wrapper), wrapper)
        module_name = __name__.split('.', 1)[0]
        module_len = len(module_name)
        prefix = r'^digraph "CFG for \'_ZN{}{}{}'.format(wrapper,
                                                         module_len,
                                                         module_name)
        self.assertRegex(str(cfg), prefix)
        # .display() requires an optional dependency on `graphviz`.
        # just test for the attribute without running it.
        self.assertTrue(callable(cfg.display))

    def test_inspect_cfg(self):
        # Exercise the .inspect_cfg(). These are minimal tests and do not fully
        # check the correctness of the function.
        @jit
        def foo(the_array):
            return the_array.sum()

        # Generate 3 overloads
        a1 = np.ones(1)
        a2 = np.ones((1, 1))
        a3 = np.ones((1, 1, 1))
        foo(a1)
        foo(a2)
        foo(a3)

        # Call inspect_cfg() without arguments
        cfgs = foo.inspect_cfg()

        # Correct count of overloads
        self.assertEqual(len(cfgs), 3)

        # Makes sure all the signatures are correct
        [s1, s2, s3] = cfgs.keys()
        self.assertEqual(set([s1, s2, s3]),
                         set(map(lambda x: (typeof(x),), [a1, a2, a3])))

        for cfg in cfgs.values():
            self._check_cfg_display(cfg)
        self.assertEqual(len(list(cfgs.values())), 3)

        # Call inspect_cfg(signature)
        cfg = foo.inspect_cfg(signature=foo.signatures[0])
        self._check_cfg_display(cfg)

    def test_inspect_cfg_with_python_wrapper(self):
        # Exercise the .inspect_cfg() including the python wrapper.
        # These are minimal tests and do not fully check the correctness of
        # the function.
        @jit
        def foo(the_array):
            return the_array.sum()

        # Generate 3 overloads
        a1 = np.ones(1)
        a2 = np.ones((1, 1))
        a3 = np.ones((1, 1, 1))
        foo(a1)
        foo(a2)
        foo(a3)

        # Call inspect_cfg(signature, show_wrapper="python")
        cfg = foo.inspect_cfg(signature=foo.signatures[0],
                              show_wrapper="python")
        self._check_cfg_display(cfg, wrapper='cpython')

    def test_inspect_types(self):
        @jit
        def foo(a, b):
            return a + b

        foo(1, 2)
        # Exercise the method
        foo.inspect_types(StringIO())
        # Test output
        expected = str(foo.overloads[foo.signatures[0]].type_annotation)
        with captured_stdout() as out:
            foo.inspect_types()
        assert expected in out.getvalue()

    def test_inspect_types_with_signature(self):
        @jit
        def foo(a):
            return a + 1

        foo(1)
        foo(1.0)
        # Inspect all signatures
        with captured_stdout() as total:
            foo.inspect_types()
        # Inspect first signature
        with captured_stdout() as first:
            foo.inspect_types(signature=foo.signatures[0])
        # Inspect second signature
        with captured_stdout() as second:
            foo.inspect_types(signature=foo.signatures[1])
        self.assertEqual(total.getvalue(), first.getvalue() + second.getvalue())

    @unittest.skipIf(jinja2 is None, "please install the 'jinja2' package")
    @unittest.skipIf(pygments is None, "please install the 'pygments' package")
    def test_inspect_types_pretty(self):
        @jit
        def foo(a, b):
            return a + b

        foo(1, 2)

        # Exercise the method, dump the output
        with captured_stdout():
            ann = foo.inspect_types(pretty=True)

        # ensure HTML <span> is found in the annotation output
        for k, v in ann.ann.items():
            span_found = False
            for line in v['pygments_lines']:
                if 'span' in line[2]:
                    span_found = True
            self.assertTrue(span_found)

        # check that file+pretty kwarg combo raises
        with self.assertRaises(ValueError) as raises:
            foo.inspect_types(file=StringIO(), pretty=True)

        self.assertIn("`file` must be None if `pretty=True`",
                      str(raises.exception))

    def test_get_annotation_info(self):
        @jit
        def foo(a):
            return a + 1

        foo(1)
        foo(1.3)

        expected = dict(chain.from_iterable(foo.get_annotation_info(i).items()
                                            for i in foo.signatures))
        result = foo.get_annotation_info()
        self.assertEqual(expected, result)

    def test_issue_with_array_layout_conflict(self):
        """
        This test an issue with the dispatcher when an array that is both
        C and F contiguous is supplied as the first signature.
        The dispatcher checks for F contiguous first but the compiler checks
        for C contiguous first. This results in an C contiguous code inserted
        as F contiguous function.
        """
        def pyfunc(A, i, j):
            return A[i, j]

        cfunc = jit(pyfunc)

        ary_c_and_f = np.array([[1.]])
        ary_c = np.array([[0., 1.], [2., 3.]], order='C')
        ary_f = np.array([[0., 1.], [2., 3.]], order='F')

        exp_c = pyfunc(ary_c, 1, 0)
        exp_f = pyfunc(ary_f, 1, 0)

        self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
        got_c = cfunc(ary_c, 1, 0)
        got_f = cfunc(ary_f, 1, 0)

        self.assertEqual(exp_c, got_c)
        self.assertEqual(exp_f, got_f)
class BaseCacheTest(TestCase):
    """Base harness for on-disk cache tests: copies a usecase module into a
    temporary directory and inspects the resulting __pycache__ contents."""
    # This class is also used in test_cfunc.py.

    # The source file that will be copied
    usecases_file = None
    # Make sure this doesn't conflict with another module
    modname = None

    def setUp(self):
        self.tempdir = temp_directory('test_cache')
        sys.path.insert(0, self.tempdir)
        self.modfile = os.path.join(self.tempdir, self.modname + ".py")
        self.cache_dir = os.path.join(self.tempdir, "__pycache__")
        shutil.copy(self.usecases_file, self.modfile)
        self.maxDiff = None

    def tearDown(self):
        sys.modules.pop(self.modname, None)
        sys.path.remove(self.tempdir)

    def import_module(self):
        # Import a fresh version of the test module.  All jitted functions
        # in the test module will start anew and load overloads from
        # the on-disk cache if possible.
        old = sys.modules.pop(self.modname, None)
        if old is not None:
            # Make sure cached bytecode is removed
            cached = [old.__cached__]
            for fn in cached:
                try:
                    os.unlink(fn)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        mod = import_dynamic(self.modname)
        # rstrip('co') maps a '.pyc'/'.pyo' __file__ back to the '.py' path.
        self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
        return mod

    def cache_contents(self):
        """Return cache-dir filenames, excluding Python bytecode files."""
        try:
            return [fn for fn in os.listdir(self.cache_dir)
                    if not fn.endswith(('.pyc', ".pyo"))]
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            return []

    def get_cache_mtimes(self):
        """Map each cache file to its modification time (sorted by name)."""
        return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
                    for fn in sorted(self.cache_contents()))

    def check_pycache(self, n):
        """Assert the cache directory holds exactly *n* non-bytecode files."""
        c = self.cache_contents()
        self.assertEqual(len(c), n, c)

    def dummy_test(self):
        # Intentionally empty placeholder; subclasses provide real tests.
        pass
class BaseCacheUsecasesTest(BaseCacheTest):
    """Cache-test base bound to the cache_usecases.py fixture module."""
    here = os.path.dirname(__file__)
    usecases_file = os.path.join(here, "cache_usecases.py")
    modname = "dispatcher_caching_test_fodder"

    def run_in_separate_process(self):
        # Cached functions can be run from a distinct process.
        # Also stresses issue #1603: uncached function calling cached function
        # shouldn't fail compiling.
        code = """if 1:
            import sys

            sys.path.insert(0, %(tempdir)r)
            mod = __import__(%(modname)r)
            mod.self_test()
            """ % dict(tempdir=self.tempdir, modname=self.modname)

        popen = subprocess.Popen([sys.executable, "-c", code],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = popen.communicate()
        if popen.returncode != 0:
            raise AssertionError(
                "process failed with code %s: \n"
                "stdout follows\n%s\n"
                "stderr follows\n%s\n"
                % (popen.returncode, out.decode(), err.decode()),
            )

    def check_module(self, mod):
        """Exercise the module's cached functions and verify the expected
        number of cache files (one index + one data file per overload)."""
        self.check_pycache(0)
        f = mod.add_usecase
        self.assertPreciseEqual(f(2, 3), 6)
        self.check_pycache(2)  # 1 index, 1 data
        self.assertPreciseEqual(f(2.5, 3), 6.5)
        self.check_pycache(3)  # 1 index, 2 data

        f = mod.add_objmode_usecase
        self.assertPreciseEqual(f(2, 3), 6)
        self.check_pycache(5)  # 2 index, 3 data
        self.assertPreciseEqual(f(2.5, 3), 6.5)
        self.check_pycache(6)  # 2 index, 4 data

        mod.self_test()

    def check_hits(self, func, hits, misses=None):
        """Assert total cache hits (and optionally misses) across overloads."""
        st = func.stats
        self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits)
        if misses is not None:
            self.assertEqual(sum(st.cache_misses.values()), misses,
                             st.cache_misses)
class TestCache(BaseCacheUsecasesTest):
    def test_caching(self):
        """Caching creates one index file per function and one data file per
        compiled overload; re-runs in a fresh process load from disk."""
        self.check_pycache(0)
        mod = self.import_module()
        self.check_pycache(0)

        f = mod.add_usecase
        self.assertPreciseEqual(f(2, 3), 6)
        self.check_pycache(2)  # 1 index, 1 data
        self.assertPreciseEqual(f(2.5, 3), 6.5)
        self.check_pycache(3)  # 1 index, 2 data
        self.check_hits(f, 0, 2)

        f = mod.add_objmode_usecase
        self.assertPreciseEqual(f(2, 3), 6)
        self.check_pycache(5)  # 2 index, 3 data
        self.assertPreciseEqual(f(2.5, 3), 6.5)
        self.check_pycache(6)  # 2 index, 4 data
        self.check_hits(f, 0, 2)

        f = mod.record_return
        rec = f(mod.aligned_arr, 1)
        self.assertPreciseEqual(tuple(rec), (2, 43.5))
        rec = f(mod.packed_arr, 1)
        self.assertPreciseEqual(tuple(rec), (2, 43.5))
        self.check_pycache(9)  # 3 index, 6 data
        self.check_hits(f, 0, 2)

        f = mod.generated_usecase
        self.assertPreciseEqual(f(3, 2), 1)
        self.assertPreciseEqual(f(3j, 2), 2 + 3j)

        # Check the code runs ok from another process
        self.run_in_separate_process()
    def test_caching_nrt_pruned(self):
        """Caching must still work when NRT pruning affects the compiled
        code (array-returning overload)."""
        self.check_pycache(0)
        mod = self.import_module()
        self.check_pycache(0)

        f = mod.add_usecase
        self.assertPreciseEqual(f(2, 3), 6)
        self.check_pycache(2)  # 1 index, 1 data
        # NRT pruning may affect cache
        self.assertPreciseEqual(f(2, np.arange(3)), 2 + np.arange(3) + 1)
        self.check_pycache(3)  # 1 index, 2 data
        self.check_hits(f, 0, 2)
    def test_inner_then_outer(self):
        """Caching an inner function first, then its callers, must work."""
        # Caching inner then outer function is ok
        mod = self.import_module()
        self.assertPreciseEqual(mod.inner(3, 2), 6)
        self.check_pycache(2)  # 1 index, 1 data
        # Uncached outer function shouldn't fail (issue #1603)
        f = mod.outer_uncached
        self.assertPreciseEqual(f(3, 2), 2)
        self.check_pycache(2)  # 1 index, 1 data
        mod = self.import_module()
        f = mod.outer_uncached
        self.assertPreciseEqual(f(3, 2), 2)
        self.check_pycache(2)  # 1 index, 1 data
        # Cached outer will create new cache entries
        f = mod.outer
        self.assertPreciseEqual(f(3, 2), 2)
        self.check_pycache(4)  # 2 index, 2 data
        self.assertPreciseEqual(f(3.5, 2), 2.5)
        self.check_pycache(6)  # 2 index, 4 data
    def test_outer_then_inner(self):
        """Caching an outer function first, then the inner one, must work."""
        # Caching outer then inner function is ok
        mod = self.import_module()
        self.assertPreciseEqual(mod.outer(3, 2), 2)
        self.check_pycache(4)  # 2 index, 2 data
        self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
        self.check_pycache(4)  # same
        mod = self.import_module()
        f = mod.inner
        self.assertPreciseEqual(f(3, 2), 6)
        self.check_pycache(4)  # same
        self.assertPreciseEqual(f(3.5, 2), 6.5)
        self.check_pycache(5)  # 2 index, 3 data
    def test_no_caching(self):
        """A function compiled without cache=True leaves no cache files."""
        mod = self.import_module()

        f = mod.add_nocache_usecase
        self.assertPreciseEqual(f(2, 3), 6)
        self.check_pycache(0)
    def test_looplifted(self):
        # Loop-lifted functions can't be cached and raise a warning
        mod = self.import_module()

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', NumbaWarning)

            f = mod.looplifted
            self.assertPreciseEqual(f(4), 6)
            self.check_pycache(0)

        self.assertEqual(len(w), 1)
        self.assertIn('Cannot cache compiled function "looplifted" '
                      'as it uses lifted code', str(w[0].message))
    def test_big_array(self):
        """A function referencing a big global array cannot be cached."""
        # Code references big array globals cannot be cached
        mod = self.import_module()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', NumbaWarning)
            f = mod.use_big_array
            np.testing.assert_equal(f(), mod.biggie)
            self.check_pycache(0)
        # Exactly one warning about dynamic globals is expected.
        self.assertEqual(len(w), 1)
        self.assertIn('Cannot cache compiled function "use_big_array" '
                      'as it uses dynamic globals', str(w[0].message))
    def test_ctypes(self):
        """Functions using a ctypes pointer (directly or via nesting)
        cannot be cached and must each emit a warning."""
        # Functions using a ctypes pointer can't be cached and raise
        # a warning.
        mod = self.import_module()
        # Check direct use and two levels of nested calls.
        for f in [mod.use_c_sin, mod.use_c_sin_nest1, mod.use_c_sin_nest2]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always', NumbaWarning)
                self.assertPreciseEqual(f(0.0), 0.0)
                self.check_pycache(0)
            self.assertEqual(len(w), 1)
            self.assertIn(
                'Cannot cache compiled function "{}"'.format(f.__name__),
                str(w[0].message),
            )
    def test_closure(self):
        """Closures over different cell values must cache without warnings
        and must not collide with each other."""
        mod = self.import_module()
        # Any NumbaWarning here (e.g. "cannot cache") is an error.
        with warnings.catch_warnings():
            warnings.simplefilter('error', NumbaWarning)
            f = mod.closure1
            self.assertPreciseEqual(f(3), 6) # 3 + 3 = 6
            f = mod.closure2
            self.assertPreciseEqual(f(3), 8) # 3 + 5 = 8
            f = mod.closure3
            self.assertPreciseEqual(f(3), 10) # 3 + 7 = 10
            f = mod.closure4
            self.assertPreciseEqual(f(3), 12) # 3 + 9 = 12
            self.check_pycache(5) # 1 nbi, 4 nbc
    def test_cache_reuse(self):
        """Re-importing the module must reuse the on-disk cache: only
        hits, no new compilations, and unchanged cache-file mtimes."""
        mod = self.import_module()
        # Warm the cache with a variety of use cases / signatures.
        mod.add_usecase(2, 3)
        mod.add_usecase(2.5, 3.5)
        mod.add_objmode_usecase(2, 3)
        mod.outer_uncached(2, 3)
        mod.outer(2, 3)
        mod.record_return(mod.packed_arr, 0)
        mod.record_return(mod.aligned_arr, 1)
        mod.generated_usecase(2, 3)
        mtimes = self.get_cache_mtimes()
        # Two signatures compiled
        self.check_hits(mod.add_usecase, 0, 2)
        # A fresh module object must serve everything from cache.
        mod2 = self.import_module()
        self.assertIsNot(mod, mod2)
        f = mod2.add_usecase
        f(2, 3)
        self.check_hits(f, 1, 0)
        f(2.5, 3.5)
        self.check_hits(f, 2, 0)
        f = mod2.add_objmode_usecase
        f(2, 3)
        self.check_hits(f, 1, 0)
        # The files haven't changed
        self.assertEqual(self.get_cache_mtimes(), mtimes)
        # Same guarantee from a completely separate process.
        self.run_in_separate_process()
        self.assertEqual(self.get_cache_mtimes(), mtimes)
    def test_cache_invalidate(self):
        """Modifying the source file must invalidate stale cache entries."""
        mod = self.import_module()
        f = mod.add_usecase
        self.assertPreciseEqual(f(2, 3), 6)
        # This should change the functions' results
        # (note: `f` is rebound here from the dispatcher to a file handle)
        with open(self.modfile, "a") as f:
            f.write("\nZ = 10\n")
        mod = self.import_module()
        f = mod.add_usecase
        self.assertPreciseEqual(f(2, 3), 15)
        f = mod.add_objmode_usecase
        self.assertPreciseEqual(f(2, 3), 15)
    def test_recompile(self):
        """An explicit recompile() must overwrite the cached entry."""
        # Explicit call to recompile() should overwrite the cache
        mod = self.import_module()
        f = mod.add_usecase
        self.assertPreciseEqual(f(2, 3), 6)
        mod = self.import_module()
        f = mod.add_usecase
        # Mutating the module global alone does not bust the cache...
        mod.Z = 10
        self.assertPreciseEqual(f(2, 3), 6)
        # ...but recompile() picks up the new value and rewrites the cache.
        f.recompile()
        self.assertPreciseEqual(f(2, 3), 15)
        # Freshly recompiled version is re-used from other imports
        mod = self.import_module()
        f = mod.add_usecase
        self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
# Function with the same names should still disambiguate
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
    def test_frozen(self):
        """_UserWideCacheLocator only locates a cache dir for frozen
        executables (sys.frozen set); otherwise it returns None."""
        from .dummy_module import function
        old_code = function.__code__
        # Give the function a code object whose file cannot be resolved
        # to a real module, as happens in a frozen executable.
        code_obj = compile('pass', 'tests/dummy_module.py', 'exec')
        try:
            function.__code__ = code_obj
            source = inspect.getfile(function)
            # doesn't return anything, since it cannot find the module
            # fails unless the executable is frozen
            locator = _UserWideCacheLocator.from_function(function, source)
            self.assertIsNone(locator)
            sys.frozen = True
            # returns a cache locator object, only works when the executable
            # is frozen
            locator = _UserWideCacheLocator.from_function(function, source)
            self.assertIsInstance(locator, _UserWideCacheLocator)
        finally:
            # Restore global state even on failure.
            function.__code__ = old_code
            del sys.frozen
    def _test_pycache_fallback(self):
        """
        With a disabled __pycache__, test there is a working fallback
        (e.g. on the user-wide cache dir)
        """
        mod = self.import_module()
        f = mod.add_usecase
        # Remove this function's cache files at the end, to avoid accumulation
        # across test calls.
        self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True)
        self.assertPreciseEqual(f(2, 3), 6)
        # It's a cache miss since the file was copied to a new temp location
        self.check_hits(f, 0, 1)
        # Test re-use
        mod2 = self.import_module()
        f = mod2.add_usecase
        self.assertPreciseEqual(f(2, 3), 6)
        self.check_hits(f, 1, 0)
        # The __pycache__ is empty (otherwise the test's preconditions
        # wouldn't be met)
        self.check_pycache(0)
    @skip_bad_access
    @unittest.skipIf(os.name == "nt",
                     "cannot easily make a directory read-only on Windows")
    def test_non_creatable_pycache(self):
        """Fallback must kick in when __pycache__ cannot be created."""
        # Make it impossible to create the __pycache__ directory
        old_perms = os.stat(self.tempdir).st_mode
        os.chmod(self.tempdir, 0o500)
        self.addCleanup(os.chmod, self.tempdir, old_perms)
        self._test_pycache_fallback()
    @skip_bad_access
    @unittest.skipIf(os.name == "nt",
                     "cannot easily make a directory read-only on Windows")
    def test_non_writable_pycache(self):
        """Fallback must kick in when __pycache__ exists but is read-only."""
        # Make it impossible to write to the __pycache__ directory
        pycache = os.path.join(self.tempdir, '__pycache__')
        os.mkdir(pycache)
        old_perms = os.stat(pycache).st_mode
        os.chmod(pycache, 0o500)
        self.addCleanup(os.chmod, pycache, old_perms)
        self._test_pycache_fallback()
def test_ipython(self):
# Test caching in an IPython session
base_cmd = [sys.executable, '-m', 'IPython']
base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor']
try:
ver = subprocess.check_output(base_cmd + ['--version'])
except subprocess.CalledProcessError as e:
self.skipTest("ipython not available: return code %d"
% e.returncode)
ver = ver.strip().decode()
print("ipython version:", ver)
# Create test input
inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt")
with open(inputfn, "w") as f:
f.write(r"""
import os
import sys
from numba import jit
# IPython 5 does not support multiline input if stdin isn't
# a tty (https://github.com/ipython/ipython/issues/9752)
f = jit(cache=True)(lambda: 42)
res = f()
# IPython writes on stdout, so use stderr instead
sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()])
# IPython hijacks sys.exit(), bypass it
sys.stdout.flush()
sys.stderr.flush()
os._exit(res)
""")
def execute_with_input():
# Feed the test input as stdin, to execute it in REPL context
with open(inputfn, "rb") as stdin:
p = subprocess.Popen(base_cmd, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if p.returncode != 42:
self.fail("unexpected return code %d\n"
"-- stdout:\n%s\n"
"-- stderr:\n%s\n"
% (p.returncode, out, err))
return err
execute_with_input()
# Run a second time and check caching
err = execute_with_input()
self.assertIn("cache hits = 1", err.strip())
@skip_parfors_unsupported
class TestSequentialParForsCache(BaseCacheUsecasesTest):
    """Check that parfor-using functions are cacheable when lowered
    sequentially (no dynamic globals must leak into the library)."""

    def setUp(self):
        super(TestSequentialParForsCache, self).setUp()
        # Turn on sequential parfor lowering
        parfor.sequential_parfor_lowering = True

    def tearDown(self):
        super(TestSequentialParForsCache, self).tearDown()
        # Turn off sequential parfor lowering
        parfor.sequential_parfor_lowering = False

    def test_caching(self):
        """A parfor use case must compile without dynamic globals and
        leave exactly one index + one data file in the cache."""
        mod = self.import_module()
        self.check_pycache(0)
        f = mod.parfor_usecase
        ary = np.ones(10)
        self.assertPreciseEqual(f(ary), ary * ary + ary)
        # Dynamic globals would make the result uncacheable.
        dynamic_globals = [cres.library.has_dynamic_globals
                           for cres in f.overloads.values()]
        self.assertEqual(dynamic_globals, [False])
        self.check_pycache(2) # 1 index, 1 data
class TestCacheWithCpuSetting(BaseCacheUsecasesTest):
    """Check that cache index keys incorporate the CPU name/features, so
    overriding them adds new entries instead of clobbering old ones."""
    # Disable parallel testing due to envvars modification
    _numba_parallel_test_ = False

    def check_later_mtimes(self, mtimes_old):
        # Every previously-seen cache file must have an mtime that did
        # not go backwards, and at least one file must overlap.
        match_count = 0
        for k, v in self.get_cache_mtimes().items():
            if k in mtimes_old:
                self.assertGreaterEqual(v, mtimes_old[k])
                match_count += 1
        self.assertGreater(match_count, 0,
                           msg='nothing to compare')

    def test_user_set_cpu_name(self):
        """Overriding NUMBA_CPU_NAME must create additional cache entries
        keyed on the 'generic' CPU name."""
        self.check_pycache(0)
        mod = self.import_module()
        mod.self_test()
        cache_size = len(self.cache_contents())
        mtimes = self.get_cache_mtimes()
        # Change CPU name to generic
        with override_env_config('NUMBA_CPU_NAME', 'generic'):
            self.run_in_separate_process()
        self.check_later_mtimes(mtimes)
        self.assertGreater(len(self.cache_contents()), cache_size)
        # Check cache index: one key for the host CPU, one for 'generic'.
        cache = mod.add_usecase._cache
        cache_file = cache._cache_file
        cache_index = cache_file._load_index()
        self.assertEqual(len(cache_index), 2)
        [key_a, key_b] = cache_index.keys()
        if key_a[1][1] == ll.get_host_cpu_name():
            key_host, key_generic = key_a, key_b
        else:
            key_host, key_generic = key_b, key_a
        self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
        self.assertEqual(key_host[1][2], codegen.get_host_cpu_features())
        self.assertEqual(key_generic[1][1], 'generic')
        self.assertEqual(key_generic[1][2], '')

    def test_user_set_cpu_features(self):
        """Overriding NUMBA_CPU_FEATURES must create additional cache
        entries keyed on the custom feature string."""
        self.check_pycache(0)
        mod = self.import_module()
        mod.self_test()
        cache_size = len(self.cache_contents())
        mtimes = self.get_cache_mtimes()
        # Change CPU feature
        my_cpu_features = '-sse;-avx'
        system_features = codegen.get_host_cpu_features()
        self.assertNotEqual(system_features, my_cpu_features)
        with override_env_config('NUMBA_CPU_FEATURES', my_cpu_features):
            self.run_in_separate_process()
        self.check_later_mtimes(mtimes)
        self.assertGreater(len(self.cache_contents()), cache_size)
        # Check cache index: same CPU name, two different feature strings.
        cache = mod.add_usecase._cache
        cache_file = cache._cache_file
        cache_index = cache_file._load_index()
        self.assertEqual(len(cache_index), 2)
        [key_a, key_b] = cache_index.keys()
        if key_a[1][2] == system_features:
            key_host, key_generic = key_a, key_b
        else:
            key_host, key_generic = key_b, key_a
        self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
        self.assertEqual(key_host[1][2], system_features)
        self.assertEqual(key_generic[1][1], ll.get_host_cpu_name())
        self.assertEqual(key_generic[1][2], my_cpu_features)
class TestMultiprocessCache(BaseCacheTest):
    """Check the cache is safe when written from multiple processes."""

    # Nested multiprocessing.Pool raises AssertionError:
    # "daemonic processes are not allowed to have children"
    _numba_parallel_test_ = False

    here = os.path.dirname(__file__)
    usecases_file = os.path.join(here, "cache_usecases.py")
    modname = "dispatcher_caching_test_fodder"

    def test_multiprocessing(self):
        """Concurrent compilation/caching from a process pool (#2028)."""
        # Check caching works from multiple processes at once (#2028)
        mod = self.import_module()
        # Calling a pure Python caller of the JIT-compiled function is
        # necessary to reproduce the issue.
        f = mod.simple_usecase_caller
        n = 3
        try:
            # Prefer 'spawn' to avoid sharing in-memory caches via fork.
            ctx = multiprocessing.get_context('spawn')
        except AttributeError:
            # Python versions without get_context()
            ctx = multiprocessing
        pool = ctx.Pool(n)
        try:
            res = sum(pool.imap(f, range(n)))
        finally:
            pool.close()
        self.assertEqual(res, n * (n - 1) // 2)
class TestCacheFileCollision(unittest.TestCase):
    """Check that two modules whose jitted functions share a name do not
    collide in the on-disk cache (the index filename disambiguates)."""

    _numba_parallel_test_ = False

    here = os.path.dirname(__file__)
    usecases_file = os.path.join(here, "cache_usecases.py")
    modname = "caching_file_loc_fodder"

    # NOTE: these module sources must be flush-left: they are written to
    # disk verbatim and imported, where stray indentation would raise an
    # IndentationError.
    source_text_1 = """
from numba import njit

@njit(cache=True)
def bar():
    return 123
"""
    source_text_2 = """
from numba import njit

@njit(cache=True)
def bar():
    return 321
"""

    def setUp(self):
        # Build a package with `bar` in __init__.py and another `bar` in
        # a submodule so both cache files land in the same __pycache__.
        self.tempdir = temp_directory('test_cache_file_loc')
        sys.path.insert(0, self.tempdir)
        self.modname = 'module_name_that_is_unlikely'
        self.assertNotIn(self.modname, sys.modules)
        self.modname_bar1 = self.modname
        self.modname_bar2 = '.'.join([self.modname, 'foo'])
        foomod = os.path.join(self.tempdir, self.modname)
        os.mkdir(foomod)
        with open(os.path.join(foomod, '__init__.py'), 'w') as fout:
            print(self.source_text_1, file=fout)
        with open(os.path.join(foomod, 'foo.py'), 'w') as fout:
            print(self.source_text_2, file=fout)

    def tearDown(self):
        sys.modules.pop(self.modname_bar1, None)
        sys.modules.pop(self.modname_bar2, None)
        sys.path.remove(self.tempdir)

    def import_bar1(self):
        return import_dynamic(self.modname_bar1).bar

    def import_bar2(self):
        return import_dynamic(self.modname_bar2).bar

    def test_file_location(self):
        """Cache index filenames must embed the defining module's name."""
        bar1 = self.import_bar1()
        bar2 = self.import_bar2()
        # Check that the cache file is named correctly
        idxname1 = bar1._cache._cache_file._index_name
        idxname2 = bar2._cache._cache_file._index_name
        self.assertNotEqual(idxname1, idxname2)
        self.assertTrue(idxname1.startswith("__init__.bar-3.py"))
        self.assertTrue(idxname2.startswith("foo.bar-3.py"))

    @unittest.skipUnless(hasattr(multiprocessing, 'get_context'),
                         'Test requires multiprocessing.get_context')
    def test_no_collision(self):
        """Each `bar` saves its own entry and a fresh process loads both
        from cache with the correct results."""
        bar1 = self.import_bar1()
        bar2 = self.import_bar2()
        with capture_cache_log() as buf:
            res1 = bar1()
        cachelog = buf.getvalue()
        # bar1 should save new index and data
        self.assertEqual(cachelog.count('index saved'), 1)
        self.assertEqual(cachelog.count('data saved'), 1)
        self.assertEqual(cachelog.count('index loaded'), 0)
        self.assertEqual(cachelog.count('data loaded'), 0)
        with capture_cache_log() as buf:
            res2 = bar2()
        cachelog = buf.getvalue()
        # bar2 should save new index and data
        self.assertEqual(cachelog.count('index saved'), 1)
        self.assertEqual(cachelog.count('data saved'), 1)
        self.assertEqual(cachelog.count('index loaded'), 0)
        self.assertEqual(cachelog.count('data loaded'), 0)
        self.assertNotEqual(res1, res2)
        try:
            # Make sure we can spawn new process without inheriting
            # the parent context.
            mp = multiprocessing.get_context('spawn')
        except ValueError:
            # BUGFIX: the previous code only printed a message and fell
            # through, leaving `mp` unbound and raising NameError below.
            self.skipTest("missing spawn context")
        q = mp.Queue()
        # Start new process that calls `cache_file_collision_tester`
        proc = mp.Process(target=cache_file_collision_tester,
                          args=(q, self.tempdir,
                                self.modname_bar1,
                                self.modname_bar2))
        proc.start()
        # Get results from the process
        log1 = q.get()
        got1 = q.get()
        log2 = q.get()
        got2 = q.get()
        proc.join()
        # The remote execution result of bar1() and bar2() should match
        # the one executed locally.
        self.assertEqual(got1, res1)
        self.assertEqual(got2, res2)
        # The remote should have loaded bar1 from cache
        self.assertEqual(log1.count('index saved'), 0)
        self.assertEqual(log1.count('data saved'), 0)
        self.assertEqual(log1.count('index loaded'), 1)
        self.assertEqual(log1.count('data loaded'), 1)
        # The remote should have loaded bar2 from cache
        self.assertEqual(log2.count('index saved'), 0)
        self.assertEqual(log2.count('data saved'), 0)
        self.assertEqual(log2.count('index loaded'), 1)
        self.assertEqual(log2.count('data loaded'), 1)
def cache_file_collision_tester(q, tempdir, modname_bar1, modname_bar2):
    """Child-process helper for TestCacheFileCollision.test_no_collision.

    Imports both `bar` functions, calls them while capturing the cache
    log, and pushes (log, result) pairs for each onto the queue `q`.
    """
    sys.path.insert(0, tempdir)
    bar1 = import_dynamic(modname_bar1).bar
    bar2 = import_dynamic(modname_bar2).bar
    with capture_cache_log() as buf:
        r1 = bar1()
    q.put(buf.getvalue())
    q.put(r1)
    with capture_cache_log() as buf:
        r2 = bar2()
    q.put(buf.getvalue())
    q.put(r2)
class TestCacheMultipleFilesWithSignature(unittest.TestCase):
    """Regression test for https://github.com/numba/numba/issues/3658:
    eagerly-compiled (explicit signature) cached functions must work when
    imported across files."""
    _numba_parallel_test_ = False

    # NOTE: these sources must be flush-left: they are written to disk
    # verbatim and executed, where stray indentation would raise an
    # IndentationError.
    source_text_file1 = """
from file2 import function2
"""
    source_text_file2 = """
from numba import njit

@njit('float64(float64)', cache=True)
def function1(x):
    return x

@njit('float64(float64)', cache=True)
def function2(x):
    return x
"""

    def setUp(self):
        self.tempdir = temp_directory('test_cache_file_loc')
        self.file1 = os.path.join(self.tempdir, 'file1.py')
        with open(self.file1, 'w') as fout:
            print(self.source_text_file1, file=fout)
        self.file2 = os.path.join(self.tempdir, 'file2.py')
        with open(self.file2, 'w') as fout:
            print(self.source_text_file2, file=fout)

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def test_caching_mutliple_files_with_signature(self):
        """Running file1 (imports file2) then file2 directly must both
        succeed; the second run exercises the cache written by the first."""
        # Execute file1.py
        popen = subprocess.Popen([sys.executable, self.file1],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        out, err = popen.communicate()
        msg = f"stdout:\n{out.decode()}\n\nstderr:\n{err.decode()}"
        self.assertEqual(popen.returncode, 0, msg=msg)

        # Execute file2.py
        popen = subprocess.Popen([sys.executable, self.file2],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        out, err = popen.communicate()
        msg = f"stdout:\n{out.decode()}\n\nstderr:\n{err.decode()}"
        self.assertEqual(popen.returncode, 0, msg=msg)
class TestDispatcherFunctionBoundaries(TestCase):
    """Check that Dispatcher objects can cross the Python/nopython
    boundary: as arguments, return values, and container elements."""

    def test_pass_dispatcher_as_arg(self):
        # Test that a Dispatcher object can be pass as argument
        @jit(nopython=True)
        def add1(x):
            return x + 1

        @jit(nopython=True)
        def bar(fn, x):
            return fn(x)

        @jit(nopython=True)
        def foo(x):
            return bar(add1, x)

        # Check dispatcher as argument inside NPM
        inputs = [1, 11.1, np.arange(10)]
        expected_results = [x + 1 for x in inputs]

        for arg, expect in zip(inputs, expected_results):
            self.assertPreciseEqual(foo(arg), expect)

        # Check dispatcher as argument from python
        for arg, expect in zip(inputs, expected_results):
            self.assertPreciseEqual(bar(add1, arg), expect)

    def test_dispatcher_as_arg_usecase(self):
        """A jitted comparator can be passed as a keyword argument."""
        @jit(nopython=True)
        def maximum(seq, cmpfn):
            tmp = seq[0]
            for each in seq[1:]:
                cmpval = cmpfn(tmp, each)
                if cmpval < 0:
                    tmp = each
            return tmp

        got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y))
        self.assertEqual(got, 4)
        got = maximum(list(zip(range(5), range(5)[::-1])),
                      cmpfn=jit(lambda x, y: x[0] - y[0]))
        self.assertEqual(got, (4, 0))
        got = maximum(list(zip(range(5), range(5)[::-1])),
                      cmpfn=jit(lambda x, y: x[1] - y[1]))
        self.assertEqual(got, (0, 4))

    def test_dispatcher_can_return_to_python(self):
        """A Dispatcher passed in must box back out as the same object."""
        @jit(nopython=True)
        def foo(fn):
            return fn

        fn = jit(lambda x: x)

        self.assertEqual(foo(fn), fn)

    def test_dispatcher_in_sequence_arg(self):
        """Dispatchers work as elements of tuple and list arguments."""
        @jit(nopython=True)
        def one(x):
            return x + 1

        @jit(nopython=True)
        def two(x):
            return one(one(x))

        @jit(nopython=True)
        def three(x):
            return one(one(one(x)))

        @jit(nopython=True)
        def choose(fns, x):
            return fns[0](x), fns[1](x), fns[2](x)

        # Tuple case
        self.assertEqual(choose((one, two, three), 1), (2, 3, 4))
        # List case
        self.assertEqual(choose([one, one, one], 1), (2, 2, 2))
class TestBoxingDefaultError(unittest.TestCase):
    """Check the default TypeError raised when a type has no
    boxing/unboxing support."""

    def test_unbox_runtime_error(self):
        """Unboxing a type with no unbox support raises TypeError."""
        # Dummy type has no unbox support
        def foo(x):
            pass
        cres = compile_isolated(foo, (types.Dummy("dummy_type"),))
        with self.assertRaises(TypeError) as raises:
            # Can pass in whatever and the unbox logic will always raise
            # without checking the input value.
            cres.entry_point(None)
        self.assertEqual(str(raises.exception), "can't unbox dummy_type type")

    def test_box_runtime_error(self):
        """Boxing a type with no box support raises TypeError."""
        def foo():
            return unittest  # Module type has no boxing logic
        cres = compile_isolated(foo, ())
        with self.assertRaises(TypeError) as raises:
            # Can pass in whatever and the box logic will always raise
            # without checking the input value.
            cres.entry_point()
        pat = "cannot convert native Module.* to Python object"
        # BUGFIX: assertRegexpMatches is a deprecated alias removed in
        # Python 3.12; use assertRegex instead.
        self.assertRegex(str(raises.exception), pat)
class TestNoRetryFailedSignature(unittest.TestCase):
    """Test that failed-to-compile signatures are not recompiled.
    """

    def run_test(self, func):
        # Each failing signature must be recorded in the dispatcher's
        # failed-compilation cache exactly once, and not retried.
        fcom = func._compiler
        self.assertEqual(len(fcom._failed_cache), 0)
        # expected failure because `int` has no `__getitem__`
        with self.assertRaises(errors.TypingError):
            func(1)
        self.assertEqual(len(fcom._failed_cache), 1)
        # retry
        with self.assertRaises(errors.TypingError):
            func(1)
        self.assertEqual(len(fcom._failed_cache), 1)
        # retry with double
        with self.assertRaises(errors.TypingError):
            func(1.0)
        self.assertEqual(len(fcom._failed_cache), 2)

    def test_direct_call(self):
        """Failure in the called function itself."""
        @jit(nopython=True)
        def foo(x):
            return x[0]

        self.run_test(foo)

    def test_nested_call(self):
        """Failure propagated up through nested jitted calls."""
        @jit(nopython=True)
        def bar(x):
            return x[0]

        @jit(nopython=True)
        def foobar(x):
            bar(x)

        @jit(nopython=True)
        def foo(x):
            return bar(x) + foobar(x)

        self.run_test(foo)

    def test_error_count(self):
        """The generated function must be visited exactly once whether
        compilation succeeds or fails (issue #4117)."""
        def check(field, would_fail):
            # NOTE(review): `field` is unused; it appears to be a leftover
            # from the original reproducer in issue #4117.
            # Slightly modified from the reproducer in issue #4117.
            # Before the patch, the compilation time of the failing case is
            # much longer than of the successful case. This can be detected
            # by the number of times `trigger()` is visited.
            k = 10
            counter = {'c': 0}

            @generated_jit
            def trigger(x):
                # Keep track of every visit
                counter['c'] += 1
                if would_fail:
                    raise errors.TypingError("invoke_failed")
                return lambda x: x

            @jit(nopython=True)
            def ident(out, x):
                pass

            def chain_assign(fs, inner=ident):
                tab_head, tab_tail = fs[-1], fs[:-1]

                @jit(nopython=True)
                def assign(out, x):
                    inner(out, x)
                    out[0] += tab_head(x)

                if tab_tail:
                    return chain_assign(tab_tail, assign)
                else:
                    return assign

            chain = chain_assign((trigger,) * k)
            out = np.ones(2)
            if would_fail:
                with self.assertRaises(errors.TypingError) as raises:
                    chain(out, 1)
                self.assertIn('invoke_failed', str(raises.exception))
            else:
                chain(out, 1)

            # Returns the visit counts
            return counter['c']

        ct_ok = check('a', False)
        ct_bad = check('c', True)
        # `trigger()` is visited exactly once for both successful and failed
        # compilation.
        self.assertEqual(ct_ok, 1)
        self.assertEqual(ct_bad, 1)
@njit
def add_y1(x, y=1):
    # Module-level fixture: exercises an int default parameter (issue #4888).
    return x + y

@njit
def add_ynone(x, y=None):
    # Module-level fixture: exercises None as a default parameter.
    return x + (1 if y else 2)

@njit
def mult(x, y):
    # Helper used as the default value of add_func's `func` parameter.
    return x * y

@njit
def add_func(x, func=mult):
    # Module-level fixture: exercises a Dispatcher as a default parameter.
    return x + func(x, x)
def _checker(f1, arg):
assert f1(arg) == f1.py_func(arg)
class TestMultiprocessingDefaultParameters(SerialMixin, unittest.TestCase):
    """Check that jitted functions with default parameters can be sent
    to spawned child processes (pickling round-trip)."""

    def run_fc_multiproc(self, fc):
        """Run a function in multiple child processes, asserting in each
        that the jitted and pure-Python results agree."""
        try:
            ctx = multiprocessing.get_context('spawn')
        except AttributeError:
            ctx = multiprocessing

        # RE: issue #5973, this doesn't use multiprocessing.Pool.map as doing so
        # causes the TBB library to segfault under certain conditions. It's not
        # clear whether the cause is something in the complexity of the Pool
        # itself, e.g. watcher threads etc, or if it's a problem synonymous with
        # a "timing attack".
        for a in [1, 2, 3]:
            p = ctx.Process(target=_checker, args=(fc, a,))
            p.start()
            p.join(_TEST_TIMEOUT)
            self.assertEqual(p.exitcode, 0)

    def test_int_def_param(self):
        """ Tests issue #4888"""
        self.run_fc_multiproc(add_y1)

    def test_none_def_param(self):
        """ Tests None as a default parameter"""
        # BUGFIX: this previously ran `add_func`, duplicating
        # test_function_def_param and leaving add_ynone untested.
        self.run_fc_multiproc(add_ynone)

    def test_function_def_param(self):
        """ Tests a function as a default parameter"""
        self.run_fc_multiproc(add_func)
# Standard unittest entry point when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
sklam/numba
|
numba/tests/test_dispatcher.py
|
Python
|
bsd-2-clause
| 69,292
|
[
"VisIt"
] |
5132e430af9254d90b6a5ff17dd4852885b8b7c6a7bca3580eccf93a240e3858
|
__doc__ = \
"""
Example of a script that performs histogram analysis of an activation
image, to estimate activation Z-score with various heuristics:
   * Gamma-Gaussian model
   * Gaussian mixture model
   * Empirical normal null
This example is based on a (simplistic) simulated image.
Note : We do not want a 'zscore', which does not mean anything
(except with the fdr) but the probability
that each voxel is in the active class
"""
# Author : Bertrand Thirion, Gael Varoquaux 2008-2009
print(__doc__)

import numpy as np

import nipy.neurospin.utils.simul_2d_multisubject_fmri_dataset as simul
import nipy.neurospin.utils.emp_null as en

################################################################################
# simulate the data: a 60x60 image with three Gaussian "activation" blobs
dimx = 60
dimy = 60
pos = 2*np.array([[6,7],[10,10],[15,10]])
ampli = np.array([3,4,4])

dataset = simul.make_surrogate_array(nbsubj=1, dimx=dimx, dimy=dimy, pos=pos,
                                     ampli=ampli, width=10.0).squeeze()

import pylab as pl
fig = pl.figure(figsize=(12, 10))
pl.subplot(3, 3, 1)
pl.imshow(dataset, cmap=pl.cm.hot)
pl.colorbar()
pl.title('Raw data')

# Work on the flattened voxel values.
Beta = dataset.ravel().squeeze()

################################################################################
# fit Beta's histogram with a Gamma-Gaussian mixture
gam_gaus_pp = en.Gamma_Gaussian_fit(Beta, Beta)
gam_gaus_pp = np.reshape(gam_gaus_pp, (dimx, dimy, 3))

pl.figure(fig.number)
pl.subplot(3, 3, 4)
pl.imshow(gam_gaus_pp[..., 0], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n first component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 5)
pl.imshow(gam_gaus_pp[..., 1], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n second component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 6)
pl.imshow(gam_gaus_pp[..., 2], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n third component posterior proba.')
pl.colorbar()

################################################################################
# fit Beta's histogram with a mixture of Gaussians
alpha = 0.01
gaus_mix_pp = en.three_classes_GMM_fit(Beta, None,
                                       alpha, prior_strength=100)
gaus_mix_pp = np.reshape(gaus_mix_pp, (dimx, dimy, 3))

pl.figure(fig.number)
pl.subplot(3, 3, 7)
pl.imshow(gaus_mix_pp[..., 0], cmap=pl.cm.hot)
pl.title('Gaussian mixture,\n first component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 8)
pl.imshow(gaus_mix_pp[..., 1], cmap=pl.cm.hot)
pl.title('Gaussian mixture,\n second component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 9)
pl.imshow(gaus_mix_pp[..., 2], cmap=pl.cm.hot)
# BUGFIX: this subplot belongs to the Gaussian-mixture row; the title
# previously said 'Gamma-Gaussian mixture' (copy-paste error).
pl.title('Gaussian mixture,\n third component posterior proba.')
pl.colorbar()

################################################################################
# Fit the null mode of Beta with an empirical normal null
efdr = en.ENN(Beta)
emp_null_fdr = efdr.fdr(Beta)
emp_null_fdr = emp_null_fdr.reshape((dimx, dimy))

pl.subplot(3, 3, 3)
pl.imshow(1-emp_null_fdr, cmap=pl.cm.hot)
pl.colorbar()
pl.title('Empirical FDR\n ')
#efdr.plot()
#pl.title('Empirical FDR fit')
pl.show()
|
yarikoptic/NiPy-OLD
|
examples/neurospin/histogram_fits.py
|
Python
|
bsd-3-clause
| 3,063
|
[
"Gaussian"
] |
aa98be2d645447a735c68970b4561e53dea7827093bd448ac5ab00a51ac1b7e6
|
from math import pi
import numpy as np
from ase.atoms import Atoms
def make_test_dft_calculation():
    """Build a single hydrogen atom in a slab-like cell, attached to a
    TestCalculator instance."""
    cell_a = cell_b = 2.0
    cell_c = 6.0
    hydrogen = Atoms(symbols='H',
                     positions=[(0, 0, cell_c / 2)],
                     pbc=(1, 1, 0),
                     cell=(cell_a, cell_b, cell_c),
                     calculator=TestCalculator())
    return hydrogen
class TestCalculator:
    """Toy DFT-like calculator on a 2D tight-binding-style band.

    Builds a Monkhorst-Pack-like k-point grid of nk x nk points in the
    xy-plane, reduces it to the irreducible wedge (i >= j > 0), and
    fabricates eigenvalues and pseudo-wave-functions on a 20x20x60 grid.
    """

    def __init__(self, nk=8):
        """Set up the k-point grid; nk must be even."""
        assert nk % 2 == 0
        bzk = []
        weights = []
        ibzk = []
        w = 1.0 / nk**2
        for i in range(-nk + 1, nk, 2):
            for j in range(-nk + 1, nk, 2):
                k = (0.5 * i / nk, 0.5 * j / nk, 0)
                bzk.append(k)
                if i >= j > 0:
                    # Irreducible wedge: diagonal points have 4-fold
                    # symmetry weight, off-diagonal ones 8-fold.
                    ibzk.append(k)
                    if i == j:
                        weights.append(4 * w)
                    else:
                        weights.append(8 * w)
        # Weights over the irreducible zone must sum to one.
        assert abs(sum(weights) - 1.0) < 1e-12
        self.bzk = np.array(bzk)
        self.ibzk = np.array(ibzk)
        self.weights = np.array(weights)

        # Calculate eigenvalues and wave functions:
        self.init()

    def init(self):
        """Fill self.eps (band energies) and self.psi (wave functions)."""
        nibzk = len(self.weights)
        nbands = 1

        V = -1.0
        # Simple 2D cosine band: eps(k) = 2V (cos 2πkx + cos 2πky)
        self.eps = 2 * V * (np.cos(2 * pi * self.ibzk[:, 0]) +
                            np.cos(2 * pi * self.ibzk[:, 1]))
        self.eps.shape = (nibzk, nbands)

        self.psi = np.zeros((nibzk, 20, 20, 60), complex)
        z = np.linspace(-1.5, 1.5, 60, endpoint=False)
        # Sum Bloch-phased atomic-like orbitals over the four nearest
        # periodic images in the xy-plane.
        # (BUGFIX: removed a dead `phi = np.empty(...)` preallocation that
        # was unconditionally overwritten inside the loop.)
        for i in range(2):
            x = np.linspace(0, 1, 20, endpoint=False) - i
            for j in range(2):
                y = np.linspace(0, 1, 20, endpoint=False) - j
                r = (((x[:, None]**2 +
                       y**2)[:, :, None] +
                      z**2)**0.5).clip(0, 1)
                # Smoothstep orbital: 1 at the atom, 0 at r >= 1.
                phi = 1.0 - r**2 * (3.0 - 2.0 * r)
                phase = np.exp(pi * 2j * np.dot(self.ibzk, (i, j, 0)))
                self.psi += phase[:, None, None, None] * phi

    def get_pseudo_wave_function(self, band=0, kpt=0, spin=0):
        # Only a single band and spin channel are modelled.
        assert spin == 0 and band == 0
        return self.psi[kpt]

    def get_eigenvalues(self, kpt=0, spin=0):
        assert spin == 0
        return self.eps[kpt]

    def get_number_of_bands(self):
        return 1

    def get_k_point_weights(self):
        return self.weights

    def get_number_of_spins(self):
        return 1

    def get_fermi_level(self):
        return 0.0
|
freephys/python_ase
|
ase/calculators/test.py
|
Python
|
gpl-3.0
| 2,468
|
[
"ASE"
] |
231295059ce28de2b32c94e2d60b06e687554da2327888f45ab1e4536cd424f3
|
from __future__ import division

"""
Script to wrap around nosetests to only run on files modified in the past 10
commits as well as 10% of all files.
"""

import os
import sys
import subprocess
import random
import time

# Fraction of the remaining test files to sample on each run.
# (Currently forced to 1, i.e. run everything that can run; the original
# intent was 1/10.)
run_ratio = 1 #1/10

try:
    # Collect source files modified within the last 10 days.
    files_changed = []
    for parent, sub, files in os.walk("pymatgen"):
        for f in files:
            if f.endswith(".py"):
                p = os.path.join(parent, f)
                statbuf = os.stat(p)
                if time.time() - statbuf.st_mtime < 60 * 60 * 24 * 10:
                    files_changed.append(p)
    # output = subprocess.check_output(["git", "diff", "--name-only", "HEAD~20"])
    # files_changed = [f for f in output.decode("utf-8").split("\n")
    #                  if f.startswith("pymatgen")]
except subprocess.CalledProcessError:
    # NOTE(review): this handler is dead code now that the git subprocess
    # call above is commented out; os.walk/os.stat never raise it.
    print("Can't get changed_files... Setting run_ratio to 100%")
    run_ratio = 1
    files_changed = []

# Map each changed source file to its conventional test module, if any.
must_run = []
for f in files_changed:
    d = os.path.dirname(f)
    b = os.path.basename(f)
    testname = os.path.join(d, "tests", "test_" + b)
    if os.path.exists(testname):
        must_run.append(testname)

# All discoverable test files not already forced to run.
can_run = []
for parent, subdir, files in os.walk("pymatgen"):
    for f in files:
        if (parent.endswith("tests") and f.startswith("test_")
                and f.endswith(".py") and f not in must_run):
            can_run.append(os.path.join(parent, f))

print("%d test files must be run..." % len(must_run))
print(must_run)
print("%d possible test files can be run..." % len(can_run))

nrun = int(run_ratio * len(can_run))

# One in twenty times, we will run a full test.
# (randint(1, 20) % 20 == 0 only when the draw is exactly 20.)
if random.randint(1, 20) % 20 == 0:
    to_run = must_run + can_run
else:
    to_run = list(set(random.sample(can_run, nrun) + must_run))

print("%d test files will be run..." % len(to_run))

#ncpus = multiprocessing.cpu_count()
#print("Using %d cpus" % ncpus)

#p = multiprocessing.Pool(ncpus)
#results = p.map(run_test, to_run)

# Run sequentially; abort with the failing file's exit code on first error.
for i, f in enumerate(to_run):
    print("Running %d/%d: %s" % (i+1, len(to_run), f))
    result = subprocess.call(["python", f])
    if result != 0:
        sys.exit(result)
|
matk86/pymatgen
|
run_circle.py
|
Python
|
mit
| 2,154
|
[
"pymatgen"
] |
058a6559dd6c7d3de7f8d679c94d2d4f10dfdc5679936a7678616ac3a2f29e45
|
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal exceptions classes
"""
import cgi
from invenio.config import \
CFG_SITE_URL, \
CFG_ETCDIR
from invenio.messages import gettext_set_language
from invenio.webjournal_utils import get_journal_name_intl
import invenio.template
webjournal_templates = invenio.template.load('webjournal')
class InvenioWebJournalTemplateNotFoundError(Exception):
    """
    Exception if a journal template is not found in the config.
    """
    def __init__(self, ln, journal_name, template=''):
        """
        Initialisation.

        :param ln: language code used to localize messages
        :param journal_name: internal (short) name of the journal
        :param template: name of the missing template, e.g. 'index'
        """
        self.journal_name = journal_name
        self.ln = ln
        self.template = template
        # Localized journal name, for user-facing output.
        self.journal_name_intl = get_journal_name_intl(self.journal_name,
                                                       self.ln)

    def __str__(self):
        """
        String representation (developer-oriented, English only).
        """
        return '''No %(tmpl)s template was provided for journal: %(name)s.
        The path to this file should be defined in %(CFG_ETCDIR)s/webjournal/%(name)s/%(name)s-config.xml
        ''' % {'tmpl': self.template,
               'name': self.journal_name,
               'CFG_ETCDIR': CFG_ETCDIR}

    def user_box(self):
        """
        user-friendly error message with formatting.
        Just say that page does not exist
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
                        ' ',
                        _('Page not found'),
                        _('The requested page does not exist'))
class InvenioWebJournalNoArticleRuleError(Exception):
    """
    Exception if there are no article type rules defined.
    """
    def __init__(self, ln, journal_name):
        """
        Initialisation.

        :param ln: language code used to localize messages
        :param journal_name: internal (short) name of the journal
        """
        self.journal_name = journal_name
        self.ln = ln
        # Localized journal name, for user-facing output.
        self.journal_name_intl = get_journal_name_intl(self.journal_name,
                                                       self.ln)
    def __str__(self):
        """
        String representation (developer-oriented, English only).
        """
        return 'The config.xml file for journal: %s does not contain any \
article rules. These rules are needed to associate collections from \
your Invenio installation to navigable article types. A rule should \
have the form of <rule>NameOfArticleType, \
marc_tag:ExpectedContentOfMarcTag' % self.journal_name

    def user_box(self):
        """
        user-friendly error message with formatting.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
                        _("No journal articles"),
                        _("Problem with the configuration of this journal"),
                        "The system couldn't find the definitions for different article \
kinds (e.g. News, Sports, etc). If there is nothing defined, \
nothing can be shown and it thus indicates that there is either a \
problem with the setup of this journal or in the Software itself.\
There is nothing you can do at this moment. If you wish you can \
send an inquiry to the responsible developers. We apologize \
for the inconvenience.")
class InvenioWebJournalNoIssueNumberTagError(Exception):
    """
    Raised when the journal's config.xml does not define a marc tag
    from which the current issue number can be deduced.
    """
    def __init__(self, ln, journal_name):
        """
        :param ln: interface language code for localized messages
        :param journal_name: short (system) name of the journal
        """
        self.journal_name = journal_name
        self.ln = ln
        # Localized, human-readable title of the journal.
        self.journal_name_intl = get_journal_name_intl(self.journal_name,
                                                       self.ln)
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'The config.xml file for journal: %s does not contain a marc tag\
        to deduce the issue number from. WebJournal is an issue number based \
        system, meaning you have to give some form of numbering system in a \
        dedicated marc tag, so the system can see which is the active journal \
        publication of the date.' % self.journal_name_intl
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        # Typos fixed in the user-facing text: "conrol" -> "control",
        # "seing" -> "seeing".
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _("No journal issues"),
            _("Problem with the configuration of this journal"),
            "The system couldn't find a definition for an issue \
            numbering system. Issue numbers control the date of the \
            publication you are seeing. This indicates that there is a \
            problem with the setup of this journal or in the Software itself.\
            There is nothing you can do at this moment. If you wish you \
            can send an inquiry to the responsible developers. We apologize \
            for the inconvenience.")
class InvenioWebJournalNoArticleNumberError(Exception):
    """
    Raised when an article page is requested without the article's
    order number within the issue.
    """
    def __init__(self, ln, journal_name):
        """
        :param ln: interface language code for localized messages
        :param journal_name: short (system) name of the journal
        """
        self.journal_name = journal_name
        self.ln = ln
        # Localized, human-readable title of the journal.
        self.journal_name_intl = get_journal_name_intl(self.journal_name,
                                                       self.ln)
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'In Journal %s an article was called without specifying the order \
        of this article in the issue. This parameter is mandatory and should be \
        provided by internal links in any case. Maybe this was a bad direct url \
        hack. Check where the request came from.' % self.journal_name
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('Journal article error'),
            _('We could not know which article you were looking for'),
            'The url you passed did not provide an article number or the \
            article number was badly formed. If you \
            came to this page through some link on the journal page, please \
            report this to the admin. If you got this link through some \
            external resource, e.g. an email, you can try to put in a number \
            for the article in the url by hand or just visit the front \
            page at %s/journal/%s' % (CFG_SITE_URL, cgi.escape(self.journal_name)))
class InvenioWebJournalNoJournalOnServerError(Exception):
    """
    Raised when no journal instances are configured on this server.
    """
    def __init__(self, ln):
        """
        :param ln: interface language code for localized messages
        """
        self.ln = ln
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'Apparently there are no journals configured on this \
        installation of CDS Invenio. You can try to use the sample Invenio \
        Atlantis Journal for testing.'
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('No journals available'),
            _('We could not provide you any journals'),
            _('It seems that there are no journals defined on this server. '
            'Please contact support if this is not right.'))
class InvenioWebJournalNoNameError(Exception):
    """
    Raised when a request does not specify which journal to display
    (missing journal ``name`` parameter).
    """
    def __init__(self, ln):
        """
        :param ln: interface language code for localized messages
        """
        self.ln = ln
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'User probably forgot to add the name parameter for the journal\
        Maybe you also want to check if dns mappings are configured correctly.'
    def user_box(self):
        """
        Return a formatted info box inviting the user to pick a journal.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_missing_info_box(self.ln,
            _("Select a journal on this server"),
            _("We couldn't guess which journal you are looking for"),
            _("You did not provide an argument for a journal name. "
            "Please select the journal you want to read in the list below."))
class InvenioWebJournalNoCurrentIssueError(Exception):
    """
    Raised when no current issue number is stored for the journal
    (e.g. the journal has never been published or is misconfigured).
    """
    def __init__(self, ln):
        """
        :param ln: interface language code for localized messages
        """
        self.ln = ln
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'There seems to be no current issue number stored for this \
        journal. Is this the first time you use the journal? Otherwise, check\
        configuration.'
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        # Typo fixed in the user-facing text: "informtion" -> "information".
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('No current issue'),
            _('We could not find any information on the current issue'),
            _('The configuration for the current issue seems to be empty. '
            'Try providing an issue number or check with support.'))
class InvenioWebJournalIssueNumberBadlyFormedError(Exception):
    """
    Raised when a requested issue number does not parse as ww/YYYY.
    """
    def __init__(self, ln, issue):
        """
        :param ln: interface language code for localized messages
        :param issue: the issue number string as received from the request
        """
        self.ln = ln
        self.issue = issue
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'The issue number was badly formed. If this comes from the \
        user it is no problem.'
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('Issue number badly formed'),
            _('We could not read the issue number you provided'),
            'The issue number you provided in the url seems to be badly\
            formed. Issue numbers have to be in the form of ww/YYYY, so\
            e.g. 50/2007. You provided the issue number like so: \
            %s.' % cgi.escape(self.issue))
class InvenioWebJournalArchiveDateWronglyFormedError (Exception):
    """
    Raised when a requested archive date does not parse as dd/mm/YYYY.
    """
    def __init__(self, ln, date):
        """
        :param ln: interface language code for localized messages
        :param date: the archive date string as received from the form
        """
        self.ln = ln
        self.date = date
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'The archive date was badly formed. If this comes from the \
        user it is no problem.'
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('Archive date badly formed'),
            _('We could not read the archive date you provided'),
            'The archive date you provided in the form seems to be badly\
            formed. Archive dates have to be in the form of dd/mm/YYYY, so\
            e.g. 02/12/2007. You provided the archive date like so: \
            %s.' % cgi.escape(self.date))
class InvenioWebJournalNoPopupRecordError(Exception):
    """
    Raised when a popup window is requested without a (valid) record id.
    """
    def __init__(self, ln, journal_name, recid):
        """
        :param ln: interface language code for localized messages
        :param journal_name: short (system) name of the journal
        :param recid: the record id received with the popup request
        """
        self.ln = ln
        self.journal_name = journal_name
        self.recid = recid
        # Localized, human-readable title of the journal.
        self.journal_name_intl = get_journal_name_intl(self.journal_name,
                                                       self.ln)
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'There was no recid provided to the popup system of webjournal \
        or the recid was badly formed. The recid was %s' % repr(self.recid)
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('No popup record'),
            _('We could not deduce the popup article you requested'),
            'You called a popup window on CDS Invenio without \
            specifying a record in which you are interested or the \
            record was badly formed. Does this link come \
            from a CDS Invenio Journal? If so, please contact \
            support.')
class InvenioWebJournalReleaseUpdateError(Exception):
    """
    Raised when an issue update release fails (the update button was
    clicked but no updates were submitted).
    """
    def __init__(self, ln, journal_name):
        """
        :param ln: interface language code for localized messages
        :param journal_name: short (system) name of the journal
        """
        self.ln = ln
        self.journal_name = journal_name
        # Localized, human-readable title of the journal.
        self.journal_name_intl = get_journal_name_intl(self.journal_name,
                                                       self.ln)
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'There were no updates submitted on a click on the update button.\
        This should never happen and must be due to an internal error.'
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('Update error'),
            _('There was an internal error'),
            'We encountered an internal error trying to update the \
            journal issue. You can try to launch the update again or \
            contact the administrator. We apologize for the \
            inconvenience.')
class InvenioWebJournalReleaseDBError(Exception):
    """
    Raised when DB times cannot be synchronized with Python time
    objects while publishing an issue.
    """
    def __init__(self, ln):
        """
        :param ln: interface language code for localized messages
        """
        self.ln = ln
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'There was an error in synchronizing DB times with the actual \
        Python time objects.'
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('Journal publishing DB error'),
            _('There was an internal error'),
            'We encountered an internal error trying to publish the \
            journal issue. You can try to launch the publish interface \
            again or contact the administrator. We apologize for the \
            inconvenience.')
class InvenioWebJournalIssueNotFoundDBError(Exception):
    """
    Raised when a given issue number does not exist for the journal.
    """
    def __init__(self, ln, journal_name, issue_number):
        """
        :param ln: interface language code for localized messages
        :param journal_name: short (system) name of the journal
        :param issue_number: the issue number that could not be found
        """
        self.ln = ln
        self.journal_name = journal_name
        self.issue_number = issue_number
        # Localized, human-readable title of the journal.
        self.journal_name_intl = get_journal_name_intl(self.journal_name,
                                                       self.ln)
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'The issue %s does not seem to exist for %s.' % (self.issue_number, self.journal_name)
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('Journal issue error'),
            _('Issue not found'),
            'The issue you were looking for was not found for %s' % \
            cgi.escape(self.journal_name_intl))
class InvenioWebJournalJournalIdNotFoundDBError(Exception):
    """
    Raised when the journal's id cannot be found in the database.
    """
    def __init__(self, ln, journal_name):
        """
        :param ln: interface language code for localized messages
        :param journal_name: short (system) name of the journal
        """
        self.ln = ln
        self.journal_name = journal_name
        # Localized, human-readable title of the journal.
        self.journal_name_intl = get_journal_name_intl(self.journal_name,
                                                       self.ln)
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'The id for journal %s was not found in the Database. Make \
        sure the entry exists!' % (self.journal_name)
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('Journal ID error'),
            _('We could not find the id for this journal in the Database'),
            'We encountered an internal error trying to get the id \
            for this journal. You can try to refresh the page or \
            contact the administrator. We apologize for the \
            inconvenience.')
class InvenioWebJournalNoCategoryError(Exception):
    """
    Raised when trying to access a category that does not exist.
    """
    def __init__(self, ln, category, categories):
        """
        :param ln: interface language code for localized messages
        :param category: the (unknown) category that was requested
        :param categories: the categories that do exist for the journal
        """
        self.ln = ln
        self.category = category
        self.categories = categories
    def __str__(self):
        """
        Return a developer-oriented description of the error.
        """
        return 'The specified category "%s" does not exist' % \
               self.category
    def user_box(self):
        """
        Return a formatted, user-friendly error box.
        """
        _ = gettext_set_language(self.ln)
        return webjournal_templates.tmpl_webjournal_error_box(self.ln,
            _('Category "%(category_name)s" not found') % \
            {'category_name': cgi.escape(self.category)},
            _('Category "%(category_name)s" not found') % \
            {'category_name': cgi.escape(self.category)},
            _('Sorry, this category does not exist for this journal and issue.'))
|
ppiotr/Bibedit-some-refactoring
|
modules/webjournal/lib/webjournal_config.py
|
Python
|
gpl-2.0
| 19,402
|
[
"VisIt"
] |
e75048b08f5f8678c06c8b8355927c03a33d6559fad72df7f3977586df345cbc
|
import pytest
import os
def str_to_bool(val):
    """
    Interpret *val* as a boolean.

    Accepts the strings "true"/"false" in any letter case; non-string
    values are first rendered with ``str`` (so ``True``/``False`` work
    too). Anything else raises ``ValueError``.
    """
    try:
        normalized = val.lower()
    except AttributeError:
        # Not a string: render it first, then lowercase.
        normalized = str(val).lower()
    outcomes = {'true': True, 'false': False}
    verdict = outcomes.get(normalized)
    if verdict is None:
        raise ValueError("Invalid input value: %s" % normalized)
    return verdict
@pytest.fixture(scope="module")
def setup(host):
    """
    Collect per-host facts needed by the ceph test suite.

    Reads the ansible variables and facts of *host* (via testinfra's
    ``host.ansible``) to derive network interfaces, the expected number
    of mons/OSDs, the cluster name, config path and addresses, and
    returns them as a plain dict (computed once per test module).
    """
    cluster_address = ""
    osd_ids = []
    osds = []
    ansible_vars = host.ansible.get_variables()
    ansible_facts = host.ansible("setup")
    docker = ansible_vars.get("docker")
    container_binary = ansible_vars.get("container_binary", "")
    osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
    group_names = ansible_vars["group_names"]
    ansible_distribution = ansible_facts["ansible_facts"]["ansible_distribution"]
    # Interface names differ between the CentOS boxes and the others.
    if ansible_distribution == "CentOS":
        public_interface = "eth1"
        cluster_interface = "eth2"
    else:
        public_interface = "ens6"
        cluster_interface = "ens7"
    # Drop the last octet of the public network to get the subnet prefix.
    subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
    num_mons = len(ansible_vars["groups"].get('mons', []))
    if osd_auto_discovery:
        # With auto discovery no devices are listed; scenarios use 3 disks.
        num_osds = 3
    else:
        num_osds = len(ansible_vars.get("devices", []))
    if not num_osds:
        num_osds = len(ansible_vars.get("lvm_volumes", []))
    osds_per_device = ansible_vars.get("osds_per_device", 1)
    num_osds = num_osds * osds_per_device
    # If number of devices doesn't map to number of OSDs, allow tests to define
    # that custom number, defaulting it to ``num_devices``
    num_osds = ansible_vars.get('num_osds', num_osds)
    cluster_name = ansible_vars.get("cluster", "ceph")
    conf_path = "/etc/ceph/{}.conf".format(cluster_name)
    if "osds" in group_names:
        cluster_address = host.interface(cluster_interface).addresses[0]
        # OSD dirs are named "<cluster>-<id>"; strip everything up to the dash.
        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
        if cmd.rc == 0:
            osd_ids = cmd.stdout.rstrip("\n").split("\n")
            osds = osd_ids
    address = host.interface(public_interface).addresses[0]
    # Containerized deployments without an explicit binary default to podman.
    if docker and not container_binary:
        container_binary = "podman"
    data = dict(
        cluster_name=cluster_name,
        subnet=subnet,
        osd_ids=osd_ids,
        num_mons=num_mons,
        num_osds=num_osds,
        address=address,
        osds=osds,
        conf_path=conf_path,
        public_interface=public_interface,
        cluster_interface=cluster_interface,
        cluster_address=cluster_address,
        container_binary=container_binary)
    return data
@pytest.fixture()
def node(host, request):
    """
    This fixture represents a single node in the ceph cluster. Using the
    host.ansible fixture provided by testinfra it can access all the ansible
    variables provided to it by the specific test scenario being ran.

    You must include this fixture on any tests that operate on specific type
    of node because it contains the logic to manage which tests a node
    should run: tests whose markers do not match the node's group names
    (or the applicable deployment flavour) are skipped here.
    """
    ansible_vars = host.ansible.get_variables()
    # tox will pass in this environment variable. we need to do it this way
    # because testinfra does not collect and provide ansible config passed in
    # from using --extra-vars
    ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "quincy")
    rolling_update = os.environ.get("ROLLING_UPDATE", "False")
    group_names = ansible_vars["group_names"]
    docker = ansible_vars.get("docker")
    dashboard = ansible_vars.get("dashboard_enabled", True)
    radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1)
    # Release code name -> major version number, for version-gated asserts.
    ceph_release_num = {
        'jewel': 10,
        'kraken': 11,
        'luminous': 12,
        'mimic': 13,
        'nautilus': 14,
        'octopus': 15,
        'pacific': 16,
        'quincy': 17,
        'dev': 99
    }
    # capture the initial/default state
    test_is_applicable = False
    for marker in request.node.iter_markers():
        if marker.name in group_names or marker.name == 'all':
            test_is_applicable = True
            break
    # Check if any markers on the test method exist in the nodes group_names.
    # If they do not, this test is not valid for the node being tested.
    if not test_is_applicable:
        reason = "%s: Not a valid test for node type: %s" % (
            request.function, group_names)
        pytest.skip(reason)
    if request.node.get_closest_marker('ceph_crash') and group_names in [['nfss'], ['iscsigws'], ['clients'], ['monitoring']]:
        pytest.skip('Not a valid test for nfs, client or iscsigw nodes')
    if request.node.get_closest_marker("no_docker") and docker:
        pytest.skip(
            "Not a valid test for containerized deployments or atomic hosts")
    if request.node.get_closest_marker("docker") and not docker:
        pytest.skip(
            "Not a valid test for non-containerized deployments or atomic hosts")  # noqa E501
    if request.node.get_closest_marker("dashboard") and not dashboard:
        pytest.skip(
            "Not a valid test with dashboard disabled")
    if request.node.get_closest_marker("dashboard") and group_names == ['clients']:
        pytest.skip('Not a valid test for client node')
    data = dict(
        vars=ansible_vars,
        docker=docker,
        ceph_stable_release=ceph_stable_release,
        ceph_release_num=ceph_release_num,
        rolling_update=rolling_update,
        radosgw_num_instances=radosgw_num_instances,
    )
    return data
def pytest_collection_modifyitems(session, config, items):
    """
    Tag every collected test with a role marker derived from the path of
    its test file, so the ``node`` fixture can later skip tests that do
    not apply to the node type under test.
    """
    # Ordered (path fragment, marker) pairs; the first match wins, which
    # mirrors the original if/elif chain.
    role_markers = (
        ("mon", pytest.mark.mons),
        ("osd", pytest.mark.osds),
        ("mds", pytest.mark.mdss),
        ("mgr", pytest.mark.mgrs),
        ("rbd-mirror", pytest.mark.rbdmirrors),
        ("rgw", pytest.mark.rgws),
        ("nfs", pytest.mark.nfss),
        ("iscsi", pytest.mark.iscsigws),
        ("grafana", pytest.mark.grafanas),
    )
    for item in items:
        test_path = item.location[0]
        for fragment, marker in role_markers:
            if fragment in test_path:
                item.add_marker(marker)
                break
        else:
            # No role fragment matched: the test applies to every node type.
            item.add_marker(pytest.mark.all)
        if "journal_collocation" in test_path:
            item.add_marker(pytest.mark.journal_collocation)
|
ceph/ceph-ansible
|
tests/conftest.py
|
Python
|
apache-2.0
| 6,505
|
[
"Octopus"
] |
cd80bbc3f9732824fe8ca659ed3a3386f8ce39117528f85c5f11335b176bbbac
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""\
Topology readers --- :mod:`MDAnalysis.topology`
===============================================
This submodule contains the topology readers. A topology file supplies the list
of atoms in the system, their connectivity and possibly additional information
such as B-factors, partial charges, etc. The details depend on the file format
and not every topology file provides all (or even any) additional data. This
data is made accessible through AtomGroup properties.
As a minimum, all topology parsers will provide atom ids, atom types, masses,
resids, resnums and segids as well as assigning all atoms to residues and all
residues to segments. For systems without residues and segments, this results
in there being a single residue and segment to which all atoms belong. Often
when data is not provided by a file, it will be guessed based on other data in
the file. In the event that this happens, a UserWarning will always be issued.
The following table lists the currently supported topology formats along with
the attributes they provide.
.. _`Supported topology formats`:
.. table:: Table of Supported Topology Formats
================= ========= ================= ===================================================
Name extension attributes remarks
================= ========= ================= ===================================================
CHARMM/XPLOR PSF psf resnames, :mod:`MDAnalysis.topology.PSFParser`
names, types,
charges,
bonds, angles,
dihedrals,
impropers
CHARMM CARD [#a]_ crd names, "CARD" coordinate output from CHARMM; deals with
tempfactors, either standard or EXTended format;
resnames, :mod:`MDAnalysis.topology.CRDParser`
Brookhaven [#a]_ pdb/ent names, bonds, a simplified PDB format (as used in MD simulations)
resids, resnums, is read by default
types,
chainids,
occupancies,
bfactors,
resids, icodes,
resnames,
segids,
XPDB [#a]_ pdb As PDB except Extended PDB format (can use 5-digit residue
icodes numbers). To use, specify the format "XPBD"
explicitly:
``Universe(..., topology_format="XPDB")``.
Module :mod:`MDAnalysis.coordinates.PDB`
PQR [#a]_ pqr names, charges, PDB-like but whitespace-separated files with charge
types, and radius information;
radii, resids, :mod:`MDAnalysis.topology.PQRParser`
resnames, segids
PDBQT [#a]_ pdbqt names, types, file format used by AutoDock with atom types and
altLocs, charges, partial charges. Module:
resnames, :mod:`MDAnalysis.topology.PDBQTParser`
resids,
icodes,
occupancies,
tempfactors,
segids,
GROMOS96 [#a]_ gro names, resids, GROMOS96 coordinate file;
resnames, :mod:`MDAnalysis.topology.GROParser`
AMBER top, names, charges simple AMBER format reader (only supports a subset
prmtop, type_indices, of flags);
parm7 types, :mod:`MDAnalysis.topology.TOPParser`
resnames,
   DESRES [#a]_      dms       names, numbers,   DESRES molecular structure reader (only supports
masses, charges, the atom and bond records);
chainids, resids, :mod:`MDAnalysis.topology.DMSParser`
resnames, segids,
radii,
TPR [#b]_ tpr names, types, Gromacs portable run input reader (limited
resids, resnames, experimental support for some of the more recent
charges, bonds, versions of the file format);
masses, :mod:`MDAnalysis.topology.TPRParser`
MOL2 [#a]_ mol2 ids, names, Tripos MOL2 molecular structure format;
types, resids, :mod:`MDAnalysis.topology.MOL2Parser`
charges, bonds,
resnames,
LAMMPS [#a]_ data ids, types, LAMMPS Data file parser
masses, charges, :mod:`MDAnalysis.topology.LAMMPSParser`
resids, bonds,
angles, dihedrals
XYZ [#a]_ xyz names XYZ File Parser. Reads only the labels from atoms
and constructs minimal topology data.
:mod:`MDAnalysis.topology.XYZParser`
GAMESS [#a]_ gms, names, GAMESS output parser. Read only atoms of assembly
log atomic charges, section (atom, elems and coords) and construct
topology.
:mod:`MDAnalysis.topology.GMSParser`
DL_Poly [#a]_ config, ids, names DL_Poly CONFIG or HISTORY file. Reads only the
history atom names. If atoms are written out of order, will
correct the order.
:mod:`MDAnalysis.topology.DLPolyParser`
Hoomd XML xml types, charges, `HOOMD XML`_ topology file. Reads atom types,
radii, masses masses, and charges if possible. Also reads bonds,
bonds, angles, angles, and dihedrals.
dihedrals :mod:`MDAnalysis.topology.HoomdXMLParser`
Macromolecular mmtf altLocs, `Macromolecular Transmission Format (MMTF)`_.
transmission bfactors, bonds, An efficient compact format for biomolecular
format charges, masses, structures.
names,
occupancies,
types, icodes,
resnames, resids,
segids, models
================= ========= ================= ===================================================
.. [#a] This format can also be used to provide *coordinates* so that
it is possible to create a full
:mod:`~MDAnalysis.core.universe.Universe` by simply providing
a file of this format as the sole argument to
:mod:`~MDAnalysis.core.universe.Universe`: ``u =
Universe(filename)``
.. [#b] The Gromacs TPR format contains coordinate information but
parsing coordinates from a TPR file is currently not implemented
in :mod:`~MDAnalysis.topology.TPRParser`.
.. SeeAlso:: :ref:`Coordinates` with the :ref:`Supported coordinate formats`
.. _HOOMD XML: http://codeblue.umich.edu/hoomd-blue/doc/page_xml_file_format.html
.. _Macromolecular Transmission Format (MMTF): https://mmtf.rcsb.org/
.. _topology-parsers-developer-notes:
Developer Notes
---------------
.. versionadded:: 0.8
.. versionchanged:: 0.16.0
The new array-based topology system completely replaced the old
system that was based on a list of Atom instances.
Topology information consists of data that do not change over time,
i.e. information that is the same for all time steps of a
trajectory. This includes
* identity of atoms (name, type, number, partial charge, ...) and to
which residue and segment they belong; atoms are identified in
MDAnalysis by their :attr:`~MDAnalysis.core.groups.Atom.index`,
an integer number starting at 0 and incremented in the order of
atoms found in the topology.
* bonds (pairs of atoms)
* angles (triplets of atoms)
* dihedral angles (quadruplets of atoms) — proper and improper
dihedrals should be treated separately
Topology readers are generally called "parsers" in MDAnalysis (for
historical reasons and in order to distinguish them from coordinate
"readers"). All parsers are derived from
:class:`MDAnalysis.topology.base.TopologyReaderBase` and have a
:meth:`~MDAnalysis.topology.base.TopologyReaderBase.parse` method that
returns a :class:`MDAnalysis.core.topology.Topology` instance.
atoms
~~~~~~
The **atoms** appear to the user as an array of
:class:`~MDAnalysis.core.groups.Atom` instances. However, under the
hood this is essentially only an array of atom indices that are used
to index the various components of the topology database
:class:`~MDAnalysis.core.topology.Topology`. The parser needs to
initialize the :class:`~MDAnalysis.core.topology.Topology` with the
data read from the topology file.
.. SeeAlso:: :ref:`topology-system-label`
bonds
~~~~~~
**Bonds** are represented as a :class:`tuple` of :class:`tuple`. Each tuple
contains two atom numbers, which indicate the atoms between which the
bond is formed. Only one of the two permutations is stored, typically
the one with the lower atom number first.
bondorder
~~~~~~~~~~
Some **bonds** have additional information called **order**. When available
this is stored in a dictionary of format {bondtuple:order}. This extra
information is then passed to Bond initialisation in u._init_bonds()
angles
~~~~~~~
**Angles** are represented by a :class:`list` of :class:`tuple`. Each
tuple contains three atom numbers. The second of these numbers
represents the apex of the angle.
dihedrals
~~~~~~~~~
**Proper dihedral angles** are represented by a :class:`list` of :class:`tuple`. Each
tuple contains four atom numbers. The angle of the torsion
is defined by the angle between the planes formed by atoms 1, 2, and 3,
and 2, 3, and 4.
impropers
~~~~~~~~~
**Improper dihedral angles** are represented by a :class:`list` of :class:`tuple`. Each
tuple contains four atom numbers. The angle of the improper torsion
is again defined by the angle between the planes formed by atoms 1, 2, and 3,
and 2, 3, and 4. Improper dihedrals differ from regular dihedrals as the
four atoms need not be sequentially bonded, and are instead often all bonded
to the second atom.
"""
from __future__ import absolute_import
__all__ = ['core', 'PSFParser', 'PDBParser', 'PQRParser', 'GROParser',
'CRDParser', 'TOPParser', 'PDBQTParser', 'TPRParser',
'LAMMPSParser', 'XYZParser', 'GMSParser', 'DLPolyParser',
'HoomdXMLParser']
from . import core
from . import PSFParser
from . import TOPParser
from . import PDBParser
from . import ExtendedPDBParser
from . import PQRParser
from . import GROParser
from . import CRDParser
from . import PDBQTParser
from . import DMSParser
from . import TPRParser
from . import MOL2Parser
from . import LAMMPSParser
from . import XYZParser
from . import GMSParser
from . import DLPolyParser
from . import HoomdXMLParser
from . import MMTFParser
|
kain88-de/mdanalysis
|
package/MDAnalysis/topology/__init__.py
|
Python
|
gpl-2.0
| 12,792
|
[
"Amber",
"CHARMM",
"DL_POLY",
"GAMESS",
"Gromacs",
"HOOMD-blue",
"LAMMPS",
"MDAnalysis"
] |
030d8b9c2a059d973c36bdffcfb7dbac3be0c7060b86a3604ad6dbc6dbeeca31
|
#!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys, os
sys.path.append('%s/fsapt' % os.environ['PSIDATADIR'])
from fsapt import *
# => Driver Code <= #
if __name__ == '__main__':
    from contextlib import redirect_stdout

    # > Working Dirname < #
    # Usage: fsaptdiff dirA dirB [dirD] -- compute F-SAPT analyses for two
    # F-ISAPT job directories and their A-B difference (written to dirD).
    if len(sys.argv) == 3:
        dirA = sys.argv[1]
        dirB = sys.argv[2]
        dirD = '.'
    elif len(sys.argv) == 4:
        dirA = sys.argv[1]
        dirB = sys.argv[2]
        dirD = sys.argv[3]
    else:
        raise Exception('Usage: fsapt.py dirnameA dirnameB [dirnameD]')

    # Make dirD if needed
    if not os.path.exists(dirD):
        os.makedirs(dirD)

    def _monomer_analysis(dirname, mode, links5050):
        """Run computeFsapt for one job directory and print its order-2
        analyses into <dirname>/fsapt.dat.

        mode is 'w' (first pass) or 'a' (second pass); links5050 selects
        the link-bond partition. Returns the computeFsapt result dict so
        the caller can reuse it for the difference and order-1 output.
        """
        header = (' ==> F-ISAPT: Links 50-50 <==\n' if links5050
                  else ' ==> F-ISAPT: Links by Charge <==\n')
        # redirect_stdout restores sys.stdout even if an analysis step
        # raises (the previous hand-rolled swap did not).
        with open('%s/fsapt.dat' % dirname, mode) as fh, \
                redirect_stdout(fh):
            print(header)
            stuff = computeFsapt(dirname, links5050)
            print(' => Full Analysis <=\n')
            printOrder2(stuff['order2'], stuff['fragkeys'])
            print(' => Reduced Analysis <=\n')
            printOrder2(stuff['order2r'], stuff['fragkeysr'])
        return stuff

    def _difference_analysis(stuffA, stuffB, mode, links5050):
        """Print the reduced A-B difference analysis into <dirD>/fsapt.dat
        and return the difference order-2 dict."""
        header = (' ==> F-ISAPT: Links 50-50 <==\n' if links5050
                  else ' ==> F-ISAPT: Links by Charge <==\n')
        with open('%s/fsapt.dat' % dirD, mode) as fh, \
                redirect_stdout(fh):
            print(header)
            order2D = diffOrder2(stuffA['order2r'], stuffB['order2r'])
            print(' => Reduced Analysis <=\n')
            printOrder2(order2D, stuffB['fragkeysr'])
        return order2D

    # > Order-2 Analysis < #
    # Pass 1: link bonds apportioned by charge (files opened fresh).
    stuffA = _monomer_analysis(dirA, 'w', False)
    stuffB = _monomer_analysis(dirB, 'w', False)
    order2D = _difference_analysis(stuffA, stuffB, 'w', False)
    # Pass 2: link bonds split 50-50 (appended to the same files). These
    # results also feed the order-1 PDB output below.
    stuffA = _monomer_analysis(dirA, 'a', True)
    stuffB = _monomer_analysis(dirB, 'a', True)
    order2D = _difference_analysis(stuffA, stuffB, 'a', True)

    # > Order-1 PDB Files < #
    pdbA = PDB.fromGeom(stuffA['geom'])
    printOrder1(dirA, stuffA['order2r'], pdbA, stuffA['frags'])
    pdbB = PDB.fromGeom(stuffB['geom'])
    printOrder1(dirB, stuffB['order2r'], pdbB, stuffB['frags'])
    # The difference map is visualized on monomer A's geometry.
    printOrder1(dirD, order2D, pdbA, stuffA['frags'])
|
ashutoshvt/psi4
|
psi4/share/psi4/fsapt/fsaptdiff.py
|
Python
|
lgpl-3.0
| 4,127
|
[
"Psi4"
] |
7dd5accba4e4e81a5983f69ede15475cff033d24bbf953a36465654988ae78db
|
'''GFF3 format (:mod:`skbio.io.format.gff3`)
=========================================
.. currentmodule:: skbio.io.format.gff3
GFF3 (Generic Feature Format version 3) is a standard file format for
describing features for biological sequences. It contains lines of
text, each consisting of 9 tab-delimited columns [1]_.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.sequence.Sequence` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.DNA` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.metadata.IntervalMetadata` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |generator of tuple (seq_id of str type, |
| | |:mod:`skbio.metadata.IntervalMetadata`) |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
**State: Experimental as of 0.5.1.**
The first line of the file is a comment that identifies the format and
version. This is followed by a series of data lines. Each data line
corresponds to an annotation and consists of 9 columns: SEQID, SOURCE,
TYPE, START, END, SCORE, STRAND, PHASE, and ATTR.
Column 9 (ATTR) is list of feature attributes in the format
"tag=value". Multiple "tag=value" pairs are delimited by
semicolons. Multiple values of the same tag are separated with the
comma ",". The following tags have predefined meanings: ID, Name,
Alias, Parent, Target, Gap, Derives_from, Note, Dbxref, Ontology_term,
and Is_circular.
The meaning and format of these columns and attributes are explained in
detail in the format specification [1]_. They are read in using the
vocabulary defined in the GenBank parser (:mod:`skbio.io.format.genbank`).
Format Parameters
-----------------
Reader-specific Parameters
^^^^^^^^^^^^^^^^^^^^^^^^^^
``IntervalMetadata`` GFF3 reader requires 1 parameter: ``seq_id``.
It reads the annotation with the specified
sequence ID from the GFF3 file into an ``IntervalMetadata`` object.
``DNA`` and ``Sequence`` GFF3 readers require ``seq_num`` of int as
parameter. It specifies which GFF3 record to read from a GFF3 file
with annotations of multiple sequences in it.
Writer-specific Parameters
^^^^^^^^^^^^^^^^^^^^^^^^^^
``skip_subregion`` is a boolean parameter used by all the GFF3 writers. It
specifies whether you would like to write each non-contiguous
sub-region for a feature annotation. For example, if there is
interval feature for a gene with two exons in an ``IntervalMetadata``
object, it will write one line into the GFF3 file when ``skip_subregion`` is
``True`` and will write 3 lines (one for the gene and one for each
exon, respectively) when ``skip_subregion`` is ``False``. Default is ``True``.
In addition, the ``IntervalMetadata`` GFF3 writer needs a ``seq_id``
parameter. It specifies the sequence ID (column 1 in the GFF3 file) that
the annotation belongs to.
Examples
--------
Let's create a file stream with following data in GFF3 format:
>>> from skbio import Sequence, DNA
>>> gff_str = """
... ##gff-version 3
... seq_1\\t.\\tgene\\t10\\t90\\t.\\t+\\t0\\tID=gen1
... seq_1\\t.\\texon\\t10\\t30\\t.\\t+\\t.\\tParent=gen1
... seq_1\\t.\\texon\\t50\\t90\\t.\\t+\\t.\\tParent=gen1
... seq_2\\t.\\tgene\\t80\\t96\\t.\\t-\\t.\\tID=gen2
... ##FASTA
... >seq_1
... ATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGC
... ATGCATGCATGCATGCATGCATGCATGCATGCATGCATGC
... >seq_2
... ATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGC
... ATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGC
... """
>>> import io
>>> from skbio.metadata import IntervalMetadata
>>> from skbio.io import read
>>> gff = io.StringIO(gff_str)
We can read it into ``IntervalMetadata``. Each line will be read into
an interval feature in ``IntervalMetadata`` object:
>>> im = read(gff, format='gff3', into=IntervalMetadata,
... seq_id='seq_1')
>>> im # doctest: +SKIP
3 interval features
-------------------
Interval(interval_metadata=<4604421736>, bounds=[(9, 90)], \
fuzzy=[(False, False)], metadata={'type': 'gene', \
'phase': 0, 'strand': '+', 'source': '.', 'score': '.', 'ID': 'gen1'})
Interval(interval_metadata=<4604421736>, bounds=[(9, 30)], \
fuzzy=[(False, False)], metadata={'strand': '+', 'source': '.', \
'type': 'exon', 'Parent': 'gen1', 'score': '.'})
Interval(interval_metadata=<4604421736>, bounds=[(49, 90)], \
fuzzy=[(False, False)], metadata={'strand': '+', 'source': '.', \
'type': 'exon', 'Parent': 'gen1', 'score': '.'})
We can write the ``IntervalMetadata`` object back to GFF3 file:
>>> with io.StringIO() as fh: # doctest: +NORMALIZE_WHITESPACE
... print(im.write(fh, format='gff3', seq_id='seq_1').getvalue())
##gff-version 3
seq_1 . gene 10 90 . + 0 ID=gen1
seq_1 . exon 10 30 . + . Parent=gen1
seq_1 . exon 50 90 . + . Parent=gen1
<BLANKLINE>
If the GFF3 file does not have the sequence ID, it will return an empty object:
>>> gff = io.StringIO(gff_str)
>>> im = read(gff, format='gff3', into=IntervalMetadata,
... seq_id='foo')
>>> im
0 interval features
-------------------
We can also read the GFF3 file into a generator:
>>> gff = io.StringIO(gff_str)
>>> gen = read(gff, format='gff3')
>>> for im in gen: # doctest: +SKIP
... print(im[0]) # the seq id
... print(im[1]) # the interval metadata on this seq
seq_1
3 interval features
-------------------
Interval(interval_metadata=<4603377592>, bounds=[(9, 90)], \
fuzzy=[(False, False)], metadata={'type': 'gene', 'ID': 'gen1', \
'source': '.', 'score': '.', 'strand': '+', 'phase': 0})
Interval(interval_metadata=<4603377592>, bounds=[(9, 30)], \
fuzzy=[(False, False)], metadata={'strand': '+', 'type': 'exon', \
'Parent': 'gen1', 'source': '.', 'score': '.'})
Interval(interval_metadata=<4603377592>, bounds=[(49, 90)], \
fuzzy=[(False, False)], metadata={'strand': '+', 'type': 'exon', \
'Parent': 'gen1', 'source': '.', 'score': '.'})
seq_2
1 interval feature
------------------
Interval(interval_metadata=<4603378712>, bounds=[(79, 96)], \
fuzzy=[(False, False)], metadata={'strand': '-', 'type': 'gene', \
'ID': 'gen2', 'source': '.', 'score': '.'})
For the GFF3 file with sequences, we can read it into ``Sequence`` or ``DNA``:
>>> gff = io.StringIO(gff_str)
>>> seq = read(gff, format='gff3', into=Sequence, seq_num=1)
>>> seq
Sequence
--------------------------------------------------------------------
Metadata:
'description': ''
'id': 'seq_1'
Interval metadata:
3 interval features
Stats:
length: 100
--------------------------------------------------------------------
0 ATGCATGCAT GCATGCATGC ATGCATGCAT GCATGCATGC ATGCATGCAT GCATGCATGC
60 ATGCATGCAT GCATGCATGC ATGCATGCAT GCATGCATGC
>>> gff = io.StringIO(gff_str)
>>> seq = read(gff, format='gff3', into=DNA, seq_num=2)
>>> seq
DNA
--------------------------------------------------------------------
Metadata:
'description': ''
'id': 'seq_2'
Interval metadata:
1 interval feature
Stats:
length: 120
has gaps: False
has degenerates: False
has definites: True
GC-content: 50.00%
--------------------------------------------------------------------
0 ATGCATGCAT GCATGCATGC ATGCATGCAT GCATGCATGC ATGCATGCAT GCATGCATGC
60 ATGCATGCAT GCATGCATGC ATGCATGCAT GCATGCATGC ATGCATGCAT GCATGCATGC
References
----------
.. [1] https://github.com/The-Sequence-Ontology/\
Specifications/blob/master/gff3.md
'''
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import re
from collections import Iterable
from skbio.sequence import DNA, Sequence
from skbio.io import create_format, GFF3FormatError
from skbio.metadata import IntervalMetadata
from skbio.io.format._base import (
_line_generator, _too_many_blanks, _get_nth_sequence)
from skbio.io.format.fasta import _fasta_to_generator
from skbio.io.format._sequence_feature_vocabulary import (
_vocabulary_change, _vocabulary_skip)
from skbio.io import write
# Format registry object; the reader/writer/sniffer functions below are
# attached to it via decorators.
gff3 = create_format('gff3')
@gff3.sniffer()
def _gff3_sniffer(fh):
    '''Return (True, {}) iff the first real line is a GFF3 version pragma.'''
    # Bail out quickly on files padded with many blank lines.
    if _too_many_blanks(fh, 5):
        return False, {}
    lines = _line_generator(fh, skip_blanks=True, strip=False)
    try:
        first = next(lines)
    except StopIteration:
        # empty file: nothing to sniff
        return False, {}
    return bool(re.match(r'##gff-version\s+3', first)), {}
@gff3.reader(None)
def _gff3_to_generator(fh):
    '''Yield the annotation of each sequence in the GFF3 file.

    Parameters
    ----------
    fh : file
        file handler

    Yields
    ------
    tuple
        str of seq id, IntervalMetadata
    '''
    # ``##sequence-region`` pragmas always precede the data lines they
    # describe, so every length is known before its record is parsed.
    lengths = {}
    for kind, seq_id, payload in _yield_record(fh):
        if kind == 'length':
            lengths[seq_id] = payload
        elif kind == 'data':
            yield seq_id, _parse_record(payload, lengths.get(seq_id))
@gff3.writer(None)
def _generator_to_gff3(obj, fh, skip_subregion=True):
    '''Write an iterable of (seq_id, IntervalMetadata) pairs as GFF3.

    Parameters
    ----------
    obj : Iterable of (seq_id, IntervalMetadata)
    fh : file handler
    skip_subregion : bool
        write a line for each sub-regions of an ``Interval`` if it is ``False``
    '''
    # mandatory GFF3 version pragma
    fh.write('##gff-version 3\n')
    for seq_id, interval_metadata in obj:
        _serialize_interval_metadata(
            interval_metadata, seq_id, fh, skip_subregion)
@gff3.reader(Sequence)
def _gff3_to_sequence(fh, seq_num=1):
    """Read the ``seq_num``-th (1-based) record of a GFF3 file into a
    ``Sequence``, attaching its annotation as interval metadata."""
    return _construct_seq(fh, Sequence, seq_num)
@gff3.writer(Sequence)
def _sequence_to_gff3(obj, fh, skip_subregion=True):
    """Write a ``Sequence`` (annotation lines plus ##FASTA section) as GFF3."""
    # write file header
    fh.write('##gff-version 3\n')
    _serialize_seq(obj, fh, skip_subregion)
@gff3.reader(DNA)
def _gff3_to_dna(fh, seq_num=1):
    """Read the ``seq_num``-th (1-based) record of a GFF3 file into a
    ``DNA`` sequence, attaching its annotation as interval metadata."""
    return _construct_seq(fh, DNA, seq_num)
@gff3.writer(DNA)
def _dna_to_gff3(obj, fh, skip_subregion=True):
    """Write a ``DNA`` sequence (annotation lines plus ##FASTA section) as GFF3."""
    # write file header
    fh.write('##gff-version 3\n')
    _serialize_seq(obj, fh, skip_subregion)
@gff3.reader(IntervalMetadata)
def _gff3_to_interval_metadata(fh, seq_id):
    '''Read the GFF3 annotation of one sequence into an IntervalMetadata.

    Parameters
    ----------
    fh : file handler
    seq_id : str
        sequence ID which the interval metadata is associated with
    '''
    seq_len = None
    for kind, sid, payload in _yield_record(fh):
        # ignore records belonging to other sequences
        if sid != seq_id:
            continue
        if kind == 'length':
            # length comes from a sequence-region pragma
            seq_len = payload
        elif kind == 'data':
            return _parse_record(payload, seq_len)
        else:
            raise GFF3FormatError(
                'Unknown section in the input GFF3 file: '
                '%r %r %r' % (kind, sid, payload))
    # no matching record found: return an empty object instead of None
    return IntervalMetadata(None)
@gff3.writer(IntervalMetadata)
def _interval_metadata_to_gff3(obj, fh, seq_id, skip_subregion=True):
    '''Output ``IntervalMetadata`` object to GFF3 file.

    Parameters
    ----------
    obj : IntervalMetadata
    fh : file object like
    seq_id : str
        ID for column 1 in the GFF3 file.
    skip_subregion : bool
        write a line for each sub-regions of an ``Interval`` if it is ``False``
    '''
    # write file header
    fh.write('##gff-version 3\n')
    # Bug fix: forward the caller's ``skip_subregion`` instead of the
    # previously hard-coded ``True``, which silently discarded the
    # per-sub-region output requested by the caller.
    _serialize_interval_metadata(obj, seq_id, fh,
                                 skip_subregion=skip_subregion)
def _construct_seq(fh, constructor=DNA, seq_num=1):
    """Build the ``seq_num``-th sequence of a GFF3-with-FASTA file,
    attaching its parsed annotation as interval metadata."""
    lines = []
    # NOTE(review): ``enumerate`` counts *all* yielded records, including
    # 'length' (sequence-region pragma) records, so ``seq_num`` may be
    # offset when pragmas are present -- confirm intended behavior.
    for i, (data_type, seq_id, l) in enumerate(_yield_record(fh), 1):
        if data_type == 'data' and seq_num == i:
            lines = l
    # The record loop above stops at ##FASTA, so the same handle now
    # points at the sequence section.
    seq = _get_nth_sequence(_fasta_to_generator(fh, constructor=constructor),
                            seq_num=seq_num)
    seq.interval_metadata = _parse_record(lines, len(seq))
    return seq
def _yield_record(fh):
    '''Yield ``('length', seq_id, length)`` and ``('data', seq_id, lines)``.

    Consecutive data lines sharing a sequence ID are grouped into one
    'data' record. Parsing stops at the ``##FASTA`` directive. An empty
    input produces an empty generator.
    '''
    lines = []
    current = False  # sentinel: no data line seen yet
    for line in _line_generator(fh, skip_blanks=True, strip=True):
        if line.startswith('##sequence-region'):
            # the pragma gives the landmark length: end - start + 1
            _, seq_id, start, end = line.split()
            length = int(end) - int(start) + 1
            yield 'length', seq_id, length
        if line.startswith('##FASTA'):
            # stop once reaching to sequence section
            break
        if not line.startswith('#'):
            try:
                seq_id, _ = line.split('\t', 1)
            except ValueError:
                raise GFF3FormatError(
                    'Wrong GFF3 format at line: %s' % line)
            if current == seq_id:
                lines.append(line)
            else:
                # a new sequence ID starts; flush the previous group
                if current is not False:
                    yield 'data', current, lines
                lines = [line]
                current = seq_id
    # Flush the final group, if any. (The original code had an
    # unreachable ``yield`` after ``return`` here -- dead code removed;
    # the function is a generator by virtue of the yields above.)
    if current is not False:
        yield 'data', current, lines
def _parse_record(lines, length):
    '''Parse GFF3 data lines into an ``IntervalMetadata`` of given length.'''
    im = IntervalMetadata(length)
    for row in lines:
        fields = row.split('\t')
        # a valid GFF3 data line has exactly 9 tab-separated columns
        if len(fields) != 9:
            raise GFF3FormatError(
                'do not have 9 columns in this line: "%s"' % row)
        # Column 1 (seq id) repeats on every line of this record, so it
        # is not stored per feature.
        md = {'source': fields[1],
              'type': fields[2],
              'score': fields[5],
              'strand': fields[6]}
        phase = fields[7]
        # the phase column is either an integer or the placeholder '.'
        try:
            md['phase'] = int(phase)
        except ValueError:
            if phase != '.':
                raise GFF3FormatError(
                    'unknown value for phase column: {!r}'.format(phase))
        md.update(_parse_attr(fields[8]))
        # GFF3 positions are 1-based inclusive; IntervalMetadata bounds
        # are 0-based half-open.
        im.add([(int(fields[3]) - 1, int(fields[4]))], metadata=md)
    return im
def _parse_attr(s):
    '''Parse the attribute column (column 9) into a metadata dict.'''
    rename = _vocabulary_change('gff3')
    md = {}
    # a trailing ';' would otherwise produce an empty tag=value pair
    for pair in s.rstrip(';').split(';'):
        key, value = pair.split('=')
        # translate GFF3 tag names into the shared vocabulary
        md[rename.get(key, key)] = value
    return md
def _serialize_interval_metadata(interval_metadata, seq_id, fh,
                                 skip_subregion=True):
    '''Serialize an IntervalMetadata to GFF3.

    Parameters
    ----------
    interval_metadata : IntervalMetadata
    seq_id : str
        Seq id for the current annotation. It will be used as the 1st column
        in the GFF3.
    fh : file handler
        the file object to output
    skip_subregion : bool
        Whether to skip outputting each sub region as a line in GFF3.
    '''
    column_keys = ['source', 'type', 'score', 'strand', 'phase']
    # mapping from shared vocabulary back to GFF3 attribute names
    voca_change = _vocabulary_change('gff3', False)
    voca_skip = _vocabulary_skip('gff3')
    # column values go into columns 2-8, never into the attribute column
    voca_skip.extend(column_keys)
    # these characters have reserved meanings in column 9 and must be
    # escaped when used in other contexts
    escape = str.maketrans({';': '%3B',
                            '=': '%3D',
                            '&': '%26',
                            ',': '%2C'})
    for interval in interval_metadata._intervals:
        md = interval.metadata
        bd = interval.bounds
        # GFF3 is 1-based inclusive; bounds are 0-based half-open, so the
        # feature line spans the first bound's start to the last bound's end.
        start = str(bd[0][0] + 1)
        end = str(bd[-1][-1])
        # missing column values are written as the '.' placeholder
        source, feat_type, score, strand, phase = [
            str(md.get(i, '.')) for i in column_keys]
        columns = [seq_id, source, feat_type, start, end, score, strand, phase]
        # serialize the attributes in column 9
        attr = []
        # use sort to make the output order deterministic
        for k in sorted(md):
            if k in voca_skip:
                # skip the metadata that doesn't go to attribute column
                continue
            v = md[k]
            if k in voca_change:
                k = voca_change[k]
            if isinstance(v, Iterable) and not isinstance(v, str):
                # if there are multiple values for this attribute,
                # convert them to str and concat them with ","
                v = ','.join(str(i).translate(escape) for i in v)
            else:
                v = v.translate(escape)
            attr.append('%s=%s' % (k.translate(escape), v))
        columns.append(';'.join(attr))
        fh.write('\t'.join(columns))
        fh.write('\n')
        # if there are multiple regions for this feature,
        # output each region as a standalone line in GFF3.
        # NOTE(review): this reuses and mutates ``columns`` in place, so
        # each sub-region line inherits the parent's other columns.
        if len(bd) > 1 and skip_subregion is False:
            for start, end in bd:
                # if this is a gene, then each sub region should be an exon
                if columns[2] == 'gene':
                    columns[2] = 'exon'
                columns[3] = str(start + 1)
                columns[4] = str(end)
                try:
                    parent = md['ID']
                except KeyError:
                    raise GFF3FormatError(
                        'You need provide ID info for '
                        'the parent interval feature: %r' % interval)
                # sub-region attributes are reduced to a Parent link only
                columns[8] = 'Parent=%s' % parent
                fh.write('\t'.join(columns))
                fh.write('\n')
def _serialize_seq(seq, fh, skip_subregion=True):
    '''Serialize a sequence to GFF3.

    Writes the sequence's interval metadata as GFF3 data lines, then the
    sequence itself under a ``##FASTA`` directive.
    '''
    _serialize_interval_metadata(
        seq.interval_metadata, seq.metadata['id'], fh, skip_subregion)
    fh.write('##FASTA\n')
    write(seq, into=fh, format='fasta')
|
gregcaporaso/scikit-bio
|
skbio/io/format/gff3.py
|
Python
|
bsd-3-clause
| 18,537
|
[
"scikit-bio"
] |
cefe160162a66c934e757ed8a96fa23b2f33e7cc8e58586defadf198226be4b4
|
########################################################################
# File : ServerUtils.py
# Author : Ricardo Graciani
########################################################################
"""
Provide a uniform interface to the backend for local and remote clients.
There's a pretty big assumption here: that DB and Handler expose the same calls, with identical signatures.
This is not exactly the case for WMS DBs and services.
"""
__RCSID__ = "$Id$"
def getDBOrClient(DB, serverName):
  """ Tries to instantiate the DB object
      and returns it if we manage to connect to the DB,
      otherwise returns a Client of the server

      :param DB: DB class to instantiate (may be None if its import failed)
      :param str serverName: DISET service name used for the RPC fallback
      :return: a connected DB instance, or an RPCClient for serverName
  """
  from DIRAC import gLogger
  from DIRAC.Core.DISET.RPCClient import RPCClient
  try:
    myDB = DB()
    if myDB._connected:
      return myDB
  except Exception:
    # Deliberate best-effort: any DB failure falls back to RPC below.
    # Narrowed from BaseException, which also swallowed
    # KeyboardInterrupt/SystemExit.
    pass
  gLogger.info('Can not connect to DB will use %s' % serverName)
  return RPCClient(serverName)
def getPilotAgentsDB():
  """ Return a direct PilotAgentsDB when its module can be imported,
      otherwise fall back to an RPCClient for the PilotManager service
      (via getDBOrClient, which also handles connection failures).
  """
  serverName = 'WorkloadManagement/PilotManager'
  PilotAgentsDB = None
  try:
    from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
  except Exception:
    # Import failure is expected on client installations; narrowed from
    # BaseException, which also swallowed KeyboardInterrupt/SystemExit.
    pass
  return getDBOrClient(PilotAgentsDB, serverName)


# Module-level singleton, resolved once at import time.
pilotAgentsDB = getPilotAgentsDB()
|
andresailer/DIRAC
|
WorkloadManagementSystem/Client/ServerUtils.py
|
Python
|
gpl-3.0
| 1,235
|
[
"DIRAC"
] |
8e488e9962198dbb2a2c88f42eef06bcf0b95b09bf596feb1c705fb25cc9ebfe
|
## INFO ########################################################################
## ##
## COUBLET ##
## ======= ##
## ##
## Cross-platform desktop client to follow posts from COUB ##
## Version: 0.6.93.172 (20140814) ##
## ##
## File: widgets/button.py ##
## ##
## Designed and written by Peter Varo. Copyright (c) 2014 ##
## License agreement is provided in the LICENSE file ##
## For more info visit: https://github.com/petervaro/coub ##
## ##
## Copyright (c) 2014 Coub Ltd and/or its suppliers and licensors, ##
## 5 Themistokli Dervi Street, Elenion Building, 1066 Nicosia, Cyprus. ##
## All rights reserved. COUB (TM) is a trademark of Coub Ltd. ##
## http://coub.com ##
## ##
######################################################################## INFO ##
# Import PyQt5 modules
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QLabel, QHBoxLayout, QVBoxLayout
# Module level constants
# Relative placement of the icon and the label (used as a slice offset
# into ``(image, text, image)`` in CoubletButtonWidget.__init__)
ICON_AND_LABEL = 0
LABEL_AND_ICON = 1
# Layout direction of the button
VERTICAL = 0
HORIZONTAL = 1
#------------------------------------------------------------------------------#
class CoubletButtonWidget(QWidget):

    """Icon + label composite widget that acts as a clickable button.

    The widget can optionally carry a second ('selected') icon, font and
    palette; select() / deselect() switch between the two appearances.
    Clicks are forwarded to an external handler object if one is given.
    """

    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # TODO: do something with width and height if provided
    def __init__(self, icon, label, order, orientation,
                 icon_selected=None, font=None, font_selected=None, palette=None,
                 palette_selected=None,
                 width=0, height=0, spacing=0, parent=None, mouse_event_handler=None,
                 padding_left=0, padding_top=0, padding_right=0, padding_bottom=0):
        super().__init__(parent)

        # Create layout (HORIZONTAL is truthy, VERTICAL falsy)
        layout = QHBoxLayout() if orientation else QVBoxLayout()
        layout.setSpacing(0)
        layout.setContentsMargins(padding_left, padding_top,
                                  padding_right, padding_bottom)

        # Set icon
        self._image = image = QLabel()
        image.setPixmap(icon)
        # If object has two stages, remember both pixmaps; otherwise the
        # 'selected' pixmap is simply the normal one.
        if icon_selected:
            self._icon = icon
            self._icon_selected = icon_selected
        else:
            self._icon_selected = self._icon = icon

        # Set label
        text = QLabel(label)
        # Bug fix: select()/deselect() read self._text, so store it
        # unconditionally (it was previously only assigned when
        # font_selected or palette_selected was given, which made
        # select() raise AttributeError otherwise).
        self._text = text
        if font:
            text.setFont(font)
        if palette:
            text.setPalette(palette)
        if font_selected:
            self._font = font
            self._font_selected = font_selected
        else:
            self._font_selected = self._font = font
        if palette_selected:
            self._palette = palette
            self._palette_selected = palette_selected
        else:
            self._palette_selected = self._palette = palette

        # Place items in order: slicing (image, text, image)[order:order+2]
        # yields icon/label for ICON_AND_LABEL, label/icon otherwise.
        for i, item in enumerate((image, text, image)[order:order+2]):
            if i:
                layout.addSpacing(spacing)
            layout.addWidget(item, alignment=Qt.AlignHCenter)

        # Set layout
        self.setLayout(layout)

        # Forward mouse clicks to the external handler, if any
        if mouse_event_handler:
            self._event_handler = mouse_event_handler
            self.mouseReleaseEvent = self.on_mouse_release

    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    def on_mouse_release(self, event):
        """Forward a mouse-release event to the external click handler."""
        self._event_handler.click(event.button())

    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    def select(self):
        """Switch to the 'selected' icon, font and palette."""
        self._image.setPixmap(self._icon_selected)
        self._text.setFont(self._font_selected)
        self._text.setPalette(self._palette_selected)

    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    def deselect(self):
        """Switch back to the normal icon, font and palette."""
        self._image.setPixmap(self._icon)
        self._text.setFont(self._font)
        self._text.setPalette(self._palette)
|
petervaro/coublet
|
widgets/button.py
|
Python
|
mit
| 4,715
|
[
"VisIt"
] |
2d828f020cc15609714b736aa40a8685b2d352edf1d1b587bc1de2b11d8a3b1a
|
##!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Tools for field stars
:author: D. Bednarski
:license: GNU GPL v3.0 (https://github.com/danmoser/pyhdust/blob/master/LICENSE)
"""
import os
import re
import csv
import copy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
#import matplotlib.cm as mplcm
from matplotlib.colors import Normalize, hsv_to_rgb
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from itertools import product
from glob import glob
from pyhdust import hdtpath
import pyhdust.poltools as polt
import pyhdust.phc as phc
#from glob import glob
#import pyhdust.phc as phc
#import pyhdust.jdcal as jdcal
#import datetime as dt
#from pyhdust import hdtpath
#from itertools import product
mpl.rcParams['pdf.fonttype']=42 # Fix the % Symbol in pdf images
# Photometric filters handled by this module, bluest to reddest
filters = ['u','b','v','r','i']
fonts = [20, 17, 17, 14, 13] # Font sizes for titles, axes labels, axes values, key label of graphs, subplot labels
# Dictionary for filter colors for the graphs
colrs = { 'u' : 'Orchid',
          'b' : 'MediumBlue',
          'v' : 'Green',
          'r' : 'Red',
          'i' : 'GoldenRod',
        }
# Dictionary for column numbers of csv table infile
# NOTE(review): gaps in the numbering (1, 6, 12, 22, 24-36) are CSV
# columns not used by this module -- confirm against the input table.
idx1 = {'be?'   : 0,  # line concerning to the Be star?
        'tgt'   : 2,  # target name
        'tgtHD' : 3,  # HD/BD/CD target number
        'be'    : 4,  # Be star name
        'sel?'  : 5,  # Standard selected? ('Y'/'')
        'diang' : 7,  # angular distance (in degree)
        'r'     : 8,  # radial distance (per cent of Be distance)
        'sr'    : 9,  # radial distance error (idem)
        'type'  : 10, # target star type
        'stype' : 11, # target star spectral type
        'mplx'  : 13, # target parallax (")
        'msplx' : 14, # target parallax error (")
        'plx'   : 15, # target parallax (pc)
        'splx'  : 16, # target parallax error (pc)
        'magu'  : 17, # magnitude filter u
        'magb'  : 18, # magnitude filter b
        'magv'  : 19, # magnitude filter v
        'magr'  : 20, # magnitude filter r
        'magi'  : 21, # magnitude filter i
        'coor'  : 23, # coordinates
        'RA'    : 37, # RA (hours, decimal)
        'DEC'   : 38, # DEC (degree, decimal)
        }
# Dictionary for column numbers of csv table outfile
idx2 = {'MJD'   : 0,  # MJD
        'date'  : 1,  # date
        'ccd'   : 2,  # CCD name
        'filt'  : 3,  # filter
        'calc'  : 4,  # calcite
        'std'   : 5,  # standard names
        'dth'   : 6,  # delta theta
        'sdth'  : 7,  # delta theta error
        'p'     : 8,  # pol (%)
        'q'     : 9,  # Stokes Q (%)
        'u'     : 10, # Stokes U (%)
        'thet'  : 11, # pol angle
        's'     : 12, # pol error (%)
        'sthet' : 13, # pol angle error
        'out'   : 14, # target out file
        '#star' : 15, # star number inside the outfile
        'flag'  : 16, # star flag ('OK'/'W'/'E')
        'tags'  : 17, # star tags
        'tgt'   : 18, # target name
        'tgtHD' : 19, # HD/BD/CD target number
        'be'    : 20, # Be star name
        'sel?'  : 21, # Field star selected? ('Y'/'')
        'magu'  : 22, # magnitude filter u
        'magb'  : 23, # magnitude filter b
        'magv'  : 24, # magnitude filter v
        'magr'  : 25, # magnitude filter r
        'magi'  : 26, # magnitude filter i
        'type'  : 27, # target star type
        'stype' : 28, # target star spectral type
        'diang' : 29, # angular distance (in degree)
        'plx'   : 30, # target parallax (pc)
        'splx'  : 31, # target parallax error (pc)
        'plxbe' : 32, # Be parallax (pc)
        'splxbe': 33, # Be parallax error (pc)
        'coor'  : 34, # target coordinates
        'dRA'   : 35, # delta RA (target-Be) (degree, decimal)
        'dDEC'  : 36, # delta DEC (target-Be) (degree, decimal)
        'leff'  : 37, # lambda_eff (Angstrom)
        'sleff' : 38, # lambda_eff error (Angstrom)
        }
def readcsv(csvfile, be):
    """
    Read lines of Be star *be* from *csvfile* and return a
    list with all components:

    [
     [[star 1, filter 1],
      [star 1, filter 2]],
     [[star 2, filter 1],
      [star 2, filter 2]],
     ...
    ]

    Ignores the observations with 'E' flag.
    """
    data = []
    tgt_curr = 'VOID'
    # Bug fix: 'ro' is not a valid mode in Python 3 ('r' is what was
    # intended); the context manager also closes the file deterministically.
    with open(csvfile, 'r') as f0:
        reader = csv.reader(f0, delimiter=';')
        # Sort by (Be name, target name, filter) so that all rows of one
        # target are contiguous in the scan below.
        sortedcsv = sorted(reader, key=lambda x: [x[idx2['be']],
                                                  x[idx2['tgt']],
                                                  x[idx2['filt']]])
    for line in sortedcsv:
        if line == '':
            continue
        elif line[idx2['be']] == be:
            tgt = line[idx2['tgt']]
            if tgt != tgt_curr:
                # a new target starts: open a new sub-list
                tgt_curr = tgt
                data += [[]]
            # Copy only if is correct data (skip 'E'-flagged observations)
            if line[idx2['flag']] != 'E':
                data[-1] += [line]
    return data
def gencsv(csvin, path=None, skipdth=False, delta=3.5, epssig=2.0):
    """
    Generate a csvfile with every observations for the field stars
    listed in pyhdust/refs/pol_hip.txt.

    Compute lambda_eff using color index and an mean airmass X=1.35.
    The error is the (lambda_eff(X=1.7)-lambda_eff(X=1.))/2

    INPUT
        csvin        The csv file with the informations
                     about the field stars. The columns are
                     indentified through idx1[] dictionary.
        path         The path to the ``red`` directory, inside
                     which are the nights with the observations
        skipdth      print all target values anyway, even when
                     there are no standard star?
        epssig       sigP/P max for unpolarized target (sigP/P
                     up to epssig doesn't need standard star
                     case skipdth==False)
        delta        tolerance for the angle between the two beams
                     of calcite.

    OUTPUT: writes <path>/dados.csv (columns per idx2[]); returns None.
    """
    if path is None:
        path = os.getcwd()
    try:
        objs = np.loadtxt('{0}/refs/pol_hip.txt'.format(hdtpath()), dtype=str)
    except Exception:
        print('# ERROR: Can\'t read files pyhdust/refs/pol_hip.txt.')
        raise SystemExit(1)

    # Output table: one row per (field star, observation) pair.
    with open('{0}/dados.csv'.format(path), 'w') as fout:
        csvout = csv.writer(fout, delimiter=';')
        # Main loop
        for obj in objs:
            # Generate the table file for each field star
            polt.genTarget(obj, path=path, skipdth=skipdth, delta=delta,
                           epssig=epssig)
            if not os.path.exists('{0}/{1}.log'.format(path, obj)):
                continue
            # Read informations about the field star and its Be.
            # Bug fix: 'ro' is an invalid mode in Python 3; 'r' intended.
            with open(csvin, 'r') as fin:
                beline, tgtline, linetmp = [], [], []
                for line in csv.reader(fin, delimiter=';'):
                    if line == []:
                        continue
                    # Case the line is for some Be
                    elif line[idx1['be?']] != '':
                        linetmp = line[:]
                    # Case it was matched the field star line
                    elif line[idx1['tgt']] == obj:
                        tgtline += [line[:]]
                        beline += [linetmp[:]]
            # At this point we have two lists: tgtline and beline, which
            # hold the informations about the field star 'obj' and the
            # respective Be star. They are lists because the same field
            # star can be associated with more than a single Be.
            semilines = []
            for i in range(len(tgtline)):
                semilines += [[]]
                # Build a half-line to be copied into every output row:
                # the field-star data and its relation to the reference
                # Be star; only the relevant columns are collected.
                for tag in ('tgt', 'tgtHD', 'be', 'sel?', 'magu', 'magb',
                            'magv', 'magr', 'magi', 'type', 'stype',
                            'diang', 'plx', 'splx'):
                    semilines[i] += [tgtline[i][idx1[tag]]]
                semilines[i] += [beline[i][idx1['plx']],
                                 beline[i][idx1['splx']]]
                semilines[i] += [tgtline[i][idx1['coor']]]
                # delta RA (target-Be), converted from hours to degrees
                semilines[i] += ['{0:.7f}'.format(
                    (float(tgtline[i][idx1['RA']]) -
                     float(beline[i][idx1['RA']])) * 360. / 24.)]
                semilines[i] += ['{0:.7f}'.format(
                    float(tgtline[i][idx1['DEC']]) -
                    float(beline[i][idx1['DEC']]))]
            fobj = np.loadtxt('{0}/{1}.log'.format(path, obj), dtype=str,
                              comments='#')
            # Single-row logs load as 1-D; reshape to (nobs, 18)
            if len(fobj) > 0 and not isinstance(fobj[0], np.ndarray):
                fobj = fobj.reshape(-1, 18)
            for semiline in semilines:
                for line in fobj.tolist():
                    # Compute the lambda_effective and errors from the
                    # available color index: u-b for filter u, b-v for
                    # the others ('~'/'' mark missing magnitudes).
                    color = ''
                    if line[3] == 'u' and semiline[4] not in ('~', '') \
                            and semiline[5] not in ('~', ''):
                        color = float(semiline[4]) - float(semiline[5])
                    elif line[3] in filters[1:] and \
                            semiline[5] not in ('~', '') and \
                            semiline[6] not in ('~', ''):
                        color = float(semiline[5]) - float(semiline[6])
                    if color != '':
                        leff = polt.lbds(color, line[3], line[2],
                                         airmass=1.35)
                        leff1 = polt.lbds(color, line[3], line[2],
                                          airmass=1.)
                        leff2 = polt.lbds(color, line[3], line[2],
                                          airmass=1.7)
                        # half the airmass 1.0-1.7 spread as the error
                        sleff = abs(leff2 - leff1) / 2
                    else:
                        # no usable color: fall back to filter default
                        leff = phc.lbds[line[3]]
                        sleff = 0.
                    csvout.writerow(line + semiline + [leff, sleff])
        # Process the found star with the substring 'field'
        polt.genTarget('field', path=path, skipdth=skipdth, delta=delta,
                       epssig=epssig)
        if os.path.exists('{0}/field.log'.format(path)):
            fobj = np.loadtxt('{0}/field.log'.format(path), dtype=str,
                              comments='#')
            if len(fobj) > 0 and not isinstance(fobj[0], np.ndarray):
                fobj = fobj.reshape(-1, 18)
            for line in fobj.tolist():
                leff = phc.lbds[line[3]]
                csvout.writerow(line[:-1] +
                                [line[-1], '', line[-1].split('_')[0], 'Y'] +
                                [''] * 15 + [leff])
    return
def getTable(data, x, y, z=None, sx=None, sy=None, sz=None, \
                vfilter=['no-std'], bin_data=True, onlyY=False, unbias='wk'):
    """
    Receive the list *data* with many collumns and return
    the columns concerning to the *x*, *y*, *z* quantities
    (and their errors *sx*, *sy*, *sz*). Returns also 'objarr',
    a list with object names, filters and validation status.

    IMPORTANT: 1) Propagates error of delta theta factor for
                  'q', 'u', 'p' and 'thet' labels
               2) Apply unbias correction for P and theta values
                  when specified through 'unbias' variable.
               3) Case some label is 'q', 'u', 'p' or 'thet',
                  'no-std' is not in vfilter list and
                  bin_data==True, don't bin the data of those
                  which have 'no-std' flag to prevent to compute
                  values WITH NO MEANING. Return various lines
                  for every unbinnable data and a line for the
                  others else binned data.

    INPUT

        x,y,z,sx,sy,sz    Label (key) of *idx2* dictionary.
        bin_data          Bin the data in the observations for
                          the same object+filter+x variable? Note:
                          Case y and/or z are 'p' or 'thet',
                          compute the bins *over the Stokes
                          parameters* and then return the values
                          to 'p' or 'thet'!
        onlyY             True = only selects the list with
                          a "Y" marked
        unbias            Estimator to unbias the data when 'y' or 'z'
                          is equals to 'p' or 'thet'. Three options:
                            a) 'ml': Maximum Likelihood (K=1.41)
                            b) 'wk': Wardle & Kronberg (K=1.0)
                            c) '' : None (K=0.)
                          where K is the factor to unbias (read the
                          description of routines 'unbiasData' and
                          'meanAngle'.
        vfilter           List whose elements are the labels (tags)
                          to be filtered from the output. It can be
                          'full', 'prob' or 'comp' for these
                          pre-defined lists in polt.vfil dictionary.

    To mount your own list vfilter, select the current tags:

      # General tags for target/standard observation
      - 'bad-mod'       bad modulation
      - 'very-bad-mod'  very bad modulation
      - 'incomp-mods'   some modulations are incompatible
      - 'obs-prob'      some observational problem/error
      - 'iagpol-prob'   polarimeter problem suspected
      - 'other-prob'    another relevant problem
      - 'obs!=pub'      std incompatible with the published

      # Tags for standard status
      - 'no-std'        no standard in the night
      - 'oth-day-std'   standard from another day
      - 'oth-dth'       delta theta estimated from another filter
    """
    # Helper: walk every line of `data` and extract the requested columns.
    # Returns (objarr, xarr, yarr, zarr) where objarr = [names, filters,
    # tags] and each of xarr/yarr/zarr = [[values], [errors]]. With
    # phantom=True, a line is ALSO skipped when the *outer* x/y/z columns
    # are empty, so successive pro_arr() calls over the same data stay
    # aligned element-by-element (same set of surviving lines).
    def pro_arr (xx,yy,zz=None,sxx=None,syy=None,szz=None,phantom=True):
        objarr = [[],[],[]]
        xarr = [[],[]]
        yarr = [[],[]]
        zarr = [[],[]]
        for block in data:
            for line in block:
                # Skip the line: 1) case the observation has some tag to be filtered;
                #                2) case the xx, yy or zz has not value in line (i.e.,
                #                   if it is equal to '' or '~');
                #                3) case the observation has not the mark 'Y' that indicates
                #                   it was selected for THAT Be star, since onlyY==True.
                if (onlyY==True and line[idx2['sel?']]!='Y') or \
                                line[idx2[xx]] in ('','~') or \
                                line[idx2[yy]] in ('','~') or \
                                (zz != None and line[idx2[zz]] in ('','~')):
                    validate = False
                # Phantom prevents the lists returned by getTable of distinct sizes.
                # (Note: it tests the OUTER x/y/z labels, not xx/yy/zz.)
                elif phantom and (line[idx2[x]] in ('','~') or \
                                line[idx2[y]] in ('','~') or \
                                (z != None and line[idx2[z]] in ('','~'))):
                    validate = False
                else:
                    validate = True
                    for vfilt in vfilter:
                        if vfilt in line[idx2['tags']]:
                            validate = False
                            break
                # IF VALIDATE, proceed
                if validate:
                    objarr[0] += [line[idx2['tgt']]]
                    objarr[1] += [line[idx2['filt']]]
                    objarr[2] += [line[idx2['tags']]]
                    xarr[0] += [line[idx2[xx]]]
                    yarr[0] += [line[idx2[yy]]]
                    if sxx != None:
                        xarr[1] += [line[idx2[sxx]]]
                    else:
                        xarr[1] += ['']
                    if syy != None:
                        yarr[1] += [line[idx2[syy]]]
                    else:
                        yarr[1] += ['']
                    if zz != None:
                        zarr[0] += [line[idx2[zz]]]
                        if szz != None:
                            zarr[1] += [line[idx2[szz]]]
        # Cast whole columns to float when every element is numeric;
        # non-numeric columns (e.g. filter names) stay as strings.
        try: xarr[0] = [float(xelem) for xelem in xarr[0]]
        except: pass
        try: xarr[1] = [float(xelem) for xelem in xarr[1]]
        except: pass
        try: yarr[0] = [float(yelem) for yelem in yarr[0]]
        except: pass
        try: yarr[1] = [float(yelem) for yelem in yarr[1]]
        except: pass
        try: zarr[0] = [float(zelem) for zelem in zarr[0]]
        except: pass
        try: zarr[1] = [float(zelem) for zelem in zarr[1]]
        except: pass
        return objarr, xarr, yarr, zarr
    # Verify keys
    for param in (x, y, z, sx, sy, sz):
        if param != None and param not in idx2:
            print('Error: key {0} not found'.format(param))
            return 1
    # Verify if vfilter is a special filter
    if vfilter in polt.vfil.keys():
        vfilter = polt.vfil[vfilter]
    if bin_data:
        if x in ('p', 'thet'):
            print('Error: key \'p\' or \'thet\' can\'t be used as x values when bin_data==True. Try to pass the data that must be binned through the y values or set bin_data=False.')
            return 1
        elif z != None and y in ('p', 'thet'):
            print('Error: keys \'p\' and/or \'thet\' can\'t be used as x and/or y values when bin_data==True and z!=None. Try to pass the data that must be binned through the z values or set bin_data=False.')
            return 1
    # 'leff' varies among observations of the same star+filter, so it must
    # be ignored when deciding whether two lines can be binned together.
    if x == 'leff': ignx = True
    else: ignx = False
    if y == 'leff': igny = True
    else: igny = False
    objarr, xarr, yarr, zarr = pro_arr(x, y, z, sx, sy, sz)
    if bin_data and y not in ('p','thet','q','u') and z not in ('p','thet','q','u'):
        if z == None:
            binData(objarr, xarr, yarr, ignx=ignx)
        else:
            binData(objarr, xarr, yarr, zarr=zarr, ignx=ignx, igny=igny)
    # Case y or z is the polarization P, theta, Q or U, bin the data on the *Stokes parameters QU* and
    # compute the new P (or theta, Q, U) values.
    if (y in ('p','thet','q','u') or z in ('p','thet','q','u')):
        # Re-extract P/theta/QU columns (kept aligned with the main table
        # by the phantom filtering) and propagate the standard-star
        # delta-theta uncertainty into the theta/Q/U errors.
        objarr, parr, thetarr, dtharr = pro_arr(xx='p', yy='thet', sxx='s', zz='dth', szz='sdth')
        objarr_aux, qarr, uarr, lixo = pro_arr(xx='q', yy='u', sxx='s', syy='s')
        thetarr[1], qarr[1], uarr[1] = polt.propQU(parr[0], thetarr[0], parr[1], dtharr[1])
        if bin_data:
            # prevent=True keeps 'no-std' lines unbinned (see docstring, item 3).
            if z == None:
                xarr_aux = copy.deepcopy(xarr)
                binData(objarr, xarr, qarr, prevent=True, ignx=ignx)
                binData(objarr_aux, xarr_aux, uarr, prevent=True, ignx=ignx, igny=igny)
            else:
                xarr_aux = copy.deepcopy(xarr)
                yarr_aux = copy.deepcopy(yarr)
                binData(objarr, xarr, yarr, zarr=qarr, prevent=True, ignx=ignx, igny=igny)
                binData(objarr_aux, xarr_aux, yarr_aux, zarr=uarr, prevent=True, ignx=ignx, igny=igny)
        if 'thet' in (y, z):
            # Call meanAngle over the qarr and uarr lists, one by one element, just to compute theta correctly
            thetarr[0] = [meanAngle([qi],[ui],[sqi],[sui], estim=unbias)[0] for qi,ui,sqi,sui in zip(qarr[0],uarr[0],qarr[1],uarr[1])]
            thetarr[1] = [meanAngle([qi],[ui],[sqi],[sui], estim=unbias)[1] for qi,ui,sqi,sui in zip(qarr[0],uarr[0],qarr[1],uarr[1])]
            if y=='thet': yarr = thetarr
            else: zarr = thetarr
        if 'p' in (y, z):
            # P = sqrt(Q^2+U^2); sigma_P by standard propagation, then unbias.
            parr[0] = [np.sqrt(qi**2+ui**2) for qi,ui in zip(qarr[0],uarr[0])]
            parr[1] = [np.sqrt((qi*sqi)**2 + (ui*sui)**2)/pi if pi!=0 else 0. for qi,ui,sqi,sui,pi in zip(qarr[0],uarr[0],qarr[1],uarr[1],parr[0])]
            if unbias != '':
                unbiasData(parr[0], parr[1], estim=unbias)
            if y=='p': yarr = parr
            else: zarr = parr
        if y=='q': yarr = qarr
        elif z=='q': zarr = qarr
        if y=='u': yarr = uarr
        elif z=='u': zarr = uarr
    # Return the values
    if z == None:
        return objarr, xarr, yarr
    else:
        return objarr, xarr, yarr, zarr
def graf_field(csvfile, be, pmaxfile=None, vfilter=['no-std'], squared=True, save=False, bin_data=True, \
                onlyY=False, extens='pdf', unbias='wk',label=True):
    """
    Plot a field graph with polarization directions for Be star *be*
    through *csvfile* data table.

    pmaxfile : outfile from graf_p. If != None, this routine
               will plot the Pmax values in each position.
               If == None, will plot the observed values in each filter
               (in this case, the lenght of the vetors DON`T represent
               the % of polarization).
    squared  : use the same scale in both x and y axes?
    save     : if True, save the figure as '`be`_field.`extens`'
               instead of displaying it.
    vfilter  : tags to be filtered out, or a key of polt.vfil naming
               a pre-defined filter list.
    bin_data, onlyY, unbias : passed through to getTable (see its
               docstring for details).
    label    : print the star names next to the vectors?
    """
    # Subtask to calculate the polarization vector coordinates
    def gen_polar(x, y, l, thet):
        # Position angle (from North) -> trigonometric angle before projecting
        thet_rad = -np.deg2rad(thet)+np.pi/2
        xmin=x-np.cos(thet_rad)*l
        xmax=x+np.cos(thet_rad)*l
        ymin=y-np.sin(thet_rad)*l
        ymax=y+np.sin(thet_rad)*l
        return [xmin, xmax], [ymin, ymax]
    # Subtask to calculate the coordinates of 'error shade': a polygon
    # spanning the +/- sthet fan around the polarization vector
    def gen_spolar(x, y, l, thet, sthet):
        thet_rad = -np.deg2rad(thet)+np.pi/2
        s_rad = np.deg2rad(sthet)
        l = 0.8*l
        x1=x-l*np.cos(thet_rad-s_rad)/np.cos(s_rad)
        y1=y-l*np.sin(thet_rad-s_rad)/np.cos(s_rad)
        x2=x+l*np.cos(thet_rad-s_rad)/np.cos(s_rad)
        y2=y+l*np.sin(thet_rad-s_rad)/np.cos(s_rad)
        x3=x+l*np.cos(thet_rad+s_rad)/np.cos(s_rad)
        y3=y+l*np.sin(thet_rad+s_rad)/np.cos(s_rad)
        x4=x-l*np.cos(thet_rad+s_rad)/np.cos(s_rad)
        y4=y-l*np.sin(thet_rad+s_rad)/np.cos(s_rad)
        return [[x1,y1], [x2,y2], [x3,y3], [x4,y4]]
    plt.close('all')
    # Verify if vfilter is a special filter
    if vfilter in polt.vfil.keys():
        vfilter = polt.vfil[vfilter]
    # Read file and generate table
    data = readcsv(csvfile, be)
    if data == []:
        print('No {0} data!'.format(be))
        return
    objarr, raarr, decarr, tharr = getTable(data, 'dRA', 'dDEC', z='thet', sz='sthet', \
                        vfilter=vfilter, bin_data=bin_data, onlyY=onlyY, unbias=unbias)
    # BUGFIX: the second test used to compare len(decarr[0]) with itself
    # (always False); now it really validates RA against DEC list sizes.
    if len(objarr[0]) != len(raarr[0]) or len(raarr[0]) != len(decarr[0]):
        print ('ERROR: Distinct sizes of coordinates cells. Verify inside dados.csv file and run again.')
        return
    if objarr == [] or objarr == [[],[],[]]:
        print('No {0} valid data!'.format(be))
        return
    fig = plt.figure(1)
    ax = plt.subplot(1, 1, 1)
    ax.set_title('Field Stars - {0}'.format(phc.bes[be]), fontsize=fonts[0], verticalalignment='bottom')
    ax.set_xlabel(r'$\Delta$ RA (degree)', size=fonts[1])
    ax.set_ylabel(r'$\Delta$ DEC (degree)', size=fonts[1])
    # Set the parameters for the length of vectors
    leg = []
    vec = [float(veci) for veci in raarr[0]+decarr[0]]
    lsize = 0.075*(max(vec)-min(vec)) # Variable for vectors size
    if squared:
        ax.set_xlim([min(vec)-2*lsize, max(vec)+2*lsize])
        ax.set_ylim([min(vec)-2*lsize, max(vec)+2*lsize])
        ax.autoscale(False)
    if pmaxfile != None:
        if os.path.exists(pmaxfile):
            # BUGFIX: 'ro' is not a valid open() mode string; 'r' is the
            # intended read mode (works on both Python 2 and 3).
            with open(pmaxfile, 'r') as fr:
                meanpmax, nn = 0, 0
                csvread = csv.reader(fr, delimiter=';')
                for j, line in enumerate(csvread):
                    if line[0][0] != '#':
                        meanpmax += float(line[2])
                        nn += 1
                if nn != 0:
                    # Scale so that vector lengths are proportional to Pmax
                    meanpmax = meanpmax/nn
                    ll0 = 0.08*(max(vec)-min(vec))/meanpmax # Variable for vectors size
                else:
                    print('# ERROR: No star inside pmaxfile file!')
                    return
        else:
            print('# ERROR: No pmaxfile file found!')
            return
    ymin = 100.   # 100. to pass in first iteration
    # Do the subplots
    obj = ''
    for i in range(len(objarr[0])):
        # The line below is to plot only once time for each object when is to use the pmax values.
        if pmaxfile != None and objarr[0][i] == obj:
            continue
        else:
            obj = objarr[0][i]
        x = float(raarr[0][i])
        y = float(decarr[0][i])
        if y < ymin:
            ymin = y
        # Use values from pmaxfile file
        if pmaxfile != None:
            thet, sthet = 0, 0
            with open(pmaxfile, 'r') as fr:
                csvread = csv.reader(fr, delimiter=';')
                for j, line in enumerate(csvread):
                    if line[0] == obj:
                        thet = float(line[10])
                        sthet = float(line[11])
                        pmax = float(line[2])
                        spmax = (float(line[3])+float(line[4]))/2
            color = 'black'
            if thet == 0 and sthet == 0:
                print('# WARNING: No star named {0} found inside thetfile file. The lines won`t be plotted.'.format(obj))
                continue
            ll = ll0*pmax
            delt = 1.5*lsize # Variable for label names displacement
        # Otherwise, use from csvfile
        else:
            filt = objarr[1][i]
            thet = float(tharr[0][i])
            sthet = float(tharr[1][i])
            color = colrs[filt]
            ll = lsize
            delt = 1.5*ll # Variable for label names displacement
        # Plot vectors
        xvert, yvert = gen_polar(x, y, ll, thet)
        plt.plot(xvert, yvert, color=color)
        # Plot errors
        coords = gen_spolar(x, y, ll, thet, sthet)
        polygon = Polygon(coords, True, color=color, alpha=0.18)
        ax.add_patch(polygon)
        # Print object names
        if label:
            if i==0 or obj not in [ileg[0] for ileg in leg]:
                n=0
                # Compute scale to fix the label positions
                if squared:
                    scale = 0.035*abs(ax.get_ylim()[1] - ax.get_ylim()[0])
                else:
                    scale = 0.06*abs(min([float(idecarr) for idecarr in decarr[0]]) - \
                                max([float(idecarr) for idecarr in decarr[0]]))
                # Verify if another label shall be closer to the current label
                for ileg in leg:
                    if abs(y - ileg[2]) < delt and abs(x - ileg[1]) < delt:
                        if n==0:
                            x1 = ileg[1]
                            y1 = ileg[2]
                        n += 1
                # Stack colliding labels below the first one found
                if n == 0:
                    x1=x
                    yleg = y-lsize*1.5
                    objleg = fixName(obj)
                else:
                    yleg = y1-lsize*1.5-n*scale
                    objleg = '+ ' + fixName(obj)
                ax.text(x1, yleg, '{0}'.format(objleg), horizontalalignment='center', verticalalignment='baseline', fontsize=fonts[3], color='black')
                leg += [[obj,x,y]]
                if yleg < ymin:
                    ymin = yleg
    # Print point for Be star
    ax.plot([0,0], [0,0], 'o', color='black', markersize=7)
    # Fix y limits if squared==False
    if not squared:
        ax.set_ylim([ymin-2*lsize, ax.get_ylim()[1]])
    # Fix limits
    ax.autoscale(False)
    ax.plot(ax.get_xlim(), [0,0], 'k--')
    ax.plot([0,0], ax.get_ylim(), 'k--')
    plt.gca().invert_xaxis()
    # Setting sizes
    ax.xaxis.label.set_fontsize(fonts[1])
    ax.yaxis.label.set_fontsize(fonts[1])
    for item in (ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(fonts[2])
    if save:
        plt.savefig('{0}_field.{1}'.format(be,extens), bbox_inches='tight')
    else:
        plt.show()
# bin same object+filter data
def binData(objarr, xarr, yarr, zarr=None, ignx=False, igny=False, prevent=False):
    """
    Bin data IN PLACE (the input lists are modified).

    objarr is a list [[objects], [filters], [flags]]
    xarr is a list [[x values], [sx values]] (idem for yarr and zarr)

    If zarr is void, bin just the y values and calculate error by
    propagation only (no stddev). Otherwise, bin only the z values.

    - prevent: prevent to bin when flags contain 'no-std'? If True,
               case there are more than one point able to be binned,
               bin only the data which don't have 'no-std' flag. The
               output lists will have a copy of every line from those
               in input lists with 'no-std' flag, as like as the
               binned lines among those that just don't have 'no-std'
               flag.

    CAUTION: only bins when the contents of objarr AND xarr are the
    same (AND yarr also, case zarr != None)! Only if ignx or/and igny
    parameter have value 'True' the routine ignores when xarr or/and
    yarr have not the same values. In this case, the x or/and y value
    to be returned are taken from the first occurrence.

    CONSIDERATIONS:
       - The error of binned data is just the propagated error,
         sqrt(sum(sigma_i^2))/n, and doesn't consider the stddev
       - NOTE(review): objarr[2] (the flags list) is NOT trimmed in
         the merge loop below, so it may end up longer than objarr[0].
    """
    # Check sizes of lists
    for elem in product([len(lista) for lista in (objarr[0], objarr[1], \
                        xarr[0], xarr[1], yarr[0], yarr[1])],repeat=2):
        if elem[0] != elem[1]:
            print('# ERROR: Data binning not processed because the lists have distinct sizes')
            return
    i=0
    # Loop on lines
    while i < len(objarr[0]):
        obj=objarr[0][i]
        fil=objarr[1][i]
        tags=objarr[2][i]
        x=xarr[0][i]
        sx=xarr[1][i]
        # If it is to prevent, preserve the line without to bin
        # when 'no-std' is in tags
        if prevent and 'no-std' in tags:
            i += 1
            continue
        # Square the current line's error in place: errors are accumulated
        # as sums of squares and the square root is taken at the end.
        # Non-numeric errors (e.g. '') are flagged with '' and skipped.
        if zarr == None:
            try:
                yarr[1][i] = yarr[1][i]**2
            except:
                yarr[1][i] = ''
        else:
            y=yarr[0][i]
            sy=yarr[1][i]
            try:
                zarr[1][i] = zarr[1][i]**2
            except:
                zarr[1][i] = ''
        n=1
        j=i+1
        # looking for the same object/filter from i-esim line until the end
        while True:
            # break if another object or end of list
            if j >= len(objarr[0]) or objarr[0][j] != obj:
                break
            # If all j-esim components are equal, except the z values (or y values,
            # case zarr is void), calculate the mean
            elif objarr[0][j] == obj and objarr[1][j] == fil and \
                (ignx or (xarr[0][j] == x and xarr[1][j] == sx)) and \
                (zarr == None or igny or (yarr[0][j] == y and yarr[1][j] == sy)):
                n+=1
                if zarr == None:
                    yarr[0][i] += yarr[0][j]
                    try:
                        yarr[1][i] += yarr[1][j]**2
                    except:
                        pass
                else:
                    zarr[0][i] += zarr[0][j]
                    try:
                        zarr[1][i] += zarr[1][j]**2
                    except:
                        pass
                    del(zarr[0][j])
                    del(zarr[1][j])
                # The merged line is deleted from every list, so j is NOT
                # incremented here: the next candidate shifts into slot j.
                del(objarr[0][j])
                del(objarr[1][j])
                del(xarr[0][j])
                del(xarr[1][j])
                del(yarr[0][j])
                del(yarr[1][j])
            # skip to next
            else:
                j += 1
        # Conclude the computation of average and propagated error
        # (divide the accumulated sum by n; sqrt the accumulated squares)
        if zarr == None:
            yarr[0][i] = yarr[0][i]/n
            if yarr[1][i] != '':
                yarr[1][i] = np.sqrt(yarr[1][i])/n
        else:
            zarr[0][i] = zarr[0][i]/n
            if zarr[1][i] != '':
                zarr[1][i] = np.sqrt(zarr[1][i])/n
        i+=1
def graf_theta(csvfile, be, vfilter=['no-std'], save=False, bin_data=True, onlyY=False, \
                extens='pdf', unbias='wk'):
    """
    Plot the polarization angle theta for the field stars of Be star
    `be` (data read from `csvfile`), one point per star/filter
    observation, plus a dashed line at the mean angle computed over
    the Stokes QU parameters of all stars.

    vfilter : tags to be filtered out, or a key of polt.vfil naming
              a pre-defined filter list.
    save    : if True, save the figure as '`be`_tht.`extens`' instead
              of displaying it.
    bin_data, onlyY, unbias : passed through to getTable (see its
              docstring for details).
    """
    # Verify if vfilter is a special filter
    if vfilter in polt.vfil.keys():
        vfilter = polt.vfil[vfilter]
    plt.close('all')
    data = readcsv(csvfile, be)
    if data == []:
        print('No {0} data!'.format(be))
        return
    objarr, filtarr, tharr = getTable(data, 'filt', 'thet', sy='sthet', \
                        vfilter=vfilter, bin_data=bin_data, onlyY=onlyY, unbias=unbias)
    if objarr == [] or objarr == [[],[],[]]:
        print('No {0} valid data!'.format(be))
        return
    # Compute the mean angle
    # A new objarr is needed because the bin_data==False can do the objarr below have a larger length
    objarr_qu, qarr, uarr = getTable(data, 'q', 'u', sx='s', sy='s', \
                        vfilter=vfilter, bin_data=False, onlyY=onlyY, unbias=unbias)
    thmean = meanAngle(qarr[0], uarr[0], qarr[1], uarr[1], estim=unbias)
    fig = plt.figure(1)
    ax = plt.subplot(1, 1, 1)
    ax.set_title('Field Stars - {0}'.format(phc.bes[be]), fontsize=fonts[0], verticalalignment='bottom')
    ax.set_xlabel('star/filter', size=fonts[1])
    ax.set_ylabel(r'$\theta$ (degree)', size=fonts[1])
    j = 0
    pts=[[],[],[]]
    longname = False
    # Do the subplots
    # Each pass collects and plots all the points belonging to the star
    # found at position j; j then jumps past that star's points (assumes
    # observations of a same star are contiguous — TODO confirm).
    while True:
        pts[0] = [i+1 for i in range(len(objarr[0])) if objarr[0][j] == objarr[0][i]]
        pts[1] = [float(tharr[0][i]) for i in range(len(objarr[0])) if objarr[0][j] == objarr[0][i]]
        pts[2] = [float(tharr[1][i]) for i in range(len(objarr[0])) if objarr[0][j] == objarr[0][i]]
        color = gen_color(csvfile, be, objarr[0][j], onlyY=onlyY)
        nome = fixName(objarr[0][j])
        # NOTE(review): `longname` is set but never used afterwards —
        # candidate for removal.
        if len(nome) > 13:
            longname = True
        ax.errorbar(pts[0], pts[1], yerr=pts[2], label=nome, linestyle='', marker='o', color=color)
        j += len(pts[0])
        if j >= len(objarr[0]):
            break
    # Extend the x range by ~1/3 for the legend (Python 2 integer division)
    ax.set_xlim([0,j+j/3])
    # Setting legend
    leg = ax.legend(loc='best', borderaxespad=0., numpoints=1, prop={'size':fonts[3]})
    leg.get_frame().set_alpha(0.5)
    ax.plot(ax.get_xlim(), [thmean[0], thmean[0]], 'k--')
    # Setting sizes
    ax.xaxis.label.set_fontsize(fonts[1])
    ax.yaxis.label.set_fontsize(fonts[1])
    for item in (ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(fonts[2])
    if save:
        plt.savefig('{0}_tht.{1}'.format(be,extens), bbox_inches='tight')
    else:
        plt.show()
def meanAngle(q,u,sq,su,estim='wk'):
    """
    Return the mean angle and the error propagated
    The computation is over QU parameters.

    Unbias theta error using 'estim' estimator:
       if p/sp <= K, s_theta = psi
       otherwise, s_theta = propagated error
    where K is given by the estimator related to the
    'estim' variable:
      a) 'ml' : Maximum Likelihood (K=1.41, psi=51.96)
      b) 'wk' : Wardle & Kronberg (K=1.0, psi=51.96)
      c) ''   : None (K=0, psi=51.96)
      d) 'mts': Maier, Tenzer & Santangelo (estimates
                from Bayesian analysis, psi=61.14)

    Returns [theta_mean, sigma_theta] in degrees, theta in [0, 180).
    """
    if estim=='wk':
        k=1.
    elif estim=='ml':
        k=1.41
    elif estim=='':
        k=0.
    elif estim!='mts':
        # ('mts' defines no k; that branch uses its own thresholds below)
        print('# ERROR: estimation type `{0}` not valid!.'.format(estim))
        raise SystemExit(1)
    # Shallow copies: binData below squares/overwrites the error lists in
    # place, and the caller's lists must not be modified.
    qq, uu, sqq, suu = q[:], u[:], sq[:], su[:]
    # Use binData to compute the mean Q and U values
    # espn variables are artifices to bin all data from qq and uu lists using binData
    # (identical dummy object/filter entries make binData merge every
    # element into one mean value at qq[0]/uu[0] with propagated error)
    esp1 = [['' for i in range(len(q))], ['' for i in range(len(q))], ['' for i in range(len(q))]]
    esp2 = copy.deepcopy(esp1)
    esp3 = copy.deepcopy(esp1)
    esp4 = copy.deepcopy(esp1)
    binData(esp1, esp2, [qq,sqq])
    binData(esp3, esp4, [uu,suu])
    # Prevent division by 0
    if qq[0] == 0:
        if uu[0] > 0:
            tht = 45.0
        elif uu[0] < 0:
            tht = 135.0
    else:
        # theta = 0.5*arctan(U/Q) converted to degrees (90/pi = 0.5*180/pi)
        tht = np.arctan(uu[0]/qq[0])*90/np.pi
    p = np.sqrt(qq[0]**2 + uu[0]**2)
    if p!=0:
        sp = np.sqrt((qq[0]*sqq[0])**2 + (uu[0]*suu[0])**2)/p
        saux = np.sqrt((qq[0]*suu[0])**2 + (uu[0]*sqq[0])**2)/p
        if estim!='mts':
            if sp!=0 and p/saux > k:
                # sigma_theta [deg] = sigma/(2p) rad = 28.65*sigma/p deg
                stht = 28.65*saux/p
            else:
                stht = 51.96
        else:
            # We need to use 'saux' below, not 'sp'! It is because if U=0, the error that will
            # influence the theta uncertainty is the sigma_U, because sigma_Q will be along the
            # P direction. Comparing the formulas for sp and saux, the one that do it correctly
            # is saux.
            if sp!=0 and p/saux > 6:
                stht = 28.65*saux/p
            elif saux!=0:
                # Empirical fit coefficients (Maier, Tenzer & Santangelo)
                a=32.50
                b=1.350
                c=0.739
                d=0.801
                e=1.154
                stht = (a*(b+np.tanh(c*(d-p/saux))) - e*p/saux)
            else:
                stht = 61.14
    else:
        # Null polarization: theta is undefined, return the flat-prior psi
        tht = 0.
        if estim!='mts':
            stht = 51.96
        else:
            stht = 61.14
    # Fix the angle to the correct in QU diagram
    # (map theta into [0, 180) taking the QU quadrant into account)
    if qq[0] < 0:
        tht += 90
    if tht < 0:
        tht += 180
    return [tht, stht]
def meanAngle_star(csvfile, be, obj, filts='ubvri', vfilter=['no-std'], onlyY=False, estim='wk'):
    """
    Compute the mean polarization angle of field star `obj` (field of
    Be star `be`) over every filter listed in the string `filts`
    (e.g. filts='ubv' averages over the UBV filters).

    The average is taken over the Stokes QU parameters, with the
    standard-star uncertainty propagated into the QU errors. `estim`
    selects the estimator used to unbias the angle error (see
    meanAngle): 'ml' (K=1.41), 'wk' (K=1.0), '' (K=0) or 'mts'
    (Bayesian; psi=61.14).

    Returns [theta_mean, sigma_theta], [0, 0] when no observation of
    `obj` matches, or None when there is no data for `be` at all.
    """
    # Expand a named pre-defined tag filter from polt.vfil
    if vfilter in polt.vfil.keys():
        vfilter = polt.vfil[vfilter]
    data = readcsv(csvfile, be)
    if data == []:
        print('No {0} data!'.format(be))
        return
    # Unbinned QU table plus P/theta table needed for error propagation
    objs, qtab, utab = getTable(data, 'q', 'u', sx='s', sy='s', \
                        vfilter=vfilter, bin_data=False, onlyY=onlyY)
    trash, ptab, thtab = getTable(data, 'p', 'thet', sx='s', sy='sdth', \
                        vfilter=vfilter, bin_data=False, onlyY=onlyY)
    # Propagate the standard-star uncertainty into the QU errors
    trash, qtab[1], utab[1] = polt.propQU(ptab[0], thtab[0], ptab[1], thtab[1])
    # Keep only the rows of star `obj` observed in the requested filters
    qsel = [[], []]
    usel = [[], []]
    for i in range(len(objs[0])):
        if objs[0][i] == obj and objs[1][i] in filts:
            qsel[0].append(qtab[0][i])
            qsel[1].append(qtab[1][i])
            usel[0].append(utab[0][i])
            usel[1].append(utab[1][i])
    if qsel == [[], []]:
        return [0, 0]
    return meanAngle(qsel[0], usel[0], qsel[1], usel[1], estim=estim)
def gen_color(csvfile, be, obj, onlyY=False):
    """
    Return a deterministic color (RGBA numpy array) for the field star
    `obj` of Be star `be`, sampled from the 'spectral' colormap
    according to the star's position in the list of stars found in
    `csvfile`. Returns an empty numpy array when `obj` is not among
    the (selected) stars.

    Note that running this task with distinct values of `onlyY` may
    return distinct colors, since the star list (and hence the
    colormap sampling) changes. The `be` parameter is required because
    some field stars are shared between different Be stars.
    """
    data = readcsv(csvfile, be)
    if data == []:
        print('No {0} data!'.format(be))
        return
    # Collect the distinct target names, honoring the 'Y' selection flag
    names = []
    for blk in data:
        first = blk[0]
        tgt = first[idx2['tgt']]
        selected = (not onlyY) or first[idx2['sel?']] == 'Y'
        if selected and tgt not in names:
            names += [tgt]
    if len(names) == 0 or obj not in names:
        return np.array([])
    # Sample the colormap evenly over the list of stars
    if len(names) == 1:
        samples = [0.5]
    else:
        samples = np.arange(0.04, 1., 4.775/(5*(len(names)-1)))
    palette = plt.cm.spectral(samples)
    return palette[names.index(obj)]
def fixName(star):
    """
    Return a printable display name for the star identifier `star`.

    Known catalog prefixes are expanded:
       'hd'/'h'  -> 'HD '
       '2MASS-'  -> '' (dropped)
       'hip'     -> 'HIP '
       'TYC-'    -> 'TYC '
       'hr'      -> 'HR '
       'bd-'     -> 'BD-XX YYYY'
    A '*_fieldN' name becomes a 'Star #N' label (with the Be star name
    prepended when it is not a known Be alias) and a trailing '_x'
    suffix becomes an uppercase component letter. Identifiers that
    match no rule are returned unchanged.
    """
    def _expand(ident):
        # Ordered (pattern, formatter) table — first match wins, so the
        # order must mirror the catalog-prefix precedence above.
        rules = [
            ('^2MASS-J[0-9-+]*$', lambda s: s[6:]),
            ('^(hip|HIP)[0-9]*$', lambda s: 'HIP ' + s[3:]),
            ('^(tyc|TYC)-[0-9-]*$', lambda s: 'TYC ' + s[4:]),
            ('^(hd|HD)[0-9]*$', lambda s: 'HD ' + s[2:]),
            ('^h[0-9]*$', lambda s: 'HD ' + s[1:]),
            ('^hr[0-9]*$', lambda s: 'HR ' + s[2:]),
            ('^bd-[0-9-+]*$', lambda s: 'BD-' + s[3:5] + ' ' + s[6:]),
        ]
        for pattern, fmt in rules:
            if re.match(pattern, ident):
                return fmt(ident)
        # Fall back to the Be-star alias table, then to the raw name
        if ident in phc.bes:
            return phc.bes[ident]
        return ident
    if re.match('.*_field', star):
        tag = star[star.index('_field')+6:]
        if star.split('_')[0] in phc.bes:
            return 'Star #' + tag
        return _expand(star.split('_')[0]) + ' (Star #' + tag + ')'
    if re.match('.*_.$', star):
        return _expand(star.split('_')[0]) + ' ' + star[star.index('_')+1:].upper()
    return _expand(star)
def unbiasData(p, s, estim='wk'):
    """
    Remove the positive bias from polarization data IN PLACE:

        p -> sqrt(p^2 - (K*s)^2),  or 0 when p < K*s

    INPUT
        p:     list of % polarization values (modified in place)
        s:     list of polarization errors
        estim: estimative model for the K factor:
                 a) 'ml' : Maximum Likelihood (K=1.41)
                 b) 'wk' : Wardle & Kronberg (K=1.0)
                 c) 'mts': falls back to 'wk' with a warning
    """
    factors = {'wk': 1., 'ml': 1.41}
    if estim == 'mts':
        # 'mts' applies only to angles; use the WK factor for P itself
        print('# Warning: changing estimation type from `mts` to `wk` to the % of pol...')
        k = 1.
    elif estim in factors:
        k = factors[estim]
    else:
        print('# ERROR: estimation type `{0}` not valid!.'.format(estim))
        raise SystemExit(1)
    for i, (pi, si) in enumerate(zip(p, s)):
        if pi < k*si:
            p[i] = 0.
        else:
            p[i] = np.sqrt(pi**2 - (k*si)**2)
    return
def graf_p(csvfile, be, thetfile=None, path=None, vfilter=[], vfilter_be=[], save=False, \
bin_data=True, onlyY=False, useB=True, every=False, propag=True, rotate=False, fit=True, propmode='comb', unbias='wk', law='w82', extens='pdf'):
"""
Plot P x wavelength for star 'be' and operate over a
/'be'_is.csv file. The field stars are read from 'csvfile'.
Fix the polarization bias using Wardle & Kronberg formula.
'csvfile' : location of dados.csv (field stars data).
'bin_data': bin data in graphs?
'onlyY' : use only the field stars with 'Y' marks (field
stars originally selected for Be star 'be')?
'thetfile' : file with the intrinsic angles (oufile from
fs.genInt). If 'thetfile' != None, plot the
ISP component parallel to the disk of the Be
star.
'useB' : Use the b*cos(psi) values fom 'thetfile' to plot
the ISP component parallel to the disk of the Be
star? Case False, rotates the data in QU diagram
at the angle corresponding to the disk inclination
read from 'thetfile'.
'vfilter' : tags to be filtered of observations of field stars
'path' : (for useB=False) the path where is located the log file for star 'be'
(out from polt.genTarget). If None, it is supposed
inside the current directory (.).
'vfilter_be' : (for useB=False) tags to be filtered of data in
the computation of ISP parallel to the disc, when
'thetfile' != None.
'unbias' : Estimator to unbias the data when 'y' or 'z'
is equals to 'p' or 'thet'. Three options:
a) 'ml': Maximum Likelihood (K=1.41)
b) 'wk': Wardle & Kronberg (K=1.0)
c) '' : None (K=0.)
where K is the factor to unbias (read the
description of routines 'unbiasData' and
'meanAngle'.
'every' : (for useB=False) use one intrinsic angle for each one
filter to obtain the // component? If every=False makes
all data to use a mean value at the -4:-2 collums
(22th to 24th) from 'thetfile'
'propag' : (for useB=False) propagates the uncertainies of the
intrinsic angle to the rotated QU values in computation
of ISP parallel to the disc, when 'thetint' != None?
'propmode' : (for useB=False) mode to compute the error from ISP parallel
to the disc, when 'thetint' != None and useB=False.
'stddev' : only the stddev of the mean
'prop' : only the propagated from individual data
'comb' : the combined between them
'fit': fit the ISP using MCMC? Case True, generate the
graphs and a file ./'be'_is.csv with the best values.
Write the mean polarization angles inside this file
also. Case fit==True and there exists this file, this
routine will ask to the user if he wants to read the
previous fitted parameters and plot the Serkowski curves
using them, or if he wants to run MCMC again,
overwriting this file. If there exists a ./'be'_is.csv
file and some standard star was not fitted yet, this
routine will do that and append a line to the csv file.
This routine WON'T display the stars whose 13th column in
'be'_is.csv be assigned with '0' value.
'rotate' : DON'T WORKING PROPERLY. Rotate the polarization of
field stars in QU diagram by the mean angle? A
option to be explored to replace the unbias procedure.
CONSIDERATIONS:
- When useB=False, error for parallel component is the
combination between the propagated error and stddev.
- Error of standard stars is propagated to the field stars
- The polarization angles (PA) inside 'be'_is.csv are binned
over Stokes parameters ALWAYS. The errors are computed by the
simple propagation.
- The mean PA for each star is computed using QU parameters
in all filters, even when p/sigma_p<1 (when the individual
PA error is equals to 51.96), because we are actually
operating over QU space!
- p/sigma_p inside 'be'_is.csv is computed by the ratio of
28.65 and the PA error.
"""
    def copyFit(star, csvwriter, secondcol=None):
        """
        Copy the line of star 'star' inside the 'be'_is.csv file
        to the writer 'csvwriter'.
        If 'secondcol' != None, do the copy only when the 2nd
        column inside 'be'_is.csv has the value 'secondcol'.
        Return the lists pmax and lmax with the fitted values and
        errors (+ and -), plus the 'plot point?' flag (14th column).
        When the star is not found, return [], [], 1.
        """
        # Defaults returned when no matching line is found
        pmax, lmax = [], []
        plott = 1
        # 'be' comes from the enclosing scope (graf_p)
        fr = open('{0}_is.csv'.format(be), 'r')
        csvread = csv.reader(fr, delimiter=';')#, quoting=csv.QUOTE_NONE, quotechar='')
        for i, line in enumerate(csvread):
            if line[0] == star and (secondcol==None or line[1]==secondcol):
                csvwriter.writerow(line)
                # Columns 2:5 -> Pmax + errors; 5:8 -> lmax + errors; 13 -> plot flag.
                # If several lines match, the LAST one wins (the break is disabled).
                pmax, lmax, plott = map(lambda v: float(v), line[2:5]), map(lambda v: float(v), line[5:8]), float(line[13])
#                break
        fr.close()
        return pmax, lmax, plott
if path == None:
path = os.getcwd()
# Verify if vfilter is a special filter
if vfilter in polt.vfil.keys():
vfilter = polt.vfil[vfilter]
if vfilter_be in polt.vfil.keys():
vfilter_be = polt.vfil[vfilter_be]
plt.close('all')
# Read tables and unbias data
data = readcsv(csvfile, be)
if data == []:
print('No {0} data!'.format(be))
return
objarr, larr, parr = getTable(data, 'leff', 'p', sy='s', \
vfilter=vfilter, bin_data=bin_data, onlyY=onlyY, unbias=unbias)
if objarr == [] or objarr == [[],[],[]]:
print('No {0} valid data!'.format(be))
return
# Done inside getTable routine now
# if unbias in ('ml','wk'):
# unbiasData(parr[0], parr[1], unbias)
# Try to find a current 'be'_is.csv file, request to the user and initialize
# the file for the output of the fitted parameters
if fit:
if os.path.exists('{0}_is.csv'.format(be)):
opt = ''
while opt not in ('1','2'):
print(('# File with ISP fits {0}_is.csv already exists. Type the option:\n' +\
' 1) Use this saved values to plot the fitted Serkowski curve;\n' +\
' 2) Run the MCMC to fit all again.').format(be))
opt = raw_input('Type the option: ')
if opt == '1':
usePrevious=True
else:
usePrevious=False
else:
usePrevious=False
fout = open('{0}_is_tmp.csv'.format(be), 'w')
csvout = csv.writer(fout, delimiter=';')#, quoting=csv.QUOTE_NONE, quotechar='')
csvout.writerow(['#obj','#name']+['Pmax','sPmax_+','sPmax_-', 'lmax','slmax_+','slmax_-']+['chi','n']+\
# csvout.writerow(['#obj','#name']+\
['<th>', 's<th>','<p/sp>']+\
['plot point?', 'use in fit?', 'comments','']+\
['th_u', 'sth_u','th_b', 'sth_b','th_v', 'sth_v','th_r', 'sth_r','th_i', 'sth_i']+\
['p/sp_u', 'p/sp_b', 'p/sp_v', 'p/sp_r', 'p/sp_i'])
# Get the table for theta values, propagating errors from standard and computing the mean angle by filter.
# IMPORTANT: Allways bin data below to compute the mean angle in all cases! Allways include 'no-std' to
# filter a incorrect theta value without standard calibration, even if vfilter doesn't contain this flag.
if rotate or fit:
if 'no-std' not in vfilter:
print('Warning: no tag `no-std` in vfilter variable. Adding this tag in computation of theta values...')
vfilter_thet = vfilter + ['no-std']
else:
vfilter_thet = vfilter
objarr_tht, lixo, thtarr = getTable(data, 'filt', 'thet', sy='sthet', \
vfilter=vfilter_thet, bin_data=True, onlyY=onlyY, unbias=unbias)
# Initialize the graph
fig = plt.figure()
ax = plt.subplot(1, 1, 1)
ax.set_title('Field Stars - {0}'.format(phc.bes[be]), fontsize=fonts[0], verticalalignment='bottom')
ax.set_xlabel(r'$\lambda\ (\AA)$', size=fonts[1])
if rotate:
ax.set_ylabel('Q` (%)', size=fonts[1])
else:
ax.set_ylabel('P (%)', size=fonts[1])
ax.set_xlim([2500, 8500])
# Initialize Variables
j = 0
pts=[[],[],[]]
tht = [[],[]]
longname = False
##############################
#### MAIN LOOP ####
# Do the subplots and fits
while True:
star = objarr[0][j]
pts[0] = [float(larr[0][i]) for i in range(len(objarr[0])) if star == objarr[0][i]]
pts[1] = [float(parr[0][i]) for i in range(len(objarr[0])) if star == objarr[0][i]]
pts[2] = [float(parr[1][i]) for i in range(len(objarr[0])) if star == objarr[0][i]]
# print objarr_qu[0][j:]
# print objarr_qu[1][j:]
# Get the mean angles
if rotate or fit:
thmean, psp = [], []
for k, ffil in enumerate(filters):
ver = False
ni = 0
# thmean is a plain list with the angles and errors.
# By the bin performed some lines above, the first matched element is
# the only one, and concerning to the mean angle computed.
for i in range(len(objarr_tht[0])):
if objarr_tht[0][i] == star and objarr_tht[1][i] == ffil:
thmean += ['{0:.2f}'.format(float(thtarr[0][i]))]
thmean += ['{0:.2f}'.format(float(thtarr[1][i]))]
if float(thtarr[1][i]) not in (0, 51.96, 61.14):
psp += [28.65/float(thtarr[1][i])]
else:
psp += [0.]
ver = True
break
if not ver:
thmean += [0.,0.]
psp += [0.]
"""
# It is needed to compute the mean p/sp because bin_data can be False
for i in range(len(objarr[0])):
print objarr[0][i], star
print objarr[1][i], ffil
print parr[0][i], parr[1][i]
print float(parr[0][i])/float(parr[1][i])
if objarr[0][i] == star and objarr[1][i] == ffil and float(parr[1][i]) != 0.:
print '*********ENTROU******************'
psp[k] += float(parr[0][i])/float(parr[1][i])
ni += 1
elif objarr[0][i] == star and objarr[1][i] == ffil:
print '*********NÃOOOOOOO ENTROU******************'
print ''
if ni > 1:
psp[k] = psp[k]/ni
"""
means = meanAngle_star(csvfile, be, star, filts='ubvri', vfilter=vfilter_thet, onlyY=onlyY, estim=unbias)
means += [np.mean(filter(lambda v: v!=0, psp))]
# Store the new rotated values
# It is needed TO FIX above because thmean[0] is not the mean theta, but a list with the mean theta and its error for filter u.
if rotate:
q[0], u[0], q[1], u[1] = rotQU(q[0], u[0], q[1], u[1], thmean[0], thmean[1])
# Sort lists pts[0],pts[1],pts[2] based on values of pts[0]
for i in range(len(pts[0])):
idx = pts[0].index(min(pts[0][i:]))
if idx != i:
tmp0 = pts[0][i]
tmp1 = pts[1][i]
tmp2 = pts[2][i]
pts[0][i] = pts[0][idx]
pts[1][i] = pts[1][idx]
pts[2][i] = pts[2][idx]
pts[0][idx] = tmp0
pts[1][idx] = tmp1
pts[2][idx] = tmp2
if rotate:
tmp3 = q[0][i]
tmp4 = u[0][i]
tmp5 = q[1][i]
tmp6 = u[1][i]
q[0][i] = q[0][idx]
u[0][i] = u[0][idx]
q[1][i] = q[1][idx]
u[1][i] = u[1][idx]
q[0][idx] = tmp3
u[0][idx] = tmp4
q[1][idx] = tmp5
u[1][idx] = tmp6
# Get the color and the the common name to plot
color = gen_color(csvfile, be, star, onlyY=onlyY)
nome = fixName(star)
if len(nome) > 13:
longname = True
if not rotate:
if not fit:
ax.errorbar(pts[0], pts[1], yerr=pts[2], label=nome, linestyle='', marker='o', color=color)
# Fit data HERE or copy from the previous csv file
if fit:
if usePrevious:
# means = map(lambda v: '{0:.2f}'.format(v), means[:2]) + ['{:.1f}'.format(means[2])]
# psp = map(lambda v: '{0:.1f}'.format(v), psp)
pmax_fit, lmax_fit, plott = copyFit(star, csvout)
else:
pmax_fit, lmax_fit = [], []
plott = 1
if plott==1:
ax.errorbar(pts[0], pts[1], yerr=pts[2], label=nome, linestyle='', marker='o', color=color)
# print usePrevious
# print pmax_fit, lmax_fit
# Run MCMC again if not usePrevious or if there is no 'star' inside the
# current csv file.
if (pmax_fit, lmax_fit) == ([],[]):
# Convert to microns and fit
lb = [lbi/10000 for lbi in pts[0]]
pmax_fit, lmax_fit, chi2 = fitSerk(lb, pts[1], pts[2], star=star, extens=extens)
# Fix the format to the lists
pmax_fit_str = map(lambda v: '{0:.5f}'.format(v), list(pmax_fit))
lmax_fit_str = map(lambda v: '{0:.5f}'.format(v), list(lmax_fit))
means = map(lambda v: '{0:.2f}'.format(v), means[:2]) + ['{:.1f}'.format(means[2])]
psp = map(lambda v: '{0:.1f}'.format(v), psp)
chi2 = '{0:.3f}'.format(chi2)
csvout.writerow([star, nome] + pmax_fit_str + lmax_fit_str + [chi2, len(pts[1])] +\
means + ['1','1','---',''] + thmean + psp)
# csvout.writerow([star, nome] + means + ['1','1','---',''] + thmean + psp)
ll = np.linspace(3000.,8300.,100)
pp = np.array([])
for lli in ll:
pp = np.append(pp, polt.serkowski(pmax_fit[0], lmax_fit[0]*10000, lli, law=law, mode=2))
# Only plot the graph if there are more than one data, because with an only point
# the curve is not defined! But the emcee was runned to generate the covariance map.
# If pmax and lmax were read from a *_is.csv file and this current star was assigned to not
# be plotted (plott==0), then it won't plotted below.
if len(pts[0]) > 1 and plott==1:
ax.plot(ll, pp, color=color)
else:
ax.errorbar(pts[0], q[0], yerr=q[1], label=nome+'_q', linestyle='-', marker='', color=color)
# ax.errorbar(pts[0], u[0], yerr=u[1], label=nome+'_u', linestyle='-.', marker='', color=color)
# ax.errorbar(pts[0], pts[1], yerr=pts[2], label=nome+'_p', linestyle='--', marker='', color=color)
ax.plot(pts[0], pts[1], label=nome+'_p', linestyle='--', marker='', color=color)
# Refresh the lines to the next iteration
j += len(pts[0])
if j >= len(objarr[0]):
break
# Setting legend
# fig.subplots_adjust(right=0.8)
# ax.legend(loc='center left', borderaxespad=0., numpoints=1, prop={'size':fonts[4]}, bbox_to_anchor=(1.02,.5))
# Plot ISP parallel to the disk direction of Be star
if thetfile != None:
if not useB:
lbd, pBe, sBe, devBe = rotQUBe(be, thetfile, path=path, every=every, vfilter=vfilter_be, propag=propag)
# Using combined error for points to // pol to Be star
if propmode == 'stddev':
s = devBe
elif propmode == 'prop':
s = sBe
elif propmode == 'comb':
s = [np.sqrt(sBe[i]**2+devBe[i]**2) for i in range(len(sBe))]
else:
print('WARNING: `propmode` parameter not valid as {0}. Activating as `comb` (see what it means in the help...)'.format(propmode))
s = [np.sqrt(sBe[i]**2+devBe[i]**2) for i in range(len(sBe))]
else:
try:
f0 = open(thetfile, 'ro')
reader = csv.reader(f0, delimiter=';')
except:
print('# ERROR: Can\'t read file {0}.'.format(thetfile))
raise SystemExit(1)
pBe, lbd, s = [],[],[]
for line in reader:
if line[0] == be:
try:
for i in range(26,40,3):
# print line[i], line[i+1], line[i+2]
if float(line[i]) != 0:
lbd += [phc.lbds[filters[int((i-26)/3)]]]
pBe += [float(line[i])]
s += [(float(line[i+1])+float(line[i+2]))/2]
else:
print('# WARNING: No b*cos(psi) value for filter {0}.'.format(filters[int((i-26)/3)]))
except:
print line[i], line[i+1], line[i+2]
print('# ERROR: Components in lines of {0} file seem not be float type.'.format(thetfile))
return
if pBe == []:
print('# WARNING: Star {0} not found in {1} file.'.format(be, thetfile))
if lbd != []:
# print lbd, pBe, s
if not fit:
ax.errorbar(lbd, pBe, yerr=s, label=r'$P_{IS}^\perp$', linestyle='', \
marker='s', color='black')
if fit:
if usePrevious:
pmax_fit, lmax_fit, plott = copyFit(be, csvout, secondcol=fixName(be)+' (perp)')
else:
pmax_fit, lmax_fit = [], []
plott = 1
if plott==1:
ax.errorbar(lbd, pBe, yerr=s, label=r'$P_{IS}^\perp$', linestyle='', \
marker='s', color='black')
if (pmax_fit,lmax_fit) == ([],[]):
# Convert to microns
lb = [lbi/10000 for lbi in lbd]
print ''
pmax_fit, lmax_fit, chi2 = fitSerk(lb, pBe, s, star=be, extens=extens)
csvout.writerow([be, fixName(be)+' (perp)'] + list(pmax_fit) + list(lmax_fit) + [chi2, len(pBe)] + ['0']*3 + ['0','0','---',''] + ['0']*15)
ll = np.linspace(3000.,8300.,100)
pp = np.array([])
for lli in ll:
pp = np.append(pp, polt.serkowski(pmax_fit[0], lmax_fit[0]*10000, lli, law=law, mode=2))
# Only plot the graph if there are more than one data, because with an only point
# the curve is not defined! But the emcee was runned to generate the covariance map
if len(pBe) > 1 and plott==1:
ax.plot(ll, pp, color='black')
# Copy the dummy lines beginning with the 'be' and fixed Be name
if fit and usePrevious:
copyFit(be, csvout, secondcol=fixName(be))
if longname:
ax.set_xlim([1500, 8500])
leg = ax.legend(loc='best', borderaxespad=0., numpoints=1, prop={'size':fonts[4]})
leg.get_frame().set_alpha(0.5)
# Setting sizes
ax.xaxis.label.set_fontsize(fonts[1])
ax.yaxis.label.set_fontsize(fonts[1])
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fonts[2])
if save:
fig.savefig('{0}_p.{1}'.format(be,extens), bbox_inches='tight')
else:
fig.show()
print('\nDone!\n')
if fit:
if os.path.exists('{0}_is.csv'.format(be)):
os.remove('{0}_is.csv'.format(be))
os.rename('{0}_is_tmp.csv'.format(be),'{0}_is.csv'.format(be))
fout.close()
return
def graf_pradial(csvfile, be, filt='pmax', vfilter=[], pmaxfile=None, fit=False, \
                 bin_data=True, onlyY=True, save=False, extens='pdf', unbias='wk'):
    """
    Plot a field graph with polarization versus distance.

    filt : is the filter to plot in y axes - 'pmax','u','b',
           'v','r','i'. If 'pmax' use the Pmax values
           from ./'be'_is.csv file to plot.
    pmaxfile : 'be'_is.csv file location (out from fs.graf_p).
           If None, it is supposed ./'be'_is.csv
    fit : (only for filt=='pmax') fit a line in graph? This
          routine will not use in fitting the points whose
          values in 22th column inside pmaxfile have a '0'
          character (this column defines the points to be
          used in the adjust). The points not used will be
          marked with a 'x' in the graph.
    csvfile : location of dados.csv.

    Considerations:
    - If filt=='pmax', don't plot the data whose values in 21th
      column inside pmaxfile have a '0' character.
    - If filt=='pmax', don't use in fitting the points whose
      values in 22th column inside pmaxfile have a '0' character
      when fit=True. The points not used will be marked with a
      'x' in the graph.
    - Skip the lines inside pmaxfile commented with a '#', or with
      a void first element
    """
    # Verify if vfilter is a special filter (named alias in polt.vfil)
    if vfilter in polt.vfil.keys():
        vfilter = polt.vfil[vfilter]
    plt.close('all')
    # Read csvfile and get the table (paralaxes of field stars and the Be star)
    data = readcsv(csvfile, be)
    if data == []:
        print('No {0} data!'.format(be))
        return
    objarr, plxarr, bearr, parr = getTable(data, 'plx', 'plxbe', sx='splx',
                   sy='splxbe', z='p', sz='s', vfilter=vfilter, bin_data=bin_data, onlyY=onlyY, unbias=unbias)
    if objarr == [] or objarr == [[],[],[]]:
        print('No {0} valid data!'.format(be))
        return
    # Verify pmaxfile (only needed when plotting the fitted Pmax values)
    if filt == 'pmax':
        if pmaxfile == None:
            pmaxfile = '{0}_is.csv'.format(be)
        if not os.path.exists(pmaxfile):
            print('No {0} file found!'.format(pmaxfile))
            return
    # Initialize graphs
    fig = plt.figure(1)
    ax = plt.subplot(1, 1, 1)
    ax.set_title('Field Stars - {0}'.format(phc.bes[be]), \
                 fontsize=fonts[0], verticalalignment='bottom')
    ax.set_xlabel(r'r (pc)', size=fonts[1])
    if filt=='pmax':
        ax.set_ylabel(r'$P_{max}$ (%)', size=fonts[1])
    else:
        ax.set_ylabel(r'$P_{0}$ (%)'.format(filt.upper()), size=fonts[1])
    # Extract data only in filter filt.
    # x = [plx, splx]; y = [p, sp_+, sp_-]; *_filt hold the points excluded
    # from the fit (marked with 'x'); x0/y0 refer to the Be star itself.
    obj, x, y, y0, colors = [], [[],[]], [[],[],[]], [[],[],[]], []
    obj_filt, x_filt, y_filt, colors_filt = [], [[],[]], [[],[],[]], []
    x0 = [float(bearr[0][0]), float(bearr[1][0])]
    longname, useinfit = False, False
    # CASE 1) filt=='pmax': read the fitted Pmax values from pmaxfile
    if filt=='pmax':
        with open(pmaxfile, 'r') as fr:
            csvread = csv.reader(fr, delimiter=';')#, quoting=csv.QUOTE_NONE, quotechar='')
            # print objarr, plxarr
            # print len(objarr[0]), len(plxarr[0])
            for i, line in enumerate(csvread):
                # print line
                # NOTE(review): columns 13/14 are read before the comment-line
                # check below — a short comment row would raise IndexError; confirm
                plotpointi = line[13]
                useinfiti = line[14]
                if line[0] == '' or line[0][0] == '#':
                    continue
                if line[0] != be and plotpointi=='1':
                    # 1) Filtered data (only works when fit==True)
                    if fit and useinfiti == '0':
                        # read paralaxes from csvfile
                        for j, obs in enumerate(objarr[0]):
                            if line[0]==obs and float(plxarr[0][j]) > 0:
                                x_filt[0] += [float(plxarr[0][j])]
                                x_filt[1] += [float(plxarr[1][j])]
                                obj_filt += [line[1]]
                                y_filt[0] += [float(line[2])]
                                y_filt[1] += [float(line[3])]
                                y_filt[2] += [float(line[4])]
                                colors_filt += [gen_color(csvfile, be, line[0], onlyY=onlyY)]
                                break
                    # b) Main data
                    else:
                        # read paralaxes from csvfile
                        for j, obs in enumerate(objarr[0]):
                            if line[0]==obs and float(plxarr[0][j]) > 0:
                                x[0] += [float(plxarr[0][j])]
                                x[1] += [float(plxarr[1][j])]
                                obj += [line[1]]
                                y[0] += [float(line[2])]
                                y[1] += [float(line[3])]
                                y[2] += [float(line[4])]
                                colors += [gen_color(csvfile, be, line[0], onlyY=onlyY)]
                                break
                    if len(line[1]) > 13:
                        longname = True
                # Don't plot Be's // component, but plot Be entire polarization
                # if it was writen in a line and the line[2] element is just
                # the Be fixed name + there are a '1' character in 'plot point'
                # column
                elif line[0] == be and plotpointi=='1':
                    if useinfiti == '1':
                        useinfit = True
                    obj0 = line[1]
                    x0 = [float(bearr[0][0]), float(bearr[1][0])]
                    y0 = [float(line[2]), float(line[3]), float(line[4])]
                    if len(obj0) > 13:
                        longname = True
    # CASE 2) filt is a real filter: read the polarization straight from the table
    else:
        for i in range(len(objarr[0])):
            if objarr[1][i] == filt:
                try:
                    # Skip if plx is less than 0
                    if float(plxarr[0][i]) > 0:
                        x[0] += [float(plxarr[0][i])]
                        x[1] += [float(plxarr[1][i])]
                        y[0] += [float(parr[0][i])]
                        y[1] += [float(parr[1][i])]
                        y[2] += [float(parr[1][i])]
                        obj += [fixName(objarr[0][i])]
                        colors += [gen_color(csvfile, be, objarr[0][i], onlyY=onlyY)]
                        if len(obj[-1]) > 13:
                            longname = True
                except:
                    continue
    # Plot data (one errorbar call per star to keep individual labels/colors)
    for i in range(len(obj)):
        ax.errorbar(x[0][i], y[0][i], xerr=x[1][i], yerr=[[y[2][i]],[y[1][i]]], label=obj[i], \
                    linestyle='--', marker='o', color=colors[i])
    for i in range(len(obj_filt)):
        ax.errorbar(x_filt[0][i], y_filt[0][i], xerr=x_filt[1][i], yerr=[[y_filt[2][i]],[y_filt[1][i]]], \
                    label=obj_filt[i], linestyle='--', marker='x', color=colors_filt[i],\
                    markersize=10, markeredgewidth=1.5)
    if filt=='pmax' and y0 != [[],[],[]]:
        ax.errorbar(x0[0], y0[0], xerr=x0[1], yerr=[[y0[2]],[y0[1]]], label=obj0, \
                    linestyle='--', marker='s', color='black')
    # Fit the line
    if fit and filt=='pmax':
        # If it is to use the point of Be data in the ajust:
        if useinfit:
            x[0] += [x0[0]]
            x[1] += [x0[1]]
            y[0] += [y0[0]]
            y[1] += [y0[1]]
            y[2] += [y0[2]]
        # Fit by the total least squares method (orthogonal distance regression) without clipping
        param, sparam, cov, chi2, niter,bolfilt = phc.fit_linear(x[0], y[0], x[1], [(y[1][i]+y[2][i])/2 for i in range(len(y[0]))], clip=False)
        if len(y[0]) > 2:
            rchi2 = chi2[0]/(len(y[0])-2)
        else:
            rchi2 = 0
        # Extrapolate Pmax at the Be star distance and propagate uncertainties
        pmaxfit = param[0]*x0[0]+param[1]
        spmaxfit = np.sqrt((param[0]*x0[1])**2 + (sparam[0]*x0[0])**2 + sparam[1]**2)
        spmaxfitcoz = param[0]*x0[1]
        print(55*'-')
        print ' Total least squares fit (y = a*x+b):'
        print(55*'-')
        print ' a = {0:.3f} +- {1:.3f}'.format(param[0], sparam[0])
        print ' b = {0:.3f} +- {1:.3f}'.format(param[1], sparam[1])
        print ''
        print ' N = {0:d}'.format(len(y[0]))
        print ' red chi^2 = {0:2f}'.format(rchi2)
        print ''
        print ''
        print ' Extrapolated Pmax: '
        print ''
        print ' Pmax = {0:.4f} +- {1:.4f}*'.format(pmaxfit, spmaxfitcoz)
        print ''
        print ''
        print ' * Propagating only the Be paralax; the error'
        print ' propagating the sigma_a and sigma_b give'
        print ' us {0:.4f}'.format(spmaxfit)
        print ''
        print(55*'-')
        print ''
        xadj = np.linspace(ax.get_xlim()[0],ax.get_xlim()[1],3)
        yadj = param[0]*xadj+param[1]
        ax.plot(xadj, yadj, '-', color='dimgray', linewidth=1.5)
    # Fix limits so the Be star distance is always inside the frame
    if ax.get_xlim()[1] < x0[0]:
        ax.set_xlim([ax.get_xlim()[0], x0[0]+x0[1]])
    elif ax.get_xlim()[0] > x0[0]:
        ax.set_xlim([x0[0]-x0[1],ax.get_xlim()[1]])
    ax.autoscale(False)
    # Plot marks for Be star (vertical lines at its distance +- error)
    ax.plot([x0[0], x0[0]], ax.get_ylim(), linestyle ='--', color='gray')
    ax.plot([x0[0]-x0[1], x0[0]-x0[1]], ax.get_ylim(), \
            linestyle =':', color='gray')
    ax.plot([x0[0]+x0[1], x0[0]+x0[1]], ax.get_ylim(), \
            linestyle =':', color='gray')
    leg = ax.legend(loc='best', borderaxespad=0., numpoints=1, prop={'size':fonts[3]})
    if leg != None:
        leg.get_frame().set_alpha(0.5)
    # Setting sizes
    ax.xaxis.label.set_fontsize(fonts[1])
    ax.yaxis.label.set_fontsize(fonts[1])
    for item in (ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(fonts[2])
    if save:
        plt.savefig('{0}_radial_{1}.{2}'.format(be,filt,extens), bbox_inches='tight')
    else:
        plt.show()
def graf_inst(logfile, mode=1, vfilter=['no-std'], save=False, extens='pdf'):
    """
    Plot a QU diagram for unpolarized standard stars in the
    logfile .log file (the outfile from polt.genTarget, appended by
    the name of the object at each line). Propagates error of
    standard star.

    mode=1 plot BVRI graphs in the same figure;
    mode=2 plot UBVRI graphs in separeted figures.

    Return 3 lists:
      1) qarr = [[mean Q], [propagated Q error], [Q stddev]]
      2) uarr = [[mean U], [propagated U error], [U stddev]]
      3) narr = [n] (the number of data used to compute the averages)
    Where [mean Q], [propagated Q error], ..., [n] are lists with
    the values for each filter.
    """
    # TODO: implement weights for each target according to some criterion
    def meanQU(lines, filt):
        """
        Return the mean QU lists and the number of lines (n):
          [mean Q, propagated Q error, Q stddev],
          [mean U, propagated U error, U stddev],
          n
        lines: the lines readed from .logfile to be computed
        """
        # Column layout of the .log lines: 7=sdth, 8=p, 9=Q, 10=U, 11=theta, 12=sigma
        p, q, u, s, thet, sdth = [],[],[],[],[],[]
        for line in lines:
            if line[3] == filt:
                p += [float(line[8])]
                q += [float(line[9])]
                u += [float(line[10])]
                thet += [float(line[11])]
                s += [float(line[12])]
                sdth += [float(line[7])]
        if q == []:
            print('No {0} data!'.format(filt))
            return [0,0,0],[0,0,0],0
        # Propagate error of delta_theta to Stokes QU
        lixo, sq, su = polt.propQU(p, thet, s, sdth)
        # List with mean QU, the propagated error and the stddev (of the mean)
        meanq = [np.mean(q), np.sqrt(np.dot(sq,sq))/len(sq), np.std(q)/np.sqrt(len(q))]
        meanu = [np.mean(u), np.sqrt(np.dot(su,su))/len(su), np.std(u)/np.sqrt(len(u))]
        return meanq, meanu, len(q)
    def plotQU(filt, fig, ax):
        """
        Receive figure and axes objects and do the plot for filter
        filt WITHOUT show or save the image.
        Return the same than meanQU().
        """
        # Factor to fix the font sizes (smaller when 4 panels share one figure)
        if mode==1:
            factor=0.7
        else:
            factor=1.
        try:
            lines = np.loadtxt(logfile, dtype=str)
        except:
            print('# ERROR: Can\'t read file {0}.'.format(logfile))
            raise SystemExit(1)
#        ax.set_title('{0} filter'.format(filt.upper()), fontsize=fonts[0]*factor, verticalalignment='bottom')
        ax.text(0.98, 0.9, '{0} filter'.format(filt.upper()), horizontalalignment='right', \
                verticalalignment='bottom', transform=ax.transAxes, fontsize=fonts[1]*factor)
#        ax.set_xlabel(r'RA $-$ RA${}_{Be}$ (degree)', size=fonts[1])
#        ax.set_ylabel(r'DEC $-$ DEC${}_{Be}$ (degree)', size=fonts[1])
        ax.set_xlabel(r'Q (%)', size=fonts[1]*factor)
        ax.set_ylabel(r'U (%)', size=fonts[1]*factor)
        # Do the subplots when flag is not 'E', tag is not in vfilter and filter is equal to filt.
        # The while groups consecutive lines of the same target (last column) so each
        # star gets a single errorbar call (and hence a single legend entry).
        j = 0
        while True:
            pts, spts = [[],[]], [[],[]]
            pts[0] = [float(line[9]) for line in lines if lines[j][-1] == line[-1] and \
                      line[3] == filt and line[16] != 'E' and not any(sub in line[17] for sub in vfilter)]
            pts[1] = [float(line[10]) for line in lines if lines[j][-1] == line[-1] and \
                      line[3] == filt and line[16] != 'E' and not any(sub in line[17] for sub in vfilter)]
            ptmp = [float(line[8]) for line in lines if lines[j][-1] == line[-1] and \
                    line[3] == filt and line[16] != 'E' and not any(sub in line[17] for sub in vfilter)]
            thettmp = [float(line[11]) for line in lines if lines[j][-1] == line[-1] and \
                       line[3] == filt and line[16] != 'E' and not any(sub in line[17] for sub in vfilter)]
            s = [float(line[12]) for line in lines if lines[j][-1] == line[-1] and \
                 line[3] == filt and line[16] != 'E' and not any(sub in line[17] for sub in vfilter)]
            sdth = [float(line[7]) for line in lines if lines[j][-1] == line[-1] and \
                    line[3] == filt and line[16] != 'E' and not any(sub in line[17] for sub in vfilter)]
            # Propagate delta_theta errors to the QU uncertainties
            lixo, spts[0], spts[1] = polt.propQU(ptmp, thettmp, s, sdth)
            #print pts
            #print j, len(lines)
            if pts != [[],[]]:
                ax.errorbar(pts[0], pts[1], xerr=spts[0], yerr=spts[1], label=lines[j][-1], \
                            elinewidth=0.5, markersize=4, linestyle='', color='black', marker='o', alpha=0.7)
            # Advance j past every line belonging to the current target
            nn = 0
            for line in lines[j:]:
                if line[-1] == lines[j][-1]:
                    nn += 1
            j += nn
            if j >= len(lines):
                break
        meanq, meanu, n = meanQU(lines, filt)
        if n == 0:
            return meanq, meanu, n
        print 'FILTER {0} -> N = {1}'.format(filt.upper(), n)
        print 'FILTER {0} -> Q (%): mean, error, stddev = {1:.7f}, {2:.7f}, {3:.7f}'\
                    .format(filt.upper(), meanq[0], meanq[1], meanq[2])
        print 'FILTER {0} -> U (%): mean, error, stddev = {1:.7f}, {2:.7f}, {3:.7f}'\
                    .format(filt.upper(), meanu[0], meanu[1], meanu[2])
        # Shaded rectangle marking the mean QU +- stddev region
        coords = [[meanq[0]-meanq[2], meanu[0]-meanu[2]]]
        coords += [[meanq[0]+meanq[2], meanu[0]-meanu[2]]]
        coords += [[meanq[0]+meanq[2], meanu[0]+meanu[2]]]
        coords += [[meanq[0]-meanq[2], meanu[0]+meanu[2]]]
        polygon = Polygon(coords, True, color='blue', alpha=0.6, visible=True, fill='wheat')
        ax.add_patch(polygon)
#        ax.errorbar(meanq[0], meanu[0], xerr=meanq[2], yerr=meanu[2], linestyle='', marker='o', \
#                    elinewidth=2, fillstyle='full', markersize=8, color='black')
        ax.errorbar(meanq[0], meanu[0], xerr=meanq[2], yerr=meanu[2], linestyle='', marker='o', \
                    elinewidth=0.6, fillstyle='full', markersize=3, color='black')
        # Fix limits and draw the axes/mean-value guide lines
        ax.autoscale(False)
        ax.set_xlim([-0.05, 0.05])
        ax.set_ylim([-0.05, 0.05])
        ax.plot(ax.get_xlim(), [0,0], 'k--')
        ax.plot([0,0], ax.get_ylim(), 'k--')
        ax.plot(ax.get_xlim(), [meanu[0],meanu[0]], ':', color='grey')
        ax.plot([meanq[0],meanq[0]], ax.get_ylim(), ':', color='grey')
        # Setting sizes
        ax.xaxis.label.set_fontsize(fonts[1]*factor)
        ax.yaxis.label.set_fontsize(fonts[1]*factor)
        for item in (ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(fonts[2]*factor)
        return meanq, meanu, n
    # Verify if vfilter is a special filter (named alias in polt.vfil)
    if vfilter in polt.vfil.keys():
        vfilter = polt.vfil[vfilter]
    plt.close('all')
    qarr, uarr, narr = [[],[],[]], [[],[],[]], []
    # mode 1: BVRI in a single 2x2 figure with shared axes
    if mode==1:
        fig = plt.figure(1)
        axs = [plt.subplot(2, 2, 1)]
        axs += [plt.subplot(2, 2, 2, sharey=axs[0])]
        axs += [plt.subplot(2, 2, 3, sharex=axs[0])]
        axs += [plt.subplot(2, 2, 4, sharex=axs[1], sharey=axs[2])]
        plt.subplots_adjust(hspace=0.05, wspace=0.05)
        nax = 0
        for filt in ('b','v','r','i'):
            meanq, meanu, n = plotQU(filt, fig, axs[nax])
            for i in range(3):
                qarr[i] += [meanq[i]]
                uarr[i] += [meanu[i]]
            narr += [n]
            nax += 1
            print('')
#        axs[0].set_yticks(axs[0].get_yticks()[1:])
#        axs[3].set_xticks(axs[3].get_xticks()[1:])
#        print axs[3].get_xticks()
        # Hide the redundant tick labels/axis labels on the inner edges
        plt.setp(axs[0].get_xticklabels(), visible=False)
        plt.setp(axs[1].get_xticklabels(), visible=False)
        plt.setp(axs[1].get_yticklabels(), visible=False)
        plt.setp(axs[3].get_yticklabels(), visible=False)
        axs[0].set_xlabel('')
        axs[1].set_xlabel('')
        axs[1].set_ylabel('')
        axs[3].set_ylabel('')
        if save:
            plt.savefig('qu_inst.{0}'.format(extens), bbox_inches='tight')
        else:
            plt.show(block=False)
    # mode 2: UBVRI, one figure per filter
    elif mode==2:
        nfig = 1
        for filt in ('u','b','v','r','i'):
            fig = plt.figure(nfig)
            ax = plt.subplot(1, 1, 1)
            meanq, meanu, n = plotQU(filt, fig, ax)
            for i in range(3):
                qarr[i] += [meanq[i]]
                uarr[i] += [meanu[i]]
            narr += [n]
            nfig += 1
            print('')
            if n == 0:
                continue
            if save:
                plt.savefig('qu_inst_{0}.{1}'.format(filt,extens), bbox_inches='tight')
            else:
                plt.show(block=False)
    return qarr, uarr, narr
def genAll(csvfile, path=None, genlogs=True, genint=True, vfilter=['no-std'], vfilter_graf_p=[], extens='pdf'):
    """
    Run the whole field-star analysis for every Be star listed in
    pyhdust/refs/pol_alvos.txt.

    For each star it can (re)generate the .log file (polt.genTarget),
    the intrinsic-angle fits + QU diagrams (genInt) and then plot all
    field graphs (graf_p, graf_pradial, graf_field, graf_theta and
    polt.graf_t).

    csvfile        : location of dados.csv.
    path           : directory where the .log files are read/written
                     (default: current directory).
    genlogs        : (re)generate the 'star'.log files?
    genint         : (re)generate thet_int.csv and the QU graphs?
    vfilter        : tag filter used by most routines.
    vfilter_graf_p : tag filter passed only to graf_p.
    extens         : extension for the output figures.
    """
    # Fixed options shared by all graph routines below
    bin_data = True
    onlyY = True
    rotate = False  # Here
    mcmc = True  # Here
    odr = True
    save = True
    if path == None or path == '.':
        path = os.getcwd()
    if extens == 'eps':
        print('Warning: field graphs will lose the transparency at .eps format')
    try:
        objs = np.loadtxt('{0}/refs/pol_alvos.txt'.format(hdtpath()), dtype=str)
    except:
        print('# ERROR: Can\'t read files pyhdust/refs/pol_alvos.txt.')
        raise SystemExit(1)
    # Generating logfiles for all Be stars
    if genlogs:
        for star in objs:
            print('Generating logfile for star {0}...'.format(star))
            polt.genTarget(star, path=path, ispol=None, skipdth=False, delta=3.5)
    # Generating thet_int.csv file and QU graphs
    if genint:
        for star in objs:
            # BUGFIX: message previously used the undefined name 'be' (NameError)
            print('Generating QU graphs for star {0}...'.format(star))
            # BUGFIX: 'path' was being passed positionally into genInt's 2nd
            # parameter ('thetfileold'); it must go as the keyword 'path'
            genInt(star, path=path, vfilter=vfilter, extens=extens)
    # Generate all the field graphs for each star
    for star in objs:
        print('='*50)
        print('Generating graphs for star {0}...'.format(star))
        graf_p(csvfile, star, rotate=rotate, path=path, bin_data=bin_data, onlyY=onlyY,
               save=save, fit=mcmc, extens=extens, vfilter=vfilter_graf_p)
        graf_pradial(csvfile, star, 'v', bin_data=bin_data, onlyY=onlyY, save=save, extens=extens, vfilter=vfilter)
        graf_field(csvfile, star, bin_data=bin_data, onlyY=onlyY, save=save, extens=extens)
        graf_theta(csvfile, star, bin_data=bin_data, onlyY=onlyY, save=save, extens=extens)
        if os.path.exists(path+'/'+star+'.log'):
            # The QU diagrams were already generated by genInt when genint==True
            if not genint:
                arr_u, arr_b, arr_v, arr_r, arr_i = polt.graf_qu('{0}/{1}.log'.format(path,star),
                                    mcmc=mcmc, odr=odr, save=True, extens=extens)
            polt.graf_t(path+'/'+star+'.log', save=save, extens=extens, vfilter=vfilter)
        print('\n\n')
def genInt(be, thetfileold=None, path=None, vfilter=['no-std'], extens='pdf'):
    """
    Call polt.graf_qu() for Be star 'be' and save the intrinsic
    angles inside thet_int.csv.

    thetfileold : if not None, read the intrinsic angles from this
          previous file instead of running the MCMC fit again.
    path: path inside which the logfile 'be'.log (out from
          polt.genTarget) is located. The code will try to open
          thet_int.csv inside the current directory (.). If it
          was not found, it will be created. Otherwise, the
          routine will append a new line inside it, asking about
          to overwrite an eventual existing line for star 'be'.

    CONSIDERATIONS:
    - Propagates errors from standard star.
    """
    if path == None or path == '.':
        path = os.getcwd()
    if not os.path.exists('{0}/{1}.log'.format(path,be)):
        print('# No {0}/{1}.log file found.'.format(path,be))
        return
    # Write into a temporary file; it replaces thet_int.csv at the end
    fw = open('thet_int_tmp.csv', 'w')
    csvwrite = csv.writer(fw, delimiter=';')#, quoting=csv.QUOTE_NONE, quotechar='')
    if os.path.exists('thet_int.csv'):
        # Copy every line of the old file, except the one of star 'be'
        # (ask the user before overwriting it)
        fr = open('thet_int.csv', 'r')
        csvread = csv.reader(fr, delimiter=';')#, quoting=csv.QUOTE_NONE, quotechar='')
        opt = ''
        for i, line in enumerate(csvread):
            if line[0] == be:
                while opt not in ('y','Y','n','N'):
                    print('# Star {0} is already present inside file thet_int.csv. Run again and overwrite the current values?'.format(be))
                    opt = raw_input('(y/n): ')
                if opt in ('n','N'):
                    fr.close()
                    os.remove('thet_int_tmp.csv')
                    return
                else:
                    print('\n')
                    continue
            else:
                csvwrite.writerow(line)
        fr.close()
    else:
        # New file: write the two header rows
        csvwrite.writerow(['#obj','name']+['th_int', 'sth_int_+', 'sth_int_-', 'n']*5 +\
                ['th_int', 'sth_int_+',' sth_int_-', 'comment'] +\
                ['b*cos(2th)', 'sb*cos(2th)_+', 'sb*cos(2th)_-']*5 +\
                ['Second peaks: ', 'repeated 7by7', 'cells following','these labels in', 'subheader below'])
        csvwrite.writerow(['#','']+['u']*4+['b']*4+['v']*4+['r']*4+['i']*4+[' ']*4 +\
                ['u']*3+['b']*3+['v']*3+['r']*3+['i']*3 +\
                ['filter', 'th_int', 'sth_int_+', 'sth_int_-','b*cos(2th)', 'sb*cos(2th)_+', 'sb*cos(2th)_-'])#+['to use']*4)
    # Run (or skip) the MCMC fit of the QU diagrams
    if thetfileold==None:
        arr_u, arr_b, arr_v, arr_r, arr_i = polt.graf_qu('{0}/{1}.log'.format(path,be),
                mcmc=True, odr=False, save=True, extens=extens, Vb_ran=[0., 1.], vfilter=vfilter)
    else:
        arr_u, arr_b, arr_v, arr_r, arr_i = polt.graf_qu('{0}/{1}.log'.format(path,be),
                mcmc=False, thetfile=thetfileold, odr=False, save=True, extens=extens, vfilter=vfilter)
#    arr_u[0][0] = np.arctan(arr_u[0][0])*90/np.pi
#    arr_u[1][0] = (90*arr_u[1][0])/(np.pi*(arr_u[1][0]**2+1))
#    arr_u[2][0] = arr_u[1][0]
    # Reshape lists. Copy the second peak informations to 'addpeak' plain list
    # and keep only the first peek informations inside arr_u, arr_b, etc.
    addpeak=[]
    arrs = (arr_u,arr_b,arr_v,arr_r,arr_i)
    for i,arr in enumerate(arrs):
        # Case one of filters didn't have been fitted
        if arr[0] == []:
            for j in (0,1,2):
                arrs[i][j] = [0,0,0,0,0]
        # Two peaks case: arr[j] == [first_peak, second_peak]
        if len(arr[0])==2 and type(arr[0][0])==list:
            addpeak += [filters[i]]
            for j in (0,1,2):
                addpeak += [arr[j][1][0]/2]
            for j in (0,1,2):
                addpeak += [arr[j][1][1]]
                # keep only the first peak inside arrs[i][j]
                arrs[i][j] = arrs[i][j][0]
    # The operation '/2' below is because the intrinsic angle is the inclination angle of the
    # line in in QU diagram diveded by 2! (because PA = 1/2*arctan(U/Q))
    csvwrite.writerow([be, fixName(be)]+[arr_u[0][0]/2]+[arr_u[1][0]/2]+[arr_u[2][0]/2]+[arr_u[3]]+\
                [arr_b[0][0]/2]+[arr_b[1][0]/2]+[arr_b[2][0]/2]+[arr_b[3]]+\
                [arr_v[0][0]/2]+[arr_v[1][0]/2]+[arr_v[2][0]/2]+[arr_v[3]]+\
                [arr_r[0][0]/2]+[arr_r[1][0]/2]+[arr_r[2][0]/2]+[arr_r[3]]+\
                [arr_i[0][0]/2]+[arr_i[1][0]/2]+[arr_i[2][0]/2]+[arr_i[3]]+\
                ['','','','---']+\
                [arr_u[0][1]]+[arr_u[1][1]]+[arr_u[2][1]]+\
                [arr_b[0][1]]+[arr_b[1][1]]+[arr_b[2][1]]+\
                [arr_v[0][1]]+[arr_v[1][1]]+[arr_v[2][1]]+\
                [arr_r[0][1]]+[arr_r[1][1]]+[arr_r[2][1]]+\
                [arr_i[0][1]]+[arr_i[1][1]]+[arr_i[2][1]]+\
                addpeak)
    # Refresh the csv file
    print('\nDone!\n')
    fw.close()
    try:
        os.remove('thet_int.csv')
    except:
        pass
    os.rename('thet_int_tmp.csv','thet_int.csv')
    return
def rotQU(q, u, sq, su, ang, sang, propag=True):
    """
    Rotates lists/arrays q and u in QU diagram at an angle
    2*(ang +- sang) (clockwise).

    Look that if 'ang' is the mean polarization angle,
    this rotation will transpose all polarization to
    U parameter. Q parameter will have residual variations
    with respect to the 0.

    INPUT:
        q, u : lists/arrays with the Q and U Stokes values.
        sq, su : lists/arrays with the uncertainties of q and u.
        ang : rotation angle, in degrees (applied as 2*ang radians).
        sang : uncertainty of 'ang', in degrees.
        propag : propagates the 'sang' to the uncertainties
                 of rotated QU?

    Return q_rot, u_rot, sq_rot, su_rot

    Todo: sometimes it's better don't use any error value
    for ang?
    """
    # Full rotation angle in radians (factor 2 because PA = arctan(U/Q)/2)
    rad = 2*ang*np.pi/180.
    if propag:
        srad = 2*sang*np.pi/180.
    else:
        srad = 0.
    # Rotate each QU point and propagate uncertainties.
    # NOTE: removed leftover Python-2 debug `print` statements that
    # polluted stdout on every call.
    qRot, sqRot, uRot, suRot = [], [], [], []
    for i in range(len(q)):
        qRot += [q[i]*np.cos(rad) + u[i]*np.sin(rad)]
        uRot += [-q[i]*np.sin(rad) + u[i]*np.cos(rad)]
        sqRot += [np.sqrt((uRot[i]*srad)**2 + (sq[i]*np.cos(rad))**2 + (su[i]*np.sin(rad))**2)]
        suRot += [np.sqrt((qRot[i]*srad)**2 + (sq[i]*np.sin(rad))**2 + (su[i]*np.cos(rad))**2)]
    return qRot, uRot, sqRot, suRot
def rotQUBe(be, thetfile, path=None, every=False, propag=True, vfilter=['no-std']):
    """
    Rotates the QU values for Be star 'be' in the intrinsic
    angle specified inside thetfile and computes the <Q'>
    and <U'> of parameters rotated, returning four lists:
    lbd (the wavelength values), <U'>, sigma U' and stddev U',
    with the values in each one of the filters UBVRI.

    'thetfile' : the location (path+filename) of thet_int.csv
                 file with the intrinsic angles (out from fs.gen).
    'path' : the path where is located the log file for star
             'be'. If None, is supposed inside '.'.
    'every' : use one intrinsic angle for each one filter to
              rotate them? If every=False makes all data to use
              a mean value at the -4:-2 collums (22th to 24th)
              from 'thetfile'.
    'propag' : propagates the uncertainies of the intrinsic angle
               to the rotated QU values? Note that 'propag'
               parameter will only act over sigma U' values among
               all the four output lists.
    'vfilter' : tags used to discard observations (matched against
                the flag column, line[17], of the log file).

    CONSIDERATIONS:

    - Propagates errors from standard star.

    The rotated parameters are just the polarization
    components perpendicular and parallel to the
    orientation of the disk.

    If every==True, uses the intrinsic angle of each filter to
    rotate its QU values (if some angle==0, skip this filter!);
    otherwise, use the same value specified in last 4 columns
    for every one of the 5 filters.

    Returns ([], [], [], []) on any read/parse failure.
    """
    if path == None:
        path = os.getcwd()
    # Open the csv file with the intrinsic angles.
    # NOTE(review): mode 'ro' is non-standard (Python 2 tolerated it) — verify.
    try:
        f0 = open(thetfile, 'ro')
        reader = csv.reader(f0, delimiter=';')
    except:
        print('# ERROR: Can\'t read file {0}.'.format(thetfile))
        raise SystemExit(1)
    # Load the star's log file as a string table (one row per observation)
    try:
        lines = np.loadtxt('{0}/{1}.log'.format(path, be), dtype=str)
    except:
        print('# ERROR: {0}.log file not found inside {1}.'.format(be, path))
        return [],[],[],[]
    # Read the intrinsic angles: one common value (every=False) or one
    # per UBVRI filter (every=True, columns 2,6,10,14,18 and their errors)
    thet, sthet, comment = [],[],[]
    for line in reader:
        if line[0] == be:
            if not every:
                try:
                    # Columns 22-24: mean angle and its +/- uncertainties
                    thet = [float(line[22])]*5
                    sthet = [(float(line[23])+float(line[24]))/2]*5
                    comment = [line[25]]*5
                except:
                    print('# ERROR: Components [-4],[-3],[-2] in lines of {0} file seem don\'t exist or not be float type for Be star {1}.'.format(thetfile, be))
                    return [],[],[],[]
            else:
                try:
                    for i in range(2,19,4):
                        thet += [float(line[i])]
                        # Symmetrize the +/- uncertainties into one sigma
                        sthet += [(float(line[i+1])+float(line[i+2]))/2]
                        comment += ['']
                        if thet[-1] == 0:
                            print('# WARNING: No intrinsic angle defined to filter {0}.'.format(filters[i/4]))
                except:
                    print('# ERROR: Components in lines of {0} file seem not be float type.'.format(thetfile))
                    return [],[],[],[]
    if thet == []:
        print('# ERROR: Star {0} not found in {1} file.'.format(be, thetfile))
        return [],[],[],[]
    j = 0
    # sqqRot/suuRot hold [propagated errors, stddev-of-mean] per filter
    lbd, qqRot, uuRot, sqqRot, suuRot = [],[],[], [[],[]], [[],[]]
    # Getting the values for each filter
    for filt in filters:
        # Angle == 0 means "no intrinsic angle for this filter": skip it
        if thet[j] == 0:
            j += 1
            continue
        JD, p, q, u, s, tht, sdth = [],[],[],[],[],[],[]
        for line in lines:
            # Keep only this filter, non-error rows, and rows whose flag
            # (line[17]) contains none of the vfilter tags
            if line[3] == filt and line[16] != 'E' and not any(sub in line[17] for sub in vfilter):
                JD += [float(line[0])]
                p += [float(line[8])]
                q += [float(line[9])]
                u += [float(line[10])]
                tht += [float(line[11])]
                s += [float(line[12])]
                sdth += [float(line[7])]
        # Propagate (p, theta) errors into individual sq, su values
        lixo, sq, su = polt.propQU(p, tht, s, sdth)
        if len(q) != 0:
            # Rotates each QU value
            qRot, uRot, sqRot, suRot = rotQU(q,u,sq,su,thet[j],sthet[j],propag=propag)
        # Computes the mean of rotated QU parameters, propagates the errors and compute stddev
        if len(q) != 0:
            lbd += [phc.lbds[filt]]
            qqRot += [np.mean(qRot)]
            uuRot += [np.mean(uRot)]
            # Quadrature-combined propagated error of the mean...
            sqqRot[0] += [np.sqrt(sum([el**2 for el in sqRot]))/len(sqRot)]
            # ...and the empirical standard error of the mean
            sqqRot[1] += [np.std(qRot)/np.sqrt(len(qRot))]
            suuRot[0] += [np.sqrt(sum([el**2 for el in suRot]))/len(suRot)]
            suuRot[1] += [np.std(uRot)/np.sqrt(len(uRot))]
        j += 1
    # Only the U' statistics are returned; Q' accumulators are kept for debugging
    return lbd, uuRot, suuRot[0], suuRot[1]
def fitSerk(larr, parr, sarr, star='', law='w82', n_burnin=400, n_mcmc=800, \
            n_walkers=120, extens='pdf'):
    """
    Fit Serkowski law using Markov Chain Monte Carlo
    from emcee code.

    The likelihood function (L) supposes Gaussian
    errors around the Pmax values:
    log(L) = -0.5*chi2 -0.5*sum(ln(2*pi*sy^2))

    INPUT:
        larr: array/list with lambda values (in microns)
        parr: array/list with P values
        sarr: array/list with the sigma_P values
        star: star name to be printed in the graph and
              its filename. If it's a void str '', this
              routine give a random number to prevent
              overwriting data.
        law: what K value in Serkowski's law use?
             (see polt.serkowski).
        n_burnin: number of iterations for burning-in
        n_mcmc: number of iterations to run emcee
        n_walkers: number of walkers to map the posterior
                   probabilities.
        extens: extension for the graph file

    OUTPUT: sorted like "pmax_fit, lmax_fit, chi2"
        pmax_fit: [Pmax, sPmax_+, sPmax_-], the Pmax value and
                  its errors (at right and left from it). Pmax
                  is the median of distribution probability and
                  sPmax_+, sPmax_- are the range within which
                  there are 68.3% of the points in such
                  distribution.
        lmax_fit: Idem, for lmax.
        chi2: Reduced chi2.

    NOTE: interactive — may prompt the user (raw_input) to restrict
    the parameter ranges used in the error computation.
    """
    import emcee
    import triangle.nov
    from matplotlib.ticker import MaxNLocator

    def lnprob(params, x, y, sy):
        """
        Return the log of posterior probability (p_pos) in
        bayesian statistics for the parameters 'params'
        ([Pmax,lmax]) and the data poits x, y and sy
        (y error values).

        p_pos = L*p_prior (unless by a normalization constant),
        where L is the likelihood function and p_prior is the
        prior probability function.

        In our case, for gaussian and independent uncertainies,
        the log of likelihood:
        log(L) = -0.5*chi2 -0.5*sum(ln(2*pi*sy^2))

        Now, p_prior = constant for 'params' values inside the
        range defined by 'intervalos'; otherwise, it is 0.
        That is the only determination that we can do.

        So, p_pos = -0.5*chi2 -0.5*sum(ln(2*pi*sy^2)) or
        -inf case 'params' are out from the allowed range.
        """
        Pmax, lmax = params
        # Set prior ln prob (flat prior inside 'intervalos', 0 outside)
        if Pmax < intervalos[0][0] or Pmax > intervalos[0][1] or \
                lmax < intervalos[1][0] or lmax > intervalos[1][1] :
            lnprior = -np.inf
        else:
            lnprior = 0.
        # Return posterior prob (lmax converted from microns to Angstroms)
        if not np.isfinite(lnprior):
            return -np.inf
        else:
            return lnprior -0.5*np.sum(((polt.serkowski(Pmax, lmax*10000, x*10000, law=law, mode=2) - y)/sy)**2 + np.log(2*np.pi*(sy**2)))

    def run_emcee(sampler, p0):
        """
        Run emcee.

        p0 is the initial positions for the walkers.

        Returns (sampler, p_mcmc, l_mcmc, chi): the medians and
        asymmetric 68.3% errors for Pmax and lmax, plus reduced chi2.
        """
        print "Burning-in ..."
        pos, prob, state = sampler.run_mcmc(p0, n_burnin)
        sampler.reset()
        print "Running MCMC ..."
        pos, prob, state = sampler.run_mcmc(pos, n_mcmc, rstate0=state)
        #~ Print out the mean acceptance fraction.
        af = sampler.acceptance_fraction
        print "Mean acceptance fraction:", np.mean(af)
        # The lines below were to compute the best fit parameters using the maximum value
        """
        #~ Get the index with the highest probability
        maxprob_idx = np.argmax(prob)
        minprob_idx = np.argmin(prob)
        #~ Get the best parameters and their respective stddev + chi2
        params_fit = pos[maxprob_idx]
        stddev_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)]
        samples_aux = map(lambda v: [v[0]-params_fit[0], v[1]-params_fit[1]], samples)
        if len(larr) == 2:
            chi1 = 1.
        else:
            chi1 = np.sum(((polt.serkowski(params_fit[0], params_fit[1]*10000, larr*10000, law=law, mode=2) - parr)/sarr)**2)/(len(larr)-2)
        # Computing errors accordind to the distance to the maximum value inside which
        # there are 68.3% of the data
        p_fit1 = np.array([], dtype=float)
        p_fit2 = np.array([], dtype=float)
        l_fit1 = np.array([], dtype=float)
        l_fit2 = np.array([], dtype=float)
        print('Please wait, computing errors...')
        for elem in samples_aux:
            if elem[0] >= 0:
                p_fit1 = np.append(p_fit1, [elem[0]])
            else:
                p_fit2 = np.append(p_fit2, [elem[0]])
            if elem[1] >= 0:
                l_fit1 = np.append(l_fit1, [elem[1]])
            else:
                l_fit2 = np.append(l_fit2, [elem[1]])
        # Caution, 65.85 = 100-34.15 !!!
        p_error = [np.percentile(p_fit1, 68.3), -np.percentile(p_fit2, 31.7)]
        l_error = [np.percentile(l_fit1, 68.3), -np.percentile(l_fit2, 31.7)]
        print(74*'-')
        print('Output')
        print(74*'-')
        print ' P_max = {0:.4f} +{1:.4f} -{2:.4f}'.format(params_fit[0],p_error[0],p_error[1],stddev_fit[0])
        print 'lbd_max = {0:.4f} +{1:.4f} -{2:.4f}'.format(params_fit[1],l_error[0],l_error[1],stddev_fit[1])
        print 'reduced chi2 = {0:.4f}'.format(chi1)
        print(74*'-')
        """
        ### 1) Compute the results using all interval
        print('Please wait, computing errors...')
        samples = sampler.chain[:, :, :].reshape((-1, ndim))
        # Median and asymmetric 68.3% interval (percentiles 16.075/50/83.925)
        p_mcmc, l_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                        zip(*np.percentile(samples, [16.075, 50, 83.925], axis=0)))
        # Reduced chi2 is undefined for 2 points (2 free parameters)
        if len(larr) == 2:
            chi = 0.
        else:
            chi = np.sum(((polt.serkowski(p_mcmc[0], l_mcmc[0]*10000, larr*10000, law=law, mode=2) - parr)/sarr)**2)/(len(larr)-2)
        #~ Plot the graphs -- histogram, corner and convergence map
#        fighists = plot_samples_hist(sampler)
        plot_conv(sampler, [p_mcmc[0], l_mcmc[0]])
        fig = triangle.nov.corner(samples, title=fixName(star), \
#                    truths=[p_mcmc[0], l_mcmc[0]], \
#                    extents=[(p_range[0],l_range[0]),(p_range[1],l_range[1])], \
                    quantiles=[0.16075, 0.50, 0.83925], \
                    labels=['$P_{max}\,($%$)$', '$\lambda_{max}\,(\mu m)$'], \
                    verbose=False)
        fig.savefig('{0}_correl.{1}'.format(star,extens))
        # Second, identical corner plot only to be shown on screen
        fig1 = triangle.nov.corner(samples, title=fixName(star), \
#                    truths=[p_mcmc[0], l_mcmc[0]], \
#                    extents=[(p_range[0],l_range[0]),(p_range[1],l_range[1])], \
                    quantiles=[0.16075, 0.50, 0.83925], \
                    labels=['$P_{max}\,($%$)$', '$\lambda_{max}\,(\mu m)$'], \
                    verbose=False)
        fig1.show()
        #~ Print the output using all interval
        """ TBD """
        print(74*'-')
        print('Output')
        print(74*'-')
        print ' P_max = {0:.4f} +{1:.4f} -{2:.4f}'.format(p_mcmc[0],p_mcmc[1],p_mcmc[2])
        print 'lbd_max = {0:.4f} +{1:.4f} -{2:.4f}'.format(l_mcmc[0],l_mcmc[1],l_mcmc[2])
        print 'reduced chi2 = {0:.4f}'.format(chi)
        print(74*'-')
        ### 2) NEW: Requests if the user want to use some specific interval
        opt = ''
        while opt not in ('y','Y','n','N'):
            print('These values were calculated using all Pmax and lbdmax data.\nDo you want' +\
                ' to select specific ranges to use to compute the uncertainties?')
            opt = raw_input('(y/n): ')
        if opt in ('y','Y'):
            # Loop until the user confirms a valid pair of ranges
            while True:
                print('')
                while True:
                    try:
                        petr = raw_input('Pmax: specify Pmax in format `Pmax_min,Pmax_max`: ')
#                        p_int = [float(ei)-params_fit[0] for ei in petr.split(',')]
                        p_range = [float(ei) for ei in petr.split(',')]
                        if len(p_range) == 2:
                            if p_range[1] > p_range[0]:
                                break
                            else:
                                print('Error: Pmax_max must be greather than Pmax_min!')
                        else:
                            print('Invalid input!')
                    except:
                        print('Invalid input!')
                while True:
                    try:
                        letr = raw_input('lbdmax: specify lbdmax in format `lbdmax_min,lbdmax_max`: ')
#                        l_int = [float(ei)-params_fit[1] for ei in letr.split(',')]
                        l_range = [float(ei) for ei in letr.split(',')]
                        if len(l_range) == 2:
                            if l_range[1] > l_range[0]:
                                break
                            else:
                                print('Error: lbdmax_max must be grather than lbdmax_min!')
                        else:
                            print('Invalid input!')
                    except:
                        print('Invalid input!')
                opt = ''
                while opt not in ('y','Y','n','N'):
                    print('\nIs it correct? Pmax_min,Pmax_max = ' + petr + '\n' +\
                        '       lbdmax_min,lbdmax_max = ' + letr)
                    opt = raw_input('(y/n): ' )
                if opt in ('y','Y'):
                    break
            # The lines below were to compute the best fit parameters using the maximum value
            """
            p_fit1 = np.array([], dtype=float)
            p_fit2 = np.array([], dtype=float)
            l_fit1 = np.array([], dtype=float)
            l_fit2 = np.array([], dtype=float)
            print('Please wait, computing errors...')
            # fit1: arrays for the right of the best values
            # fit2: arrays for the left of the best values
            for elem in samples_aux:
                if elem[0] >= 0 and elem[0] < p_int[1]:
                    p_fit1 = np.append(p_fit1, [elem[0]])
                elif elem[0] < 0 and elem[0] > p_int[0]:
                    p_fit2 = np.append(p_fit2, [elem[0]])
                if elem[1] >= 0 and elem[0] < l_int[1]:
                    l_fit1 = np.append(l_fit1, [elem[1]])
                elif elem[1] < 0 and elem[1] > l_int[0]:
                    l_fit2 = np.append(l_fit2, [elem[1]])
            # Caution, 100-68.3 = 31.7!
            p_error = [np.percentile(p_fit1, 68.3), -np.percentile(p_fit2, 31.7)]
            l_error = [np.percentile(l_fit1, 68.3), -np.percentile(l_fit2, 31.7)]
            print(74*'-')
            print('Output')
            print(74*'-')
            print ' P_max = {0:.4f} +{1:.4f} -{2:.4f}'.format(params_fit[0],p_error[0],p_error[1],stddev_fit[0])
            print 'lbd_max = {0:.4f} +{1:.4f} -{2:.4f}'.format(params_fit[1],l_error[0],l_error[1],stddev_fit[1])
            print 'reduced chi2 = {0:.4f}'.format(chi1)
            print(74*'-')
            """
            # Filtering 'samples' array (keep only points inside the user ranges)
            print('Please wait, computing errors...')
            samples_new = np.empty(shape=[0, 2])
            for elem in samples:
                if elem[0] >= p_range[0] and elem[0] <= p_range[1] and \
                        elem[1] >= l_range[0] and elem[1] <= l_range[1]:
                    samples_new = np.vstack([samples_new, elem])
            # Computing NEW medians and errors
            p_mcmc, l_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                            zip(*np.percentile(samples_new, [16.075, 50, 83.925], axis=0)))
            if len(larr) == 2:
                chi = 0.
            else:
                chi = np.sum(((polt.serkowski(p_mcmc[0], l_mcmc[0]*10000, larr*10000, law=law, mode=2) - parr)/sarr)**2)/(len(larr)-2)
            #~ Print the output using the specific range
            """ TBD """
            print(74*'-')
            print('Output')
            print(74*'-')
            print ' P_max = {0:.4f} +{1:.4f} -{2:.4f}'.format(p_mcmc[0],p_mcmc[1],p_mcmc[2])
            print 'lbd_max = {0:.4f} +{1:.4f} -{2:.4f}'.format(l_mcmc[0],l_mcmc[1],l_mcmc[2])
            print 'reduced chi2 = {0:.4f}'.format(chi)
            print(74*'-')
            # Save the new triangle graph
            fig = triangle.nov.corner(samples_new, title=fixName(star), \
#                        truths=[p_mcmc[0], l_mcmc[0]], \
#                        extents=[(p_range[0],l_range[0]),(p_range[1],l_range[1])], \
                        quantiles=[0.16075, 0.50, 0.83925], \
                        labels=['$P_{max}\,($%$)$', '$\lambda_{max}\,(\mu m)$'], \
                        verbose=False)
            fig.savefig('{0}_correl_cut.{1}'.format(star,extens))
        else:
            # No range selected: remove any stale "_cut" graph from a previous run
            try:
                os.remove('{0}_correl_cut.{1}'.format(star,extens))
            except:
                pass
#        plt.close(fighists[0])
#        plt.close(fighists[1])
        plt.close(fig1)
        return sampler, p_mcmc, l_mcmc, chi

    def plot_samples_hist(sampler):
        """
        Plot two figures with the histograms (one per parameter).
        Currently unused (call commented out above).
        """
        samples = [sampler.flatchain[:,i] for i in (0,1)]
        par = ['$P_{max}$', '$\lambda_{max}$']
        fig = []
        for i, sample in enumerate(samples):
            fig += [plt.figure()]
            plt.hist(sample, 100)
            plt.title('Sample of parameter {0}'.format(par[i]))
            fig[-1].show()
        return fig

    def plot_conv(sampler, param):
        """
        Plot convergence map. 'param' are the values to be highlighted.
        Saves the figure as '<star>_conv.<extens>'.
        """
        fig, axes = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
        axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4)
        axes[0].yaxis.set_major_locator(MaxNLocator(5))
        axes[0].axhline(param[0], color="#888888", lw=2)
        axes[0].set_ylabel("$P_{max}$")
        axes[1].plot(sampler.chain[:, :, 1].T, color="k", alpha=0.4)
        axes[1].yaxis.set_major_locator(MaxNLocator(5))
        axes[1].axhline(param[1], color="#888888", lw=2)
        axes[1].set_ylabel("$\lambda_{max}$")
        axes[1].set_xlabel("Step number")
        fig.tight_layout(h_pad=0.0)
        fig.savefig('{0}_conv.{1}'.format(star,extens))
        return

    # Setting parameters and limits (flat prior box used by lnprob)
    Pmax_max = 9.
    Pmax_min = -9.
    lmax_max = 1.
    lmax_min = 0.
    intervalos = np.array([[Pmax_min, Pmax_max], [lmax_min, lmax_max]])
    ndim = 2
    # Converting lists to np.array
    if type(parr) == list:
        parr = np.array(parr)
    if type(larr) == list:
        larr = np.array(larr)
    if type(sarr) == list:
        sarr = np.array(sarr)
    # If 'star' was not specified, generate a random number to append to the graph name to be saved
    if star == '':
        star = 'rand' + str(int(np.random.rand(1)[0]*10000))
#    larr = np.array([0.3650, 0.4450, 0.5510, 0.6580, 0.8060])
#    parr = np.array([0.26685, 0.34856, 0.36904, 0.33956, 0.29710])
#    sigma = np.array([0.09753, 0.00881, 0.00749, 0.01132, 0.01120])
#    larr = np.array([0.4450, 0.5510, 0.6580, 0.8060])
#    parr = np.array([0.34856, 0.36904, 0.33956, 0.29710])
#    sarr = np.array([0.00881, 0.00749, 0.01132, 0.01120])
    # Define random values to be used as priori numbers within the interval
    p0 = np.array( [np.random.rand(ndim) for n in xrange(n_walkers)] )
    for k in range(ndim):
        p0[:,k] = intervalos[k][0]+p0[:,k]*(intervalos[k][1]-intervalos[k][0])
    # Initialize the sampler and run mcmc
    sampler = emcee.EnsembleSampler(n_walkers, ndim, lnprob, args=[larr, parr, sarr], a=3)#, threads=2)
    sampler, pmax_fit, lmax_fit, chi = run_emcee(sampler, p0)
    return pmax_fit, lmax_fit, chi
def graf_pperp(be, thetfile=None, path=None, plotB=True, plotU=True, plotUevery=True, fit=True, \
            invertY=False, propag=True, propmode='comb', law='w82', vfilter_be=[], save=False, extens='pdf'):
    """
    Plot P x wavelength for the ISP perpendicular to the
    QU intrinsic line in QU diagram.

    'be' : Be star to plot
    'thetfile' : (for useB=False) file with the intrinsic angles (oufile from
                 fs.genInt).
    'plotB' : Plot b*cos(psi) values from 'thetfile' as estimation of
              ISP component perpendicular to the QU line of the Be
              star?
    'plotU' : Plot the averages of rotated data in QU diagram at the angle
              corresponding to the disk inclination read from 23-25th
              lines inside 'thetfile'?
    'plotUevery' : Like plotU, but uses one intrinsic angle for each one
                   filter.
    'fit': fit using MCMC? Case True, generate (or append lines)
           in the file ./'be'_is_perp.csv with the best values.
    'path' : (for useB=False) the path where is located the log file for star 'be'
             (out from polt.genTarget). If None, it is supposed
             inside the current directory (.).
    'vfilter_be' : (for useU/useUevery True) tags to be filtered of data in
                   the computation of ISP perpendicular to the QU line.
    'propag' : (for useU/useUevery True) propagates the uncertainies of the
               intrinsic angle to the rotated QU values in computation
               of ISP perp to the QU line?
    'propmode' : (for useB=False) mode to compute the error from ISP perp
                 to the QU line.
                 'stddev' : only the stddev of the mean
                 'prop' : only the propagated from individual data
                 'comb' : the combined between them
    'save' : save the figure instead of showing it on screen.
    'extens' : file extension for saved figures.

    CONSIDERATIONS: appends results to '<be>_is_perp.csv' (creates it with
    a header when missing).
    """
    import time
    if path == None:
        path = os.getcwd()
    # Verify if vfilter is a special filter (named preset in polt.vfil)
    if vfilter_be in polt.vfil.keys():
        vfilter_be = polt.vfil[vfilter_be]
    plt.close('all')
    # Open the output csv: append when it exists, otherwise create + header
    if os.path.exists('{0}_is_perp.csv'.format(be)):
        fout = open('{0}_is_perp.csv'.format(be), 'a')
        csvout = csv.writer(fout, delimiter=';')#, quoting=csv.QUOTE_NONE, quotechar='')
    else:
        fout = open('{0}_is_perp.csv'.format(be), 'w')
        csvout = csv.writer(fout, delimiter=';')#, quoting=csv.QUOTE_NONE, quotechar='')
        csvout.writerow(['#obj','#name']+['Pmax','sPmax_+','sPmax_-', 'lmax','slmax_+','slmax_-']+['chi','n']+\
                    ['<th>', 's<th>','<p/sp>']+\
                    ['plot point?', 'use in fit?', 'comments','']+\
                    ['run date'])
#    objarr_tht, lixo, thtarr = getTable(data, 'filt', 'thet', sy='sthet', \
#                            vfilter=vfilter_thet, bin_data=True, onlyY=onlyY, unbias=unbias)
    # Initialize the graph
    fig = plt.figure()
    ax = plt.subplot(1, 1, 1)
    ax.set_title('Perpendicular ISP - {0}'.format(phc.bes[be]), fontsize=fonts[0], verticalalignment='bottom')
    ax.set_xlabel(r'$\lambda\ (\AA)$', size=fonts[1])
    ax.set_ylabel('P (%)', size=fonts[1])
    ax.set_xlim([2500, 8500])
    # --- 1) b*cos(psi) estimate read directly from 'thetfile' ---
    if plotB:
        # NOTE(review): mode 'ro' is non-standard (Python 2 tolerated it) — verify.
        try:
            f0 = open(thetfile, 'ro')
            reader = csv.reader(f0, delimiter=';')
        except:
            print('# ERROR: Can\'t read file {0}.'.format(thetfile))
            raise SystemExit(1)
        pBe, lbd, s = [],[],[]
        for line in reader:
            if line[0] == be:
                try:
                    # Columns 26,29,32,... : b*cos(psi) per filter plus +/- errors
                    for i in range(26,40,3):
#                        print line[i], line[i+1], line[i+2]
                        if float(line[i]) != 0:
                            lbd += [phc.lbds[filters[int((i-26)/3)]]]
                            pBe += [float(line[i])]
                            s += [(float(line[i+1])+float(line[i+2]))/2]
                        else:
                            print('# WARNING: No b*cos(psi) value for filter {0}.'.format(filters[int((i-26)/3)]))
                except:
                    print line[i], line[i+1], line[i+2]
                    print('# ERROR: Components in lines of {0} file seem not be float type.'.format(thetfile))
                    return
        if pBe != []:
            ax.errorbar(lbd, pBe, yerr=s, label=r'$P_{IS}^\perp\, (b\cos\psi)$', linestyle='', \
                        marker='o', color='black')
            if fit:
                # Convert to microns
                lb = [lbi/10000 for lbi in lbd]
                print ''
                pmax_fit, lmax_fit, chi2 = fitSerk(lb, pBe, s, star=be, extens=extens)
                csvout.writerow([be, fixName(be)+' (bcos psi)'] + list(pmax_fit) + list(lmax_fit) + [chi2, len(pBe)] + ['0']*3 + ['0','0','---',''] + [time.strftime("%Y/%m/%d - %I:%M %p")])
                ll = np.linspace(3000.,8300.,100)
                pp = np.array([])
                for lli in ll:
                    pp = np.append(pp, polt.serkowski(pmax_fit[0], lmax_fit[0]*10000, lli, law=law, mode=2))
                # Only plot the graph if there are more than one data, because with an only point
                # the curve is not defined! But the emcee was runned to generate the covariance map
                if len(pBe) > 1:
                    ax.plot(ll, pp, color='black', ls='-')
        else:
            print('# WARNING: Star {0} not found in {1} file.'.format(be, thetfile))
    # --- 2) <U'> estimate using one common intrinsic angle (every=False) ---
    if plotU:
        lbd, pBe, sBe, devBe = rotQUBe(be, thetfile, path=path, every=False, vfilter=vfilter_be, propag=propag)
        # Using combined error for points to // pol to Be star
        if propmode == 'stddev':
            s = devBe
        elif propmode == 'prop':
            s = sBe
        elif propmode == 'comb':
            s = [np.sqrt(sBe[i]**2+devBe[i]**2) for i in range(len(sBe))]
        else:
            print('WARNING: `propmode` parameter not valid as {0}. Activating as `comb` (see what it means in the help...)'.format(propmode))
            s = [np.sqrt(sBe[i]**2+devBe[i]**2) for i in range(len(sBe))]
        if lbd != []:
            ax.errorbar(lbd, pBe, yerr=s, label=r'$P_{IS}^\perp\, (<U\'>)$', linestyle='', \
                        marker='^', color='DodgerBlue')
            if fit:
                # Convert to microns
                lb = [lbi/10000 for lbi in lbd]
                print ''
                pmax_fit, lmax_fit, chi2 = fitSerk(lb, pBe, s, star=be, extens=extens)
                csvout.writerow([be, fixName(be)+' (<U`>)'] + list(pmax_fit) + list(lmax_fit) + [chi2, len(pBe)] + ['0']*3 + ['0','0','---',''] + [time.strftime("%Y/%m/%d - %I:%M %p")])
                ll = np.linspace(3000.,8300.,100)
                pp = np.array([])
                for lli in ll:
                    pp = np.append(pp, polt.serkowski(pmax_fit[0], lmax_fit[0]*10000, lli, law=law, mode=2))
                # Only plot the graph if there are more than one data, because with an only point
                # the curve is not defined! But the emcee was runned to generate the covariance map
                if len(pBe) > 1:
                    ax.plot(ll, pp, color='DodgerBlue', ls=':')
        else:
            print('WARNING: don`t displaying the <U`> values because can`t rotate for every=False')
    # --- 3) <U'> estimate using one intrinsic angle per filter (every=True) ---
    if plotUevery:
        lbd, pBe, sBe, devBe = rotQUBe(be, thetfile, path=path, every=True, vfilter=vfilter_be, propag=propag)
        # Using combined error for points to // pol to Be star
        if propmode == 'stddev':
            s = devBe
        elif propmode == 'prop':
            s = sBe
        elif propmode == 'comb':
            s = [np.sqrt(sBe[i]**2+devBe[i]**2) for i in range(len(sBe))]
        else:
            print('WARNING: `propmode` parameter not valid as {0}. Activating as `comb` (see what it means in the help...)'.format(propmode))
            s = [np.sqrt(sBe[i]**2+devBe[i]**2) for i in range(len(sBe))]
        if lbd != []:
#            print lbd, pBe, s
            ax.errorbar(lbd, pBe, yerr=s, label=r'$P_{IS}^\perp\, (<U\'>\, 2)$', linestyle='', \
                        marker='s', color='OrangeRed')
            if fit:
                # Convert to microns
                lb = [lbi/10000 for lbi in lbd]
                print ''
                pmax_fit, lmax_fit, chi2 = fitSerk(lb, pBe, s, star=be, extens=extens)
                csvout.writerow([be, fixName(be)+' (<U`> every)'] + list(pmax_fit) + list(lmax_fit) + [chi2, len(pBe)] + ['0']*3 + ['0','0','---',''] + [time.strftime("%Y/%m/%d - %I:%M %p")])
                ll = np.linspace(3000.,8300.,100)
                pp = np.array([])
                for lli in ll:
                    pp = np.append(pp, polt.serkowski(pmax_fit[0], lmax_fit[0]*10000, lli, law=law, mode=2))
                # Only plot the graph if there are more than one data, because with an only point
                # the curve is not defined! But the emcee was runned to generate the covariance map
                if len(pBe) > 1:
                    ax.plot(ll, pp, color='OrangeRed', ls='--')
        else:
            print('WARNING: don`t displaying the <U`> values because can`t rotate for every=False')
    if invertY:
        ax.set_ylim(ax.get_ylim()[::-1])
    leg = ax.legend(loc='best', borderaxespad=0., numpoints=1, prop={'size':fonts[4]})
    leg.get_frame().set_alpha(0.5)
    # Setting sizes
    ax.xaxis.label.set_fontsize(fonts[1])
    ax.yaxis.label.set_fontsize(fonts[1])
    for item in (ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(fonts[2])
    if save:
        fig.savefig('{0}_pperp.{1}'.format(be,extens), bbox_inches='tight')
    else:
        fig.show()
    print('\nDone!\n')
    fout.close()
    return
|
dbednarski/pyhdust
|
pyhdust/fieldstars.py
|
Python
|
gpl-3.0
| 125,153
|
[
"Gaussian"
] |
bea33e754c9694eb54d6bf5ea4ef818963c208aaf2474e12f49367ae85b7db7c
|
"""Datastore, logging, application and TLS server settings for the blinx API."""
import logging
import os
import ssl
import uuid

# Mongo information
datastore_host = 'localhost'
datastore_port = 27017

# Logging
log_level = logging.DEBUG

# Application Settings
app_settings = dict(app_name='blinx_api',
                    compress_response=True,
                    cookie_secret='<You should fill this in>',
                    salt_gen=uuid.uuid4,
                    template_path=os.path.join(os.path.dirname(__file__), 'templates/email'),
                    smtp_relay='localhost',
                    smtp_port=587,
                    smtp_user='foo',
                    smtp_pass='bar',
                    smtp_tls=True,
                    # frontend url users visit put into recovery emails
                    recovery_url='https://127.0.0.1:1337/recovery/token',
                    recovery_token_timer=300)

handler_path = 'routes/'

# Server Settings
listen_address = '127.0.0.1'
listen_port = 6559

ssl_options = {
    "certfile": os.path.join(os.path.dirname(__file__),
                             "certs/blinx.in.crt"),
    "keyfile": os.path.join(os.path.dirname(__file__),
                            "certs/blinx.in.key"),
    # NOTE(review): PROTOCOL_TLSv1 is deprecated in modern ssl modules;
    # consider PROTOCOL_TLS_SERVER — confirm against the deployment Python.
    "ssl_version": ssl.PROTOCOL_TLSv1,
}
|
blinxin/blinx_api
|
blinx_api/settings.py
|
Python
|
gpl-2.0
| 1,208
|
[
"VisIt"
] |
e6df5c3c8b034115c87b9a5c213586f5cafd1c9a518395c04b666b8664cce1e5
|
# encoding: utf-8
"Module for displaying information about the system."
import gtk
from gettext import gettext as _
from ase.gui.widgets import pack
singleimage = _("Single image loaded.")
multiimage = _("Image %d loaded (0 - %d).")
ucconst = _("Unit cell is fixed.")
ucvaries = _("Unit cell varies.")
format = _("""\
%s
Number of atoms: %d.
Unit cell:
%8.3f %8.3f %8.3f
%8.3f %8.3f %8.3f
%8.3f %8.3f %8.3f
%s
""")
class QuickInfo(gtk.Window):
    """Small pop-up window summarizing the currently loaded images."""

    def __init__(self, gui):
        gtk.Window.__init__(self)
        self.set_title(_("Quick Info"))
        box = gtk.VBox()
        images = gui.images
        if images.natoms < 1:
            info = _("No atoms loaded.")
        else:
            nimg, natoms, three = images.P.shape
            assert three == 3
            frame = gui.frame
            cell = images.A[frame]
            if nimg > 1:
                # Check whether every image shares this frame's unit cell.
                if all((cell == images.A[i]).all() for i in range(nimg)):
                    celltxt = ucconst
                else:
                    celltxt = ucvaries
            else:
                celltxt = ""
            imgtxt = singleimage if nimg == 1 else multiimage % (frame, nimg - 1)
            info = format % ((imgtxt, natoms) + tuple(cell.flat) + (celltxt,))
        label = gtk.Label(info)
        pack(box, [label])
        closebut = gtk.Button(stock=gtk.STOCK_CLOSE)
        closebut.connect('clicked', self.close)
        pack(box, [closebut], end=True)
        self.add(box)
        box.show()
        self.show()
        self.gui = gui

    def close(self, *args):
        """Destroy the window (wired to the Close button)."""
        self.destroy()
|
grhawk/ASE
|
tools/ase/gui/quickinfo.py
|
Python
|
gpl-2.0
| 1,703
|
[
"ASE"
] |
500ce6a4b83870fc32d3e55fb6131dd7fef96e966e97c46460b051d4599ae96e
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#---------------------------------------------------------------
# ReStructuredText to FoLiA Converter
# by Maarten van Gompel
# KNAW Humanities Cluster &
# Centre for Language and Speech Technology (Radboud University Nijmegen)
# proycon AT anaproy DOT nl
#
# Licensed under GPLv3
#
# This script converts RST to FoLiA format.
#
#----------------------------------------------------------------
import sys
import glob
import gzip
import os
import traceback
from collections import defaultdict
from copy import copy
from docutils import writers, nodes, __version__ as DOCUTILSVERSION
from docutils.core import publish_cmdline, publish_string, default_description
import folia.main as folia
from foliatools import VERSION
# Use the user's default locale for message formatting; if the environment
# specifies an invalid locale, fall back silently to the C locale.
# (Narrowed from a bare `except:` that swallowed every exception.)
import locale
try:
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    pass
class Writer(writers.Writer):
    """docutils Writer that renders a parsed RST document as a FoLiA XML file."""

    # Document ID used when --docid is not given
    DEFAULTID = "untitled"

    # Skeleton of the output document; %(...)s slots are filled by
    # interpolation_dict() from the translator's collected parts.
    TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
%(stylesheet)s
<FoLiA xmlns="http://ilk.uvt.nl/folia" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="%(docid)s" version="2.0.0" generator="docutils-rst2folia-%(version)s">
<metadata type="native">
<annotations>
%(declarations)s
</annotations>
<provenance>
<processor xml:id="proc.rst2folia" name="rst2folia" type="auto" version="%(version)s" folia_version="2.0.0">
<processor xml:id="proc.rst2folia.generator" name="docutils" type="generator" version="%(docutilsversion)s" folia_version="2.0.0"/>
</processor>
</provenance>
%(metadata)s
</metadata>
%(content)s
</FoLiA>
"""

    DEFAULTSTYLESHEET = "folia2html.xsl"

    # Default FoLiA set definition URL per annotation type; overridable
    # via the --sets command-line option (annotationtype:seturl pairs).
    DEFAULTSETS = {
        'text': 'https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/text.foliaset.ttl',
        'division': 'https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/divisions.foliaset.xml',
        'style': 'https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/styles.foliaset.xml',
        'note': 'https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/notes.foliaset.xml',
        'gap': 'https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/gaps.foliaset.xml',
        'term': 'https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/terms.foliaset.xml',
        'definition': 'https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/definitions.foliaset.xml',
        'paragraph': None,
        'sentence': None,
        'string': None,
    }

    #Formats this writer supports
    supported = ('folia',)

    # docutils option-parser specification for the FoLiA-specific CLI options
    settings_spec = (
        'FoLiA-Specific Options',
        None,
        (
            ('Document ID. Default is "%s".' % DEFAULTID, ['--docid'], {'default': DEFAULTID, 'metavar': '<string>'}),
            ('Parent ID. Assign IDs under the specified element, this can be used to merge output back into a larger document', ['--parentid'], {'metavar': '<string>'}),
            ('Parent Type. Assume all new elements start under an element of this type (FoLiA tag), this can be used to merge output back into a larger document, use with --parentid', ['--parenttype'], {'default': 'div', 'metavar': '<string>'}),
            ("Excerpt only. Output only the text node and all elements under it. No standalone document, results may be inserted verbatim into a larger document if used with --parentid/--parenttype and --declare-all", ['--excerpt'], {'default': False, 'action': 'store_true'}),
            ("Declare all possible sets, even if they're not used.", ['--declare-all'], {'default': False, 'action': 'store_true'}),
            ("Strip relative hyperlinks", ['--strip-relative-links'], {'default': False, 'action': 'store_true'}),
            ("Strip all hyperlinks", ['--strip-links'], {'default': False, 'action': 'store_true'}),
            ("Strip all text styling", ['--strip-style'], {'default': False, 'action': 'store_true'}),
            ("Strip all gaps (includes verbatim and code blocks)", ['--strip-gaps'], {'default': False, 'action': 'store_true'}),
            ("Strip all raw content (do not encode as gaps)", ['--strip-raw'], {'default': False, 'action': 'store_true'}),
            ("Strip tables", ['--strip-tables'], {'default': False, 'action': 'store_true'}),
            ("Ignore lineblocks, treat as normal paragraphs", ['--ignore-lineblocks'], {'default': False, 'action': 'store_true'}),
            ("Sets. Comma separated list of annotationtype:seturl pairs. Example: division:https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/divisions.foliaset.xml", ['--sets'],{'default':""}),
            ("Stylesheet. XSL Stylesheet to associate with the document. Defaults to '%s'" % DEFAULTSTYLESHEET, ['--stylesheet'], {'default': "folia2html.xsl",'metavar':'<string>'}),
        )
    )

    # Translator attributes copied onto the writer after the tree walk
    visitor_attributes = ('declarations','metadata','content')

    def translate(self):
        """Walk the document tree with FoLiATranslator and collect its output parts."""
        # Merge user-supplied --sets overrides into the default set mapping
        sets = copy(self.DEFAULTSETS)
        for setassignment in self.document.settings.sets.split(','):
            if setassignment:
                annotationtype,set = setassignment.split(':')
                sets[annotationtype] = set
        self.visitor = FoLiATranslator(self.document, sets)
        self.document.walkabout(self.visitor)
        for attr in self.visitor_attributes:
            setattr(self, attr, getattr(self.visitor, attr))
        self.output = self.apply_template()

    def apply_template(self):
        """Return the final document string (bare content when --excerpt is set)."""
        subs = self.interpolation_dict()
        if self.document.settings.excerpt:
            return "%(content)s" % subs
        else:
            return self.TEMPLATE % subs

    def interpolation_dict(self):
        """Build the substitution mapping used to fill TEMPLATE's %(...)s slots."""
        subs = {}
        for attr in self.visitor_attributes:
            subs[attr] = ''.join(getattr(self, attr)).rstrip('\n')
        subs['encoding'] = self.document.settings.output_encoding
        subs['version'] = VERSION
        subs['docutilsversion'] = DOCUTILSVERSION
        subs['docid'] = self.document.settings.docid
        subs['stylesheet'] = "<?xml-stylesheet type=\"text/xsl\" href=\"" + self.document.settings.stylesheet + "\"?>"
        return subs

    def assemble_parts(self):
        """Expose the collected translator parts through the docutils parts API."""
        writers.Writer.assemble_parts(self)
        for part in self.visitor_attributes:
            self.parts[part] = ''.join(getattr(self, part))
class FoLiATranslator(nodes.NodeVisitor):
    """Docutils node visitor that serialises an RST document tree to FoLiA XML.

    XML fragments (plain strings) are appended to ``self.content`` while
    ``self.path`` maintains a stack of ``(tag, id)`` tuples for the currently
    open FoLiA structure elements.  Inline text and markup is buffered in
    ``self.textbuffer`` and flushed into a ``<t>`` element when the enclosing
    structure element is closed.  Set declarations and document metadata are
    collected separately in ``self.declarations`` and ``self.metadata``.
    """

    def __init__(self, document, sets=None):
        # 'sets' used to be a mutable default argument ({}); a None sentinel
        # avoids accidental state sharing between instances.
        if sets is None:
            sets = {}
        self.textbuffer = []
        self.path = [] #(tag, id) tuples of the current FoLiA path
        self.content = [] #will contain all XML content as strings
        self.metadata = []
        self.declarations = []
        self.id_store = defaultdict( lambda: defaultdict(int) )
        self.docid = document.settings.docid
        self.list_enumerated = [] #2-lists [enumerated?, item count so far]; one entry per nesting level (count used for item labels)
        self.rootdiv = False #create a root div element?
        self.sets = sets
        self.declared = {}
        self.texthandled = False #when True, visit_Text skips the node (an inline handler already consumed it)
        self.footnote_reference = None
        self.footnote_seq_nr = 0
        self.inserttextbreaks = False
        if document.settings.declare_all:
            for key in self.sets:
                self.declare(key)
        else:
            self.declare('text')
        self.striprellinks = document.settings.strip_relative_links
        self.striplinks = document.settings.strip_links
        self.stripstyle = document.settings.strip_style
        self.stripraw = document.settings.strip_raw
        self.stripgaps = document.settings.strip_gaps
        self.striptables = document.settings.strip_tables
        self.ignorelineblocks = document.settings.ignore_lineblocks
        if document.settings.parentid:
            # Output is meant to be merged under an existing element in a
            # larger document; IDs are generated relative to that parent.
            self.parentid = document.settings.parentid
            self.path.append( (document.settings.parenttype, self.parentid ) )
            self.textid = "temporary-container-only"
        else:
            self.textid = self.docid + ".text"
            self.parentid = None
        self.excerpt = document.settings.excerpt
        nodes.NodeVisitor.__init__(self, document)

    ############# HELPERS ###############

    def astext(self):
        # NOTE(review): self.head is never assigned anywhere in this class;
        # calling astext() would raise AttributeError — confirm intent before
        # relying on it (possibly a leftover from the docutils HTML writer).
        return ''.join(self.head + self.content)

    def encode(self, text):
        """Encode special characters in `text` & return."""
        # NOTE(review): quotes are not escaped here; attribute values built
        # elsewhere bypass this method entirely.
        if not isinstance(text, str):
            text = str(text, 'utf-8')
        return text.translate({
            ord('&'): '&',
            ord('<'): '<',
            ord('>'): '>',
        })

    def initstructure(self, tag, **attribs):
        """Generic visit function for structure elements.

        Generates an xml:id, validates FoLiA nesting constraints, pushes the
        element on the path stack and emits its opening tag.  On a constraint
        violation a (None, None) marker is pushed so closestructure() knows to
        skip the matching close.
        """
        #Generate an ID
        if tag == "text":
            id = self.textid
        elif tag == 'note' and attribs['cls'] == 'footnote':
            self.footnote_seq_nr += 1
            id = self.textid + '.footnote.' + str(self.footnote_seq_nr)
        else:
            parenttag = None
            # Find the nearest real (non-skipped) ancestor on the stack.
            for parenttag, parentid in reversed(self.path):
                if parenttag != None:
                    break
            if parenttag is not None:
                id = self.generate_id(parentid, tag)
                parentclass = folia.XML2CLASS[parenttag]
                currentclass = folia.XML2CLASS[tag]
                try:
                    parentclass.accepts(currentclass)
                except ValueError:
                    print("WARNING: Adding " + tag + " to " + parenttag + " would violate FoLiA constraints. Skipping this element!",file=sys.stderr)
                    self.path.append( (None, None) )
                    return
            # NOTE(review): if no non-None ancestor exists, 'id' is left
            # unbound and the line below raises NameError — confirm this can
            # not happen in practice (the path always starts with 'text' or
            # the --parenttype element).
        self.declare(tag)
        self.path.append( (tag, id ) )
        indentation = (len(self.path)-1) * "    "
        o = indentation + "<" + tag + " xml:id=\"" + id + "\""
        if tag == "text" and self.excerpt: #this is the root of our output, add namespace stuff
            o += ' xmlns="http://ilk.uvt.nl/folia" xmlns:xlink="http://www.w3.org/1999/xlink"'
        if attribs:
            for key, value in attribs.items():
                if key == "cls": key = "class"
                o += " " + key + "=\"" + str(value) + "\""
        o += ">\n"
        self.content.append(o)

    def closestructure(self, tag):
        """Generic depart function for structure elements.

        Pops the matching entry from the path stack, flushes any buffered
        text as a <t> child, emits the closing tag and, if a footnote
        reference is pending, the corresponding <ref> element.
        """
        _tag, id = self.path.pop()
        if tag is None or _tag is None: #we skip this one (nesting violates folia constraints)
            return
        elif not tag == _tag:
            raise Exception("Mismatch in closestructure, expected closure for " + tag + ", got " + _tag)
        indentation = len(self.path) * "    "
        o = ""
        # Drop the bare footnote symbol from the text buffer; it is replaced
        # by a structural <ref> element below.
        if self.footnote_reference and self.textbuffer and self.textbuffer[-1].strip() == self.footnote_reference:
            self.textbuffer = self.textbuffer[:-1]
        if self.textbuffer:
            if self.inserttextbreaks:
                o += indentation + "    <t>" + " ".join([x.replace("\n","<br/>").strip() for x in self.textbuffer]) + "</t>\n"
            else:
                o += indentation + "    <t>" + " ".join([x.replace("\n"," ").strip() for x in self.textbuffer]) + "</t>\n"
        o += indentation + "</" + tag + ">\n"
        if self.footnote_reference:
            o += indentation + "<ref id=\"" + self.textid + ".footnote." + self.footnote_reference + "\"><t>[" + self.footnote_reference + "]</t></ref>\n"
            self.footnote_reference = None
        self.textbuffer = []
        self.content.append(o)

    def generate_id(self, parentid, tag ):
        """Generate a sequential xml:id for a new `tag` element under `parentid`."""
        if parentid == "temporary-container-only" and self.parentid:
            # Merging into an external parent (--parentid): count and name
            # under the real parent id.  (Previously the counter was
            # incremented under self.parentid but read back under the
            # temporary container id, so every element got the same ".0"
            # suffix, producing duplicate ids.)
            self.id_store[self.parentid][tag] += 1
            return self.parentid + "." + tag + "." + str(self.id_store[self.parentid][tag])
        else:
            self.id_store[parentid][tag] += 1
            return parentid + "." + tag + "." + str(self.id_store[parentid][tag])

    def rightsibling(self, node):
        """Return the next sibling of `node` in document order, or None."""
        fetch = False
        for sibling in node.traverse(None,1,0,1,0):
            if sibling is node:
                fetch = True
            elif fetch:
                return sibling
        return None

    def ignore_depart(self, node):
        """True if the matching visit marked this node's depart as a no-op."""
        try:
            if node.ignore_depart:
                return True
        except AttributeError:
            return False

    def addstyle(self,node,style):
        """Buffer the node's text wrapped in a <t-style> span (unless --strip-style)."""
        self.texthandled = True
        self.declare('style')
        if self.stripstyle:
            self.textbuffer.append( self.encode(node.astext()) )
        else:
            self.textbuffer.append( '<t-style class="' + style + '">' + self.encode(node.astext()) + '</t-style>' )

    def addlink(self,node,url):
        """Buffer the node's text as a hyperlinked <t-str>, honouring the strip-link options."""
        self.texthandled = True
        absolute = url.lower().startswith('http://') or url.lower().startswith('https://') or url.lower().startswith('ftp://') or url.lower().startswith('file://') or url[0] == '/'
        if self.striplinks or (self.striprellinks and not absolute):
            self.textbuffer.append(self.encode(node.astext()))
        else:
            self.declare('string')
            self.textbuffer.append( '<t-str xlink:type="simple" xlink:href="' + url + '">' + self.encode(node.astext()) + '</t-str>' )

    def addmetadata(self, key, node):
        """Record an RST docinfo field as a FoLiA <meta> element."""
        self.texthandled = True
        self.metadata.append( "    <meta id=\"" + key + "\">" + self.encode(node.astext()) + "</meta>\n" )

    def declare(self, annotationtype):
        """Emit a set declaration for `annotationtype` (once per type).

        FoLiA tag names are first mapped to their annotation-type names so
        they can be looked up in the --sets assignments.
        """
        if annotationtype == 'div':
            annotationtype = 'division'
        elif annotationtype == 's':
            annotationtype = 'sentence'
        elif annotationtype == 'p':
            annotationtype = 'paragraph'
        elif annotationtype == 'def':
            annotationtype = 'definition'
        elif annotationtype == 'item':
            annotationtype = 'list'
        elif annotationtype in ('caption',):
            #nothing to declare
            return
        if annotationtype not in self.declared:
            if annotationtype in self.sets and self.sets[annotationtype]:
                self.declarations.append("    <" + annotationtype + "-annotation set=\"" + self.sets[annotationtype] + "\">\n      <annotator processor=\"proc.rst2folia\" />\n    </" + annotationtype + "-annotation>\n")
            else:
                self.declarations.append("    <" + annotationtype + "-annotation>\n      <annotator processor=\"proc.rst2folia\" />\n    </" + annotationtype + "-annotation>\n")
            self.declared[annotationtype] = True
            if annotationtype == 'gap':
                self.declare('rawcontent')

    def _close_gap(self, node):
        """Close a previously opened <gap>, emitting the node's raw text as CDATA.

        Shared by depart_literal_block/depart_raw/depart_code, which formerly
        carried three identical copies of this body.
        """
        tag = "gap"
        _tag, id = self.path.pop()
        if not tag == _tag:
            raise Exception("Mismatch in closestructure, expected closure for " + tag + ", got " + _tag)
        indentation = len(self.path) * "    "
        o = indentation + "    <content><![CDATA[" + node.astext() + "]]></content>\n"
        o += indentation + "</" + tag + ">\n"
        self.content.append(o)
        self.texthandled = False

    ############# TRANSLATION HOOKS (MAIN STRUCTURE) ################

    def visit_document(self, node):
        self.initstructure('text')

    def depart_document(self, node):
        if self.rootdiv:
            self.closestructure('div')
        self.closestructure('text')

    def visit_paragraph(self, node):
        if node.parent.__class__.__name__ == 'list_item':
            # Only wrap list-item content in <p> when the item holds multiple
            # elements; a lone paragraph is emitted directly in the <item>.
            sibling = self.rightsibling(node)
            if sibling:
                self.initstructure('p')
            else:
                node.ignore_depart = True
        else:
            self.initstructure('p')

    def depart_paragraph(self, node):
        if not self.ignore_depart(node):
            self.closestructure('p')

    def visit_container(self, node):
        self.initstructure('div',cls="division")

    def depart_container(self, node):
        self.closestructure('div')

    def visit_section(self, node):
        self.initstructure('div',cls="section")

    def depart_section(self, node):
        self.closestructure('div')

    def visit_title(self, node):
        # A document-level title implies a root <div> that depart_document
        # must close.
        if node.parent.__class__.__name__ == 'document':
            self.rootdiv = True
            self.initstructure('div',cls="document" if not self.parentid else "section")
        self.initstructure('head')

    def depart_title(self, node):
        self.closestructure('head')

    def visit_subtitle(self, node):
        self.initstructure('head')

    def depart_subtitle(self, node):
        self.closestructure('head')

    def visit_rubric(self, node):
        self.initstructure('head')

    def depart_rubric(self, node):
        self.closestructure('head')

    def visit_bullet_list(self,node):
        self.list_enumerated.append([False,0])
        self.initstructure('list')

    def depart_bullet_list(self,node):
        self.list_enumerated.pop()
        self.closestructure('list')

    def visit_enumerated_list(self,node):
        self.list_enumerated.append([True,0])
        self.initstructure('list')

    def depart_enumerated_list(self,node):
        self.list_enumerated.pop()
        self.closestructure('list')

    def visit_list_item(self,node):
        if self.list_enumerated[-1][0]:
            # Enumerated list: track the running item number as the 'n' label.
            self.list_enumerated[-1][1] += 1
            self.initstructure('item',n=self.list_enumerated[-1][1])
        else:
            self.initstructure('item')

    def depart_list_item(self,node):
        self.closestructure('item')

    def visit_image(self,node):
        self.initstructure('figure',src=node['uri'])

    def depart_image(self,node):
        #parent figure will do the closing if image in figure
        if node.parent.__class__.__name__ != "figure":
            self.closestructure('figure')

    def visit_figure(self,node):
        pass

    def depart_figure(self,node):
        self.closestructure('figure')

    def visit_caption(self,node):
        self.initstructure('caption')

    def depart_caption(self,node):
        self.closestructure('caption')

    def visit_literal_block(self,node):
        self.texthandled = True
        if self.stripgaps:
            # Fixed: this branch previously fell through ('pass') and still
            # opened the gap, which depart_literal_block then never closed
            # under --strip-gaps, corrupting the element stack.  Mirror
            # visit_raw/visit_code and skip the element entirely.
            return
        self.initstructure('gap',cls="verbatim")

    def depart_literal_block(self,node):
        if self.stripgaps:
            self.texthandled = False
            return
        self._close_gap(node)

    def visit_raw(self,node):
        self.texthandled = True
        if self.stripraw:
            return
        self.initstructure('gap',cls="code")

    def depart_raw(self,node):
        if self.stripraw:
            self.texthandled = False
            return
        self._close_gap(node)

    def visit_code(self,node):
        self.texthandled = True
        if self.stripgaps:
            return
        self.initstructure('gap',cls="code")

    def depart_code(self,node):
        if self.stripgaps:
            self.texthandled = False
            return
        self._close_gap(node)

    def visit_block_quote(self, node):
        self.initstructure('quote')

    def depart_block_quote(self, node):
        self.closestructure('quote')

    ############# TRANSLATION HOOKS (TEXT & MARKUP) ################

    def visit_Text(self, node):
        if not self.texthandled:
            self.textbuffer.append( self.encode(node.astext()) )

    def depart_Text(self, node):
        pass

    def visit_strong(self, node):
        self.addstyle(node,"strong")

    def depart_strong(self, node):
        self.texthandled = False

    def visit_emphasis(self, node):
        self.addstyle(node,"emphasis")

    def depart_emphasis(self, node):
        self.texthandled = False

    def visit_literal(self, node):
        self.addstyle(node,"literal")

    def depart_literal(self, node):
        self.texthandled = False

    def visit_reference(self, node):
        # NOTE(review): internal references carry 'refid' rather than
        # 'refuri'; this would raise KeyError for them — confirm they cannot
        # reach this hook.
        self.addlink(node,node.attributes['refuri'])

    def depart_reference(self, node):
        self.texthandled = False

    def visit_target(self, node): #TODO? Seems to work, am I missing something?
        pass

    def depart_target(self, node):
        pass

    def visit_comment(self, node):
        self.texthandled = True

    def depart_comment(self, node):
        self.content.append("<!-- " + self.encode(node.astext()) + " -->\n")
        self.texthandled = False

    ############# TRANSLATION HOOKS (OTHER STRUCTURE) ################

    def visit_footnote(self,node):
        #TODO: handle footnote numbering: http://code.nabla.net/doc/docutils/api/docutils/transforms/references/docutils.transforms.references.Footnotes.html
        self.initstructure('note',cls='footnote')

    def depart_footnote(self,node):
        self.closestructure('note')

    def visit_attention(self,node):
        self.initstructure('note',cls='attention')

    def depart_attention(self,node):
        # Fixed: this previously called initstructure('note'), opening a
        # second note instead of closing the one opened by visit_attention.
        self.closestructure('note')

    def visit_hint(self,node):
        self.initstructure('note',cls='hint')

    def depart_hint(self,node):
        self.closestructure('note')

    def visit_note(self,node):
        self.initstructure('note',cls='note')

    def depart_note(self,node):
        self.closestructure('note')

    def visit_caution(self,node):
        self.initstructure('note',cls='caution')

    def depart_caution(self,node):
        self.closestructure('note')

    def visit_warning(self,node):
        self.initstructure('note',cls='warning')

    def depart_warning(self,node):
        self.closestructure('note')

    def visit_danger(self,node):
        self.initstructure('note',cls='danger')

    def depart_danger(self,node):
        self.closestructure('note')

    def visit_admonition(self,node):
        self.initstructure('note',cls='admonition')

    def depart_admonition(self,node):
        self.closestructure('note')

    def visit_tip(self,node):
        self.initstructure('note',cls='tip')

    def depart_tip(self,node):
        self.closestructure('note')

    def visit_error(self,node):
        self.initstructure('note',cls='error')

    def depart_error(self,node):
        self.closestructure('note')

    def visit_important(self,node):
        self.initstructure('note',cls='important')

    def depart_important(self,node):
        self.closestructure('note')

    def visit_table(self,node):
        if self.striptables:
            self.texthandled = True
            return
        self.initstructure('table')

    def depart_table(self,node):
        if self.striptables:
            self.texthandled = False
            return
        self.closestructure('table')

    def visit_colspec(self,node):
        pass

    def depart_colspec(self,node):
        pass

    def visit_tgroup(self,node):
        pass

    def depart_tgroup(self,node):
        pass

    def visit_tbody(self,node):
        pass

    def depart_tbody(self,node):
        pass

    def visit_thead(self,node):
        pass

    def depart_thead(self,node):
        pass

    def visit_row(self,node):
        if self.striptables:
            return
        else:
            self.initstructure('row')

    def depart_row(self,node):
        if self.striptables:
            return
        else:
            self.closestructure('row')

    def visit_entry(self,node):
        if self.striptables:
            return
        else:
            self.initstructure('cell')

    def depart_entry(self,node):
        if self.striptables:
            return
        else:
            self.closestructure('cell')

    def visit_label(self,node): #citation/footnote label
        self.initstructure('w')

    def depart_label(self,node):
        self.closestructure('w')

    def visit_footnote_reference(self,node): #TODO: doesn't seem to really work as it should yet
        symbol = node.astext()
        if symbol in ('#','*'):
            raise NotImplementedError("Wildcard references [#] [*] are currently not yet supported by rst2folia") #TODO: later
        self.footnote_reference = symbol.strip()

    def depart_footnote_reference(self,node):
        pass

    def visit_title_reference(self, node):
        self.addlink(node,"#") #TODO: title link points to nowhere now

    def depart_title_reference(self, node):
        self.texthandled = False

    ############# TRANSLATION HOOKS (METADATA, rst-specific fields) ################

    def visit_docinfo(self, node):
        pass

    def depart_docinfo(self, node):
        pass

    def visit_authors(self, node):
        pass

    def depart_authors(self, node):
        pass

    def visit_author(self, node):
        self.addmetadata('author', node)

    def depart_author(self, node):
        self.texthandled = False

    def visit_date(self, node):
        self.addmetadata('date', node)

    def depart_date(self, node):
        self.texthandled = False

    def visit_contact(self, node):
        self.addmetadata('contact', node)

    def depart_contact(self, node):
        self.texthandled = False

    def visit_status(self, node):
        self.addmetadata('status', node)

    def depart_status(self, node):
        self.texthandled = False

    def visit_version(self, node):
        self.addmetadata('version', node)

    def depart_version(self, node):
        self.texthandled = False

    def visit_copyright(self, node):
        self.addmetadata('copyright', node)

    def depart_copyright(self, node):
        self.texthandled = False

    def visit_organization(self, node):
        self.addmetadata('organization', node)

    def depart_organization(self, node):
        self.texthandled = False

    def visit_address(self, node):
        self.addmetadata('address', node)

    def depart_address(self, node):
        self.texthandled = False

    def visit_problematic(self, node):
        print("WARNING: RST parser encountered a problematic node, skipping: ", node.astext(),file=sys.stderr)
        self.texthandled = True

    def depart_problematic(self,node):
        self.texthandled = False

    def visit_system_message(self, node):
        print("WARNING from RST parser: ", node.astext(),file=sys.stderr)
        self.texthandled = True

    def depart_system_message(self,node):
        self.texthandled = False

    def visit_substitution_definition(self, node):
        print("WARNING substitution definition encountered, but not converted: ", node.astext(),file=sys.stderr)
        self.texthandled = True

    def depart_substitution_definition(self,node):
        self.texthandled = False

    def visit_line_block(self, node):
        if self.ignorelineblocks:
            self.initstructure('p')
        else:
            self.initstructure('div')

    def depart_line_block(self, node):
        if self.ignorelineblocks:
            self.closestructure('p')
        else:
            self.closestructure('div')

    def visit_line(self, node):
        if self.ignorelineblocks:
            pass
        else:
            self.initstructure('part')

    def depart_line(self, node):
        if self.ignorelineblocks:
            pass
        else:
            self.closestructure('part')
            self.content.append('<br/>')

    def visit_transition(self, node):
        pass

    def depart_transition(self, node):
        self.content.append("<br/>")

    def visit_subscript(self, node):
        self.addstyle(node,"subscript")

    def depart_subscript(self, node):
        self.texthandled = False

    def visit_superscript(self, node):
        self.addstyle(node,"superscript")

    def depart_superscript(self, node):
        self.texthandled = False

    def visit_math(self, node):
        self.addstyle(node,"math")

    def depart_math(self, node):
        self.texthandled = False

    def visit_definition_list(self, node):
        pass

    def depart_definition_list(self, node):
        pass

    def visit_definition_list_item(self, node):
        self.initstructure('entry')

    def depart_definition_list_item(self, node):
        self.closestructure('entry')

    def visit_term(self, node):
        self.initstructure('term')

    def depart_term(self, node):
        self.closestructure('term')

    def visit_classifier(self, node):
        # Fixed: previously assigned a local 'texthandled' variable instead of
        # the instance flag, so the classifier text leaked into the output.
        self.texthandled = True
        print("WARNING: Classifiers in definition_lists are currently not convertable yet, skipping: ", node.astext(),file=sys.stderr)

    def depart_classifier(self, node):
        self.texthandled = False

    def visit_definition(self, node):
        self.initstructure('def')

    def depart_definition(self, node):
        self.closestructure('def')

    def visit_legend(self, node):
        self.initstructure('div')

    def depart_legend(self, node):
        self.closestructure('div')
def main():
    """Command-line entry point: convert reStructuredText input to FoLiA."""
    publish_cmdline(
        writer=Writer(),
        writer_name='folia',
        description='Generates FoLiA documents from reStructuredText. ' + default_description,
    )
def rst2folia(srcstring):
    """Convert a reStructuredText string to a FoLiA XML string."""
    overrides = {'output_encoding': 'unicode'}
    return publish_string(srcstring, writer=Writer(), settings_overrides=overrides)
def flat_convert(filename, targetfilename, *args, **kwargs):
    """Wrapper function for use by FLAT's converter mechanism.

    Reads `filename` as UTF-8 RST, converts it and writes the FoLiA result
    to `targetfilename`.  Returns True on success, or the tuple
    (False, errormessage) on any failure (with a traceback logged to stderr).
    """
    try:
        with open(filename, 'r', encoding='utf-8') as infile:
            source = infile.read()
        converted = rst2folia(source)
        with open(targetfilename, 'w', encoding='utf-8') as outfile:
            outfile.write(converted)
    except Exception as error:
        print(error, file=sys.stderr)
        _, _, tb = sys.exc_info()
        traceback.print_tb(tb, limit=50, file=sys.stderr)
        return False, str(error)
    return True
# Invoke the command-line conversion only when run as a script, not on import.
if __name__ == '__main__':
    main()
|
proycon/foliatools
|
foliatools/rst2folia.py
|
Python
|
gpl-3.0
| 30,332
|
[
"VisIt"
] |
2f5e44dd72d13168297c478feb7101748430e24376c73b1e06ef5c3dd7aad6d5
|
#
# @file TestSBMLError.py
# @brief SBMLError unit tests, C++ version
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSBMLError.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestSBMLError(unittest.TestCase):
  # NOTE: this file is machine-generated from TestSBMLError.cpp (see the file
  # header); keep edits minimal so regeneration does not lose them.

  def test_SBMLError_create(self):
    """Exercise SBMLError construction for several error ids and L/V pairs,
    checking id, severity, category and their string renderings."""
    # Default construction.
    error = libsbml.SBMLError()
    self.assert_( error != None )
    error = None
    # Error-id-only construction: severity/category come from the error table.
    error = libsbml.SBMLError(libsbml.EmptyListInReaction)
    self.assert_( error.getErrorId() == libsbml.EmptyListInReaction )
    self.assert_( error.getSeverity() == libsbml.LIBSBML_SEV_ERROR )
    self.assert_( error.getSeverityAsString() == "Error" )
    self.assert_( error.getCategory() == libsbml.LIBSBML_CAT_SBML )
    self.assert_( error.getCategoryAsString() == "General SBML conformance" )
    error = None
    # Same error id can carry different severities per SBML level/version.
    error = libsbml.SBMLError(libsbml.OverdeterminedSystem,2,1)
    self.assert_( error.getErrorId() == libsbml.OverdeterminedSystem )
    self.assert_( error.getSeverity() == libsbml.LIBSBML_SEV_WARNING )
    self.assert_( error.getSeverityAsString() == "Warning" )
    self.assert_( error.getCategory() == libsbml.LIBSBML_CAT_SBML )
    self.assert_( error.getCategoryAsString() == "General SBML conformance" )
    error = None
    error = libsbml.SBMLError(libsbml.OffsetNoLongerValid,2,2)
    self.assert_( error.getErrorId() == libsbml.OffsetNoLongerValid )
    self.assert_( error.getSeverity() == libsbml.LIBSBML_SEV_ERROR )
    self.assert_( error.getSeverityAsString() == "Error" )
    self.assert_( error.getCategory() == libsbml.LIBSBML_CAT_GENERAL_CONSISTENCY )
    self.assert_( error.getCategoryAsString() == "SBML component consistency" )
    error = None
    error = libsbml.SBMLError(libsbml.NoSBOTermsInL1,2,2)
    self.assert_( error.getErrorId() == libsbml.NoSBOTermsInL1 )
    self.assert_( error.getSeverity() == libsbml.LIBSBML_SEV_WARNING )
    self.assert_( error.getSeverityAsString() == "Warning" )
    self.assert_( error.getCategory() == libsbml.LIBSBML_CAT_SBML_L1_COMPAT )
    self.assert_( error.getCategoryAsString() == "Translation to SBML L1V2" )
    error = None
    error = libsbml.SBMLError(libsbml.DisallowedMathMLEncodingUse,2,2)
    self.assert_( error.getErrorId() == libsbml.DisallowedMathMLEncodingUse )
    self.assert_( error.getSeverity() == libsbml.LIBSBML_SEV_ERROR )
    self.assert_( error.getSeverityAsString() == "Error" )
    self.assert_( error.getCategory() == libsbml.LIBSBML_CAT_MATHML_CONSISTENCY )
    self.assert_( error.getShortMessage() == "Disallowed use of MathML 'encoding' attribute" )
    error = None
    # MathML rules do not apply to SBML Level 1.
    error = libsbml.SBMLError(libsbml.DisallowedMathMLEncodingUse,1,2)
    self.assert_( error.getErrorId() == libsbml.DisallowedMathMLEncodingUse )
    self.assert_( error.getSeverity() == libsbml.LIBSBML_SEV_NOT_APPLICABLE )
    self.assert_( error.getCategory() == libsbml.LIBSBML_CAT_MATHML_CONSISTENCY )
    error = None
    # Unknown/internal error ids are fatal.
    error = libsbml.SBMLError(libsbml.UnknownError,2,4)
    self.assert_( error.getErrorId() == libsbml.UnknownError )
    self.assert_( error.getSeverity() == libsbml.LIBSBML_SEV_FATAL )
    self.assert_( error.getSeverityAsString() == "Fatal" )
    self.assert_( error.getCategory() == libsbml.LIBSBML_CAT_INTERNAL )
    self.assert_( error.getShortMessage() == "Unknown internal libSBML error" )
    error = None
    pass
def suite():
  """Assemble and return the test suite for this module."""
  # Renamed the local so it no longer shadows this function's own name.
  # NOTE(review): unittest.makeSuite is deprecated in newer Pythons; kept for
  # behavioural parity with the generated test harness.
  tests = unittest.TestSuite()
  tests.addTest(unittest.makeSuite(TestSBMLError))
  return tests
# Run the suite and translate the outcome into a process exit code
# (0 = all tests passed, 1 = failure), as expected by the build harness.
if __name__ == "__main__":
  if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
    sys.exit(0)
  else:
    sys.exit(1)
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/sbml/TestSBMLError.py
|
Python
|
gpl-3.0
| 4,822
|
[
"VisIt"
] |
9982a3f2b97f0e60207e795fb8a7c0c527be0554064a65d5dbcf2fab6b4bd95a
|
#### filter fasta by cutoff .py ####
#
# input a gene predictions (fasta) file
# and a set of BLAST validation data
# (gene predictions blasted against DB)
# and a list of KOG <-> gene ID mappings
# from the reference.
#
# output a fasta file
# containing only those predicted genes
# which have hits to the validation DB
# above a threshold
# appending KOG IDs where available
# for secondary checks.
#
# Author: Joe Parker, RBG Kew, 2016
# joe.parker@kew.org
#
# Usage: python filter_fasta_by_cutoff.ply
# (no-args - inputs hardcoded as below)
#
####################################
from Bio import SeqIO
## hardcoded inputs
# holds the BLAST validation data, column 'filter' contains [TRUE|FALSE]
cutoff_file_aa = '/Users/joeparker/Downloads/phylogenome_wales/SNAP-predicted/all_R7R9_thaliana.genetimes.filtered.cutoff.aa.rdata'
cutoff_file_nt = '/Users/joeparker/Downloads/phylogenome_wales/SNAP-predicted/all_R7R9_thaliana.genetimes.filtered.cutoff.nt.rdata'
# the predicted genes with TAIR10 gene names
predicted_aa = '/Users/joeparker/Downloads/phylogenome_wales/SNAP-predicted/SNAP.export.aa.filtered.fa'
predicted_nt = '/Users/joeparker/Downloads/phylogenome_wales/SNAP-predicted/SNAP.export.dna.filtered.fa'
# file (tab-delimited) from JGI containing KOG IDs for TAIR10 A thaliana genes
kog_file = '/Users/joeparker/Downloads/phylogenome_wales/JGI_data/mart_export_A_thaliana_TAIR10.tdf'
# output files for fasta NT and AA
output_aa = '/Users/joeparker/Downloads/phylogenome_wales/SNAP-predicted/SNAP.export.aa.filtered.cutoff.TAIR.KOG.fa'
output_nt = '/Users/joeparker/Downloads/phylogenome_wales/SNAP-predicted/SNAP.export.nt.filtered.cutoff.TAIR.KOG.fa'


def _load_cutoff(path, filter_column):
    """Map unique gene ID (column 7) -> putative TAIR10 gene (column 0) for
    rows whose filter column reads 'TRUE'.

    NOTE(review): the original script read a fixed column 16 for the AA file
    but the LAST column for the NT file; both call sites preserve that
    behaviour — confirm whether both files really differ in layout.
    """
    cutoff = {}
    with open(path, 'r') as handle:
        for line in handle:
            fields = line.rstrip().split("\t")
            if fields[filter_column] == 'TRUE':
                cutoff[fields[7]] = fields[0]
    return cutoff


def _load_kogs(path):
    """Map TAIR10 gene ID (column 0) -> KOG ID (first 7 chars of last column)."""
    kogs = {}
    with open(path) as handle:
        for line in handle:
            fields = line.split("\t")
            kogs[fields[0]] = fields[-1][:7]
    return kogs


def _select_and_relabel(fasta_path, cutoff, kogs):
    """Keep only sequences passing the cutoff filter, appending the putative
    TAIR10 ID and KOG ID (or 'no_kog_value') to each sequence ID."""
    selected = []
    for seq in SeqIO.parse(fasta_path, 'fasta'):
        if seq.id in cutoff:
            tair_id = cutoff[seq.id]
            kog_id = kogs.get(tair_id, 'no_kog_value')
            seq.id = seq.id + '|' + tair_id + '|' + kog_id
            selected.append(seq)
    return selected


## process the data
# hashes holding the filters' output: unique gene ID -> putative TAIR10 gene
cutoff_aa = _load_cutoff(cutoff_file_aa, 16)
cutoff_nt = _load_cutoff(cutoff_file_nt, -1)
# KOG IDs for the putative genes
kogs = _load_kogs(kog_file)
# select and relabel the predicted sequences
aln_aa = _select_and_relabel(predicted_aa, cutoff_aa, kogs)
aln_nt = _select_and_relabel(predicted_nt, cutoff_nt, kogs)
# write output
SeqIO.write(aln_aa, output_aa, 'fasta')
SeqIO.write(aln_nt, output_nt, 'fasta')
|
lonelyjoeparker/real-time-phylogenomics
|
wales_analyses/phylogenome_wales/SNAP-predicted/filter_fasta_by_cutoff.py
|
Python
|
gpl-2.0
| 3,468
|
[
"BLAST"
] |
4900dcd40140ecc34986dfa0bcdc62c5d7bc64bfe515c2311834f941fdf2e7fb
|
import numpy as np;
from sklearn.svm import SVC;
import sklearn.preprocessing as preprocessing;
from sklearn.externals import joblib;
from sklearn.metrics import recall_score, accuracy_score, confusion_matrix, classification_report
import os
import sys
import dataset_manupulation as dm
import utils
import arff
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
# NOTE(review): this script is Python 2 (bare `print` statement below).
featureset = "ComParE_Functionals"
#path setup
root_dir = os.path.realpath('/media/fabio/DATA/Work/Snoring/Snore_dist')
targePath = os.path.join(root_dir, 'gmmUbmSvm','snoring_class')
listPath = os.path.join(root_dir, 'dataset')
featPath = os.path.join(root_dir, 'arff', "ComParE2017_Snore.ComParE.")
ubmsPath = os.path.join(targePath, featureset, "ubms")
supervecPath = os.path.join(targePath, featureset, "supervectors")
scoresPath = os.path.join(targePath, featureset, "score")
snoreClassPath=os.path.join(targePath, featureset, "score");#used for save best c-best gamma-best nmix so that extract_supervector_test.py and test.py can read it
# Redirect stdout/stderr to log files for the whole run.
sys.stdout = open(os.path.join(scoresPath,'gridsearch.txt'), 'w') #log to a file
print "experiment: "+targePath; #to have the reference to experiments in text files
sys.stderr = open(os.path.join(scoresPath,'gridsearch_err.txt'), 'w') #log to a file
#variables inizialization
nFolds = 1;
# Hyperparameter grids: powers of two, the conventional libsvm search ranges.
C_range = 2.0 ** np.arange(-5, 15+2, 2); # libsvm range
gamma_range = 2.0 ** np.arange(-15, 3+2, 2); # libsvm range
# GMM mixture counts to evaluate: 1, 2, 4, ..., 64.
mixtures = 2**np.arange(0, 7, 1);
scores = np.zeros((mixtures.shape[0], nFolds));
cBestValues = np.zeros((mixtures.shape[0], nFolds));
gBestValues = np.zeros((mixtures.shape[0], nFolds));
mIdx = 0;
#LOAD DATASET
trainPath = featPath + "train.arff"
data = arff.load(open(trainPath))
trainset = data['data']
develPath = featPath + "devel.arff"
data = arff.load(open(develPath))
develset = data['data']
del data
# Split each ARFF row into (filename, features..., label); note that pop()
# mutates the rows in place, stripping name and label from the feature lists.
y = []
y_train_lab = []
for seq in trainset:
    name = str(seq.pop(0)).replace('.wav','.npy')
    y.append(name)
    label = seq.pop(-1)
    y_train_lab.append(utils.labToClass(label))
yd = []
y_devel_lab = []
for seq in develset:
    name = str(seq.pop(0)).replace('.wav','.npy')
    yd.append(name)
    label = seq.pop(-1)
    y_devel_lab.append(utils.labToClass(label))
def compute_score(predictions, labels):
    """Compute classification metrics for one grid-search evaluation.

    Parameters: `predictions` and `labels` are sequences of values coercible
    to int class labels.
    Returns a tuple (accuracy, UAR, confusion_matrix, y_pred) where UAR is
    the macro-averaged recall (unweighted average recall, the ComParE
    challenge metric) and y_pred is the int-coerced prediction list.
    """
    print("compute_score")
    # Coerce both vectors to plain ints so sklearn treats them as class labels.
    y_pred = [int(p) for p in predictions]
    y_true = [int(l) for l in labels]
    A = accuracy_score(y_true, y_pred)
    UAR = recall_score(y_true, y_pred, average='macro')
    CM = confusion_matrix(y_true, y_pred)
    return A, UAR, CM, y_pred
# Grid search: for every mixture count, every fold and every subfold,
# train an RBF SVM for each (C, gamma) pair on the GMM supervectors and
# accumulate the devel-set UAR into cGammaScores.
for m in mixtures:
    print("Mixture: " + str(m));
    sys.stdout.flush()
    # NOTE(review): mixScores and fIdx are initialised but never used below.
    mixScores = np.zeros((nFolds*(nFolds-1), 1));
    fIdx = 0;
    for fold in range(0,nFolds):
        cGammaScores = np.zeros((C_range.shape[0], gamma_range.shape[0])); # reset per-(C, gamma) score matrix
        print("Fold: " + str(fold));
        sys.stdout.flush()
        curSupervecPath = os.path.join(supervecPath, "trainset_" + str(fold));
        for sf in range(0,nFolds): # for each devset/trainset subfold pair
            print("Subfold: " + str(sf));
            sys.stdout.flush()
            curSupervecSubPath = os.path.join(curSupervecPath, str(m)); # supervectors for m mixtures
            trainFeatures = utils.readfeatures(curSupervecSubPath, y); # all supervector features for the train list
            trainClassLabels = y_train_lab
            devFeatures = utils.readfeatures(curSupervecSubPath, yd); # all supervector features for the devel list
            devClassLabels = y_devel_lab
            #TODO: STORE Metrics
            cIdx = 0;
            for C in C_range:
                gIdx = 0;
                for gamma in gamma_range:
                    # Scale to [-1, 1] on the training data, then fit and score.
                    scaler = preprocessing.MinMaxScaler(feature_range=(-1,1));
                    scaler.fit(trainFeatures);
                    svm = SVC(C=C, kernel='rbf', gamma=gamma, class_weight='auto');
                    svm.fit(scaler.transform(trainFeatures), trainClassLabels); # normalization and fitting
                    predLabels = svm.predict(scaler.transform(devFeatures));
                    A, UAR, ConfMatrix, class_pred = compute_score(predLabels, y_devel_lab)
                    # UAR is summed over subfolds for this (C, gamma) cell.
                    cGammaScores[cIdx,gIdx] += UAR;
                    gIdx += 1;
                cIdx += 1;
        idxs = np.unravel_index(cGammaScores.argmax(), cGammaScores.shape); # (row, col) of the best score
        # NOTE(review): fold runs 0..nFolds-1, so 'fold-1' wraps to -1 on
        # the first iteration (writes the LAST column) — looks like an
        # off-by-one; verify against the intended fold indexing.
        cBestValues[mIdx,fold-1] = C_range[idxs[0]]; # best C for this fold and mixture count
        gBestValues[mIdx,fold-1] = gamma_range[idxs[1]]; # best gamma for this fold and mixture count
        scores[mIdx,fold-1] = cGammaScores.max();
    mIdx += 1;
# ---- Report results and persist the best hyper-parameters ----
# Average the best per-fold UAR for each mixture count.
scoresAvg = scores.mean(axis=1)
print("\n**** Results ****\n")
for mIdx, score in enumerate(scoresAvg):
    print(str(mixtures[mIdx]) + " " + str(score))
idx_max_score = scoresAvg.argmax()
# FIX: these two lines were Python-2 print *statements* while the rest of
# the file uses the print() function — unified on print(); also fixed the
# "vale" -> "value" typo in the report strings.
print("best value of c for " + str(mixtures[idx_max_score]) + " gaussian : " + str(cBestValues[idx_max_score]))
print("best value of g for " + str(mixtures[idx_max_score]) + " gaussian : " + str(gBestValues[idx_max_score]))
# save best c-best gamma-best nmix
joblib.dump(mixtures[idx_max_score], os.path.join(snoreClassPath, "nmix"))
joblib.dump(cBestValues[idx_max_score], os.path.join(snoreClassPath, "cBestValues"))
joblib.dump(gBestValues[idx_max_score], os.path.join(snoreClassPath, "gBestValues"))
|
vespero89/Snoring_Challenge
|
Supervectors/gridsearch_FUNCT.py
|
Python
|
gpl-3.0
| 7,135
|
[
"Gaussian"
] |
9ecb7300909676ac922df9b68f5bbd240efad9af49e25009afa43f26f4d07e73
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
""" Enable using one or more Storage Elements

Marks the Read/Write/Check access statuses of the given SEs "Active".
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
# Defaults: enable all three access types, no site filter, send mail.
read = True
write = True
check = True
site = ''
mute = False
Script.setUsageMessage( """
Enable using one or more Storage Elements
Usage:
%s SE1 [SE2 ...]
""" % Script.scriptName )
Script.registerSwitch( "r" , "AllowRead" , " Allow only reading from the storage element" )
Script.registerSwitch( "w" , "AllowWrite", " Allow only writing to the storage element" )
Script.registerSwitch( "k" , "AllowCheck", " Allow only check access to the storage element" )
Script.registerSwitch( "m" , "Mute" , " Do not send email" )
Script.registerSwitch( "S:", "Site=" , " Allow all SEs associated to site" )
Script.parseCommandLine( ignoreErrors = True )
ses = Script.getPositionalArgs()
# Restrict the default (all three) access flags per the switches given.
for switch in Script.getUnprocessedSwitches():
  if switch[0].lower() == "r" or switch[0].lower() == "allowread":
    write = False
    check = False
  if switch[0].lower() == "w" or switch[0].lower() == "allowwrite":
    read = False
    check = False
  if switch[0].lower() == "k" or switch[0].lower() == "allowcheck":
    read = False
    write = False
  if switch[0].lower() == "m" or switch[0].lower() == "mute":
    mute = True
  if switch[0] == "S" or switch[0].lower() == "site":
    site = switch[1]
#from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC import gConfig, gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
#csAPI = CSAPI()
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
# Sanity check: the Configuration Service must be reachable.
setup = gConfig.getValue( '/DIRAC/Setup', '' )
if not setup:
  print 'ERROR: Could not contact Configuration Service'
  exitCode = 2
  DIRAC.exit( exitCode )
# The proxy owner is recorded as the author of the status change.
res = getProxyInfo()
if not res[ 'OK' ]:
  gLogger.error( 'Failed to get proxy information', res[ 'Message' ] )
  DIRAC.exit( 2 )
userName = res['Value'].get( 'username' )
if not userName:
  gLogger.error( 'Failed to get username for proxy' )
  DIRAC.exit( 2 )
# -S/--Site expands to all SEs attached to that site.
if site:
  res = Resources().getStorageElements( site )
  if not res[ 'OK' ]:
    gLogger.error( 'The provided site (%s) is not known.' % site )
    DIRAC.exit( -1 )
  ses.extend( res[ 'Value' ] )
if not ses:
  gLogger.error( 'There were no SEs provided' )
  DIRAC.exit()
readAllowed = []
writeAllowed = []
checkAllowed = []
resourceStatus = ResourceStatus()
# Current statuses of all requested SEs, fetched in one call.
res = resourceStatus.getStorageStatus( ses )
if not res[ 'OK' ]:
  gLogger.error( 'Storage Element %s does not exist' % ses )
  DIRAC.exit( -1 )
# Audit message recorded with every RSS status change.
reason = 'Forced with dirac-admin-allow-se by %s' % userName
for se, seOptions in res[ 'Value' ].items():
  # FIX: the original chained assignment (resW = resC = resR = {...})
  # bound all three names to ONE shared dict, so the ARCHIVE branch's
  # resR['OK'] = True silently flipped resW/resC as well. Harmless today
  # only because the final test below is an OR, but a latent trap — use
  # three independent dicts.
  resR = { 'OK' : False }
  resW = { 'OK' : False }
  resC = { 'OK' : False }
  # InActive is used on the CS model, Banned is the equivalent in RSS
  if read and seOptions.has_key( 'ReadAccess' ):
    if not seOptions[ 'ReadAccess' ] in [ "InActive", "Banned", "Probing", "Degraded" ]:
      gLogger.notice( 'Read option for %s is %s, instead of %s' %
                      ( se, seOptions[ 'ReadAccess' ], [ "InActive", "Banned", "Probing", "Degraded" ] ) )
      gLogger.notice( 'Try specifying the command switches' )
      continue
    if 'ARCHIVE' in se:
      # Archive SEs must never become readable; report success without acting.
      gLogger.notice( '%s is not supposed to change Read status to Active' % se )
      resR[ 'OK' ] = True
    else:
      resR = resourceStatus.setStorageElementStatus( se, 'ReadAccess', 'Active', reason, userName )
      if not resR['OK']:
        gLogger.error( "Failed to update %s read access to Active" % se )
      else:
        gLogger.notice( "Successfully updated %s read access to Active" % se )
        readAllowed.append( se )
  # InActive is used on the CS model, Banned is the equivalent in RSS
  if write and seOptions.has_key( 'WriteAccess' ):
    if not seOptions[ 'WriteAccess' ] in [ "InActive", "Banned", "Probing", "Degraded" ]:
      gLogger.notice( 'Write option for %s is %s, instead of %s' %
                      ( se, seOptions[ 'WriteAccess' ], [ "InActive", "Banned", "Probing", "Degraded" ] ) )
      gLogger.notice( 'Try specifying the command switches' )
      continue
    resW = resourceStatus.setStorageElementStatus( se, 'WriteAccess', 'Active', reason, userName )
    if not resW['OK']:
      gLogger.error( "Failed to update %s write access to Active" % se )
    else:
      gLogger.notice( "Successfully updated %s write access to Active" % se )
      writeAllowed.append( se )
  # InActive is used on the CS model, Banned is the equivalent in RSS
  if check and seOptions.has_key( 'CheckAccess' ):
    if not seOptions[ 'CheckAccess' ] in [ "InActive", "Banned", "Probing", "Degraded" ]:
      gLogger.notice( 'Check option for %s is %s, instead of %s' %
                      ( se, seOptions[ 'CheckAccess' ], [ "InActive", "Banned", "Probing", "Degraded" ] ) )
      gLogger.notice( 'Try specifying the command switches' )
      continue
    resC = resourceStatus.setStorageElementStatus( se, 'CheckAccess', 'Active', reason, userName )
    if not resC['OK']:
      gLogger.error( "Failed to update %s check access to Active" % se )
    else:
      gLogger.notice( "Successfully updated %s check access to Active" % se )
      checkAllowed.append( se )
  # Abort if none of the requested updates succeeded for this SE.
  if not( resR['OK'] or resW['OK'] or resC['OK'] ):
    DIRAC.exit( -1 )
# Nothing changed at all -> error exit.
if not ( writeAllowed or readAllowed or checkAllowed ):
  gLogger.info( "No storage elements were allowed" )
  DIRAC.exit( -1 )
if mute:
  gLogger.notice( 'Email is muted by script switch' )
  DIRAC.exit( 0 )
# Build and send a notification mail summarising the changes.
subject = '%s storage elements allowed for use' % len( writeAllowed + readAllowed + checkAllowed )
addressPath = 'EMail/Production'
address = Operations().getValue( addressPath, '' )
body = ''
if read:
  body = "%s\n\nThe following storage elements were allowed for reading:" % body
  for se in readAllowed:
    body = "%s\n%s" % ( body, se )
if write:
  body = "%s\n\nThe following storage elements were allowed for writing:" % body
  for se in writeAllowed:
    body = "%s\n%s" % ( body, se )
if check:
  body = "%s\n\nThe following storage elements were allowed for checking:" % body
  for se in checkAllowed:
    body = "%s\n%s" % ( body, se )
# Without a configured address, just print the report and exit cleanly.
if not address:
  gLogger.notice( "'%s' not defined in Operations, can not send Mail\n" % addressPath, body )
  DIRAC.exit( 0 )
res = diracAdmin.sendMail( address, subject, body )
gLogger.notice( 'Notifying %s' % address )
if res[ 'OK' ]:
  gLogger.notice( res[ 'Value' ] )
else:
  gLogger.notice( res[ 'Message' ] )
DIRAC.exit( 0 )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Sbalbp/DIRAC
|
DataManagementSystem/scripts/dirac-admin-allow-se.py
|
Python
|
gpl-3.0
| 7,128
|
[
"DIRAC"
] |
c74b6b100972fd8f12076c0a0d26180cc088e3566dd1b2ff33ac8dd33f2854bf
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic NMR shielding tensor
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import _vhf
from pyscf.scf import cphf
from pyscf.scf import _response_functions
from pyscf.data import nist
def dia(nmrobj, gauge_orig=None, shielding_nuc=None, dm0=None):
    '''Diamagnetic part of NMR shielding tensors.

    Args:
        nmrobj : NMR object wrapping a converged mean-field calculation.
        gauge_orig : common gauge origin, or None for GIAO.
        shielding_nuc : indices of the nuclei to compute shieldings for.
        dm0 : ground-state AO density matrix.

    Returns:
        (len(shielding_nuc), 3, 3) array of diamagnetic tensors.

    See also J. Olsen et al., Theor. Chem. Acc., 90, 421 (1995)
    '''
    if shielding_nuc is None: shielding_nuc = nmrobj.shielding_nuc
    if dm0 is None: dm0 = nmrobj._scf.make_rdm1()
    mol = nmrobj.mol
    mf = nmrobj._scf
    # Variants the plain non-relativistic formulation cannot handle.
    if getattr(mf, 'with_x2c', None):
        raise NotImplementedError('X2C for NMR shielding')
    if getattr(mf, 'with_qmmm', None):
        raise NotImplementedError('NMR shielding with QM/MM')
    if getattr(mf, 'with_solvent', None):
        raise NotImplementedError('NMR shielding with Solvent')
    if gauge_orig is not None:
        # Note the side effects of set_common_origin
        mol.set_common_origin(gauge_orig)
    msc_dia = []
    for n, atm_id in enumerate(shielding_nuc):
        with mol.with_rinv_origin(mol.atom_coord(atm_id)):
            # a11part = (B dot) -1/2 frac{\vec{r}_N}{r_N^3} r (dot mu)
            if gauge_orig is None:
                h11 = mol.intor('int1e_giao_a11part', comp=9)
            else:
                h11 = mol.intor('int1e_cg_a11part', comp=9)
            e11 = numpy.einsum('xij,ij->x', h11, dm0).reshape(3,3)
            # Subtract the isotropic (trace) contribution.
            e11 = e11 - numpy.eye(3) * e11.trace()
            if gauge_orig is None:
                # Extra GIAO term.
                h11 = mol.intor('int1e_a01gp', comp=9)
                e11 += numpy.einsum('xij,ij->x', h11, dm0).reshape(3,3)
            msc_dia.append(e11)
    return numpy.array(msc_dia).reshape(-1, 3, 3)
def para(nmrobj, mo10=None, mo_coeff=None, mo_occ=None, shielding_nuc=None):
    '''Paramagnetic part of NMR shielding tensors.

    Returns:
        (msc_para, para_vir, para_occ): the total paramagnetic tensors
        and their virtual/occupied-block decomposition, each of shape
        (len(shielding_nuc), 3, 3).
    '''
    if mo_coeff is None: mo_coeff = nmrobj._scf.mo_coeff
    if mo_occ is None: mo_occ = nmrobj._scf.mo_occ
    if shielding_nuc is None: shielding_nuc = nmrobj.shielding_nuc
    if mo10 is None: mo10 = nmrobj.solve_mo1()[0]
    mol = nmrobj.mol
    para_vir = numpy.empty((len(shielding_nuc),3,3))
    para_occ = numpy.empty((len(shielding_nuc),3,3))
    occidx = mo_occ > 0
    viridx = mo_occ == 0
    orbo = mo_coeff[:,occidx]
    orbv = mo_coeff[:,viridx]
    # First-order density matrices split into occ-occ and vir-occ blocks.
    # *2 for double occupancy
    dm10_oo = numpy.asarray([reduce(numpy.dot, (orbo, x[occidx]*2, orbo.T.conj())) for x in mo10])
    dm10_vo = numpy.asarray([reduce(numpy.dot, (orbv, x[viridx]*2, orbo.T.conj())) for x in mo10])
    for n, atm_id in enumerate(shielding_nuc):
        mol.set_rinv_origin(mol.atom_coord(atm_id))
        # H^{01} = 1/2(A01 dot p + p dot A01) => (a01p + c.c.)/2 ~ <a01p>
        # Im[A01 dot p] = Im[vec{r}/r^3 x vec{p}] = Im[-i p (1/r) x p] = -p (1/r) x p
        h01i = mol.intor_asymmetric('int1e_prinvxp', 3) # = -Im[H^{01}]
        # <H^{01},MO^1> = - Tr(Im[H^{01}],Im[MO^1]) = Tr(-Im[H^{01}],Im[MO^1])
        para_occ[n] = numpy.einsum('xji,yij->xy', dm10_oo, h01i) * 2 # *2 for + c.c.
        para_vir[n] = numpy.einsum('xji,yij->xy', dm10_vo, h01i) * 2 # *2 for + c.c.
    msc_para = para_occ + para_vir
    return msc_para, para_vir, para_occ
def make_h10(mol, dm0, gauge_orig=None, verbose=logger.WARN):
    '''Imaginary part of first order Fock operator

    With GIAO (gauge_orig is None) this includes the two-electron GIAO
    terms from make_h10giao; with a common gauge origin only the
    one-electron angular-momentum term remains.

    Note the side effects of set_common_origin
    '''
    log = logger.new_logger(mol, verbose)
    if gauge_orig is None:
        # A10_i dot p + p dot A10_i consistents with <p^2 g>
        # A10_j dot p + p dot A10_j consistents with <g p^2>
        # 1/2(A10_j dot p + p dot A10_j) => Im[1/4 (rjxp - pxrj)] = -1/2 <irjxp>
        log.debug('First-order GIAO Fock matrix')
        h1 = -.5 * mol.intor('int1e_giao_irjxp', 3) + make_h10giao(mol, dm0)
    else:
        with mol.with_common_origin(gauge_orig):
            h1 = -.5 * mol.intor('int1e_cg_irxp', 3)
    return h1
def get_jk(mol, dm0):
    '''First-order Coulomb/exchange matrices from GIAO two-electron
    integrals. Returns (vj, vk), each with a leading axis of 3 (B_x,y,z).
    '''
    # J = Im[(i i|\mu g\nu) + (i gi|\mu \nu)] = -i (i i|\mu g\nu)
    # K = Im[(\mu gi|i \nu) + (\mu i|i g\nu)]
    #   = [-i (\mu g i|i \nu)] - h.c. (-h.c. for anti-symm because of the factor -i)
    intor = mol._add_suffix('int2e_ig1')
    vj, vk = _vhf.direct_mapdm(intor, # (g i,j|k,l)
                               'a4ij', ('lk->s1ij', 'jk->s1il'),
                               dm0, 3, # xyz, 3 components
                               mol._atm, mol._bas, mol._env)
    # Anti-symmetrize the exchange part (the "- h.c." above).
    vk = vk - numpy.swapaxes(vk, -1, -2)
    return -vj, -vk
def make_h10giao(mol, dm0):
    '''GIAO contribution to the first-order Fock matrix (imaginary part).'''
    vj, vk = get_jk(mol, dm0)
    h1 = vj - .5 * vk
    # Im[<g\mu|H|g\nu>] = -i * (gnuc + gkin)
    h1 -= mol.intor_asymmetric('int1e_ignuc', 3)
    if mol.has_ecp():
        # Effective-core-potential contribution, when ECPs are in use.
        h1 -= mol.intor_asymmetric('ECPscalar_ignuc', 3)
    h1 -= mol.intor('int1e_igkin', 3)
    return h1
def make_s10(mol, gauge_orig=None):
    '''First order overlap matrix wrt external magnetic field.

    With a common gauge origin the basis carries no field dependence, so
    the derivative vanishes; with GIAOs it is -Im[<g mu|g nu>].
    '''
    if gauge_orig is not None:
        # Field-independent AOs: derivative overlap is identically zero.
        nao = mol.nao_nr()
        return numpy.zeros((3, nao, nao))
    # Im[<g\mu |g\nu>]
    return -mol.intor_asymmetric('int1e_igovlp', 3)
get_ovlp = make_s10
def _solve_mo1_uncoupled(mo_energy, mo_occ, h1, s1):
    '''uncoupled first order equation

    Returns (mo10, mo_e10): first-order MO coefficients in the occupied
    columns and the first-order orbital-energy matrix (occupied block).
    '''
    occidx = mo_occ > 0
    viridx = mo_occ == 0
    e_occ = mo_energy[occidx]
    e_vir = mo_energy[viridx]
    # Right-hand side: h1 - e_i * s1 for each occupied column i.
    rhs = h1 - s1 * e_occ
    mo10 = numpy.empty_like(rhs)
    # Virtual-occupied block from first-order perturbation theory.
    mo10[:, viridx, :] = -rhs[:, viridx, :] / (e_vir.reshape(-1, 1) - e_occ)
    # Occupied-occupied block fixed by first-order orthonormality.
    mo10[:, occidx, :] = s1[:, occidx, :] * -.5
    # First-order orbital energies in the occupied block.
    e_ji = e_occ.reshape(-1, 1) - e_occ
    mo_e10 = rhs[:, occidx, :] + mo10[:, occidx, :] * e_ji
    return mo10, mo_e10
#TODO: merge to hessian.rhf.solve_mo1 function
def solve_mo1(nmrobj, mo_energy=None, mo_coeff=None, mo_occ=None,
              h1=None, s1=None, with_cphf=None):
    '''Solve the first order equation

    Kwargs:
        with_cphf : boolean or function(dm_mo) => v1_mo
            If a boolean value is given, the value determines whether CPHF
            equation will be solved or not. The induced potential will be
            generated by the function gen_vind.
            If a function is given, CPHF equation will be solved, and the
            given function is used to compute induced potential
    '''
    if mo_energy is None: mo_energy = nmrobj._scf.mo_energy
    if mo_coeff is None: mo_coeff = nmrobj._scf.mo_coeff
    if mo_occ is None: mo_occ = nmrobj._scf.mo_occ
    if with_cphf is None: with_cphf = nmrobj.cphf
    # FIX: time.clock() was removed in Python 3.8; process_time() is the
    # equivalent CPU-time counter for logger.timer's (cpu, wall) pair.
    cput1 = (time.process_time(), time.time())
    log = logger.Logger(nmrobj.stdout, nmrobj.verbose)
    mol = nmrobj.mol
    orbo = mo_coeff[:,mo_occ>0]
    if h1 is None:
        # First-order Fock matrix projected onto the (all, occ) MO block.
        dm0 = nmrobj._scf.make_rdm1(mo_coeff, mo_occ)
        h1 = lib.einsum('xpq,pi,qj->xij', nmrobj.get_fock(dm0),
                        mo_coeff.conj(), orbo)
        cput1 = log.timer('first order Fock matrix', *cput1)
    if s1 is None:
        s1 = lib.einsum('xpq,pi,qj->xij', nmrobj.get_ovlp(mol),
                        mo_coeff.conj(), orbo)
    if with_cphf:
        # Coupled equations: iterate with the induced potential.
        if callable(with_cphf):
            vind = with_cphf
        else:
            vind = gen_vind(nmrobj._scf, mo_coeff, mo_occ)
        mo10, mo_e10 = cphf.solve(vind, mo_energy, mo_occ, h1, s1,
                                  nmrobj.max_cycle_cphf, nmrobj.conv_tol,
                                  verbose=log)
    else:
        mo10, mo_e10 = _solve_mo1_uncoupled(mo_energy, mo_occ, h1, s1)
    log.timer('solving mo1 eqn', *cput1)
    return mo10, mo_e10
def get_fock(nmrobj, dm0=None, gauge_orig=None):
    r'''First order partial derivatives of Fock matrix wrt external magnetic
    field. \frac{\partial F}{\partial B}
    '''
    if dm0 is None: dm0 = nmrobj._scf.make_rdm1()
    if gauge_orig is None: gauge_orig = nmrobj.gauge_orig
    log = logger.Logger(nmrobj.stdout, nmrobj.verbose)
    h1 = make_h10(nmrobj.mol, dm0, gauge_orig, log)
    # Checkpoint the first-order Fock matrix for restart/debugging.
    if nmrobj.chkfile:
        lib.chkfile.dump(nmrobj.chkfile, 'nmr/h1', h1)
    return h1
def gen_vind(mf, mo_coeff, mo_occ):
    '''Induced potential

    Returns a closure mapping flattened first-order MO coefficients
    (3 field components) to the induced potential in the (all, occ) MO
    block, in the form required by the CPHF solver.
    '''
    # hermi=2: the first-order density is anti-hermitian.
    vresp = mf.gen_response(singlet=True, hermi=2)
    occidx = mo_occ > 0
    orbo = mo_coeff[:,occidx]
    nocc = orbo.shape[1]
    nao, nmo = mo_coeff.shape
    def vind(mo1):
        # *2 for double occupancy; anti-hermitize the AO-basis density.
        dm1 = [reduce(numpy.dot, (mo_coeff, x*2, orbo.T.conj()))
               for x in mo1.reshape(3,nmo,nocc)]
        dm1 = numpy.asarray([d1-d1.conj().T for d1 in dm1])
        v1mo = lib.einsum('xpq,pi,qj->xij', vresp(dm1), mo_coeff.conj(), orbo)
        return v1mo.ravel()
    return vind
class NMR(lib.StreamObject):
    '''Non-relativistic NMR shielding tensors (RHF).

    Attributes:
        shielding_nuc : indices of the nuclei whose shieldings are computed.
        gauge_orig : None selects GIAO; a coordinate selects a common gauge.
        cphf : whether to solve the coupled-perturbed HF equations.
        max_cycle_cphf, conv_tol : CPHF solver controls.
    '''
    def __init__(self, scf_method):
        self.mol = scf_method.mol
        self.verbose = scf_method.mol.verbose
        self.stdout = scf_method.mol.stdout
        self.chkfile = scf_method.chkfile
        self._scf = scf_method
        self.shielding_nuc = range(self.mol.natm)
        # gauge_orig=None will call GIAO. A coordinate array leads to common gauge
        self.gauge_orig = None
        self.cphf = True
        self.max_cycle_cphf = 20
        self.conv_tol = 1e-9
        self.mo10 = None
        self.mo_e10 = None
        self._keys = set(self.__dict__.keys())

    def dump_flags(self, verbose=None):
        '''Log the run parameters; warn if the ground-state SCF failed.'''
        log = logger.new_logger(self, verbose)
        log.info('\n')
        log.info('******** %s for %s ********',
                 self.__class__, self._scf.__class__)
        if self.gauge_orig is None:
            log.info('gauge = GIAO')
        else:
            log.info('Common gauge = %s', str(self.gauge_orig))
        log.info('shielding for atoms %s', str(self.shielding_nuc))
        if self.cphf:
            log.info('Solving MO10 eq with CPHF.')
            log.info('CPHF conv_tol = %g', self.conv_tol)
            log.info('CPHF max_cycle_cphf = %d', self.max_cycle_cphf)
        if not self._scf.converged:
            log.warn('Ground state SCF is not converged')
        return self

    # Note mo10 is the imaginary part of MO^1
    def kernel(self, mo1=None):
        return self.shielding(mo1)

    def shielding(self, mo1=None):
        '''Compute (and log) the shielding tensors, in ppm.'''
        # FIX: time.clock() was removed in Python 3.8; process_time()
        # supplies the CPU-time reading logger.timer expects.
        cput0 = (time.process_time(), time.time())
        self.check_sanity()
        self.dump_flags()
        unit_ppm = nist.ALPHA**2 * 1e6
        msc_dia = self.dia(self.gauge_orig)
        if mo1 is None:
            self.mo10, self.mo_e10 = self.solve_mo1()
            mo1 = self.mo10
        msc_para, para_vir, para_occ = self.para(mo10=mo1)
        # Convert all contributions from a.u. to ppm.
        msc_dia *= unit_ppm
        msc_para *= unit_ppm
        para_vir *= unit_ppm
        para_occ *= unit_ppm
        e11 = msc_para + msc_dia
        logger.timer(self, 'NMR shielding', *cput0)
        if self.verbose >= logger.NOTE:
            for i, atm_id in enumerate(self.shielding_nuc):
                _write(self.stdout, e11[i],
                       '\ntotal shielding of atom %d %s' \
                       % (atm_id, self.mol.atom_symbol(atm_id)))
                _write(self.stdout, msc_dia[i], 'dia-magnetic contribution')
                _write(self.stdout, msc_para[i], 'para-magnetic contribution')
                if self.verbose >= logger.INFO:
                    _write(self.stdout, para_occ[i], 'occ part of para-magnetism')
                    _write(self.stdout, para_vir[i], 'vir part of para-magnetism')
        return e11

    # Expose the module-level implementations as methods.
    dia = dia
    para = para
    get_fock = get_fock
    solve_mo1 = solve_mo1

    def get_ovlp(self, mol=None, gauge_orig=None):
        '''First-order overlap; delegates to the module-level get_ovlp.'''
        if mol is None: mol = self.mol
        if gauge_orig is None: gauge_orig = self.gauge_orig
        return get_ovlp(mol, gauge_orig)
# Attach NMR as a method on RHF objects, so mf.NMR() returns NMR(mf).
from pyscf import scf
scf.hf.RHF.NMR = lib.class_as_method(NMR)
def _write(stdout, msc3x3, title):
    '''Pretty-print a 3x3 shielding tensor, one Cartesian row per line.'''
    rows = zip(('B_x', 'B_y', 'B_z'), msc3x3)
    lines = [title] + ['%s %s' % (axis, row) for axis, row in rows]
    stdout.write('\n'.join(lines) + '\n')
    stdout.flush()
if __name__ == '__main__':
    # Smoke test: HF molecule, 6-31G; the literal numbers in the print
    # calls are the reference shieldings for each setting.
    from pyscf import gto
    from pyscf import scf
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom.extend([
        [1 , (0. , 0. , .917)],
        ['F' , (0. , 0. , 0.)], ])
    mol.nucmod = {'F': 2} # gaussian nuclear model
    mol.basis = {'H': '6-31g',
                 'F': '6-31g',}
    mol.build()
    rhf = scf.RHF(mol).run()
    nmr = rhf.NMR()
    nmr.cphf = True
    #nmr.gauge_orig = (0,0,0)
    msc = nmr.kernel() # _xx,_yy = 375.232839, _zz = 483.002139
    print(msc[1][0,0], msc[1][1,1], 375.232839)
    print(msc[1][2,2], 483.002139)
    print(lib.finger(msc) - -132.22895063293751)
    # Common gauge origin, CPHF on.
    nmr.cphf = True
    nmr.gauge_orig = (1,1,1)
    msc = nmr.shielding()
    print(msc[1][0,0], msc[1][1,1], 342.447242)
    print(msc[1][2,2], 483.002139)
    print(lib.finger(msc) - -108.48528212325664)
    # GIAO, uncoupled (no CPHF).
    nmr.cphf = False
    nmr.gauge_orig = None
    msc = nmr.shielding()
    print(msc[1][0,0], msc[1][1,1], 449.032227)
    print(msc[1][2,2], 483.002139)
    print(lib.finger(msc) - -133.26526049655627)
    # Larger, asymmetric system.
    mol.atom.extend([
        [1 , (1. , 0.3, .417)],
        [1 , (0.2, 1. , 0.)],])
    mol.build()
    mf = scf.RHF(mol).run()
    nmr = NMR(mf)
    nmr.cphf = False
    nmr.gauge_orig = None
    msc = nmr.shielding()
    print(msc[1][0,0], 283.514599)
    print(msc[1][1,1], 292.578151)
    print(msc[1][2,2], 257.348176)
    print(lib.finger(msc) - -123.98600632099961)
|
gkc1000/pyscf
|
pyscf/prop/nmr/rhf.py
|
Python
|
apache-2.0
| 13,891
|
[
"Gaussian",
"PySCF"
] |
00dfcd4fad09c3e25c751018cff03159caec6b80c20a970c653c75160c6ffd9e
|
"""HandleImport transformation takes care of importing user-defined modules."""
from pythran.passmanager import Transformation
from pythran.tables import cxx_keywords, MODULES, pythran_ward
import ast
import importlib
import inspect
def add_filename_field(node, filename):
    """Tag *node* and every descendant with the originating file name."""
    for subnode in ast.walk(node):
        subnode.filename = filename
def mangle_imported_function_name(module_name, func_name):
    """Mangling naming scheme for imported functions."""
    # <ward>imported__<module>_<function>, unique per (module, function).
    return "%simported__%s_%s" % (pythran_ward, module_name, func_name)
def is_builtin_function(func_name):
    """Test if a function is a builtin (like len(), map(), ...)."""
    builtins_table = MODULES["__builtin__"]
    if func_name in builtins_table:
        return True
    # Functions clashing with C++ keywords are stored with a trailing '_'.
    return func_name in cxx_keywords and func_name + "_" in builtins_table
def is_builtin_module_name(module_name):
    """Test if a module is a builtin module (numpy, math, ...)."""
    # Only the root of a dotted name matters (e.g. numpy.linalg -> numpy).
    root = module_name.split(".")[0]
    if root in MODULES:
        return True
    # Modules clashing with C++ keywords are stored with a trailing '_'.
    return root in cxx_keywords and root + "_" in MODULES
def is_builtin_module(module):
    """Test if a module is a builtin module (numpy, math, ...).

    ``module`` is an ``ast.alias`` node; only its dotted name matters.
    """
    return is_builtin_module_name(module.name)
def filter_builtinIn_import(import_node):
    """Filter out import list to keep only builtin modules.

    Returns the (mutated) node with non-builtin aliases removed.
    """
    # FIX: materialize as a list — under Python 3, ``filter`` returns a
    # one-shot lazy iterator, which would leave ``names`` unusable for
    # the repeated traversals the AST machinery performs (no-op on py2).
    import_node.names = list(filter(is_builtin_module, import_node.names))
    return import_node
class ImportFunction(ast.NodeTransformer):
    """
    AST transformer that operates on a function that we need to import.

    It visits each call inside the function and recursively import the
    callees. The call site is modified to call the new imported function,
    using name mangling.
    """

    def __init__(self, registry, module, func_name):
        self.registry = registry
        self.module = module
        self.func_name = func_name
        # Functions defined inside the visited function; calls to them
        # need no import handling.
        self.nested_functions = dict()

    def visit_FunctionDef(self, func_node):
        """Keep track of nested Function."""
        self.nested_functions[func_node.name] = func_node
        self.generic_visit(func_node)
        return func_node

    def visit_Import(self, import_node):
        """
        Track local import.

        This is "wrong" because we add these import like if they were global.
        """
        for alias in import_node.names:
            asname = alias.asname or alias.name
            self.module.imported_modules[asname] = alias.name
        return filter_builtinIn_import(import_node)

    def visit_ImportFrom(self, importfrom_node):
        """
        Track local import.

        This is "wrong" because we add these import like if they were global.
        """
        module_name = importfrom_node.module
        for alias in importfrom_node.names:
            func_name = alias.name
            asname = alias.asname or func_name
            self.module.imported_functions[asname] = (module_name,
                                                      func_name,
                                                      None)
        # Non-builtin ImportFrom nodes are dropped here (implicit None
        # return); their functions are pulled in on demand instead.
        if is_builtin_module_name(module_name):
            return importfrom_node

    def visit_Call(self, call_node):
        """Find any non-builtin and non-nested function call to pull the callee
        as part of the import.
        """
        self.generic_visit(call_node)
        if isinstance(call_node.func, ast.Name):
            # Direct call, resolve in the current module.
            # Note: the function is not necessarily locally defined, it could
            # be imported in the form "from bar import foo"
            func_name = call_node.func.id
            if func_name in self.nested_functions:
                # Don't need to do anything in this case, nested function are
                # implicitly imported with the current function
                return call_node
            # Import the function now, imply a recursion to import the callee
            mangled_name = self.module.call_function(self.registry,
                                                     func_name)
            # Patch the call site, replacing with the mangle name
            call_node.func.id = mangled_name
        elif (isinstance(call_node.func, ast.Attribute) and
              isinstance(call_node.func.value, ast.Name)):
            # This is a call in the form of something.function()
            module_alias = call_node.func.value.id
            func_name = call_node.func.attr
            if module_alias not in self.module.imported_modules:
                # This might be an import error, but it can also be that
                # "something" in something.function() is not a module, for
                # instance: list.append(...)
                return call_node
            module_name = self.module.imported_modules[module_alias]
            module = self.registry.import_module(module_name)
            # In case it a builtin module, add import with the correct alias
            if isinstance(module, BuiltinModule):
                self.module.dependent_modules[module_alias] = module_name
            # Import the function from the module now, triggers a recursion
            mangled_name = module.import_function(self.registry, func_name)
            # Patch witch mangled name, force it for the main module as we want
            # to tranform calls from main_module.foo() to simply foo()
            if module.to_be_mangled or module.is_main_module:
                # Patch the call, replace with the mangle name
                call_node.func = ast.Name(id=mangled_name, ctx=ast.Load())
        return call_node
class ImportedModule(object):
    """
    Represent a user-defined imported module.

    It offer an interface to import a function from this module, handling
    automatically the import of all callees in the function.
    """

    def __init__(self, name, module=None):
        """Parameters are the name for the module (mandatory), and the
        ast.Module node (optional) in the case the current module is the main
        one. This differentiation is needed to avoid mangling function name for
        functions defined in the main module.
        """
        self.is_main_module = True
        self.node = module
        if self.node is None:
            # Not main module, parse now the imported module
            imported_module = importlib.import_module(name)
            self.is_main_module = False
            self.node = ast.parse(inspect.getsource(imported_module))
        assert isinstance(self.node, ast.Module)
        # Recursively add filename information to all nodes, for debug msg
        add_filename_field(self.node, name + ".py")
        # Mangle function imported, unless it is the main module
        self.to_be_mangled = not self.is_main_module
        self.name = name
        # Functions defined in this module and imported by another one.
        # This dict is used at the end of the process to gather functions to be
        # prepend at the beginning of the main pythran module
        self.exported_functions = dict()
        self.dependent_modules = dict()
        # Top-level function declared in this module
        self.functions = dict()
        # Functions imported as "from somemodule import func as func_alias"
        self.imported_functions = dict()
        # Regular module import. Keys are alias and values are module names
        self.imported_modules = dict()
        # Collect top-level functions and imports
        for decl in self.node.body:
            if isinstance(decl, ast.FunctionDef):  # regular functions
                self.functions[decl.name] = decl
            if isinstance(decl, ast.Import):  # Module import
                for alias in decl.names:
                    asname = alias.asname or alias.name
                    self.imported_modules[asname] = alias.name
            if isinstance(decl, ast.ImportFrom):  # Function import
                module_name = decl.module
                for alias in decl.names:
                    func_name = alias.name
                    asname = alias.asname or func_name
                    self.imported_functions[asname] = (module_name, func_name,
                                                       None)

    def call_function(self, registry, func_name):
        """Direct function call from another function of the current module,
        try to find if it was imported from another module in the form of
        "from module_name import foo", if it was not it has to be locally
        defined.

        Return the mangled name to be used at call site.
        """
        if func_name in self.imported_functions:
            module_name, realName, decl = self.imported_functions[func_name]
            if not decl:  # first time we call this function, import it.
                decl = registry.import_module(module_name). \
                    call_function(registry, realName)
                # Cache the fact that it has been imported now
                self.imported_functions[func_name] = (module_name, realName,
                                                      decl)
            if not registry.import_module(module_name).to_be_mangled:
                # No mangling in the main module, nor in builtins
                return realName
            return mangle_imported_function_name(module_name, realName)
        # Function not imported, hopefully it was locally defined, delegate!
        return self.import_function(registry, func_name)

    def import_function(self, registry, func_name):
        """
        Called to import a function locally defined in this module.

        Return the mangled name to be used at call site.
        """
        if func_name in self.exported_functions:  # Caching: already registered
            return self.exported_functions[func_name].name
        # Function is not defined locally, maybe it is an alias, like
        #   c = math.cos
        #   c()
        # Just give up here and hope for the best!
        if func_name not in self.functions:
            return func_name
        func = self.functions[func_name]
        # Mangle function's name here so that module1.foo() and
        # module2.foo() don't conflict
        if self.to_be_mangled:
            # No mangling in the main module
            func.name = mangle_imported_function_name(self.name, func_name)
        # Cache the processed function
        self.exported_functions[func_name] = func
        # Recursively visit the function to handle any callees
        ImportFunction(registry, self, func.name).visit(func)
        return func.name
class BuiltinModule(object):
    """
    A Pythran-supported builtin module (math, numpy, ...).

    Mirrors the ImportedModule interface but does not try to validate
    the functions imported from it.
    """

    def __init__(self, name):
        self.name = name
        self.is_main_module = False
        self.to_be_mangled = False
        # For builtins module, exported_functions is only used to keep
        # ImportFrom nodes, see call_function()
        self.exported_functions = dict()
        self.dependent_modules = dict()

    def call_function(self, _, func_name):
        """Record a direct call to a builtin function.

        The function was imported as ``from builtin import foo``; remember
        an ImportFrom node so the import can be re-emitted in the main
        module. The name is returned unmangled.
        """
        node = ast.ImportFrom(
            module=self.name,
            names=[ast.alias(name=func_name, asname=None)],
            level=0)  # FIXME what is level?
        self.exported_functions[func_name] = node
        return func_name

    def import_function(self, _, func_name):
        """Builtin functions keep their name; nothing to recurse into."""
        # We could check if the function is supported by Pythran here...
        return func_name
class ImportRegistry(object):
    """
    Keep track of already imported modules.

    It avoid duplication in case of diamond or reflective import. It keeps a
    single ImportedModule() instance per module. Import has to use the
    canonical name (not the aliased one).
    """

    def __init__(self):
        self.modules = dict()  # List of modules already imported

    def import_module(self, name):
        """Keep track of imported modules. Pythran-supported builtin modules
        are handled using a dummy BuiltinModule() type, while user-defined
        modules rely on ImportedModule() to provide an interface to import
        function at call site
        """
        if name in self.modules:  # Caching
            return self.modules[name]
        if is_builtin_module_name(name):
            mod = BuiltinModule(name)
        else:
            mod = ImportedModule(name)
        self.modules[name] = mod
        return mod

    def generate_ImportList(self):
        """List of imported functions to be added to the main module. """
        import_list = []
        for mod in self.modules.values():
            if mod.is_main_module:
                # don't need to import anything from the main module
                continue
            # Module-level imports required by the pulled-in functions.
            for alias, module_name in mod.dependent_modules.items():
                import_node = ast.Import(names=[ast.alias(name=module_name,
                                                          asname=alias)])
                import_list.append(import_node)
            # Here we import the function itself (FunctionDef node)
            # In case of builtin module, it is an ImportFrom node.
            import_list += mod.exported_functions.values()
        return import_list
class HandleImport(Transformation):
    """This pass handles user-defined imports, mangling names for functions
    from other modules, including them in the current module, and patching
    all call sites accordingly.
    """

    def __init__(self):
        super(HandleImport, self).__init__()
        # One shared registry for the whole pass: a single wrapper object
        # per imported module, reused across diamond/repeated imports.
        self.registry = ImportRegistry()

    def visit_Module(self, module):
        """Entry point for the module."""
        # Do not use registry.import_module because this is the main module and
        # ImportedModule takes an extra parameter in this case
        self.module = ImportedModule(self.passmanager.module_name, module)
        self.registry.modules[self.passmanager.module_name] = self.module
        self.generic_visit(module)
        # Patch module body: prepend all imported function and import nodes
        imported = self.registry.generate_ImportList()
        module.body = imported + module.body
        self.update |= bool(imported)
        return module

    @staticmethod
    def visit_Import(import_node):
        """Filter out import node to keep only builtin modules."""
        return filter_builtinIn_import(import_node)

    @staticmethod
    def visit_ImportFrom(import_node):
        """Filter out import node to keep only builtin modules.

        Returning None implicitly (for non-builtin modules) removes the
        node from the tree; the user-level imports it carried are
        re-materialized later via the registry's import list.
        """
        module_name = import_node.module
        if is_builtin_module_name(module_name):
            return import_node

    def visit_FunctionDef(self, func):
        """Trigger dependent import for this function's body."""
        self.module.call_function(self.registry, func.name)
        return func
|
hainm/pythran
|
pythran/transformations/handle_import.py
|
Python
|
bsd-3-clause
| 15,181
|
[
"VisIt"
] |
17afc5214ca7bcdb908d04faf2b6a1ce5e1001f81d7ecf53f947203c3dc8f808
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, sys, glob, string
import zipfile
from datetime import date
try:
import json
except:
import simplejson as json
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from config, with ``~`` and environment
    variables expanded."""
    raw = config['TITANIUM_SDK']
    return os.path.expandvars(os.path.expanduser(raw))
def replace_vars(config, token):
    """Expand ``$(KEY)`` references in *token* using values from *config*.

    Scans left to right, restarting after each substitution. Stops at the
    first reference that is malformed (no closing parenthesis) or whose key
    is missing from config, leaving the remainder untouched -- same
    contract as the original implementation.
    """
    idx = token.find('$(')
    while idx != -1:
        end = token.find(')', idx + 2)
        if end == -1:
            break  # unterminated reference: leave the rest as-is
        key = token[idx + 2:end]
        # dict.has_key() was removed in Python 3; `in` is the equivalent
        # membership test and behaves identically on Python 2.
        if key not in config:
            break
        # Replaces every occurrence of this reference, as before.
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig (in the module directory) into a dict.

    Lines are ``KEY = VALUE``; ``//`` comment lines are skipped and
    ``$(VAR)`` references in values are expanded from keys parsed on
    *earlier* lines of the same file.
    """
    contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2]=='//': continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx+1:].strip()
            # replace_vars can only resolve keys already seen above
            config[key] = replace_vars(config,value)
    return config
def generate_doc(config):
    """Render every file in ./documentation to HTML via markdown.

    Returns a list of one-entry ``{filename: html}`` dicts, or None when
    the documentation directory is missing.
    """
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        print "Couldn't find documentation file at: %s" % docdir
        return None
    # markdown2 ships in the Titanium SDK's module support directory; fall
    # back to a plain markdown install when it is not importable from there.
    sdk = find_sdk(config)
    support_dir = os.path.join(sdk,'module','support')
    sys.path.append(support_dir)
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        # NOTE(review): relies on the module-level ignoreFiles defined
        # further down this file -- fine at call time, but fragile.
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    """Precompile assets/bencoding.map.js with the Titanium SDK compiler and
    splice the generated lookup method into
    Classes/BencodingMapModuleAssets.m.

    No-op when the JS asset is absent. The Objective-C file is only
    rewritten when its contents actually change, keeping builds idempotent.
    """
    js_file = os.path.join(cwd,'assets','bencoding.map.js')
    if not os.path.exists(js_file): return
    sdk = find_sdk(config)
    iphone_dir = os.path.join(sdk,'iphone')
    sys.path.insert(0,iphone_dir)
    from compiler import Compiler
    path = os.path.basename(js_file)
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    metadata = compiler.make_function_from_file(path,js_file)
    # Record the exported symbols for packaging (see package_module).
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
    method = metadata['method']
    eq = path.replace('.','_')  # NOTE(review): computed but never used
    method = '  return %s;' % method
    f = os.path.join(cwd,'Classes','BencodingMapModuleAssets.m')
    c = open(f).read()
    # Replace everything from the first "return " to EOF with the new body.
    idx = c.find('return ')
    before = c[0:idx]
    after = """
}
@end
"""
    newc = before + method + after
    if newc!=c:
        x = open(f,'w')
        x.write(newc)
        x.close()
def die(msg):
    """Print msg and abort the build with a non-zero exit status."""
    print msg
    sys.exit(1)

def warn(msg):
    """Print a non-fatal, [WARN]-prefixed message and continue."""
    print "[WARN] %s" % msg
def validate_license():
    """Warn when LICENSE still contains the placeholder license text."""
    c = open(os.path.join(cwd,'LICENSE')).read()
    if c.find(module_license_default)!=-1:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse and sanity-check the module manifest.

    Returns (manifest_dict, manifest_path). Dies when the file is missing
    or a required key is absent; warns when a placeholder default value was
    left unchanged.
    """
    path = os.path.join(cwd,'manifest')
    # Check existence *before* opening: the original opened the file first,
    # so a missing manifest raised IOError instead of reaching the
    # friendly die() message.
    if not os.path.exists(path): die("missing %s" % path)
    f = open(path)
    manifest = {}
    for line in f.readlines():
        line = line.strip()
        if line[0:1]=='#': continue   # comment line
        if line.find(':') < 0: continue  # not a key:value pair
        # Split on the first ':' only so values containing colons
        # (e.g. URLs) don't raise ValueError.
        key,value = line.split(':',1)
        manifest[key.strip()]=value.strip()
    for key in required_module_keys:
        # dict.has_key() is Python-2 only; `in` is equivalent on 2 and 3.
        if key not in manifest: die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README','bencoding.map.js']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
    """Recursively add *dir* to the open ZipFile *zf*, rooted at *basepath*.

    Skips the module-level ignoreDirs/ignoreFiles and compiled .pyc files.
    The `ignore` parameter is accepted but never used (callers pass
    ['README']); note the mutable default argument -- harmless here only
    because it is never mutated.
    """
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name) # don't visit ignored directories
        for file in files:
            if file in ignoreFiles: continue
            e = os.path.splitext(file)
            if len(e)==2 and e[1]=='.pyc':continue
            from_ = os.path.join(root, file)
            # map the on-disk path into the archive under basepath
            to_ = from_.replace(dir, basepath, 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Return the Release-configuration static libraries under build/."""
    return [lib for lib in glob.glob('build/**/*.a')
            if lib.find('Release-') != -1]
def build_module(manifest,config):
    """Run xcodebuild for device and simulator, then lipo the results into
    a single fat static library build/lib<moduleid>.a.

    Dies when either xcodebuild invocation fails; the lipo exit status is
    not checked.
    """
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    Layout follows the Titanium convention modules/iphone/<id>/<version>/:
    manifest, fat static library, rendered documentation, the assets /
    example / platform trees, LICENSE, module.xcconfig and metadata.json.
    """
    name = manifest['name'].lower()   # NOTE(review): assigned but unused
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    # start from a clean archive every time
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        for doc in docs:
            for file, html in doc.iteritems():
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    for dn in ('assets','example','platform'):
        if os.path.exists(dn):
            zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
    zf.write('LICENSE','%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    # metadata.json is produced by compile_js when the JS asset exists
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Build pipeline: validate inputs, precompile JS assets, compile the
    # fat static library, then zip everything into the distributable.
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
|
benbahrenburg/benCoding.Map
|
build.py
|
Python
|
apache-2.0
| 6,428
|
[
"VisIt"
] |
73e129c74ec8a638370b2b62c807748484d3e9221a8e483027e6fa255c12df3e
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing QChem input files.
"""
import logging
import sys
from typing import Dict, List, Optional, Tuple, Union
from monty.io import zopen
from monty.json import MSONable
from pymatgen.core import Molecule
from .utils import lower_and_check_unique, read_pattern, read_table_pattern
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
__author__ = "Brandon Wood, Samuel Blau, Shyam Dwaraknath, Julian Self, Evan Spotte-Smith"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__email__ = "b.wood@berkeley.edu"
__credits__ = "Xiaohui Qu"
logger = logging.getLogger(__name__)
class QCInput(MSONable):
"""
An object representing a QChem input file. QCInput attributes represent different sections of a QChem input file.
To add a new section one needs to modify __init__, __str__, from_sting and add staticmethods
to read and write the new section i.e. section_template and read_section. By design, there is very little (or no)
checking that input parameters conform to the appropriate QChem format, this responsible lands on the user or a
separate error handling software.
"""
    def __init__(
        self,
        molecule: Union[Molecule, Literal["read"]],
        rem: Dict,
        opt: Optional[Dict[str, List]] = None,
        pcm: Optional[Dict] = None,
        solvent: Optional[Dict] = None,
        smx: Optional[Dict] = None,
        scan: Optional[Dict[str, List]] = None,
        van_der_waals: Optional[Dict[str, float]] = None,
        vdw_mode: str = "atomic",
        plots: Optional[Dict] = None,
        nbo: Optional[Dict] = None,
    ):
        """
        Args:
            molecule (pymatgen Molecule object or "read"):
                Input molecule. molecule can be set as either a pymatgen Molecule object or as the str "read".
                "read" can be used in multi_job QChem input files where the molecule is read in from the
                previous calculation.
            rem (dict):
                A dictionary of all the input parameters for the rem section of QChem input file.
                Ex. rem = {'method': 'rimp2', 'basis': '6-31*G++' ... }
            opt (dict of lists):
                A dictionary of opt sections, where each opt section is a key and the corresponding
                values are a list of strings. Strings must be formatted as instructed by the QChem manual.
                The different opt sections are: CONSTRAINT, FIXED, DUMMY, and CONNECT
                Ex. opt = {"CONSTRAINT": ["tors 2 3 4 5 25.0", "tors 2 5 7 9 80.0"], "FIXED": ["2 XY"]}
            pcm (dict):
                A dictionary of the PCM section, defining behavior for use of the polarizable continuum model.
                Ex: pcm = {"theory": "cpcm", "hpoints": 194}
            solvent (dict):
                A dictionary defining the solvent parameters used with PCM.
                Ex: solvent = {"dielectric": 78.39, "temperature": 298.15}
            smx (dict):
                A dictionary defining solvent parameters used with the SMD method, a solvent method that adds
                short-range terms to PCM.
                Ex: smx = {"solvent": "water"}
            scan (dict of lists):
                A dictionary of scan variables. Because two constraints of the same type are allowed (for instance, two
                torsions or two bond stretches), each TYPE of variable (stre, bend, tors) should be its own key in the
                dict, rather than each variable. Note that the total number of variable (sum of lengths of all lists)
                CANNOT be more than two.
                Ex. scan = {"stre": ["3 6 1.5 1.9 0.1"], "tors": ["1 2 3 4 -180 180 15"]}
            van_der_waals (dict):
                A dictionary of custom van der Waals radii to be used when constructing cavities for the PCM
                model or when computing, e.g. Mulliken charges. The keys are strs whose meaning depends on
                the value of vdw_mode, and the values are the custom radii in angstroms.
            vdw_mode (str): Method of specifying custom van der Waals radii - 'atomic' or 'sequential'.
                In 'atomic' mode (default), dict keys represent the atomic number associated with each
                radius (e.g., 6 = carbon). In 'sequential' mode, dict keys represent the sequential
                position of a single specific atom in the input structure.
            plots (dict):
                A dictionary of all the input parameters for the plots section of QChem input file.
            nbo (dict):
                A dictionary of all the input parameters for the nbo section of QChem input file.
        """
        self.molecule = molecule
        # Normalize user-supplied sections (presumably lower-cases keys and
        # flags duplicates -- see utils.lower_and_check_unique).
        self.rem = lower_and_check_unique(rem)
        # opt keys are the case-sensitive section names (CONSTRAINT, ...),
        # so opt is deliberately not normalized.
        self.opt = opt
        self.pcm = lower_and_check_unique(pcm)
        self.solvent = lower_and_check_unique(solvent)
        self.smx = lower_and_check_unique(smx)
        self.scan = lower_and_check_unique(scan)
        self.van_der_waals = lower_and_check_unique(van_der_waals)
        self.vdw_mode = vdw_mode
        self.plots = lower_and_check_unique(plots)
        self.nbo = lower_and_check_unique(nbo)

        # Make sure rem is valid:
        #   - Has a basis
        #   - Has a method or DFT exchange functional
        #   - Has a valid job_type or jobtype
        valid_job_types = [
            "opt",
            "optimization",
            "sp",
            "freq",
            "frequency",
            "force",
            "nmr",
            "ts",
            "pes_scan",
        ]

        if "basis" not in self.rem:
            raise ValueError("The rem dictionary must contain a 'basis' entry")
        if "method" not in self.rem:
            if "exchange" not in self.rem:
                raise ValueError("The rem dictionary must contain either a 'method' entry or an 'exchange' entry")
        if "job_type" not in self.rem:
            raise ValueError("The rem dictionary must contain a 'job_type' entry")
        if self.rem.get("job_type").lower() not in valid_job_types:
            raise ValueError("The rem dictionary must contain a valid 'job_type' entry")

        # Still to do:
        #   - Check that the method or functional is valid
        #   - Check that basis is valid
        #   - Check that basis is defined for all species in the molecule
        #   - Validity checks specific to job type?
        #   - Check OPT and PCM sections?
def __str__(self):
combined_list = []
# molecule section
combined_list.append(self.molecule_template(self.molecule))
combined_list.append("")
# rem section
combined_list.append(self.rem_template(self.rem))
combined_list.append("")
# opt section
if self.opt:
combined_list.append(self.opt_template(self.opt))
combined_list.append("")
# pcm section
if self.pcm:
combined_list.append(self.pcm_template(self.pcm))
combined_list.append("")
# solvent section
if self.solvent:
combined_list.append(self.solvent_template(self.solvent))
combined_list.append("")
if self.smx:
combined_list.append(self.smx_template(self.smx))
combined_list.append("")
# section for pes_scan
if self.scan:
combined_list.append(self.scan_template(self.scan))
combined_list.append("")
# section for van_der_waals radii
if self.van_der_waals:
combined_list.append(self.van_der_waals_template(self.van_der_waals, self.vdw_mode))
combined_list.append("")
# plots section
if self.plots:
combined_list.append(self.plots_template(self.plots))
combined_list.append("")
# nbo section
if self.nbo is not None:
combined_list.append(self.nbo_template(self.nbo))
combined_list.append("")
return "\n".join(combined_list)
@staticmethod
def multi_job_string(job_list: List["QCInput"]) -> str:
"""
Args:
job_list (): List of jobs
Returns:
(str) String representation of multi job input file.
"""
multi_job_string = ""
for i, job_i in enumerate(job_list):
if i < len(job_list) - 1:
multi_job_string += job_i.__str__() + "\n@@@\n\n"
else:
multi_job_string += job_i.__str__()
return multi_job_string
@classmethod
def from_string(cls, string: str) -> "QCInput":
"""
Read QcInput from string.
Args:
string (str): String input.
Returns:
QcInput
"""
sections = cls.find_sections(string)
molecule = cls.read_molecule(string)
rem = cls.read_rem(string)
# only molecule and rem are necessary everything else is checked
opt = None
pcm = None
solvent = None
smx = None
scan = None
vdw = None
vdw_mode = "atomic"
plots = None
nbo = None
if "opt" in sections:
opt = cls.read_opt(string)
if "pcm" in sections:
pcm = cls.read_pcm(string)
if "solvent" in sections:
solvent = cls.read_solvent(string)
if "smx" in sections:
smx = cls.read_smx(string)
if "scan" in sections:
scan = cls.read_scan(string)
if "van_der_waals" in sections:
vdw_mode, vdw = cls.read_vdw(string)
if "plots" in sections:
plots = cls.read_plots(string)
if "nbo" in sections:
nbo = cls.read_nbo(string)
return cls(
molecule,
rem,
opt=opt,
solvent=solvent,
pcm=pcm,
smx=smx,
scan=scan,
van_der_waals=vdw,
vdw_mode=vdw_mode,
plots=plots,
nbo=nbo,
)
    def write_file(self, filename: str):
        """
        Write QcInput to file.

        Args:
            filename (str): Filename. zopen presumably handles compressed
                targets transparently (monty.io) -- confirm if relied upon.
        """
        with zopen(filename, "wt") as f:
            f.write(self.__str__())
    @staticmethod
    def write_multi_job_file(job_list: List["QCInput"], filename: str):
        """
        Write a multijob file, with jobs separated by the QChem ``@@@``
        delimiter (see multi_job_string).

        Args:
            job_list (): List of jobs.
            filename (): Filename
        """
        with zopen(filename, "wt") as f:
            f.write(QCInput.multi_job_string(job_list))
    @staticmethod
    def from_file(filename: str) -> "QCInput":
        """
        Create QcInput from file.

        Args:
            filename (str): Filename

        Returns:
            QcInput
        """
        with zopen(filename, "rt") as f:
            return QCInput.from_string(f.read())
@classmethod
def from_multi_jobs_file(cls, filename: str) -> List["QCInput"]:
"""
Create list of QcInput from a file.
Args:
filename (str): Filename
Returns:
List of QCInput objects
"""
with zopen(filename, "rt") as f:
# the delimiter between QChem jobs is @@@
multi_job_strings = f.read().split("@@@")
# list of individual QChem jobs
input_list = [cls.from_string(i) for i in multi_job_strings]
return input_list
@staticmethod
def molecule_template(molecule: Union[Molecule, Literal["read"]]) -> str:
"""
Args:
molecule (Molecule): molecule
Returns:
(str) Molecule template.
"""
# todo: add ghost atoms
mol_list = []
mol_list.append("$molecule")
if isinstance(molecule, str):
if molecule == "read":
mol_list.append(" read")
else:
raise ValueError('The only acceptable text value for molecule is "read"')
else:
mol_list.append(f" {int(molecule.charge)} {molecule.spin_multiplicity}")
for site in molecule.sites:
mol_list.append(
" {atom} {x: .10f} {y: .10f} {z: .10f}".format(
atom=site.species_string, x=site.x, y=site.y, z=site.z
)
)
mol_list.append("$end")
return "\n".join(mol_list)
@staticmethod
def rem_template(rem: Dict) -> str:
"""
Args:
rem ():
Returns:
(str)
"""
rem_list = []
rem_list.append("$rem")
for key, value in rem.items():
rem_list.append(f" {key} = {value}")
rem_list.append("$end")
return "\n".join(rem_list)
@staticmethod
def opt_template(opt: Dict[str, List]) -> str:
"""
Optimization template.
Args:
opt ():
Returns:
(str)
"""
opt_list = []
opt_list.append("$opt")
# loops over all opt sections
for key, value in opt.items():
opt_list.append(f"{key}")
# loops over all values within the section
for i in value:
opt_list.append(f" {i}")
opt_list.append(f"END{key}")
opt_list.append("")
# this deletes the empty space after the last section
del opt_list[-1]
opt_list.append("$end")
return "\n".join(opt_list)
@staticmethod
def pcm_template(pcm: Dict) -> str:
"""
Pcm run template.
Args:
pcm ():
Returns:
(str)
"""
pcm_list = []
pcm_list.append("$pcm")
for key, value in pcm.items():
pcm_list.append(f" {key} {value}")
pcm_list.append("$end")
return "\n".join(pcm_list)
@staticmethod
def solvent_template(solvent: Dict) -> str:
"""
Solvent template.
Args:
solvent ():
Returns:
(str)
"""
solvent_list = []
solvent_list.append("$solvent")
for key, value in solvent.items():
solvent_list.append(f" {key} {value}")
solvent_list.append("$end")
return "\n".join(solvent_list)
@staticmethod
def smx_template(smx: Dict) -> str:
"""
Args:
smx ():
Returns:
(str)
"""
smx_list = []
smx_list.append("$smx")
for key, value in smx.items():
if value == "tetrahydrofuran":
smx_list.append(" {key} {value}".format(key=key, value="thf"))
else:
smx_list.append(f" {key} {value}")
smx_list.append("$end")
return "\n".join(smx_list)
@staticmethod
def scan_template(scan: Dict[str, List]) -> str:
"""
Args:
scan (dict): Dictionary with scan section information.
Ex: {"stre": ["3 6 1.5 1.9 0.1"], "tors": ["1 2 3 4 -180 180 15"]}
Returns:
String representing Q-Chem input format for scan section
"""
scan_list = []
scan_list.append("$scan")
total_vars = sum(len(v) for v in scan.values())
if total_vars > 2:
raise ValueError("Q-Chem only supports PES_SCAN with two or less variables.")
for var_type, variables in scan.items():
if variables not in [None, []]:
for var in variables:
scan_list.append(f" {var_type} {var}")
scan_list.append("$end")
return "\n".join(scan_list)
    @staticmethod
    def van_der_waals_template(radii: Dict[str, float], mode: str = "atomic") -> str:
        """
        Args:
            radii (dict): Dictionary with custom van der Waals radii, in
                Angstroms, keyed by either atomic number or sequential
                atom number (see 'mode' kwarg).
                Ex: {1: 1.20, 12: 1.70}
            mode: 'atomic' or 'sequential'. In 'atomic' mode (default), dict keys
                represent the atomic number associated with each radius
                (e.g., '6' = carbon). In 'sequential' mode, dict keys represent
                the sequential position of a single specific atom in the input
                structure.
                **NOTE: keys must be given as strings even though they are numbers!**

        Returns:
            String representing Q-Chem input format for van_der_waals section

        Raises:
            ValueError: when mode is neither 'atomic' nor 'sequential'.
        """
        vdw_list = []
        vdw_list.append("$van_der_waals")
        # flag line: 1 = keys are atomic numbers, 2 = keys are atom positions
        if mode == "atomic":
            vdw_list.append("1")
        elif mode == "sequential":
            vdw_list.append("2")
        else:
            raise ValueError(f"Invalid value {mode} given for 'mode' kwarg.")
        for num, radius in radii.items():
            vdw_list.append(f"   {num} {radius}")
        vdw_list.append("$end")
        return "\n".join(vdw_list)
@staticmethod
def plots_template(plots: Dict) -> str:
"""
Args:
plots ():
Returns:
(str)
"""
plots_list = []
plots_list.append("$plots")
for key, value in plots.items():
plots_list.append(f" {key} {value}")
plots_list.append("$end")
return "\n".join(plots_list)
@staticmethod
def nbo_template(nbo: Dict) -> str:
"""
Args:
nbo ():
Returns:
(str)
"""
nbo_list = []
nbo_list.append("$nbo")
for key, value in nbo.items():
nbo_list.append(f" {key} = {value}")
nbo_list.append("$end")
return "\n".join(nbo_list)
    @staticmethod
    def find_sections(string: str) -> List:
        """
        Find sections in the string.

        Args:
            string (str): String

        Returns:
            List of section names present (leading "$" stripped by the
            regex capture, "end" markers excluded).

        Raises:
            ValueError: for multi-job strings, or when the mandatory
                molecule / rem sections are missing.
        """
        patterns = {"sections": r"^\s*?\$([a-z_]+)", "multiple_jobs": r"(@@@)"}
        matches = read_pattern(string, patterns)
        # list of the sections present
        sections = [val[0] for val in matches["sections"]]
        # remove end from sections
        sections = [sec for sec in sections if sec != "end"]
        # this error should be replaced by a multi job read function when it is added
        if "multiple_jobs" in matches.keys():
            raise ValueError("Output file contains multiple qchem jobs please parse separately")
        if "molecule" not in sections:
            raise ValueError("Output file does not contain a molecule section")
        if "rem" not in sections:
            raise ValueError("Output file does not contain a rem section")
        return sections
@staticmethod
def read_molecule(string: str) -> Union[Molecule, Literal["read"]]:
"""
Read molecule from string.
Args:
string (str): String
Returns:
Molecule
"""
charge = None
spin_mult = None
patterns = {
"read": r"^\s*\$molecule\n\s*(read)",
"charge": r"^\s*\$molecule\n\s*((?:\-)*\d+)\s+\d",
"spin_mult": r"^\s*\$molecule\n\s(?:\-)*\d+\s*(\d)",
}
matches = read_pattern(string, patterns)
if "read" in matches.keys():
return "read"
if "charge" in matches.keys():
charge = float(matches["charge"][0][0])
if "spin_mult" in matches.keys():
spin_mult = int(matches["spin_mult"][0][0])
header = r"^\s*\$molecule\n\s*(?:\-)*\d+\s*\d"
row = r"\s*((?i)[a-z]+)\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer = r"^\$end"
mol_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
species = [val[0] for val in mol_table[0]]
coords = [[float(val[1]), float(val[2]), float(val[3])] for val in mol_table[0]]
if charge is None:
mol = Molecule(species=species, coords=coords)
else:
mol = Molecule(species=species, coords=coords, charge=charge, spin_multiplicity=spin_mult)
return mol
    @staticmethod
    def read_rem(string: str) -> Dict:
        """
        Parse rem from string.

        Args:
            string (str): String

        Returns:
            (dict) rem
        """
        # "key = value" with the "=" optional. Unlike the other readers
        # this does not guard against a missing section: find_sections()
        # already guarantees $rem exists before from_string() calls this.
        header = r"^\s*\$rem"
        row = r"\s*([a-zA-Z\_]+)\s*=?\s*(\S+)"
        footer = r"^\s*\$end"
        rem_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
        return dict(rem_table[0])
@staticmethod
def read_opt(string: str) -> Dict[str, List]:
"""
Read opt section from string.
Args:
string (str): String
Returns:
(dict) Opt section
"""
patterns = {
"CONSTRAINT": r"^\s*CONSTRAINT",
"FIXED": r"^\s*FIXED",
"DUMMY": r"^\s*DUMMY",
"CONNECT": r"^\s*CONNECT",
}
opt_matches = read_pattern(string, patterns)
opt_sections = list(opt_matches.keys())
opt = {}
if "CONSTRAINT" in opt_sections:
c_header = r"^\s*CONSTRAINT\n"
c_row = r"(\w.*)\n"
c_footer = r"^\s*ENDCONSTRAINT\n"
c_table = read_table_pattern(string, header_pattern=c_header, row_pattern=c_row, footer_pattern=c_footer)
opt["CONSTRAINT"] = [val[0] for val in c_table[0]]
if "FIXED" in opt_sections:
f_header = r"^\s*FIXED\n"
f_row = r"(\w.*)\n"
f_footer = r"^\s*ENDFIXED\n"
f_table = read_table_pattern(
string,
header_pattern=f_header,
row_pattern=f_row,
footer_pattern=f_footer,
)
opt["FIXED"] = [val[0] for val in f_table[0]]
if "DUMMY" in opt_sections:
d_header = r"^\s*DUMMY\n"
d_row = r"(\w.*)\n"
d_footer = r"^\s*ENDDUMMY\n"
d_table = read_table_pattern(
string,
header_pattern=d_header,
row_pattern=d_row,
footer_pattern=d_footer,
)
opt["DUMMY"] = [val[0] for val in d_table[0]]
if "CONNECT" in opt_sections:
cc_header = r"^\s*CONNECT\n"
cc_row = r"(\w.*)\n"
cc_footer = r"^\s*ENDCONNECT\n"
cc_table = read_table_pattern(
string,
header_pattern=cc_header,
row_pattern=cc_row,
footer_pattern=cc_footer,
)
opt["CONNECT"] = [val[0] for val in cc_table[0]]
return opt
@staticmethod
def read_pcm(string: str) -> Dict:
"""
Read pcm parameters from string.
Args:
string (str): String
Returns:
(dict) PCM parameters
"""
header = r"^\s*\$pcm"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
pcm_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if not pcm_table:
print("No valid PCM inputs found. Note that there should be no '=' chracters in PCM input lines.")
return {}
return dict(pcm_table[0])
@staticmethod
def read_vdw(string: str) -> Tuple[str, Dict]:
"""
Read van der Waals parameters from string.
Args:
string (str): String
Returns:
(str, dict) vdW mode ('atomic' or 'sequential') and dict of van der Waals radii.
"""
header = r"^\s*\$van_der_waals"
row = r"[^\d]*(\d+).?(\d+.\d+)?.*"
footer = r"^\s*\$end"
vdw_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if not vdw_table:
print("No valid vdW inputs found. Note that there should be no '=' chracters in vdW input lines.")
return "", {}
if vdw_table[0][0][0] == 2:
mode = "sequential"
else:
mode = "atomic"
return mode, dict(vdw_table[0][1:])
@staticmethod
def read_solvent(string: str) -> Dict:
"""
Read solvent parameters from string.
Args:
string (str): String
Returns:
(dict) Solvent parameters
"""
header = r"^\s*\$solvent"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
solvent_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if not solvent_table:
print("No valid solvent inputs found. Note that there should be no '=' chracters in solvent input lines.")
return {}
return dict(solvent_table[0])
@staticmethod
def read_smx(string: str) -> Dict:
"""
Read smx parameters from string.
Args:
string (str): String
Returns:
(dict) SMX parameters.
"""
header = r"^\s*\$smx"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
smx_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if not smx_table:
print("No valid smx inputs found. Note that there should be no '=' chracters in smx input lines.")
return {}
smx = {}
for key, val in smx_table[0]:
smx[key] = val
if smx["solvent"] == "tetrahydrofuran":
smx["solvent"] = "thf"
return smx
    @staticmethod
    def read_scan(string: str) -> Dict[str, List]:
        """
        Read scan section from a string.

        Args:
            string: String to be parsed

        Returns:
            Dict representing the Q-Chem scan section, always with all three
            keys "stre", "bend" and "tors" (possibly empty lists), or {}
            when no parsable $scan section exists.

        Raises:
            ValueError: when more than two scan variables are given in total.
        """
        header = r"^\s*\$scan"
        row = r"\s*(stre|bend|tors|STRE|BEND|TORS)\s+((?:[\-\.0-9]+\s*)+)"
        footer = r"^\s*\$end"
        scan_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
        if scan_table == []:
            print("No valid scan inputs found. Note that there should be no '=' chracters in scan input lines.")
            return {}
        stre = []
        bend = []
        tors = []
        # Bucket each captured row by variable type, case-insensitively,
        # stripping the trailing whitespace/newline swallowed by the regex.
        for row in scan_table[0]:
            if row[0].lower() == "stre":
                stre.append(row[1].replace("\n", "").rstrip())
            elif row[0].lower() == "bend":
                bend.append(row[1].replace("\n", "").rstrip())
            elif row[0].lower() == "tors":
                tors.append(row[1].replace("\n", "").rstrip())
        if len(stre) + len(bend) + len(tors) > 2:
            raise ValueError("No more than two variables are allows in the scan section!")
        return {"stre": stre, "bend": bend, "tors": tors}
@staticmethod
def read_plots(string: str) -> Dict:
"""
Read plots parameters from string.
Args:
string (str): String
Returns:
(dict) plots parameters.
"""
header = r"^\s*\$plots"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
plots_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if plots_table == []:
print("No valid plots inputs found. Note that there should be no '=' chracters in plots input lines.")
return {}
plots = {}
for key, val in plots_table[0]:
plots[key] = val
return plots
@staticmethod
def read_nbo(string: str) -> Dict:
"""
Read nbo parameters from string.
Args:
string (str): String
Returns:
(dict) nbo parameters.
"""
header = r"^\s*\$nbo"
row = r"\s*([a-zA-Z\_]+)\s*=?\s*(\S+)"
footer = r"^\s*\$end"
nbo_table = read_table_pattern(string, header_pattern=header, row_pattern=row, footer_pattern=footer)
if nbo_table == []:
print("No valid nbo inputs found.")
return {}
nbo = {}
for key, val in nbo_table[0]:
nbo[key] = val
return nbo
|
vorwerkc/pymatgen
|
pymatgen/io/qchem/inputs.py
|
Python
|
mit
| 28,528
|
[
"Q-Chem",
"pymatgen"
] |
7ffdda08f1cf5e3e76d0cc216e4adc948c2850650a82c8f7abb715e8579cc13d
|
"""
Tests for ORA (Open Response Assessment) through the LMS UI.
"""
import json
from unittest import skip
from bok_choy.promise import Promise, BrokenPromise
from ..pages.lms.peer_confirm import PeerConfirmPage
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.course_info import CourseInfoPage
from ..pages.lms.tab_nav import TabNavPage
from ..pages.lms.course_nav import CourseNavPage
from ..pages.lms.open_response import OpenResponsePage
from ..pages.lms.peer_grade import PeerGradePage
from ..pages.lms.peer_calibrate import PeerCalibratePage
from ..pages.lms.progress import ProgressPage
from ..fixtures.course import XBlockFixtureDesc, CourseFixture
from ..fixtures.xqueue import XQueueResponseFixture
from .helpers import load_data_str, UniqueCourseTest
class OpenResponseTest(UniqueCourseTest):
    """
    Tests that interact with ORA (Open Response Assessment) through the LMS UI.
    This base class sets up a course with open response problems and defines
    some helper functions used in the ORA tests.
    """
    # Grade response (dict) to return from the XQueue stub
    # in response to our unique submission text.
    # Subclasses override this to simulate AI / instructor / peer grading;
    # None means the stub sends no grade back.
    XQUEUE_GRADE_RESPONSE = None
    def setUp(self):
        """
        Install a test course with ORA problems.
        Always start in the subsection with open response problems.
        """
        super(OpenResponseTest, self).setUp()
        # Create page objects
        self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)
        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.tab_nav = TabNavPage(self.browser)
        self.course_nav = CourseNavPage(self.browser)
        self.open_response = OpenResponsePage(self.browser)
        self.peer_grade = PeerGradePage(self.browser)
        self.peer_calibrate = PeerCalibratePage(self.browser)
        self.peer_confirm = PeerConfirmPage(self.browser)
        self.progress_page = ProgressPage(self.browser, self.course_id)
        # Configure the test course
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # Create a unique name for the peer assessed problem. This will show up
        # in the list of peer problems, which is shared among tests running
        # in parallel; it needs to be unique so we can find it.
        # It's also important that the problem has "Peer" in the name; otherwise,
        # the ORA stub will ignore it.
        self.peer_problem_name = "Peer-Assessed {}".format(self.unique_id[0:6])
        # One unit per assessment type: self, AI, and peer, plus the
        # peer-grading interface module.
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc(
                            'combinedopenended',
                            'Self-Assessed',
                            data=load_data_str('ora_self_problem.xml'),
                            metadata={
                                'graded': True,
                            },
                        ),
                    ),
                    XBlockFixtureDesc('vertical', 'Test Unit 2').add_children(
                        XBlockFixtureDesc(
                            'combinedopenended',
                            'AI-Assessed',
                            data=load_data_str('ora_ai_problem.xml'),
                            metadata={
                                'graded': True,
                            },
                        ),
                    ),
                    XBlockFixtureDesc('vertical', 'Test Unit 3').add_children(
                        XBlockFixtureDesc(
                            'combinedopenended',
                            self.peer_problem_name,
                            data=load_data_str('ora_peer_problem.xml'),
                            metadata={
                                'graded': True,
                            },
                        ),
                    ),
                    # This is the interface a student can use to grade his/her peers
                    XBlockFixtureDesc('peergrading', 'Peer Module'),
                )
            )
        ).install()
        # Configure the XQueue stub's response for the text we will submit
        # The submission text is unique so we can associate each response with a particular test case.
        self.submission = "Test submission " + self.unique_id[0:4]
        if self.XQUEUE_GRADE_RESPONSE is not None:
            XQueueResponseFixture(self.submission, self.XQUEUE_GRADE_RESPONSE).install()
        # Log in and navigate to the essay problems
        self.auth_page.visit()
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')
    def submit_essay(self, expected_assessment_type, expected_prompt):
        """
        Submit an essay and verify that the problem uses
        the `expected_assessment_type` ("self", "ai", or "peer") and
        shows the `expected_prompt` (a string).
        """
        # Check the assessment type and prompt
        self.assertEqual(self.open_response.assessment_type, expected_assessment_type)
        self.assertIn(expected_prompt, self.open_response.prompt)
        # Enter a submission, which will trigger a pre-defined response from the XQueue stub.
        self.open_response.set_response(self.submission)
        # Save the response and expect some UI feedback
        self.open_response.save_response()
        self.assertEqual(
            self.open_response.alert_message,
            "Answer saved, but not yet submitted."
        )
        # Submit the response
        self.open_response.submit_response()
    def get_asynch_feedback(self, assessment_type):
        """
        Wait for and retrieve asynchronous feedback
        (e.g. from AI, instructor, or peer grading)
        `assessment_type` is either "ai" or "peer".
        Returns the rubric feedback list once it becomes available.
        """
        # Because the check function involves fairly complicated actions
        # (navigating through several screens), we give it more time to complete
        # than the default.
        return Promise(
            self._check_feedback_func(assessment_type),
            'Got feedback for {0} problem'.format(assessment_type),
            timeout=600, try_interval=5
        ).fulfill()
    def _check_feedback_func(self, assessment_type):
        """
        Navigate away from, then return to, the peer problem to
        receive updated feedback.
        The returned function will return a tuple `(is_success, rubric_feedback)`,
        `is_success` is True iff we have received feedback for the problem;
        `rubric_feedback` is a list of "correct" or "incorrect" strings.
        """
        if assessment_type == 'ai':
            section_name = 'AI-Assessed'
        elif assessment_type == 'peer':
            section_name = self.peer_problem_name
        else:
            raise ValueError('Assessment type not recognized.  Must be either "ai" or "peer"')
        def _inner_check():
            # Navigating to another sequential and back forces the page
            # (and hence the rubric feedback) to reload.
            self.course_nav.go_to_sequential('Self-Assessed')
            self.course_nav.go_to_sequential(section_name)
            try:
                feedback = self.open_response.rubric.feedback
            # Unsuccessful if the rubric hasn't loaded
            except BrokenPromise:
                return False, None
            # Successful if `feedback` is a non-empty list
            else:
                return bool(feedback), feedback
        return _inner_check
class SelfAssessmentTest(OpenResponseTest):
    """
    Test ORA self-assessment.
    """
    def test_self_assessment(self):
        """
        Given I am viewing a self-assessment problem
        When I submit an essay and complete a self-assessment rubric
        Then I see a scored rubric
        And I see my score in the progress page.
        """
        # Go to the self-assessed problem and submit an essay response.
        self.course_nav.go_to_sequential('Self-Assessed')
        self.submit_essay('self', 'Censorship in the Libraries')
        # Score ourselves on the two rubric categories, then check the
        # per-category feedback marks that come back.
        self_rubric = self.open_response.rubric
        self.assertEqual(self_rubric.categories, ["Writing Applications", "Language Conventions"])
        self_rubric.set_scores([0, 1])
        self_rubric.submit('self')
        self.assertEqual(self_rubric.feedback, ['incorrect', 'correct'])
        # The progress page should show 1/2 for the self-assessment we just
        # completed, and 0/2 for the unanswered AI and peer assessments.
        self.progress_page.visit()
        scores = self.progress_page.scores('Test Section', 'Test Subsection')
        self.assertEqual(scores, [(1, 2), (0, 2), (0, 2)])
class AIAssessmentTest(OpenResponseTest):
    """
    Test ORA AI-assessment.
    """
    # Stubbed XQueue grade: one point from the machine ('BC') grader,
    # with a rubric yielding one incorrect and one correct category.
    XQUEUE_GRADE_RESPONSE = {
        'score': 1,
        'feedback': json.dumps({"spelling": "Ok.", "grammar": "Ok.", "markup_text": "NA"}),
        'grader_type': 'BC',
        'success': True,
        'grader_id': 1,
        'submission_id': 1,
        'rubric_scores_complete': True,
        'rubric_xml': load_data_str('ora_rubric.xml')
    }
    @skip('Intermittently failing, see ORA-342')
    def test_ai_assessment(self):
        """
        Given I am viewing an AI-assessment problem that has a trained ML model
        When I submit an essay and wait for a response
        Then I see a scored rubric
        And I see my score in the progress page.
        """
        # Navigate to the AI-assessment problem and submit an essay
        self.course_nav.go_to_sequential('AI-Assessed')
        self.submit_essay('ai', 'Censorship in the Libraries')
        # Refresh the page to get the updated feedback
        # then verify that we get the feedback sent by our stub XQueue implementation
        self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])
        # Verify the progress page
        self.progress_page.visit()
        scores = self.progress_page.scores('Test Section', 'Test Subsection')
        # First score is the self-assessment score, which we haven't answered, so it's 0/2
        # Second score is the AI-assessment score, which we have answered, so it's 1/2
        # Third score is peer-assessment, which we haven't answered, so it's 0/2
        self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class InstructorAssessmentTest(OpenResponseTest):
    """
    Test an AI-assessment that has been graded by an instructor.
    This runs the same test as the AI-assessment test, except
    that the feedback comes from an instructor instead of the machine grader.
    From the student's perspective, it should look the same.
    """
    # Same shape as the AI response, but grader_type 'IN' marks the grade
    # as coming from an instructor.
    XQUEUE_GRADE_RESPONSE = {
        'score': 1,
        'feedback': json.dumps({"feedback": "Good job!"}),
        'grader_type': 'IN',
        'success': True,
        'grader_id': 1,
        'submission_id': 1,
        'rubric_scores_complete': True,
        'rubric_xml': load_data_str('ora_rubric.xml')
    }
    @skip('Intermittently failing, see ORA-342')
    def test_instructor_assessment(self):
        """
        Given an instructor has graded my submission
        When I view my submission
        Then I see a scored rubric
        And my progress page shows the problem score.
        """
        # Navigate to the AI-assessment problem and submit an essay
        # We have configured the stub to simulate that this essay will be staff-graded
        self.course_nav.go_to_sequential('AI-Assessed')
        self.submit_essay('ai', 'Censorship in the Libraries')
        # Refresh the page to get the updated feedback
        # then verify that we get the feedback sent by our stub XQueue implementation
        self.assertEqual(self.get_asynch_feedback('ai'), ['incorrect', 'correct'])
        # Verify the progress page
        self.progress_page.visit()
        scores = self.progress_page.scores('Test Section', 'Test Subsection')
        # First score is the self-assessment score, which we haven't answered, so it's 0/2
        # Second score is the AI-assessment score, which we have answered, so it's 1/2
        # Third score is peer-assessment, which we haven't answered, so it's 0/2
        self.assertEqual(scores, [(0, 2), (1, 2), (0, 2)])
class PeerAssessmentTest(OpenResponseTest):
    """
    Test ORA peer-assessment, including calibration and giving/receiving scores.
    """
    # Unlike other assessment types, peer assessment has multiple scores:
    # one list entry per peer grader (the stub simulates three graders).
    XQUEUE_GRADE_RESPONSE = {
        'score': [2, 2, 2],
        'feedback': [json.dumps({"feedback": ""})] * 3,
        'grader_type': 'PE',
        'success': True,
        'grader_id': [1, 2, 3],
        'submission_id': 1,
        'rubric_scores_complete': [True, True, True],
        'rubric_xml': [load_data_str('ora_rubric.xml')] * 3
    }
    def test_peer_calibrate_and_grade(self):
        """
        Given I am viewing a peer-assessment problem
        And the instructor has submitted enough example essays
        When I submit acceptable scores for enough calibration essays
        Then I am able to peer-grade other students' essays.
        Given I have submitted an essay for peer-assessment
        And I have peer-graded enough students essays
        And enough other students have scored my essay
        Then I can view the scores and written feedback
        And I see my score in the progress page.
        """
        # Initially, the student should NOT be able to grade peers,
        # because he/she hasn't submitted any essays.
        self.course_nav.go_to_sequential('Peer Module')
        self.assertIn("You currently do not have any peer grading to do", self.peer_calibrate.message)
        # Submit an essay
        self.course_nav.go_to_sequential(self.peer_problem_name)
        self.submit_essay('peer', 'Censorship in the Libraries')
        # Need to reload the page to update the peer grading module
        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')
        self.course_nav.go_to_section('Test Section', 'Test Subsection')
        # Select the problem to calibrate
        self.course_nav.go_to_sequential('Peer Module')
        self.assertIn(self.peer_problem_name, self.peer_grade.problem_list)
        self.peer_grade.select_problem(self.peer_problem_name)
        # Calibrate: score an instructor-provided example essay so the
        # system will trust this student as a grader.
        self.peer_confirm.start(is_calibrating=True)
        rubric = self.peer_calibrate.rubric
        self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
        rubric.set_scores([0, 1])
        rubric.submit('peer')
        self.peer_calibrate.continue_to_grading()
        # Grade a peer
        self.peer_confirm.start()
        rubric = self.peer_grade.rubric
        self.assertEqual(rubric.categories, ["Writing Applications", "Language Conventions"])
        rubric.set_scores([0, 1])
        rubric.submit()
        # Expect to receive essay feedback
        # We receive feedback from all three peers, each of which
        # provide 2 scores (one for each rubric item)
        # Written feedback is a dummy value sent by the XQueue stub.
        self.course_nav.go_to_sequential(self.peer_problem_name)
        self.assertEqual(self.get_asynch_feedback('peer'), ['incorrect', 'correct'] * 3)
        # Verify the progress page
        self.progress_page.visit()
        scores = self.progress_page.scores('Test Section', 'Test Subsection')
        # First score is the self-assessment score, which we haven't answered, so it's 0/2
        # Second score is the AI-assessment score, which we haven't answered, so it's 0/2
        # Third score is peer-assessment, which we have answered, so it's 2/2
        self.assertEqual(scores, [(0, 2), (0, 2), (2, 2)])
|
martynovp/edx-platform
|
common/test/acceptance/tests/test_ora.py
|
Python
|
agpl-3.0
| 16,016
|
[
"VisIt"
] |
79c389000b358484453fb49ee37292df1359bd5d48ae00a432e33e3b794473a5
|
"""
Provides factory methods to assemble the Galaxy web application
"""
import logging, atexit
import os, os.path
from inspect import isclass
from paste.request import parse_formvars
from paste.util import import_string
from paste import httpexceptions
from paste.deploy.converters import asbool
import flup.middleware.session as flup_session
import pkg_resources
log = logging.getLogger( __name__ )
from galaxy import config, jobs, util, tools
import galaxy.model
import galaxy.model.mapping
import galaxy.datatypes.registry
import galaxy.web.framework
def add_controllers( webapp, app ):
    """
    Search for controllers in the 'galaxy.web.controllers' module and add
    them to the webapp.

    `webapp` is the WebApplication to register controllers on; `app` is the
    Galaxy application instance passed to each controller's constructor.
    Modules whose filenames start with an underscore are skipped; modules
    that raise ControllerUnavailable on import are logged and skipped.
    """
    from galaxy.web.base.controller import BaseController
    from galaxy.web.base.controller import ControllerUnavailable
    import galaxy.web.controllers
    controller_dir = galaxy.web.controllers.__path__[0]
    for fname in os.listdir( controller_dir ):
        if not( fname.startswith( "_" ) ) and fname.endswith( ".py" ):
            name = fname[:-3]
            module_name = "galaxy.web.controllers." + name
            try:
                module = __import__( module_name )
            except ControllerUnavailable, exc:
                log.debug("%s could not be loaded: %s" % (module_name, str(exc)))
                continue
            # __import__ returns the top-level package; walk down the dotted
            # path to reach the actual controller module.
            for comp in module_name.split( "." )[1:]:
                module = getattr( module, comp )
            # Look for a controller inside the modules
            # (any BaseController subclass, excluding the base class itself)
            for key in dir( module ):
                T = getattr( module, key )
                if isclass( T ) and T is not BaseController and issubclass( T, BaseController ):
                    webapp.add_controller( name, T( app ) )
def app_factory( global_conf, **kwargs ):
    """
    Return a wsgi application serving the root object

    `global_conf` is the paste global configuration dict; remaining keyword
    arguments come from the application section of the config file.  A
    pre-built Galaxy application may be supplied via the `app` keyword.
    """
    # Create the Galaxy application unless passed in
    if 'app' in kwargs:
        app = kwargs.pop( 'app' )
    else:
        try:
            from galaxy.app import UniverseApplication
            app = UniverseApplication( global_conf = global_conf, **kwargs )
        except:
            # Startup failure is fatal: print the traceback and exit.
            import traceback, sys
            traceback.print_exc()
            sys.exit( 1 )
    atexit.register( app.shutdown )
    # Create the universe WSGI application
    webapp = galaxy.web.framework.WebApplication( app, session_cookie='galaxysession' )
    add_controllers( webapp, app )
    # Force /history to go to /root/history -- needed since the tests assume this
    webapp.add_route( '/history', controller='root', action='history' )
    # These two routes handle our simple needs at the moment
    webapp.add_route( '/async/:tool_id/:data_id/:data_secret', controller='async', action='index', tool_id=None, data_id=None, data_secret=None )
    webapp.add_route( '/:controller/:action', action='index' )
    webapp.add_route( '/:action', controller='root', action='index' )
    webapp.add_route( '/datasets/:dataset_id/:action/:filename', controller='dataset', action='index', dataset_id=None, filename=None)
    webapp.finalize_config()
    # Wrap the webapp in some useful middleware
    if kwargs.get( 'middleware', True ):
        webapp = wrap_in_middleware( webapp, global_conf, **kwargs )
    if kwargs.get( 'static_enabled', True ):
        webapp = wrap_in_static( webapp, global_conf, **kwargs )
    # Close any pooled database connections before forking
    try:
        galaxy.model.mapping.metadata.engine.connection_provider._pool.dispose()
    except:
        # Best effort -- pool internals differ between SQLAlchemy versions.
        pass
    # Return
    return webapp
def wrap_in_middleware( app, global_conf, **local_conf ):
    """
    Based on the configuration wrap `app` in a set of common and useful
    middleware.

    NOTE: wrapping order matters -- the first middleware applied here is the
    innermost (closest to the application), the last is the outermost.
    Returns the wrapped WSGI application.
    """
    # Merge the global and local configurations
    conf = global_conf.copy()
    conf.update(local_conf)
    debug = asbool( conf.get( 'debug', False ) )
    # First put into place httpexceptions, which must be most closely
    # wrapped around the application (it can interact poorly with
    # other middleware):
    app = httpexceptions.make_middleware( app, conf )
    log.debug( "Enabling 'httpexceptions' middleware" )
    # If we're using remote_user authentication, add middleware that
    # protects Galaxy from improperly configured authentication in the
    # upstream server
    if asbool(conf.get( 'use_remote_user', False )):
        from galaxy.web.framework.middleware.remoteuser import RemoteUser
        app = RemoteUser( app, maildomain=conf.get( 'remote_user_maildomain', None ), ucsc_display_sites=conf.get( 'ucsc_display_sites', [] ) )
        log.debug( "Enabling 'remote user' middleware" )
    # The recursive middleware allows for including requests in other
    # requests or forwarding of requests, all on the server side.
    if asbool(conf.get('use_recursive', True)):
        from paste import recursive
        app = recursive.RecursiveMiddleware( app, conf )
        log.debug( "Enabling 'recursive' middleware" )
    ## # Session middleware puts a session factory into the environment
    ## if asbool( conf.get( 'use_session', True ) ):
    ##     store = flup_session.MemorySessionStore()
    ##     app = flup_session.SessionMiddleware( store, app )
    ##     log.debug( "Enabling 'flup session' middleware" )
    # Beaker session middleware
    if asbool( conf.get( 'use_beaker_session', False ) ):
        pkg_resources.require( "Beaker" )
        import beaker.session
        app = beaker.session.SessionMiddleware( app, conf )
        log.debug( "Enabling 'beaker session' middleware" )
    # Various debug middleware that can only be turned on if the debug
    # flag is set, either because they are insecure or greatly hurt
    # performance
    if debug:
        # Middleware to check for WSGI compliance
        if asbool( conf.get( 'use_lint', True ) ):
            from paste import lint
            app = lint.make_middleware( app, conf )
            log.debug( "Enabling 'lint' middleware" )
        # Middleware to run the python profiler on each request
        if asbool( conf.get( 'use_profile', False ) ):
            import profile
            app = profile.ProfileMiddleware( app, conf )
            log.debug( "Enabling 'profile' middleware" )
        # Middleware that intercepts print statements and shows them on the
        # returned page
        if asbool( conf.get( 'use_printdebug', True ) ):
            from paste.debug import prints
            app = prints.PrintDebugMiddleware( app, conf )
            log.debug( "Enabling 'print debug' middleware" )
    if debug and asbool( conf.get( 'use_interactive', False ) ):
        # Interactive exception debugging, scary dangerous if publicly
        # accessible, if not enabled we'll use the regular error printing
        # middleware.
        pkg_resources.require( "WebError" )
        from weberror import evalexception
        app = evalexception.EvalException( app, conf,
                                           templating_formatters=build_template_error_formatters() )
        log.debug( "Enabling 'eval exceptions' middleware" )
    else:
        # Not in interactive debug mode, just use the regular error middleware
        from paste.exceptions import errormiddleware
        app = errormiddleware.ErrorMiddleware( app, conf )
        log.debug( "Enabling 'error' middleware" )
    # Transaction logging (apache access.log style)
    if asbool( conf.get( 'use_translogger', True ) ):
        from paste.translogger import TransLogger
        app = TransLogger( app )
        log.debug( "Enabling 'trans logger' middleware" )
    # Config middleware just stores the paste config along with the request,
    # not sure we need this but useful
    from paste.deploy.config import ConfigMiddleware
    app = ConfigMiddleware( app, conf )
    log.debug( "Enabling 'config' middleware" )
    # X-Forwarded-Host handling
    from galaxy.web.framework.middleware.xforwardedhost import XForwardedHostMiddleware
    app = XForwardedHostMiddleware( app )
    log.debug( "Enabling 'x-forwarded-host' middleware" )
    return app
def wrap_in_static( app, global_conf, **local_conf ):
    """
    Wrap `app` in a URLMap that serves Galaxy's static content directly,
    delegating every other path to the dynamic application.
    """
    from paste.urlmap import URLMap
    from galaxy.web.framework.middleware.static import CacheableStaticURLParser as Static
    # Merge the global and local configurations
    conf = global_conf.copy()
    conf.update( local_conf )
    # Optional client-side cache lifetime (in seconds) for static responses
    cache_time = conf.get( "static_cache_time", None )
    if cache_time is not None:
        cache_time = int( cache_time )
    urlmap = URLMap()
    # Anything not matched by the static mappings below falls through
    # to the dynamic app
    urlmap["/"] = app
    # Static mappings, each backed by a directory named in the config
    static_map = [
        ( "/static", "static_dir" ),
        ( "/images", "static_images_dir" ),
        ( "/static/scripts", "static_scripts_dir" ),
        ( "/static/style", "static_style_dir" ),
        ( "/favicon.ico", "static_favicon_dir" ),
    ]
    for url_prefix, conf_key in static_map:
        urlmap[url_prefix] = Static( conf.get( conf_key ), cache_time )
    # URL mapper becomes the root webapp
    return urlmap
def build_template_error_formatters():
    """
    Build a list of template error formatters for WebError. When an error
    occurs, WebError passes the exception to each function in this list
    until one returns a value, which will be displayed on the error page.
    """
    import mako.exceptions
    def mako_html_data( exc_value ):
        # Mako compile/syntax problems render with mako's own HTML template
        if isinstance( exc_value, ( mako.exceptions.CompileException, mako.exceptions.SyntaxException ) ):
            return mako.exceptions.html_error_template().render( full=False, css=False )
        # Undefined template variables surface as AttributeErrors on 'Undefined'
        if isinstance( exc_value, AttributeError ) and exc_value.args[0].startswith( "'Undefined' object has no attribute" ):
            return mako.exceptions.html_error_template().render( full=False, css=False )
    return [ mako_html_data ]
|
dbcls/dbcls-galaxy
|
lib/galaxy/web/buildapp.py
|
Python
|
mit
| 9,983
|
[
"Galaxy"
] |
c89cc3705ea3b55115ea8be3b69e2b0b09752cc4daee4e44c8c4e29e662318ab
|
# -*- coding: utf-8 -*-
"""
Generators for random graphs.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from collections import defaultdict
import itertools
import math
import random
import networkx as nx
from .classic import empty_graph, path_graph, complete_graph
from .degree_seq import degree_sequence_tree
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult (dschult@colgate.edu)'])
__all__ = ['fast_gnp_random_graph',
'gnp_random_graph',
'dense_gnm_random_graph',
'gnm_random_graph',
'erdos_renyi_graph',
'binomial_graph',
'newman_watts_strogatz_graph',
'watts_strogatz_graph',
'connected_watts_strogatz_graph',
'random_regular_graph',
'barabasi_albert_graph',
'powerlaw_cluster_graph',
'duplication_divergence_graph',
'random_lobster',
'random_shell_graph',
'random_powerlaw_tree',
'random_powerlaw_tree_sequence']
#-------------------------------------------------------------------------
# Some Famous Random Graphs
#-------------------------------------------------------------------------
def fast_gnp_random_graph(n, p, seed=None, directed=False):
    """Returns a `G_{n,p}` random graph, also known as an Erdős-Rényi graph or
    a binomial graph.
    Parameters
    ----------
    n : int
        The number of nodes.
    p : float
        Probability for edge creation.
    seed : int, optional
        Seed for random number generator (default=None).
    directed : bool, optional (default=False)
        If ``True``, this function returns a directed graph.
    Returns
    -------
    G : Graph or DiGraph
    Notes
    -----
    The `G_{n,p}` graph algorithm chooses each of the `[n (n - 1)] / 2`
    (undirected) or `n (n - 1)` (directed) possible edges with probability `p`.
    This algorithm runs in `O(n + m)` time, where `m` is the expected number of
    edges, which equals `p n (n - 1) / 2`. This should be faster than
    :func:`gnp_random_graph` when `p` is small and the expected number of edges
    is small (that is, the graph is sparse).
    See Also
    --------
    gnp_random_graph
    References
    ----------
    .. [1] Vladimir Batagelj and Ulrik Brandes,
       "Efficient generation of large random networks",
       Phys. Rev. E, 71, 036113, 2005.
    """
    G = empty_graph(n)
    G.name="fast_gnp_random_graph(%s,%s)"%(n,p)
    if not seed is None:
        random.seed(seed)
    # Degenerate probabilities: fall back to gnp_random_graph, which
    # returns the empty or complete graph directly.
    if p <= 0 or p >= 1:
        return nx.gnp_random_graph(n,p,directed=directed)
    w = -1
    lp = math.log(1.0 - p)
    if directed:
        # Converting preserves graph attributes (including the name above).
        G = nx.DiGraph(G)
        # Nodes in graph are from 0,n-1 (start with v as the first node index).
        v = 0
        while v < n:
            # Geometric skip (Batagelj & Brandes): jump ahead over the run
            # of candidate edges that are NOT created.
            lr = math.log(1.0 - random.random())
            w = w + 1 + int(lr/lp)
            if v == w: # avoid self loops
                w = w + 1
            while  w >= n and v < n:
                w = w - n
                v = v + 1
                if v == w: # avoid self loops
                    w = w + 1
            if v < n:
                G.add_edge(v, w)
    else:
        # Nodes in graph are from 0,n-1 (start with v as the second node index).
        v = 1
        while v < n:
            # Same geometric skip over the lower triangle of the
            # adjacency matrix (w < v enumerates each pair once).
            lr = math.log(1.0 - random.random())
            w = w + 1 + int(lr/lp)
            while w >= v and v < n:
                w = w - v
                v = v + 1
            if v < n:
                G.add_edge(v, w)
    return G
def gnp_random_graph(n, p, seed=None, directed=False):
    """Returns a `G_{n,p}` random graph, also known as an Erdős-Rényi graph or
    a binomial graph.
    The `G_{n,p}` model chooses each of the possible edges with probability
    ``p``.
    The functions :func:`binomial_graph` and :func:`erdos_renyi_graph` are
    aliases of this function.
    Parameters
    ----------
    n : int
        The number of nodes.
    p : float
        Probability for edge creation.
    seed : int, optional
        Seed for random number generator (default=None).
    directed : bool, optional (default=False)
        If ``True``, this function returns a directed graph.
    See Also
    --------
    fast_gnp_random_graph
    Notes
    -----
    This algorithm runs in `O(n^2)` time.  For sparse graphs (that is, for
    small values of `p`), :func:`fast_gnp_random_graph` is a faster algorithm.
    References
    ----------
    .. [1] P. Erdős and A. Rényi, On Random Graphs, Publ. Math. 6, 290 (1959).
    .. [2] E. N. Gilbert, Random Graphs, Ann. Math. Stat., 30, 1141 (1959).
    """
    G = nx.DiGraph() if directed else nx.Graph()
    G.add_nodes_from(range(n))
    G.name="gnp_random_graph(%s,%s)"%(n,p)
    # Degenerate probabilities short-circuit to the empty / complete graph.
    if p <= 0:
        return G
    if p >= 1:
        return complete_graph(n,create_using=G)
    if seed is not None:
        random.seed(seed)
    # Enumerate every candidate edge exactly once: ordered pairs for
    # directed graphs, unordered pairs otherwise.
    if G.is_directed():
        candidates = itertools.permutations(range(n), 2)
    else:
        candidates = itertools.combinations(range(n), 2)
    # One Bernoulli trial per candidate edge, in enumeration order
    # (preserves the RNG call sequence of the straightforward loop).
    G.add_edges_from(e for e in candidates if random.random() < p)
    return G
# add some aliases to common names
# binomial_graph and erdos_renyi_graph are provided purely for
# discoverability; both are exactly gnp_random_graph.
binomial_graph=gnp_random_graph
erdos_renyi_graph=gnp_random_graph
def dense_gnm_random_graph(n, m, seed=None):
    """Returns a `G_{n,m}` random graph.
    In the `G_{n,m}` model, a graph is chosen uniformly at random from the set
    of all graphs with `n` nodes and `m` edges.
    This algorithm should be faster than :func:`gnm_random_graph` for dense
    graphs.
    Parameters
    ----------
    n : int
        The number of nodes.
    m : int
        The number of edges.
    seed : int, optional
        Seed for random number generator (default=None).
    See Also
    --------
    gnm_random_graph()
    Notes
    -----
    Algorithm by Keith M. Briggs Mar 31, 2006.
    Inspired by Knuth's Algorithm S (Selection sampling technique),
    in section 3.4.2 of [1]_.
    References
    ----------
    .. [1] Donald E. Knuth, The Art of Computer Programming,
       Volume 2/Seminumerical algorithms, Third Edition, Addison-Wesley, 1997.
    """
    # Use floor division: under Python 3 true division yields a float here,
    # and random.randrange below requires integer bounds (identical result
    # under Python 2, where n*(n-1) is always even anyway).
    mmax=n*(n-1)//2
    if m>=mmax:
        G=complete_graph(n)
    else:
        G=empty_graph(n)
    G.name="dense_gnm_random_graph(%s,%s)"%(n,m)
    if n==1 or m>=mmax:
        return G
    if seed is not None:
        random.seed(seed)
    u=0
    v=1
    t=0  # number of candidate pairs examined so far
    k=0  # number of edges selected so far
    while True:
        # Knuth's selection sampling: accept the current pair (u, v)
        # with probability (m - k) / (mmax - t).
        if random.randrange(mmax-t)<m-k:
            G.add_edge(u,v)
            k+=1
            if k==m: return G
        t+=1
        v+=1
        if v==n: # go to next row of adjacency matrix
            u+=1
            v=u+1
def gnm_random_graph(n, m, seed=None, directed=False):
    """Returns a `G_{n,m}` random graph.
    In the `G_{n,m}` model, a graph is chosen uniformly at random from the set
    of all graphs with `n` nodes and `m` edges.
    This algorithm should be faster than :func:`dense_gnm_random_graph` for
    sparse graphs.
    Parameters
    ----------
    n : int
        The number of nodes.
    m : int
        The number of edges.
    seed : int, optional
        Seed for random number generator (default=None).
    directed : bool, optional (default=False)
        If True return a directed graph
    See also
    --------
    dense_gnm_random_graph
    """
    if directed:
        G=nx.DiGraph()
    else:
        G=nx.Graph()
    G.add_nodes_from(range(n))
    G.name="gnm_random_graph(%s,%s)"%(n,m)
    if seed is not None:
        random.seed(seed)
    if n==1:
        return G
    max_edges=n*(n-1)
    if not directed:
        # Undirected graphs have half as many possible edges; float is
        # fine here since max_edges is only used in the comparison below.
        max_edges/=2.0
    if m>=max_edges:
        return complete_graph(n,create_using=G)
    nlist=G.nodes()
    edge_count=0
    # Rejection sampling: draw endpoint pairs until m distinct,
    # non-self-loop edges have been added.
    while edge_count < m:
        # generate random edge,u,v
        u = random.choice(nlist)
        v = random.choice(nlist)
        if u==v or G.has_edge(u,v):
            continue
        else:
            G.add_edge(u,v)
            edge_count=edge_count+1
    return G
def newman_watts_strogatz_graph(n, k, p, seed=None):
    """Return a Newman–Watts–Strogatz small-world graph.
    Parameters
    ----------
    n : int
        The number of nodes.
    k : int
        Each node is joined with its ``k`` nearest neighbors in a ring
        topology.
    p : float
        The probability of adding a new edge for each edge.
    seed : int, optional
        The seed for the random number generator (the default is ``None``).
    Notes
    -----
    First create a ring over ``n`` nodes.  Then each node in the ring is
    connected with its ``k`` nearest neighbors (or ``k - 1`` neighbors if ``k``
    is odd).  Then shortcuts are created by adding new edges as follows: for
    each edge ``(u, v)`` in the underlying "``n``-ring with ``k`` nearest
    neighbors" with probability ``p`` add a new edge ``(u, w)`` with
    randomly-chosen existing node ``w``.  In contrast with
    :func:`watts_strogatz_graph`, no edges are removed.
    See Also
    --------
    watts_strogatz_graph()
    References
    ----------
    .. [1] M. E. J. Newman and D. J. Watts,
       Renormalization group analysis of the small-world network model,
       Physics Letters A, 263, 341, 1999.
       http://dx.doi.org/10.1016/S0375-9601(99)00757-4
    """
    if seed is not None:
        random.seed(seed)
    if k>=n:
        raise nx.NetworkXError("k>=n, choose smaller k or larger n")
    G=empty_graph(n)
    G.name="newman_watts_strogatz_graph(%s,%s,%s)"%(n,k,p)
    nlist = G.nodes()
    fromv = nlist
    # connect the k/2 neighbors
    for j in range(1, k // 2+1):
        tov = fromv[j:] + fromv[0:j] # the first j are now last
        for i in range(len(fromv)):
            G.add_edge(fromv[i], tov[i])
    # for each edge u-v, with probability p, randomly select existing
    # node w and add new edge u-w
    e = G.edges()
    for (u, v) in e:
        if random.random() < p:
            w = random.choice(nlist)
            # no self-loops and reject if edge u-w exists
            # is that the correct NWS model?
            while w == u or G.has_edge(u, w):
                w = random.choice(nlist)
                if G.degree(u) >= n-1:
                    break # skip this rewiring
            # while/else: the edge is added only if the loop found an
            # acceptable w (i.e. exited without hitting the break above)
            else:
                G.add_edge(u,w)
    return G
def watts_strogatz_graph(n, k, p, seed=None):
    """Return a Watts–Strogatz small-world graph.
    Parameters
    ----------
    n : int
        The number of nodes
    k : int
        Each node is joined with its ``k`` nearest neighbors in a ring
        topology.
    p : float
        The probability of rewiring each edge
    seed : int, optional
        Seed for random number generator (default=None)
    See Also
    --------
    newman_watts_strogatz_graph()
    connected_watts_strogatz_graph()
    Notes
    -----
    First create a ring over ``n`` nodes.  Then each node in the ring is joined
    to its ``k`` nearest neighbors (or ``k - 1`` neighbors if ``k`` is odd).
    Then shortcuts are created by replacing some edges as follows: for each
    edge ``(u, v)`` in the underlying "``n``-ring with ``k`` nearest neighbors"
    with probability ``p`` replace it with a new edge ``(u, w)`` with uniformly
    random choice of existing node ``w``.
    In contrast with :func:`newman_watts_strogatz_graph`, the random rewiring
    does not increase the number of edges.  The rewired graph is not guaranteed
    to be connected as in :func:`connected_watts_strogatz_graph`.
    References
    ----------
    .. [1] Duncan J. Watts and Steven H. Strogatz,
       Collective dynamics of small-world networks,
       Nature, 393, pp. 440--442, 1998.
    """
    if k>=n:
        raise nx.NetworkXError("k>=n, choose smaller k or larger n")
    if seed is not None:
        random.seed(seed)
    G = nx.Graph()
    G.name="watts_strogatz_graph(%s,%s,%s)"%(n,k,p)
    nodes = list(range(n)) # nodes are labeled 0 to n-1
    # connect each node to k/2 neighbors
    for j in range(1, k // 2+1):
        targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list
        G.add_edges_from(zip(nodes,targets))
    # rewire edges from each node
    # loop over all nodes in order (label) and neighbors in order (distance)
    # no self loops or multiple edges allowed
    for j in range(1, k // 2+1): # outer loop is neighbors
        targets = nodes[j:] + nodes[0:j] # first j nodes are now last in list
        # inner loop in node order
        for u,v in zip(nodes,targets):
            if random.random() < p:
                w = random.choice(nodes)
                # Enforce no self-loops or multiple edges
                while w == u or G.has_edge(u, w):
                    w = random.choice(nodes)
                    if G.degree(u) >= n-1:
                        break # skip this rewiring
                # while/else: rewire only when an acceptable w was found
                # (loop exited normally, without the degree-cap break)
                else:
                    G.remove_edge(u,v)
                    G.add_edge(u,w)
    return G
def connected_watts_strogatz_graph(n, k, p, tries=100, seed=None):
    """Returns a connected Watts-Strogatz small-world graph.

    Repeatedly generates Watts-Strogatz graphs until a connected one
    turns up; raises an exception once ``tries`` attempts have failed.

    Parameters
    ----------
    n : int
        The number of nodes
    k : int
        Each node is joined with its ``k`` nearest neighbors in a ring
        topology.
    p : float
        The probability of rewiring each edge
    tries : int
        Number of attempts to generate a connected graph.
    seed : int, optional
        The seed for random number generator.

    Notes
    -----
    NOTE(review): the same ``seed`` is forwarded to every attempt, so a
    non-None seed reseeds the generator identically on each retry and
    all attempts produce the same graph.

    See Also
    --------
    newman_watts_strogatz_graph()
    watts_strogatz_graph()
    """
    attempt = 0
    while attempt < tries:
        candidate = watts_strogatz_graph(n, k, p, seed)
        if nx.is_connected(candidate):
            return candidate
        attempt += 1
    raise nx.NetworkXError('Maximum number of tries exceeded')
def random_regular_graph(d, n, seed=None):
    r"""Returns a random ``d``-regular graph on ``n`` nodes.

    The resulting graph has no self-loops or parallel edges.

    Parameters
    ----------
    d : int
        The degree of each node.
    n : integer
        The number of nodes. The value of ``n * d`` must be even.
    seed : hashable object
        The seed for random number generator.

    Notes
    -----
    The nodes are numbered from ``0`` to ``n - 1``.

    Kim and Vu's paper [2]_ shows that this algorithm samples in an
    asymptotically uniform way from the space of random graphs when
    `d = O(n^{1 / 3 - \epsilon})`.

    Raises
    ------
    NetworkXError
        If ``n * d`` is odd or ``d`` is greater than or equal to ``n``.

    References
    ----------
    .. [1] A. Steger and N. Wormald,
       Generating random regular graphs quickly,
       Probability and Computing 8 (1999), 377-396, 1999.
       http://citeseer.ist.psu.edu/steger99generating.html
    .. [2] Jeong Han Kim and Van H. Vu,
       Generating random regular graphs,
       Proceedings of the thirty-fifth ACM symposium on Theory of computing,
       San Diego, CA, USA, pp 213--222, 2003.
       http://portal.acm.org/citation.cfm?id=780542.780576
    """
    # Every node contributes d stubs and every edge consumes two, so a
    # d-regular graph on n nodes exists only when n * d is even.
    if (n * d) % 2 != 0:
        raise nx.NetworkXError("n * d must be even")

    if not 0 <= d < n:
        raise nx.NetworkXError("the 0 <= d < n inequality must be satisfied")

    if d == 0:
        return empty_graph(n)

    if seed is not None:
        random.seed(seed)

    def _suitable(edges, potential_edges):
        # Helper subroutine to check if there are suitable edges remaining,
        # i.e. at least one pair of leftover stubs that is neither a
        # self-loop nor a duplicate of an existing edge.
        # If False, the generation of the graph has failed.
        if not potential_edges:
            return True
        for s1 in potential_edges:
            for s2 in potential_edges:
                # Two iterators on the same dictionary are guaranteed
                # to visit it in the same order if there are no
                # intervening modifications.
                if s1 == s2:
                    # Only need to consider s1-s2 pair one time
                    break
                if s1 > s2:
                    s1, s2 = s2, s1
                if (s1, s2) not in edges:
                    return True
        return False

    def _try_creation():
        # Attempt to create an edge set via the pairing model: shuffle the
        # multiset of stubs, pair them off, and retry leftover stubs.
        edges = set()
        stubs = list(range(n)) * d

        while stubs:
            potential_edges = defaultdict(int)
            random.shuffle(stubs)
            stubiter = iter(stubs)
            for s1, s2 in zip(stubiter, stubiter):
                if s1 > s2:
                    s1, s2 = s2, s1
                if s1 != s2 and ((s1, s2) not in edges):
                    edges.add((s1, s2))
                else:
                    # Self-loop or duplicate edge: both stubs return to the pool.
                    potential_edges[s1] += 1
                    potential_edges[s2] += 1

            if not _suitable(edges, potential_edges):
                return None  # failed to find suitable edge set

            stubs = [node for node, potential in potential_edges.items()
                     for _ in range(potential)]
        return edges

    # Even though a suitable edge set exists,
    # the generation of such a set is not guaranteed.
    # Try repeatedly to find one.
    edges = _try_creation()
    while edges is None:
        edges = _try_creation()

    G = nx.Graph()
    G.name = "random_regular_graph(%s, %s)" % (d, n)
    G.add_edges_from(edges)

    return G
def _random_subset(seq,m):
""" Return m unique elements from seq.
This differs from random.sample which can return repeated
elements if seq holds repeated elements.
"""
targets=set()
while len(targets)<m:
x=random.choice(seq)
targets.add(x)
return targets
def barabasi_albert_graph(n, m, seed=None):
    """Returns a random graph according to the Barabási–Albert preferential
    attachment model.

    Growth starts from ``m`` isolated nodes; each subsequent node is
    attached by ``m`` edges whose endpoints are drawn preferentially
    towards high-degree nodes.

    Parameters
    ----------
    n : int
        Number of nodes
    m : int
        Number of edges to attach from a new node to existing nodes
    seed : int, optional
        Seed for random number generator (default=None).

    Returns
    -------
    G : Graph

    Raises
    ------
    NetworkXError
        If ``m`` does not satisfy ``1 <= m < n``.

    References
    ----------
    .. [1] A. L. Barabási and R. Albert "Emergence of scaling in
       random networks", Science 286, pp 509-512, 1999.
    """
    if m < 1 or m >=n:
        raise nx.NetworkXError("Barabási–Albert network must have m >= 1"
                               " and m < n, m = %d, n = %d" % (m, n))
    if seed is not None:
        random.seed(seed)

    # Seed graph: m isolated nodes (m0 in Barabási-speak).
    G = empty_graph(m)
    G.name = "barabasi_albert_graph(%s,%s)" % (n, m)
    # Attachment targets for the first incoming node.
    targets = list(range(m))
    # Every node appears here once per incident edge, so a uniform draw
    # from this pool realizes preferential attachment.
    attachment_pool = []
    for new_node in range(m, n):
        # Wire the new node to the current m targets.
        G.add_edges_from(zip([new_node] * m, targets))
        # Record both endpoints of each fresh edge in the pool.
        attachment_pool.extend(targets)
        attachment_pool.extend([new_node] * m)
        # Draw m distinct targets for the next node, degree-weighted.
        targets = _random_subset(attachment_pool, m)
    return G
def powerlaw_cluster_graph(n, m, p, seed=None):
    """Holme and Kim algorithm for growing graphs with powerlaw
    degree distribution and approximate average clustering.

    Parameters
    ----------
    n : int
        the number of nodes
    m : int
        the number of random edges to add for each new node
    p : float,
        Probability of adding a triangle after adding a random edge
    seed : int, optional
        Seed for random number generator (default=None).

    Notes
    -----
    The average clustering has a hard time getting above a certain
    cutoff that depends on ``m``. This cutoff is often quite low. The
    transitivity (fraction of triangles to possible triangles) seems to
    decrease with network size.

    It is essentially the Barabási–Albert (BA) growth model with an
    extra step that each random edge is followed by a chance of
    making an edge to one of its neighbors too (and thus a triangle).

    This algorithm improves on BA in the sense that it enables a
    higher average clustering to be attained if desired.

    It seems possible to have a disconnected graph with this algorithm
    since the initial ``m`` nodes may not be all linked to a new node
    on the first iteration like the BA model.

    Raises
    ------
    NetworkXError
        If ``m`` does not satisfy ``1 <= m <= n`` or ``p`` does not
        satisfy ``0 <= p <= 1``.

    References
    ----------
    .. [1] P. Holme and B. J. Kim,
       "Growing scale-free networks with tunable clustering",
       Phys. Rev. E, 65, 026107, 2002.
    """
    # The checks enforce 1 <= m <= n and 0 <= p <= 1; the messages now
    # state those constraints correctly (the old one claimed m > 1).
    if m < 1 or n < m:
        raise nx.NetworkXError(
            "NetworkXError must have m>=1 and m<=n, m=%d,n=%d" % (m, n))
    if p > 1 or p < 0:
        raise nx.NetworkXError(
            "NetworkXError p must be in [0,1], p=%f" % (p))
    if seed is not None:
        random.seed(seed)

    G = empty_graph(m)  # add m initial nodes (m0 in barabasi-speak)
    G.name = "Powerlaw-Cluster Graph"
    repeated_nodes = G.nodes()  # list of existing nodes to sample from
    # with nodes repeated once for each adjacent edge
    source = m  # next node is m
    while source < n:  # Now add the other n-1 nodes
        possible_targets = _random_subset(repeated_nodes, m)
        # do one preferential attachment for new node
        target = possible_targets.pop()
        G.add_edge(source, target)
        repeated_nodes.append(target)  # add one node to list for each new link
        count = 1
        while count < m:  # add m-1 more new links
            if random.random() < p:  # clustering step: add triangle
                neighborhood = [nbr for nbr in G.neighbors(target)
                                if not G.has_edge(source, nbr)
                                and not nbr == source]
                if neighborhood:  # if there is a neighbor without a link
                    nbr = random.choice(neighborhood)
                    G.add_edge(source, nbr)  # add triangle
                    repeated_nodes.append(nbr)
                    count = count + 1
                    continue  # go to top of while loop
            # else do preferential attachment step if above fails
            target = possible_targets.pop()
            G.add_edge(source, target)
            repeated_nodes.append(target)
            count = count + 1
        repeated_nodes.extend([source] * m)  # add source node to list m times
        source += 1
    return G
def duplication_divergence_graph(n, p, seed=None):
    """Returns an undirected graph using the duplication-divergence model.

    Starting from a single edge, nodes are repeatedly duplicated; each
    edge of the copied node survives on the replica with probability
    ``p``.  Replicas that retain no edge are discarded and re-drawn.

    Parameters
    ----------
    n : int
        The desired number of nodes in the graph.
    p : float
        The probability for retaining the edge of the replicated node.
    seed : int, optional
        A seed for the random number generator of ``random`` (default=None).

    Returns
    -------
    G : Graph

    Raises
    ------
    NetworkXError
        If `p` is not a valid probability.
        If `n` is less than 2.

    References
    ----------
    .. [1] I. Ispolatov, P. L. Krapivsky, A. Yuryev,
       "Duplication-divergence model of protein interaction network",
       Phys. Rev. E, 71, 061911, 2005.
    """
    if p > 1 or p < 0:
        msg = "NetworkXError p={0} is not in [0,1].".format(p)
        raise nx.NetworkXError(msg)
    if n < 2:
        msg = 'n must be greater than or equal to 2'
        raise nx.NetworkXError(msg)
    if seed is not None:
        random.seed(seed)

    G = nx.Graph()
    G.graph['name'] = "Duplication-Divergence Graph"
    # Seed the process with two connected nodes.
    G.add_edge(0, 1)

    next_label = 2
    while next_label < n:
        # Pick an existing node to duplicate.
        original = random.choice(G.nodes())
        G.add_node(next_label)
        # Divergence: keep each of the original's edges with probability p.
        kept_any = False
        for neighbor in G.neighbors(original):
            if random.random() < p:
                G.add_edge(next_label, neighbor)
                kept_any = True
        if kept_any:
            # Duplication succeeded; move on to the next label.
            next_label += 1
        else:
            # Replica retained no edge -- discard it and try again.
            G.remove_node(next_label)
    return G
def random_lobster(n, p1, p2, seed=None):
    """Returns a random lobster graph.

    A lobster is a tree that reduces to a caterpillar when pruning all
    leaf nodes. A caterpillar is a tree that reduces to a path graph
    when pruning all leaf nodes; setting ``p2`` to zero produces a
    caterpillar.

    Parameters
    ----------
    n : int
        The expected number of nodes in the backbone
    p1 : float
        Probability of adding an edge to the backbone
    p2 : float
        Probability of adding an edge one level beyond backbone
    seed : int, optional
        Seed for random number generator (default=None).
    """
    # a necessary ingredient in any self-respecting graph library
    if seed is not None:
        random.seed(seed)
    # Backbone length is uniform on [0, 2n], so its expectation is n.
    llen = int(2 * random.random() * n + 0.5)
    L = path_graph(llen)
    L.name = "random_lobster(%d,%s,%s)" % (n, p1, p2)
    # build caterpillar: add edges to path graph with probability p1
    current_node = llen - 1
    # The loop variable used to shadow the parameter ``n``; renamed
    # (behavior unchanged -- the parameter is not read after this point).
    for backbone_node in range(llen):
        if random.random() < p1:  # add fuzzy caterpillar parts
            current_node += 1
            L.add_edge(backbone_node, current_node)
            if random.random() < p2:  # add crunchy lobster bits
                current_node += 1
                L.add_edge(current_node - 1, current_node)
    return L  # voila, un lobster!
def random_shell_graph(constructor, seed=None):
    """Returns a random shell graph for the constructor given.

    Parameters
    ----------
    constructor : list of three-tuples
        Represents the parameters for a shell, starting at the center
        shell. Each element of the list must be of the form ``(n, m,
        d)``, where ``n`` is the number of nodes in the shell, ``m`` is
        the number of edges in the shell, and ``d`` is the fraction of
        the shell's ``m`` edges kept inside the shell. If ``d`` is
        zero, there will be no intra-shell edges, and if ``d`` is one
        there will be all possible intra-shell edges.
    seed : int, optional
        Seed for random number generator (default=None).

    Examples
    --------
    >>> constructor = [(10, 20, 0.8), (20, 40, 0.8)]
    >>> G = nx.random_shell_graph(constructor)
    """
    G = empty_graph(0)
    G.name = "random_shell_graph(constructor)"

    if seed is not None:
        random.seed(seed)

    glist = []
    inter_edges = []  # per shell: edge budget towards the *next* shell
    nnodes = 0
    # create gnm graphs for each shell
    for (n, m, d) in constructor:
        # int(m * d) of the shell's m edges stay inside the shell; the
        # rest are reserved for connections to the next shell.
        # (The previous revision had the two names swapped.)
        intra = int(m * d)
        inter_edges.append(m - intra)
        g = nx.convert_node_labels_to_integers(
            gnm_random_graph(n, intra),
            first_label=nnodes)
        glist.append(g)
        nnodes += n
        G = nx.operators.union(G, g)

    # connect consecutive shells randomly
    for gi in range(len(glist) - 1):
        nlist1 = glist[gi].nodes()
        nlist2 = glist[gi + 1].nodes()
        total_edges = inter_edges[gi]
        edge_count = 0
        while edge_count < total_edges:
            u = random.choice(nlist1)
            v = random.choice(nlist2)
            if u == v or G.has_edge(u, v):
                continue
            else:
                G.add_edge(u, v)
                edge_count = edge_count + 1
    return G
def random_powerlaw_tree(n, gamma=3, seed=None, tries=100):
    """Returns a tree with a power law degree distribution.

    A candidate power-law degree sequence is generated (and adjusted,
    if needed) until it is realizable as a tree, then the tree is built
    from that sequence.

    Parameters
    ----------
    n : int
        The number of nodes.
    gamma : float
        Exponent of the power law.
    seed : int, optional
        Seed for random number generator (default=None).
    tries : int
        Number of attempts to adjust the sequence to make it a tree.

    Raises
    ------
    NetworkXError
        If no valid sequence is found within the maximum number of
        attempts.
    """
    # May raise NetworkXError when the attempt budget is exhausted.
    degree_seq = random_powerlaw_tree_sequence(n, gamma=gamma, seed=seed,
                                               tries=tries)
    tree = degree_sequence_tree(degree_seq)
    tree.name = "random_powerlaw_tree(%s,%s)" % (n, gamma)
    return tree
def random_powerlaw_tree_sequence(n, gamma=3, seed=None, tries=100):
    """Returns a degree sequence for a tree with a power law distribution.

    Draws a trial sequence from a power law with exponent ``gamma`` and
    then swaps random entries for fresh power-law draws until the
    sequence sums to ``2 * (n - 1)`` -- the degree sum of any tree on
    ``n`` nodes.

    Parameters
    ----------
    n : int,
        The number of nodes.
    gamma : float
        Exponent of the power law.
    seed : int, optional
        Seed for random number generator (default=None).
    tries : int
        Number of attempts to adjust the sequence to make it a tree.

    Raises
    ------
    NetworkXError
        If no valid sequence is found within the maximum number of
        attempts.
    """
    if seed is not None:
        random.seed(seed)

    def _clamped(values):
        # Round each float draw and clamp it into the valid range [0, n].
        return [min(n, max(int(round(v)), 0)) for v in values]

    # Trial sequence, plus a pool of replacement values from the same law.
    zseq = _clamped(nx.utils.powerlaw_sequence(n, exponent=gamma))
    swap = _clamped(nx.utils.powerlaw_sequence(tries, exponent=gamma))

    # NOTE(review): iterating ``swap`` while popping from its tail mirrors
    # the original implementation exactly; the iterator meets the
    # shrinking tail part-way, so only about tries/2 swaps are attempted.
    for _ in swap:
        # Realizable as a tree iff sum(zseq) == 2 * (n - 1); the check is
        # written without division to stay in integer arithmetic.
        if 2 * n - sum(zseq) == 2:
            return zseq
        zseq[random.randint(0, n - 1)] = swap.pop()
    raise nx.NetworkXError('Exceeded max (%d) attempts for a valid tree'
                           ' sequence.' % tries)
|
dmoliveira/networkx
|
networkx/generators/random_graphs.py
|
Python
|
bsd-3-clause
| 31,442
|
[
"VisIt"
] |
bb4b1bde39d73486c7c188ecb35d3ee7fb778f766ab02d1b2b8f6d572b0fda30
|
# Rdesigneur/MOOSE example: synaptically driven Ca influx into spines on
# a branched neuron.  Builds a soma + dendrite + branch cell with active
# spines, installs a calcium-only chemical model, stimulates pairs of
# spine heads at staggered times, and plots (or 3-D animates) the
# resulting Ca signals and membrane potential.
import moose
import pylab
import rdesigneur as rd
moogList = []
### The assignment below overrides the empty list above and turns the
### 3-D Moogli cell view ON; comment it out to run with 2-D plots only.
moogList = [['#', '1', '.', 'Vm', 'Membrane potential', -0.065, -0.055]]
rdes = rd.rdesigneur(
    turnOffElec = False,   # keep the electrical solver running
    chemDt = 0.002,        # chemical integration timestep (s)
    chemPlotDt = 0.02,     # chemical plotting interval (s)
    diffusionLength = 1e-6,
    numWaveFrames = 50,
    useGssa = False,       # deterministic chemistry (no stochastic GSSA)
    addSomaChemCompt = False,
    addEndoChemCompt = False,
    # cellProto syntax: ['branchedCell', 'name', somaDia, somaLength, dendDia, dendLength, numDendSeg, branchDia, branchLength, numBranchSeg]
    cellProto = [['branchedCell', 'soma', 10e-6, 10e-6, 1e-6, 40e-6, 1, 0.5e-6, 40e-6, 1]],
    spineProto = [['makeActiveSpine()', 'spine']],
    chemProto = [['./chem/CaOnly.g', 'chem']],
    # Spines every 4 um on dendrite/branch compartments.  The meaning of
    # the negative fourth field is rdesigneur-specific (presumably a
    # spacing jitter/offset) -- TODO confirm against the rdesigneur docs.
    spineDistrib = [['spine', '#dend#,#branch#', '4.0e-6', '-0.1e-6']],
    chemDistrib = [['chem', 'dend#,branch#,spine#,head#', 'install', '1' ]],
    # Adaptor couples the electrical Ca_conc object to the chemical pool
    # 'spine/Ca' (conc); the last two values are presumably offset and
    # scale -- TODO confirm.
    adaptorList = [
        [ 'Ca_conc', 'Ca', 'spine/Ca', 'conc', 0.00008, 8 ]
    ],
    # 40 Hz glutamate + NMDA bursts onto selected spine heads, each
    # confined to a one-second window (t in seconds).
    stimList = [
        ['head3', '0.5', 'glu', 'periodicsyn', '0 + 40*(t>2 && t<3)'],
        ['head3', '0.5', 'NMDA', 'periodicsyn', '0 + 40*(t>2 && t<3)'],
        ['head4', '0.5', 'glu', 'periodicsyn', '0 + 40*(t>4 && t<5)'],
        ['head4', '0.5', 'NMDA', 'periodicsyn', '0 + 40*(t>4 && t<5)'],
        ['head13', '0.5', 'glu', 'periodicsyn', '0 + 40*(t>8 && t<9)'],
        ['head13', '0.5', 'NMDA', 'periodicsyn', '0 + 40*(t>8 && t<9)'],
        ['head14', '0.5', 'glu', 'periodicsyn', '0 + 40*(t>10 && t<11)'],
        ['head14', '0.5', 'NMDA', 'periodicsyn', '0 + 40*(t>10 && t<11)'],
        ['head23', '0.5', 'glu', 'periodicsyn', '0 + 40*(t>14 && t<15)'],
        ['head23', '0.5', 'NMDA', 'periodicsyn', '0 + 40*(t>14 && t<15)'],
        ['head24', '0.5', 'glu', 'periodicsyn', '0 + 40*(t>16 && t<17)'],
        ['head24', '0.5', 'NMDA', 'periodicsyn', '0 + 40*(t>16 && t<17)'],
    ],
    # Time-series and wave plots of Ca concentrations plus Vm traces.
    plotList = [
        ['head#', '1', 'spine/Ca', 'conc', 'Spine Ca conc'],
        ['dend#,branch#', '1', 'dend/Ca', 'conc', 'Dend Ca conc'],
        ['head#', '1', 'spine/Ca', 'conc', 'Spine Ca conc', 'wave'],
        ['dend#,branch#', '1', 'dend/Ca', 'conc', 'Dend Ca conc', 'wave'],
        ['soma,#dend#,branch#', '1', '.', 'Vm', 'Memb potl'],
        ['soma', '1', '.', 'Vm', 'Memb potl'],
    ],
    moogList = moogList,
)
moose.seed( 1234 )  # fixed RNG seed for a reproducible run
rdes.buildModel()
moose.reinit()
if len(moogList) == 0:
    # No 3-D view requested: run 8 s of simulation, then show the plots.
    moose.start( 8 )
    rdes.display()
else:
    # 3-D Moogli view; displayMoogli drives the simulation itself.
    rdes.displayMoogli( 0.1, 11, 0.0 )
|
BhallaLab/moose-examples
|
tutorials/Rdesigneur/ex10.2_spine_Ca_influx_to_branched_neuron.py
|
Python
|
gpl-2.0
| 2,506
|
[
"MOOSE"
] |
3489df37bbfc4a4969a42ab70359c3db3ea02a6d72a831072b71f3bc28467f61
|
# -*- coding: utf-8 -*-
# for more info please visit http://www.iptvxtra.net
import sys,xbmc,os,shutil,xbmcaddon
# ------------------------------------------------------------------- restoring the DE settings - START
# Best-effort repair at start-up: if the main DE addon's settings.xml is
# missing (or still holds the placeholder user "xbmcuser") and a
# backup.xml exists with the sBackup flag set, copy the backup back into
# place.  All failures are deliberately swallowed; the numbered prints
# are only trace markers.
try:
    sxUser = 0    # 1 -> settings.xml still contains the placeholder login "xbmcuser"
    sxBackup = 0  # 1 -> a backup is flagged active (sBackup true)
    saveset = xbmc.translatePath('special://userdata/addon_data/plugin.video.iptvxtra-de/backup.xml')
    orgset = xbmc.translatePath('special://userdata/addon_data/plugin.video.iptvxtra-de/settings.xml')
    # Case 1: settings.xml is gone entirely -- restore from backup.
    if os.path.isfile(saveset) and not os.path.isfile(orgset):
        try:
            shutil.copy(saveset, orgset)
            print ' ---------------------------------------------- IPTVxtra Info-Message-more TV 100'
            print ' ---------------------------------------------------------------------------------'
        except: pass
    # Scan current settings for the placeholder login and the backup flag.
    try:
        fobj = open(orgset, "r")
        for line in fobj:
            if "login" in line and "xbmcuser" in line: sxUser = 1
            if "sBackup" in line and "true" in line: sxBackup = 1
        fobj.close()
    except: pass
    # If the flag was not in settings.xml, check the backup file itself.
    # (Only the first line is inspected: the break is unconditional.)
    if sxBackup == 0 and sxUser == 1:
        try:
            fobj = open(saveset, "r")
            for line in fobj:
                if "sBackup" in line and "true" in line: sxBackup = 1
                break
            fobj.close()
        except: pass
    # Case 2: placeholder login + active backup -> overwrite settings.xml.
    if os.path.isfile(saveset) and sxBackup == 1 and sxUser == 1: # restore
        try: os.remove(orgset)
        except: pass
        try:
            shutil.copy(saveset, orgset)
            print ' ---------------------------------------------- IPTVxtra Info-Message-more TV 102'
            print ' ---------------------------------------------------------------------------------'
        except:
            print ' ---------------------------------------------- IPTVxtra Info-Message-more TV 103'
            print ' ---------------------------------------------------------------------------------'
except:
    print ' ---------------------------------------------- IPTVxtra Info-Message-more TV 104'
    print ' ---------------------------------------------------------------------------------'
# ------------------------------------------------------------------- restoring the settings - END
import resources.lib.requests as requests
from datetime import date, datetime,timedelta
import urllib,re,xbmcplugin,xbmcgui,hashlib,pickle,time
# Per-invocation plugin state.  Kodi invokes this script with
# sys.argv = [base url, directory handle, query string].
addon_handle = int(sys.argv[1])
xbmcplugin.setContent(addon_handle, 'movies')
addon = xbmcaddon.Addon('plugin.video.iptvxtra-de-more')
profile = xbmc.translatePath(addon.getAddonInfo('profile'))
# Credentials and options are read from the main DE addon, not this one.
__settings__ = xbmcaddon.Addon(id="plugin.video.iptvxtra-de")
user = __settings__.getSetting("login").strip()
pwd = __settings__.getSetting("password").strip()
puffer = __settings__.getSetting("record_active")
datenholen = __settings__.getSetting("datenholen")
# Account token sent to the API: md5 over the literal '#user=<u>pass=<p>'.
mdx = hashlib.md5('#user='+user+'pass='+pwd).hexdigest()
home = xbmcaddon.Addon(id="plugin.video.iptvxtra-de-more").getAddonInfo('path')
icondir = xbmc.translatePath("special://home/addons/plugin.video.iptvxtra-de-more/resources/lib/")
icon = xbmc.translatePath( os.path.join( home, 'icon.png' ) )
net = xbmc.translatePath( os.path.join( home, 'resources/lib/net.png') )
fanart = xbmc.translatePath( os.path.join( home, 'fanart.jpg' ) )
mode = sys.argv[2]  # raw query string; drives the dispatch in main()
# Auto-detect the offset (in hours) between local time and German time
# when the 'timeshift0' option is on; otherwise use the stored value.
if __settings__.getSetting("timeshift0") == 'true':
    try:
        import resources.lib.USTimeZone as USTimeZone
        LocalTimezone = USTimeZone.LocalTimezone()
        Europe = USTimeZone.GMT1()
        # euro = German UTC offset in hours (1 or 2 -- presumably CET vs CEST).
        if '+01:00' in str(datetime.now(Europe)): euro = 1
        elif '+02:00' in str(datetime.now(Europe)): euro = 2
        else: euro = 1
        # Parse the local UTC offset out of the ISO timestamp string.
        eurox = str(datetime.now(LocalTimezone))
        eurox = eurox.partition('.')
        if '+' in eurox[2] :
            eurox = eurox[2].partition('+')
            eurox = eurox[2].partition(':')
            timeshift = str(int(eurox[0]) - euro)
        elif '-' in eurox[2] :
            eurox = eurox[2].partition('-')
            eurox = eurox[2].partition(':')
            timeshift = str(int('-'+eurox[0]) - euro)
        __settings__.setSetting("timeshift", timeshift)
    except:
        # Detection failed: fall back to the value stored in the DE addon.
        timeshift = __settings__.getSetting('timeshift')
        xbmc.executebuiltin('XBMC.Notification(Zeitzonen Fehler , die Zeitzone wurde nicht erkannt - die Vorgabewerte werden vom DE Addon geholt ,8000,'+net+')')
else:
    timeshift= __settings__.getSetting("timeshift")
    if timeshift == '': timeshift = '0'
    __settings__.setSetting("timeshift", timeshift)
# Manual summer-time switch overrides the detected German offset.
if __settings__.getSetting("sommer") == 'true': euro = 2
else: euro = 1
def main():
    """Dispatch on the plugin-URL query stored in the global ``mode``.

    ``xcatNN``  -> build the video list for one day of one category
                   (the category name follows the ``xxx`` separator).
    ``xcatcNN`` -> open a category/day submenu (each of those helpers
                   calls sys.exit itself).
    otherwise   -> render the top-level category menu.
    """
    # Day selection: xcat01 = today, xcat02 = yesterday, ... xcat07 =
    # six days back.  xt[0] holds the 'YYYY-MM-DD' part of that day.
    if 'xcat01' in mode: xt = str(datetime.now()).partition(' ')
    elif 'xcat02' in mode: xt = str(datetime.now() - timedelta(days=1)).partition(' ')
    elif 'xcat03' in mode: xt = str(datetime.now() - timedelta(days=2)).partition(' ')
    elif 'xcat04' in mode: xt = str(datetime.now() - timedelta(days=3)).partition(' ')
    elif 'xcat05' in mode: xt = str(datetime.now() - timedelta(days=4)).partition(' ')
    elif 'xcat06' in mode: xt = str(datetime.now() - timedelta(days=5)).partition(' ')
    elif 'xcat07' in mode: xt = str(datetime.now() - timedelta(days=6)).partition(' ')
    # Category submenus: categorie()/puffer_on_off() never return.
    elif 'xcatc01' in mode: categorie('Top Hits','tophits')
    elif 'xcatc02' in mode: categorie('US Serien','serien')
    elif 'xcatc03' in mode: categorie('US Sitcom`s & Soap`s','sitcom')
    elif 'xcatc04' in mode: categorie('Talk Shows','talkshow')
    elif 'xcatc05' in mode: categorie('Dokus','doku')
    elif 'xcatc06' in mode: categorie('Sport','sport')
    elif 'xcatc07' in mode: categorie('News','news')
    elif 'xcatc08' in mode: categorie('Pseudo-Dokus','gerichtdetektiv')
    elif 'xcatc09' in mode: categorie('TV Shows','tvshows')
    elif 'xcatc10' in mode: categorie('Motor Dokus','motors')
    elif 'xcatc11' in mode: categorie('Kid`s TV','kids')
    elif 'xcatc12' in mode: categorie('DE Doku Soap`s','de_dokusoap')
    elif 'xcatc13' in mode: categorie('DE Serien, Sitcom`s & Soap`s','de_serien')
    elif 'xcatc14' in mode: categorie('Koch-Shows und Essen','kochshows')
    elif 'xcatc99' in mode: puffer_on_off()
    else:
        # Top-level menu.  The marker file records that the view mode
        # has already been set once for this listing.
        categoriex()
        try:
            if not os.path.isfile(xbmc.translatePath("special://temp/0_more.fi")):
                open(xbmc.translatePath("special://temp/0_more.fi"), "a").close()
                xbmc.executebuiltin("Container.SetViewMode(504)")
        except: pass
        sys.exit(0)
    # Only the xcat01..xcat07 day branches reach this point.
    file_cat = mode.split('xxx')[1]
    file = get_status()  # NOTE(review): shadows the Python 2 builtin ``file``
    # Feed URL: status template with the category substituted, plus date.
    link = get_url(file.replace('serien',file_cat) + xt[0] + '.xmx')
    # Map the record_endtime setting onto seconds of extra play time.
    endtime = xbmcaddon.Addon(id = 'plugin.video.iptvxtra-de').getSetting("record_endtime")
    if endtime == '0': endtime = 600
    elif endtime == '1': endtime = 1800
    elif endtime == '2': endtime = 3600
    elif endtime == '3': endtime = 7200
    elif endtime == '4': endtime = 10800
    for item in link:
        # Only list programmes whose start lies in the (timeshifted) past.
        if int(item[7]) < (int(time.time())+(int(timeshift)*3600)):
            try:
                videoTitle = item[3] +' - '+ item[1]
                videoTitle2 = item[1]
                videoTitle = videoTitle  # no-op; kept as in the original
            except: videoTitle = 'no Titel'
            try:
                desc = item[2].replace('(n)','')
            except: desc = ''
            try:
                # Thumbnails are expected in the local thumb cache dir.
                thumbnail = item[4]
                thumbnail = xbmc.translatePath("special://temp/temp/iptvxtra_thumbs/") + os.path.basename(item[4])
                if item[4] == 'none': thumbnail = icon
            except: thumbnail = icon
            try:
                try: videoTitlex = videoTitle.encode('utf-8')
                except: videoTitlex = ' ...'
                # Clamp the replay window: at most 4 h long and never
                # reaching into the future.
                endtimex = int(item[7])-(euro * 3600)+endtime
                starttimex = int(item[6])-(euro * 3600)
                if endtimex - starttimex > 14400: endtimex = starttimex + 14400
                if endtimex > int(time.time()): endtimex = int(time.time()) - 300
                # Build the '***'-separated payload and hex-encode it into
                # a runstream URL for the basic player addon.
                url = item[5].replace('http://pebbles','http://c001.p').replace('-lh.akamaihd.net/i/','.edgesuite.net/i/c001/') + '***' + str(starttimex) + '***' + str(endtimex) + '***' + item[8] + '***' + videoTitlex + '***' + thumbnail + '***' + 'tophits2' + '***' + mdx
                url = 'plugin://plugin.video.iptvxtra-basic/?runstream=' + url.encode("hex")
            except: url = ''
            addLink(videoTitle,videoTitle2,url,thumbnail,desc)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_GENRE)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
    xbmc.executebuiltin("Container.SetViewMode(504)")
    sys.exit(0)
def addLink(name, name2, url, iconimage, desc):
    """Append one playable video entry to the current directory listing.

    ``name`` is the display label/title, ``name2`` is stored in the
    Genre infolabel (used by the genre sort method), ``url`` the
    resolved plugin URL, ``iconimage`` the thumbnail path and ``desc``
    the plot text.
    """
    # (Removed the unused ``ok = True`` flag the original set and never used.)
    liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Label": name, "Title": name, "Genre": name2, "Plot": desc})
    liz.setProperty("Fanart_Image", iconimage)
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=liz)
def addDir(name, url, iconimage, desc):
    """Append one folder entry to the current directory listing."""
    # Python 2: the plot text arrives as a byte string; decode it first.
    plot_text = desc.decode("iso-8859-1")
    entry = xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
    entry.setInfo(type="Video", infoLabels={"Plot": plot_text})
    entry.setProperty("Fanart_Image", icon)
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=entry, isFolder=True)
def categorie(cattitle, file_cat):
    """Build the day-selection submenu (today .. six days back) for one
    category and exit.

    ``cattitle`` is the human-readable category title, ``file_cat`` the
    feed name appended to the ``xcatNNxxx`` plugin URL.

    The original computed eight date/weekday pairs in eight copy-pasted
    statement pairs (including an unused pair for day 7 whose menu line
    was commented out); this version loops instead -- behavior and all
    user-visible strings are unchanged.
    """
    wochentage = ("Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag", "Sonntag")
    # Reference clock in German time: local epoch corrected by the
    # configured timeshift and the German UTC offset (globals).
    basis = int(time.time()) - (int(timeshift) * 3600) + (euro * 3600)
    tage = []
    for offset in range(7):
        datum = datetime.fromtimestamp(basis - 86400 * offset).strftime('%d.%m.%Y')
        tag, monat, jahr = datum.split('.')
        tage.append((datum, wochentage[date(int(jahr), int(monat), int(tag)).weekday()]))
    # Today gets its own wording (update-time hint in the description).
    t0, d0 = tage[0]
    addDir(cattitle+' von heute - '+d0, 'plugin://plugin.video.iptvxtra-de-more/?xcat01xxx' + file_cat, icon, '\n'+cattitle+' vom\n'+d0+' den '+t0+'\n\nwird cirka um 10.00 Uhr jeden Tag aktualisiert\n\nalle Zeiten sind deutsche Zeiten')
    for k in range(1, 7):
        tk, dk = tage[k]
        if k == 1:
            label = cattitle + ' von gestern - ' + dk
        else:
            label = cattitle + ' vor %d Tagen - ' % k + dk
        url = 'plugin://plugin.video.iptvxtra-de-more/?xcat%02dxxx' % (k + 1) + file_cat
        addDir(label, url, icon, '\n'+cattitle+' vom\n'+dk+' den '+tk+'\n\nalle Zeiten sind deutsche Zeiten')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
    xbmc.executebuiltin("Container.SetViewMode(503)")
    sys.exit(0)
def categoriex():
    """Render the top-level category menu plus the replay-buffer toggle."""
    # (title, query code, icon file) -- order defines the menu order.
    entries = (
        ('Top Hits', 'xcatc01x', 'icon1.png'),
        ('Kid`s TV', 'xcatc11x', 'icon11.png'),
        ('US Serien', 'xcatc02x', 'icon2.png'),
        ('US Sitcom`s und Soap`s', 'xcatc03x', 'icon3.png'),
        ('DE Serien, Sitcom`s und Soap`s', 'xcatc13x', 'icon2.png'),
        ('Gerichts und Detektiv Pseudo-Dokus', 'xcatc08x', 'icon8.png'),
        ('deutsche Doku Soap`s', 'xcatc12x', 'icon12.png'),
        ('Reportagen, Magazine, Talk und Late Night Shows', 'xcatc04x', 'icon4.png'),
        ('TV-Shows, Game-Shows und Musik Sendungen', 'xcatc09x', 'icon9.png'),
        ('Dokumentationen und Doku-Serien', 'xcatc05x', 'icon5.png'),
        ('Motor Magazine und Dokus', 'xcatc10x', 'icon10.png'),
        ('Sport und Sportnachrichten', 'xcatc06x', 'icon7.png'),
        ('Koch-Shows und Essen', 'xcatc14x', 'icon5.png'),
        ('News, Nachrichten und Politik', 'xcatc07x', 'icon6.png'),
    )
    for title, code, icon_file in entries:
        addDir(title, 'plugin://plugin.video.iptvxtra-de-more/?' + code, icondir + icon_file, '\n')
    # Replay-buffer toggle entry; label reflects the current state.
    hinweis = '\n\ndie kompletten Einstellungen sind im\nIPTVxtra DE Addon\neinzusehen'
    if puffer == 'true': addDir('IPTVxtra DE - Replay-Puffer ausschalten', 'plugin://plugin.video.iptvxtra-de-more/?xcatc99x', icon, hinweis)
    if puffer == 'false': addDir('IPTVxtra DE - Replay-Puffer einschalten', 'plugin://plugin.video.iptvxtra-de-more/?xcatc99x', icon, hinweis)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def get_url(url):
    # Download a pickled playlist from `url` into a scratch file in Kodi's
    # temp directory, unpickle it, delete the file and return the object.
    # On any failure the same path is retried once against a mirror host
    # ('rv1' -> 'rv3' in the URL) before aborting the plugin.
    # NOTE(review): unpickling data fetched over plain HTTP is unsafe if the
    # server is not fully trusted -- pickle can execute arbitrary code.
    try:
        xmx = xbmc.translatePath("special://temp/3496-2216be.fi")  # scratch file
        urllib.urlretrieve (url, xmx)
        f = open(xmx)
        liste = pickle.load(f)
        f.close()
        os.remove(xmx)
        return liste
    except:
        try:
            # Fallback: identical flow against the alternate server.
            xmx = xbmc.translatePath("special://temp/3496-2216be.fi")
            urllib.urlretrieve (url.replace('rv1','rv3'), xmx)
            f = open(xmx)
            liste = pickle.load(f)
            f.close()
            os.remove(xmx)
            return liste
        except: sys.exit(0)  # give up silently; Kodi plugin convention here
def get_status():
    # Ask the login server for the current "top hits" list URL.
    # `mdx` and `net` are module globals defined earlier in this file
    # (presumably a location token and an error icon path -- confirm).
    try:
        r = requests.get("http://api.iptvxtra.net/tophits.php", params = {'loc': mdx ,'la':'DE','app':'serien1'} )
        # The server answers with a hex-encoded string (Python 2 str.decode).
        url = r.text.strip().decode("hex")
    except:
        xbmc.executebuiltin('XBMC.Notification(Netzwerkfehler , fehlerhafter Zugang zum Login-Server,25000,'+net+')')
        sys.exit(0)
    if url == '':
        # Empty answer is treated the same as a network failure.
        xbmc.executebuiltin('XBMC.Notification(Netzwerkfehler , fehlerhafter Zugang zum Login-Server,25000,'+net+')')
        sys.exit(0)
    return url
def puffer_on_off():
    # Toggle the 'record_active' (replay buffer) setting of the main
    # IPTVxtra DE addon, notify the user, refresh the listing and exit.
    puffer = xbmcaddon.Addon(id = 'plugin.video.iptvxtra-de').getSetting("record_active")
    if puffer == 'true':
        xbmcaddon.Addon(id = 'plugin.video.iptvxtra-de').setSetting('record_active','false')
        xbmc.executebuiltin('XBMC.Notification(Replay-Puffer , ist jetzt ausgeschaltet,5000,'+icon+')')
    elif puffer == 'false':
        xbmcaddon.Addon(id = 'plugin.video.iptvxtra-de').setSetting('record_active','true')
        xbmc.executebuiltin('XBMC.Notification(Replay-Puffer , ist jetzt eingeschaltet,5000,'+icon+')')
    xbmc.executebuiltin('Container.Refresh')
    sys.exit(0)
main()  # module entry point: runs on every plugin invocation
|
noba3/KoTos
|
addons/plugin.video.iptvxtra-de-more/default.py
|
Python
|
gpl-2.0
| 17,491
|
[
"VisIt"
] |
59adc0d8f8f9d00e375f0b6c0baf081ec4e0233f7efd419dc6e3d238266deec6
|
# coding: utf-8
from collections import defaultdict
from operator import itemgetter
import os
import random
import string
import sys
import time
import h2o
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
from sklearn.tree import DecisionTreeClassifier
from h2o.estimators.random_forest import H2ORandomForestEstimator
# Start (or attach to) a local H2O cluster and silence its progress bars.
h2o.init()
h2o.no_progress()
def generate_dataset(
    num_x,
    n_samples,
    n_levels=100):
    """Build a synthetic dataset where `y` is perfectly determined by the
    categorical column `c` and the continuous column `z`, while the `x_i`
    columns are weakly predictive and co-correlated.

    Parameters
    ----------
    num_x : int
        Number of weak continuous features.
    n_samples : int
    n_levels : int
        Total cardinality of `c` (half positive levels, half negative).

    Returns
    -------
    (df_cat, df_ohe) : tuple of pd.DataFrame
        The same data with `c` as a raw categorical column and one-hot
        encoded, respectively; both carry the target column `y` first.
    """
    features, targets = generate_continuous_data_and_targets(num_x, n_samples)
    cat_col, cont_col = make_c_and_z_based_on_y(targets, n_levels)
    x_names = ['x{}'.format(i) for i in range(features.shape[1])]
    df_cat = pd.DataFrame(features, columns=x_names)
    # Prepend z, then c, so the final column order is c, z, x0, x1, ...
    df_cat = pd.DataFrame(cont_col, columns=['z']).join(df_cat)
    df_cat = pd.DataFrame(cat_col, columns=['c']).join(df_cat)
    df_ohe = pd.get_dummies(df_cat, 'c')
    y_frame = pd.DataFrame(targets, columns=['y'])
    return y_frame.join(df_cat), y_frame.join(df_ohe)
def generate_continuous_data_and_targets(
    n_dim,
    n_samples,
    mixing_factor=0.025):
    """Sample a multivariate-Gaussian design matrix and a binary response
    conditioned on a random weighted sum of the features.

    Parameters
    ----------
    n_dim : int
    n_samples : int
    mixing_factor : float
        Scales the activation so the sigmoid stays near its linear regime;
        smaller values squash probabilities closer to 0.5.

    Returns
    -------
    X : np.array of shape (n_samples, n_dim)
    y : np.array of shape (n_samples,)
    """
    covariance = generate_positive_semi_definite_matrix(n_dim)
    X = np.random.multivariate_normal(
        mean=np.zeros(n_dim),
        cov=covariance,
        size=n_samples)
    w = np.random.randn(n_dim)
    # Bernoulli draw with probability sigmoid(scaled linear response).
    activation = mixing_factor * np.dot(X, w)
    y = np.random.binomial(1, p=sigmoid(activation))
    return X, y
def generate_positive_semi_definite_matrix(n_dim):
    """Return a random positive semi-definite matrix.

    Built as A @ A.T for a random Gaussian A, which is PSD by construction.

    Parameters
    ----------
    n_dim : int

    Returns
    -------
    np.array of shape (n_dim, n_dim)
    """
    factor = np.random.randn(n_dim, n_dim)
    return factor @ factor.T
def sigmoid(x):
    """Logistic function: map an activation (scalar or np.array) into (0, 1).

    Parameters
    ----------
    x : float or np.array

    Returns
    -------
    float or np.array
    """
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
def make_C(n_levels=100):
    """Create the level names for the categorical variable: two lists of
    n_levels/2 entries each, prefixed 'A' (positive set) and 'B' (negative
    set), with two-letter lowercase suffixes in lexicographic order.

    Parameters
    ----------
    n_levels : int
        Total cardinality; each returned list has n_levels/2 entries.

    Returns
    -------
    (positives, negatives) : tuple of list of str
    """
    half = int(n_levels / 2)
    suffixes = [a + b for a in string.ascii_lowercase
                for b in string.ascii_lowercase]
    positives = ['A' + s for s in suffixes[:half]]
    negatives = ['B' + s for s in suffixes[:half]]
    return positives, negatives
def make_c_and_z_based_on_y(y_vals, n_levels, z_pivot=10):
    """
    Builds a categorical variable c and continuous variable z such that
    y is perfectly predictable from c and z, with y = 1 iff c takes a value
    from a positive set OR z > z_pivot.

    Parameters
    ----------
    y_vals : np.array
    n_levels : int
        Cardinality of the categorical variable, c.
    z_pivot : float
        Mean of z.

    Returns
    -------
    c : np.array
    z : np.array
    """
    # Oversample z (2x) so both the >pivot and <=pivot pools are non-empty
    # with overwhelming probability.
    z_samples = np.random.normal(loc=z_pivot, scale=5, size=2 * len(y_vals))
    z_pos, z_neg = z_samples[z_samples > z_pivot], z_samples[z_samples <= z_pivot]
    c_pos, c_neg = make_C(n_levels)
    c, z = list(), list()
    for y in y_vals:
        coin = np.random.binomial(1, 0.5)
        if y and coin:
            # z alone forces y = 1, so c may come from either set.
            c.append(random.choice(c_pos + c_neg))
            z.append(random.choice(z_pos))
        elif y and not coin:
            # c alone forces y = 1; z stays below the pivot.
            c.append(random.choice(c_pos))
            z.append(random.choice(z_neg))
        else:
            # y = 0: negative category AND z below pivot.
            c.append(random.choice(c_neg))
            z.append(random.choice(z_neg))
    # BUG FIX: z was previously returned as a plain list although the
    # docstring promises np.array (and c is already converted).
    return np.array(c), np.array(z)
def get_feature_names(df, include_c):
    """List feature column names from `df`, dropping the target column(s)
    (prefix 'y') and, optionally, the categorical column(s) (prefix 'c').

    Parameters
    ----------
    df : pd.DataFrame
    include_c : bool
        When False, also exclude columns starting with 'c'.

    Returns
    -------
    names : list of str
    """
    names = []
    for col in df.columns:
        if col.startswith('y'):
            continue
        if not include_c and col.startswith('c'):
            continue
        names.append(col)
    return names
class H2ODecisionTree:
    """
    Simple class that overloads an H2ORandomForestEstimator to mimic a
    decision tree classifier. Only train, predict and varimp are implemented.
    """

    def __init__(self):
        # The underlying estimator is created lazily in train().
        self.model = None

    def train(self, x, y, training_frame):
        # A forest of a single tree that may use every feature at each split
        # (mtries = number of features) behaves like one decision tree.
        self.model = H2ORandomForestEstimator(ntrees=1, mtries=len(x))
        self.model.train(x=x, y=y, training_frame=training_frame)

    def predict(self, frame):
        # Delegate straight to the wrapped estimator.
        return self.model.predict(frame)

    def varimp(self):
        # Delegate straight to the wrapped estimator.
        return self.model.varimp()
def evaluate_h2o_model(
    data,
    feature_names,
    target_col,
    model,
    n_iters=10,
    metric=roc_auc_score):
    """
    Train an H2O model on different train-test splits, and returns a metric
    evaluated on each fold, and the feature importance scores for each fold.

    Parameters
    ----------
    data : pd.DataFrame
    feature_names : list of strings
        Names of columns of dataframe that will make up X
    target_col : string
        Name of target column
    model : H2O model
        E.g. H2ORandomForestEstimator or H2ODecisionTree
    n_iters : int, default 10
    metric : function, default roc_auc_score
        A function that returns a float when called with metric(y_true, y_test)

    Returns
    -------
    metrics : list of floats
    feature_importances : list of dicts
        Each dict has the form {feature_name (str): feature_importance (float)}
    """
    # Mirror the pandas frame into H2O once; folds slice it by row index.
    h2ofr = h2o.H2OFrame(data)
    h2ofr.col_names = list(data.columns)
    metrics, feature_importances = list(), list()
    folds = StratifiedShuffleSplit(y=data[target_col],
                                   n_iter=n_iters,
                                   test_size=0.3)
    for train_idx, test_idx in folds:
        train_idx, test_idx = \
            sorted(train_idx), sorted(test_idx)  # H2O indices must be sorted
        model.train(x=feature_names,
                    y=target_col,
                    training_frame=h2ofr[train_idx, :])
        # Slicing an H2O frame causes a (depreciation) warning in h2o version
        # 3.10.0.3. There is a TODO to fix it, so we can probably safely ignore
        # the warning. The warning uses a print statement, so we'll temporarily
        # redirect stdout:
        with open(os.devnull, 'w') as f:
            old_out = sys.stdout
            sys.stdout = f
            predictions = model.predict(
                h2ofr[test_idx, feature_names]).as_data_frame()
            sys.stdout = old_out
        try:
            # Probability column for the positive class.
            prediction_scores = predictions['True']
        except KeyError:
            # Decision Trees only give a single 'predict' column
            prediction_scores = predictions['predict']
        metrics.append(
            metric(data[target_col].values[test_idx], prediction_scores))
        # varimp() rows are tuples; index 0 is the name, index 3 the
        # (percentage) importance.
        feature_importances.append(
            dict([(v[0], v[3]) for v in model.varimp()]))
    return {'metric': metrics, 'importances': feature_importances}
def evaluate_sklearn_model(
    data,
    feature_names,
    target_col,
    model,
    n_iters=10,
    metric=roc_auc_score):
    """
    Train an sklearn model on different train-test splits, and returns a metric
    evaluated on each fold, and the feature importance scores for each fold.

    Parameters
    ----------
    data : pd.DataFrame
    feature_names : list of strings
        Names of columns of dataframe that will make up X
    target_col : string
        Name of target column
    model : sklearn estimator
        E.g. RandomForestClassifier or LogisticRegressionCV
    n_iters : int, default 10
    metric : function, default roc_auc_score
        A function that returns a float when called with metric(y_true, y_test)

    Returns
    -------
    results
        results.metrics : list of floats
        results.importances : list of dicts
            Each dict has the form
            {feature_name (str): feature_importance (float)}
    """
    metrics, feature_importances = list(), list()
    X = data[feature_names].values
    y = data[target_col].values
    folds = StratifiedShuffleSplit(y=y, n_iter=n_iters, test_size=0.3)
    for train_idx, test_idx in folds:
        model.fit(X[train_idx], y[train_idx])
        predictions = model.predict_proba(X[test_idx])
        # Column 1 holds the positive-class probability.
        metrics.append(
            metric(y[test_idx], predictions[:, 1]))
        try:
            # Tree-based models expose feature_importances_ ...
            feature_importances.append(
                dict(zip(feature_names, model.feature_importances_))
            )
        except AttributeError:  # Not a random forest!
            # ... linear models expose coefficients instead.
            feature_importances.append(
                dict(zip(feature_names, model.coef_.ravel()))
            )
    return {'metric': metrics, 'importances': feature_importances}
def print_auc_mean_std(results):
    """Print mean and standard deviation of the per-fold AUC scores.

    Parameters
    ----------
    results : dict
        As produced by `evaluate_sklearn_model` or `evaluate_h2o_model`;
        the 'metric' key holds the list of per-fold scores.

    Prints
    ------
    A one-line summary to standard output.
    """
    scores = results['metric']
    print("AUC: mean {:4.4f}, sd {:4.4f}".format(
        np.mean(scores), np.std(scores)))
def print_sorted_mean_importances(results, n=5):
    """Print features sorted by their mean importance across folds.

    Importance values may be coefficients if the model was regression-type.

    Parameters
    ----------
    results : dict
        As produced by `evaluate_sklearn_model` or `evaluate_h2o_model`;
        the 'importances' key holds one {name: value} dict per fold.
    n : int
        Number of rows to print; falsy prints all.

    Prints
    ------
    One formatted "name: value" line per feature.
    """
    pooled = defaultdict(list)
    for fold_importances in results['importances']:
        for fname, imp in fold_importances.items():
            pooled[fname].append(imp)
    means = [(fname, np.mean(vals)) for fname, vals in pooled.items()]
    means.sort(key=itemgetter(1), reverse=True)
    for fname, val in (means[:n] if n else means):
        print("{:>20}: {:0.03f}".format(fname, val))
|
roaminsight/roamresearch
|
BlogPosts/Categorical_variables_in_tree_models/tree_categorical_variables.py
|
Python
|
apache-2.0
| 11,127
|
[
"Gaussian"
] |
a2b00b6221adbae06fb87c590c165ef6c8511937b14b357ed391c69e32d503f8
|
from scipy import signal
import numpy as np
import pandas as pd
import time
def gaussian_filter(_df, _len=16, _sigma=1.6):
    """Apply a normalized Gaussian smoothing filter to every column of _df.

    Parameters
    ----------
    _df : pd.DataFrame
    _len : int
        Window length minus one (the window has _len + 1 taps).
    _sigma : float
        Standard deviation of the Gaussian window.

    Returns
    -------
    pd.DataFrame
        Same column names and length; edges reflect np.convolve's zero-padded
        'same' mode.
    """
    cols = np.empty((len(_df.index), len(_df.columns)))
    cols.fill(np.nan)
    header = []
    for column in _df:
        header.append(column)
        # Fill the slot for the column just appended.
        cols[:, len(header) - 1] = gaussian_filtered(_df[column], _len=_len, _sigma=_sigma)
    return pd.DataFrame(cols, columns=header)


def gaussian_filtered(_X, _len=16, _sigma=1.6):
    """Convolve 1-D series _X with a unit-area Gaussian window of _len+1 taps.

    Parameters
    ----------
    _X : array-like, 1-D
    _len : int
    _sigma : float

    Returns
    -------
    np.ndarray of the same length as _X.
    """
    norm = np.sqrt(2 * np.pi) * _sigma  # Scipy's gaussian window is not normalized
    # BUG FIX: scipy.signal.gaussian was a deprecated alias removed in
    # SciPy 1.13; scipy.signal.windows.gaussian is the canonical location.
    window = signal.windows.gaussian(_len + 1, std=_sigma) / norm
    return np.convolve(_X, window, "same")
|
degoldschmidt/ribeirolab-codeconversion
|
python/tracking_project/tracking/preprocessing/filtering.py
|
Python
|
gpl-3.0
| 634
|
[
"Gaussian"
] |
b49b50523df8316379198c2a6b86b36faff6490b4d49d18aa60ed72b10e538b1
|
#!/usr/bin/python
# Copyright (c) 2013, Thomas Rast <trast@inf.ethz.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''Try a diff-based merge evilness detection.'''
import sys
import subprocess
import optparse
from collections import defaultdict
from itertools import takewhile
import difflib
# Command-line interface definition (optparse, as this script targets
# Python 2).
usage = '%prog <merge> [ <parent1> <parent2> [--] [<mergebase>...] ]'
description = '''\
Show whether <merge> contains any candidates for hunk level evilness.
The remaining args are optional, but the merge base in particular is
expensive to compute so you may want to provide it from a cache.
Works only on 2-parent merges. (Octopus merges are not supposed to be
created from conflicting changes anyway.)'''
parser = optparse.OptionParser(usage=usage, description=description)
parser.add_option('--stdin', default=False, action='store_true', dest='stdin',
                  help='Read arguments from stdin (one set of args per line)')
def get_merge_bases(cmt1, cmt2):
    # Return every merge base of the two commits (git merge-base --all),
    # as a list of SHA strings.  Exit status 1 means "no common ancestor",
    # which we map to an empty list; other failures propagate.
    try:
        out = subprocess.check_output(['git', 'merge-base', '--all', cmt1, cmt2])
        return out.strip().split()
    except subprocess.CalledProcessError, e:
        # merge-base fails with status 1 if there are no bases
        if e.returncode == 1:
            return []
        raise
def get_parents(commit):
    # Return the SHAs of the commit's first and second parents.  The caller
    # unpacks exactly two values, so non-merges surface as a ValueError there.
    out = subprocess.check_output(['git', 'rev-parse', commit+'^1', commit+'^2'])
    return out.strip().split()
def die(fmt, *fmtargs):
    # printf-style fatal error: write the message to stderr, exit non-zero.
    sys.stderr.write(fmt % fmtargs)
    sys.exit(1)
def split_diff(data):
    # Parse raw `git diff` output into {filename: [hunk, ...]} where each
    # hunk is the list of its content lines ('+', '-', ' ' or '\' prefixed);
    # file headers, index lines and '@@' markers are consumed as delimiters.
    diff = {}
    hunk = []
    for line in iter(data.splitlines(True)):
        if line.startswith('diff '):
            # New file section: flush any hunk collected for the previous file.
            if len(hunk):
                diff[filename].append(hunk)
                hunk = []
            continue
        elif line.startswith('--- '):
            continue
        elif line.startswith('+++ '):
            # The '+++ b/<path>' line names the file; strip the 'b/' prefix.
            filename = line[4:].rstrip('\n')
            if filename.startswith('b/'):
                filename = filename[2:]
            diff[filename] = []
            hunk = []
            continue
        elif line.startswith('index '):
            continue
        elif line.startswith('@@ '):
            # Hunk boundary: flush the previous hunk.
            if len(hunk):
                diff[filename].append(hunk)
                hunk = []
            continue
        elif len(line) and line[0] in '+- \\':
            hunk.append(line)
    # Flush the trailing hunk, if any.
    if len(hunk):
        diff[filename].append(hunk)
        hunk = []
    return diff
def get_diff(cmt1, cmt2):
    # Run `git diff -M` between two commits and return the parsed result
    # from split_diff() ({} when there is nothing to diff).
    # FIXME allow custom args to tweak the diff
    try:
        out = subprocess.check_output(['git', 'diff', '-M', cmt1, cmt2])
        return split_diff(out)
    except subprocess.CalledProcessError, e:
        # git-diff fails with status 1 if there are no differences
        if e.returncode == 1:
            return {}
        raise
def assemble_hunks(hunkseq):
    # Flatten a sequence of hunks (each a list of diff lines) into a single
    # list of lines, inserting an '@@\n' separator between adjacent hunks.
    flattened = []
    for index, hunk in enumerate(hunkseq):
        if index:
            flattened.append('@@\n')
        flattened.extend(hunk)
    return flattened
def any_suspicious_lines(diff):
    # Scan an interdiff: once past the first '@@ ' hunk header, any line
    # starting with two change markers ('--', '+-', '-+', '++') means a
    # change itself was changed, which is the suspicious signature.
    seen_header = False
    for line in diff:
        if line.startswith('@@ '):
            seen_header = True
        elif seen_header and line[:2] in ('--', '+-', '-+', '++'):
            return True
    return False
def remove_common_hunks(d1, d2):
    # For every file present in both diffs, drop any hunk whose exact text
    # also appears on the other side -- identical changes on both branches
    # cannot make a merge evil.  Inputs are left unmodified.
    out1, out2 = dict(d1), dict(d2)
    for path in set(d1) & set(d2):
        texts1 = set(''.join(h) for h in d1[path])
        texts2 = set(''.join(h) for h in d2[path])
        out1[path] = [h for h in d1[path] if ''.join(h) not in texts2]
        out2[path] = [h for h in d2[path] if ''.join(h) not in texts1]
    return out1, out2
def find_suspicious_hunks(dxM, dYx):
    '''Generate hunkwise interdiffs, trying to find a good match.

    dxM maps files to hunks of the parent->merge diff; dYx is a list (one
    per merge base) of base->other-parent diffs.  Returns {filename:
    interdiff lines} for every file whose interdiff contains doubly-changed
    lines (see any_suspicious_lines).
    '''
    # FIXME: this quick&dirty version assumes a single merge-base
    diff = {}
    files = set(dxM.keys()).union(dYx[0].keys())
    for f in files:
        pre = dYx[0].get(f, [])
        post = dxM.get(f, [])
        # might try something smarter, but this is a quick way
        pre_t = assemble_hunks(pre)
        post_t = assemble_hunks(post)
        delta = list(difflib.unified_diff(pre_t, post_t, f, f))
        if any_suspicious_lines(delta):
            diff[f] = delta
    return diff
def print_idiff(idiff, header):
    # Pretty-print an interdiff dict ({filename: diff lines}) under a
    # header; silently does nothing for an empty dict.
    if not idiff:
        return
    print header
    for f, diff in idiff.iteritems():
        print " %s" % f
        for line in diff:
            # Trailing comma: the diff lines carry their own newlines.
            print " %s" % line,
def abbrev(sha):
    # Shorten a full SHA-1 to the conventional 7-character form.
    return sha[0:7]  # FIXME (or not)
def detect_evilness(M, A, B, bases):
    # Compare each parent->merge diff against the merge-base->other-parent
    # diffs; hunks changed on both sides that do not carry into the merge
    # cleanly show up as suspicious interdiff lines and are printed.
    # NOTE(review): this function prints its report and implicitly returns
    # None; callers must not rely on a return value.
    dAM = get_diff(A, M)
    dBM = get_diff(B, M)
    dYA = [get_diff(Y, A) for Y in bases]
    dYB = [get_diff(Y, B) for Y in bases]
    for i in range(len(bases)):
        # Identical hunks on both sides cannot cause evilness; drop them.
        dYA[i], dYB[i] = remove_common_hunks(dYA[i], dYB[i])
    idiff_A = find_suspicious_hunks(dAM, dYB)
    idiff_B = find_suspicious_hunks(dBM, dYA)
    if idiff_A or idiff_B:
        print 'commit %s' % M
        print 'parents', abbrev(A), abbrev(B)
        print 'merge bases', ' '.join(abbrev(Y) for Y in bases)
        print_idiff(idiff_A, "suspicious hunks from %s..%s" % (abbrev(A), abbrev(M)))
        print_idiff(idiff_B, "suspicious hunks from %s..%s" % (abbrev(B), abbrev(M)))
def process_args(args, unhandled_fatal=True):
    # args: [<merge> [<parent1> <parent2> [--] [<base>...]]].  Missing
    # parents/bases are recovered from git.  With unhandled_fatal=False
    # (batch/--stdin mode) malformed records are skipped instead of exiting.
    if len(args) > 3 and args[3] == '--':
        del args[3]
    if len(args) < 1:
        if not unhandled_fatal:
            return
        parser.print_usage()
        sys.exit(1)
    merge = args[0]
    parent1 = None
    parent2 = None
    bases = None
    if len(args) > 1:
        parent1 = args[1]
    if len(args) > 2:
        parent2 = args[2]
    if len(args) > 3:
        bases = args[3:]
    if not parent1 or not parent2:
        try:
            # Unpacking raises ValueError when the commit has != 2 parents.
            parent1, parent2 = get_parents(merge)
        except ValueError:
            if not unhandled_fatal:
                return
            die('%s does not appear to be a merge\n', merge)
    if not bases:
        bases = get_merge_bases(parent1, parent2)
    suspects = detect_evilness(merge, parent1, parent2, bases)
    # NOTE(review): detect_evilness() always returns None (it prints its own
    # report), so the branch below is dead code -- confirm before relying on it.
    if suspects:
        print "commit %s" % merge
        print "suspicious merge in files:"
        for filename, desc in suspects:
            print "\t%-25s\t%s" % (desc, filename)
        print
if __name__ == '__main__':
    options, args = parser.parse_args()
    if options.stdin:
        # Batch mode: one whitespace-separated argument set per input line;
        # errors skip the record rather than aborting the whole run.
        for line in sys.stdin:
            args = line.strip().split()
            process_args(args, unhandled_fatal=False)
    else:
        process_args(args)
|
trast/evilmergediff
|
evil-base-diffdiff.py
|
Python
|
gpl-2.0
| 7,025
|
[
"Octopus"
] |
1cc1c539e0795574f990762b4c78b770a0405621c3d64a1e565c11e0efe256c5
|
import threading
import logging
import socket
from urllib2 import urlopen
from itertools import chain
from datetime import timedelta
from lxml import etree
from molly.apps.places import get_entity
from molly.apps.places.models import Route, EntityType, StopOnRoute, Source, Entity
from molly.apps.places.providers import BaseMapsProvider
from molly.apps.places.providers.naptan import NaptanMapsProvider
from molly.conf.provider import task
from molly.utils.i18n import set_name_in_language
socket.setdefaulttimeout(5)  # keep HTTP scrapes from hanging indefinitely
logger = logging.getLogger(__name__)
# Maps operator encoded names to known "friendly versions".
# Unknown codes fall through unchanged (see _scrape_search's .get() call).
OPERATOR_NAMES = {'SOX': 'Stagecoach',
                  'TT': 'Thames Travel',
                  'OBC': 'Oxford Bus Company',
                  '*Voyager_PD_RAV(en-GB)*': 'ARRIVA',
                  }
class CloudAmberBusRouteProvider(BaseMapsProvider):
    """Sends an empty search string to the cloudamber route search.
    This returns all routes which we can scrape to collect the
    route information.
    """

    def __init__(self, url):
        # Pre-built "match anything, search by service number" query URL.
        self.url = "%s/Naptan.aspx?rdExactMatch=any&hdnSearchType=searchbyServicenumber&hdnChkValue=any" % url

    @task(run_every=timedelta(days=7))
    def import_data(self, **metadata):
        # Weekly Celery task entry point.
        logger.info("Importing Route data from %s" % self.url)
        self._scrape_search()

    def _scrape_search(self):
        """Scrapes the search page and queues tasks for scraping the results"""
        e = etree.parse(self.url, parser=etree.HTMLParser())
        # Result rows live in the second table inside the cloud-amber div.
        rows = e.findall('.//div[@class="cloud-amber"]')[0].findall('.//table')[1].findall('tbody/tr')
        for row in rows:
            route_no, operator, dest = row.getchildren()
            route_no = route_no.text
            operator = operator.find('span').text
            # Translate operator codes to friendly names where known.
            operator = OPERATOR_NAMES.get(operator, operator)
            route = dest.find('a').text
            route_href = dest.find('a').get('href')
            logger.debug("Found route: %s - %s - %s" % (route_no, operator, route))
            route, created = Route.objects.get_or_create(
                external_ref=route_href,
                defaults={
                    'service_id': route_no,
                    'service_name': route,
                    'operator': operator,
                }
            )
            if created:
                # NOTE(review): uses the root `logging` module, unlike the
                # module-level `logger` used everywhere else in this file.
                logging.debug("Created new route: %s" % route.service_name)
                # Queue stop scraping for the newly discovered route
                # (presumably only for new routes to avoid duplicating
                # StopOnRoute rows -- confirm).
                self._scrape_route.delay(route.id, route_href)

    def _get_entity(self, stop_code, stop_name, source, entity_type):
        """Finds a bus stop entity or creates one if it cannot be found.
        If multiple entities are found we clean them up.
        """
        scheme = 'naptan'
        try:
            entity = get_entity(scheme, stop_code)
        except:
            try:
                entity = Entity.objects.get(_identifiers__scheme=scheme,
                                            _identifiers__value=stop_code)
                logger.debug("Found Entity: %s" % entity)
            except Entity.DoesNotExist:
                logger.debug("Entity does not exist: %s-%s" % (stop_code, stop_name))
                entity = Entity()
            except Entity.MultipleObjectsReturned:
                # Duplicate rows: delete them all and start from scratch.
                logger.warning("Multiple Entities found for : %s-%s" % (stop_code, stop_name))
                Entity.objects.filter(_identifiers__scheme=scheme,
                                      _identifiers__value=stop_code).delete()
                entity = Entity()
        # (Re)populate the entity whether it was found or freshly created.
        entity.primary_type = entity_type
        entity.source = source
        identifiers = {scheme: stop_code}
        set_name_in_language(entity, 'en', title=stop_name)
        entity.all_types = (entity_type,)
        entity.save(identifiers=identifiers)
        return entity

    @task(max_retries=1)
    def _scrape_route(self, route_id, href):
        """Load route data from our Cloudamber provider and capture the stop data."""
        logger.info("Scraping route: %s" % href)
        e = etree.parse(href, parser=etree.HTMLParser())
        rows = e.findall('.//div[@class="cloud-amber"]')[0].findall('.//table')[1].findall('tbody/tr')
        source = self._get_source()
        entity_type = self._get_entity_type()
        for i, row in enumerate(rows):
            expand, naptan, map_href, stop_name, town = row.getchildren()
            stop_code = naptan.text
            stop_name = stop_name.find('a').text
            entity = self._get_entity(stop_code, stop_name, source, entity_type)
            # 'order' preserves the stop sequence along the route.
            StopOnRoute.objects.create(route_id=route_id, entity=entity, order=i)

    def _get_source(self):
        """Create or get a reference to this provider"""
        source, created = Source.objects.get_or_create(module_name=__name__,
                                                       name='CloudAmber Route Scraper')
        source.save()
        return source

    def _get_entity_type(self):
        """Get the Entity type for BCT - Bus/Coach/Tram stop"""
        return NaptanMapsProvider(None)._get_entity_types()['BCT'][0]
class CloudAmberBusRtiProvider(BaseMapsProvider):
    """
    Populates bus stop entities with real time departure metadata using the
    Cloud Amber interface
    An example live instance should be hosted at http://www.oxontime.com
    """

    def __init__(self, url):
        """ url is CloudAmber instance """
        self.url = url

    def get_url(self, naptan):
        """ Constructs URL containing RTI for a given naptan busstop id """
        url = "%s/Naptan.aspx?t=departure&sa=%s&dc=&ac=96&vc=&x=0&y=0&format=xhtml" % (
            self.url, naptan)
        return url

    def augment_metadata(self, entities, routes=[], **kwargs):
        """Fetch real-time departures for each bus-stop entity, one thread
        per stop, and block until all threads have finished.

        NOTE(review): mutable default argument `routes=[]` -- harmless here
        since it is only passed through, but worth replacing with None.
        """
        threads = []
        for entity in entities:
            # NOTE(review): this EntityType lookup runs once per entity and
            # could be hoisted out of the loop.
            bus_et = EntityType.objects.get(slug='bus-stop')
            if bus_et not in entity.all_types.all():
                continue
            thread = threading.Thread(target=self.get_times,
                                      args=[entity, routes])
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()

    def parse_html(self, content):
        """
        Parse HTML content (Cloud Amber's HTML) from a string

        Returns (services, messages): a list of per-service departure dicts
        sorted by soonest departure, and a list with at most one HTML
        message string for the stop.
        """
        services = {}
        messages = []
        try:
            xml = etree.fromstring(content, parser=etree.HTMLParser())
            # we need the second table
            cells = xml.findall('.//div[@class="cloud-amber"]')[0].findall('.//table')[1].findall('tbody/tr/td')
            # retrieved all cells, splitting every CELLS_PER_ROW to get rows
            CELLS_PER_ROW = 5
            rows = [cells[i:i+CELLS_PER_ROW] for i in range(0, len(cells), CELLS_PER_ROW)]
            for row in rows:
                # First three cells: service number, destination, proximity;
                # strip non-breaking spaces (Python 2 byte strings).
                service, destination, proximity = [row[i].text.encode('utf8').replace('\xc2\xa0', '')
                                                   for i in range(3)]
                if proximity.lower() == 'due':
                    diff = 0
                else:
                    # e.g. "5 mins" -> 5
                    diff = int(proximity.split(' ')[0])
                if not service in services:
                    # first departure of this service
                    services[service] = (destination, (proximity, diff), [])
                else:
                    # following departure of this service
                    services[service][2].append((proximity, diff))
            services = [(s[0], s[1][0], s[1][1], s[1][2]) for s in services.items()]
            # Secondary sort: service number padded so e.g. "2" < "10";
            # primary sort (applied last, stable): minutes until departure.
            services.sort(key = lambda x: ( ' '*(5-len(x[0]) + (1 if x[0][-1].isalpha() else 0)) + x[0] ))
            services.sort(key = lambda x: x[2][1])
            services = [{
                'service': s[0],
                'destination': s[1],
                'next': s[2][0],
                'following': [f[0] for f in s[3]],
            } for s in services]
            # messages that can be displayed (bus stop)
            cells = xml.findall('.//table')[0].findall('tr/td')
            try:
                # Re-serialize the message cell's mixed text/markup content.
                messages = cells[3]
                parts = ([messages.text] +
                         list(chain(*([c.text, etree.tostring(c), c.tail] for c in messages.getchildren()))) +
                         [messages.tail])
                messages = ''.join([p for p in parts if p])
                messages = [messages]
            except IndexError:
                pass
                # no message
        except Exception:
            logger.info('Unable to parse HTML', exc_info=True, extra={
                'data': {
                    'html_content': content,
                },
            })
        return services, messages

    def get_times(self, entity, routes):
        """
        Retrieve RTI information from one entity
        Get page, scrape it.
        If it fails, set the meta_refresh to get the page on
        ERROR_REFRESH_INTERVAL rather than REFRESH_INTERVAL
        Assign a route to each service if it exists in our DB.
        """
        REFRESH_INTERVAL = 30
        ERROR_REFRESH_INTERVAL = 5
        try:
            content = urlopen(self.get_url(entity.identifiers.get('naptan'))).read()
            services, messages = self.parse_html(content)
        except:
            logger.info('Unable to retrieve RTI information', exc_info=True,
                        extra={
                            'data': {
                                'naptan_id': entity.identifiers.get('naptan', 0),
                            },
                        })
            # if an exception occured, send empty metadata.
            entity.metadata['real_time_information'] = {
                'services': {},
                'pip_info': [],
            }
            # Get the client to refresh sooner if an exception
            entity.metadata['meta_refresh'] = ERROR_REFRESH_INTERVAL
        else:
            # Assign route to each service
            for service in services:
                service['route'] = self._get_route(service['service'], entity)
            entity.metadata['real_time_information'] = {
                'services': services,
                'pip_info': messages,
            }
            entity.metadata['meta_refresh'] = REFRESH_INTERVAL

    def _get_route(self, service, entity):
        # True when a Route with this service id passes through this stop.
        return Route.objects.filter(service_id=service, stops=entity).exists()
|
mollyproject/mollyproject
|
molly/apps/places/providers/cloudamber.py
|
Python
|
apache-2.0
| 10,209
|
[
"Amber"
] |
0521801f08331a028f59403fac9eaed395d6193c75956f29655d1a226eaf7159
|
"""
Feature calculations.
"""
import logging
import types
import numpy as np
import multiprocessing
from typing import Any, Dict, List, Iterable, Sequence, Tuple, Union
logger = logging.getLogger(__name__)
class Featurizer(object):
  """Abstract class for calculating a set of features for a datapoint.

  This class is abstract and cannot be invoked directly. You'll
  likely only interact with this class if you're a developer. In
  that case, you might want to make a child class which
  implements the `_featurize` method for calculating features for
  a single datapoints if you'd like to make a featurizer for a
  new datatype.
  """

  def featurize(self, datapoints: Iterable[Any],
                log_every_n: int = 1000) -> np.ndarray:
    """Calculate features for datapoints.

    Parameters
    ----------
    datapoints: Iterable[Any]
      A sequence of objects that you'd like to featurize. Subclassses of
      `Featurizer` should instantiate the `_featurize` method that featurizes
      objects in the sequence.
    log_every_n: int, default 1000
      Logs featurization progress every `log_every_n` steps.

    Returns
    -------
    A numpy array containing a featurized representation of `datapoints`.
    Datapoints that fail to featurize contribute an empty array instead.
    """
    datapoints = list(datapoints)
    features = []
    for i, point in enumerate(datapoints):
      if i % log_every_n == 0:
        logger.info("Featurizing datapoint %i", i)
      try:
        features.append(self._featurize(point))
      except Exception:
        # BUG FIX: the original message contained a %d placeholder but never
        # passed the index, so the literal "%d" was logged.  Also narrowed
        # the bare `except:` so KeyboardInterrupt/SystemExit propagate.
        logger.warning(
            "Failed to featurize datapoint %d. Appending empty array", i)
        features.append(np.array([]))

    return np.asarray(features)

  def __call__(self, datapoints: Iterable[Any]):
    """Calculate features for datapoints.

    Parameters
    ----------
    datapoints: Iterable[Any]
      Any blob of data you like. Subclasss should instantiate this.
    """
    return self.featurize(datapoints)

  def _featurize(self, datapoint: Any):
    """Calculate features for a single datapoint.

    Parameters
    ----------
    datapoint: Any
      Any blob of data you like. Subclass should instantiate this.
    """
    raise NotImplementedError('Featurizer is not defined.')
def _featurize_callback(
    featurizer,
    mol_pdb_file,
    protein_pdb_file,
    log_message,
):
  """Callback function for apply_async in ComplexFeaturizer.

  Logs `log_message` and delegates to the featurizer's `_featurize`.

  This callback function must be defined globally
  because `apply_async` doesn't execute a nested function.
  See the details from the following link.
  https://stackoverflow.com/questions/56533827/pool-apply-async-nested-function-is-not-executed
  """
  logging.info(log_message)
  return featurizer._featurize(mol_pdb_file, protein_pdb_file)
class ComplexFeaturizer(object):
""""
Abstract class for calculating features for mol/protein complexes.
"""
def featurize(self, mol_files: Sequence[str],
protein_pdbs: Sequence[str]) -> Tuple[np.ndarray, List]:
"""
Calculate features for mol/protein complexes.
Parameters
----------
mols: List[str]
List of PDB filenames for molecules.
protein_pdbs: List[str]
List of PDB filenames for proteins.
Returns
-------
features: np.ndarray
Array of features
failures: List
Indices of complexes that failed to featurize.
"""
pool = multiprocessing.Pool()
results = []
for i, (mol_file, protein_pdb) in enumerate(zip(mol_files, protein_pdbs)):
log_message = "Featurizing %d / %d" % (i, len(mol_files))
results.append(
pool.apply_async(_featurize_callback,
(self, mol_file, protein_pdb, log_message)))
pool.close()
features = []
failures = []
for ind, result in enumerate(results):
new_features = result.get()
# Handle loading failures which return None
if new_features is not None:
features.append(new_features)
else:
failures.append(ind)
features = np.asarray(features)
return features, failures
def _featurize(self, mol_pdb: str, complex_pdb: str):
"""
Calculate features for single mol/protein complex.
Parameters
----------
mol_pdb: list
Should be a list of lines of the PDB file.
complex_pdb: list
Should be a list of lines of the PDB file.
"""
raise NotImplementedError('Featurizer is not defined.')
class MolecularFeaturizer(Featurizer):
  """Abstract class for calculating a set of features for a
  molecule.

  The defining feature of a `MolecularFeaturizer` is that it
  uses SMILES strings and RDKIT molecule objects to represent
  small molecules. All other featurizers which are subclasses of
  this class should plan to process input which comes as smiles
  strings or RDKIT molecules.

  Child classes need to implement the _featurize method for
  calculating features for a single molecule.

  Note
  ----
  In general, subclasses of this class will require RDKit to be installed.
  """

  def featurize(self, molecules, log_every_n=1000):
    """Calculate features for molecules.

    Parameters
    ----------
    molecules: RDKit Mol / SMILES string / iterable
      RDKit Mol, or SMILES string or iterable sequence of RDKit mols/SMILES
      strings.

    Returns
    -------
    A numpy array containing a featurized representation of
    `datapoints`. Molecules that fail to parse or featurize contribute an
    empty array instead.

    Raises
    ------
    ValueError
      If RDKit is not installed.
    """
    try:
      from rdkit import Chem
      from rdkit.Chem import rdmolfiles
      from rdkit.Chem import rdmolops
      from rdkit.Chem.rdchem import Mol
    except ModuleNotFoundError:
      raise ValueError("This class requires RDKit to be installed.")
    # Special case handling of single molecule
    if isinstance(molecules, str) or isinstance(molecules, Mol):
      molecules = [molecules]
    else:
      # Convert iterables to list
      molecules = list(molecules)
    features = []
    for i, mol in enumerate(molecules):
      if i % log_every_n == 0:
        logger.info("Featurizing datapoint %i", i)
      try:
        # Process only case of SMILES strings.
        if isinstance(mol, str):
          # mol must be a SMILES string so parse
          mol = Chem.MolFromSmiles(mol)
          # TODO (ytz) this is a bandage solution to reorder the atoms
          # so that they're always in the same canonical order.
          # Presumably this should be correctly implemented in the
          # future for graph mols.
          if mol:
            new_order = rdmolfiles.CanonicalRankAtoms(mol)
            mol = rdmolops.RenumberAtoms(mol, new_order)
        features.append(self._featurize(mol))
      except Exception:
        # BUG FIX: the original message contained a %d placeholder but never
        # passed the index, so the literal "%d" was logged.  Also narrowed
        # the bare `except:` so KeyboardInterrupt/SystemExit propagate.
        logger.warning(
            "Failed to featurize datapoint %d. Appending empty array", i)
        features.append(np.array([]))

    return np.asarray(features)
class MaterialStructureFeaturizer(Featurizer):
  """
  Abstract class for calculating a set of features for an
  inorganic crystal structure.

  The defining feature of a `MaterialStructureFeaturizer` is that it
  operates on 3D crystal structures with periodic boundary conditions.
  Inorganic crystal structures are represented by Pymatgen structure
  objects. Featurizers for inorganic crystal structures that are subclasses
  of this class should plan to process input which comes as pymatgen
  structure objects.

  This class is abstract and cannot be invoked directly. You'll
  likely only interact with this class if you're a developer. Child
  classes need to implement the _featurize method for calculating
  features for a single crystal structure.

  Notes
  -----
  Some subclasses of this class will require pymatgen and matminer to be
  installed.
  """

  def featurize(self,
                structures: Iterable[Dict[str, Any]],
                log_every_n: int = 1000) -> np.ndarray:
    """Calculate features for crystal structures.

    Parameters
    ----------
    structures: Iterable[Dict[str, Any]]
      Iterable sequence of pymatgen structure dictionaries.
      Dictionary representations of pymatgen.Structure
      https://pymatgen.org/pymatgen.core.structure.html
    log_every_n: int, default 1000
      Logging messages reported every `log_every_n` samples.

    Returns
    -------
    features: np.ndarray
      A numpy array containing a featurized representation of
      `structures`. Datapoints that fail to featurize contribute an
      empty array.

    Raises
    ------
    ValueError
      If pymatgen is not installed.
    """
    structures = list(structures)
    try:
      # pymatgen >= 2022 removed the top-level re-export; prefer the
      # canonical location, falling back for older releases.
      from pymatgen.core import Structure
    except ImportError:
      try:
        from pymatgen import Structure
      except ImportError:
        raise ValueError("This class requires pymatgen to be installed.")
    features = []
    for idx, structure in enumerate(structures):
      if idx % log_every_n == 0:
        logger.info("Featurizing datapoint %i" % idx)
      try:
        s = Structure.from_dict(structure)
        features.append(self._featurize(s))
      except Exception:
        # `except Exception` instead of a bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate.
        logger.warning(
            "Failed to featurize datapoint %i. Appending empty array" % idx)
        features.append(np.array([]))
    features = np.asarray(features)
    return features
class MaterialCompositionFeaturizer(Featurizer):
  """
  Abstract class for calculating a set of features for an
  inorganic crystal composition.

  The defining feature of a `MaterialCompositionFeaturizer` is that it
  operates on 3D crystal chemical compositions.
  Inorganic crystal compositions are represented by Pymatgen composition
  objects. Featurizers for inorganic crystal compositions that are
  subclasses of this class should plan to process input which comes as
  Pymatgen composition objects.

  This class is abstract and cannot be invoked directly. You'll
  likely only interact with this class if you're a developer. Child
  classes need to implement the _featurize method for calculating
  features for a single crystal composition.

  Notes
  -----
  Some subclasses of this class will require pymatgen and matminer to be
  installed.
  """

  def featurize(self, compositions: Iterable[str],
                log_every_n: int = 1000) -> np.ndarray:
    """Calculate features for crystal compositions.

    Parameters
    ----------
    compositions: Iterable[str]
      Iterable sequence of composition strings, e.g. "MoS2".
    log_every_n: int, default 1000
      Logging messages reported every `log_every_n` samples.

    Returns
    -------
    features: np.ndarray
      A numpy array containing a featurized representation of
      `compositions`. Datapoints that fail to featurize contribute an
      empty array.

    Raises
    ------
    ValueError
      If pymatgen is not installed.
    """
    compositions = list(compositions)
    try:
      # pymatgen >= 2022 removed the top-level re-export; prefer the
      # canonical location, falling back for older releases.
      from pymatgen.core import Composition
    except ImportError:
      try:
        from pymatgen import Composition
      except ImportError:
        raise ValueError("This class requires pymatgen to be installed.")
    features = []
    for idx, composition in enumerate(compositions):
      if idx % log_every_n == 0:
        logger.info("Featurizing datapoint %i" % idx)
      try:
        c = Composition(composition)
        features.append(self._featurize(c))
      except Exception:
        # `except Exception` instead of a bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate.
        logger.warning(
            "Failed to featurize datapoint %i. Appending empty array" % idx)
        features.append(np.array([]))
    features = np.asarray(features)
    return features
class UserDefinedFeaturizer(Featurizer):
  """Directs usage of user-computed featurizations.

  Only stores the names of the fields that hold precomputed features;
  no `_featurize` is defined here, so consumers are presumably expected
  to read those fields directly from the dataset — confirm against the
  data-loading code.
  """

  def __init__(self, feature_fields):
    """Creates user-defined-featurizer.

    Parameters
    ----------
    feature_fields:
      Names of the fields containing user-computed features
      (assumed to be a sequence of strings — only stored here).
    """
    self.feature_fields = feature_fields
|
miaecle/deepchem
|
deepchem/feat/base_classes.py
|
Python
|
mit
| 11,262
|
[
"CRYSTAL",
"RDKit",
"matminer",
"pymatgen"
] |
9ed6deffb6d7df9f2e9f50d0ea07f6770e3bbc708d91ae64d805662b89d17aeb
|
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import os
import io
import unittest
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import FragmentCatalog, BuildFragmentCatalog
from rdkit.six.moves import cPickle
def feq(n1, n2, tol=1e-4):
  """Approximate float equality: True when |n1 - n2| < tol."""
  delta = n1 - n2
  return -tol < delta < tol
class TestCase(unittest.TestCase):
  """Unit tests for the fragment-catalog functionality:
  FragmentCatalog, FragFPGenerator, and BuildFragmentCatalog."""

  def setUp(self) :
    # Molecules used by the (disabled) "_test5MoreComplex" test.
    self.smiList = ["S(SC1=NC2=CC=CC=C2S1)C3=NC4=C(S3)C=CC=C4","CC1=CC(=O)C=CC1=O",
                    "OC1=C(Cl)C=C(C=C1[N+]([O-])=O)[N+]([O-])=O",
                    "[O-][N+](=O)C1=CNC(=N)S1", "NC1=CC2=C(C=C1)C(=O)C3=C(C=CC=C3)C2=O",
                    "OC(=O)C1=C(C=CC=C1)C2=C3C=CC(=O)C(=C3OC4=C2C=CC(=C4Br)O)Br",
                    "CN(C)C1=C(Cl)C(=O)C2=C(C=CC=C2)C1=O",
                    "CC1=C(C2=C(C=C1)C(=O)C3=CC=CC=C3C2=O)[N+]([O-])=O",
                    "CC(=NO)C(C)=NO"]
    # Small alcohol/alkene set whose 21-entry catalog contents are known.
    self.smiList2 = ['OCCC','CCC','C=CC','OC=CC','CC(O)C',
                     'C=C(O)C','OCCCC','CC(O)CC','C=CCC','CC=CC',
                     'OC=CCC','CC=C(O)C','OCC=CC','C=C(O)CC',
                     'C=CC(O)C','C=CCCO',
                     ]
    # Activity labels and expected fingerprint on-bit tuples for smiList2.
    self.list2Acts = [1,0,0,1,1,1,1,1,0,0,1,1,1,1,1,1]
    self.list2Obls = [(0,1,2),(1,3),(1,4,5),(1,6,7),(0,8),(0,6,9),(0,1,2,3,10),
                      (0,1,2,8,11),(1,3,4,5,12),(1,4,5,13),(1,3,6,7,14),(0,1,6,7,9,15)]

    # Catalog over paths of length 1..6 using RDKit's functional groups.
    ffile = os.path.join(RDConfig.RDDataDir,'FunctionalGroups.txt')
    self.catParams = FragmentCatalog.FragCatParams(1,6,ffile)
    self.fragCat = FragmentCatalog.FragCatalog(self.catParams)
    self.fgen = FragmentCatalog.FragCatGenerator()

  def _fillCat(self,smilList):
    # NOTE(review): this iterates `self.smiList2` rather than the
    # `smilList` argument — confirm whether the parameter was meant
    # to be used (every caller happens to pass self.smiList2).
    for smi in self.smiList2:
      mol = Chem.MolFromSmiles(smi)
      self.fgen.AddFragsFromMol(mol,self.fragCat)

  def _testBits(self,fragCat):
    # Verify fingerprint bit counts and exact on-bit tuples against
    # the precomputed expectations for smiList2.
    fpgen = FragmentCatalog.FragFPGenerator()
    obits = [3,2,3,3,2,3,5,5,5,4,5,6]
    obls = self.list2Obls
    suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2),
                                           ',',0,-1,0)
    i = 0
    for mol in suppl:
      fp = fpgen.GetFPForMol(mol, fragCat)
      if i < len(obits):
        smi = Chem.MolToSmiles(mol)
        assert fp.GetNumOnBits()==obits[i],'%s: %s'%(smi,str(fp.GetOnBits()))
      obl = fp.GetOnBits()
      if i < len(obls):
        assert tuple(obl)==obls[i],'%s: %s'%(smi,obl)
      i+=1

  def test1CatGen(self) :
    # Catalog generation produces the expected 21 entries/bits.
    self._fillCat(self.smiList2)
    assert self.fragCat.GetNumEntries()==21
    assert self.fragCat.GetFPLength()==21
    self._testBits(self.fragCat)

  def test2CatStringPickle(self):
    # Round-tripping the catalog through pickle preserves its contents.
    self._fillCat(self.smiList2)
    # test non-binary pickle:
    cat2 = cPickle.loads(cPickle.dumps(self.fragCat))
    assert cat2.GetNumEntries()==21
    assert cat2.GetFPLength()==21
    self._testBits(cat2)
    # test binary pickle:
    cat2 = cPickle.loads(cPickle.dumps(self.fragCat,1))
    assert cat2.GetNumEntries()==21
    assert cat2.GetFPLength()==21
    self._testBits(cat2)

  def test3CatFilePickle(self):
    # A catalog pickled to disk by an earlier RDKit version still loads.
    with open(os.path.join(RDConfig.RDCodeDir,'Chem',
                           'test_data','simple_catalog.pkl'),
              'r') as pklTFile:
      buf = pklTFile.read().replace('\r\n', '\n').encode('utf-8')
      pklTFile.close()
    with io.BytesIO(buf) as pklFile:
      cat = cPickle.load(pklFile, encoding='bytes')
    assert cat.GetNumEntries()==21
    assert cat.GetFPLength()==21
    self._testBits(cat)

  def test4CatGuts(self):
    # Spot-check bit descriptions, orders, and functional-group ids.
    self._fillCat(self.smiList2)
    assert self.fragCat.GetNumEntries()==21
    assert self.fragCat.GetFPLength()==21
    #
    # FIX: (Issue 162)
    # bits like 11 and 15 are questionable here because the underlying
    # fragments are symmetrical, so they can generate one of two
    # text representations (i.e. there is nothing to distinguish
    # between 'CC<-O>CC' and 'CCC<-O>C').
    # This ought to eventually be cleaned up.
    descrs = [(0,'C<-O>C',1,(34,)),
              (1,'CC',1,()),
              (2,'C<-O>CC',2,(34,)),
              (3,'CCC',2,()),
              (4,'C=C',1,()),
              (5,'C=CC',2,()),
              (6,'C<-O>=C',1,(34,)),
              (7,'C<-O>=CC',2,(34,)),
              (8,'CC<-O>C',2,(34,)),
              (9,'C=C<-O>C',2,(34,)),
              (10,'C<-O>CCC',3,(34,)),
              (11,'CC<-O>CC',3,(34,)),
              (12,'C=CCC',3,()),
              (13,'CC=CC',3,()),
              (14,'C<-O>=CCC',3,(34,)),
              (15,'CC=C<-O>C',3,(34,)),
              (16,'C=CC<-O>',2,(34,)),
              ]
    for i in range(len(descrs)):
      id,d,order,ids=descrs[i]
      descr = self.fragCat.GetBitDescription(id)
      assert descr == d,'%d: %s != %s'%(id,descr,d)
      assert self.fragCat.GetBitOrder(id)==order
      assert tuple(self.fragCat.GetBitFuncGroupIds(id)) == \
             ids,'%d: %s != %s'%(id,
                                 str(self.fragCat.GetBitFuncGroupIds(id)),
                                 str(ids))

  def _test5MoreComplex(self):
    # Disabled (leading underscore): checks that every bit added for a
    # molecule is set in that molecule's fingerprint.
    lastIdx = 0
    ranges = {}
    suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList),
                                           ',',0,-1,0)
    i = 0
    for mol in suppl:
      nEnt = self.fgen.AddFragsFromMol(mol,self.fragCat)
      ranges[i] = range(lastIdx,lastIdx+nEnt)
      lastIdx+=nEnt
      i+=1
    # now make sure that those bits are contained in the signatures:
    fpgen = FragmentCatalog.FragFPGenerator()
    i = 0
    for mol in suppl:
      fp = fpgen.GetFPForMol(mol,self.fragCat)
      for bit in ranges[i]:
        assert fp[bit],'%s: %s'%(Chem.MolToSmiles(mol),str(bit))
      i += 1

  def test6Builder(self):
    # BuildFragmentCatalog.BuildCatalog reproduces the hand-built catalog.
    suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2),
                                           ',',0,-1,0)
    cat = BuildFragmentCatalog.BuildCatalog(suppl,minPath=1,reportFreq=20)
    assert cat.GetNumEntries()==21
    assert cat.GetFPLength()==21
    self._testBits(cat)

  def test7ScoreMolecules(self):
    # ScoreMolecules and ScoreFromLists must agree with each other and
    # with the precomputed on-bit lists.
    suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2),
                                           ',',0,-1,0)
    cat = BuildFragmentCatalog.BuildCatalog(suppl,minPath=1,reportFreq=20)
    assert cat.GetNumEntries()==21
    assert cat.GetFPLength()==21
    scores,obls = BuildFragmentCatalog.ScoreMolecules(suppl,cat,acts=self.list2Acts,
                                                      reportFreq=20)
    for i in range(len(self.list2Obls)):
      assert tuple(obls[i])==self.list2Obls[i],'%d: %s != %s'%(i,str(obls[i]),
                                                              str(self.list2Obls[i]))
    scores2 = BuildFragmentCatalog.ScoreFromLists(obls,suppl,cat,acts=self.list2Acts,
                                                  reportFreq=20)
    for i in range(len(scores)):
      assert (scores[i]==scores2[i]).all(),'%d: %s != %s'%(i,str(scores[i]),str(scores2[i]))

  def test8MolRanks(self):
    # Information-gain ranking of bits, with and without a bias list.
    suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2),
                                           ',',0,-1,0)
    cat = BuildFragmentCatalog.BuildCatalog(suppl,minPath=1,reportFreq=20)
    assert cat.GetNumEntries()==21
    assert cat.GetFPLength()==21
    # new InfoGain ranking:
    bitInfo,fps = BuildFragmentCatalog.CalcGains(suppl,cat,topN=10,acts=self.list2Acts,
                                                 reportFreq=20,biasList=(1,))
    entry = bitInfo[0]
    assert int(entry[0])==0
    assert cat.GetBitDescription(int(entry[0]))=='C<-O>C'
    assert feq(entry[1],0.4669)
    entry = bitInfo[1]
    assert int(entry[0]) in (2,6)
    txt = cat.GetBitDescription(int(entry[0]))
    self.assertTrue( txt in ('C<-O>CC','C<-O>=C'), txt)
    assert feq(entry[1],0.1611)
    entry = bitInfo[6]
    assert int(entry[0])==16
    assert cat.GetBitDescription(int(entry[0]))=='C=CC<-O>'
    assert feq(entry[1],0.0560)
    # standard InfoGain ranking:
    bitInfo,fps = BuildFragmentCatalog.CalcGains(suppl,cat,topN=10,acts=self.list2Acts,
                                                 reportFreq=20)
    entry = bitInfo[0]
    assert int(entry[0])==0
    assert cat.GetBitDescription(int(entry[0]))=='C<-O>C'
    assert feq(entry[1],0.4669)
    entry = bitInfo[1]
    assert int(entry[0])==5
    assert cat.GetBitDescription(int(entry[0]))=='C=CC'
    assert feq(entry[1],0.2057)

  def test9Issue116(self):
    # Regression test for Issue 116: 2-atom-path catalogs.
    smiList = ['Cc1ccccc1']
    suppl = Chem.SmilesMolSupplierFromText('\n'.join(smiList),
                                           ',',0,-1,0)
    cat = BuildFragmentCatalog.BuildCatalog(suppl,minPath=2,maxPath=2)
    assert cat.GetFPLength()==2
    assert cat.GetBitDescription(0)=='ccC'
    fpgen = FragmentCatalog.FragFPGenerator()
    mol = Chem.MolFromSmiles('Cc1ccccc1')
    fp = fpgen.GetFPForMol(mol,cat)
    assert fp[0]
    assert fp[1]
    mol = Chem.MolFromSmiles('c1ccccc1-c1ccccc1')
    fp = fpgen.GetFPForMol(mol,cat)
    assert not fp[0]
    assert fp[1]
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
soerendip42/rdkit
|
rdkit/Chem/UnitTestCatalog.py
|
Python
|
bsd-3-clause
| 9,124
|
[
"RDKit"
] |
0beebb81df828581f1211ac79f0a7c580faa8954712d9699d0ca5fc3459a03dd
|
"""
Functions for transforming flow cytometer data to MEF units.
"""
import os
import functools
import collections
import six
import packaging
import packaging.version
import numpy as np
import scipy
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import sklearn
if packaging.version.parse(sklearn.__version__) \
>= packaging.version.parse('0.18'):
from sklearn.mixture import GaussianMixture
else:
from sklearn.mixture import GMM
import FlowCal.plot
import FlowCal.transform
import FlowCal.stats
# Colors used by `plot_standard_curve`: [beads data points, beads-model
# line, standard-curve line].
standard_curve_colors = ['tab:blue', 'tab:green', 'tab:red']
def clustering_gmm(data,
                   n_clusters,
                   tol=1e-7,
                   min_covar=None,
                   scale='logicle'):
    """
    Find clusters in an array using a Gaussian Mixture Model.

    Before clustering, `data` can be automatically rescaled as specified by
    the `scale` argument.

    Parameters
    ----------
    data : FCSData or array_like
        Data to cluster.
    n_clusters : int
        Number of clusters to find.
    tol : float, optional
        Tolerance for convergence. Directly passed to either
        ``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s
        version.
    min_covar : float, optional
        The minimum trace that the initial covariance matrix will have. If
        ``scikit-learn``'s version is older than 0.18, `min_covar` is also
        passed directly to ``GMM``.
    scale : str, optional
        Rescaling applied to `data` before performing clustering. Can be
        either ``linear`` (no rescaling), ``log``, or ``logicle``.

    Returns
    -------
    labels : array
        Nx1 array with labels for each element in `data`, assigning
        ``data[i]`` to cluster ``labels[i]``.

    Notes
    -----
    A Gaussian Mixture Model finds clusters by fitting a linear combination
    of `n_clusters` Gaussian probability density functions (pdf) to `data`
    using Expectation Maximization (EM).

    This method can be fairly sensitive to the initial parameter choice. To
    generate a reasonable set of initial conditions, `clustering_gmm`
    first divides all points in `data` into `n_clusters` groups of the
    same size based on their Euclidean distance to the minimum value. Then,
    for each group, the 50% samples farther away from the mean are
    discarded. The mean and covariance are calculated from the remaining
    samples of each group, and used as initial conditions for the GMM EM
    algorithm.

    `clustering_gmm` internally uses a `GaussianMixture` object from the
    ``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is
    lower than 0.18), with full covariance matrices for each cluster. For
    more information, consult ``scikit-learn``'s documentation.
    """
    # Initialize min_covar parameter
    # Parameter is initialized differently depending on scikit's version
    if min_covar is None:
        if packaging.version.parse(sklearn.__version__) \
                >= packaging.version.parse('0.18'):
            min_covar = 1e-3
        else:
            min_covar = 5e-5

    # Copy events before rescaling (the rescaling below mutates `data`)
    data = data.copy()

    # Apply rescaling
    if scale=='linear':
        # No rescaling
        pass
    elif scale=='log':
        # Logarithm of zero and negatives is undefined. Therefore, saturate
        # any non-positives to a small positive value.
        # The machine epsilon `eps` is the smallest number such that
        # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.
        data[data < 1e-15] = 1e-15
        # Rescale
        data = np.log10(data)
    elif scale=='logicle':
        # Use the logicle transform class in the plot module, and transform
        # data one channel at a time.
        for ch in range(data.shape[1]):
            # We need a transformation from "data value" to "display scale"
            # units. To do so, we use an inverse logicle transformation.
            t = FlowCal.plot._LogicleTransform(data=data, channel=ch).inverted()
            data[:,ch] = t.transform_non_affine(data[:,ch],
                                                mask_out_of_range=False)
    else:
        raise ValueError("scale {} not supported".format(scale))

    ###
    # Parameter initialization
    ###
    weights = np.tile(1.0 / n_clusters, n_clusters)
    means = []
    covars = []

    # Calculate distance to minimum value. Then, sort based on this distance.
    dist = np.sum((data - np.min(data, axis=0))**2., axis=1)
    sorted_idx = np.argsort(dist)

    # Expected number of elements per cluster
    n_per_cluster = data.shape[0]/float(n_clusters)

    # Get means and covariances per cluster
    # We will just use a fraction of ``1 - discard_frac`` of the data.
    # Data at the edges that actually corresponds to another cluster can
    # really mess up the final result.
    discard_frac = 0.5
    for i in range(n_clusters):
        il = int((i + discard_frac/2)*n_per_cluster)
        ih = int((i + 1 - discard_frac/2)*n_per_cluster)
        sorted_idx_cluster = sorted_idx[il:ih]
        data_cluster = data[sorted_idx_cluster]
        # Calculate means and covariances
        means.append(np.mean(data_cluster, axis=0))
        if data.shape[1] == 1:
            cov = np.cov(data_cluster.T).reshape(1,1)
        else:
            cov = np.cov(data_cluster.T)
        # Add small number to diagonal to avoid near-singular covariances
        cov += np.eye(data.shape[1]) * min_covar
        covars.append(cov)
    # Means should be an array
    means = np.array(means)

    ###
    # Run Gaussian Mixture Model Clustering
    ###
    if packaging.version.parse(sklearn.__version__) \
            >= packaging.version.parse('0.18'):
        # GaussianMixture uses precisions, the inverse of covariances.
        # To get the inverse, we solve the linear equation C*P = I. We also
        # use the fact that C is positive definite.
        precisions = [scipy.linalg.solve(c,
                                         np.eye(c.shape[0]),
                                         assume_a='pos')
                      for c in covars]
        precisions = np.array(precisions)
        # Initialize GaussianMixture object
        gmm = GaussianMixture(n_components=n_clusters,
                              tol=tol,
                              covariance_type='full',
                              weights_init=weights,
                              means_init=means,
                              precisions_init=precisions,
                              max_iter=500)
    else:
        # Initialize GMM object
        gmm = GMM(n_components=n_clusters,
                  tol=tol,
                  min_covar=min_covar,
                  covariance_type='full',
                  params='mc',
                  init_params='')
        # Set initial parameters
        # NOTE(review): sklearn's legacy GMM stores weights in `weights_`;
        # `gmm.weight_` here looks like a typo that would leave the mixing
        # weights uninitialized on the pre-0.18 path — confirm.
        gmm.weight_ = weights
        gmm.means_ = means
        gmm.covars_ = covars

    # Fit
    gmm.fit(data)

    # Get labels by sampling from the responsibilities
    # This avoids the complete elimination of a cluster if two or more
    # clusters have very similar means.
    resp = gmm.predict_proba(data)
    # NOTE(review): uses the global numpy RNG, so results are not
    # reproducible unless the caller seeds np.random. Also, a Python list
    # is returned although the docstring says "array" — confirm callers.
    labels = [np.random.choice(range(n_clusters), p=ri) for ri in resp]

    return labels
def selection_std(populations,
                  low=None,
                  high=None,
                  n_std_low=2.5,
                  n_std_high=2.5,
                  scale='logicle'):
    """
    Select populations if most of their elements are between two values.

    This function selects populations from `populations` if their means are
    more than `n_std_low` standard deviations greater than `low` and
    `n_std_high` standard deviations lower than `high`.

    Optionally, all elements in `populations` can be rescaled as specified
    by the `scale` argument before calculating means and standard
    deviations.

    Parameters
    ----------
    populations : list of 1D arrays or 1-channel FCSData objects
        Populations to select or discard.
    low, high : int or float
        Low and high thresholds. Required if the elements in `populations`
        are numpy arrays. If not specified, and the elements in
        `populations` are FCSData objects, use 1.5% and 98.5% of the range
        in ``populations[0].range``.
    n_std_low, n_std_high : float, optional
        Number of standard deviations from `low` and `high`, respectively,
        that a population's mean has to be closer than to be discarded.
    scale : str, optional
        Rescaling applied to `populations` before calculating means and
        standard deviations. Can be either ``linear`` (no rescaling),
        ``log``, or ``logicle``.

    Returns
    -------
    selected_mask : boolean array
        Flags indicating whether a population has been selected.
    """
    # Generate scaling functions
    if scale == 'linear':
        # Identity function
        sf = lambda x: x
    elif scale == 'log':
        sf = np.log10
    elif scale == 'logicle':
        # We need a transformation from "data value" to "display scale"
        # units. To do so, we use an inverse logicle transformation.
        t = FlowCal.plot._LogicleTransform(data=populations[0],
                                           channel=0).inverted()
        sf = lambda x: t.transform_non_affine(x, mask_out_of_range=False)
    else:
        raise ValueError("scale {} not supported".format(scale))

    # If thresholds were provided, apply scaling function. Else, obtain and
    # rescale thresholds from range.
    # NOTE(review): `hasattr(populations[0], 'hist_bins')` is used as a
    # duck-typing test for FCSData objects — confirm this attribute exists
    # on all supported FCSData versions.
    if low is None:
        if hasattr(populations[0], 'hist_bins'):
            # Obtain default thresholds from range
            r = populations[0].range(channels=0)
            # If using log scale and the lower limit is non-positive, change to
            # a very small positive number.
            # The machine epsilon `eps` is the smallest number such that
            # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.
            if scale == 'log' and r[0] <= 0:
                r[0] = 1e-15
            low = sf(r[0]) + 0.015*(sf(r[1]) - sf(r[0]))
        else:
            raise TypeError("argument 'low' not specified")
    else:
        low = sf(low)
    if high is None:
        if hasattr(populations[0], 'hist_bins'):
            # Obtain default thresholds from range
            r = populations[0].range(channels=0)
            # If using log scale and the lower limit is non-positive, change to
            # a very small positive number.
            # The machine epsilon `eps` is the smallest number such that
            # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.
            if scale == 'log' and r[0] <= 0:
                r[0] = 1e-15
            high = sf(r[0]) + 0.985*(sf(r[1]) - sf(r[0]))
        else:
            raise TypeError("argument 'high' not specified")
    else:
        high = sf(high)

    # Copy events, since the saturation and rescaling below would otherwise
    # mutate the caller's populations in place.
    for i in range(len(populations)):
        populations[i] = populations[i].copy()

    # For log scaling, logarithm of zero and negatives is undefined. Therefore,
    # saturate any non-positives to a small positive value.
    # The machine epsilon `eps` is the smallest number such that
    # `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.
    if scale == 'log':
        for p in populations:
            p[p < 1e-15] = 1e-15

    # Rescale events
    for i in range(len(populations)):
        populations[i] = sf(populations[i])

    # Calculate means and standard deviations
    pop_mean = np.array([FlowCal.stats.mean(p) for p in populations])
    pop_std = np.array([FlowCal.stats.std(p) for p in populations])

    # Some populations, especially the highest ones when they are near
    # saturation, tend to aggregate mostly on one bin and give a standard
    # deviation of almost zero. This is an effect of the finite bin resolution
    # and probably gives a bad estimate of the standard deviation. We choose
    # to be conservative and overestimate the standard deviation in these
    # cases. Therefore, we set the minimum standard deviation to 0.005.
    min_std = 0.005
    pop_std[pop_std < min_std] = min_std

    # Return populations that don't cross either threshold
    selected_mask = np.logical_and(
        (pop_mean - n_std_low*pop_std) > low,
        (pop_mean + n_std_high*pop_std) < high)

    return selected_mask
def fit_beads_autofluorescence(fl_rfi, fl_mef):
    """
    Fit a standard curve using a beads model with autofluorescence.

    Parameters
    ----------
    fl_rfi : array
        Fluorescence values of bead populations in units of Relative
        Fluorescence Intensity (RFI).
    fl_mef : array
        Fluorescence values of bead populations in MEF units.

    Returns
    -------
    std_crv : function
        Standard curve that transforms fluorescence values from RFI to MEF
        units. This function has the signature ``y = std_crv(x)``, where
        `x` is some fluorescence value in RFI and `y` is the same
        fluorescence expressed in MEF units.
    beads_model : function
        Fluorescence model of calibration beads. This function has the
        signature ``y = beads_model(x)``, where `x` is the fluorescence of
        some bead population in RFI units and `y` is the same fluorescence
        expressed in MEF units, without autofluorescence.
    beads_params : array
        Fitted parameters of the bead fluorescence model: ``[m, b,
        fl_mef_auto]``.
    beads_model_str : str
        String representation of the beads model used.
    beads_params_names : list of str
        Names of the parameters in a list, in the same order as they are
        given in `beads_params`.

    Raises
    ------
    ValueError
        If `fl_rfi` and `fl_mef` differ in length, or fewer than three
        values are given.

    Notes
    -----
    The following model is used to describe bead fluorescence::

        m*log(fl_rfi[i]) + b = log(fl_mef_auto + fl_mef[i])

    where ``fl_rfi[i]`` is the fluorescence of bead subpopulation ``i`` in
    RFI units and ``fl_mef[i]`` is the corresponding fluorescence in MEF
    units. The model includes 3 parameters: ``m`` (slope), ``b``
    (intercept), and ``fl_mef_auto`` (bead autofluorescence). The last term
    is constrained to be greater or equal to zero.

    The bead fluorescence model is fit in log space using nonlinear least
    squares regression. In our experience, fitting in log space weights
    the residuals more evenly, whereas fitting in linear space vastly
    overvalues the brighter beads.

    A standard curve is constructed by solving for ``fl_mef``. As cell
    samples may not have the same autofluorescence as beads, the bead
    autofluorescence term (``fl_mef_auto``) is omitted from the standard
    curve; the user is expected to use an appropriate white cell sample to
    account for cellular autofluorescence if necessary. The returned
    standard curve mapping fluorescence in RFI units to MEF units is thus
    of the following form::

        fl_mef = exp(m*log(fl_rfi) + b)

    This is equivalent to::

        fl_mef = exp(b) * (fl_rfi**m)

    This works for positive ``fl_rfi`` values, but it is undefined for
    ``fl_rfi < 0`` and non-integer ``m`` (general case).

    To extend this standard curve to negative values of ``fl_rfi``, we
    define ``s(fl_rfi)`` to be equal to the standard curve above when
    ``fl_rfi >= 0``. Next, we require this function to be odd, that is,
    ``s(fl_rfi) = - s(-fl_rfi)``. This extends the domain to negative
    ``fl_rfi`` values and results in ``s(fl_rfi) < 0`` for any negative
    ``fl_rfi``. Finally, we make ``fl_mef = s(fl_rfi)`` our new
    standard curve. In this way,::

        s(fl_rfi) =   exp(b) * ( fl_rfi **m), fl_rfi >= 0
                    - exp(b) * ((-fl_rfi)**m), fl_rfi < 0

    This satisfies the definition of an odd function. In addition,
    ``s(0) = 0``, and ``s(fl_rfi)`` converges to zero when ``fl_rfi -> 0``
    from both sides. Therefore, the function is continuous at
    ``fl_rfi = 0``. The definition of ``s(fl_rfi)`` can be expressed more
    conveniently as::

        s(fl_rfi) = sign(fl_rfi) * exp(b) * (abs(fl_rfi)**m)

    This is the equation implemented.
    """
    # Check that the input data has consistent dimensions
    if len(fl_rfi) != len(fl_mef):
        raise ValueError("fl_rfi and fl_mef have different lengths")
    # Check that we have at least three points
    if len(fl_rfi) <= 2:
        raise ValueError("standard curve model requires at least three "
            "values")

    # Initialize parameters
    params = np.zeros(3)
    # Initial guesses:
    # 0: slope found by putting a line through the highest two points.
    # 1: y-intercept found by putting a line through highest two points.
    # 2: bead autofluorescence initialized using the first point.
    params[0] = (np.log(fl_mef[-1]) - np.log(fl_mef[-2])) / \
                (np.log(fl_rfi[-1]) - np.log(fl_rfi[-2]))
    params[1] = np.log(fl_mef[-1]) - params[0] * np.log(fl_rfi[-1])
    params[2] = np.exp(params[0]*np.log(fl_rfi[0]) + params[1]) - fl_mef[0]

    # Error function: sum of squared residuals in log space (see Notes)
    def err_fun(p, x, y):
        return np.sum((np.log(y + p[2]) - ( p[0] * np.log(x) + p[1] ))**2)

    # Bead model function
    def fit_fun(p,x):
        return np.exp(p[0] * np.log(x) + p[1]) - p[2]

    # RFI-to-MEF standard curve transformation function
    # (odd extension to negative fl_rfi, see Notes)
    def sc_fun(p,x):
        return np.sign(x) * np.exp(p[1]) * (np.abs(x)**p[0])

    # Fit parameters
    # NOTE(review): with `bounds` given and no `method`, SciPy selects
    # L-BFGS-B, for which the `gtol`/`ftol` options are valid — confirm
    # against the installed SciPy version.
    err_par = lambda p: err_fun(p, fl_rfi, fl_mef)
    res = minimize(err_par,
                   params,
                   bounds=((None, None), (None, None), (0, None)),
                   options = {'gtol': 1e-10, 'ftol': 1e-10})

    # Separate parameters
    beads_params = res.x

    # Beads model function
    beads_model = lambda x: fit_fun(beads_params, x)

    # Standard curve function
    std_crv = lambda x: sc_fun(beads_params, x)

    # Model string representation
    beads_model_str = 'm*log(fl_rfi) + b = log(fl_mef_auto + fl_mef)'

    # Parameter names
    beads_params_names = ['m', 'b', 'fl_mef_auto']

    return (std_crv,
            beads_model,
            beads_params,
            beads_model_str,
            beads_params_names)
def plot_standard_curve(fl_rfi,
                        fl_mef,
                        beads_model,
                        std_crv,
                        xscale='linear',
                        yscale='linear',
                        xlim=None,
                        ylim=(1.,1e8)):
    """
    Plot a standard curve with fluorescence of calibration beads.

    Parameters
    ----------
    fl_rfi : array_like
        Fluorescence of the calibration beads' subpopulations, in RFI
        units.
    fl_mef : array_like
        Fluorescence of the calibration beads' subpopulations, in MEF
        units.
    beads_model : function
        Fluorescence model of the calibration beads.
    std_crv : function
        The standard curve, mapping relative fluorescence (RFI) units to
        MEF units.

    Other Parameters
    ----------------
    xscale : str, optional
        Scale of the x axis, either ``linear`` or ``log``.
    yscale : str, optional
        Scale of the y axis, either ``linear`` or ``log``.
    xlim : tuple, optional
        Limits for the x axis.
    ylim : tuple, optional
        Limits for the y axis.

    Raises
    ------
    ValueError
        If `xscale` is neither ``linear`` nor ``log``.
    """
    # Plot fluorescence of beads populations
    plt.plot(fl_rfi,
             fl_mef,
             'o',
             label='Beads',
             color=standard_curve_colors[0])

    # Generate points in x axis to plot beads model and standard curve.
    if xlim is None:
        xlim = plt.xlim()
    if xscale=='linear':
        xdata = np.linspace(xlim[0], xlim[1], 200)
    elif xscale=='log':
        xdata = np.logspace(np.log10(xlim[0]), np.log10(xlim[1]), 200)
    else:
        # BUG FIX: an unrecognized `xscale` previously left `xdata`
        # undefined and produced a confusing NameError below. Raise an
        # explicit error instead, mirroring `clustering_gmm`'s style.
        raise ValueError("xscale {} not supported".format(xscale))

    # Plot beads model and standard curve
    plt.plot(xdata,
             beads_model(xdata),
             label='Beads model',
             color=standard_curve_colors[1])
    plt.plot(xdata,
             std_crv(xdata),
             label='Standard curve',
             color=standard_curve_colors[2])

    plt.xscale(xscale)
    plt.yscale(yscale)
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.grid(True)
    plt.legend(loc = 'best')
def get_transform_fxn(data_beads,
mef_values,
mef_channels,
clustering_fxn=clustering_gmm,
clustering_params={},
clustering_channels=None,
statistic_fxn=FlowCal.stats.median,
statistic_params={},
selection_fxn=selection_std,
selection_params={},
fitting_fxn=fit_beads_autofluorescence,
fitting_params={},
verbose=False,
plot=False,
plot_dir=None,
plot_filename=None,
full_output=False):
"""
Get a transformation function to convert flow cytometry data to MEF.
Parameters
----------
data_beads : FCSData object
Flow cytometry data describing calibration beads.
mef_values : sequence of sequences
Known MEF values for the calibration bead subpopulations, for each
channel specified in `mef_channels`. The innermost sequences must
have the same length (the same number of bead subpopulations must
exist for each channel). Values of np.nan or None specify that a
subpopulation should be omitted from the fitting procedure.
mef_channels : int, or str, or list of int, or list of str
Channels for which to generate transformation functions.
verbose : bool, optional
Flag specifying whether to print information about step completion
and warnings.
plot : bool, optional
Flag specifying whether to produce diagnostic plots.
plot_dir : str, optional
Directory where to save diagnostics plots. Ignored if `plot` is
False. If ``plot==True`` and ``plot_dir is None``, plot without
saving.
plot_filename : str, optional
Name to use for plot files. If None, use ``str(data_beads)``.
full_output : bool, optional
Flag specifying whether to include intermediate results in the
output. If `full_output` is True, the function returns a
`MEFOutput` ``namedtuple`` with fields as described below. If
`full_output` is False, the function only returns the calculated
transformation function.
Returns
-------
transform_fxn : function
Transformation function to convert flow cytometry data from RFI
units to MEF. This function has the following signature::
data_mef = transform_fxn(data_rfi, channels)
mef_channels : int, or str, or list, only if ``full_output==True``
Channels on which the transformation function has been generated.
Directly copied from the `mef_channels` argument.
clustering : dict, only if ``full_output==True``
Results of the clustering step. The structure of this dictionary
is::
clustering = {"labels": np.array}
A description of each ``"key": value`` is given below.
"labels" : array
Array of length ``N``, where ``N`` is the number of events in
`data_beads`. This array contains labels indicating which
subpopulation each event has been assigned to by the clustering
algorithm. Labels range from ``0`` to ``M - 1``, where ``M`` is
the number of MEF values specified, and therefore the number of
subpopulations identified by the clustering algorithm.
statistic : dict, only if ``full_output==True``
Results of the calculation of bead subpopulations' fluorescence.
The structure of this dictionary is::
statistic = {"values": [np.array, ...]}
A description of each ``"key": value`` is given below.
"values" : list of arrays
Each array contains the representative fluorescence values of
all subpopulations, for a specific fluorescence channel from
`mef_channels`. Therefore, each array has a length equal to the
number of subpopulations, and the outer list has as many arrays
as the number of channels in `mef_channels`.
selection : dict, only if ``full_output==True``
Results of the subpopulation selection step. The structure of this
dictionary is::
selection = {"rfi": [np.array, ...],
"mef": [np.array, ...]}
A description of each ``"key": value`` is given below.
"rfi" : list of arrays
Each array contains the fluorescence values of each selected
subpopulation in RFI units, for a specific fluorescence channel
from `mef_channels`. The outer list has as many arrays as the
number of channels in `mef_channels`. Because the selection
step may discard subpopulations, each array has a length less
than or equal to the total number of subpopulations.
Furthermore, different arrays in this list may not have the
same length. However, the length of each array is consistent
with the corresponding array in ``selection["mef"]`` (see
below).
"mef" : list of arrays
Each array contains the fluorescence values of each selected
subpopulation in MEF units, for a specific fluorescence channel
from `mef_channels`. The outer list has as many arrays as the
number of channels in `mef_channels`. Because the selection
step may discard subpopulations, each array has a length less
than or equal to the total number of subpopulations.
Furthermore, different arrays in this list may not have the
same length. However, the length of each array is consistent
with the corresponding array in ``selection["rfi"]`` (see
above).
fitting : dict, only if ``full_output==True``
Results of the model fitting step. The structure of this dictionary
is::
selection = {"std_crv": [func, ...],
"beads_model": [func, ...],
"beads_params": [np.array, ...],
"beads_model_str": [str, ...],
"beads_params_names": [[], ...]}
A description of each ``"key": value`` is given below.
"std_crv" : list of functions
Functions encoding the fitted standard curves, for each channel
in `mef_channels`. Each element of this list is the ``std_crv``
output of the fitting function (see required signature of the
``fitting_fxn`` optional parameter), after applying it to the
MEF and RFI fluorescence values of a specific channel from
`mef_channels` .
"beads_model" : list of functions
Functions encoding the fluorescence model of the calibration
beads, for each channel in `mef_channels`. Each element of this
list is the ``beads_model`` output of the fitting function (see
required signature of the ``fitting_fxn`` optional parameter),
after applying it to the MEF and RFI fluorescence values of a
specific channel from `mef_channels` .
"beads_params" : list of arrays
Fitted parameter values of the bead fluorescence model, for
each channel in `mef_chanels`. Each element of this list is the
``beads_params`` output of the fitting function (see required
signature of the ``fitting_fxn`` optional parameter), after
applying it to the MEF and RFI fluorescence values of a
specific channel from `mef_channels`.
"beads_model_str" : list of str
String representation of the bead models used, for each channel
in `mef_channels`. Each element of this list is the
``beads_model_str`` output of the fitting function (see
required signature of the ``fitting_fxn`` optional parameter),
after applying it to the MEF and RFI fluorescence values of a
specific channel from `mef_channels` .
"beads_params_names" : list of list
Names of the parameters given in `beads_params`, for each
channel in `mef_channels`. Each element of this list is the
``beads_params_names`` output of the fitting function (see
required signature of the ``fitting_fxn`` optional parameter),
after applying it to the MEF and RFI fluorescence values of a
specific channel from `mef_channels` .
Other parameters
----------------
clustering_fxn : function, optional
Function used for clustering, or identification of subpopulations.
Must have the following signature::
labels = clustering_fxn(data, n_clusters, **clustering_params)
where `data` is a NxD FCSData object or numpy array, `n_clusters`
is the expected number of bead subpopulations, and `labels` is a 1D
numpy array of length N, assigning each event in `data` to one
subpopulation.
clustering_params : dict, optional
Additional keyword parameters to pass to `clustering_fxn`.
clustering_channels : list, optional
Channels used for clustering. If not specified, use `mef_channels`.
If more than three channels are specified and `plot` is True, only
a 3D scatter plot will be produced using the first three channels.
statistic_fxn : function, optional
Function used to calculate the representative fluorescence of each
subpopulation. Must have the following signature::
s = statistic_fxn(data, **statistic_params)
where `data` is a 1D FCSData object or numpy array, and `s` is a
float. Statistical functions from numpy, scipy, or FlowCal.stats
are valid options.
statistic_params : dict, optional
Additional keyword parameters to pass to `statistic_fxn`.
selection_fxn : function, optional
Function to use for bead population selection. Must have the
following signature::
selected_mask = selection_fxn(data_list, **selection_params)
where `data_list` is a list of FCSData objects, each one containing
the events of one population, and `selected_mask` is a boolean
array indicating whether the population has been selected (True) or
discarded (False). If None, don't use a population selection
procedure.
selection_params : dict, optional
Additional keyword parameters to pass to `selection_fxn`.
fitting_fxn : function, optional
Function used to fit the beads fluorescence model and obtain a
standard curve. Must have the following signature::
std_crv, beads_model, beads_params, \\
beads_model_str, beads_params_names = fitting_fxn(
fl_rfi, fl_mef, **fitting_params)
where `std_crv` is a function implementing the standard curve,
`beads_model` is a function implementing the beads fluorescence
model, `beads_params` is an array containing the fitted parameters
of the beads model, `beads_model_str` is a string representation
of the beads model used, `beads_params_names` is a list with the
parameter names in the same order as they are given in
`beads_params`, and `fl_rfi` and `fl_mef` are the fluorescence
values of the beads in RFI units and MEF units, respectively.
Note that the standard curve and the fitted beads model are not
necessarily the same.
fitting_params : dict, optional
Additional keyword parameters to pass to `fitting_fxn`.
Notes
-----
The steps involved in generating the MEF transformation function are:
1. The individual subpopulations of beads are first identified using a
clustering method of choice. Clustering is performed in all
specified channels simultaneously.
2. The fluorescence of each subpopulation is calculated, for each
channel in `mef_channels`.
3. Some subpopulations are then discarded if they are close to either
the minimum or the maximum channel range limits. In addition, if the
MEF value of some subpopulation is unknown (represented as a
``np.nan`` in `mef_values`), the whole subpopulation is also
discarded.
4. The measured fluorescence of each subpopulation is compared with
the known MEF values in `mef_values`, and a standard curve function
is generated using the appropriate MEF model.
At the end, a transformation function is generated using the calculated
standard curves, `mef_channels`, and ``FlowCal.transform.to_mef()``.
Note that applying the resulting transformation function to other
flow cytometry samples only yields correct results if they have been
taken at the same settings as the calibration beads, for all channels
in `mef_channels`.
Examples
--------
Here is a simple application of this function:
>>> transform_fxn = FlowCal.mef.get_transform_fxn(
... beads_data,
... mef_channels=['FL1', 'FL3'],
... mef_values=[np.array([ 0, 646, 1704, 4827,
... 15991, 47609, 135896, 273006],
... np.array([ 0, 1614, 4035, 12025,
... 31896, 95682, 353225, 1077421]],
... )
>>> sample_mef = transform_fxn(data=sample_rfi,
... channels=['FL1', 'FL3'])
Here, we first generate ``transform_fxn`` from flow cytometry data
contained in ``FCSData`` object ``beads_data``, for channels FL1 and
FL3, using provided MEF values for each one of these channels. In the
next line, we use the resulting transformation function to transform
cell sample data in RFI to MEF.
More data about intermediate steps can be obtained with the option
``full_output=True``:
>>> get_transform_output = FlowCal.mef.get_transform_fxn(
... beads_data,
... mef_channels=['FL1', 'FL3'],
... mef_values=[np.array([ 0, 646, 1704, 4827,
... 15991, 47609, 135896, 273006],
... np.array([ 0, 1614, 4035, 12025,
... 31896, 95682, 353225, 1077421]],
... full_output=True)
In this case, the output ``get_transform_output`` will be a
`MEFOutput` ``namedtuple`` similar to the following::
FlowCal.mef.MEFOutput(
transform_fxn=<functools.partial object>,
mef_channels=['FL1', 'FL3'],
clustering={
'labels' : [7, 2, 2, ... 4, 3, 5]
},
statistic={
'values' : [np.array([ 101, 150, 231, 433,
1241, 3106, 7774, 9306]),
np.array([ 3, 30, 71, 204,
704, 2054, 6732, 9912])]
},
selection={
'rfi' : [np.array([ 101, 150, 231, 433,
1241, 3106, 7774]),
np.array([ 30, 71, 204, 704,
2054, 6732])]
'mef' : [np.array([ 0, 646, 1704, 4827,
15991, 47609, 135896]),
np.array([ 1614, 4035, 12025, 31896,
95682, 353225])]
},
fitting={
'std_crv' : [<function <lambda>>,
<function <lambda>>]
'beads_model' : [<function <lambda>>,
<function <lambda>>]
'beads_params' : [np.array([ 1.09e0, 2.02e0, 1.15e3]),
np.array([9.66e-1, 4.17e0, 6.63e1])]
'beads_model_str' : ['m*log(fl_rfi) + b =\
log(fl_mef_auto + fl_mef)',
'm*log(fl_rfi) + b =\
log(fl_mef_auto + fl_mef)']
'beads_params_names' : [['m', 'b', 'fl_mef_auto],
['m', 'b', 'fl_mef_auto]]
},
)
"""
if verbose:
prev_precision = np.get_printoptions()['precision']
np.set_printoptions(precision=2)
# Create directory if plot is True
if plot and plot_dir is not None:
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
# Default plot filename
if plot_filename is None:
plot_filename = str(data_beads)
# mef_channels and mef_values should be iterables.
if hasattr(mef_channels, '__iter__') \
and not isinstance(mef_channels, six.string_types):
mef_channels = list(mef_channels)
else:
mef_channels = [mef_channels]
mef_values = [mef_values]
# Transform mef_values to numpy array
mef_values = np.array(mef_values, dtype=float)
###
# 1. Clustering
###
# If clustering channels not specified, use channels in mef_channels
if clustering_channels is None:
clustering_channels = mef_channels
# Get number of clusters from number of specified MEF values
n_clusters = len(mef_values[0])
# Run clustering function
labels = clustering_fxn(data_beads[:, clustering_channels],
n_clusters,
**clustering_params)
# Separate events corresponding to each cluster
unique_labels = np.array(list(set(labels)))
populations = [data_beads[labels == i] for i in unique_labels]
# Sort populations based on distance to the origin
population_dist = [np.sum((np.mean(population[:,clustering_channels],
axis=0))**2)
for population in populations]
population_sorted_idx = np.argsort(population_dist)
populations = [populations[i] for i in population_sorted_idx]
# Print information
if verbose:
# Calculate and display percentage of events on each population
population_count = np.array([population.shape[0]
for population in populations])
population_perc = population_count * 100.0 / population_count.sum()
# Print information
print("Step 1: Clustering")
print(" Number of populations to find: {}".format(n_clusters))
print(" Percentage of events in each population:")
print(" " + str(population_perc))
# Plot
if plot:
if plot_dir is not None:
savefig = '{}/clustering_{}.png'.format(plot_dir, plot_filename)
else:
savefig = None
# If used one channel for clustering, make histogram
if len(clustering_channels) == 1:
plt.figure(figsize=(8,4))
FlowCal.plot.hist1d(
populations,
channel=clustering_channels[0],
xscale='logicle',
bins=256,
alpha=0.75,
savefig=savefig)
# If used two channels for clustering, make 2D scatter plot
elif len(clustering_channels) == 2:
plt.figure(figsize=(6,4))
FlowCal.plot.scatter2d(
populations,
channels=clustering_channels,
xscale='logicle',
yscale='logicle',
savefig=savefig)
# If used three channels or more for clustering, make 3D scatter plot
# with the first three.
elif len(clustering_channels) >= 3:
plt.figure(figsize=(8,6))
FlowCal.plot.scatter3d_and_projections(
populations,
channels=clustering_channels[:3],
xscale='logicle',
yscale='logicle',
zscale='logicle',
savefig=savefig)
if plot_dir is not None:
plt.close()
# Initialize lists to acumulate results
std_crv_res = []
if full_output:
stats_values_res = []
selected_rfi_res = []
selected_mef_res = []
beads_model_res = []
beads_params_res =[]
beads_model_str_res =[]
beads_params_names_res =[]
# Iterate through each mef channel
for mef_channel, mef_values_channel in zip(mef_channels, mef_values):
populations_channel = [population[:, mef_channel]
for population in populations]
###
# 2. Calculate statistics in each subpopulation.
###
# Calculate statistics
stats_values = [statistic_fxn(population, **statistic_params)
for population in populations_channel]
stats_values = np.array(stats_values)
# Accumulate results
if full_output:
stats_values_res.append(stats_values)
# Print information
if verbose:
print("({}) Step 2: Population Statistic".format(mef_channel))
print(" Fluorescence of each population (RFI):")
print(" " + str(stats_values))
###
# 3. Select populations to be used for fitting
###
# Select populations based on selection_fxn
if selection_fxn is not None:
selected_mask = selection_fxn(
[population for population in populations_channel],
**selection_params)
else:
selected_mask = np.ones(n_clusters, dtype=bool)
# Discard values specified as nan in mef_values_channel
selected_mask = np.logical_and(selected_mask,
~np.isnan(mef_values_channel))
# Get selected rfi and mef values
selected_rfi = stats_values[selected_mask]
selected_mef = mef_values_channel[selected_mask]
# Accumulate results
if full_output:
selected_rfi_res.append(selected_rfi)
selected_mef_res.append(selected_mef)
# Print information
if verbose:
print("({}) Step 3: Population Selection".format(mef_channel))
print(" {} populations selected.".format(len(selected_rfi)))
print(" Fluorescence of selected populations (RFI):")
print(" " + str(selected_rfi))
print(" Fluorescence of selected populations (MEF):")
print(" " + str(selected_mef))
# Plot
if plot:
# Get colors for each population. Colors are taken from the default
# colormap in FlowCal.plot, if the population has been selected.
# Otherwise, the population is displayed in gray.
color_levels = np.linspace(0, 1, n_clusters)
colors = [FlowCal.plot.cmap_default(level)
if selected else (0.6, 0.6, 0.6)
for selected, level in zip(selected_mask, color_levels)]
# Plot histograms
plt.figure(figsize=(8,4))
FlowCal.plot.hist1d(populations,
channel=mef_channel,
xscale='logicle',
bins=256,
alpha=0.75,
facecolor=colors)
# Plot a vertical line for each population, with an x coordinate
# corresponding to their statistic value.
ylim = plt.ylim()
for val, color in zip(stats_values, colors):
plt.plot([val, val], [ylim[0], ylim[1]], color=color)
plt.ylim(ylim)
# Save and close
if plot_dir is not None:
plt.tight_layout()
plt.savefig('{}/populations_{}_{}.png'.format(plot_dir,
mef_channel,
plot_filename),
dpi=FlowCal.plot.savefig_dpi)
plt.close()
###
# 4. Get standard curve
###
# Fit
fitting_output = fitting_fxn(selected_rfi,
selected_mef,
**fitting_params)
std_crv = fitting_output[0]
beads_model = fitting_output[1]
beads_params = fitting_output[2]
beads_model_str = fitting_output[3]
beads_params_names = fitting_output[4]
# Accumulate results
std_crv_res.append(std_crv)
if full_output:
beads_model_res.append(beads_model)
beads_params_res.append(beads_params)
beads_model_str_res.append(beads_model_str)
beads_params_names_res.append(beads_params_names)
# Print information
if verbose:
print("({}) Step 4: Standard Curve Fitting".format(mef_channel))
print(" Parameters of bead fluorescence model:")
print(" " + str(beads_params))
# Plot
if plot:
# Get channel range
xlim = populations[0].range(channels=mef_channel)
# The plot will be made in log scale. If the lower limit of the
# range is zero or less, replace by one or some lower value, such
# that the range covers at least five decades.
if xlim[0] <= 0:
xlim[0] = min(1., xlim[1]/1e5)
# Plot standard curve
plt.figure(figsize=(6,4))
plot_standard_curve(selected_rfi,
selected_mef,
beads_model,
std_crv,
xscale='log',
yscale='log',
xlim=xlim)
plt.xlabel('{} (a.u.)'.format(mef_channel))
plt.ylabel('{} (MEF)'.format(mef_channel))
# Save if required
if plot_dir is not None:
plt.tight_layout()
plt.savefig('{}/std_crv_{}_{}.png'.format(plot_dir,
mef_channel,
plot_filename),
dpi=FlowCal.plot.savefig_dpi)
plt.close()
# Make output transformation function
transform_fxn = functools.partial(FlowCal.transform.to_mef,
sc_list=std_crv_res,
sc_channels=mef_channels)
if verbose:
np.set_printoptions(precision=prev_precision)
if full_output:
# Clustering results
clustering_res = {}
clustering_res['labels'] = labels
# Population stats results
statistic_res = {}
statistic_res['values'] = stats_values_res
# Population selection results
selection_res = {}
selection_res['rfi'] = selected_rfi_res
selection_res['mef'] = selected_mef_res
# Fitting results
fitting_res = {}
fitting_res['std_crv'] = std_crv_res
fitting_res['beads_model'] = beads_model_res
fitting_res['beads_params'] = beads_params_res
fitting_res['beads_model_str'] = beads_model_str_res
fitting_res['beads_params_names'] = beads_params_names_res
# Make namedtuple
fields = ['mef_channels',
'transform_fxn',
'clustering',
'statistic',
'selection',
'fitting']
MEFOutput = collections.namedtuple('MEFOutput', fields)
out = MEFOutput(mef_channels=mef_channels,
transform_fxn=transform_fxn,
clustering=clustering_res,
statistic=statistic_res,
selection=selection_res,
fitting=fitting_res)
return out
else:
return transform_fxn
|
taborlab/FlowCal
|
FlowCal/mef.py
|
Python
|
mit
| 48,430
|
[
"Gaussian"
] |
9fc52a77e127ab4784be8c71f7a22adf74c336f0b10b137a3f52398cad94b0c9
|
import numpy as np
import pylab as pl
from lmfit import minimize, Parameters, Parameter,\
report_fit, Model, CompositeModel
from lmfit.models import StepModel, GaussianModel, LorentzianModel, ConstantModel, RectangleModel, LinearModel
def gaussian(x, height, center, width, offset):
    """Evaluate a Gaussian peak on a constant baseline.

    Parameters
    ----------
    x : float or array-like
        Point(s) at which to evaluate the curve.
    height : float
        Amplitude scaling applied to the normalized Gaussian.
    center : float
        Location of the peak.
    width : float
        Standard deviation of the Gaussian.
    offset : float
        Constant baseline added to the curve.

    Returns
    -------
    float or ndarray
        ``height / (width * sqrt(2*pi)) * exp(-(x - center)**2 / (2*width**2))
        + offset``

    Notes
    -----
    The original expression ``(height/np.sqrt(2*np.pi)*width)`` *multiplied*
    by ``width`` instead of dividing — a missing pair of parentheses.  The
    normalized-Gaussian prefactor is ``height / (width * sqrt(2*pi))``, which
    is what is computed here.
    """
    return (height / (np.sqrt(2 * np.pi) * width)) \
        * np.exp(-(x - center)**2 / (2 * width**2)) + offset
def gauss_step_const(signal, guess):
"""
Fits high contrast data very well
"""
if guess == False:
return [0, 0]
else:
amp, centre, stdev, offset = guess
data = np.array([range(len(signal)), signal]).T
X = data[:,0]
Y = data[:,1]
# gauss_mod = Model(gaussian)
gauss_mod = Model(gaussian)
const_mod = ConstantModel()
step_mod = StepModel(prefix='step')
pars = gauss_mod.make_params(height=amp, center=centre, width=stdev / 3., offset=offset)
# pars = gauss_mod.make_params(amplitude=amp, center=centre, sigma=stdev / 3.)
gauss_mod.set_param_hint('sigma', value = stdev / 3., min=stdev / 2., max=stdev)
pars += step_mod.guess(Y, x=X, center=centre)
pars += const_mod.guess(Y, x=X)
mod = const_mod + gauss_mod + step_mod
result = mod.fit(Y, pars, x=X)
# write error report
#print result.fit_report()
print "contrast fit", result.redchi
return X, result.best_fit, result.redchi
def step(signal, guess):
if guess == False:
return [0, 0]
else:
amp, centre, stdev, offset = guess
data = np.array([range(len(signal)), signal]).T
X = data[:,0]
Y = data[:,1]
step_mod = StepModel(prefix='step')
const_mod = ConstantModel(prefix='const_')
pars = step_mod.guess(Y, x=X, center=centre)
pars += const_mod.guess(Y, x=X)
mod = step_mod + const_mod
result = mod.fit(Y, pars, x=X)
# write error report
#print result.fit_report()
print "step fit", result.redchi
return X, result.best_fit, result.redchi
def gaussFN_const(signal, guess):
if guess == False:
return [0, 0]
else:
amp, centre, stdev, offset = guess
data = np.array([range(len(signal)), signal]).T
X = data[:,0]
Y = data[:,1]
gauss_mod = GaussianModel(prefix='gauss_')
const_mod = ConstantModel(prefix='const_')
#pars = lorentz_mod.make_params(amplitude=amp, center=centre, sigma=stdev / 3.)
#lorentz_mod.set_param_hint('sigma', value = stdev / 3., min=0., max=stdev)
pars = gauss_mod.guess(Y, x=X, center=centre, sigma=stdev / 3., amplitude=amp)
#pars += step_mod.guess(Y, x=X, center=centre)
pars += const_mod.guess(Y, x=X)
mod = gauss_mod + const_mod
result = mod.fit(Y, pars, x=X)
# write error report
#print result.fit_report()
print "gaussFN", result.redchi
return X, result.best_fit, result.redchi
def minimized_residuals(signal, guess):
    """Run all candidate fits and return the one with the lowest residual.

    Tries a Gaussian+constant fit, a Gaussian+step+constant fit and a
    step-only fit, then returns the ``(x, best_fit)`` pair of whichever
    achieved the smallest reduced chi-square.  Ties are broken in that
    order, matching the original ``elif`` chain.  Returns ``[0, 0]`` when
    *guess* is ``False``.
    """
    if guess == False:
        return [0, 0]

    # Each candidate returns (x, best_fit, reduced_chi_square).
    candidates = [gaussFN_const(signal, guess),
                  gauss_step_const(signal, guess),
                  step(signal, guess)]
    errors = [fit[2] for fit in candidates]

    # np.argmin returns the first occurrence of the minimum, preserving the
    # original tie-breaking order.  Unlike the original chain of
    # ``elif err == np.min(...)`` comparisons, this also always returns a
    # result: the original silently fell through and returned None whenever
    # any residual was NaN (NaN compares unequal to everything).
    best = candidates[int(np.argmin(errors))]
    return best[0], best[1]
|
DiamondLightSource/auto_tomo_calibration-experimental
|
old_code_scripts/simulate_data/fit_data.py
|
Python
|
apache-2.0
| 3,625
|
[
"Gaussian"
] |
cf96d58034399628b331ac18db1b87da53c02cb2d5242788ad2f1edf603b75a1
|
""" Contains the GI (General Illumination) parent classes. """
# gi.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
from mpf.system.devices import Device
class GI(Device):
    """ Represents a light connected to a traditional lamp matrix in a pinball
    machine.

    This light could be an incandescent lamp or a replacement single-color
    LED. The key is that they're connected up to a lamp matrix.
    """

    config_section = 'gis'
    collection = 'gi'

    def __init__(self, machine, name, config, collection=None):
        self.log = logging.getLogger('GI.' + name)
        super(GI, self).__init__(machine, name, config, collection,
                                 platform_section='gis')

        # We save out number_str since the platform driver will convert the
        # number into a hardware number, but we need the original number for
        # some things later.
        self.config['number_str'] = str(config['number']).upper()

        self.hw_driver, self.number = self.platform.configure_gi(self.config)

        self.registered_handlers = []

    def on(self, brightness=255, fade_ms=0, start_brightness=None):
        """Turns on this GI string.

        Args:
            brightness: Brightness from 0-255. A list may also be passed, in
                which case only its first element is used (a GI string is
                single-color).
            fade_ms: Duration of the fade to the new brightness, passed
                through to the hardware driver.
            start_brightness: Optional brightness the fade starts from,
                passed through to the hardware driver.
        """
        # isinstance (rather than ``type(...) is list``) is the idiomatic
        # check and also accepts list subclasses.
        if isinstance(brightness, list):
            brightness = brightness[0]

        # Notify any registered observers before driving the hardware.
        if self.registered_handlers:
            for handler in self.registered_handlers:
                handler(light_name=self.name, brightness=brightness)

        self.hw_driver.on(brightness, fade_ms, start_brightness)

    def off(self):
        """Turns off this GI string."""
        self.hw_driver.off()

    def add_handler(self, callback):
        """Registers a handler to be called when this light changes state."""
        self.registered_handlers.append(callback)

    def remove_handler(self, callback=None):
        """Removes a handler from the list of registered handlers.

        Args:
            callback: The handler to remove. If None (the default), all
                registered handlers are removed.
        """
        if not callback:  # remove all
            self.registered_handlers = []
            return

        if callback in self.registered_handlers:
            self.registered_handlers.remove(callback)
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
jabdoa2/mpf
|
mpf/devices/gi.py
|
Python
|
mit
| 3,293
|
[
"Brian"
] |
63e470950f4bb118ae6fe191253d88dd2d6f4f4d733b9f07af01c5a9c0729829
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkVectorText(SimpleVTKClassModuleBase):
    """DeVIDE wrapper module around ``vtk.vtkVectorText``.

    Auto-generated by ``DeVIDE::createDeVIDEModuleFromVTKObject`` (see the
    header comment of this file); it exposes the wrapped VTK object with no
    inputs and a single 'vtkPolyData' output.
    """

    def __init__(self, module_manager):
        # Positional args after the wrapped object: progress text
        # 'Processing.', empty input-type tuple, one-element output-type
        # tuple ('vtkPolyData',).  replaceDoc=True presumably substitutes the
        # VTK class's documentation for this module's — TODO confirm against
        # SimpleVTKClassModuleBase.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkVectorText(), 'Processing.',
            (), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkVectorText.py
|
Python
|
bsd-3-clause
| 467
|
[
"VTK"
] |
c81bf6799bab415dcef8ab24e08901eab2437151480e2ca708dc2460d0090ed8
|
r"""
Alignments (:mod:`skbio.alignment`)
===================================
.. currentmodule:: skbio.alignment
This module provides functionality for computing and manipulating sequence
alignments. DNA, RNA, and protein sequences can be aligned, as well as
sequences with custom alphabets.
Data Structures
---------------
.. autosummary::
:toctree: generated/
TabularMSA
Optimized (i.e., production-ready) Alignment Algorithms
-------------------------------------------------------
.. autosummary::
:toctree: generated/
StripedSmithWaterman
AlignmentStructure
local_pairwise_align_ssw
Slow (i.e., educational-purposes only) Alignment Algorithms
-----------------------------------------------------------
.. autosummary::
:toctree: generated/
global_pairwise_align_nucleotide
global_pairwise_align_protein
global_pairwise_align
local_pairwise_align_nucleotide
local_pairwise_align_protein
local_pairwise_align
General functionality
---------------------
.. autosummary::
:toctree: generated/
make_identity_substitution_matrix
Data Structure Examples
-----------------------
Load two DNA sequences that have been previously aligned into a ``TabularMSA``
object, using sequence IDs as the MSA's index:
>>> from skbio import TabularMSA, DNA
>>> seqs = [DNA("ACC--G-GGTA..", metadata={'id': "seq1"}),
... DNA("TCC--G-GGCA..", metadata={'id': "seq2"})]
>>> msa = TabularMSA(seqs, minter='id')
>>> msa
TabularMSA[DNA]
----------------------
Stats:
sequence count: 2
position count: 13
----------------------
ACC--G-GGTA..
TCC--G-GGCA..
>>> msa.index
Index(['seq1', 'seq2'], dtype='object')
Alignment Algorithm Examples
----------------------------
Optimized Alignment Algorithm Examples
--------------------------------------
Using the convenient ``local_pairwise_align_ssw`` function:
>>> from skbio.alignment import local_pairwise_align_ssw
>>> alignment, score, start_end_positions = local_pairwise_align_ssw(
... DNA("ACTAAGGCTCTCTACCCCTCTCAGAGA"),
... DNA("ACTAAGGCTCCTAACCCCCTTTTCTCAGA")
... )
>>> alignment
TabularMSA[DNA]
------------------------------
Stats:
sequence count: 2
position count: 30
------------------------------
ACTAAGGCTCTCT-ACCCC----TCTCAGA
ACTAAGGCTC-CTAACCCCCTTTTCTCAGA
>>> score
27
>>> start_end_positions
[(0, 24), (0, 28)]
Using the ``StripedSmithWaterman`` object:
>>> from skbio.alignment import StripedSmithWaterman
>>> query = StripedSmithWaterman("ACTAAGGCTCTCTACCCCTCTCAGAGA")
>>> alignment = query("AAAAAACTCTCTAAACTCACTAAGGCTCTCTACCCCTCTTCAGAGAAGTCGA")
>>> print(alignment)
ACTAAGGCTC...
ACTAAGGCTC...
Score: 49
Length: 28
Using the ``StripedSmithWaterman`` object for multiple targets in an efficient
way and finding the aligned sequence representations:
>>> from skbio.alignment import StripedSmithWaterman
>>> alignments = []
>>> target_sequences = [
... "GCTAACTAGGCTCCCTTCTACCCCTCTCAGAGA",
... "GCCCAGTAGCTTCCCAATATGAGAGCATCAATTGTAGATCGGGCC",
... "TCTATAAGATTCCGCATGCGTTACTTATAAGATGTCTCAACGG",
... "TAGAGATTAATTGCCACTGCCAAAATTCTG"
... ]
>>> query_sequence = "ACTAAGGCTCTCTACCCCTCTCAGAGA"
>>> query = StripedSmithWaterman(query_sequence)
>>> for target_sequence in target_sequences:
... alignment = query(target_sequence)
... alignments.append(alignment)
...
>>> print(alignments[0])
ACTAAGGCTC...
ACT-AGGCTC...
Score: 38
Length: 30
>>> print(alignments[0].aligned_query_sequence)
ACTAAGGCTC---TCTACCCCTCTCAGAGA
>>> print(alignments[0].aligned_target_sequence)
ACT-AGGCTCCCTTCTACCCCTCTCAGAGA
Slow Alignment Algorithm Examples
---------------------------------
scikit-bio also provides pure-Python implementations of Smith-Waterman and
Needleman-Wunsch alignment. These are much slower than the methods described
above, but serve as useful educational examples as they're simpler to
experiment with. Functions are provided for local and global alignment of
protein and nucleotide sequences. The ``global*`` and ``local*`` functions
differ in the underlying algorithm that is applied (``global*`` uses Needleman-
Wunsch while ``local*`` uses Smith-Waterman), and ``*protein`` and
``*nucleotide`` differ in their default scoring of matches, mismatches, and
gaps.
Here we locally align a pair of protein sequences using gap open penalty
of 11 and a gap extend penalty of 1 (in other words, it is much more
costly to open a new gap than extend an existing one).
>>> from skbio import Protein
>>> from skbio.alignment import local_pairwise_align_protein
>>> s1 = Protein("HEAGAWGHEE")
>>> s2 = Protein("PAWHEAE")
>>> alignment, score, start_end_positions = local_pairwise_align_protein(
... s1, s2, 11, 1)
This returns an ``skbio.TabularMSA`` object, the alignment score, and start/end
positions of each aligned sequence:
>>> alignment
TabularMSA[Protein]
---------------------
Stats:
sequence count: 2
position count: 5
---------------------
AWGHE
AW-HE
>>> score
25.0
>>> start_end_positions
[(4, 8), (1, 4)]
Similarly, we can perform global alignment of nucleotide sequences:
>>> from skbio import DNA
>>> from skbio.alignment import global_pairwise_align_nucleotide
>>> s1 = DNA("GCGTGCCTAAGGTATGCAAG")
>>> s2 = DNA("ACGTGCCTAGGTACGCAAG")
>>> alignment, score, start_end_positions = global_pairwise_align_nucleotide(
... s1, s2)
>>> alignment
TabularMSA[DNA]
----------------------
Stats:
sequence count: 2
position count: 20
----------------------
GCGTGCCTAAGGTATGCAAG
ACGTGCCTA-GGTACGCAAG
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from ._tabular_msa import TabularMSA
from ._pairwise import (
local_pairwise_align_nucleotide, local_pairwise_align_protein,
local_pairwise_align, global_pairwise_align_nucleotide,
global_pairwise_align_protein, global_pairwise_align,
make_identity_substitution_matrix, local_pairwise_align_ssw
)
from skbio.alignment._ssw_wrapper import (
StripedSmithWaterman, AlignmentStructure)
__all__ = ['TabularMSA', 'StripedSmithWaterman', 'AlignmentStructure',
'local_pairwise_align_ssw', 'global_pairwise_align',
'global_pairwise_align_nucleotide', 'global_pairwise_align_protein',
'local_pairwise_align', 'local_pairwise_align_nucleotide',
'local_pairwise_align_protein', 'make_identity_substitution_matrix']
|
gregcaporaso/scikit-bio
|
skbio/alignment/__init__.py
|
Python
|
bsd-3-clause
| 6,612
|
[
"scikit-bio"
] |
9632a5824eb0c0a15b36125a7eae7891f7f7b9f5e633f1966751e4f3ae576e31
|
__doc__ = """Code by Benjamin S. Murphy
bscott.murphy@gmail.com
Dependencies:
numpy
scipy
matplotlib
Cython
Classes:
OrdinaryKriging: Convenience class for easy access to 2D Ordinary Kriging.
References:
P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
Copyright (c) 2015 Benjamin S. Murphy
"""
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
import variogram_models
import core
class OrdinaryKriging:
"""class OrdinaryKriging
Convenience class for easy access to 2D Ordinary Kriging
Dependencies:
numpy
scipy
matplotlib
Inputs:
X (array-like): X-coordinates of data points.
Y (array-like): Y-coordinates of data points.
Z (array-like): Values at data points.
variogram_model (string, optional): Specified which variogram model to use;
may be one of the following: linear, power, gaussian, spherical,
exponential. Default is linear variogram model. To utilize as custom variogram
model, specify 'custom'; you must also provide variogram_parameters and
variogram_function.
variogram_parameters (list, optional): Parameters that define the
specified variogram model. If not provided, parameters will be automatically
calculated such that the root-mean-square error for the fit variogram
function is minimized.
linear - [slope, nugget]
power - [scale, exponent, nugget]
gaussian - [sill, range, nugget]
spherical - [sill, range, nugget]
exponential - [sill, range, nugget]
For a custom variogram model, the parameters are required, as custom variogram
models currently will not automatically be fit to the data. The code does not
check that the provided list contains the appropriate number of parameters for
the custom variogram model, so an incorrect parameter list in such a case will
probably trigger an esoteric exception someplace deep in the code.
variogram_function (callable, optional): A callable function that must be provided
if variogram_model is specified as 'custom'. The function must take only two
arguments: first, a list of parameters for the variogram model; second, the
distances at which to calculate the variogram model. The list provided in
variogram_parameters will be passed to the function as the first argument.
nlags (int, optional): Number of averaging bins for the semivariogram.
Default is 6.
weight (boolean, optional): Flag that specifies if semivariance at smaller lags
should be weighted more heavily when automatically calculating variogram model.
True indicates that weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more important in
fitting a variogram model, so the option is provided to enable such weighting.)
anisotropy_scaling (float, optional): Scalar stretching value to take
into account anisotropy. Default is 1 (effectively no stretching).
Scaling is applied in the y-direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle, if anisotropy_angle
is not 0).
anisotropy_angle (float, optional): CCW angle (in degrees) by which to
rotate coordinate system in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
verbose (Boolean, optional): Enables program text output to monitor
kriging process. Default is False (off).
enable_plotting (Boolean, optional): Enables plotting to display
variogram. Default is False (off).
enable_statistics (Boolean, optional). Default is False
Callable Methods:
display_variogram_model(): Displays semivariogram and variogram model.
update_variogram_model(variogram_model, variogram_parameters=None, nlags=6,
anisotropy_scaling=1.0, anisotropy_angle=0.0):
Changes the variogram model and variogram parameters for
the kriging system.
Inputs:
variogram_model (string): May be any of the variogram models
listed above. May also be 'custom', in which case variogram_parameters
and variogram_function must be specified.
variogram_parameters (list, optional): List of variogram model
parameters, as listed above. If not provided, a best fit model
will be calculated as described above.
variogram_function (callable, optional): A callable function that must be
provided if variogram_model is specified as 'custom'. See above for
more information.
nlags (int, optional): Number of averaging bins for the semivariogram.
Defualt is 6.
weight (boolean, optional): Flag that specifies if semivariance at smaller lags
should be weighted more heavily when automatically calculating variogram model.
True indicates that weights will be applied. Default is False.
anisotropy_scaling (float, optional): Scalar stretching value to
take into account anisotropy. Default is 1 (effectively no
stretching). Scaling is applied in the y-direction.
anisotropy_angle (float, optional): CCW angle (in degrees) by which to
rotate coordinate system in order to take into account
anisotropy. Default is 0 (no rotation).
switch_verbose(): Enables/disables program text output. No arguments.
switch_plotting(): Enables/disable variogram plot display. No arguments.
get_epsilon_residuals(): Returns the epsilon residuals of the
variogram fit. No arguments.
plot_epsilon_residuals(): Plots the epsilon residuals of the variogram
fit in the order in which they were calculated. No arguments.
get_statistics(): Returns the Q1, Q2, and cR statistics for the
variogram fit (in that order). No arguments.
print_statistics(): Prints out the Q1, Q2, and cR statistics for
the variogram fit. NOTE that ideally Q1 is close to zero,
Q2 is close to 1, and cR is as small as possible.
execute(style, xpoints, ypoints, mask=None): Calculates a kriged grid.
Inputs:
style (string): Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints and ypoints as two arrays of
x and y coordinates that define a rectangular grid.
Specifying 'points' treats xpoints and ypoints as two arrays
that provide coordinate pairs at which to solve the kriging system.
Specifying 'masked' treats xpoints and ypoints as two arrays of)
x and y coordinates that define a rectangular grid and uses mask
to only evaluate specific points in the grid.
xpoints (array-like, dim Nx1): If style is specific as 'grid' or 'masked',
x-coordinates of MxN grid. If style is specified as 'points',
x-coordinates of specific points at which to solve kriging system.
ypoints (array-like, dim Mx1): If style is specified as 'grid' or 'masked',
y-coordinates of MxN grid. If style is specified as 'points',
y-coordinates of specific points at which to solve kriging system.
mask (boolean array, dim MxN, optional): Specifies the points in the rectangular
grid defined by xpoints and ypoints that are to be excluded in the
kriging calculations. Must be provided if style is specified as 'masked'.
False indicates that the point should not be masked; True indicates that
the point should be masked.
backend (string, optional): Specifies which approach to use in kriging.
Specifying 'vectorized' will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging system
is to be solved. This approach is slower but also less memory-intensive.
Specifying 'C' will utilize a loop in Cython.
Default is 'vectorized'.
n_closest_points (int, optional): For kriging with a moving window, specifies the number
of nearby points to use in the calculation. This can speed up the calculation for large
datasets, but should be used with caution. As Kitanidis notes, kriging with a moving
window can produce unexpected oddities if the variogram model is not carefully chosen.
Outputs:
zvalues (numpy array, dim MxN or dim Nx1): Z-values of specified grid or at the
specified set of points. If style was specified as 'masked', zvalues will
be a numpy masked array.
sigmasq (numpy array, dim MxN or dim Nx1): Variance at specified grid points or
at the specified set of points. If style was specified as 'masked', sigmasq
will be a numpy masked array.
References:
P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
"""
eps = 1.e-10 # Cutoff for comparison to zero
variogram_dict = {'linear': variogram_models.linear_variogram_model,
'power': variogram_models.power_variogram_model,
'gaussian': variogram_models.gaussian_variogram_model,
'spherical': variogram_models.spherical_variogram_model,
'exponential': variogram_models.exponential_variogram_model}
def __init__(self, x, y, z, variogram_model='linear', variogram_parameters=None,
variogram_function=None, nlags=6, weight=False, anisotropy_scaling=1.0,
anisotropy_angle=0.0, verbose=False, enable_plotting=False,
enable_statistics=False):
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = np.atleast_1d(np.squeeze(np.array(x, copy=True)))
self.Y_ORIG = np.atleast_1d(np.squeeze(np.array(y, copy=True)))
self.Z = np.atleast_1d(np.squeeze(np.array(z, copy=True)))
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print "Plotting Enabled\n"
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG))/2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG))/2.0
self.anisotropy_scaling = anisotropy_scaling
self.anisotropy_angle = anisotropy_angle
if self.verbose:
print "Adjusting data for anisotropy..."
self.X_ADJUSTED, self.Y_ADJUSTED = \
core.adjust_for_anisotropy(np.copy(self.X_ORIG), np.copy(self.Y_ORIG),
self.XCENTER, self.YCENTER,
self.anisotropy_scaling, self.anisotropy_angle)
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print "Initializing variogram model..."
self.lags, self.semivariance, self.variogram_model_parameters = \
core.initialize_variogram_model(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z,
self.variogram_model, variogram_parameters,
self.variogram_function, nlags, weight)
if self.verbose:
if self.variogram_model == 'linear':
print "Using '%s' Variogram Model" % 'linear'
print "Slope:", self.variogram_model_parameters[0]
print "Nugget:", self.variogram_model_parameters[1], '\n'
elif self.variogram_model == 'power':
print "Using '%s' Variogram Model" % 'power'
print "Scale:", self.variogram_model_parameters[0]
print "Exponent:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
elif self.variogram_model == 'custom':
print "Using Custom Variogram Model"
else:
print "Using '%s' Variogram Model" % self.variogram_model
print "Sill:", self.variogram_model_parameters[0]
print "Range:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print "Calculating statistics on variogram model fit..."
if enable_statistics:
self.delta, self.sigma, self.epsilon = core.find_statistics(self.X_ADJUSTED, self.Y_ADJUSTED,
self.Z, self.variogram_function,
self.variogram_model_parameters)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print "Q1 =", self.Q1
print "Q2 =", self.Q2
print "cR =", self.cR, '\n'
else:
self.delta, self.sigma, self.epsilon, self.Q1, self.Q2, self.cR = [None]*6
def update_variogram_model(self, variogram_model, variogram_parameters=None,
variogram_function=None, nlags=6, weight=False,
anisotropy_scaling=1.0, anisotropy_angle=0.0):
"""Allows user to update variogram type and/or variogram model parameters."""
if anisotropy_scaling != self.anisotropy_scaling or \
anisotropy_angle != self.anisotropy_angle:
if self.verbose:
print "Adjusting data for anisotropy..."
self.anisotropy_scaling = anisotropy_scaling
self.anisotropy_angle = anisotropy_angle
self.X_ADJUSTED, self.Y_ADJUSTED = \
core.adjust_for_anisotropy(np.copy(self.X_ORIG),
np.copy(self.Y_ORIG),
self.XCENTER, self.YCENTER,
self.anisotropy_scaling,
self.anisotropy_angle)
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print "Updating variogram mode..."
self.lags, self.semivariance, self.variogram_model_parameters = \
core.initialize_variogram_model(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z,
self.variogram_model, variogram_parameters,
self.variogram_function, nlags, weight)
if self.verbose:
if self.variogram_model == 'linear':
print "Using '%s' Variogram Model" % 'linear'
print "Slope:", self.variogram_model_parameters[0]
print "Nugget:", self.variogram_model_parameters[1], '\n'
elif self.variogram_model == 'power':
print "Using '%s' Variogram Model" % 'power'
print "Scale:", self.variogram_model_parameters[0]
print "Exponent:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
elif self.variogram_model == 'custom':
print "Using Custom Variogram Model"
else:
print "Using '%s' Variogram Model" % self.variogram_model
print "Sill:", self.variogram_model_parameters[0]
print "Range:", self.variogram_model_parameters[1]
print "Nugget:", self.variogram_model_parameters[2], '\n'
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print "Calculating statistics on variogram model fit..."
self.delta, self.sigma, self.epsilon = core.find_statistics(self.X_ADJUSTED, self.Y_ADJUSTED,
self.Z, self.variogram_function,
self.variogram_model_parameters)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print "Q1 =", self.Q1
print "Q2 =", self.Q2
print "cR =", self.cR, '\n'
def display_variogram_model(self):
"""Displays variogram model with the actual binned data"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags), 'k-')
plt.show()
def switch_verbose(self):
"""Allows user to switch code talk-back on/off. Takes no arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Allows user to switch plot display on/off. Takes no arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
return self.Q1, self.Q2, self.cR
def print_statistics(self):
print "Q1 =", self.Q1
print "Q2 =", self.Q2
print "cR =", self.cR
def _get_kriging_matrix(self, n):
"""Assembles the kriging matrix."""
xy = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1)
d = cdist(xy, xy, 'euclidean')
a = np.zeros((n+1, n+1))
a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
return a
def _exec_vector(self, a, bd, mask):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
b = np.zeros((npt, n+1, 1))
b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], zero_index[1], 0] = 0.0
b[:, n, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n+1, axis=1)
b = np.ma.array(b, mask=mask_b)
x = np.dot(a_inv, b.reshape((npt, n+1)).T).reshape((1, n+1, npt)).T
zvalues = np.sum(x[:, :n, 0] * self.Z, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return zvalues, sigmasq
def _exec_loop(self, a, bd_all, mask):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
zvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[0]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_index = None
zero_value = False
b = np.zeros((n+1, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = np.dot(a_inv, b)
zvalues[j] = np.sum(x[:n, 0] * self.Z)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return zvalues, sigmasq
def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
import scipy.linalg.lapack
npt = bd_all.shape[0]
n = bd_idx.shape[1]
zvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
for i in np.nonzero(~mask)[0]: # Note that this is the same thing as range(npt) if mask is not defined,
b_selector = bd_idx[i] # otherwise it takes the non-masked elements.
bd = bd_all[i]
a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1])))
a = a_all[a_selector[:, None], a_selector]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_index = None
zero_value = False
b = np.zeros((n+1, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = scipy.linalg.solve(a, b)
zvalues[i] = x[:n, 0].dot(self.Z[b_selector])
sigmasq[i] = - x[:, 0].dot(b[:, 0])
return zvalues, sigmasq
def execute(self, style, xpoints, ypoints, mask=None, backend='vectorized', n_closest_points=None):
"""Calculates a kriged grid and the associated variance.
This is now the method that performs the main kriging calculation. Note that currently
measurements (i.e., z values) are considered 'exact'. This means that, when a specified
coordinate for interpolation is exactly the same as one of the data points, the variogram
evaluated at the point is forced to be zero. Also, the diagonal of the kriging matrix is
also always forced to be zero. In forcing the variogram evaluated at data points to be zero,
we are effectively saying that there is no variance at that point (no uncertainty,
so the value is 'exact').
In the future, the code may include an extra 'exact_values' boolean flag that can be
adjusted to specify whether to treat the measurements as 'exact'. Setting the flag
to false would indicate that the variogram should not be forced to be zero at zero distance
(i.e., when evaluated at data points). Instead, the uncertainty in the point will be
equal to the nugget. This would mean that the diagonal of the kriging matrix would be set to
the nugget instead of to zero.
Inputs:
style (string): Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints and ypoints as two arrays of
x and y coordinates that define a rectangular grid.
Specifying 'points' treats xpoints and ypoints as two arrays
that provide coordinate pairs at which to solve the kriging system.
Specifying 'masked' treats xpoints and ypoints as two arrays of
x and y coordinates that define a rectangular grid and uses mask
to only evaluate specific points in the grid.
xpoints (array-like, dim N): If style is specific as 'grid' or 'masked',
x-coordinates of MxN grid. If style is specified as 'points',
x-coordinates of specific points at which to solve kriging system.
ypoints (array-like, dim M): If style is specified as 'grid' or 'masked',
y-coordinates of MxN grid. If style is specified as 'points',
y-coordinates of specific points at which to solve kriging system.
Note that in this case, xpoints and ypoints must have the same dimensions
(i.e., M = N).
mask (boolean array, dim MxN, optional): Specifies the points in the rectangular
grid defined by xpoints and ypoints that are to be excluded in the
kriging calculations. Must be provided if style is specified as 'masked'.
False indicates that the point should not be masked, so the kriging system
will be solved at the point.
True indicates that the point should be masked, so the kriging system should
will not be solved at the point.
backend (string, optional): Specifies which approach to use in kriging.
Specifying 'vectorized' will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging system
is to be solved. This approach is slower but also less memory-intensive.
Specifying 'C' will utilize a loop in Cython.
Default is 'vectorized'.
n_closest_points (int, optional): For kriging with a moving window, specifies the number
of nearby points to use in the calculation. This can speed up the calculation for large
datasets, but should be used with caution. As Kitanidis notes, kriging with a moving
window can produce unexpected oddities if the variogram model is not carefully chosen.
Outputs:
zvalues (numpy array, dim MxN or dim Nx1): Z-values of specified grid or at the
specified set of points. If style was specified as 'masked', zvalues will
be a numpy masked array.
sigmasq (numpy array, dim MxN or dim Nx1): Variance at specified grid points or
at the specified set of points. If style was specified as 'masked', sigmasq
will be a numpy masked array.
"""
if self.verbose:
print "Executing Ordinary Kriging...\n"
if style != 'grid' and style != 'masked' and style != 'points':
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
n = self.X_ADJUSTED.shape[0]
nx = xpts.size
ny = ypts.size
a = self._get_kriging_matrix(n)
if style in ['grid', 'masked']:
if style == 'masked':
if mask is None:
raise IOError("Must specify boolean masking array when style is 'masked'.")
if mask.shape[0] != ny or mask.shape[1] != nx:
if mask.shape[0] == nx and mask.shape[1] == ny:
mask = mask.T
else:
raise ValueError("Mask dimensions do not match specified grid dimensions.")
mask = mask.flatten()
npt = ny*nx
grid_x, grid_y = np.meshgrid(xpts, ypts)
xpts = grid_x.flatten()
ypts = grid_y.flatten()
elif style == 'points':
if xpts.size != ypts.size:
raise ValueError("xpoints and ypoints must have same dimensions "
"when treated as listing discrete points.")
npt = nx
else:
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts, ypts = core.adjust_for_anisotropy(xpts, ypts, self.XCENTER, self.YCENTER,
self.anisotropy_scaling, self.anisotropy_angle)
if style != 'masked':
mask = np.zeros(npt, dtype='bool')
xy_points = np.concatenate((xpts[:, np.newaxis], ypts[:, np.newaxis]), axis=1)
xy_data = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1)
if backend == 'C':
try:
from .lib.cok import _c_exec_loop, _c_exec_loop_moving_window
except ImportError:
raise ImportError('C backend failed to load the Cython extension')
except:
raise RuntimeError("Unknown error in trying to load Cython extension.")
c_pars = {key: getattr(self, key) for key in ['Z', 'eps', 'variogram_model_parameters',
'variogram_function']}
else:
c_pars = None
if n_closest_points is not None:
from scipy.spatial import cKDTree
tree = cKDTree(xy_data)
bd, bd_idx = tree.query(xy_points, k=n_closest_points, eps=0.0)
if backend == 'loop':
zvalues, sigmasq = self._exec_loop_moving_window(a, bd, mask, bd_idx)
elif backend == 'C':
zvalues, sigmasq = _c_exec_loop_moving_window(a, bd, mask.astype('int8'),
bd_idx, self.X_ADJUSTED.shape[0], c_pars)
else:
raise ValueError('Specified backend {} for a moving window is not supported.'.format(backend))
else:
bd = cdist(xy_points, xy_data, 'euclidean')
if backend == 'vectorized':
zvalues, sigmasq = self._exec_vector(a, bd, mask)
elif backend == 'loop':
zvalues, sigmasq = self._exec_loop(a, bd, mask)
elif backend == 'C':
zvalues, sigmasq = _c_exec_loop(a, bd, mask.astype('int8'), self.X_ADJUSTED.shape[0], c_pars)
else:
raise ValueError('Specified backend {} is not supported for 2D ordinary kriging.'.format(backend))
if style == 'masked':
zvalues = np.ma.array(zvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ['masked', 'grid']:
zvalues = zvalues.reshape((ny, nx))
sigmasq = sigmasq.reshape((ny, nx))
return zvalues, sigmasq
|
yejingxin/PyKrige
|
pykrige/ok.py
|
Python
|
bsd-3-clause
| 33,614
|
[
"Gaussian"
] |
ef783e8827514c503adb0c4b8e453d9673829e45fb537de11d3031d96b6cccf4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("text_insight.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permissin Denied")}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
]
|
redmoonlas/textinsight
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,435
|
[
"VisIt"
] |
eae1b18014ba6898de6a6e81f45e06edb60a49e7fb7918575d618afa2eb12efe
|
# -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Parser for GAMESS-UK output files"""
import re
import numpy
from . import logfileparser
from . import utils
class GAMESSUK(logfileparser.Logfile):
"""A GAMESS UK log file"""
SCFRMS, SCFMAX, SCFENERGY = list(range(3)) # Used to index self.scftargets[]
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(GAMESSUK, self).__init__(logname="GAMESSUK", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "GAMESS UK log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'GAMESSUK("%s")' % (self.filename)
def normalisesym(self, label):
"""Use standard symmetry labels instead of GAMESS UK labels.
>>> t = GAMESSUK("dummyfile.txt")
>>> labels = ['a', 'a1', 'ag', "a'", 'a"', "a''", "a1''", 'a1"']
>>> labels.extend(["e1+", "e1-"])
>>> answer = [t.normalisesym(x) for x in labels]
>>> answer
['A', 'A1', 'Ag', "A'", 'A"', 'A"', 'A1"', 'A1"', 'E1', 'E1']
"""
label = label.replace("''", '"').replace("+", "").replace("-", "")
ans = label[0].upper() + label[1:]
return ans
def before_parsing(self):
# used for determining whether to add a second mosyms, etc.
self.betamosyms = self.betamoenergies = self.betamocoeffs = False
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
if line[1:22] == "total number of atoms":
natom = int(line.split()[-1])
self.set_attribute('natom', natom)
if line[3:44] == "convergence threshold in optimization run":
# Assuming that this is only found in the case of OPTXYZ
# (i.e. an optimization in Cartesian coordinates)
self.geotargets = [float(line.split()[-2])]
if line[32:61] == "largest component of gradient":
# This is the geotarget in the case of OPTXYZ
if not hasattr(self, "geovalues"):
self.geovalues = []
self.geovalues.append([float(line.split()[4])])
if line[37:49] == "convergence?":
# Get the geovalues and geotargets for OPTIMIZE
if not hasattr(self, "geovalues"):
self.geovalues = []
self.geotargets = []
geotargets = []
geovalues = []
for i in range(4):
temp = line.split()
geovalues.append(float(temp[2]))
if not self.geotargets:
geotargets.append(float(temp[-2]))
line = next(inputfile)
self.geovalues.append(geovalues)
if not self.geotargets:
self.geotargets = geotargets
# This is the only place coordinates are printed in single point calculations. Note that
# in the following fragment, the basis set selection is not always printed:
#
# ******************
# molecular geometry
# ******************
#
# ****************************************
# * basis selected is sto sto3g *
# ****************************************
#
# *******************************************************************************
# * *
# * atom atomic coordinates number of *
# * charge x y z shells *
# * *
# *******************************************************************************
# * *
# * *
# * c 6.0 0.0000000 -2.6361501 0.0000000 2 *
# * 1s 2sp *
# * *
# * *
# * c 6.0 0.0000000 2.6361501 0.0000000 2 *
# * 1s 2sp *
# * *
# ...
#
if line.strip() == "molecular geometry":
self.updateprogress(inputfile, "Coordinates")
self.skip_lines(inputfile, ['s', 'b', 's'])
line = next(inputfile)
if "basis selected is" in line:
self.skip_lines(inputfile, ['s', 'b', 's', 's'])
self.skip_lines(inputfile, ['header1', 'header2', 's', 's'])
atomnos = []
atomcoords = []
line = next(inputfile)
while line.strip():
line = next(inputfile)
if line.strip()[1:10].strip() and list(set(line.strip())) != ['*']:
atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in line.split()[3:6]])
atomnos.append(int(round(float(line.split()[2]))))
if not hasattr(self, "atomcoords"):
self.atomcoords = []
self.atomcoords.append(atomcoords)
self.set_attribute('atomnos', atomnos)
# Each step of a geometry optimization will also print the coordinates:
#
# search 0
# *******************
# point 0 nuclear coordinates
# *******************
#
# x y z chg tag
# ============================================================
# 0.0000000 -2.6361501 0.0000000 6.00 c
# 0.0000000 2.6361501 0.0000000 6.00 c
# ..
#
if line[40:59] == "nuclear coordinates":
self.updateprogress(inputfile, "Coordinates")
# We need not remember the first geometry in geometry optimizations, as this will
# be already parsed from the "molecular geometry" section (see above).
if not hasattr(self, 'firstnuccoords') or self.firstnuccoords:
self.firstnuccoords = False
return
self.skip_lines(inputfile, ['s', 'b', 'colname', 'e'])
atomcoords = []
atomnos = []
line = next(inputfile)
while list(set(line.strip())) != ['=']:
cols = line.split()
atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in cols[0:3]])
atomnos.append(int(float(cols[3])))
line = next(inputfile)
if not hasattr(self, "atomcoords"):
self.atomcoords = []
self.atomcoords.append(atomcoords)
self.set_attribute('atomnos', atomnos)
# This is printed when a geometry optimization succeeds, after the last gradient of the energy.
if line[40:62] == "optimization converged":
self.skip_line(inputfile, 's')
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues)-1)
# This is apparently printed when a geometry optimization is not converged but the job ends.
if "minimisation not converging" in line:
self.skip_line(inputfile, 's')
self.optdone = []
if line[1:32] == "total number of basis functions":
nbasis = int(line.split()[-1])
self.set_attribute('nbasis', nbasis)
while line.find("charge of molecule") < 0:
line = next(inputfile)
charge = int(line.split()[-1])
self.set_attribute('charge', charge)
mult = int(next(inputfile).split()[-1])
self.set_attribute('mult', mult)
alpha = int(next(inputfile).split()[-1])-1
beta = int(next(inputfile).split()[-1])-1
if self.mult == 1:
self.homos = numpy.array([alpha], "i")
else:
self.homos = numpy.array([alpha, beta], "i")
if line[37:69] == "s-matrix over gaussian basis set":
self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
self.skip_lines(inputfile, ['d', 'b'])
i = 0
while i < self.nbasis:
self.updateprogress(inputfile, "Overlap")
self.skip_lines(inputfile, ['b', 'b', 'header', 'b', 'b'])
for j in range(self.nbasis):
temp = list(map(float, next(inputfile).split()[1:]))
self.aooverlaps[j, (0+i):(len(temp)+i)] = temp
i += len(temp)
if line[18:43] == 'EFFECTIVE CORE POTENTIALS':
self.skip_line(inputfile, 'stars')
self.coreelectrons = numpy.zeros(self.natom, 'i')
line = next(inputfile)
while line[15:46] != "*"*31:
if line.find("for atoms ...") >= 0:
atomindex = []
line = next(inputfile)
while line.find("core charge") < 0:
broken = line.split()
atomindex.extend([int(x.split("-")[0]) for x in broken])
line = next(inputfile)
charge = float(line.split()[4])
for idx in atomindex:
self.coreelectrons[idx-1] = self.atomnos[idx-1] - charge
line = next(inputfile)
if line[3:27] == "Wavefunction convergence":
self.scftarget = float(line.split()[-2])
self.scftargets = []
if line[11:22] == "normal mode":
if not hasattr(self, "vibfreqs"):
self.vibfreqs = []
self.vibirs = []
units = next(inputfile)
xyz = next(inputfile)
equals = next(inputfile)
line = next(inputfile)
while line != equals:
temp = line.split()
self.vibfreqs.append(float(temp[1]))
self.vibirs.append(float(temp[-2]))
line = next(inputfile)
# Use the length of the vibdisps to figure out
# how many rotations and translations to remove
self.vibfreqs = self.vibfreqs[-len(self.vibdisps):]
self.vibirs = self.vibirs[-len(self.vibdisps):]
if line[44:73] == "normalised normal coordinates":
self.skip_lines(inputfile, ['e', 'b', 'b'])
self.vibdisps = []
freqnum = next(inputfile)
while freqnum.find("=") < 0:
self.skip_lines(inputfile, ['b', 'e', 'freqs', 'e', 'b', 'header', 'e'])
p = [[] for x in range(9)]
for i in range(len(self.atomnos)):
brokenx = list(map(float, next(inputfile)[25:].split()))
brokeny = list(map(float, next(inputfile)[25:].split()))
brokenz = list(map(float, next(inputfile)[25:].split()))
for j, x in enumerate(list(zip(brokenx, brokeny, brokenz))):
p[j].append(x)
self.vibdisps.extend(p)
self.skip_lines(inputfile, ['b', 'b'])
freqnum = next(inputfile)
if line[26:36] == "raman data":
self.vibramans = []
self.skip_lines(inputfile, ['s', 'b', 'header', 'b'])
line = next(inputfile)
while line[1] != "*":
self.vibramans.append(float(line.split()[3]))
self.skip_line(inputfile, 'blank')
line = next(inputfile)
# Use the length of the vibdisps to figure out
# how many rotations and translations to remove
self.vibramans = self.vibramans[-len(self.vibdisps):]
if line[3:11] == "SCF TYPE":
self.scftype = line.split()[-2]
assert self.scftype in ['rhf', 'uhf', 'gvb'], "%s not one of 'rhf', 'uhf' or 'gvb'" % self.scftype
if line[15:31] == "convergence data":
if not hasattr(self, "scfvalues"):
self.scfvalues = []
self.scftargets.append([self.scftarget]) # Assuming it does not change over time
while line[1:10] != "="*9:
line = next(inputfile)
line = next(inputfile)
tester = line.find("tester") # Can be in a different place depending
assert tester >= 0
while line[1:10] != "="*9: # May be two or three lines (unres)
line = next(inputfile)
scfvalues = []
line = next(inputfile)
while line.strip():
if line[2:6] != "****":
# e.g. **** recalulation of fock matrix on iteration 4 (examples/chap12/pyridine.out)
scfvalues.append([float(line[tester-5:tester+6])])
line = next(inputfile)
self.scfvalues.append(scfvalues)
if line[10:22] == "total energy" and len(line.split()) == 3:
if not hasattr(self, "scfenergies"):
self.scfenergies = []
scfenergy = utils.convertor(float(line.split()[-1]), "hartree", "eV")
self.scfenergies.append(scfenergy)
# Total energies after Moller-Plesset corrections
# Second order correction is always first, so its first occurance
# triggers creation of mpenergies (list of lists of energies)
# Further corrections are appended as found
# Note: GAMESS-UK sometimes prints only the corrections,
# so they must be added to the last value of scfenergies
if line[10:32] == "mp2 correlation energy" or \
line[10:42] == "second order perturbation energy":
if not hasattr(self, "mpenergies"):
self.mpenergies = []
self.mpenergies.append([])
self.mp2correction = self.float(line.split()[-1])
self.mp2energy = self.scfenergies[-1] + self.mp2correction
self.mpenergies[-1].append(utils.convertor(self.mp2energy, "hartree", "eV"))
if line[10:41] == "third order perturbation energy":
self.mp3correction = self.float(line.split()[-1])
self.mp3energy = self.mp2energy + self.mp3correction
self.mpenergies[-1].append(utils.convertor(self.mp3energy, "hartree", "eV"))
if line[40:59] == "molecular basis set":
self.gbasis = []
line = next(inputfile)
while line.find("contraction coefficients") < 0:
line = next(inputfile)
equals = next(inputfile)
blank = next(inputfile)
atomname = next(inputfile)
basisregexp = re.compile("\d*(\D+)") # Get everything after any digits
shellcounter = 1
while line != equals:
gbasis = [] # Stores basis sets on one atom
blank = next(inputfile)
blank = next(inputfile)
line = next(inputfile)
shellno = int(line.split()[0])
shellgap = shellno - shellcounter
shellsize = 0
while len(line.split()) != 1 and line != equals:
if line.split():
shellsize += 1
coeff = {}
# coefficients and symmetries for a block of rows
while line.strip() and line != equals:
temp = line.strip().split()
# temp[1] may be either like (a) "1s" and "1sp", or (b) "s" and "sp"
# See GAMESS-UK 7.0 distribution/examples/chap12/pyridine2_21m10r.out
# for an example of the latter
sym = basisregexp.match(temp[1]).groups()[0]
assert sym in ['s', 'p', 'd', 'f', 'sp'], "'%s' not a recognized symmetry" % sym
if sym == "sp":
coeff.setdefault("S", []).append((float(temp[3]), float(temp[6])))
coeff.setdefault("P", []).append((float(temp[3]), float(temp[10])))
else:
coeff.setdefault(sym.upper(), []).append((float(temp[3]), float(temp[6])))
line = next(inputfile)
# either a blank or a continuation of the block
if coeff:
if sym == "sp":
gbasis.append(('S', coeff['S']))
gbasis.append(('P', coeff['P']))
else:
gbasis.append((sym.upper(), coeff[sym.upper()]))
if line == equals:
continue
line = next(inputfile)
# either the start of the next block or the start of a new atom or
# the end of the basis function section (signified by a line of equals)
numtoadd = 1 + (shellgap // shellsize)
shellcounter = shellno + shellsize
for x in range(numtoadd):
self.gbasis.append(gbasis)
if line[50:70] == "----- beta set -----":
self.betamosyms = True
self.betamoenergies = True
self.betamocoeffs = True
# betamosyms will be turned off in the next
# SYMMETRY ASSIGNMENT section
if line[31:50] == "SYMMETRY ASSIGNMENT":
if not hasattr(self, "mosyms"):
self.mosyms = []
multiple = {'a': 1, 'b': 1, 'e': 2, 't': 3, 'g': 4, 'h': 5}
equals = next(inputfile)
line = next(inputfile)
while line != equals: # There may be one or two lines of title (compare mg10.out and duhf_1.out)
line = next(inputfile)
mosyms = []
line = next(inputfile)
while line != equals:
temp = line[25:30].strip()
if temp[-1] == '?':
# e.g. e? or t? or g? (see example/chap12/na7mg_uhf.out)
# for two As, an A and an E, and two Es of the same energy respectively.
t = line[91:].strip().split()
for i in range(1, len(t), 2):
for j in range(multiple[t[i][0]]): # add twice for 'e', etc.
mosyms.append(self.normalisesym(t[i]))
else:
for j in range(multiple[temp[0]]):
mosyms.append(self.normalisesym(temp)) # add twice for 'e', etc.
line = next(inputfile)
assert len(mosyms) == self.nmo, "mosyms: %d but nmo: %d" % (len(mosyms), self.nmo)
if self.betamosyms:
# Only append if beta (otherwise with IPRINT SCF
# it will add mosyms for every step of a geo opt)
self.mosyms.append(mosyms)
self.betamosyms = False
elif self.scftype == 'gvb':
# gvb has alpha and beta orbitals but they are identical
self.mosysms = [mosyms, mosyms]
else:
self.mosyms = [mosyms]
if line[50:62] == "eigenvectors":
# Mocoeffs...can get evalues from here too
# (only if using FORMAT HIGH though will they all be present)
if not hasattr(self, "mocoeffs"):
self.aonames = []
aonames = []
minus = next(inputfile)
mocoeffs = numpy.zeros((self.nmo, self.nbasis), "d")
readatombasis = False
if not hasattr(self, "atombasis"):
self.atombasis = []
for i in range(self.natom):
self.atombasis.append([])
readatombasis = True
self.skip_lines(inputfile, ['b', 'b', 'evalues'])
p = re.compile(r"\d+\s+(\d+)\s*(\w+) (\w+)")
oldatomname = "DUMMY VALUE"
mo = 0
while mo < self.nmo:
self.updateprogress(inputfile, "Coefficients")
self.skip_lines(inputfile, ['b', 'b', 'nums', 'b', 'b'])
for basis in range(self.nbasis):
line = next(inputfile)
# Fill atombasis only first time around.
if readatombasis:
orbno = int(line[1:5])-1
atomno = int(line[6:9])-1
self.atombasis[atomno].append(orbno)
if not self.aonames:
pg = p.match(line[:18].strip()).groups()
atomname = "%s%s%s" % (pg[1][0].upper(), pg[1][1:], pg[0])
if atomname != oldatomname:
aonum = 1
oldatomname = atomname
name = "%s_%d%s" % (atomname, aonum, pg[2].upper())
if name in aonames:
aonum += 1
name = "%s_%d%s" % (atomname, aonum, pg[2].upper())
aonames.append(name)
temp = list(map(float, line[19:].split()))
mocoeffs[mo:(mo+len(temp)), basis] = temp
# Fill atombasis only first time around.
readatombasis = False
if not self.aonames:
self.aonames = aonames
line = next(inputfile) # blank line
while not line.strip():
line = next(inputfile)
evalues = line
if evalues[:17].strip(): # i.e. if these aren't evalues
break # Not all the MOs are present
mo += len(temp)
mocoeffs = mocoeffs[0:(mo+len(temp)), :] # In case some aren't present
if self.betamocoeffs:
self.mocoeffs.append(mocoeffs)
else:
self.mocoeffs = [mocoeffs]
if line[7:12] == "irrep":
########## eigenvalues ###########
# This section appears once at the start of a geo-opt and once at the end
# unless IPRINT SCF is used (when it appears at every step in addition)
if not hasattr(self, "moenergies"):
self.moenergies = []
equals = next(inputfile)
while equals[1:5] != "====": # May be one or two lines of title (compare duhf_1.out and mg10.out)
equals = next(inputfile)
moenergies = []
line = next(inputfile)
if not line.strip(): # May be a blank line here (compare duhf_1.out and mg10.out)
line = next(inputfile)
while line.strip() and line != equals: # May end with a blank or equals
temp = line.strip().split()
moenergies.append(utils.convertor(float(temp[2]), "hartree", "eV"))
line = next(inputfile)
self.nmo = len(moenergies)
if self.betamoenergies:
self.moenergies.append(moenergies)
self.betamoenergies = False
elif self.scftype == 'gvb':
self.moenergies = [moenergies, moenergies]
else:
self.moenergies = [moenergies]
# The dipole moment is printed by default at the beginning of the wavefunction analysis,
# but the value is in atomic units, so we need to convert to Debye. It seems pretty
# evident that the reference point is the origin (0,0,0) which is also the center
# of mass after reorientation at the beginning of the job, although this is not
# stated anywhere (would be good to check).
#
# *********************
# wavefunction analysis
# *********************
#
# commence analysis at 24.61 seconds
#
# dipole moments
#
#
# nuclear electronic total
#
# x 0.0000000 0.0000000 0.0000000
# y 0.0000000 0.0000000 0.0000000
# z 0.0000000 0.0000000 0.0000000
#
if line.strip() == "dipole moments":
# In older version there is only one blank line before the header,
# and newer version there are two.
self.skip_line(inputfile, 'blank')
line = next(inputfile)
if not line.strip():
line = next(inputfile)
self.skip_line(inputfile, 'blank')
dipole = []
for i in range(3):
line = next(inputfile)
dipole.append(float(line.split()[-1]))
reference = [0.0, 0.0, 0.0]
dipole = utils.convertor(numpy.array(dipole), "ebohr", "Debye")
if not hasattr(self, 'moments'):
self.moments = [reference, dipole]
else:
assert self.moments[1] == dipole
# Net atomic charges are not printed at all, it seems,
# but you can get at them from nuclear charges and
# electron populations, which are printed like so:
#
# ---------------------------------------
# mulliken and lowdin population analyses
# ---------------------------------------
#
# ----- total gross population in aos ------
#
# 1 1 c s 1.99066 1.98479
# 2 1 c s 1.14685 1.04816
# ...
#
# ----- total gross population on atoms ----
#
# 1 c 6.0 6.00446 5.99625
# 2 c 6.0 6.00446 5.99625
# 3 c 6.0 6.07671 6.04399
# ...
if line[10:49] == "mulliken and lowdin population analyses":
if not hasattr(self, "atomcharges"):
self.atomcharges = {}
while not "total gross population on atoms" in line:
line = next(inputfile)
self.skip_line(inputfile, 'blank')
line = next(inputfile)
mulliken, lowdin = [], []
while line.strip():
nuclear = float(line.split()[2])
mulliken.append(nuclear - float(line.split()[3]))
lowdin.append(nuclear - float(line.split()[4]))
line = next(inputfile)
self.atomcharges["mulliken"] = mulliken
self.atomcharges["lowdin"] = lowdin
# ----- spinfree UHF natural orbital occupations -----
#
# 2.0000000 2.0000000 2.0000000 2.0000000 2.0000000 2.0000000 2.0000000
#
# 2.0000000 2.0000000 2.0000000 2.0000000 2.0000000 1.9999997 1.9999997
# ...
if "natural orbital occupations" in line:
occupations = []
self.skip_line(inputfile, "blank")
line = inputfile.next()
while line.strip():
occupations += map(float, line.split())
self.skip_line(inputfile, "blank")
line = inputfile.next()
self.set_attribute('nooccnos', occupations)
if __name__ == "__main__":
    # Run the doctests embedded in this module (e.g. in normalisesym).
    import doctest
    doctest.testmod()
|
ghutchis/cclib
|
src/cclib/parser/gamessukparser.py
|
Python
|
lgpl-2.1
| 28,809
|
[
"GAMESS",
"Gaussian",
"cclib"
] |
863c7e78f72b6565f16f13bbbf159ab5c1bfe1206e59fa4e58c853dac44217ac
|
import numpy as np
import scipy as scipy
import lxmls.classifiers.linear_classifier as lc
import sys
from lxmls.distributions.gaussian import *
class MultinomialNaiveBayes(lc.LinearClassifier):
    """Multinomial Naive Bayes classifier for bag-of-words document data.

    Estimates per-class priors and per-word likelihoods from a
    document-term count matrix and packs them into the log-space
    parameter matrix expected by LinearClassifier.
    """

    def __init__(self, xtype="gaussian"):
        lc.LinearClassifier.__init__(self)
        self.trained = False      # set to True after a successful train()
        self.likelihood = 0       # P(word | class) matrix, filled by train()
        self.prior = 0            # P(class) vector, filled by train()
        self.smooth = False       # enable add-alpha (Laplace) smoothing
        self.smooth_param = 1     # smoothing pseudo-count alpha

    def train(self, x, y):
        """Estimate the prior and likelihood from training data.

        Parameters
        ----------
        x : array of shape (n_docs, n_words) -- word counts per document.
        y : array of shape (n_docs, 1) -- class label per document.

        Returns
        -------
        params : array of shape (n_words + 1, n_classes); row 0 holds the
            log-priors, the remaining rows hold the per-word log-likelihoods.
        """
        # n_docs = no. of documents
        # n_words = no. of unique words
        n_docs, n_words = x.shape

        # classes = a list of possible classes; n_classes = no. of classes
        classes = np.unique(y)
        n_classes = classes.shape[0]

        # initialization of the prior and likelihood variables
        prior = np.zeros(n_classes)
        likelihood = np.zeros((n_words, n_classes))

        # range() replaces the Python 2-only xrange() so this also runs
        # on Python 3; the iteration is identical.
        for i in range(n_classes):
            # docs_in_class = indices of documents in class i
            # (np.nonzero on the 2-D mask yields (row, col) index arrays)
            docs_in_class, _ = np.nonzero(y == classes[i])
            # prior = fraction of documents with this class
            prior[i] = 1.0 * len(docs_in_class) / n_docs
            # word_count_in_class = count of word occurrences in documents of class i
            word_count_in_class = x[docs_in_class, :].sum(0)
            # total_words_in_class = total number of words in documents of class i
            total_words_in_class = word_count_in_class.sum()
            if self.smooth == False:
                # Maximum-likelihood estimate (may contain zeros).
                likelihood[:, i] = word_count_in_class / total_words_in_class
            else:
                # Add-alpha smoothing keeps every probability strictly positive.
                likelihood[:, i] = (word_count_in_class + self.smooth_param) / \
                                   (total_words_in_class + self.smooth_param * n_words)

        # Pack log-prior (row 0) and log-likelihoods into one parameter
        # matrix; nan_to_num maps log(0) = -inf to a large finite negative.
        params = np.zeros((n_words + 1, n_classes))
        for i in range(n_classes):
            params[0, i] = np.log(prior[i])
            params[1:, i] = np.nan_to_num(np.log(likelihood[:, i]))
        self.likelihood = likelihood
        self.prior = prior
        self.trained = True
        return params
|
iarroyof/lxmls-toolkit
|
lxmls/classifiers/multinomial_naive_bayes.py
|
Python
|
mit
| 2,763
|
[
"Gaussian"
] |
fe8bbe7bb5b4ab8dcb5171e372525ebef14f7aac92f30ef3d0651bbf513d0b05
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
A script that does unsupervised learning on
Spatial Transcriptomics datasets (matrix of counts)
It takes a list of datasets as input and outputs (for each given input):
- a scatter plot with the predicted classes (coulored) for each spot
- the spots plotted onto the images (if given) with the predicted class/color
- a file containing two columns (SPOT and CLASS) for each dataset
The input data frames must have the gene names as columns and
the spots coordinates as rows (1x1).
The user can select what clustering algorithm to use
and what dimensionality reduction technique to use and normalization
method to use.
Noisy spots (very few genes expressed) are removed using a parameter.
Noisy genes (expressed in very few spots) are removed using a parameter.
@Author Jose Fernandez Navarro <jose.fernandez.navarro@scilifelab.se>
"""
import argparse
import sys
import os
import numpy as np
import pandas as pd
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, FastICA, SparsePCA
from sklearn.cluster import DBSCAN
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.mixture import GaussianMixture
from stanalysis.visualization import scatter_plot, scatter_plot3d, histogram
from stanalysis.preprocessing import *
from stanalysis.alignment import parseAlignmentMatrix
from stanalysis.analysis import Rtsne, linear_conv, computeNClusters
from collections import defaultdict
import matplotlib.pyplot as plt
def main(counts_table_files,
         normalization,
         num_clusters,
         num_exp_genes,
         num_exp_spots,
         min_gene_expression,
         num_genes_keep,
         clustering,
         dimensionality,
         use_log_scale,
         alignment_files,
         image_files,
         num_dimensions,
         spot_size,
         top_genes_criteria,
         outdir,
         use_adjusted_log,
         tsne_perplexity,
         tsne_theta,
         color_space_plots):
    """Cluster one or more ST count matrices and write per-spot classes and plots.

    Pipeline: validate inputs -> merge datasets -> filter noisy spots/genes ->
    normalize -> keep top genes -> dimensionality reduction -> clustering ->
    write one <dataset>_clusters.tsv per input plus scatter plots (optionally
    over the HE images, with alignment matrices applied).
    """
    if len(counts_table_files) == 0 or \
            any([not os.path.isfile(f) for f in counts_table_files]):
        sys.stderr.write("Error, input file/s not present or invalid format\n")
        sys.exit(1)

    if image_files is not None and len(image_files) > 0 and \
            len(image_files) != len(counts_table_files):
        sys.stderr.write("Error, the number of images given as " \
                         "input is not the same as the number of datasets\n")
        sys.exit(1)

    # NOTE(review): if alignment files are given but image_files is None,
    # len(image_files) raises TypeError here -- confirm callers always pass
    # images alongside alignments.
    if alignment_files is not None and len(alignment_files) > 0 \
            and len(alignment_files) != len(image_files):
        sys.stderr.write("Error, the number of alignments given as " \
                         "input is not the same as the number of images\n")
        sys.exit(1)

    if use_adjusted_log and use_log_scale:
        sys.stdout.write("Warning, both log and adjusted log are enabled " \
                         "only adjusted log will be used\n")
        use_log_scale = False

    if tsne_theta < 0.0 or tsne_theta > 1.0:
        sys.stdout.write("Warning, invalid value for theta. Using default..\n")
        tsne_theta = 0.5

    if num_exp_genes <= 0 or num_exp_spots <= 0:
        sys.stdout.write("Error, min_exp_genes and min_exp_spots must be > 0.\n")
        sys.exit(1)

    if outdir is None or not os.path.isdir(outdir):
        outdir = os.getcwd()
    outdir = os.path.abspath(outdir)

    print("Output directory {}".format(outdir))
    print("Input datasets {}".format(" ".join(counts_table_files)))

    # Merge input datasets (Spots are rows and genes are columns)
    counts = aggregate_datatasets(counts_table_files)
    print("Total number of spots {}".format(len(counts.index)))
    print("Total number of genes {}".format(len(counts.columns)))

    # Remove noisy spots and genes (Spots are rows and genes are columns)
    # (the percentage parameters are converted to fractions here)
    counts = remove_noise(counts, num_exp_genes / 100.0, num_exp_spots / 100.0,
                          min_expression=min_gene_expression)

    if len(counts.index) < 5 or len(counts.columns) < 10:
        sys.stdout.write("Error, too many spots/genes were filtered.\n")
        sys.exit(1)

    # Normalize data
    print("Computing per spot normalization...")
    center_size_factors = not use_adjusted_log
    norm_counts = normalize_data(counts, normalization,
                                 center=center_size_factors, adjusted_log=use_adjusted_log)

    # Keep top genes (variance or expressed)
    norm_counts = keep_top_genes(norm_counts, num_genes_keep / 100.0, criteria=top_genes_criteria)

    # Compute the expected number of clusters
    if num_clusters is None:
        num_clusters = computeNClusters(counts)
        print("Computation of number of clusters obtained {} clusters".format(num_clusters))

    if use_log_scale:
        print("Using pseudo-log counts log2(counts + 1)")
        norm_counts = np.log2(norm_counts + 1)

    print("Performing dimensionality reduction...")
    if "tSNE" in dimensionality:
        # NOTE the Scipy tsne seems buggy so we use the R one instead
        reduced_data = Rtsne(norm_counts, num_dimensions, theta=tsne_theta, perplexity=tsne_perplexity)
    elif "PCA" in dimensionality:
        # n_components = None, number of mle to estimate optimal
        decomp_model = PCA(n_components=num_dimensions, whiten=True, copy=True)
    elif "ICA" in dimensionality:
        decomp_model = FastICA(n_components=num_dimensions,
                               algorithm='parallel', whiten=True,
                               fun='logcosh', w_init=None, random_state=None)
    elif "SPCA" in dimensionality:
        decomp_model = SparsePCA(n_components=num_dimensions, alpha=1)
    else:
        sys.stderr.write("Error, incorrect dimensionality reduction method\n")
        sys.exit(1)

    # Only the non-tSNE models need an explicit fit_transform; Rtsne above
    # already returned the reduced coordinates.
    if not "tSNE" in dimensionality:
        # Perform dimensionality reduction, outputs a bunch of 2D/3D coordinates
        reduced_data = decomp_model.fit_transform(norm_counts)

    print("Performing clustering...")
    # Do clustering of the dimensionality reduced coordinates
    if "KMeans" in clustering:
        labels = KMeans(init='k-means++',
                        n_clusters=num_clusters,
                        n_init=10).fit_predict(reduced_data)
    elif "Hierarchical" in clustering:
        labels = AgglomerativeClustering(n_clusters=num_clusters,
                                         affinity='euclidean',
                                         linkage='ward').fit_predict(reduced_data)
    elif "DBSCAN" in clustering:
        labels = DBSCAN(eps=0.5, min_samples=5,
                        metric='euclidean', n_jobs=-1).fit_predict(reduced_data)
    elif "Gaussian" in clustering:
        gm = GaussianMixture(n_components=num_clusters,
                             covariance_type='full').fit(reduced_data)
        labels = gm.predict(reduced_data)
    else:
        sys.stderr.write("Error, incorrect clustering method\n")
        sys.exit(1)

    # Check if there are -1 in the labels and that the number of labels is correct
    # (DBSCAN marks noise points with -1)
    if -1 in labels or len(labels) != len(norm_counts.index):
        sys.stderr.write("Error, something went wrong in the clustering..\n")
        sys.exit(1)

    # We do not want zeroes in the labels
    if 0 in labels: labels = labels + 1

    # Compute a color_label based on the RGB representation of the
    # 2D/3D dimensionality reduced coordinates
    labels_colors = list()
    x_max = max(reduced_data[:,0])
    x_min = min(reduced_data[:,0])
    y_max = max(reduced_data[:,1])
    y_min = min(reduced_data[:,1])
    x_p = reduced_data[:,0]
    y_p = reduced_data[:,1]
    z_p = y_p
    if num_dimensions == 3:
        z_p = reduced_data[:,2]
        z_max = max(reduced_data[:,2])
        z_min = min(reduced_data[:,2])
    for x,y,z in zip(x_p,y_p,z_p):
        # Map each coordinate linearly into [0,1] to get an RGB triple.
        r = linear_conv(x, x_min, x_max, 0.0, 1.0)
        g = linear_conv(y, y_min, y_max, 0.0, 1.0)
        b = linear_conv(z, z_min, z_max, 0.0, 1.0) if num_dimensions == 3 else 1.0
        labels_colors.append((r,g,b))

    # Write the spots and their classes to a file (one writer per dataset)
    file_writers = [open(os.path.join(outdir,
                                      "{}_clusters.tsv".format(
                                          os.path.splitext(os.path.basename(name))[0])),"w")
                    for name in counts_table_files]

    # Write the coordinates and the label/class that they belong to
    spot_plot_data = defaultdict(lambda: [[],[],[],[]])
    for i, spot in enumerate(norm_counts.index):
        # Spot ids look like "<dataset-index>[_tag]_<x>x<y>".
        tokens = spot.split("x")
        assert(len(tokens) == 2)
        y = float(tokens[1])
        tokens2 = tokens[0].split("_")
        # This is to account for the cases where the spots already contain a tag (separated by "_")
        if len(tokens2) == 3:
            x = float(tokens2[2])
        elif len(tokens2) == 2:
            x = float(tokens2[1])
        else:
            sys.stderr.write("Error, the spots in the input data have "
                             "the wrong format {}\n.".format(spot))
            sys.exit(1)
        index = int(tokens2[0])
        spot_plot_data[index][0].append(x)
        spot_plot_data[index][1].append(y)
        spot_plot_data[index][2].append(labels[i])
        spot_plot_data[index][3].append(labels_colors[i])
        # This is to account for the cases where the spots already contain a tag (separated by "_")
        if len(tokens2) == 3:
            spot_str = "{}_{}x{}".format(tokens2[1],x,y)
        else:
            spot_str = "{}x{}".format(x,y)
        file_writers[index].write("{0}\t{1}\n".format(spot_str, labels[i]))

    # Close the files
    for file_writer in file_writers:
        file_writer.close()

    print("Generating plots...")

    # Plot the clustered spots with the class color
    if num_dimensions == 3:
        scatter_plot3d(x_points=reduced_data[:,0],
                       y_points=reduced_data[:,1],
                       z_points=reduced_data[:,2],
                       colors=labels,
                       output=os.path.join(outdir,"computed_clusters.pdf"),
                       title='Computed classes',
                       alpha=1.0,
                       size=20)
        with open(os.path.join(outdir,"computed_clusters_3D.tsv"), "w") as filehandler:
            for x,y,z,l in zip(reduced_data[:,0],
                               reduced_data[:,1],
                               reduced_data[:,2],
                               labels):
                filehandler.write("{}\t{}\t{}\t{}\n".format(x,y,z,l))
    else:
        scatter_plot(x_points=reduced_data[:,0],
                     y_points=reduced_data[:,1],
                     colors=labels,
                     output=os.path.join(outdir,"computed_clusters.pdf"),
                     title='Computed classes',
                     alpha=1.0,
                     size=20)
        with open(os.path.join(outdir,"computed_clusters_2D.tsv"), "w") as filehandler:
            for x,y,l in zip(reduced_data[:,0],
                             reduced_data[:,1],
                             labels):
                filehandler.write("{}\t{}\t{}\n".format(x,y,l))

    # Plot the spots with colors corresponding to the predicted class
    # Use the HE image as background if the image is given
    for i, name in enumerate(counts_table_files):
        # Get the list of spot coordinates and colors to plot for each dataset
        x_points = spot_plot_data[i][0]
        y_points = spot_plot_data[i][1]
        colors_classes = spot_plot_data[i][2]
        colors_dimensionality = spot_plot_data[i][3]
        # Retrieve alignment matrix and image if any
        # NOTE(review): "len(image_files) >= i" looks like an off-by-one
        # (index i is valid only when len > i); harmless today because the
        # earlier checks force equal lengths -- confirm and tighten.
        image = image_files[i] if image_files is not None \
                and len(image_files) >= i else None
        alignment = alignment_files[i] if alignment_files is not None \
                    and len(alignment_files) >= i else None
        # alignment_matrix will be identity if alignment file is None
        alignment_matrix = parseAlignmentMatrix(alignment)
        # Actually plot the data
        scatter_plot(x_points=x_points,
                     y_points=y_points,
                     colors=colors_classes,
                     output=os.path.join(outdir,
                                         "{}_clusters.pdf".format(
                                             os.path.splitext(os.path.basename(name))[0])),
                     alignment=alignment_matrix,
                     cmap=None,
                     title=name,
                     xlabel='X',
                     ylabel='Y',
                     image=image,
                     alpha=1.0,
                     size=spot_size)
        if color_space_plots:
            scatter_plot(x_points=x_points,
                         y_points=y_points,
                         colors=colors_dimensionality,
                         output=os.path.join(outdir,
                                             "{}_color_space.pdf".format(
                                                 os.path.splitext(os.path.basename(name))[0])),
                         alignment=alignment_matrix,
                         cmap=plt.get_cmap("hsv"),
                         title=name,
                         xlabel='X',
                         ylabel='Y',
                         image=image,
                         alpha=1.0,
                         size=spot_size)
# Script entry point: build the CLI, parse arguments and delegate to main().
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawTextHelpFormatter)
    # --- input data and normalization ---
    parser.add_argument("--counts-table-files", required=True, nargs='+', type=str,
                        help="One or more matrices with gene counts per feature/spot (genes as columns)")
    parser.add_argument("--normalization", default="DESeq2", metavar="[STR]",
                        type=str,
                        choices=["RAW", "DESeq2", "DESeq2Linear", "DESeq2PseudoCount",
                                 "DESeq2SizeAdjusted", "REL", "TMM", "RLE", "Scran"],
                        help="Normalize the counts using:\n" \
                        "RAW = absolute counts\n" \
                        "DESeq2 = DESeq2::estimateSizeFactors(counts)\n" \
                        "DESeq2PseudoCount = DESeq2::estimateSizeFactors(counts + 1)\n" \
                        "DESeq2Linear = DESeq2::estimateSizeFactors(counts, linear=TRUE)\n" \
                        "DESeq2SizeAdjusted = DESeq2::estimateSizeFactors(counts + lib_size_factors)\n" \
                        "RLE = EdgeR RLE * lib_size\n" \
                        "TMM = EdgeR TMM * lib_size\n" \
                        "Scran = Deconvolution Sum Factors (Marioni et al)\n" \
                        "REL = Each gene count divided by the total count of its spot\n" \
                        "(default: %(default)s)")
    # --- filtering thresholds applied before dimensionality reduction ---
    parser.add_argument("--num-clusters", default=None, metavar="[INT]", type=int, choices=range(2, 16),
                        help="The number of clusters/regions expected to be found.\n" \
                        "If not given the number of clusters will be computed.\n" \
                        "Note that this parameter has no effect with DBSCAN clustering.")
    parser.add_argument("--num-exp-genes", default=1, metavar="[FLOAT]", type=float,
                        help="The percentage of number of expressed genes (>= --min-gene-expression) a spot\n" \
                        "must have to be kept from the distribution of all expressed genes (default: %(default)s)")
    parser.add_argument("--num-exp-spots", default=1, metavar="[FLOAT]", type=float,
                        help="The percentage of number of expressed spots a gene\n" \
                        "must have to be kept from the total number of spots (default: %(default)s)")
    parser.add_argument("--min-gene-expression", default=1, type=int, metavar="[INT]", choices=range(1, 50),
                        help="The minimum count (number of reads) a gene must have in a spot to be\n"
                        "considered expressed (default: %(default)s)")
    parser.add_argument("--num-genes-keep", default=20, metavar="[INT]", type=int, choices=range(0, 99),
                        help="The percentage of genes to discard from the distribution of all the genes\n" \
                        "across all the spots using the variance or the top highest expressed\n" \
                        "(see --top-genes-criteria)\n " \
                        "Low variance or low expressed will be discarded (default: %(default)s)")
    # --- clustering and dimensionality-reduction algorithm selection ---
    parser.add_argument("--clustering", default="KMeans", metavar="[STR]",
                        type=str, choices=["Hierarchical", "KMeans", "DBSCAN", "Gaussian"],
                        help="What clustering algorithm to use after the dimensionality reduction:\n" \
                        "Hierarchical = Hierarchical Clustering (Ward)\n" \
                        "KMeans = Suitable for small number of clusters\n" \
                        "DBSCAN = Number of clusters will be automatically inferred\n" \
                        "Gaussian = Gaussian Mixtures Model\n" \
                        "(default: %(default)s)")
    parser.add_argument("--dimensionality", default="tSNE", metavar="[STR]",
                        type=str, choices=["tSNE", "PCA", "ICA", "SPCA"],
                        help="What dimensionality reduction algorithm to use:\n" \
                        "tSNE = t-distributed stochastic neighbor embedding\n" \
                        "PCA = Principal Component Analysis\n" \
                        "ICA = Independent Component Analysis\n" \
                        "SPCA = Sparse Principal Component Analysis\n" \
                        "(default: %(default)s)")
    parser.add_argument("--use-log-scale", action="store_true", default=False,
                        help="Use log2(counts + 1) values in the dimensionality reduction step")
    # --- optional images and alignment for the output plots ---
    parser.add_argument("--alignment-files", default=None, nargs='+', type=str,
                        help="One or more tab delimited files containing and alignment matrix for the images as\n" \
                        "\t a11 a12 a13 a21 a22 a23 a31 a32 a33\n" \
                        "Only useful is the image has extra borders, for instance not cropped to the array corners\n" \
                        "or if you want the keep the original image size in the plots.")
    parser.add_argument("--image-files", default=None, nargs='+', type=str,
                        help="When provided the data will plotted on top of the image\n" \
                        "It can be one ore more, ideally one for each input dataset\n " \
                        "It is desirable that the image is cropped to the array\n" \
                        "corners otherwise an alignment file is needed")
    parser.add_argument("--num-dimensions", default=2, metavar="[INT]", type=int, choices=[2,3],
                        help="The number of dimensions to use in the dimensionality " \
                        "reduction (2 or 3). (default: %(default)s)")
    parser.add_argument("--spot-size", default=20, metavar="[INT]", type=int, choices=range(1, 100),
                        help="The size of the spots when generating the plots. (default: %(default)s)")
    parser.add_argument("--top-genes-criteria", default="Variance", metavar="[STR]",
                        type=str, choices=["Variance", "TopRanked"],
                        help="What criteria to use to keep top genes before doing\n" \
                        "the dimensionality reduction (Variance or TopRanked) (default: %(default)s)")
    parser.add_argument("--use-adjusted-log", action="store_true", default=False,
                        help="Use adjusted log normalized counts (R Scater::normalized())\n"
                        "in the dimensionality reduction step (recommended with SCRAN normalization)")
    parser.add_argument("--tsne-perplexity", default=30, metavar="[INT]", type=int, choices=range(5,500),
                        help="The value of the perplexity for the t-sne method. (default: %(default)s)")
    parser.add_argument("--tsne-theta", default=0.5, metavar="[FLOAT]", type=float,
                        help="The value of theta for the t-sne method. (default: %(default)s)")
    parser.add_argument("--outdir", default=None, help="Path to output dir")
    parser.add_argument("--color-space-plots", action="store_true", default=False,
                        help="Generate also plots using the representation in color space of the\n" \
                        "dimensionality reduced coordinates")
    args = parser.parse_args()
    # Forward every parsed option positionally; the order here must match
    # main()'s parameter order.
    main(args.counts_table_files,
         args.normalization,
         args.num_clusters,
         args.num_exp_genes,
         args.num_exp_spots,
         args.min_gene_expression,
         args.num_genes_keep,
         args.clustering,
         args.dimensionality,
         args.use_log_scale,
         args.alignment_files,
         args.image_files,
         args.num_dimensions,
         args.spot_size,
         args.top_genes_criteria,
         args.outdir,
         args.use_adjusted_log,
         args.tsne_perplexity,
         args.tsne_theta,
         args.color_space_plots)
|
SpatialTranscriptomicsResearch/st_analysis
|
scripts/unsupervised.py
|
Python
|
mit
| 21,532
|
[
"Gaussian"
] |
005c517c15bd6eaeeebb47ffba63e8dbcb48fafb9dc3745b9e03029df6d05825
|
import math
import numpy as np
from .utils.io import saveswc
from collections import Counter
from random import gauss
from random import random
from random import randrange
from scipy.spatial.distance import cdist
class SWC(object):
    """In-memory SWC representation of a traced neuron.

    ``self._data`` is an N x 8 table: the 7 standard SWC columns
    (id, node type, x, y, z, radius, parent id) plus an extra 8-th column
    holding the online tracing confidence.
    """

    def __init__(self, soma=None):
        # Row 0 is either an all-zero placeholder or the soma node when given.
        self._data = np.zeros((1, 8))
        if soma:
            self._data[0, :] = np.asarray([0, 1, soma.centroid[0], soma.centroid[
                1], soma.centroid[2], soma.radius, -1, 1])

    def add(self, swc_nodes):
        """Append pre-built SWC rows (N x 8) to the table.

        Bug fix: ``np.vstack`` returns a new array, so its result must be
        assigned back -- previously the result was discarded and this
        method was a silent no-op.
        """
        self._data = np.vstack((self._data, swc_nodes))

    def add_branch(self, branch, pidx=None, random_color=True):
        '''
        Add a branch to swc.
        Note: This swc is special with N X 8 shape. The 8-th column is the online confidence
        '''
        if random_color:
            rand_node_type = randrange(256)

        new_branch = np.zeros((len(branch.pts), 8))
        id_start = 1 if self._data.shape[
            0] == 1 else self._data[:, 0].max() + 1

        for i in range(len(branch.pts)):
            p, r, c = branch.pts[i], branch.radius[i], branch.conf[i]
            id = id_start + i
            # 3 for basal dendrite; 4 for apical dendrite;
            # However now we cannot differentiate them automatically
            nodetype = 3

            if i == len(branch.pts) - 1:  # The end of this branch
                pid = self._data[pidx, 0] if pidx is not None else -2
                # Bug fix: 'pid is not -2' compared a numpy scalar by
                # identity, which is always True; compare by value instead.
                if pid != -2 and pid != 0 and self._data.shape[0] != 1:
                    # Its connected node is fork point
                    self._data[self._data[:, 0] == pid, 1] = 5
            else:
                pid = id_start + i + 1
                if i == 0:
                    nodetype = 6  # Endpoint

            assert(pid != id)
            new_branch[i] = np.asarray([
                id, rand_node_type
                if random_color else nodetype, p[0], p[1], p[2], r, pid, c])

        # Check if the tail of this branch should be connected to an
        # existing, still-dangling node
        tail = new_branch[0]
        matched, minidx = self.match(tail[2:5], tail[5])
        # Bug fix: 'is -2' on a numpy scalar is always False, so dangling
        # tails were never reconnected; compare by value instead.
        if matched and self._data[minidx, 6] == -2:
            self._data[minidx, 6] = tail[0]

        self._data = np.vstack((self._data, new_branch))

    def _prune_leaves(self):
        """Drop terminal branches shorter than 4x the mean node radius."""
        # Count how many children each node id has
        childctr = Counter(self._data[:, 6])
        # Leaves are ids that never appear as somebody's parent
        leafidlist = [id for id in self._data[:, 0]
                      if id not in self._data[:, 6]]
        id2dump = []
        rmean = self._data[:, 5].mean()  # Mean radius

        for leafid in leafidlist:  # Iterate each leaf node
            nodeid = leafid
            branch = []
            while True:  # Walk towards the root to extract the leaf branch
                node = self._data[self._data[:, 0] == nodeid, :].flatten()
                if node.size == 0:
                    break
                branch.append(node)
                parentid = node[6]
                # Bug fix: 'is not 1' relied on CPython small-int identity
                # caching; use a value comparison.
                if childctr[parentid] != 1:
                    break  # merged / unconnected
                nodeid = parentid

            # Get the geometric length of the leaf branch
            leaflen = sum([
                np.linalg.norm(branch[i][2:5] - branch[i - 1][2:5])
                for i in range(1, len(branch))
            ])

            # Prune if the leaf branch is too short.
            # NOTE(review): the original comment also mentioned a confidence
            # threshold, but only the length is actually checked here.
            if leaflen <= 4 * rmean:
                id2dump.extend([node[0] for node in branch])

        # Only keep the swc nodes not in the dump id list
        cutted = []
        for nodeidx in range(self._data.shape[0]):
            if self._data[nodeidx, 0] not in id2dump:
                cutted.append(self._data[nodeidx, :])
        cutted = np.squeeze(np.dstack(cutted)).T
        self._data = cutted

    def _prune_unreached(self):
        '''
        Only keep the largest connected component
        '''
        swcdict = {}
        for n in self._data:  # Hash all the swc nodes
            swcdict[n[0]] = Node(n[0])

        # Try to join all the unconnected branches at first
        for i, n in enumerate(self._data):
            if n[6] not in swcdict:
                # Try to match it to a spatially close node
                matched, midx = self.match(n[2:5], n[5])
                if matched:
                    self._data[i, 6] = self._data[midx, 0]

        # Add mutual links for all nodes
        for n in self._data:
            id = n[0]
            pid = n[6]
            if pid >= 0:
                swcdict[id].add_link(swcdict[pid])

        groups = connected_components(set(swcdict.values()))
        lenlist = [len(g) for g in groups]
        maxidx = lenlist.index(max(lenlist))
        set2keep = groups[maxidx]
        id2keep = [n.id for n in set2keep]
        self._data = self._data[
            np.in1d(self._data[:, 0], np.asarray(id2keep)), :]

    def prune(self):
        """Remove disconnected fragments, then short terminal branches."""
        self._prune_unreached()
        self._prune_leaves()

    def reset(self, crop_region, zoom_factor):
        '''
        Pad and rescale swc back to the original space
        '''
        tswc = self._data.copy()
        if zoom_factor != 1.:  # Rescale coordinates back to original space
            tswc[:, 2:5] *= 1. / zoom_factor

        # Pad the swc back with the crop-region offsets
        tswc[:, 2] += crop_region[0, 0]
        tswc[:, 3] += crop_region[1, 0]
        tswc[:, 4] += crop_region[2, 0]
        self._data = tswc

    def get_id(self, idx):
        """Return the SWC id stored at table row ``idx``."""
        return self._data[idx, 0]

    def match(self, pos, radius):
        '''
        Find the closest node to ``pos``. The pair matches when either ball
        (of ``radius`` or of the node's own radius) covers the other centre.
        Returns (matched, row_index); row_index is -2 when the table is empty.
        '''
        nodes = self._data[:, 2:5]
        distlist = np.squeeze(cdist(pos.reshape(1, 3), nodes))
        if distlist.size == 0:
            return False, -2
        minidx = distlist.argmin()
        minnode = self._data[minidx, 2:5]

        # See if either of them can cover each other with a ball of their own
        # radius
        mindist = np.linalg.norm(pos - minnode)
        return radius > mindist or self._data[minidx, 5] > mindist, minidx

    def size(self):
        """Number of rows (nodes) currently stored."""
        return self._data.shape[0]

    def save(self, fname):
        """Write the reconstruction to ``fname`` in SWC format."""
        saveswc(fname, self._data)

    def get_array(self):
        """Return only the 7 standard SWC columns (confidence dropped)."""
        return self._data[:, :7]

    def view(self):
        """Render the reconstruction in an interactive 3D viewer."""
        from rivuletpy.utils.rendering3 import Viewer3, Line3

        # Compute the center of mass so the model is centred in the view
        center = self._data[:, 2:5].mean(axis=0)
        translated = self._data[:, 2:5] - \
            np.tile(center, (self._data.shape[0], 1))

        # Init viewer
        viewer = Viewer3(800, 800, 800)
        viewer.set_bounds(self._data[:, 2].min(), self._data[:, 2].max(),
                          self._data[:, 3].min(), self._data[:, 3].max(),
                          self._data[:, 4].min(), self._data[:, 4].max())
        lid = self._data[:, 0]
        line_color = [random(), random(), random()]
        # NOTE(review): column -1 is the confidence column in this 8-column
        # table while the parent id lives in column -2; the use of -1 below
        # looks like an off-by-one -- confirm against the viewer before
        # changing.
        for i in range(self._data.shape[0]):
            # Change color if its a bifurcation
            if (self._data[i, 0] == self._data[:, -1]).sum() > 1:
                line_color = [random(), random(), random()]

            # Draw a line between this node and its parent
            if i < self._data.shape[0] - 1 and self._data[i, 0] == self._data[i + 1, -1]:
                l = Line3(translated[i, :], translated[i + 1, :])
                l.set_color(*line_color)
                viewer.add_geom(l)
            else:
                pid = self._data[i, -1]
                pidx = np.argwhere(pid == lid).flatten()
                if len(pidx) == 1:
                    l = Line3(translated[i, :], translated[pidx, :].flatten())
                    l.set_color(*line_color)
                    viewer.add_geom(l)
        while(True):
            try:
                viewer.render(return_rgb_array=False)
            except KeyboardInterrupt:
                break

    def push_nodes_with_binary(self, b, step_ratio=0.1, niter=0):
        '''
        Push the nodes towards the centerline using the boundaries of the
        binary image ``b``. Each of the ``niter`` passes moves a node by at
        most half of its own radius.
        '''
        lid = list(self._data[:, 0])
        lpid = list(self._data[:, -2])
        t_data = self._data.copy()
        # NOTE(review): the filter 'p == t_data[i, 0]' selects self-parented
        # rows and does not depend on 'pid', so every key maps to the same
        # list; 'p == pid' (children of pid) was probably intended -- verify
        # before changing.
        children_idx = {pid: [i for i, p in enumerate(
            lpid) if p == t_data[i, 0]] for pid in lpid}

        for _ in range(niter):
            for i in range(t_data.shape[0]):
                pid, radius, (x, y, z) = int(
                    t_data[i, -2]), t_data[i, -3], t_data[i, 2:5]
                cidx = children_idx[pid]
                if pid != i and pid in lid and len(cidx) <= 1:
                    px, py, pz = t_data[t_data[:, 0] == pid, 2:5][0]
                    vnorm = norm_vec(np.asarray([x - px, y - py, z - pz]))
                    if len(cidx) == 1:
                        cx, cy, cz = t_data[cidx[0], 2:5]
                        vnorm = (
                            vnorm + norm_vec(np.asarray([cx - x, cy - y, cz - z]))) / 2

                    if all([v == 0 for v in vnorm]):
                        continue

                    pt = np.asarray([x, y, z])
                    p_vectors = get_perpendicular_vectors(
                        pt, vnorm)
                    p_distances = [get_distance_to_boundary(
                        pt, pvec, b) for pvec in p_vectors]
                    dx, dy, dz = np.sum(
                        [pv * pd for pv, pd in zip(p_vectors, p_distances)], 0)

                    # Constrain the displacement by the node radii
                    tx = x + dx * step_ratio
                    ty = y + dy * step_ratio
                    tz = z + dz * step_ratio
                    dist = ((tx - self._data[i, 2]) ** 2 +
                            (ty - self._data[i, 3]) ** 2 +
                            (tz - self._data[i, 4]) ** 2) ** 0.5
                    if dist <= radius / 2:
                        t_data[i, 2] = tx
                        t_data[i, 3] = ty
                        t_data[i, 4] = tz
        self._data = t_data
def get_distance_to_boundary(pt, vec, b):
    '''
    March from pt along vec in unit steps; return the Euclidean distance
    travelled before the voxel ahead in the binary image b is non-positive.
    '''
    cur = pt.copy()
    while True:
        ahead = cur + vec
        ix, iy, iz = (math.floor(c) for c in ahead)
        if b[ix, iy, iz] <= 0:
            # Hit the boundary: distance from the start to the last
            # in-bounds position
            return ((cur - pt) ** 2).sum() ** 0.5
        cur = ahead
def norm_vec(vec):
    '''Return vec scaled to unit Euclidean length.'''
    length = (vec ** 2).sum() ** 0.5
    return vec / length
def get_perpendicular_vectors(pt, vec):
    '''
    Return four vectors perpendicular to vec (two opposite pairs).
    The pt argument is unused but kept for interface compatibility.
    '''
    first = perpendicular_vector(vec)
    second = perpendicular_vector(vec, first)
    return first, -first, second, -second
def make_rand_vector3d():
    '''Sample a uniformly distributed random unit vector in 3D.'''
    raw = [gauss(0, 1) for _ in range(3)]
    length = sum(c * c for c in raw) ** .5
    return [c / length for c in raw]
def perpendicular_vector(v, vr=None):
    '''Cross v with vr (or with a random unit vector when vr is None).'''
    if vr is None:
        vr = make_rand_vector3d()
    return np.cross(v, vr)
def get_subtree_nodeids(swc, node):
    '''
    Recursively collect the ids of node and all of its descendants.
    Column 0 of swc holds the id, column 6 the parent id.
    A leaf returns its own id; an internal node returns an array of ids.
    '''
    child_rows = np.argwhere(swc[:, 6] == node[0])
    if child_rows.size == 0:
        # Leaf: the subtree is just this node
        return node[0]
    collected = np.array([])
    for row in child_rows:
        child_ids = get_subtree_nodeids(swc, swc[row, :].squeeze())
        collected = np.hstack((collected, child_ids, node[0]))
    return collected
class Node(object):
    '''Lightweight graph node: an id plus an undirected set of neighbours.'''

    def __init__(self, id):
        self.__id = id
        self.__neighbors = set()

    @property
    def id(self):
        # Read-only identifier
        return self.__id

    @property
    def links(self):
        # Defensive copy so callers cannot mutate the internal set
        return set(self.__neighbors)

    def add_link(self, other):
        # Register the edge symmetrically on both endpoints
        self.__neighbors.add(other)
        other.__neighbors.add(self)
def connected_components(nodes):
    '''
    Partition the given nodes into connected components (breadth-first).
    Each node must expose a ``links`` attribute returning its neighbours.
    Returns a list of sets; ordering of the list is arbitrary.
    Reference: https://breakingcode.wordpress.com/2013/04/08/finding-connected-components-in-a-graph/
    '''
    remaining = set(nodes)  # work on a private copy
    groups = []
    while remaining:
        # Seed a new component from any unvisited node
        seed = remaining.pop()
        component = {seed}
        frontier = [seed]
        # BFS until the component stops growing
        while frontier:
            current = frontier.pop(0)
            fresh = current.links
            fresh.difference_update(component)      # drop already-visited
            remaining.difference_update(fresh)      # mark as visited globally
            component.update(fresh)
            frontier.extend(fresh)
        groups.append(component)
    return groups
|
RivuletStudio/rivuletpy
|
rivuletpy/swc.py
|
Python
|
bsd-3-clause
| 13,080
|
[
"VisIt"
] |
edad5d08003204b7d493ea0de7b06ed96678b3236ff3ece476b156b39caa47af
|
"""Window manager which controls any pop up windows from MPF. Used to display
game information, status, tests, keyboard-to-switch mapping, on screen DMD,
etc."""
# window.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
try:
import pygame
import pygame.locals
except ImportError:
pass
import version
from mpf.system.timing import Timing
from mpf.media_controller.core.display import MPFDisplay
class WindowManager(MPFDisplay):
    """Parent class for the Pygame-based on screen Window Manager in MPF.
    There is only one Window Manager per machine. It's used for lots of things,
    including displaying information about the game, an on-screen DMD, and for
    capturing key events which are translated to switches.

    Args:
        machine: The main machine object; its ``config['window']`` section
            (if present) configures this window.
    """
    def __init__(self, machine):
        # move some of this to parent class
        if 'window' in machine.config:
            self.config = machine.config['window']
        else:
            self.config = dict()
        self.depth = 24
        self.palette = None
        super(WindowManager, self).__init__(machine, self.config)
        self.name = 'window'
        self.log = logging.getLogger("Window")
        self.log.debug("Loading the Window Manager")
        # NOTE(review): the window config is read a second time here (via
        # self.machine after super().__init__) -- apparently redundant with
        # the read above; confirm whether MPFDisplay mutates it.
        if 'window' in self.machine.config:
            self.config = self.machine.config['window']
        else:
            self.config = dict()
        self.slides = list()
        self.current_slide = None
        # Fill in a default for every window setting the config omits
        if 'title' not in self.config:
            self.config['title'] = ('Mission Pinball Framework v' +
                                    version.__version__)
        if 'resizable' not in self.config:
            self.config['resizable'] = True
        if 'fullscreen' not in self.config:
            self.config['fullscreen'] = False
        if 'frame' not in self.config:
            self.config['frame'] = True
        if 'quit_on_close' not in self.config:
            self.config['quit_on_close'] = True
        if 'background_image' not in self.config:
            self.config['background_image'] = None
        if 'fps' not in self.config or self.config['fps'] == 'auto':
            self.config['fps'] = Timing.HZ
        self._setup_window()
        # Window elements need the rest of the machine, so defer loading
        # them until the init_phase_5 event fires.
        self.machine.events.add_handler('init_phase_5',
                                        self._load_window_elements)
        # Block all Pygame events from being reported. We'll selectively enable
        # them one-by-one as event handlers are registered.
        pygame.event.set_allowed(None)
    def _initialize(self):
        super(WindowManager, self)._initialize()
        #self._load_window_elements()
    def _load_window_elements(self):
        # Loads the window elements from the config and builds the default
        # slide from them. Mutates the first element's settings in place.
        if 'elements' not in self.config:
            return
        self.config['elements'][0]['persist_slide'] = True
        self.config['elements'][0]['slide'] = 'default_window_slide'
        self.machine.display.slide_builder.build_slide(
            settings=self.config['elements'],
            display='window',
            priority=1)
    def _setup_window(self):
        # Sets up the Pygame window based on the settings in the config file.
        flags = 0
        if self.config['resizable']:
            flags = flags | pygame.locals.RESIZABLE
        if not self.config['frame']:
            flags = flags | pygame.locals.NOFRAME
        if self.config['fullscreen']:
            flags = flags | pygame.locals.FULLSCREEN
        # Create the actual Pygame window
        self.window = pygame.display.set_mode((self.width,
                                               self.height),
                                              flags)
        # Set the caption
        pygame.display.set_caption(self.config['title'])
    def update(self):
        """Updates the display. Called from a timer based on this display's fps
        settings.
        """
        super(WindowManager, self).update()
        # Blit the current slide's surface and flip the Pygame display
        self.window.blit(self.current_slide.surface, (0, 0))
        pygame.display.flip()
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
spierepf/mpf
|
mpf/media_controller/core/window.py
|
Python
|
mit
| 5,357
|
[
"Brian"
] |
482eb85b2ecced3228e151de8417647badbdabae87ee01c3d6dea4183287bedf
|
import json
from octopus.server.server_command import ServerCommand
SERVER_HOST = 'localhost'
SERVER_PORT = '2480'


class PluginExecutor(object):
    '''Thin client that asks the Octopus server to execute a plugin.'''

    def __init__(self, server_host=SERVER_HOST, server_port=SERVER_PORT):
        # ServerCommand wraps the transport to the Octopus server
        self.command = ServerCommand(server_host, server_port)

    def execute(self, pluginname, classname, settings=None):
        '''Serialize the request as JSON and submit it for execution.'''
        payload = json.dumps(
            {"plugin": pluginname, "class": classname, "settings": settings})
        return self.post(payload)

    def post(self, json_data):
        # Submit the JSON body to the server's plugin-execution endpoint
        return self.command.execute_post_command("/executeplugin/", json_data)
|
octopus-platform/octopus
|
python/octopus-tools/octopus/server/plugin_executor.py
|
Python
|
lgpl-3.0
| 615
|
[
"Octopus"
] |
8a9f0a9f903f00f787f1e74fac93e35758180600f5e000f72ce90a001d2d6d36
|
# -*- coding: utf-8 -*-
#
# biodoop-blast documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 17 14:32:07 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, datetime
FIRST_RELEASE_YEAR = 2009
CURRENT_YEAR = datetime.datetime.now().year
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'biodoop-blast'
# Year range auto-updates: CURRENT_YEAR is computed at build time above.
copyright = u'%d-%d, CRS4' % (FIRST_RELEASE_YEAR, CURRENT_YEAR)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Imported here (not at the top) so the docs always reflect the installed
# package's version string.
import bl.blast
# The short X.Y version.
version = ".".join(bl.blast.__version__.split(".", 2)[:2])
# The full version, including alpha/beta/rc tags.
release = bl.blast.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'biodoopdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'biodoop-blast.tex', u'biodoop-blast Documentation',
u'CRS4', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'biodoop-blast', u'biodoop-blast Documentation',
[u'CRS4'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
|
crs4/biodoop-blast
|
docs/conf.py
|
Python
|
gpl-3.0
| 7,383
|
[
"BLAST"
] |
58d71c488dbcf64df1889791c0cf0e993a4de41599e3da7a112e9c102b435a41
|
from __future__ import print_function, division
from sympy import (Add, ceiling, divisors, factor_list, factorint, floor, igcd,
ilcm, Integer, integer_nthroot, isprime, Matrix, Mul, nextprime,
perfect_power, Poly, S, sign, solve, sqrt, Subs, Symbol, symbols, sympify,
Wild)
from sympy.core.function import _mexpand
from sympy.simplify.radsimp import rad_rationalize
from sympy.utilities import default_sort_key, numbered_symbols
from sympy.core.numbers import igcdex
from sympy.ntheory.residue_ntheory import sqrt_mod
from sympy.core.compatibility import range
from sympy.core.relational import Eq
from sympy.solvers.solvers import check_assumptions
__all__ = ['base_solution_linear', 'classify_diop', 'cornacchia', 'descent',
'diop_bf_DN', 'diop_DN', 'diop_general_pythagorean',
'diop_general_sum_of_squares', 'diop_linear', 'diop_quadratic',
'diop_solve', 'diop_ternary_quadratic', 'diophantine', 'find_DN',
'partition', 'square_factor', 'sum_of_four_squares',
'sum_of_three_squares', 'transformation_to_DN']
def diophantine(eq, param=symbols("t", integer=True)):
    """
    Simplify the solution procedure of diophantine equation ``eq`` by
    converting it into a product of terms which should equal zero.
    For example, when solving, `x^2 - y^2 = 0` this is treated as
    `(x + y)(x - y) = 0` and `x+y = 0` and `x-y = 0` are solved independently
    and combined. Each term is solved by calling ``diop_solve()``.
    Output of ``diophantine()`` is a set of tuples. Each tuple represents a
    solution of the input equation. In a tuple, solution for each variable is
    listed according to the alphabetic order of input variables. i.e. if we have
    an equation with two variables `a` and `b`, first element of the tuple will
    give the solution for `a` and the second element will give the solution for
    `b`.
    Usage
    =====
    ``diophantine(eq, t)``: Solve the diophantine equation ``eq``.
    ``t`` is the parameter to be used by ``diop_solve()``.
    Details
    =======
    ``eq`` should be an expression which is assumed to be zero.
    ``t`` is the parameter to be used in the solution.
    Examples
    ========
    >>> from sympy.solvers.diophantine import diophantine
    >>> from sympy.abc import x, y, z
    >>> diophantine(x**2 - y**2)
    set([(-t_0, -t_0), (t_0, -t_0)])
    #>>> diophantine(x*(2*x + 3*y - z))
    #set([(0, n1, n2), (3*t - z, -2*t + z, z)])
    #>>> diophantine(x**2 + 3*x*y + 4*x)
    #set([(0, n1), (3*t - 4, -t)])
    See Also
    ========
    diop_solve()
    """
    if isinstance(eq, Eq):
        eq = eq.lhs - eq.rhs

    eq = Poly(eq).as_expr()
    if not eq.is_polynomial() or eq.is_number:
        raise TypeError("Equation input format not supported")

    # Solve each irreducible factor independently and merge the results
    var = list(eq.expand(force=True).free_symbols)
    var.sort(key=default_sort_key)

    terms = factor_list(eq)[1]
    sols = set()

    for term in terms:
        base = term[0]
        var_t, jnk, eq_type = classify_diop(base)
        solution = diop_solve(base, param)

        if eq_type in ["linear", "homogeneous_ternary_quadratic", "general_pythagorean"]:
            # These types yield a single parametrized solution tuple.
            # Compute the merged tuple once (previously computed twice).
            merged = merge_solution(var, var_t, solution)
            if merged != ():
                sols.add(merged)
        elif eq_type in ["binary_quadratic", "general_sum_of_squares", "univariate"]:
            # These types yield a set of solution tuples
            for sol in solution:
                merged = merge_solution(var, var_t, sol)
                if merged != ():
                    sols.add(merged)

    return sols
def merge_solution(var, var_t, solution):
    """
    This is used to construct the full solution from the solutions of sub
    equations.
    For example when solving the equation `(x - y)(x^2 + y^2 - z^2) = 0`,
    solutions for each of the equations `x-y = 0` and `x^2 + y^2 - z^2` are
    found independently. Solutions for `x - y = 0` are `(x, y) = (t, t)`. But
    we should introduce a value for z when we output the solution for the
    original equation. This function converts `(t, t)` into `(t, t, n_{1})`
    where `n_{1}` is an integer parameter.

    Returns the empty tuple when the partial solution contains ``None`` or
    when a value conflicts with its symbol's assumptions.
    """
    l = []

    if None in solution:
        return ()

    solution = iter(solution)
    # Bug fix: the assumption keyword must be lowercase 'integer' -- the
    # capitalized 'Integer=True' created a non-standard assumption and the
    # filler symbols did not actually carry the integer assumption.
    params = numbered_symbols("n", integer=True, start=1)
    for v in var:
        if v in var_t:
            l.append(next(solution))
        else:
            # Variable absent from this factor: fill with a fresh parameter
            l.append(next(params))

    for val, symb in zip(l, var):
        # check_assumptions returns False on a definite conflict and None
        # when undecidable; only a definite conflict rejects the solution.
        if check_assumptions(val, **symb.assumptions0) is False:
            return tuple()

    return tuple(l)
def diop_solve(eq, param=symbols("t", integer=True)):
    """
    Solves the diophantine equation ``eq`` without factoring it first.

    Similar to ``diophantine()`` but does not try to factor ``eq``.  The
    type of the equation is determined with ``classify_diop()`` and the
    matching specialised solver is then invoked.

    Usage
    =====

    ``diop_solve(eq, t)``: solve the diophantine equation ``eq`` (assumed
    to equal zero) using ``t`` as the parameter in parametrized solutions.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_solve
    >>> from sympy.abc import x, y, z, w
    >>> diop_solve(2*x + 3*y - 5)
    (3*t_0 - 5, -2*t_0 + 5)
    >>> diop_solve(4*x + 3*y -4*z + 5)
    (t_0, -4*t_1 + 5, t_0 - 3*t_1 + 5)
    >>> diop_solve(x + 3*y - 4*z + w -6)
    (t_0, t_0 + t_1, -2*t_0 - 3*t_1 - 4*t_2 - 6, -t_0 - 2*t_1 - 3*t_2 - 6)
    >>> diop_solve(x**2 + y**2 - 5)
    set([(-2, -1), (-2, 1), (2, -1), (2, 1)])

    See Also
    ========

    diophantine()
    """
    var, coeff, eq_type = classify_diop(eq)

    if eq_type == "linear":
        return _diop_linear(var, coeff, param)
    elif eq_type == "binary_quadratic":
        return _diop_quadratic(var, coeff, param)
    elif eq_type == "homogeneous_ternary_quadratic":
        # A base solution is found first and then parametrized.
        x_0, y_0, z_0 = _diop_ternary_quadratic(var, coeff)
        return _parametrize_ternary_quadratic((x_0, y_0, z_0), var, coeff)
    elif eq_type == "general_pythagorean":
        return _diop_general_pythagorean(var, coeff, param)
    elif eq_type == "univariate":
        # Keep only the integer roots of the univariate polynomial.
        return set([(root,) for root in solve(eq) if isinstance(root, Integer)])
    elif eq_type == "general_sum_of_squares":
        return _diop_general_sum_of_squares(var, coeff)
def classify_diop(eq):
    """
    Helper routine used by diop_solve() to find the type of the ``eq`` etc.

    Returns a tuple containing the type of the diophantine equation along with
    the variables (free symbols) and their coefficients. Variables are returned
    as a list and coefficients are returned as a dict with the key being the
    respective term and the constant term is keyed to Integer(1). Type is an
    element in the set {"linear", "binary_quadratic", "general_pythagorean",
    "homogeneous_ternary_quadratic", "univariate", "general_sum_of_squares"}

    Usage
    =====

    ``classify_diop(eq)``: Return variables, coefficients and type of the
    ``eq``.

    Details
    =======

    ``eq`` should be an expression which is assumed to be zero.

    Examples
    ========

    >>> from sympy.solvers.diophantine import classify_diop
    >>> from sympy.abc import x, y, z, w, t
    >>> classify_diop(4*x + 6*y - 4)
    ([x, y], {1: -4, x: 4, y: 6}, 'linear')
    >>> classify_diop(x + 3*y -4*z + 5)
    ([x, y, z], {1: 5, x: 1, y: 3, z: -4}, 'linear')
    >>> classify_diop(x**2 + y**2 - x*y + x + 5)
    ([x, y], {1: 5, x: 1, x**2: 1, y: 0, y**2: 1, x*y: -1}, 'binary_quadratic')
    """
    eq = eq.expand(force=True)
    coeff = eq.as_coefficients_dict()
    diop_type = None

    var = []

    # Build the sorted variable list and a term -> coefficient mapping.
    if isinstance(eq, Symbol):
        var.append(eq)
        coeff[eq] = Integer(1)
    elif isinstance(eq, Mul) and Poly(eq).total_degree() == 1:
        var.append(eq.as_two_terms()[1])
        coeff[eq.as_two_terms()[1]] = Integer(eq.as_two_terms()[0])
    else:
        var = list(eq.free_symbols)
        var.sort(key=default_sort_key)
        coeff = dict([reversed(t.as_independent(*var)) for t in eq.args])

    for c in coeff:
        if not isinstance(coeff[c], Integer):
            raise TypeError("Coefficients should be Integers")

    if Poly(eq).total_degree() == 1:
        diop_type = "linear"
    elif len(var) == 1:
        diop_type = "univariate"
    elif Poly(eq).total_degree() == 2 and len(var) == 2:
        diop_type = "binary_quadratic"
        x, y = var[:2]

        if isinstance(eq, Mul):
            # eq is a single product like k*x*y; only the x*y term survives.
            coeff = {x**2: 0, x*y: eq.args[0], y**2: 0, x: 0, y: 0, Integer(1): 0}
        else:
            # Make sure every expected term has an entry, even if zero.
            for term in [x**2, y**2, x*y, x, y, Integer(1)]:
                if term not in coeff.keys():
                    coeff[term] = Integer(0)
    elif Poly(eq).total_degree() == 2 and len(var) == 3 and Integer(1) not in coeff.keys():
        # A degree-1 term in any variable makes the ternary quadratic
        # inhomogeneous; the for/else fires only if none is found.
        for v in var:
            if v in coeff.keys():
                diop_type = "inhomogeneous_ternary_quadratic"
                break
        else:
            diop_type = "homogeneous_ternary_quadratic"
            x, y, z = var[:3]

            for term in [x**2, y**2, z**2, x*y, y*z, x*z]:
                if term not in coeff.keys():
                    coeff[term] = Integer(0)
    # NOTE(review): this branch tests Poly(eq).degree() where the others use
    # total_degree(); for multivariate polynomials these can differ — confirm
    # whether total_degree() was intended here.
    elif Poly(eq).degree() == 2 and len(var) >= 3:
        for v in var:
            if v in coeff.keys():
                diop_type = "inhomogeneous_general_quadratic"
                break
        else:
            if Integer(1) in coeff.keys():
                constant_term = True
            else:
                constant_term = False

            # Detect any cross term u*v with u != v.
            non_square_degree_2_terms = False
            for v in var:
                for u in var:
                    if u != v and u*v in coeff.keys():
                        non_square_degree_2_terms = True
                        break
                if non_square_degree_2_terms:
                    break

            if constant_term and non_square_degree_2_terms:
                diop_type = "inhomogeneous_general_quadratic"
            elif constant_term and not non_square_degree_2_terms:
                # All square coefficients must be 1 for a sum of squares.
                for v in var:
                    if coeff[v**2] != 1:
                        break
                else:
                    diop_type = "general_sum_of_squares"
            elif not constant_term and non_square_degree_2_terms:
                diop_type = "homogeneous_general_quadratic"
            else:
                # Pythagorean form: all |coefficients| are perfect squares and
                # exactly one has opposite sign to the rest.
                coeff_sign_sum = 0
                for v in var:
                    if not isinstance(sqrt(abs(Integer(coeff[v**2]))), Integer):
                        break
                    coeff_sign_sum = coeff_sign_sum + sign(coeff[v**2])
                else:
                    if abs(coeff_sign_sum) == len(var) - 2 and not constant_term:
                        diop_type = "general_pythagorean"
    elif Poly(eq).total_degree() == 3 and len(var) == 2:
        x, y = var[:2]
        diop_type = "cubic_thue"

        for term in [x**3, x**2*y, x*y**2, y**3, Integer(1)]:
            if term not in coeff.keys():
                # BUGFIX: this previously read ``coeff[term] == Integer(0)``,
                # a no-op comparison (and a KeyError for a missing key is only
                # avoided because coeff is a defaultdict-like mapping); the
                # intent is clearly assignment, matching the other branches.
                coeff[term] = Integer(0)

    if diop_type is not None:
        return var, coeff, diop_type
    else:
        raise NotImplementedError("Still not implemented")
def diop_linear(eq, param=symbols("t", integer=True)):
    """
    Solves linear diophantine equations.

    A linear diophantine equation is an equation of the form
    `a_{1}x_{1} + a_{2}x_{2} + .. + a_{n}x_{n} = 0` where the `a_{i}` are
    integer constants and the `x_{i}` are integer variables.

    Usage
    =====

    ``diop_linear(eq)``: return a tuple of solutions to ``eq`` (assumed to
    equal zero), ordered like the sorted variables.  ``param`` is the base
    symbol used for solution parameters.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_linear
    >>> from sympy.abc import x, y, z, t
    >>> from sympy import Integer
    >>> diop_linear(2*x - 3*y - 5) #solves equation 2*x - 3*y -5 = 0
    (-3*t_0 - 5, -2*t_0 - 5)

    Here x = -3*t_0 - 5 and y = -2*t_0 - 5

    >>> diop_linear(2*x - 3*y - 4*z -3)
    (t_0, -6*t_0 - 4*t_1 + 3, 5*t_0 + 3*t_1 - 3)

    See Also
    ========

    diop_quadratic(), diop_ternary_quadratic(), diop_general_pythagorean(),
    diop_general_sum_of_squares()
    """
    var, coeff, diop_type = classify_diop(eq)
    # Only linear equations are handled here; anything else yields None.
    if diop_type != "linear":
        return None
    return _diop_linear(var, coeff, param)
def _diop_linear(var, coeff, param):
    """
    Solves diophantine equations of the form:

    a_0*x_0 + a_1*x_1 + ... + a_n*x_n == c

    Note that no solution exists if gcd(a_0, ..., a_n) doesn't divide c.
    ``var`` is the sorted variable list, ``coeff`` maps each term (and
    Integer(1) for the constant) to its integer coefficient, and ``param``
    is the base symbol for the solution parameters.
    """
    if len(var) == 0:
        return None

    if Integer(1) in coeff:
        #coeff[] is negated because input is of the form: ax + by + c == 0
        # but is used as: ax + by == -c
        c = -coeff[Integer(1)]
    else:
        c = 0

    # Some solutions will have multiple free variables in their solutions.
    # Pre-create one integer parameter symbol per variable: param_0, param_1, ...
    params = [str(param) + "_" + str(i) for i in range(len(var))]
    params = [symbols(p, integer=True) for p in params]

    # Single-variable case a*x == c is handled directly.
    if len(var) == 1:
        if coeff[var[0]] == 0:
            if c == 0:
                # 0 == 0: any integer works; return a free parameter.
                return tuple([params[0]])
            else:
                return tuple([None])
        elif divisible(c, coeff[var[0]]):
            return tuple([c/coeff[var[0]]])
        else:
            return tuple([None])

    """
    base_solution_linear() can solve diophantine equations of the form:

    a*x + b*y == c

    We break down multivariate linear diophantine equations into a
    series of bivariate linear diophantine equations which can then
    be solved individually by base_solution_linear().

    Consider the following:

    a_0*x_0 + a_1*x_1 + a_2*x_2 == c

    which can be re-written as:

    a_0*x_0 + g_0*y_0 == c

    where

    g_0 == gcd(a_1, a_2)

    and

    y == (a_1*x_1)/g_0 + (a_2*x_2)/g_0

    This leaves us with two binary linear diophantine equations.
    For the first equation:

    a == a_0
    b == g_0
    c == c

    For the second:

    a == a_1/g_0
    b == a_2/g_0
    c == the solution we find for y_0 in the first equation.

    The arrays A and B are the arrays of integers used for
    'a' and 'b' in each of the n-1 bivariate equations we solve.
    """
    A = [coeff[v] for v in var]
    B = []
    if len(var) > 2:
        # Fold the trailing coefficients into running gcds, right to left,
        # producing the b-coefficients for the chain of bivariate equations.
        B.append(igcd(A[-2], A[-1]))
        A[-2] = A[-2] // B[0]
        A[-1] = A[-1] // B[0]

        for i in range(len(A) - 3, 0, -1):
            gcd = igcd(B[0], A[i])
            B[0] = B[0] // gcd
            A[i] = A[i] // gcd
            B.insert(0, gcd)
    B.append(A[-1])

    """
    Consider the trivariate linear equation:

    4*x_0 + 6*x_1 + 3*x_2 == 2

    This can be re-written as:

    4*x_0 + 3*y_0 == 2

    where

    y_0 == 2*x_1 + x_2
    (Note that gcd(3, 6) == 3)

    The complete integral solution to this equation is:

    x_0 == 2 + 3*t_0
    y_0 == -2 - 4*t_0

    where 't_0' is any integer.

    Now that we have a solution for 'x_0', find 'x_1' and 'x_2':

    2*x_1 + x_2 == -2 - 4*t_0

    We can then solve for '-2' and '-4' independently,
    and combine the results:

    2*x_1a + x_2a == -2
    x_1a == 0 + t_0
    x_2a == -2 - 2*t_0

    2*x_1b + x_2b == -4*t_0
    x_1b == 0*t_0 + t_1
    x_2b == -4*t_0 - 2*t_1

    ==>

    x_1 == t_0 + t_1
    x_2 == -2 - 6*t_0 - 2*t_1

    where 't_0' and 't_1' are any integers.

    Note that:

    4*(2 + 3*t_0) + 6*(t_0 + t_1) + 3*(-2 - 6*t_0 - 2*t_1) == 2

    for any integral values of 't_0', 't_1'; as required.

    This method is generalised for many variables, below.
    """
    solutions = []
    no_solution = tuple([None] * len(var))

    for i in range(len(B)):
        tot_x, tot_y = 0, 0

        # The right-hand side c may itself be a linear combination of the
        # parameters introduced by earlier iterations; solve for each of its
        # terms independently and sum the results (superposition).
        if isinstance(c, Add):
            # example: 5 + t_0 + 3*t_1
            args = c.args
        else: # c is a Mul, a Symbol, or an Integer
            args = [c]

        for j in range(len(args)):
            if isinstance(args[j], Mul):
                # example: 3*t_1 -> k = 3
                k = args[j].as_two_terms()[0]
                param_index = params.index(args[j].as_two_terms()[1]) + 1
            elif isinstance(args[j], Symbol):
                # example: t_0 -> k = 1
                k = 1
                param_index = params.index(args[j]) + 1
            else: #args[j] is an Integer
                # example: 5 -> k = 5
                k = args[j]
                param_index = 0

            sol_x, sol_y = base_solution_linear(k, A[i], B[i], params[param_index])

            if isinstance(args[j], Mul) or isinstance(args[j], Symbol):
                # Re-attach the parameter that multiplied this term of c.
                if isinstance(sol_x, Add):
                    sol_x = sol_x.args[0]*params[param_index - 1] + sol_x.args[1]
                elif isinstance(sol_x, Integer):
                    sol_x = sol_x*params[param_index - 1]

                if isinstance(sol_y, Add):
                    sol_y = sol_y.args[0]*params[param_index - 1] + sol_y.args[1]
                elif isinstance(sol_y, Integer):
                    sol_y = sol_y*params[param_index - 1]
            else:
                # A None from the constant term means the bivariate equation
                # is unsolvable, hence the whole system is.
                if sol_x is None or sol_y is None:
                    return no_solution

            tot_x += sol_x
            tot_y += sol_y

        solutions.append(tot_x)
        # tot_y becomes the right-hand side for the next bivariate equation.
        c = tot_y

    solutions.append(tot_y)
    return tuple(solutions)
def base_solution_linear(c, a, b, t=None):
    """
    Return the base solution for a linear diophantine equation with two
    variables.

    Used by ``diop_linear()`` to find the base solution of a linear
    Diophantine equation `ax + by = c`. If ``t`` is given then the
    parametrized solution is returned; otherwise a single particular
    solution.  ``(None, None)`` is returned when no solution exists.

    Usage
    =====

    ``base_solution_linear(c, a, b, t)``: ``a``, ``b``, ``c`` are coefficients
    in `ax + by = c` and ``t`` is the parameter to be used in the solution.

    Examples
    ========

    >>> from sympy.solvers.diophantine import base_solution_linear
    >>> from sympy.abc import t
    >>> base_solution_linear(5, 2, 3) # equation 2*x + 3*y = 5
    (-5, 5)
    >>> base_solution_linear(0, 5, 7) # equation 5*x + 7*y = 0
    (0, 0)
    >>> base_solution_linear(5, 2, 3, t) # equation 2*x + 3*y = 5
    (3*t - 5, -2*t + 5)
    >>> base_solution_linear(0, 5, 7, t) # equation 5*x + 7*y = 0
    (7*t, -5*t)
    """
    # Divide through by the gcd of all three coefficients; this does not
    # change the solution set.
    d = igcd(a, igcd(b, c))

    a = a // d
    b = b // d
    c = c // d

    if c == 0:
        # Homogeneous case: the full solution family is (b*t, -a*t).
        # PEP 8: compare against None with ``is not``, not ``!=``.
        if t is not None:
            return (b*t, -a*t)
        else:
            return (S.Zero, S.Zero)
    else:
        # Bezout coefficients for |a|, |b|; restore the signs afterwards.
        x0, y0, d = extended_euclid(int(abs(a)), int(abs(b)))

        x0 = x0 * sign(a)
        y0 = y0 * sign(b)

        if divisible(c, d):
            if t is not None:
                return (c*x0 + b*t, c*y0 - a*t)
            else:
                return (Integer(c*x0), Integer(c*y0))
        else:
            # gcd(a, b) does not divide c: no integer solutions exist.
            return (None, None)
def extended_euclid(a, b):
    """
    For given ``a``, ``b`` returns a tuple containing integers `x`, `y` and
    `d` such that `ax + by = d`. Here `d = \gcd(a, b)`.

    This implementation iterates the Euclidean recurrence instead of
    recursing, carrying the Bezout coefficients alongside the remainders.

    Usage
    =====

    ``extended_euclid(a, b)``: returns `x`, `y` and `\gcd(a, b)`.

    Details
    =======

    ``a`` Any instance of Integer.
    ``b`` Any instance of Integer.

    Examples
    ========

    >>> from sympy.solvers.diophantine import extended_euclid
    >>> extended_euclid(4, 6)
    (-1, 1, 2)
    >>> extended_euclid(3, 5)
    (2, -1, 1)
    """
    # Invariants: x*a_orig + y*b_orig == a and
    #             next_x*a_orig + next_y*b_orig == b.
    x, next_x = 1, 0
    y, next_y = 0, 1
    while b != 0:
        q = a // b
        x, next_x = next_x, x - q*next_x
        y, next_y = next_y, y - q*next_y
        a, b = b, a - q*b
    return (x, y, a)
def divisible(a, b):
    """
    Returns `True` if ``a`` is divisible by ``b`` and `False` otherwise.
    """
    # Direct modulo test instead of the previous roundabout
    # ``igcd(a, b) == abs(b)`` formulation; behavior is identical,
    # including the b == 0 edge case (igcd(a, 0) == |a| equals |0| only
    # when a == 0).
    a, b = int(a), int(b)
    if b == 0:
        return a == 0
    return a % b == 0
def diop_quadratic(eq, param=symbols("t", integer=True)):
    """
    Solves quadratic diophantine equations, i.e. equations of the form
    `Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0`.

    Returns a set of tuples `(x, y)` containing the solutions. If there
    are no solutions then `(None, None)` is returned. ``param`` is the
    symbol used to parametrize infinite solution families.

    Details
    =======

    ``eq`` should be an expression which is assumed to be zero.

    Examples
    ========

    >>> from sympy.abc import x, y, t
    >>> from sympy.solvers.diophantine import diop_quadratic
    >>> diop_quadratic(x**2 + y**2 + 2*x + 2*y + 2, t)
    set([(-1, -1)])

    References
    ==========

    .. [1] Methods to solve Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0,[online],
          Available: http://www.alpertron.com.ar/METHODS.HTM
    .. [2] Solving the equation ax^2+ bxy + cy^2 + dx + ey + f= 0, [online],
          Available: http://www.jpr2718.org/ax2p.pdf

    See Also
    ========

    diop_linear(), diop_ternary_quadratic(), diop_general_sum_of_squares(),
    diop_general_pythagorean()
    """
    var, coeff, diop_type = classify_diop(eq)
    # Only binary quadratics are handled here; anything else yields None.
    if diop_type != "binary_quadratic":
        return None
    return _diop_quadratic(var, coeff, param)
def _diop_quadratic(var, coeff, t):
    """
    Core solver for the binary quadratic
    `Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0`.

    Dispatches on the discriminant `B^2 - 4AC` into five classical cases
    (linear, simple-hyperbolic, parabolic, square discriminant, Pell-type);
    see the per-case comments and the references in ``diop_quadratic()``.
    Returns a set of solution tuples; ``t`` is the solution parameter.
    """
    x, y = var[:2]

    # Ensure every expected term has a (possibly zero) coefficient entry.
    for term in [x**2, y**2, x*y, x, y, Integer(1)]:
        if term not in coeff.keys():
            coeff[term] = Integer(0)

    A = coeff[x**2]
    B = coeff[x*y]
    C = coeff[y**2]
    D = coeff[x]
    E = coeff[y]
    F = coeff[Integer(1)]

    # Normalize by the gcd of all coefficients.
    d = igcd(A, igcd(B, igcd(C, igcd(D, igcd(E, F)))))
    A = A // d
    B = B // d
    C = C // d
    D = D // d
    E = E // d
    F = F // d

    # (1) Linear case: A = B = C = 0 ==> considered under linear diophantine equations

    # (2) Simple-Hyperbolic case:A = C = 0, B != 0
    # In this case equation can be converted to (Bx + E)(By + D) = DE - BF
    # We consider two cases; DE - BF = 0 and DE - BF != 0
    # More details, http://www.alpertron.com.ar/METHODS.HTM#SHyperb

    l = set([])

    if A == 0 and C == 0 and B != 0:

        if D*E - B*F == 0:
            # Degenerate product: each factor may vanish independently.
            if divisible(int(E), int(B)):
                l.add((-E/B, t))
            if divisible(int(D), int(B)):
                l.add((t, -D/B))
        else:
            # Enumerate (positive and negative) divisors d of DE - BF and
            # solve Bx + E = d, By + D = (DE - BF)/d.
            div = divisors(D*E - B*F)
            div = div + [-term for term in div]

            for d in div:
                if divisible(int(d - E), int(B)):
                    x0 = (d - E) // B
                    if divisible(int(D*E - B*F), int(d)):
                        if divisible(int((D*E - B*F)// d - D), int(B)):
                            y0 = ((D*E - B*F) // d - D) // B
                            l.add((x0, y0))

    # (3) Parabolic case: B**2 - 4*A*C = 0
    # There are two subcases to be considered in this case.
    # sqrt(c)D - sqrt(a)E = 0 and sqrt(c)D - sqrt(a)E != 0
    # More Details, http://www.alpertron.com.ar/METHODS.HTM#Parabol

    elif B**2 - 4*A*C == 0:

        if A == 0:
            # Swap x and y so the x**2 coefficient is nonzero, solve, and
            # swap the solution components back.
            s = _diop_quadratic([y, x], coeff, t)
            for soln in s:
                l.add((soln[1], soln[0]))

        else:
            g = igcd(A, C)
            g = abs(g) * sign(A)
            a = A // g
            b = B // g
            c = C // g
            e = sign(B/A)

            if e*sqrt(c)*D - sqrt(a)*E == 0:
                # Equation reduces to a quadratic in the single combination
                # sqrt(a)*x + e*sqrt(c)*y; each integer root gives a line of
                # solutions via the linear solver.
                z = symbols("z", real=True)
                roots = solve(sqrt(a)*g*z**2 + D*z + sqrt(a)*F)

                for root in roots:
                    if isinstance(root, Integer):
                        l.add((diop_solve(sqrt(a)*x + e*sqrt(c)*y - root)[0], diop_solve(sqrt(a)*x + e*sqrt(c)*y - root)[1]))

            elif isinstance(e*sqrt(c)*D - sqrt(a)*E, Integer):
                # Parametric families in t, one per residue z0 for which the
                # divisibility condition below holds.
                solve_x = lambda u: e*sqrt(c)*g*(sqrt(a)*E - e*sqrt(c)*D)*t**2 - (E + 2*e*sqrt(c)*g*u)*t\
                    - (e*sqrt(c)*g*u**2 + E*u + e*sqrt(c)*F) // (e*sqrt(c)*D - sqrt(a)*E)

                solve_y = lambda u: sqrt(a)*g*(e*sqrt(c)*D - sqrt(a)*E)*t**2 + (D + 2*sqrt(a)*g*u)*t \
                    + (sqrt(a)*g*u**2 + D*u + sqrt(a)*F) // (e*sqrt(c)*D - sqrt(a)*E)

                for z0 in range(0, abs(e*sqrt(c)*D - sqrt(a)*E)):
                    if divisible(sqrt(a)*g*z0**2 + D*z0 + sqrt(a)*F, e*sqrt(c)*D - sqrt(a)*E):
                        l.add((solve_x(z0), solve_y(z0)))

    # (4) Method used when B**2 - 4*A*C is a square, is descibed in p. 6 of the below paper
    # by John P. Robertson.
    # http://www.jpr2718.org/ax2p.pdf

    elif isinstance(sqrt(B**2 - 4*A*C), Integer):
        if A != 0:
            r = sqrt(B**2 - 4*A*C)
            u, v = symbols("u, v", integer=True)
            # Substitution reduces the problem to a linear equation in u, v.
            eq = _mexpand(4*A*r*u*v + 4*A*D*(B*v + r*u + r*v - B*u) + 2*A*4*A*E*(u - v) + 4*A*r*4*A*F)

            sol = diop_solve(eq, t)
            sol = list(sol)

            for solution in sol:
                s0 = solution[0]
                t0 = solution[1]

                x_0 = S(B*t0 + r*s0 + r*t0 - B*s0)/(4*A*r)
                y_0 = S(s0 - t0)/(2*r)

                if isinstance(s0, Symbol) or isinstance(t0, Symbol):
                    # Parametric solution: keep it only if a congruence class
                    # of the parameter makes both coordinates integral.
                    if check_param(x_0, y_0, 4*A*r, t) != (None, None):
                        l.add((check_param(x_0, y_0, 4*A*r, t)[0], check_param(x_0, y_0, 4*A*r, t)[1]))

                elif divisible(B*t0 + r*s0 + r*t0 - B*s0, 4*A*r):
                    if divisible(s0 - t0, 2*r):
                        if is_solution_quad(var, coeff, x_0, y_0):
                            l.add((x_0, y_0))
        else:
            # A == 0: swap x and y (C must carry the square term) and solve.
            _var = var
            _var[0], _var[1] = _var[1], _var[0] # Interchange x and y
            s = _diop_quadratic(_var, coeff, t)

            while len(s) > 0:
                sol = s.pop()
                l.add((sol[1], sol[0]))

    # (5) B**2 - 4*A*C > 0 and B**2 - 4*A*C not a square or B**2 - 4*A*C < 0

    else:
        # Transform to X**2 - D*Y**2 = N and map Pell solutions back.
        P, Q = _transformation_to_DN(var, coeff)
        D, N = _find_DN(var, coeff)
        solns_pell = diop_DN(D, N)

        if D < 0:
            # Finitely many solutions; try all sign combinations.
            for solution in solns_pell:
                for X_i in [-solution[0], solution[0]]:
                    for Y_i in [-solution[1], solution[1]]:
                        x_i, y_i = (P*Matrix([X_i, Y_i]) + Q)[0], (P*Matrix([X_i, Y_i]) + Q)[1]
                        if isinstance(x_i, Integer) and isinstance(y_i, Integer):
                            l.add((x_i, y_i))

        else:
            # In this case equation can be transformed into a Pell equation
            #n = symbols("n", integer=True)

            fund_solns = solns_pell
            solns_pell = set(fund_solns)
            for X, Y in fund_solns:
                solns_pell.add((-X, -Y))

            a = diop_DN(D, 1)
            T = a[0][0]
            U = a[0][1]

            if (isinstance(P[0], Integer) and isinstance(P[1], Integer) and isinstance(P[2], Integer)
                and isinstance(P[3], Integer) and isinstance(Q[0], Integer) and isinstance(Q[1], Integer)):
                # Integral transformation: every Pell solution maps back to
                # an integral (x, y); emit the t-parametrized family.
                for sol in solns_pell:

                    r = sol[0]
                    s = sol[1]
                    x_n = S((r + s*sqrt(D))*(T + U*sqrt(D))**t + (r - s*sqrt(D))*(T - U*sqrt(D))**t)/2
                    y_n = S((r + s*sqrt(D))*(T + U*sqrt(D))**t - (r - s*sqrt(D))*(T - U*sqrt(D))**t)/(2*sqrt(D))

                    x_n = _mexpand(x_n)
                    y_n = _mexpand(y_n)
                    x_n, y_n = (P*Matrix([x_n, y_n]) + Q)[0], (P*Matrix([x_n, y_n]) + Q)[1]

                    l.add((x_n, y_n))

            else:
                # Rational transformation: restrict to the subgroup of Pell
                # solutions congruent to the identity modulo the lcm L of the
                # transformation denominators, so the mapped values are integral.
                L = ilcm(S(P[0]).q, ilcm(S(P[1]).q, ilcm(S(P[2]).q,
                    ilcm(S(P[3]).q, ilcm(S(Q[0]).q, S(Q[1]).q)))))

                k = 1

                T_k = T
                U_k = U

                while (T_k - 1) % L != 0 or U_k % L != 0:
                    T_k, U_k = T_k*T + D*U_k*U, T_k*U + U_k*T
                    k += 1

                for X, Y in solns_pell:

                    for i in range(k):
                        Z = P*Matrix([X, Y]) + Q
                        x, y = Z[0], Z[1]

                        if isinstance(x, Integer) and isinstance(y, Integer):
                            Xt = S((X + sqrt(D)*Y)*(T_k + sqrt(D)*U_k)**t +
                                (X - sqrt(D)*Y)*(T_k - sqrt(D)*U_k)**t)/ 2
                            Yt = S((X + sqrt(D)*Y)*(T_k + sqrt(D)*U_k)**t -
                                (X - sqrt(D)*Y)*(T_k - sqrt(D)*U_k)**t)/ (2*sqrt(D))
                            Zt = P*Matrix([Xt, Yt]) + Q
                            l.add((Zt[0], Zt[1]))

                        # Advance to the next solution in this class.
                        X, Y = X*T + D*U*Y, X*U + Y*T

    return l
def is_solution_quad(var, coeff, u, v):
    """
    Check whether `(u, v)` is a solution to the quadratic binary diophantine
    equation described by the variable list ``var`` and the coefficient
    dictionary ``coeff``.

    Not intended for use by normal users.
    """
    x, y = var[:2]
    # Rebuild the quadratic from its coefficient dictionary.
    eq = sum(coeff[term]*term for term in (x**2, x*y, y**2, x, y)) + coeff[Integer(1)]
    # Substitute (u, v) and test for exact cancellation.
    return _mexpand(Subs(eq, (x, y), (u, v)).doit()) == 0
def diop_DN(D, N, t=symbols("t", integer=True)):
    """
    Solves the equation `x^2 - Dy^2 = N`.

    Mainly concerned in the case `D > 0, D` is not a perfect square, which is
    the same as generalized Pell equation. To solve the generalized Pell
    equation this function Uses LMM algorithm. Refer [1]_ for more details on
    the algorithm.

    Returns one solution for each class of the solutions. Other solutions of
    the class can be constructed according to the values of ``D`` and ``N``.
    Returns a list containing the solution tuples `(x, y)`.

    Usage
    =====

    ``diop_DN(D, N, t)``: D and N are integers as in `x^2 - Dy^2 = N` and
    ``t`` is the parameter to be used in the solutions.

    Details
    =======

    ``D`` and ``N`` correspond to D and N in the equation.
    ``t`` is the parameter to be used in the solutions.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_DN
    >>> diop_DN(13, -4) # Solves equation x**2 - 13*y**2 = -4
    [(3, 1), (393, 109), (36, 10)]

    The output can be interpreted as follows: There are three fundamental
    solutions to the equation `x^2 - 13y^2 = -4` given by (3, 1), (393, 109)
    and (36, 10). Each tuple is in the form (x, y), i. e solution (3, 1) means
    that `x = 3` and `y = 1`.

    >>> diop_DN(986, 1) # Solves equation x**2 - 986*y**2 = 1
    [(49299, 1570)]

    See Also
    ========

    find_DN(), diop_bf_DN()

    References
    ==========

    .. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
        Robertson, July 31, 2004, Pages 16 - 17. [online], Available:
        http://www.jpr2718.org/pell.pdf
    """
    if D < 0:
        # Negative D: an ellipse, so only finitely many solutions.
        if N == 0:
            return [(S.Zero, S.Zero)]
        elif N < 0:
            return []
        elif N > 0:
            # Factor out square divisors of N and use Cornacchia's method on
            # each primitive subproblem x**2 + |D|*y**2 = N / divisor**2.
            d = divisors(square_factor(N))
            sol = []

            for divisor in d:
                sols = cornacchia(1, -D, N // divisor**2)
                if sols:
                    for x, y in sols:
                        sol.append((divisor*x, divisor*y))

            return sol

    elif D == 0:
        # Equation degenerates to x**2 = N; y is free.
        if N < 0 or not isinstance(sqrt(N), Integer):
            return []
        if N == 0:
            return [(S.Zero, t)]
        if isinstance(sqrt(N), Integer):
            return [(sqrt(N), t)]

    else: # D > 0
        if isinstance(sqrt(D), Integer):
            # Perfect-square D: x**2 - D*y**2 factors over the integers.
            r = sqrt(D)

            if N == 0:
                return [(r*t, t)]
            else:
                sol = []

                # Bounded search over y using the factorization bound.
                for y in range(floor(sign(N)*(N - 1)/(2*r)) + 1):
                    if isinstance(sqrt(D*y**2 + N), Integer):
                        sol.append((sqrt(D*y**2 + N), y))

                return sol
        else:
            if N == 0:
                return [(S.Zero, S.Zero)]

            elif abs(N) == 1:
                # Classic Pell / negative Pell via the continued fraction of
                # sqrt(D): expand PQa until the period ends (a == 2*a_0).
                pqa = PQa(0, 1, D)
                a_0 = floor(sqrt(D))
                l = 0
                G = []
                B = []

                for i in pqa:

                    a = i[2]
                    G.append(i[5])
                    B.append(i[4])

                    if l != 0 and a == 2*a_0:
                        break
                    l = l + 1

                if l % 2 == 1:
                    # Odd period: x**2 - D*y**2 = -1 is solvable.

                    if N == -1:
                        x = G[l-1]
                        y = B[l-1]
                    else:
                        # For N = 1 continue to the end of the doubled period.
                        count = l
                        while count < 2*l - 1:
                            i = next(pqa)

                            G.append(i[5])
                            B.append(i[4])

                            count = count + 1

                        x = G[count]
                        y = B[count]
                else:
                    # Even period: only N = 1 is solvable.
                    if N == 1:
                        x = G[l-1]
                        y = B[l-1]
                    else:
                        return []

                return [(x, y)]

            else:
                # General N (LMM): for each f with f**2 | N, solve the
                # reduced equation with m = N / f**2 via the PQa expansion of
                # (z + sqrt(D)) / |m| for each square root z of D mod |m|.
                fs = []
                sol = []
                div = divisors(N)

                for d in div:
                    if divisible(N, d**2):
                        fs.append(d)

                for f in fs:
                    m = N // f**2

                    zs = sqrt_mod(D, abs(m), True)
                    zs = [i for i in zs if i <= abs(m) // 2 ]

                    if abs(m) != 2:
                        zs = zs + [-i for i in zs]
                        if S.Zero in zs:
                            zs.remove(S.Zero) # Remove duplicate zero

                    for z in zs:

                        pqa = PQa(z, abs(m), D)
                        l = 0
                        G = []
                        B = []

                        for i in pqa:

                            a = i[2]
                            G.append(i[5])
                            B.append(i[4])

                            if l != 0 and abs(i[1]) == 1:
                                r = G[l-1]
                                s = B[l-1]

                                if r**2 - D*s**2 == m:
                                    sol.append((f*r, f*s))

                                elif diop_DN(D, -1) != []:
                                    # Compose with a solution of the negative
                                    # Pell equation to flip the sign of m.
                                    a = diop_DN(D, -1)
                                    sol.append((f*(r*a[0][0] + a[0][1]*s*D), f*(r*a[0][1] + s*a[0][0])))

                                break

                            l = l + 1
                            # Abandon this z once the expansion has run a full
                            # (aperiodic + periodic) length without success.
                            if l == length(z, abs(m), D):
                                break

                return sol
def cornacchia(a, b, m):
    """
    Solves `ax^2 + by^2 = m` where `\gcd(a, b) = 1 = gcd(a, m)` and `a, b > 0`.

    Uses the algorithm due to Cornacchia. The method only finds primitive
    solutions, i.e. ones with `\gcd(x, y) = 1`. So this method can't be used to
    find the solutions of `x^2 + y^2 = 20` since the only solution to former is
    `(x,y) = (4, 2)` and it is not primitive. When ` a = b = 1`, only the
    solutions with `x \geq y` are found. For more details, see the References.

    Examples
    ========

    >>> from sympy.solvers.diophantine import cornacchia
    >>> cornacchia(2, 3, 35) # equation 2x**2 + 3y**2 = 35
    set([(2, 3), (4, 1)])
    >>> cornacchia(1, 1, 25) # equation x**2 + y**2 = 25
    set([(4, 3)])

    References
    ===========

    .. [1] A. Nitaj, "L'algorithme de Cornacchia"
    .. [2] Solving the diophantine equation ax**2 + by**2 = m by Cornacchia's
        method, [online], Available:
        http://www.numbertheory.org/php/cornacchia.html
    """
    sols = set([])

    # Candidate start values: square roots of -b/a modulo m.
    a1 = igcdex(a, m)[0]
    v = sqrt_mod(-b*a1, m, True)
    if v is None:
        return None

    if not isinstance(v, list):
        v = [v]

    for t in v:
        # Only the representatives in the upper half-range are used.
        if t < m // 2:
            continue

        u, r = t, m

        # Run the Euclidean remainder sequence until a*r**2 drops below m.
        while True:
            u, r = r, u % r
            if a*r**2 < m:
                break

        m1 = m - a*r**2

        # (r, s) is a solution when (m - a*r**2)/b is a perfect square.
        if m1 % b == 0:
            m1 = m1 // b
            if isinstance(sqrt(m1), Integer):
                s = sqrt(m1)
                sols.add((int(r), int(s)))

    return sols
def PQa(P_0, Q_0, D):
    """
    Returns useful information needed to solve the Pell equation.

    There are six sequences of integers defined related to the continued
    fraction representation of `\\frac{P + \sqrt{D}}{Q}`, namely {`P_{i}`},
    {`Q_{i}`}, {`a_{i}`},{`A_{i}`}, {`B_{i}`}, {`G_{i}`}. ``PQa()`` Returns
    these values as a 6-tuple in the same order as mentioned above. Refer [1]_
    for more detailed information.

    Usage
    =====

    ``PQa(P_0, Q_0, D)``: ``P_0``, ``Q_0`` and ``D`` are integers corresponding
    to `P_{0}`, `Q_{0}` and `D` in the continued fraction
    `\\frac{P_{0} + \sqrt{D}}{Q_{0}}`.
    Also it's assumed that `P_{0}^2 == D mod(|Q_{0}|)` and `D` is square free.

    Examples
    ========

    >>> from sympy.solvers.diophantine import PQa
    >>> pqa = PQa(13, 4, 5) # (13 + sqrt(5))/4
    >>> next(pqa) # (P_0, Q_0, a_0, A_0, B_0, G_0)
    (13, 4, 3, 3, 1, -1)
    >>> next(pqa) # (P_1, Q_1, a_1, A_1, B_1, G_1)
    (-1, 1, 1, 4, 1, 3)

    References
    ==========

    .. [1] Solving the generalized Pell equation x^2 - Dy^2 = N, John P.
        Robertson, July 31, 2004, Pages 4 - 8. http://www.jpr2718.org/pell.pdf
    """
    # Seed values for the three linear recurrences (indices i-1 and i-2).
    A_i_2 = B_i_1 = 0
    A_i_1 = B_i_2 = 1

    G_i_2 = -P_0
    G_i_1 = Q_0

    P_i = P_0
    Q_i = Q_0

    # Infinite generator: the caller decides when to stop consuming terms.
    while(1):
        # a_i is the i-th partial quotient of (P_i + sqrt(D))/Q_i; sympy's
        # floor keeps the arithmetic exact.
        a_i = floor((P_i + sqrt(D))/Q_i)
        A_i = a_i*A_i_1 + A_i_2
        B_i = a_i*B_i_1 + B_i_2
        G_i = a_i*G_i_1 + G_i_2

        yield P_i, Q_i, a_i, A_i, B_i, G_i

        # Shift the recurrence windows forward.
        A_i_1, A_i_2 = A_i, A_i_1
        B_i_1, B_i_2 = B_i, B_i_1
        G_i_1, G_i_2 = G_i, G_i_1

        P_i = a_i*Q_i - P_i
        # NOTE(review): true division here relies on P_i already being a
        # sympy Integer (a_i is), so the result stays exact — confirm that
        # callers never reach this line with plain Python ints on both sides.
        Q_i = (D - P_i**2)/Q_i
def diop_bf_DN(D, N, t=symbols("t", integer=True)):
    """
    Uses brute force to solve the equation, `x^2 - Dy^2 = N`.

    Mainly concerned with the generalized Pell equation which is the case when
    `D > 0, D` is not a perfect square. For more information on the case refer
    [1]_. Let `(t, u)` be the minimal positive solution of the equation
    `x^2 - Dy^2 = 1`. Then this method requires
    `\sqrt{\\frac{\mid N \mid (t \pm 1)}{2D}}` to be small.

    Usage
    =====

    ``diop_bf_DN(D, N, t)``: ``D`` and ``N`` are coefficients in
    `x^2 - Dy^2 = N` and ``t`` is the parameter to be used in the solutions.

    Details
    =======

    ``D`` and ``N`` correspond to D and N in the equation.
    ``t`` is the parameter to be used in the solutions.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_bf_DN
    >>> diop_bf_DN(13, -4)
    [(3, 1), (-3, 1), (36, 10)]
    >>> diop_bf_DN(986, 1)
    [(49299, 1570)]

    See Also
    ========

    diop_DN()

    References
    ==========

    .. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
        Robertson, July 31, 2004, Page 15. http://www.jpr2718.org/pell.pdf
    """
    sol = []
    # (u, v) is the minimal positive solution of x**2 - D*y**2 = 1; only u is
    # used for the search bounds below.
    # NOTE(review): v is assigned but never used — candidate for cleanup.
    a = diop_DN(D, 1)
    u = a[0][0]
    v = a[0][1]

    if abs(N) == 1:
        return diop_DN(D, N)

    elif N > 1:
        # Search range for y derived from the bound |N|(u - 1)/(2D).
        L1 = 0
        L2 = floor(sqrt(S(N*(u - 1))/(2*D))) + 1

    elif N < -1:
        # For negative N, y must be at least sqrt(-N/D).
        L1 = ceiling(sqrt(S(-N)/D))
        L2 = floor(sqrt(S(-N*(u + 1))/(2*D))) + 1

    else:
        # N == 0: handle by the sign/shape of D directly.
        if D < 0:
            return [(S.Zero, S.Zero)]
        elif D == 0:
            return [(S.Zero, t)]
        else:
            if isinstance(sqrt(D), Integer):
                return [(sqrt(D)*t, t), (-sqrt(D)*t, t)]
            else:
                return [(S.Zero, S.Zero)]

    for y in range(L1, L2):
        if isinstance(sqrt(N + D*y**2), Integer):
            x = sqrt(N + D*y**2)
            sol.append((x, y))
            # Add (-x, y) only if it represents a different solution class.
            if not equivalent(x, y, -x, y, D, N):
                sol.append((-x, y))

    return sol
def equivalent(u, v, r, s, D, N):
    """
    Returns True if the solutions `(u, v)` and `(r, s)` of `x^2 - Dy^2 = N`
    belong to the same equivalence class and False otherwise.

    The two solutions fall in the same class iff `N` divides both
    `ur - Dvs` and `us - vr`; see reference [1]_.  No check is made that
    the pairs actually solve the equation — that is the caller's
    responsibility.

    Usage
    =====

    ``equivalent(u, v, r, s, D, N)``: `(u, v)` and `(r, s)` are two solutions
    of the equation `x^2 - Dy^2 = N` and all parameters involved are integers.

    Examples
    ========

    >>> from sympy.solvers.diophantine import equivalent
    >>> equivalent(18, 5, -18, -5, 13, -1)
    True
    >>> equivalent(3, 1, -18, 393, 109, -4)
    False

    References
    ==========

    .. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
        Robertson, July 31, 2004, Page 12. http://www.jpr2718.org/pell.pdf
    """
    return all(divisible(value, N) for value in (u*r - D*v*s, u*s - v*r))
def length(P, Q, D):
    """
    Returns the (length of aperiodic part + length of periodic part) of
    continued fraction representation of `\\frac{P + \sqrt{D}}{Q}`.

    It is important to remember that this does NOT return the length of the
    periodic part but the addition of the legths of the two parts as mentioned
    above.

    Usage
    =====

    ``length(P, Q, D)``: ``P``, ``Q`` and ``D`` are integers corresponding to
    the continued fraction `\\frac{P + \sqrt{D}}{Q}`.

    Details
    =======

    ``P``, ``D`` and ``Q`` corresponds to P, D and Q in the continued fraction,
    `\\frac{P + \sqrt{D}}{Q}`.

    Examples
    ========

    >>> from sympy.solvers.diophantine import length
    >>> length(-2 , 4, 5) # (-2 + sqrt(5))/4
    3
    >>> length(-5, 4, 17) # (-5 + sqrt(17))/4
    4
    """
    x = P + sqrt(D)
    y = Q

    x = sympify(x)
    # v records the complete quotients seen so far (for cycle detection);
    # res records the partial quotients (its length is the answer).
    v, res = [], []
    q = x/y

    if q < 0:
        # Handle a negative first quotient separately so that the main loop
        # below only ever sees non-negative complete quotients.
        v.append(q)
        res.append(floor(q))
        q = q - floor(q)
        num, den = rad_rationalize(1, q)
        q = num / den

    while 1:
        v.append(q)
        # int() truncates the exact sympy value; q is non-negative here so
        # this agrees with floor.
        a = int(q)
        res.append(a)

        # A rational tail terminates the expansion.
        if q == a:
            return len(res)

        num, den = rad_rationalize(1,(q - a))
        q = num / den

        # Revisiting a complete quotient means the periodic part has closed.
        if q in v:
            return len(res)
def transformation_to_DN(eq):
    """
    Transform the general quadratic `ax^2 + bxy + cy^2 + dx + ey + f = 0`
    into the easier form `X^2 - DY^2 = N`.

    Returns a tuple ``(A, B)`` where ``A`` is a 2 x 2 matrix and ``B`` is a
    2 x 1 matrix such that

    Transpose([x y]) = A * Transpose([X Y]) + B

    Substituting these expressions for `x` and `y` into ``eq`` and clearing
    denominators yields an equation of the form `X^2 - DY^2 = N`; see [1]_
    for details of the transformation.

    Usage
    =====

    ``transformation_to_DN(eq)``: where ``eq`` is the quadratic to be
    transformed.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.solvers.diophantine import transformation_to_DN
    >>> A, B = transformation_to_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
    >>> A
    Matrix([
    [1/26, 3/26],
    [   0, 1/13]])
    >>> B
    Matrix([
    [-6/13],
    [-4/13]])

    If only the final `(D, N)` pair is needed, use ``find_DN()`` instead.

    See Also
    ========

    find_DN()

    References
    ==========

    .. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
           John P.Robertson, May 8, 2003, Page 7 - 11.
           http://www.jpr2718.org/ax2p.pdf
    """
    var, coeff, eq_kind = classify_diop(eq)
    # Only binary quadratics can be transformed; anything else yields None.
    if eq_kind != "binary_quadratic":
        return None
    return _transformation_to_DN(var, coeff)
def _transformation_to_DN(var, coeff):
    """
    Recursive worker for ``transformation_to_DN()``.

    Successively eliminates the x*y term, then the x term, then the y term
    by rational substitutions, composing the corresponding transformation
    matrices on the way back up.  Returns ``(A, B)`` with
    Transpose([x y]) = A * Transpose([X Y]) + B.
    """
    x, y = var[:2]

    a = coeff[x**2]
    b = coeff[x*y]
    c = coeff[y**2]
    d = coeff[x]
    e = coeff[y]
    f = coeff[Integer(1)]

    # Normalize by the gcd of all the coefficients.
    g = igcd(a, igcd(b, igcd(c, igcd(d, igcd(e, f)))))
    a = a // g
    b = b // g
    c = c // g
    d = d // g
    e = e // g
    f = f // g

    X, Y = symbols("X, Y", integer=True)

    if b != Integer(0):
        # Eliminate the cross term: write 2a/b = B/C in lowest terms and
        # substitute x -> (X - C*Y)/B.
        B = (S(2*a)/b).p
        C = (S(2*a)/b).q
        A = (S(a)/B**2).p
        T = (S(a)/B**2).q

        # eq_1 = A*B*X**2 + B*(c*T - A*C**2)*Y**2 + d*T*X + (B*e*T - d*T*C)*Y + f*T*B
        coeff = {X**2: A*B, X*Y: 0, Y**2: B*(c*T - A*C**2), X: d*T, Y: B*e*T - d*T*C, Integer(1): f*T*B}

        A_0, B_0 = _transformation_to_DN([X, Y], coeff)

        return Matrix(2, 2, [S(1)/B, -S(C)/B, 0, 1])*A_0, Matrix(2, 2, [S(1)/B, -S(C)/B, 0, 1])*B_0

    else:
        if d != Integer(0):
            # Eliminate the linear x term by completing the square in x.
            B = (S(2*a)/d).p
            C = (S(2*a)/d).q
            A = (S(a)/B**2).p
            T = (S(a)/B**2).q

            # eq_2 = A*X**2 + c*T*Y**2 + e*T*Y + f*T - A*C**2
            coeff = {X**2: A, X*Y: 0, Y**2: c*T, X: 0, Y: e*T, Integer(1): f*T - A*C**2}

            A_0, B_0 = _transformation_to_DN([X, Y], coeff)

            return Matrix(2, 2, [S(1)/B, 0, 0, 1])*A_0, Matrix(2, 2, [S(1)/B, 0, 0, 1])*B_0 + Matrix([-S(C)/B, 0])

        else:
            if e != Integer(0):
                # Eliminate the linear y term by completing the square in y.
                B = (S(2*c)/e).p
                C = (S(2*c)/e).q
                A = (S(c)/B**2).p
                T = (S(c)/B**2).q

                # eq_3 = a*T*X**2 + A*Y**2 + f*T - A*C**2
                coeff = {X**2: a*T, X*Y: 0, Y**2: A, X: 0, Y: 0, Integer(1): f*T - A*C**2}

                A_0, B_0 = _transformation_to_DN([X, Y], coeff)

                return Matrix(2, 2, [1, 0, 0, S(1)/B])*A_0, Matrix(2, 2, [1, 0, 0, S(1)/B])*B_0 + Matrix([0, -S(C)/B])

            else:
                # Base case: a*x**2 + c*y**2 + f = 0 already has the desired
                # shape; only a scaling of x remains.
                # TODO: pre-simplification: Not necessary but may simplify
                # the equation.
                return Matrix(2, 2, [S(1)/a, 0, 0, 1]), Matrix([0, 0])
def find_DN(eq):
    """
    Return the tuple `(D, N)` of the simplified form `x^2 - Dy^2 = N`
    corresponding to the general binary quadratic
    `ax^2 + bxy + cy^2 + dx + ey + f = 0`.

    Solving the general quadratic is equivalent to solving `X^2 - DY^2 = N`
    and mapping the solutions back through the matrices returned by
    ``transformation_to_DN()``.

    Usage
    =====

    ``find_DN(eq)``: where ``eq`` is the quadratic to be transformed.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.solvers.diophantine import find_DN
    >>> find_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
    (13, -884)

    Interpretation of the output is that we get `X^2 -13Y^2 = -884` after
    transforming `x^2 - 3xy - y^2 - 2y + 1` using the transformation returned
    by ``transformation_to_DN()``.

    See Also
    ========

    transformation_to_DN()

    References
    ==========

    .. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
           John P.Robertson, May 8, 2003, Page 7 - 11.
           http://www.jpr2718.org/ax2p.pdf
    """
    variables, coefficients, eq_type = classify_diop(eq)

    # Only binary quadratics have a Pell-form reduction.
    if eq_type == "binary_quadratic":
        return _find_DN(variables, coefficients)
def _find_DN(var, coeff):
    # Substitute the transformation from _transformation_to_DN into the
    # original quadratic and read off D and N from X**2 - D*Y**2 = N.
    x, y = var[:2]
    X, Y = symbols("X, Y", integer=True)

    A, B = _transformation_to_DN(var, coeff)

    # u, v are x and y expressed in the new variables X, Y.
    u = (A*Matrix([X, Y]) + B)[0]
    v = (A*Matrix([X, Y]) + B)[1]
    eq = x**2*coeff[x**2] + x*y*coeff[x*y] + y**2*coeff[y**2] + x*coeff[x] + y*coeff[y] + coeff[Integer(1)]

    simplified = _mexpand(Subs(eq, (x, y), (u, v)).doit())

    coeff = dict([reversed(t.as_independent(*[X, Y])) for t in simplified.args])

    # Terms absent from the expansion have coefficient zero.
    for term in [X**2, Y**2, Integer(1)]:
        if term not in coeff.keys():
            coeff[term] = Integer(0)

    # Normalize so the X**2 coefficient is 1: D = -coeff[Y**2], N = -constant.
    return -coeff[Y**2]/coeff[X**2], -coeff[Integer(1)]/coeff[X**2]
def check_param(x, y, a, t):
    """
    Check if there is a number modulo ``a`` such that ``x`` and ``y`` are both
    integers. If exist, then find a parametric representation for ``x`` and
    ``y``.

    Here ``x`` and ``y`` are functions of ``t``.
    """
    k, m, n = symbols("k, m, n", integer=True)
    p = Wild("p", exclude=[k])
    q = Wild("q", exclude=[k])

    ok = False

    # Try each residue i mod a: substituting t = a*k + i must make both x and
    # y integer-linear in k for some i, otherwise no integral family exists.
    for i in range(a):

        z_x = _mexpand(Subs(x, t, a*k + i).doit()).match(p*k + q)
        z_y = _mexpand(Subs(y, t, a*k + i).doit()).match(p*k + q)

        if (isinstance(z_x[p], Integer) and isinstance(z_x[q], Integer) and
            isinstance(z_y[p], Integer) and isinstance(z_y[q], Integer)):
            ok = True
            break

    if ok == True:

        x_param = x.match(p*t + q)
        y_param = y.match(p*t + q)

        if x_param[p] == 0 or y_param[p] == 0:
            # One expression is constant in t: clear the denominators of the
            # non-constant one so both scale to integers simultaneously.
            if x_param[p] == 0:
                l1, junk = Poly(y).clear_denoms()
            else:
                l1 = 1

            if y_param[p] == 0:
                l2, junk = Poly(x).clear_denoms()
            else:
                l2 = 1

            return x*ilcm(l1, l2), y*ilcm(l1, l2)

        # Otherwise solve the linear diophantine condition that makes both
        # expressions simultaneously integral.
        eq = S(m - x_param[q])/x_param[p] - S(n - y_param[q])/y_param[p]

        lcm_denom, junk = Poly(eq).clear_denoms()
        eq = eq * lcm_denom

        return diop_solve(eq, t)[0], diop_solve(eq, t)[1]
    else:
        return (None, None)
def diop_ternary_quadratic(eq):
    """
    Solves the general quadratic ternary form,
    `ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.

    Returns a tuple `(x, y, z)` which is a base solution for the above
    equation. If there are no solutions, `(None, None, None)` is returned.

    Usage
    =====

    ``diop_ternary_quadratic(eq)``: Return a tuple containing a basic solution
    to ``eq``.

    Details
    =======

    ``eq`` should be an homogeneous expression of degree two in three variables
    and it is assumed to be zero.

    Examples
    ========

    >>> from sympy.abc import x, y, z
    >>> from sympy.solvers.diophantine import diop_ternary_quadratic
    >>> diop_ternary_quadratic(x**2 + 3*y**2 - z**2)
    (1, 0, 1)
    >>> diop_ternary_quadratic(4*x**2 + 5*y**2 - z**2)
    (1, 0, 2)
    >>> diop_ternary_quadratic(45*x**2 - 7*y**2 - 8*x*y - z**2)
    (28, 45, 105)
    >>> diop_ternary_quadratic(x**2 - 49*y**2 - z**2 + 13*z*y -8*x*y)
    (9, 1, 5)
    """
    variables, coefficients, eq_type = classify_diop(eq)

    # Dispatch only for homogeneous degree-two equations in three variables.
    if eq_type == "homogeneous_ternary_quadratic":
        return _diop_ternary_quadratic(variables, coefficients)
def _diop_ternary_quadratic(_var, coeff):
    # Return one base integer solution (x_0, y_0, z_0) of the homogeneous
    # ternary quadratic described by ``coeff``, or (None, None, None).
    # Works by permuting variables / completing the square until the equation
    # is in normal (diagonal) form, then delegating to
    # _diop_ternary_quadratic_normal.
    x, y, z = _var[:3]

    var = [x]*3
    var[0], var[1], var[2] = _var[0], _var[1], _var[2]

    # Equations of the form B*x*y + C*z*x + E*y*z = 0 and At least two of the
    # coefficients A, B, C are non-zero.
    # There are infinitely many solutions for the equation.
    # Ex: (0, 0, t), (0, t, 0), (t, 0, 0)
    # Equation can be re-written as y*(B*x + E*z) = -C*x*z and we can find rather
    # unobviuos solutions. Set y = -C and B*x + E*z = x*z. The latter can be solved by
    # using methods for binary quadratic diophantine equations. Let's select the
    # solution which minimizes |x| + |z|
    if coeff[x**2] == 0 and coeff[y**2] == 0 and coeff[z**2] == 0:
        if coeff[x*z] != 0:
            sols = diophantine(coeff[x*y]*x + coeff[y*z]*z - x*z)
            s = sols.pop()
            min_sum = abs(s[0]) + abs(s[1])

            # Keep the solution minimizing |x| + |z|.
            for r in sols:
                if abs(r[0]) + abs(r[1]) < min_sum:
                    s = r
                    min_sum = abs(s[0]) + abs(s[1])

            x_0, y_0, z_0 = s[0], -coeff[x*z], s[1]
        else:
            # No x*z term: swap x and y and recurse.
            var[0], var[1] = _var[1], _var[0]
            y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)

        return simplified(x_0, y_0, z_0)

    if coeff[x**2] == 0:
        # If the coefficient of x is zero change the variables
        if coeff[y**2] == 0:
            var[0], var[2] = _var[2], _var[0]
            z_0, y_0, x_0 = _diop_ternary_quadratic(var, coeff)
        else:
            var[0], var[1] = _var[1], _var[0]
            y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
    else:
        if coeff[x*y] != 0 or coeff[x*z] != 0:
            # Apply the transformation x --> X - (B*y + C*z)/(2*A)
            A = coeff[x**2]
            B = coeff[x*y]
            C = coeff[x*z]
            D = coeff[y**2]
            E = coeff[y*z]
            F = coeff[z**2]

            _coeff = dict()

            # Coefficients after completing the square in x.
            _coeff[x**2] = 4*A**2
            _coeff[y**2] = 4*A*D - B**2
            _coeff[z**2] = 4*A*F - C**2
            _coeff[y*z] = 4*A*E - 2*B*C
            _coeff[x*y] = 0
            _coeff[x*z] = 0

            X_0, y_0, z_0 = _diop_ternary_quadratic(var, _coeff)

            if X_0 == None:
                return (None, None, None)

            # Undo the substitution, clearing the denominator of (B*y + C*z)/(2*A).
            l = (S(B*y_0 + C*z_0)/(2*A)).q
            x_0, y_0, z_0 = X_0*l - (S(B*y_0 + C*z_0)/(2*A)).p, y_0*l, z_0*l
        elif coeff[z*y] != 0:
            if coeff[y**2] == 0:
                if coeff[z**2] == 0:
                    # Equations of the form A*x**2 + E*yz = 0.
                    A = coeff[x**2]
                    E = coeff[y*z]

                    b = (S(-E)/A).p
                    a = (S(-E)/A).q

                    x_0, y_0, z_0 = b, a, b
                else:
                    # Ax**2 + E*y*z + F*z**2 = 0
                    var[0], var[2] = _var[2], _var[0]
                    z_0, y_0, x_0 = _diop_ternary_quadratic(var, coeff)
            else:
                # A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, C may be zero
                var[0], var[1] = _var[1], _var[0]
                y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
        else:
            # Ax**2 + D*y**2 + F*z**2 = 0, C may be zero
            x_0, y_0, z_0 = _diop_ternary_quadratic_normal(var, coeff)

    return simplified(x_0, y_0, z_0)
def transformation_to_normal(eq):
    """
    Returns the transformation Matrix from general ternary quadratic equation
    ``eq`` to normal form.

    The general ternary quadratic has the form `ax^2 + by^2 + cz^2 + dxy +
    eyz + fxz`. The returned 3x3 Matrix maps it to the diagonal form
    `ax^2 + by^2 + cz^2 = 0`. This is not used in solving ternary quadratics;
    it is implemented only for the sake of completeness.
    """
    variables, coefficients, eq_type = classify_diop(eq)

    if eq_type == "homogeneous_ternary_quadratic":
        return _transformation_to_normal(variables, coefficients)
def _transformation_to_normal(var, coeff):
    # Build the 3x3 matrix T such that substituting (x, y, z)^T = T*(X, Y, Z)^T
    # into the ternary quadratic described by ``coeff`` removes all cross
    # terms.  Recursive: permutes variables and completes the square in x.
    _var = [var[0]]*3
    _var[1], _var[2] = var[1], var[2]

    x, y, z = var[:3]

    if coeff[x**2] == 0:
        # If the coefficient of x is zero change the variables
        if coeff[y**2] == 0:
            _var[0], _var[2] = var[2], var[0]
            T = _transformation_to_normal(_var, coeff)
            T.row_swap(0, 2)
            T.col_swap(0, 2)
            return T
        else:
            _var[0], _var[1] = var[1], var[0]
            T = _transformation_to_normal(_var, coeff)
            T.row_swap(0, 1)
            T.col_swap(0, 1)
            return T
    else:
        # Apply the transformation x --> X - (B*Y + C*Z)/(2*A)
        if coeff[x*y] != 0 or coeff[x*z] != 0:
            A = coeff[x**2]
            B = coeff[x*y]
            C = coeff[x*z]
            D = coeff[y**2]
            E = coeff[y*z]
            F = coeff[z**2]

            _coeff = dict()

            # Coefficients after completing the square in x.
            _coeff[x**2] = 4*A**2
            _coeff[y**2] = 4*A*D - B**2
            _coeff[z**2] = 4*A*F - C**2
            _coeff[y*z] = 4*A*E - 2*B*C
            _coeff[x*y] = 0
            _coeff[x*z] = 0

            T_0 = _transformation_to_normal(_var, _coeff)
            return Matrix(3, 3, [1, S(-B)/(2*A), S(-C)/(2*A), 0, 1, 0, 0, 0, 1]) * T_0

        elif coeff[y*z] != 0:
            if coeff[y**2] == 0:
                if coeff[z**2] == 0:
                    # Equations of the form A*x**2 + E*yz = 0.
                    # Apply transformation y -> Y + Z and z -> Y - Z
                    return Matrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, -1])
                else:
                    # Ax**2 + E*y*z + F*z**2 = 0
                    _var[0], _var[2] = var[2], var[0]
                    # BUGFIX: this recursive call was misspelled
                    # `_transformtion_to_normal`, raising NameError whenever
                    # this branch was reached.
                    T = _transformation_to_normal(_var, coeff)
                    T.row_swap(0, 2)
                    T.col_swap(0, 2)
                    return T
            else:
                # A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, F may be zero
                _var[0], _var[1] = var[1], var[0]
                T = _transformation_to_normal(_var, coeff)
                T.row_swap(0, 1)
                T.col_swap(0, 1)
                return T
        else:
            # Already diagonal: identity transformation.
            return Matrix(3, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1])
def simplified(x, y, z):
    """
    Simplify the solution `(x, y, z)` by dividing out the gcd of its entries.

    Passes through any tuple containing ``None`` (the "no solution" marker)
    unchanged.
    """
    # Idiom fix: use `is None` for the sentinel comparison.
    if x is None or y is None or z is None:
        return (x, y, z)

    g = igcd(x, igcd(y, z))

    # Robustness: all-zero input gives g == 0; avoid ZeroDivisionError and
    # return the trivial solution unchanged.
    if g == 0:
        return (x, y, z)

    return x // g, y // g, z // g
def parametrize_ternary_quadratic(eq):
    """
    Returns the parametrized general solution for the ternary quadratic
    equation ``eq`` which has the form
    `ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.

    Examples
    ========

    >>> from sympy.abc import x, y, z
    >>> from sympy.solvers.diophantine import parametrize_ternary_quadratic
    >>> parametrize_ternary_quadratic(x**2 + y**2 - z**2)
    (2*p*q, p**2 - q**2, p**2 + q**2)

    Here `p` and `q` are two co-prime integers.

    >>> parametrize_ternary_quadratic(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
    (2*p**2 - 2*p*q - q**2, 2*p**2 + 2*p*q - q**2, 2*p**2 - 2*p*q + 3*q**2)
    >>> parametrize_ternary_quadratic(124*x**2 - 30*y**2 - 7729*z**2)
    (-1410*p**2 - 363263*q**2, 2700*p**2 + 30916*p*q - 695610*q**2, -60*p**2 + 5400*p*q + 15458*q**2)

    References
    ==========

    .. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
           London Mathematical Society Student Texts 41, Cambridge University
           Press, Cambridge, 1998.
    """
    variables, coefficients, eq_type = classify_diop(eq)

    if eq_type == "homogeneous_ternary_quadratic":
        # Find one base solution, then parametrize all solutions around it.
        base = _diop_ternary_quadratic(variables, coefficients)
        return _parametrize_ternary_quadratic(base, variables, coefficients)
def _parametrize_ternary_quadratic(solution, _var, coeff):
    # Given one base solution (x_0, y_0, z_0), produce the two-parameter
    # family (in p, q) of all solutions via the standard chord construction.
    x, y, z = _var[:3]

    x_0, y_0, z_0 = solution[:3]

    v = [x]*3
    v[0], v[1], v[2] = _var[0], _var[1], _var[2]

    if x_0 == None:
        return (None, None, None)

    if x_0 == 0:
        # The construction below requires x_0 != 0; permute variables so a
        # non-zero coordinate comes first, recurse, then undo the permutation.
        if y_0 == 0:
            v[0], v[2] = v[2], v[0]
            z_p, y_p, x_p = _parametrize_ternary_quadratic((z_0, y_0, x_0), v, coeff)
            return x_p, y_p, z_p
        else:
            v[0], v[1] = v[1], v[0]
            y_p, x_p, z_p = _parametrize_ternary_quadratic((y_0, x_0, z_0), v, coeff)
            return x_p, y_p, z_p

    x, y, z = v[:3]
    r, p, q = symbols("r, p, q", integer=True)

    eq = x**2*coeff[x**2] + y**2*coeff[y**2] + z**2*coeff[z**2] + x*y*coeff[x*y] + y*z*coeff[y*z] + z*x*coeff[z*x]

    # Substitute the line through the base solution with direction (0, p, q);
    # the result is linear in r since the base point lies on the conic.
    eq_1 = Subs(eq, (x, y, z), (r*x_0, r*y_0 + p, r*z_0 + q)).doit()
    eq_1 = _mexpand(eq_1)

    A, B = eq_1.as_independent(r, as_Add=True)

    x = A*x_0
    y = (A*y_0 - _mexpand(B/r*p))
    z = (A*z_0 - _mexpand(B/r*q))

    return x, y, z
def diop_ternary_quadratic_normal(eq):
    """
    Solves the quadratic ternary diophantine equation,
    `ax^2 + by^2 + cz^2 = 0`.

    Here the coefficients `a`, `b`, and `c` should be non zero. Otherwise the
    equation will be a quadratic binary or univariate equation. If solvable,
    returns a tuple `(x, y, z)` that satisifes the given equation. If the
    equation does not have integer solutions, `(None, None, None)` is returned.

    Usage
    =====

    ``diop_ternary_quadratic_normal(eq)``: where ``eq`` is an equation of the form
    `ax^2 + by^2 + cz^2 = 0`.

    Examples
    ========

    >>> from sympy.abc import x, y, z
    >>> from sympy.solvers.diophantine import diop_ternary_quadratic_normal
    >>> diop_ternary_quadratic_normal(x**2 + 3*y**2 - z**2)
    (1, 0, 1)
    >>> diop_ternary_quadratic_normal(4*x**2 + 5*y**2 - z**2)
    (1, 0, 2)
    >>> diop_ternary_quadratic_normal(34*x**2 - 3*y**2 - 301*z**2)
    (4, 9, 1)
    """
    variables, coefficients, eq_type = classify_diop(eq)

    if eq_type == "homogeneous_ternary_quadratic":
        return _diop_ternary_quadratic_normal(variables, coefficients)
def _diop_ternary_quadratic_normal(var, coeff):
    # Solve a*x**2 + b*y**2 + c*z**2 = 0 (all coefficients non-zero) by
    # reducing to square-free pairwise-prime coefficients, running Lagrange
    # descent, then Holzer-reducing and undoing the reductions.
    x, y, z = var[:3]

    a = coeff[x**2]
    b = coeff[y**2]
    c = coeff[z**2]

    if a*b*c == 0:
        # BUGFIX: message typo ("you equation" -> "your equation").
        raise ValueError("Try factoring out your equation or using diophantine()")

    # Remove the common content of the coefficients.
    g = igcd(a, igcd(b, c))

    a = a // g
    b = b // g
    c = c // g

    # Split each coefficient into square part and square-free part.
    a_0 = square_factor(a)
    b_0 = square_factor(b)
    c_0 = square_factor(c)

    a_1 = a // a_0**2
    b_1 = b // b_0**2
    c_1 = c // c_0**2

    a_2, b_2, c_2 = pairwise_prime(a_1, b_1, c_1)

    A = -a_2*c_2
    B = -b_2*c_2

    # If following two conditions are satisified then there are no solutions
    if A < 0 and B < 0:
        return (None, None, None)

    # Local solvability: each -product must be a quadratic residue.
    if (sqrt_mod(-b_2*c_2, a_2) is None or sqrt_mod(-c_2*a_2, b_2) is None or
        sqrt_mod(-a_2*b_2, c_2) is None):
        return (None, None, None)

    z_0, x_0, y_0 = descent(A, B)

    if divisible(z_0, c_2):
        z_0 = z_0 // abs(c_2)
    else:
        # Scale the solution so z_0 becomes divisible by c_2.
        x_0 = x_0*(S(z_0)/c_2).q
        y_0 = y_0*(S(z_0)/c_2).q
        z_0 = (S(z_0)/c_2).p

    x_0, y_0, z_0 = simplified(x_0, y_0, z_0)

    # Holzer reduction: shrink the solution; permute so the two same-sign
    # coefficients come first, as holzer() requires a, b, c > 0.
    if sign(a) == sign(b):
        x_0, y_0, z_0 = holzer(x_0, y_0, z_0, abs(a_2), abs(b_2), abs(c_2))
    elif sign(a) == sign(c):
        x_0, z_0, y_0 = holzer(x_0, z_0, y_0, abs(a_2), abs(c_2), abs(b_2))
    else:
        y_0, z_0, x_0 = holzer(y_0, z_0, x_0, abs(b_2), abs(c_2), abs(a_2))

    # Undo the pairwise_prime transformation.
    x_0 = reconstruct(b_1, c_1, x_0)
    y_0 = reconstruct(a_1, c_1, y_0)
    z_0 = reconstruct(a_1, b_1, z_0)

    # Undo the square-factor extraction.
    l = ilcm(a_0, ilcm(b_0, c_0))

    x_0 = abs(x_0*l//a_0)
    y_0 = abs(y_0*l//b_0)
    z_0 = abs(z_0*l//c_0)

    return simplified(x_0, y_0, z_0)
def square_factor(a):
    """
    Returns an integer `c` s.t. `a = c^2k, \\ c,k \\in Z`. Here `k` is square
    free.

    Examples
    ========

    >>> from sympy.solvers.diophantine import square_factor
    >>> square_factor(24)
    2
    >>> square_factor(36)
    6
    >>> square_factor(1)
    1
    """
    # Multiply together p**(e // 2) over the prime factorization of |a|.
    result = 1
    for prime, mult in factorint(abs(a)).items():
        result *= prime**(mult // 2)
    return result
def pairwise_prime(a, b, c):
    """
    Transform `ax^2 + by^2 + cz^2 = 0` into an equivalent equation
    `a'x^2 + b'y^2 + c'z^2 = 0` where `a', b', c'` are pairwise relatively
    prime.

    Returns a tuple containing `a', b', c'`. `\gcd(a, b, c)` should equal `1`
    for this to work. The solutions for `ax^2 + by^2 + cz^2 = 0` can be
    recovered from the solutions of `a'x^2 + b'y^2 + c'z^2 = 0`.

    Examples
    ========

    >>> from sympy.solvers.diophantine import pairwise_prime
    >>> pairwise_prime(6, 15, 10)
    (5, 2, 3)

    See Also
    ========

    make_prime(), reconstruct()
    """
    # Apply make_prime() to each of the three coefficient pairs in turn.
    a, b, c = make_prime(a, b, c)
    b, c, a = make_prime(b, c, a)
    c, a, b = make_prime(c, a, b)

    return a, b, c
def make_prime(a, b, c):
    """
    Transform the equation `ax^2 + by^2 + cz^2 = 0` to an equivalent equation
    `a'x^2 + b'y^2 + c'z^2 = 0` with `\gcd(a', b') = 1`.

    Returns a tuple `(a', b', c')` which satisfies above conditions. Note that
    in the returned tuple `\gcd(a', c')` and `\gcd(b', c')` can take any value.

    Examples
    ========

    >>> from sympy.solvers.diophantine import make_prime
    >>> make_prime(4, 2, 7)
    (2, 1, 14)

    See Also
    ========

    pairwise_prime(), reconstruct()
    """
    g = igcd(a, b)

    if g != 1:
        # Divide each shared prime power out of a and b; odd powers push one
        # factor of p onto c to keep the equation equivalent.
        f = factorint(g)
        for p, e in f.items():
            a = a // p**e
            b = b // p**e

            if e % 2 == 1:
                c = p*c

    return a, b, c
def reconstruct(a, b, z):
    """
    Reconstruct the `z` value of an equivalent solution of `ax^2 + by^2 + cz^2`
    from the `z` value of a solution of a transformed version of the above
    equation.
    """
    common = igcd(a, b)
    if common == 1:
        return z

    # Multiply back p**ceil(e / 2) for every shared prime power: this is
    # p**(e//2) for even e and p**(e//2 + 1) for odd e.
    for prime, exp in factorint(common).items():
        z *= prime ** ((exp + 1) // 2)

    return z
def ldescent(A, B):
    """
    Uses Lagrange's method to find a non trivial solution to
    `w^2 = Ax^2 + By^2`.

    Here, `A \\neq 0` and `B \\neq 0` and `A` and `B` are square free. Output a
    tuple `(w_0, x_0, y_0)` which is a solution to the above equation.

    Examples
    ========

    >>> from sympy.solvers.diophantine import ldescent
    >>> ldescent(1, 1) # w^2 = x^2 + y^2
    (1, 1, 0)
    >>> ldescent(4, -7) # w^2 = 4x^2 - 7y^2
    (2, -1, 0)

    This means that `x = -1, y = 0` and `w = 2` is a solution to the equation
    `w^2 = 4x^2 - 7y^2`

    >>> ldescent(5, -1) # w^2 = 5x^2 - y^2
    (2, 1, -1)

    References
    ==========

    .. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
           London Mathematical Society Student Texts 41, Cambridge University
           Press, Cambridge, 1998.
    .. [2] Efficient Solution of Rational Conices, J. E. Cremona and D. Rusin,
           Mathematics of Computation, Volume 00, Number 0.
    """
    # Keep |A| <= |B| so the descent recursion shrinks the coefficients.
    if abs(A) > abs(B):
        w, y, x = ldescent(B, A)
        return w, x, y

    # Trivial base cases.
    if A == 1:
        return (S.One, S.One, 0)

    if B == 1:
        return (S.One, 0, S.One)

    # r**2 = A (mod B), so Q = (r**2 - A)/B is an integer.
    r = sqrt_mod(A, B)

    Q = (r**2 - A) // B

    if Q == 0:
        B_0 = 1
        d = 0
    else:
        # Look for a divisor i of Q whose cofactor is a perfect square,
        # giving Q = B_0 * d**2 with the sign carried on B_0.
        div = divisors(Q)
        B_0 = None

        for i in div:
            if isinstance(sqrt(abs(Q) // i), Integer):
                B_0, d = sign(Q)*i, sqrt(abs(Q) // i)
                break

    if B_0 != None:
        # Recurse on the smaller coefficient and lift the solution back.
        W, X, Y = ldescent(A, B_0)
        return simplified((-A*X + r*W), (r*X - W), Y*(B_0*d))
    # NOTE(review): if no divisor of Q has a perfect-square cofactor this
    # function implicitly returns None — the callers appear to assume that
    # case cannot occur; confirm.
# In this module Descent will always be called with inputs which have solutions.
def descent(A, B):
    """
    Lagrange's `descent()` with lattice-reduction to find solutions to
    `x^2 = Ay^2 + Bz^2`.

    Here `A` and `B` should be square free and pairwise prime. Always should be
    called with suitable ``A`` and ``B`` so that the above equation has
    solutions.

    This is more faster than the normal Lagrange's descent algorithm because
    the gaussian reduction is used.

    Examples
    ========

    >>> from sympy.solvers.diophantine import descent
    >>> descent(3, 1) # x**2 = 3*y**2 + z**2
    (1, 0, 1)

    `(x, y, z) = (1, 0, 1)` is a solution to the above equation.

    >>> descent(41, -113)
    (-16, -3, 1)

    References
    ==========

    .. [1] Efficient Solution of Rational Conices, J. E. Cremona and D. Rusin,
           Mathematics of Computation, Volume 00, Number 0.
    """
    # Keep |A| <= |B| so the recursion shrinks the coefficients.
    if abs(A) > abs(B):
        x, y, z = descent(B, A)
        return x, z, y

    # Trivial and special base cases.
    if B == 1:
        return (1, 0, 1)
    if A == 1:
        return (1, 1, 0)
    if B == -1:
        return (None, None, None)
    if B == -A:
        return (0, 1, 1)
    if B == A:
        x, z, y = descent(-1, A)
        return (A*y, z, x)

    # w**2 = A (mod B); gaussian_reduce finds a small solution of
    # x**2 - A*z**2 = 0 (mod B).
    w = sqrt_mod(A, B)
    x_0, z_0 = gaussian_reduce(w, A, B)

    t = (x_0**2 - A*z_0**2) // B
    t_2 = square_factor(t)
    t_1 = t // t_2**2

    # Recurse on the square-free part t_1 and compose the solutions.
    x_1, z_1, y_1 = descent(A, t_1)

    return simplified(x_0*x_1 + A*z_0*z_1, z_0*x_1 + x_0*z_1, t_1*t_2*y_1)
def gaussian_reduce(w, a, b):
    """
    Returns a reduced solution `(x, z)` to the congruence
    `X^2 - aZ^2 \equiv 0 \ (mod \ b)` so that `x^2 + |a|z^2` is minimal.

    Details
    =======

    Here ``w`` is a solution of the congruence `x^2 \equiv a \ (mod \ b)`

    References
    ==========

    .. [1] Gaussian lattice Reduction [online]. Available:
        http://home.ie.cuhk.edu.hk/~wkshum/wordpress/?p=404
    .. [2] Efficient Solution of Rational Conices, J. E. Cremona and D. Rusin,
        Mathematics of Computation, Volume 00, Number 0.
    """
    # Lattice basis vectors (coordinates over the generators (w, 1), (b, 0)).
    u = (0, 1)
    v = (1, 0)

    # Orient v so the basis has non-negative inner product.
    if dot(u, v, w, a, b) < 0:
        v = (-v[0], -v[1])

    # Ensure u is the longer vector before reducing.
    if norm(u, w, a, b) < norm(v, w, a, b):
        u, v = v, u

    # Gaussian (two-dimensional lattice) reduction loop.
    while norm(u, w, a, b) > norm(v, w, a, b):
        k = dot(u, v, w, a, b) // dot(v, v, w, a, b)
        u, v = v, (u[0]- k*v[0], u[1]- k*v[1])

    u, v = v, u

    # Pick the shorter of v and u - v as the final reduced vector.
    if dot(u, v, w, a, b) < dot(v, v, w, a, b)/2 or norm((u[0]-v[0], u[1]-v[1]), w, a, b) > norm(v, w, a, b):
        c = v
    else:
        c = (u[0] - v[0], u[1] - v[1])

    # Convert lattice coordinates back to (x, z).
    return c[0]*w + b*c[1], c[0]
def dot(u, v, w, a, b):
    """
    Returns a special dot product of the vectors `u = (u_{1}, u_{2})` and
    `v = (v_{1}, v_{2})` which is defined in order to reduce solution of
    the congruence equation `X^2 - aZ^2 \equiv 0 \ (mod \ b)`.
    """
    # Map each vector through the lattice generators (w, 1) and (b, 0),
    # then add the |a|-weighted product of the first components.
    left = w*u[0] + b*u[1]
    right = w*v[0] + b*v[1]
    return left*right + abs(a)*u[0]*v[0]
def norm(u, w, a, b):
    """
    Returns the norm of the vector `u = (u_{1}, u_{2})` under the dot product
    defined by `u \cdot v = (wu_{1} + bu_{2})(w*v_{1} + bv_{2}) + |a|*u_{1}*v_{1}`
    where `u = (u_{1}, u_{2})` and `v = (v_{1}, v_{2})`.
    """
    # The norm is simply the square root of the self dot product.
    return sqrt(dot(u, u, w, a, b))
def holzer(x_0, y_0, z_0, a, b, c):
    """
    Simplify the solution `(x_{0}, y_{0}, z_{0})` of the equation
    `ax^2 + by^2 = cz^2` with `a, b, c > 0` and `z_{0}^2 \geq \mid ab \mid` to
    a new reduced solution `(x, y, z)` such that `z^2 \leq \mid ab \mid`.
    """
    # Repeatedly replace the solution with a strictly smaller one until the
    # Holzer bound z_0 <= sqrt(a*b) is met.
    while z_0 > sqrt(a*b):

        # Choose (u_0, v_0) on the line k | (y_0*u - x_0*v); the modulus k
        # depends on the parity of c.
        if c % 2 == 0:
            k = c // 2
            u_0, v_0 = base_solution_linear(k, y_0, -x_0)
        else:
            k = 2*c
            u_0, v_0 = base_solution_linear(c, y_0, -x_0)

        w = -(a*u_0*x_0 + b*v_0*y_0) // (c*z_0)

        # For odd c, w must match the parity of a*u_0 + b*v_0.
        if c % 2 == 1:
            if w % 2 != (a*u_0 + b*v_0) % 2:
                w = w + 1

        # Reflection step producing a smaller solution of the same equation.
        x = (x_0*(a*u_0**2 + b*v_0**2 + c*w**2) - 2*u_0*(a*u_0*x_0 + b*v_0*y_0 + c*w*z_0)) // k
        y = (y_0*(a*u_0**2 + b*v_0**2 + c*w**2) - 2*v_0*(a*u_0*x_0 + b*v_0*y_0 + c*w*z_0)) // k
        z = (z_0*(a*u_0**2 + b*v_0**2 + c*w**2) - 2*w*(a*u_0*x_0 + b*v_0*y_0 + c*w*z_0)) // k

        x_0, y_0, z_0 = x, y, z

    return x_0, y_0, z_0
def diop_general_pythagorean(eq, param=symbols("m", integer=True)):
    """
    Solves the general pythagorean equation,
    `a_{1}^2x_{1}^2 + a_{2}^2x_{2}^2 + . . . + a_{n}^2x_{n}^2 - a_{n + 1}^2x_{n + 1}^2 = 0`.

    Returns a tuple which contains a parametrized solution to the equation,
    sorted in the same order as the input variables.

    Usage
    =====

    ``diop_general_pythagorean(eq, param)``: where ``eq`` is a general
    pythagorean equation which is assumed to be zero and ``param`` is the base
    parameter used to construct other parameters by subscripting.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_general_pythagorean
    >>> from sympy.abc import a, b, c, d, e
    >>> diop_general_pythagorean(a**2 + b**2 + c**2 - d**2)
    (m1**2 + m2**2 - m3**2, 2*m1*m3, 2*m2*m3, m1**2 + m2**2 + m3**2)
    >>> diop_general_pythagorean(9*a**2 - 4*b**2 + 16*c**2 + 25*d**2 + e**2)
    (10*m1**2 + 10*m2**2 + 10*m3**2 - 10*m4**2, 15*m1**2 + 15*m2**2 + 15*m3**2 + 15*m4**2, 15*m1*m4, 12*m2*m4, 60*m3*m4)
    """
    variables, coefficients, eq_type = classify_diop(eq)

    if eq_type == "general_pythagorean":
        return _diop_general_pythagorean(variables, coefficients, param)
def _diop_general_pythagorean(var, coeff, t):
    # Construct the classical parametrization of the generalized Pythagorean
    # equation using parameters t1, ..., t(n-1); ``index`` tracks which
    # variable carries the negative square (the "hypotenuse").
    if sign(coeff[var[0]**2]) + sign(coeff[var[1]**2]) + sign(coeff[var[2]**2]) < 0:
        # Normalize the overall sign so exactly one coefficient is negative.
        for key in coeff.keys():
            coeff[key] = coeff[key] * -1

    n = len(var)
    index = 0

    for i, v in enumerate(var):
        if sign(coeff[v**2]) == -1:
            index = i

    m = symbols(str(t) + "1:" + str(n), integer=True)
    l = []
    ith = 0

    # ith accumulates m1**2 + ... + m_{n-1}**2.
    for m_i in m:
        ith = ith + m_i**2

    l.append(ith - 2*m[n - 2]**2)

    for i in range(n - 2):
        l.append(2*m[i]*m[n-2])

    # Insert the hypotenuse expression at the position of the negative term.
    sol = l[:index] + [ith] + l[index:]

    # Scale by the lcm of the square roots of the coefficients so all the
    # entries remain integral after dividing by them below.
    lcm = 1
    for i, v in enumerate(var):
        if i == index or (index > 0 and i == 0) or (index == 0 and i == 1):
            lcm = ilcm(lcm, sqrt(abs(coeff[v**2])))
        else:
            lcm = ilcm(lcm, sqrt(coeff[v**2]) if sqrt(coeff[v**2]) % 2 else sqrt(coeff[v**2]) // 2)

    for i, v in enumerate(var):
        sol[i] = (lcm*sol[i]) / sqrt(abs(coeff[v**2]))

    return tuple(sol)
def diop_general_sum_of_squares(eq, limit=1):
    """
    Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.

    Returns at most ``limit`` number of solutions. Currently there is no way to
    set ``limit`` using higher level API's like ``diophantine()`` or
    ``diop_solve()`` but that will be fixed soon.

    Usage
    =====

    ``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which
    is assumed to be zero. Also, ``eq`` should be in the form,
    `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`. At most ``limit`` number of
    solutions are returned.

    Details
    =======

    When `n = 3` if `k = 4^a(8m + 7)` for some `a, m \in Z` then there will be
    no solutions. Refer [1]_ for more details.

    Examples
    ========

    >>> from sympy.solvers.diophantine import diop_general_sum_of_squares
    >>> from sympy.abc import a, b, c, d, e, f
    >>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345)
    set([(0, 48, 5, 4, 0)])

    Reference
    =========

    .. [1] Representing an Integer as a sum of three squares, [online],
        Available:
        http://www.proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares
    """
    variables, coefficients, eq_type = classify_diop(eq)

    if eq_type == "general_sum_of_squares":
        return _diop_general_sum_of_squares(variables, coefficients, limit)
def _diop_general_sum_of_squares(var, coeff, limit=1):
    # Solve x_1**2 + ... + x_n**2 = k by combining the three- and
    # four-square decompositions; for n > 4 split k into n//4 parts first.
    n = len(var)
    k = -int(coeff[Integer(1)])

    s = set([])

    # Negative k has no representation as a sum of squares.
    if k < 0:
        return set([])

    if n == 3:
        s.add(sum_of_three_squares(k))
    elif n == 4:
        s.add(sum_of_four_squares(k))
    else:
        # Partition k into n//4 summands (zeros allowed) and decompose each
        # summand into four squares; at most ``limit`` partitions are used.
        m = n // 4
        f = partition(k, m, True)

        for j in range(limit):
            soln = []
            try:
                l = next(f)
            except StopIteration:
                break

            for n_i in l:
                a, b, c, d = sum_of_four_squares(n_i)
                soln = soln + [a, b, c, d]

            # Pad with zeros when n is not a multiple of four.
            soln = soln + [0] * (n % 4)

            s.add(tuple(soln))

    return s
## Functions below this comment can be more suitably grouped under an Additive number theory module
## rather than the Diophantine equation module.
def partition(n, k=None, zeros=False):
    """
    Returns a generator that can be used to generate partitions of an integer
    `n`.

    A partition of `n` is a set of positive integers which add upto `n`. For
    example, partitions of 3 are 3 , 1 + 2, 1 + 1 + 1. A partition is returned
    as a tuple. If ``k`` equals None, then all possible partitions are returned
    irrespective of their size, otherwise only the partitions of size ``k`` are
    returned. If there are no partitions of `n` with size `k` then an empty tuple
    is returned. If the ``zeros`` parameter is set to True then a suitable
    number of zeros are added at the end of every partition of size less than
    ``k``.

    ``zeros`` parameter is considered only if ``k`` is not None. When the
    partitions are over, the last `next()` call throws the ``StopIteration``
    exception, so this function should always be used inside a try - except
    block.

    Details
    =======

    ``partition(n, k)``: Here ``n`` is a positive integer and ``k`` is the size
    of the partition which is also positive integer.

    Examples
    ========

    >>> from sympy.solvers.diophantine import partition
    >>> f = partition(5)
    >>> next(f)
    (1, 1, 1, 1, 1)
    >>> next(f)
    (1, 1, 1, 2)
    >>> g = partition(5, 3)
    >>> next(g)
    (3, 1, 1)
    >>> next(g)
    (2, 2, 1)

    Reference
    =========

    .. [1] Generating Integer Partitions, [online],
        Available: http://jeromekelleher.net/partitions.php
    """
    if n < 1:
        yield tuple()

    if k is not None:
        if k < 1:
            yield tuple()

        elif k > n:
            if zeros:
                # Pad shorter partitions with zeros up to length k.
                for i in range(1, n):
                    for t in partition(n, i):
                        # NOTE(review): (t,) wraps the whole partition tuple,
                        # yielding a nested tuple; the analogous zero-padding
                        # branch below uses tuple(a) + ... instead — verify
                        # which form is intended.
                        yield (t,) + (0,) * (k - i)
            else:
                yield tuple()

        else:
            # Fixed-size partitions, generated in decreasing-first order
            # starting from (n - k + 1, 1, ..., 1).
            a = [1 for i in range(k)]
            a[0] = n - k + 1

            yield tuple(a)

            i = 1
            while a[0] >= n // k + 1:
                j = 0

                while j < i and j + 1 < k:
                    # Move one unit from position j to position j + 1.
                    a[j] = a[j] - 1
                    a[j + 1] = a[j + 1] + 1
                    yield tuple(a)

                    j = j + 1

                i = i + 1

            if zeros:
                # Also emit all shorter partitions padded with zeros.
                for m in range(1, k):
                    for a in partition(n, m):
                        yield tuple(a) + (0,) * (k - m)

    else:
        # Unrestricted partitions via Kelleher's accelerated ascending
        # composition algorithm (see reference [1]).
        a = [0 for i in range(n + 1)]
        l = 1
        y = n - 1

        while l != 0:
            x = a[l - 1] + 1
            l -= 1

            while 2*x <= y:
                a[l] = x
                y -= x
                l += 1

            m = l + 1

            while x <= y:
                a[l] = x
                a[m] = y
                yield tuple(a[:l + 2])
                x += 1
                y -= 1

            a[l] = x + y
            y = x + y - 1
            yield tuple(a[:l + 1])
def prime_as_sum_of_two_squares(p):
    """
    Represent a prime `p` which is congruent to 1 mod 4, as a sum of two
    squares.

    Examples
    ========

    >>> from sympy.solvers.diophantine import prime_as_sum_of_two_squares
    >>> prime_as_sum_of_two_squares(5)
    (2, 1)

    Reference
    =========

    .. [1] Representing a number as a sum of four squares, [online],
        Available: http://www.schorn.ch/howto.html
    """
    # Find a quadratic non-residue b modulo p (2 works when p = 5 mod 8).
    b = 2 if p % 8 == 5 else 3

    while pow(b, (p - 1) // 2, p) == 1:
        b = nextprime(b)

    # b**((p - 1)/4) is then a square root of -1 modulo p.
    a, b = p, pow(b, (p - 1) // 4, p)

    # Run the Euclidean algorithm until the remainder drops below sqrt(p);
    # the last two remainders are the desired squares' roots.
    while b*b > p:
        a, b = b, a % b

    return (b, a % b)
def sum_of_three_squares(n):
    r"""
    Returns a 3-tuple `(a, b, c)` such that `a^2 + b^2 + c^2 = n` and
    `a, b, c \geq 0`.

    Returns (None, None, None) if `n = 4^a(8m + 7)` for some `a, m \in Z`. See
    [1]_ for more details.

    Usage
    =====

    ``sum_of_three_squares(n)``: Here ``n`` is a non-negative integer.

    Examples
    ========

    >>> from sympy.solvers.diophantine import sum_of_three_squares
    >>> sum_of_three_squares(44542)
    (207, 37, 18)

    References
    ==========

    .. [1] Representing a number as a sum of three squares, [online],
        Available: http://www.schorn.ch/howto.html
    """
    # Pre-computed representations for the small cases where the search
    # below does not find a suitable prime.
    special = {1:(1, 0, 0), 2:(1, 1, 0), 3:(1, 1, 1), 10: (1, 3, 0), 34: (3, 3, 4), 58:(3, 7, 0),
        85:(6, 7, 0), 130:(3, 11, 0), 214:(3, 6, 13), 226:(8, 9, 9), 370:(8, 9, 15),
        526:(6, 7, 21), 706:(15, 15, 16), 730:(1, 27, 0), 1414:(6, 17, 33), 1906:(13, 21, 36),
        2986: (21, 32, 39), 9634: (56, 57, 57)}

    v = 0

    if n == 0:
        return (0, 0, 0)

    # Strip factors of 4; they are restored as the scale 2**v at the end.
    while n % 4 == 0:
        v = v + 1
        n = n // 4

    # By Legendre's three-square theorem, n = 8m + 7 has no representation.
    if n % 8 == 7:
        return (None, None, None)

    # Idiom fix: membership test directly on the dict (was `special.keys()`).
    if n in special:
        x, y, z = special[n]
        return (2**v*x, 2**v*y, 2**v*z)

    l = int(sqrt(n))

    if n == l**2:
        return (2**v*l, 0, 0)

    x = None

    if n % 8 == 3:
        # Search for odd x with (n - x**2)/2 prime; that prime is 1 mod 4
        # and splits into two squares.
        l = l if l % 2 else l - 1

        for i in range(l, -1, -2):
            if isprime((n - i**2) // 2):
                x = i
                break

        y, z = prime_as_sum_of_two_squares((n - x**2) // 2)
        return (2**v*x, 2**v*(y + z), 2**v*abs(y - z))

    # Choose the parity of x so n - x**2 is 1 mod 4, then search for a
    # prime of that form and split it into two squares.
    if n % 8 == 2 or n % 8 == 6:
        l = l if l % 2 else l - 1
    else:
        l = l - 1 if l % 2 else l

    for i in range(l, -1, -2):
        if isprime(n - i**2):
            x = i
            break

    y, z = prime_as_sum_of_two_squares(n - x**2)
    return (2**v*x, 2**v*y, 2**v*z)
def sum_of_four_squares(n):
    r"""
    Returns a 4-tuple `(a, b, c, d)` such that `a^2 + b^2 + c^2 + d^2 = n`.

    Here `a, b, c, d \geq 0`.

    Usage
    =====

    ``sum_of_four_squares(n)``: Here ``n`` is a non-negative integer.

    Examples
    ========

    >>> from sympy.solvers.diophantine import sum_of_four_squares
    >>> sum_of_four_squares(3456)
    (8, 48, 32, 8)
    >>> sum_of_four_squares(1294585930293)
    (0, 1137796, 2161, 1234)

    References
    ==========

    .. [1] Representing a number as a sum of four squares, [online],
        Available: http://www.schorn.ch/howto.html
    """
    if n == 0:
        return (0, 0, 0, 0)

    # Strip factors of 4; the scale 2**v is restored at the end.
    v = 0
    while n % 4 == 0:
        v = v + 1
        n = n // 4

    # Peel off one square so the remainder is a sum of three squares.
    residue = n % 8
    if residue == 7:
        d = 2
        n = n - 4
    elif residue == 6 or residue == 2:
        d = 1
        n = n - 1
    else:
        d = 0

    x, y, z = sum_of_three_squares(n)

    scale = 2**v
    return (scale*d, scale*x, scale*y, scale*z)
def power_representation(n, p, k, zeros=False):
    """
    Returns a generator for finding k-tuples `(n_{1}, n_{2}, . . . n_{k})` such
    that `n = n_{1}^p + n_{2}^p + . . . n_{k}^p`.

    Here `n` is a non-negative integer. StopIteration exception is raised after
    all the solutions are generated, so should always be used within a try-
    catch block.

    Usage
    =====

    ``power_representation(n, p, k, zeros)``: Represent number ``n`` as a sum
    of ``k``, ``p``th powers. If ``zeros`` is true, then the solutions will
    contain zeros.

    Examples
    ========

    >>> from sympy.solvers.diophantine import power_representation
    >>> f = power_representation(1729, 3, 2) # Represent 1729 as a sum of two cubes
    >>> next(f)
    (12, 1)
    >>> next(f)
    (10, 9)
    """
    if p < 1 or k < 1 or n < 1:
        raise ValueError("Expected: n > 0 and k >= 1 and p >= 1")

    if k == 1:
        # BUGFIX: the previous code tested perfect_power(n), which reports
        # *any* perfect power, so e.g. n=8, p=2 wrongly yielded (2,) and
        # n=5, p=1 wrongly yielded nothing.  Check the exact p-th root.
        b, exact = integer_nthroot(n, p)
        if exact:
            yield (b,)
        else:
            yield tuple()
    elif p == 1:
        # Sums of first powers are integer partitions of n into k parts.
        for t in partition(n, k, zeros):
            yield t
    else:
        # General case: recursive search with parts bounded by floor(n**(1/p)).
        a = integer_nthroot(n, p)[0]

        for t in pow_rep_recursive(a, k, n, [], p):
            yield t

        if zeros:
            # Also emit shorter representations padded with zeros.
            for i in range(2, k):
                for t in pow_rep_recursive(a, i, n, [], p):
                    yield t + (0,) * (k - i)
def pow_rep_recursive(n_i, k, n_remaining, terms, p):
    # Depth-first search for representations of ``n_remaining`` as a sum of
    # ``k`` p-th powers with parts at most ``n_i`` (in non-increasing order).
    if k == 0 and n_remaining == 0:
        yield tuple(terms)
        return

    # Dead branch: no parts left to place, or target unreachable.
    if n_i < 1 or k <= 0 or n_remaining < 0:
        return

    # Branch 1: use n_i as the next part (may be reused).
    if n_i**p <= n_remaining:
        for sol in pow_rep_recursive(n_i, k - 1, n_remaining - n_i**p, terms + [n_i], p):
            yield sol

    # Branch 2: skip n_i and try smaller parts.
    for sol in pow_rep_recursive(n_i - 1, k, n_remaining, terms, p):
        yield sol
|
Arafatk/sympy
|
sympy/solvers/diophantine.py
|
Python
|
bsd-3-clause
| 83,608
|
[
"Gaussian"
] |
be27cd4729973b0961651e3ad6517644ebd797556f41bcfe14e233b92ed4ce88
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# URL configuration: static pages, admin, auth, and app includes, plus
# media serving for development.
urlpatterns = [
    # Static template-backed pages.
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),

    # User management
    url(r'^users/', include("revolution.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here
    url(r'^games/', include("revolution.games.urls", namespace="games")),

] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # media files (dev only)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
|
arruda/revolution
|
config/urls.py
|
Python
|
mit
| 1,354
|
[
"VisIt"
] |
d786e35dc39e24019d662fc6cf53fe80cca4ea3df62062286c9159ae42e0d820
|
#!/usr/bin/env python
import sys
import argparse
import glob
import re
import vtk
import vtktools
def parse_args(argv):
    """Parse the command line options for vtu2ensight.

    Parameters
    ----------
    argv : list of str
        Full argument vector (e.g. ``sys.argv``); the first entry is the
        program name and is skipped.

    Returns
    -------
    argparse.Namespace
        Parsed options: verbose, static, dumpno, lastdump, basename.
    """
    parser = argparse.ArgumentParser(
        prog="vtu2ensight",
        description="""This converts a vtu file to a ensight file. If applied to checkpointed files, use rename_checkpoint first and ensure that 'checkpoint' is removed from the basename of the solution files.""")
    parser.add_argument(
        "-v",
        "--verbose",
        help="Print something...",
        action = "store_true",
        dest = "verbose",
        default = False
        )
    parser.add_argument(
        "-s",
        "--static",
        help="Use this flag only when a fixed mesh was used. By default a dynamically varying (adaptive) spatial mesh is assumed.",
        action = "store_true",
        dest = "static",
        default = False
        )
    parser.add_argument(
        "-i",
        help="Use this flag to set the index of the vtu file you wish to convert. By default all vtu files with the matching basename are converted.",
        dest = "dumpno",
        default = -1
        )
    parser.add_argument(
        "-l",
        "--last-dump",
        help="Use this flag to automatically find the vtu file with the highest dump number and only convert that opposed to all vtu files. Note: It does not check the timestamps. If -l and -i are given, -i is neglected.",
        action = "store_true",
        dest = "lastdump",
        default = False
        )
    parser.add_argument(
        'basename',
        metavar='basename',
        help="Basename of output (without .pvtu or .vtu)",
        )
    # Bug fix: the original called parser.parse_args() with no arguments,
    # silently ignoring the ``argv`` parameter and always reading sys.argv.
    args = parser.parse_args(argv[1:])
    return args
# Function taken from:
# http://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python
def sorted_nicely(l):
    """Sort the given iterable in natural (human) order.

    Runs of digits compare numerically, so e.g. 'f2' sorts before 'f10'.
    """
    def _piece(text):
        # Digit runs become ints; everything else stays text.
        return int(text) if text.isdigit() else text

    def _natural_key(key):
        return [_piece(part) for part in re.split('([0-9]+)', key)]

    return sorted(l, key=_natural_key)
def getvtulist(basename, dumpno, lastdump):
    """Return the naturally-sorted list of solution (p)vtu files for *basename*.

    Parameters
    ----------
    basename : str
        Basename of the output files (without extension).
    dumpno : int
        If >= 0, match only that dump index; otherwise match all dumps.
    lastdump : bool
        If True, keep only the highest-numbered dump.
    """
    vtus = []
    searchstring = basename + "_"
    if (dumpno < 0):
        searchstring = searchstring + "[0-9]*vtu"
    else:
        searchstring = searchstring + str(int(dumpno)) + ".*vtu"
    for fname in sorted_nicely(glob.glob(searchstring)):
        # Checkpoint files are not part of the regular dump sequence.
        if (not ('checkpoint' in fname)):
            vtus.append(fname)
    # Bug fix: guard against an empty match list; previously ``vtus[-1]``
    # raised an IndexError here instead of returning [] and letting the
    # caller raise its clean IOError with a helpful message.
    if (lastdump and vtus):
        vtus = [vtus[-1]]
    return vtus
def getvtk(filename):
    """Read *filename* (a .vtu/.pvtu file) and return the vtktools reader."""
    return vtktools.vtu(filename)
def getensightwriter(basename, static):
    """Create a vtkEnSightWriter for *basename*.

    The geometry is flagged as transient unless *static* is true
    (i.e. a fixed, non-adaptive mesh).
    """
    ensight_writer = vtk.vtkEnSightWriter()
    ensight_writer.SetFileName(basename)
    ensight_writer.SetTransientGeometry(not static)
    return ensight_writer
def addblockid(ug):
    """Attach a 'BlockId' cell array (all ones) to *ug* and return it.

    The EnSight writer requires every cell to carry a block id.
    """
    n_cells = int(ug.GetNumberOfCells())
    block_ids = vtk.vtkUnsignedIntArray()
    block_ids.SetNumberOfTuples(n_cells)
    block_ids.SetNumberOfComponents(1)
    block_ids.SetName("BlockId")
    # Every cell belongs to the single block 1.
    for cell_idx in range(n_cells):
        block_ids.SetValue(cell_idx, 1)
    ug.GetCellData().AddArray(block_ids)
    return ug
def removeghostlevel(reader, ug):
    """Strip the 'vtkGhostLevels' cell array from *ug*, if present.

    The array index is looked up via the reader's underlying grid reader.
    """
    for array_idx in range(reader.gridreader.GetNumberOfCellArrays()):
        if reader.gridreader.GetCellArrayName(array_idx) == "vtkGhostLevels":
            ug.GetCellData().RemoveArray(array_idx)
            break
    return ug
def writedata(writer, ug, i):
    """Feed unstructured grid *ug* to *writer* as timestep *i* and write it."""
    writer.SetNumberOfBlocks(1)
    writer.SetTimeStep(i)
    # VTK 6 replaced SetInput() with SetInputData().
    if vtk.vtkVersion.GetVTKMajorVersion() <= 5:
        writer.SetInput(ug)
    else:
        writer.SetInputData(ug)
    writer.Write()
def writecase(writer, ntimesteps):
    """Write the EnSight .case header file covering *ntimesteps* timesteps."""
    writer.WriteCaseFile(ntimesteps)
def main(args):
    """Convert the vtu dump series selected by *args* into EnSight format.

    Raises IOError when no matching vtu files are found; the caller turns
    that into a user-facing error message.
    """
    verbose = args.verbose
    static = args.static
    basename = args.basename
    dumpno = args.dumpno
    lastdump = args.lastdump
    # A single explicitly-chosen dump implies a fixed mesh for the writer.
    if (dumpno>=0 or lastdump): static = True
    # get list of vtu/pvtu files:
    vtus = getvtulist(basename, dumpno, lastdump)
    if (not vtus): raise IOError
    # prevent reading errors, if only one vtu file was found, set static to True:
    if (len(vtus) == 1): static = True
    # writer:
    writer = getensightwriter(basename, static)
    # write data for each vtu-file:
    for i in range(len(vtus)):
        if (verbose):
            print "processing vtu file: "+vtus[i]
        # get vtk object:
        reader = getvtk(vtus[i])
        # add block id (required by the ensight format):
        ug = addblockid(reader.ugrid)
        # check/remove ghostlevel array:
        ug = removeghostlevel(reader, ug)
        # write data:
        writedata(writer, ug, i)
    # write case file:
    writecase(writer, len(vtus))
if __name__ == "__main__":
    # get arguments:
    args = parse_args(sys.argv)
    try:
        main(args)
        print "EnSight output files have been written successfully."
    except IOError:
        # Raised by main() when no vtu files match the given basename.
        print "Error: Could not find any output files with a basename \""+args.basename+"\"."
    except:
        # NOTE(review): this bare except discards the original traceback,
        # making real failures hard to diagnose — consider re-raising as-is.
        raise Exception("Something went wrong. Aborting operation.")
|
rjferrier/fluidity
|
tools/vtu2ensight.py
|
Python
|
lgpl-2.1
| 5,336
|
[
"VTK"
] |
b87ce5e094b4641ca84aab7ad38b0ddbe3b58888a820bc1a37f8d1c6eb187ed9
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# Project URL configuration: static pages, the Django admin, allauth
# authentication and the users app; static() appends MEDIA_URL routes so
# uploaded media is served by Django itself in development.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),
    # User management
    url(r'^users/', include("cookiedjango.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
|
shanenater/shanecookie
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,282
|
[
"VisIt"
] |
a6f695922166a8c7f3124ee6d99b63eb86cec3b47519dd5af2ab85ca4f35aa07
|
from vtk import *
# Demo: select three vertices of a random graph, expand the selection by a
# two-step BFS, subtract the unexpanded selection, and highlight the result
# in an interactive graph layout view.
source = vtkRandomGraphSource()
source.SetNumberOfVertices(10)
#source.SetStartWithTree(True)
source.SetIncludeEdgeWeights(True)
source.SetGeneratePedigreeIds(True)
# selection source: picks vertices 0, 2 and 3 by index
sel1 = vtkSelectionSource()
#vtkSelectionNode :: SELECTIONS,GLOBALIDS,PEDIGREEIDS,VALUES,INDICES,
# FRUSTRUM,LOCATIONS,THRESHOLDS,BLOCKS
#sel1.SetContentType( vtkSelectionNode.VALUES )
## sel1.SetContentType( vtkSelectionNode.PEDIGREEIDS )
sel1.SetContentType( vtkSelectionNode.INDICES )
#vtkSelectionNode :: CELL,POINT,FIELD,VERTEX,EDGE,ROW
sel1.SetFieldType( vtkSelectionNode.VERTEX )
#sel1.SetArrayName("vertex id")
sel1.AddID(0, 0)
sel1.AddID(0, 2)
sel1.AddID(0, 3)
sel1.Update()
G = source.GetOutput()
# BFS distance 0: the selection itself, unexpanded.
selExp0 = vtkExpandSelectedGraph()
selExp0.SetInputConnection(0, sel1.GetOutputPort());
selExp0.SetGraphConnection( G.GetProducerPort() )
selExp0.SetBFSDistance(0)
selExp0.Update()
# BFS distance 2: the selection plus everything within two hops.
selExp1 = vtkExpandSelectedGraph()
selExp1.SetInputConnection(0, sel1.GetOutputPort());
selExp1.SetGraphConnection( G.GetProducerPort() )
selExp1.SetBFSDistance(2)
selExp1.Update()
# Keep only the BFS "ring": expanded selection minus the original vertices.
selExp1.GetOutput().Subtract( selExp0.GetOutput() )
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(G.GetProducerPort())
view.SetVertexLabelArrayName("vertex id")
view.SetVertexColorArrayName("vertex id")
view.SetVertexLabelVisibility(True)
#view.SetColorVertices(True)
view.SetEdgeColorArrayName("edge weight")
view.SetEdgeLabelArrayName("edge weight")
#view.SetEdgeLabelVisibility(True)
view.SetColorEdges(True)
view.SetLayoutStrategyToSimple2D()
view.SetVertexLabelFontSize(20)
# create selection link so the subtracted selection is shown in the view
annotationLink = vtkAnnotationLink()
view.GetRepresentation(0).SetAnnotationLink(annotationLink)
#annotationLink.SetCurrentSelection(sel1.GetOutput())
annotationLink.SetCurrentSelection(selExp1.GetOutput())
updater = vtkViewUpdater()
updater.AddAnnotationLink(annotationLink)
updater.AddView(view)
# set the theme on the view
theme = vtkViewTheme.CreateOceanTheme()
theme.SetLineWidth(2)
theme.SetPointSize(10)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
# Blocks until the interactive window is closed.
view.GetInteractor().Start()
|
spthaolt/VTK
|
Examples/Infovis/Python/graph_selection_subtract.py
|
Python
|
bsd-3-clause
| 2,194
|
[
"VTK"
] |
884cf97db05e4b3a8071a5975b9182a7f4d53fc4f9728530e8a5b92d8cd45620
|
#!/usr/bin/python2.7
import pysam
import glob
import os
from time import time
from time import sleep
import subprocess
import multiprocessing as mp
from optparse import OptionParser
# -------------------------------------------------
# Command line options (optparse): tool paths, telomeric-repeat threshold
# and parallelism settings. All values arrive as strings/defaults here and
# the numeric ones are coerced to int in check_arguments().
parser = OptionParser()
parser.add_option("--sambamba", dest="sambamba", help="Path to sambamba/samtools executable", default="sambamba")
parser.add_option("--bamdir", dest="bamdir", help="Path to directory containing BAM files", default=False)
parser.add_option("--outdir", dest="outdir", help="Path to directory to write output to", default="./telomeres/")
parser.add_option("--repsize", dest="repsize", help="Number of required matching 6mers (TTAGGG)", default=10)
parser.add_option("--s", dest="nr_samples", help="Number of Samples to analyse simulatiously", default=6)
parser.add_option("--t", dest="nr_cpus", help="Number of CPUs to use per sample", default=2)
(options, args) = parser.parse_args()
# -------------------------------------------------
def check_arguments():
    """Validate the global ``options``.

    Checks that the BAM directory exists, creates the output directory if
    missing, and coerces the numeric options to int. Returns True when the
    settings are usable, False otherwise.
    """
    if not os.path.exists(options.bamdir):
        print("Invalid BAM folder %s"%(options.bamdir))
        return False
    if not os.path.exists(options.outdir):
        print("Creating output folder %s"%(options.outdir))
        try:
            # Bug fix: ``os.makedir`` does not exist (AttributeError);
            # os.makedirs also creates any missing intermediate directories.
            os.makedirs(options.outdir)
        except OSError:
            print("Invalid / unable to create, output folder %s"%(options.outdir))
            return False
    # Option values arrive as strings when given on the command line.
    options.repsize = int(options.repsize)
    options.nr_samples = int(options.nr_samples)
    options.nr_cpus = int(options.nr_cpus)
    print("Running with the following settings:")
    print("------------------------------------")
    print(options)
    print("------------------------------------")
    return True
# -------------------------------------------------
def count_telomeric_reads(bamfile, q):
    """Extract telomeric reads from *bamfile*, count them, and put the
    result row on queue *q* (the row is also returned).

    The row is [sample name, total reads, telomeric reads, telomeric reads
    per 100,000 total reads], all as strings, ready for the listener to
    write as one tab-separated line.
    """
    # generate Telomere reads file name
    telofile = bamfile.replace(options.bamdir,options.outdir).replace(".bam","_TelomericReads.sam")
    # check if the file was already generated (acts as a resume cache)
    if not os.path.exists(telofile):
        # print("---- Processing BAM file: "+bamfile)
        # extract telomeric reads and write to file: grep for ``repsize``
        # consecutive TTAGGG 6-mers (or the reverse complement CCCTAA)
        # in the sambamba view stream
        cmd = options.sambamba+" view "+bamfile+" -t "+ str(options.nr_cpus) +" | LC_ALL=C grep -E \"" + "TTAGGG"*options.repsize +"|"+ "CCCTAA"*options.repsize + "\"" + " > " + telofile
        print("++++ Generating SAM file: "+telofile)
        os.system(cmd)
    # count total number of reads
    # NOTE(review): relies on the Python 2 builtin ``reduce`` and on
    # pysam.idxstats yielding tab-separated lines; the eval sums the
    # numeric columns (from index 2 on) of each idxstats row — verify this
    # against the pysam version in use before porting.
    total_rc = reduce(lambda x, y: x + y, [ eval('+'.join(l.rstrip('\n').split('\t')[2:]) ) for l in pysam.idxstats(bamfile) ])
    sleep(1)
    telomere_rc = 0
    if os.path.exists(telofile):
        # count number of telomeric reads by line count
        telomere_rc = sum(1 for line in open(telofile,'r'))
    else:
        print("Something went wrong with BAM file: "+bamfile)
    # return results: last field is telomeric reads per 100,000 total reads
    result = [str(bamfile.split("/")[-1].split("_")[0]), str(total_rc), str(telomere_rc), str((telomere_rc/(total_rc*1.0))*100000.0)]
    q.put(result)
    return(result)
# -------------------------------------------------
def listener(q):
    """Listen on queue *q* and append each result row to the output file.

    Runs until the sentinel string 'kill' is received. Receiving 'kill'
    while items are still queued is reported as an error. Every other
    message is a list of string fields written as one tab-separated line.
    """
    # Bug fix: the module does ``from time import time`` (binding a
    # function, so ``time.strftime`` raised AttributeError) and never
    # imports ``sys`` at all; import the real modules locally so both
    # strftime and stdout are available.
    import sys
    import time
    f = open(os.path.join(options.outdir, "TelomereCounts_"+time.strftime("%d_%m_%Y")+".txt"), 'wb')
    f.write('\t'.join(["#Sample","TotalReads","TelomericReads","NormalisedFraction"])+'\n')
    f.flush()
    while 1:
        m = q.get()
        #print(m)
        if m == 'kill':
            if not q.empty():
                # received kill signal without finishing all the processes
                sys.stdout.write('ERROR\n')
                break
            # received kill signal, finished all the processes, done
            sys.stdout.write('DONE\n')
            break
        f.write('\t'.join(m)+'\n')
        f.flush()
    f.close()
# -------------------------------------------------
def main():
currtime = time()
#Init Manager queue
manager = mp.Manager()
q = manager.Queue()
# Init worker pool
pool = mp.Pool(int(options.nr_samples))
#Init Listener
watcher = pool.apply_async(listener, (q,))
bamfiles = glob.glob(os.path.join(options.bamdir, "*.bam"))
jobs = []
#fire off workers
for bamfile in bamfiles:
baifile = bamfile+".bai"
# check if index file exists
if not os.path.exists(baifile):
print("No index file found for %s, indexing now"%(bamfile))
subprocess.call(options.sambamba, " index " + bamfile)
job = pool.apply_async(count_telomeric_reads, (bamfile, q))
jobs.append(job)
for job in jobs:
job.get()
# now we are done, kill the listener
q.put("kill")
pool.close()
pool.join()
print 'time elapsed:', time() - currtime
# -------------------------------------------------
# NOTE(review): these two prints sit outside the __main__ guard, so they
# also execute if this module is ever imported.
print("Starting Analysis")
if __name__ == '__main__':
    # check specified options before launching the pool
    if check_arguments():
        main()
    else:
        print("Error in provided arguments")
print("DONE")
|
jdeligt/Genetics
|
Count_Telomeric_Sequence_Reads.py
|
Python
|
mit
| 4,708
|
[
"pysam"
] |
fae7920047e6b8b4f52c5a8f7625f4be6674b6c7e8a577c8710a26e923e1cff7
|
from numbers import Integral
from functools import wraps
import numpy
from scipy import signal
def record(method):
    """Decorator: log calls to *method* in the instance's ``_record`` list.

    Only the outermost recorded call is logged. While it runs,
    ``_record_lock`` suppresses recording of any nested recorded calls, so
    replaying the record reproduces exactly the top-level call sequence.
    Each entry is a (wrapped_method, args, kwargs) tuple.
    """
    @wraps(method)
    def recorded_method(recorder, *args, **kwargs):
        if recorder._record_lock:
            # Nested call from inside another recorded method: pass through.
            return method(recorder, *args, **kwargs)
        # Outermost call: log it, guard against nested logging, then run.
        recorder._record_lock = True
        recorder._record.append((recorded_method, args, kwargs))
        outcome = method(recorder, *args, **kwargs)
        recorder._record_lock = False
        return outcome
    return recorded_method
class Noise(object):
    """A replayable noise generator.

    Samples live in ``self.samples``; every @record-decorated transform is
    logged in ``self._record`` so the entire pipeline can be re-run on a
    fresh random draw via ``resample()``.
    """
    def __init__(self, duration=1, sample_rate=44100):
        # duration: length in seconds; sample_rate: samples per second.
        self.duration = duration
        self.sample_rate = sample_rate
        # Lazily initialised by the first _mix_in() call.
        self.samples = None
        # Log of (method, args, kwargs) tuples for replay.
        self._record = []
        # True while inside a recorded method (suppresses nested logging).
        self._record_lock = False
    def copy(self):
        """Return a deep-enough copy: samples array and record are duplicated."""
        new = type(self)()
        new.duration = self.duration
        new.sample_rate = self.sample_rate
        new.samples = numpy.array(self.samples)
        new._record = list(self._record)
        new._record_lock = self._record_lock
        return new
    @property
    def n_original_samples(self):
        # Nominal sample count implied by duration * sample_rate.
        return int(self.duration * self.sample_rate)
    @property
    def n_samples(self):
        # Actual sample count once samples exist (filters can change it,
        # e.g. 'valid'-mode convolution); nominal count otherwise.
        if self.samples is not None:
            return len(self.samples)
        else:
            return self.n_original_samples
    def resample(self):
        """Return a new Noise produced by replaying this object's record
        on a fresh random draw (shares the record list with self)."""
        n = Noise(self.duration, self.sample_rate)
        n._record = self._record
        n._playback()
        return n
    def resample_array(self, n_cols, wav=True):
        """Stack *n_cols* independent resamplings as columns of a 2-D array.

        With ``wav=True`` the columns are int16 PCM data; otherwise raw
        float samples.
        """
        if wav:
            return numpy.hstack([self.resample().wav().reshape(-1, 1)
                                 for x in range(n_cols)])
        else:
            return numpy.hstack([self.resample().samples.reshape(-1, 1)
                                 for x in range(n_cols)])
    def _playback(self):
        """Re-execute every recorded call against self."""
        recorded_methods = self._record
        self._record = []  # Same record will be re-recorded
        for method, args, kwargs in recorded_methods:
            method(self, *args, **kwargs)
    @record
    def _mix_in(self, np_rand, integrate=False, proportion=1.0):
        """Take a series of random samples from a gaussian distribution.
        Replace the previous samples by default. If `0 < proportion < 1.0`,
        combine the gaussian samples with existing samples such that
        `final == (1.0 - proportion) * original + proportion * new)`.
        """
        # NOTE(review): the message says exclusive bounds but the check is
        # inclusive of 0 and 1.
        if not 0 <= proportion <= 1.0:
            raise ValueError('Proportion must be greater than zero '
                             'and less than one.')
        if self.samples is None:
            self.samples = numpy.zeros(self.n_samples)
        old_samples = self.samples
        # np_rand is expected to have the numpy (low, high, size) signature.
        self.samples = np_rand(-1, 1, self.n_samples)
        if integrate:
            self.integrate()
        # Cross-fade new noise with the previous samples, then re-center.
        self.samples *= proportion
        self.samples += old_samples * (1.0 - proportion)
        self.samples -= self.samples.mean()
        return self
    def white(self, proportion=1.0):
        """Take a series of random samples from a gaussian distribution."""
        return self._mix_in(numpy.random.normal, proportion=proportion)
    def laplacian(self, proportion=1.0):
        """Take a series of random samples from a laplacian distribution."""
        return self._mix_in(numpy.random.laplace, proportion=proportion)
    def brownian(self, proportion=1.0):
        """Integrate a series of random samples from a gaussian distribution."""
        return self._mix_in(numpy.random.normal,
                            integrate=True, proportion=proportion)
    @record
    def integrate(self):
        """'Integrate' (i.e. do a cumulative sum of) the current sample set.
        This is useful because integrating white noise produces Brownian
        noise.
        There's a wonderfully concrete way to understand that fact. Imagine
        you're a particle that wiggles, and that each time you move, you
        decide how to move by taking a sample from a Gaussian distribution.
        If you plotted all your movements by starting from zero each time,
        you'd get a white noise (Gaussian noise) pattern. But if you plotted
        the resulting path, you'd get a Brownian noise pattern. The
        resulting path is just the sum of the individual movements; hence
        integrating (summing) white noise produces Brownian noise.
        """
        # Subtract the mean to avoid positive or negative explosion
        self.samples -= self.samples.mean()
        self.samples = self.samples.cumsum()
        # Re-center the data
        self.samples -= self.samples.mean()
        return self
    @record
    def butter_high(self, freq=2 ** -8, order=1):
        """High-pass Butterworth filter at normalized cutoff *freq*
        (scipy convention: 1.0 corresponds to the Nyquist frequency)."""
        b, a = signal.butter(order, freq, 'high', analog=False)
        self.samples[:] = signal.lfilter(b, a, self.samples)
        return self
    @record
    def butter_low(self, freq=2 ** -1, order=1):
        """Low-pass Butterworth filter at normalized cutoff *freq*."""
        b, a = signal.butter(order, freq, 'low', analog=False)
        self.samples[:] = signal.lfilter(b, a, self.samples)
        return self
    # No need to record here, since this just calls recorded methods
    def butter_filter(self, lowpass=2 ** -1, highpass=2 ** -8, order=1):
        """Perform a high- and low-pass butterworth filter."""
        self.butter_low(lowpass, order)
        self.butter_high(highpass, order)
        return self
    @record
    def gauss_filter(self, sample_width=0.02, edge_policy='same'):
        """Convolve with a unit area Gaussian kernel. This is the
        same thing as a weighted moving average with a Gaussian
        weight curve.
        `sample_width` may be a floating point number in the range
        `(0, 1)`, representing the width of the kernel relative to
        the original sample set. It may also be an integer specifying
        the precise width of the kernel in samples.
        `edge_policy` determines the way the edges of the sample are
        handled; `'valid'` avoids zero-padding but reduces the total
        number of samples, and `'same'` uses zero-padding to guarantee
        that the number of samples remains the same.
        """
        if 0 < sample_width < 1:
            sample_width = int(self.duration * self.sample_rate * sample_width)
        elif not isinstance(sample_width, Integral) or sample_width <= 0:
            raise ValueError('sample_width must be a floating point number '
                             'in the range (0, 1], or an integer greater '
                             'than zero.')
        # Gaussian kernel spanning +/- 3 sigma, normalized to unit area.
        kernel = numpy.exp(-numpy.linspace(-3, 3, sample_width) ** 2)
        kernel /= kernel.sum()
        self.samples = signal.convolve(self.samples, kernel, edge_policy)
        return self
    @record
    def square_filter(self, sample_width=0.02, edge_policy='same'):
        """Convolve with a unit area constant kernel. This is the
        same thing as an unweighted moving average.
        `sample_width` may be a floating point number in the range
        `(0, 1)`, representing the width of the kernel relative to
        the original sample set. It may also be an integer specifying
        the precise width of the kernel in samples.
        `edge_policy` determines the way the edges of the sample are
        handled; `'valid'` avoids zero-padding but reduces the total
        number of samples, and `'same'` uses zero-padding to guarantee
        that the number of samples remains the same.
        """
        if 0 < sample_width < 1:
            sample_width = int(self.duration * self.sample_rate * sample_width)
        elif not isinstance(sample_width, Integral) or sample_width <= 0:
            raise ValueError('sample_width must be a floating point number '
                             'in the range (0, 1), or an integer greater '
                             'than zero.')
        kernel = numpy.ones(sample_width)
        kernel /= kernel.sum()
        self.samples = signal.convolve(self.samples, kernel, edge_policy)
        return self
    @record
    def autofilter(self, sample_width=0.002, mean=False, median=False):
        """Bin-sum the signal. This will attenuate noise that has
        no local correlation, while amplifying noise that does have
        local correlation."""
        if 0 < sample_width < 1:
            sample_width = int(self.duration * self.sample_rate * sample_width)
        elif not isinstance(sample_width, Integral) or sample_width <= 0:
            raise ValueError('sample_width must be a floating point number '
                             'in the range (0, 1), or an integer greater '
                             'than zero.')
        # Truncate so the samples reshape cleanly into fixed-width bins.
        truncate_len = len(self.samples) - len(self.samples) % sample_width
        bins = self.samples[:truncate_len].reshape(-1, sample_width)
        if mean:
            self.samples = bins.mean(axis=1).ravel()
        elif median:
            self.samples = numpy.median(bins, axis=1).ravel()
        else:
            # Default path: a trimmed mean per bin — sum, drop the min and
            # max, and average the remaining sample_width - 2 values.
            self.samples = bins.sum(axis=1).ravel()
            self.samples -= bins.max(axis=1).ravel()
            self.samples -= bins.min(axis=1).ravel()
            self.samples /= sample_width - 2
        return self
    @record
    def autoresample(self, sample_width=0.002, mean=False):
        """Bin-sum a "bootstrapped" resampling of the signal. I tried
        this, but it performed worse than `autofilter` above. Resampling
        strategies at the ensemble level might be more useful."""
        if 0 < sample_width < 1:
            sample_width = int(self.duration * self.sample_rate * sample_width)
        elif not isinstance(sample_width, Integral) or sample_width <= 0:
            raise ValueError('sample_width must be a floating point number '
                             'in the range (0, 1), or an integer greater '
                             'than zero.')
        truncate_len = len(self.samples) - len(self.samples) % sample_width
        bins = self.samples[:truncate_len].reshape(-1, sample_width)
        # Bootstrap each bin: draw with replacement and sum the draws.
        for i, bn in enumerate(bins):
            resample = numpy.random.choice(bn, (sample_width, sample_width))
            resample = resample.sum(axis=0)
            if mean:
                resample /= sample_width
            bins[i, :] = resample
        self.samples = bins.ravel()
        self.autofilter(sample_width, mean)
        return self
    @record
    def autoconvolve(self, edge_policy='same'):
        """Convolve the signal with itself."""
        self.samples = signal.convolve(self.samples, self.samples, edge_policy)
        return self
    @record
    def fade(self, sample_width=0.1):
        """Fade in at the beginning and out at the end. This softens the
        perceived 'click' at the beginning and end of the noise.
        """
        sample_width = int(self.duration * self.sample_rate * sample_width)
        self.samples[:sample_width] *= numpy.linspace(0, 1, sample_width)
        self.samples[-sample_width:] *= numpy.linspace(1, 0, sample_width)
        return self
    @record
    def scale(self):
        """Scale the current data by the absolute maximum. This maximizes
        volume without causing clipping artifacts.
        """
        self.samples -= self.samples.mean()
        self.samples /= numpy.max(numpy.abs(self.samples))
        self.samples *= 32767  # max amplitude at 16 bits per sample
        return self
    @record
    def amplify(self, amplitude=1):
        """Amplify the tone by the given amplitude.
        This may produce clipping.
        """
        self.samples *= amplitude
        return self
    def wav(self):
        """Return data suitable for saving as a PCM or .wav file."""
        # Scale a copy so this object's samples stay untouched.
        newobj = self.copy()
        newobj.scale()
        return numpy.int16(newobj.samples)
if __name__ == '__main__':
    # Smoke test: build Brownian noise, then replay the recorded pipeline
    # twice (each replay uses a fresh random draw, so the sums differ while
    # the record itself stays stable), and finally emit the PCM data.
    n = Noise().brownian()
    print(n.samples.sum())
    print(n._record)
    print(n.resample().samples.sum())
    print(n._record)
    print(n.resample().samples.sum())
    print(n._record)
    print(n.wav())
    print(n._record)
|
senderle/svd-noise
|
noise.py
|
Python
|
mit
| 12,253
|
[
"Gaussian"
] |
e6a342af94df5e754fecd91af9aa794a43cef8d63592f5230c7894a3f26ff181
|
"""This file implements the gym environment of minitaur.
"""
import math
import time
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import pybullet
from pybullet_envs.minitaur.envs import bullet_client
import pybullet_data
from pybullet_envs.minitaur.envs import minitaur
from pybullet_envs.minitaur.envs import minitaur_derpy
from pybullet_envs.minitaur.envs import minitaur_logging
from pybullet_envs.minitaur.envs import minitaur_logging_pb2
from pybullet_envs.minitaur.envs import minitaur_rainbow_dash
from pybullet_envs.minitaur.envs import motor
from pkg_resources import parse_version
NUM_MOTORS = 8
MOTOR_ANGLE_OBSERVATION_INDEX = 0
MOTOR_VELOCITY_OBSERVATION_INDEX = MOTOR_ANGLE_OBSERVATION_INDEX + NUM_MOTORS
MOTOR_TORQUE_OBSERVATION_INDEX = MOTOR_VELOCITY_OBSERVATION_INDEX + NUM_MOTORS
BASE_ORIENTATION_OBSERVATION_INDEX = MOTOR_TORQUE_OBSERVATION_INDEX + NUM_MOTORS
ACTION_EPS = 0.01
OBSERVATION_EPS = 0.01
RENDER_HEIGHT = 360
RENDER_WIDTH = 480
SENSOR_NOISE_STDDEV = minitaur.SENSOR_NOISE_STDDEV
DEFAULT_URDF_VERSION = "default"
DERPY_V0_URDF_VERSION = "derpy_v0"
RAINBOW_DASH_V0_URDF_VERSION = "rainbow_dash_v0"
NUM_SIMULATION_ITERATION_STEPS = 300
MINIATUR_URDF_VERSION_MAP = {
DEFAULT_URDF_VERSION: minitaur.Minitaur,
DERPY_V0_URDF_VERSION: minitaur_derpy.MinitaurDerpy,
RAINBOW_DASH_V0_URDF_VERSION: minitaur_rainbow_dash.MinitaurRainbowDash,
}
def convert_to_list(obj):
try:
iter(obj)
return obj
except TypeError:
return [obj]
class MinitaurGymEnv(gym.Env):
"""The gym environment for the minitaur.
It simulates the locomotion of a minitaur, a quadruped robot. The state space
include the angles, velocities and torques for all the motors and the action
space is the desired motor angle for each motor. The reward function is based
on how far the minitaur walks in 1000 steps and penalizes the energy
expenditure.
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 100}
def __init__(self,
urdf_root=pybullet_data.getDataPath(),
urdf_version=None,
distance_weight=1.0,
energy_weight=0.005,
shake_weight=0.0,
drift_weight=0.0,
distance_limit=float("inf"),
observation_noise_stdev=SENSOR_NOISE_STDDEV,
self_collision_enabled=True,
motor_velocity_limit=np.inf,
pd_control_enabled=False,
leg_model_enabled=True,
accurate_motor_model_enabled=False,
remove_default_joint_damping=False,
motor_kp=1.0,
motor_kd=0.02,
control_latency=0.0,
pd_latency=0.0,
torque_control_enabled=False,
motor_overheat_protection=False,
hard_reset=True,
on_rack=False,
render=False,
num_steps_to_log=1000,
action_repeat=1,
control_time_step=None,
env_randomizer=None,
forward_reward_cap=float("inf"),
reflection=True,
log_path=None):
"""Initialize the minitaur gym environment.
Args:
urdf_root: The path to the urdf data folder.
urdf_version: [DEFAULT_URDF_VERSION, DERPY_V0_URDF_VERSION,
RAINBOW_DASH_V0_URDF_VERSION] are allowable
versions. If None, DEFAULT_URDF_VERSION is used. DERPY_V0_URDF_VERSION
is the result of first pass system identification for derpy.
We will have a different URDF and related Minitaur class each time we
perform system identification. While the majority of the code of the
class remains the same, some code changes (e.g. the constraint location
might change). __init__() will choose the right Minitaur class from
different minitaur modules based on
urdf_version.
distance_weight: The weight of the distance term in the reward.
energy_weight: The weight of the energy term in the reward.
shake_weight: The weight of the vertical shakiness term in the reward.
drift_weight: The weight of the sideways drift term in the reward.
distance_limit: The maximum distance to terminate the episode.
observation_noise_stdev: The standard deviation of observation noise.
self_collision_enabled: Whether to enable self collision in the sim.
motor_velocity_limit: The velocity limit of each motor.
pd_control_enabled: Whether to use PD controller for each motor.
leg_model_enabled: Whether to use a leg motor to reparameterize the action
space.
accurate_motor_model_enabled: Whether to use the accurate DC motor model.
remove_default_joint_damping: Whether to remove the default joint damping.
motor_kp: proportional gain for the accurate motor model.
motor_kd: derivative gain for the accurate motor model.
control_latency: It is the delay in the controller between when an
observation is made at some point, and when that reading is reported
back to the Neural Network.
pd_latency: latency of the PD controller loop. PD calculates PWM based on
the motor angle and velocity. The latency measures the time between when
the motor angle and velocity are observed on the microcontroller and
when the true state happens on the motor. It is typically (0.001-
0.002s).
torque_control_enabled: Whether to use the torque control, if set to
False, pose control will be used.
motor_overheat_protection: Whether to shutdown the motor that has exerted
large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time
(OVERHEAT_SHUTDOWN_TIME). See ApplyAction() in minitaur.py for more
details.
hard_reset: Whether to wipe the simulation and load everything when reset
is called. If set to false, reset just place the minitaur back to start
position and set its pose to initial configuration.
on_rack: Whether to place the minitaur on rack. This is only used to debug
the walking gait. In this mode, the minitaur's base is hanged midair so
that its walking gait is clearer to visualize.
render: Whether to render the simulation.
num_steps_to_log: The max number of control steps in one episode that will
be logged. If the number of steps is more than num_steps_to_log, the
environment will still be running, but only first num_steps_to_log will
be recorded in logging.
action_repeat: The number of simulation steps before actions are applied.
control_time_step: The time step between two successive control signals.
env_randomizer: An instance (or a list) of EnvRandomizer(s). An
EnvRandomizer may randomize the physical property of minitaur, change
the terrrain during reset(), or add perturbation forces during step().
forward_reward_cap: The maximum value that forward reward is capped at.
Disabled (Inf) by default.
log_path: The path to write out logs. For the details of logging, refer to
minitaur_logging.proto.
Raises:
ValueError: If the urdf_version is not supported.
"""
# Set up logging.
self._log_path = log_path
self.logging = minitaur_logging.MinitaurLogging(log_path)
# PD control needs smaller time step for stability.
if control_time_step is not None:
self.control_time_step = control_time_step
self._action_repeat = action_repeat
self._time_step = control_time_step / action_repeat
else:
# Default values for time step and action repeat
if accurate_motor_model_enabled or pd_control_enabled:
self._time_step = 0.002
self._action_repeat = 5
else:
self._time_step = 0.01
self._action_repeat = 1
self.control_time_step = self._time_step * self._action_repeat
# TODO(b/73829334): Fix the value of self._num_bullet_solver_iterations.
self._num_bullet_solver_iterations = int(NUM_SIMULATION_ITERATION_STEPS / self._action_repeat)
self._urdf_root = urdf_root
self._self_collision_enabled = self_collision_enabled
self._motor_velocity_limit = motor_velocity_limit
self._observation = []
self._true_observation = []
self._objectives = []
self._objective_weights = [distance_weight, energy_weight, drift_weight, shake_weight]
self._env_step_counter = 0
self._num_steps_to_log = num_steps_to_log
self._is_render = render
self._last_base_position = [0, 0, 0]
self._distance_weight = distance_weight
self._energy_weight = energy_weight
self._drift_weight = drift_weight
self._shake_weight = shake_weight
self._distance_limit = distance_limit
self._observation_noise_stdev = observation_noise_stdev
self._action_bound = 1
self._pd_control_enabled = pd_control_enabled
self._leg_model_enabled = leg_model_enabled
self._accurate_motor_model_enabled = accurate_motor_model_enabled
self._remove_default_joint_damping = remove_default_joint_damping
self._motor_kp = motor_kp
self._motor_kd = motor_kd
self._torque_control_enabled = torque_control_enabled
self._motor_overheat_protection = motor_overheat_protection
self._on_rack = on_rack
self._cam_dist = 1.0
self._cam_yaw = 0
self._cam_pitch = -30
self._forward_reward_cap = forward_reward_cap
self._hard_reset = True
self._last_frame_time = 0.0
self._control_latency = control_latency
self._pd_latency = pd_latency
self._urdf_version = urdf_version
self._ground_id = None
self._reflection = reflection
self._env_randomizers = convert_to_list(env_randomizer) if env_randomizer else []
self._episode_proto = minitaur_logging_pb2.MinitaurEpisode()
if self._is_render:
self._pybullet_client = bullet_client.BulletClient(connection_mode=pybullet.GUI)
else:
self._pybullet_client = bullet_client.BulletClient()
if self._urdf_version is None:
self._urdf_version = DEFAULT_URDF_VERSION
self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)
self.seed()
self.reset()
observation_high = (self._get_observation_upper_bound() + OBSERVATION_EPS)
observation_low = (self._get_observation_lower_bound() - OBSERVATION_EPS)
action_dim = NUM_MOTORS
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.observation_space = spaces.Box(observation_low, observation_high)
self.viewer = None
self._hard_reset = hard_reset # This assignment need to be after reset()
def close(self):
  """Persist any recorded episode data and shut down the simulated robot."""
  # Only save the episode log if at least one control step was taken.
  has_steps = self._env_step_counter > 0
  if has_steps:
    self.logging.save_episode(self._episode_proto)
  self.minitaur.Terminate()
def add_env_randomizer(self, env_randomizer):
  # Register an additional randomizer; every registered randomizer is applied
  # on each reset() (randomize_env) and each step() (randomize_step).
  self._env_randomizers.append(env_randomizer)
def reset(self, initial_motor_angles=None, reset_duration=1.0):
  """Reset the simulation and return the first observation.

  Args:
    initial_motor_angles: If not None, the motor angles the robot settles
      into during the reset (forwarded to Minitaur.Reset).
    reset_duration: Time in seconds the reset pose is held before control
      resumes (forwarded to Minitaur.Reset).

  Returns:
    The initial observation (see _get_observation).
  """
  # Disable rendering while the world is rebuilt to avoid visual artifacts.
  self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_RENDERING, 0)
  if self._env_step_counter > 0:
    self.logging.save_episode(self._episode_proto)
  # Start a fresh episode log, preallocated for the expected number of steps.
  self._episode_proto = minitaur_logging_pb2.MinitaurEpisode()
  minitaur_logging.preallocate_episode_proto(self._episode_proto, self._num_steps_to_log)
  if self._hard_reset:
    # Hard reset: wipe the whole world and re-create the ground and robot.
    self._pybullet_client.resetSimulation()
    self._pybullet_client.setPhysicsEngineParameter(
        numSolverIterations=int(self._num_bullet_solver_iterations))
    self._pybullet_client.setTimeStep(self._time_step)
    self._ground_id = self._pybullet_client.loadURDF("%s/plane.urdf" % self._urdf_root)
    if (self._reflection):
      self._pybullet_client.changeVisualShape(self._ground_id, -1, rgbaColor=[1, 1, 1, 0.8])
      self._pybullet_client.configureDebugVisualizer(
          self._pybullet_client.COV_ENABLE_PLANAR_REFLECTION, self._ground_id)
    self._pybullet_client.setGravity(0, 0, -10)
    acc_motor = self._accurate_motor_model_enabled
    motor_protect = self._motor_overheat_protection
    if self._urdf_version not in MINIATUR_URDF_VERSION_MAP:
      raise ValueError("%s is not a supported urdf_version." % self._urdf_version)
    else:
      # Instantiate the robot class matching the requested URDF version.
      self.minitaur = (MINIATUR_URDF_VERSION_MAP[self._urdf_version](
          pybullet_client=self._pybullet_client,
          action_repeat=self._action_repeat,
          urdf_root=self._urdf_root,
          time_step=self._time_step,
          self_collision_enabled=self._self_collision_enabled,
          motor_velocity_limit=self._motor_velocity_limit,
          pd_control_enabled=self._pd_control_enabled,
          accurate_motor_model_enabled=acc_motor,
          remove_default_joint_damping=self._remove_default_joint_damping,
          motor_kp=self._motor_kp,
          motor_kd=self._motor_kd,
          control_latency=self._control_latency,
          pd_latency=self._pd_latency,
          observation_noise_stdev=self._observation_noise_stdev,
          torque_control_enabled=self._torque_control_enabled,
          motor_overheat_protection=motor_protect,
          on_rack=self._on_rack))
  # Soft reset path (and tail of hard reset): reposition the existing robot.
  self.minitaur.Reset(reload_urdf=False,
                      default_motor_angles=initial_motor_angles,
                      reset_time=reset_duration)
  # Loop over all env randomizers.
  for env_randomizer in self._env_randomizers:
    env_randomizer.randomize_env(self)
  self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)
  self._env_step_counter = 0
  self._last_base_position = [0, 0, 0]
  self._objectives = []
  self._pybullet_client.resetDebugVisualizerCamera(self._cam_dist, self._cam_yaw,
                                                   self._cam_pitch, [0, 0, 0])
  self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_RENDERING, 1)
  return self._get_observation()
def seed(self, seed=None):
  # Seed the environment's numpy RNG; returns the list of seeds actually
  # used, per the gym API contract.
  self.np_random, seed = seeding.np_random(seed)
  return [seed]
def _transform_action_to_motor_command(self, action):
  """Validate a leg-space action and convert it to motor commands.

  When the leg model is disabled, the action passes through unchanged.

  Raises:
    ValueError: If any action component falls outside the action bound
      (plus a small epsilon tolerance).
  """
  if not self._leg_model_enabled:
    return action
  lower = -self._action_bound - ACTION_EPS
  upper = self._action_bound + ACTION_EPS
  for index, component in enumerate(action):
    if not (lower <= component <= upper):
      raise ValueError("{}th action {} out of bounds.".format(index, component))
  return self.minitaur.ConvertFromLegModel(action)
def step(self, action):
  """Step forward the simulation, given the action.

  Args:
    action: A list of desired motor angles for eight motors.

  Returns:
    observations: The angles, velocities and torques of all motors.
    reward: The reward for the current state-action pair.
    done: Whether the episode has ended.
    info: A dictionary that stores diagnostic information.

  Raises:
    ValueError: The action dimension is not the same as the number of motors.
    ValueError: The magnitude of actions is out of bounds.
  """
  # Cache the base position before stepping so _reward() can measure progress.
  self._last_base_position = self.minitaur.GetBasePosition()
  if self._is_render:
    # Sleep, otherwise the computation takes less time than real time,
    # which will make the visualization like a fast-forward video.
    time_spent = time.time() - self._last_frame_time
    self._last_frame_time = time.time()
    time_to_sleep = self.control_time_step - time_spent
    if time_to_sleep > 0:
      time.sleep(time_to_sleep)
    base_pos = self.minitaur.GetBasePosition()
    # Keep the previous orientation of the camera set by the user.
    [yaw, pitch, dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11]
    self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch, base_pos)
  # Apply per-step randomization (e.g. perturbation forces) before acting.
  for env_randomizer in self._env_randomizers:
    env_randomizer.randomize_step(self)
  action = self._transform_action_to_motor_command(action)
  self.minitaur.Step(action)
  reward = self._reward()
  done = self._termination()
  if self._log_path is not None:
    # Record this transition into the episode proto for offline analysis.
    minitaur_logging.update_episode_proto(self._episode_proto, self.minitaur, action,
                                          self._env_step_counter)
  self._env_step_counter += 1
  if done:
    self.minitaur.Terminate()
  return np.array(self._get_observation()), reward, done, {}
def render(self, mode="rgb_array", close=False):
  """Render one camera frame tracking the robot base.

  Args:
    mode: Only "rgb_array" is supported; any other mode returns an empty
      array (gym convention).
    close: Unused; kept for gym API compatibility.

  Returns:
    An RGB image as a (RENDER_HEIGHT, RENDER_WIDTH, 3) numpy array.
  """
  if mode != "rgb_array":
    return np.array([])
  base_pos = self.minitaur.GetBasePosition()
  # Orbit camera aimed at the robot base using the env's fixed dist/yaw/pitch.
  view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
      cameraTargetPosition=base_pos,
      distance=self._cam_dist,
      yaw=self._cam_yaw,
      pitch=self._cam_pitch,
      roll=0,
      upAxisIndex=2)
  proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(fov=60,
                                                                 aspect=float(RENDER_WIDTH) /
                                                                 RENDER_HEIGHT,
                                                                 nearVal=0.1,
                                                                 farVal=100.0)
  (_, _, px, _, _) = self._pybullet_client.getCameraImage(
      width=RENDER_WIDTH,
      height=RENDER_HEIGHT,
      renderer=self._pybullet_client.ER_BULLET_HARDWARE_OPENGL,
      viewMatrix=view_matrix,
      projectionMatrix=proj_matrix)
  rgb_array = np.array(px)
  # Drop the alpha channel, keeping RGB only.
  rgb_array = rgb_array[:, :, :3]
  return rgb_array
def get_minitaur_motor_angles(self):
  """Get the minitaur's motor angles.

  Returns:
    A numpy array of motor angles.
  """
  start = MOTOR_ANGLE_OBSERVATION_INDEX
  return np.array(self._observation[start:start + NUM_MOTORS])
def get_minitaur_motor_velocities(self):
  """Get the minitaur's motor velocities.

  Returns:
    A numpy array of motor velocities.
  """
  start = MOTOR_VELOCITY_OBSERVATION_INDEX
  return np.array(self._observation[start:start + NUM_MOTORS])
def get_minitaur_motor_torques(self):
  """Get the minitaur's motor torques.

  Returns:
    A numpy array of motor torques.
  """
  start = MOTOR_TORQUE_OBSERVATION_INDEX
  return np.array(self._observation[start:start + NUM_MOTORS])
def get_minitaur_base_orientation(self):
  """Get the minitaur's base orientation, represented by a quaternion.

  Returns:
    A numpy array of minitaur's orientation.
  """
  # The quaternion occupies the tail of the observation vector.
  quaternion = self._observation[BASE_ORIENTATION_OBSERVATION_INDEX:]
  return np.array(quaternion)
def is_fallen(self):
  """Decide whether the minitaur has fallen.

  The robot counts as fallen when its base's up axis deviates too far from
  the world up axis (dot product below 0.85) or when the base is very low
  on the ground (height below 0.13 meter).

  Returns:
    Boolean value that indicates whether the minitaur has fallen.
  """
  quat = self.minitaur.GetBaseOrientation()
  rot_mat = self._pybullet_client.getMatrixFromQuaternion(quat)
  # The last column of the rotation matrix is the base's up direction.
  base_up = np.asarray(rot_mat[6:])
  world_up = np.asarray([0, 0, 1])
  tilted = np.dot(world_up, base_up) < 0.85
  base_height = self.minitaur.GetBasePosition()[2]
  return tilted or base_height < 0.13
def _termination(self):
  """End the episode when the robot falls or exceeds the distance limit."""
  position = self.minitaur.GetBasePosition()
  walked = math.sqrt(position[0]**2 + position[1]**2)
  return self.is_fallen() or walked > self._distance_limit
def _reward(self):
  """Compute the weighted multi-objective reward for the current state.

  Objectives, in order: forward progress, energy penalty, drift penalty and
  shake penalty. The scalar reward is their elementwise product with
  self._objective_weights, summed. The raw objective values are appended to
  self._objectives for later inspection via get_objectives().

  Returns:
    The scalar reward for the last step.
  """
  current_base_position = self.minitaur.GetBasePosition()
  # Forward progress along x since the last step.
  forward_reward = current_base_position[0] - self._last_base_position[0]
  # Cap the forward reward if a cap is set.
  forward_reward = min(forward_reward, self._forward_reward_cap)
  # Penalty for sideways translation.
  drift_reward = -abs(current_base_position[1] - self._last_base_position[1])
  # Penalty for sideways rotation of the body.
  orientation = self.minitaur.GetBaseOrientation()
  # Consistency fix: use the env's pybullet client (as is_fallen does)
  # instead of the module-level pybullet API.
  rot_matrix = self._pybullet_client.getMatrixFromQuaternion(orientation)
  local_up_vec = rot_matrix[6:]
  shake_reward = -abs(np.dot(np.asarray([1, 1, 0]), np.asarray(local_up_vec)))
  # Mechanical power |tau . omega| integrated over one simulation step.
  energy_reward = -np.abs(
      np.dot(self.minitaur.GetMotorTorques(),
             self.minitaur.GetMotorVelocities())) * self._time_step
  objectives = [forward_reward, energy_reward, drift_reward, shake_reward]
  weighted_objectives = [o * w for o, w in zip(objectives, self._objective_weights)]
  reward = sum(weighted_objectives)
  self._objectives.append(objectives)
  return reward
def get_objectives(self):
  # Raw per-step objective values [forward, energy, drift, shake] recorded
  # by _reward() since the last reset.
  return self._objectives

@property
def objective_weights(self):
  """Accessor for the weights for all the objectives.

  Returns:
    List of floating points that corresponds to weights for the objectives in
    the order that objectives are stored.
  """
  return self._objective_weights
def _get_observation(self):
  """Get observation of this environment, including noise and latency.

  The minitaur class maintains a history of true observations. Based on the
  latency, this function will find the observation at the right time,
  interpolate if necessary. Then Gaussian noise is added to this observation
  based on self.observation_noise_stdev.

  Returns:
    The noisy observation with latency.
  """
  # Concatenate angles, velocities, torques and the base quaternion into a
  # single flat list (same layout as _get_true_observation).
  sections = [
      self.minitaur.GetMotorAngles().tolist(),
      self.minitaur.GetMotorVelocities().tolist(),
      self.minitaur.GetMotorTorques().tolist(),
      list(self.minitaur.GetBaseOrientation()),
  ]
  self._observation = [value for section in sections for value in section]
  return self._observation
def _get_true_observation(self):
  """Get the noise-free observations of this environment.

  It includes the angles, velocities, torques and the orientation of the base.

  Returns:
    The observation list. observation[0:8] are motor angles. observation[8:16]
    are motor velocities, observation[16:24] are motor torques.
    observation[24:28] is the orientation of the base, in quaternion form.
  """
  sections = [
      self.minitaur.GetTrueMotorAngles().tolist(),
      self.minitaur.GetTrueMotorVelocities().tolist(),
      self.minitaur.GetTrueMotorTorques().tolist(),
      list(self.minitaur.GetTrueBaseOrientation()),
  ]
  self._true_observation = [value for section in sections for value in section]
  return self._true_observation
def _get_observation_upper_bound(self):
  """Get the upper bound of the observation.

  Returns:
    The upper bound of an observation. See GetObservation() for the details
    of each element of an observation.
  """
  num_motors = self.minitaur.num_motors
  bound = np.zeros(self._get_observation_dimension())
  bound[:num_motors] = math.pi  # Joint angle.
  bound[num_motors:2 * num_motors] = motor.MOTOR_SPEED_LIMIT  # Joint velocity.
  bound[2 * num_motors:3 * num_motors] = motor.OBSERVED_TORQUE_LIMIT  # Joint torque.
  bound[3 * num_motors:] = 1.0  # Quaternion of base orientation.
  return bound
def _get_observation_lower_bound(self):
  """Get the lower bound of the observation."""
  # The bounds are symmetric around zero.
  return -self._get_observation_upper_bound()

def _get_observation_dimension(self):
  """Get the length of the observation list.

  Returns:
    The length of the observation list.
  """
  return len(self._get_observation())
# Backwards compatibility: gym versions older than 0.9.6 dispatch to the
# underscore-prefixed method names, so alias them to the public methods.
if parse_version(gym.__version__) < parse_version('0.9.6'):
  _render = render
  _reset = reset
  _seed = seed
  _step = step
def set_time_step(self, control_step, simulation_step=0.001):
  """Sets the time step of the environment.

  Args:
    control_step: The time period (in seconds) between two adjacent control
      actions are applied.
    simulation_step: The simulation time step in PyBullet. By default, the
      simulation step is 0.001s, which is a good trade-off between simulation
      speed and accuracy.

  Raises:
    ValueError: If the control step is smaller than the simulation step.
  """
  if control_step < simulation_step:
    raise ValueError("Control step should be larger than or equal to simulation step.")
  self.control_time_step = control_step
  self._time_step = simulation_step
  self._action_repeat = int(round(control_step / simulation_step))
  # Bug fix: the division yields a float, but pybullet's numSolverIterations
  # expects an integer; reset() already applies int() to this attribute, so
  # cast here as well for consistency.
  self._num_bullet_solver_iterations = int(NUM_SIMULATION_ITERATION_STEPS / self._action_repeat)
  self._pybullet_client.setPhysicsEngineParameter(
      numSolverIterations=self._num_bullet_solver_iterations)
  self._pybullet_client.setTimeStep(self._time_step)
  self.minitaur.SetTimeSteps(action_repeat=self._action_repeat, simulation_step=self._time_step)
@property
def pybullet_client(self):
  # The BulletClient instance backing this environment.
  return self._pybullet_client

@property
def ground_id(self):
  # Body id of the ground plane in the simulation.
  return self._ground_id

@ground_id.setter
def ground_id(self, new_ground_id):
  # Allow randomizers/wrappers to swap in a different ground body.
  self._ground_id = new_ground_id

@property
def env_step_counter(self):
  # Number of control steps taken since the last reset.
  return self._env_step_counter
|
MTASZTAKI/ApertusVR
|
plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env.py
|
Python
|
mit
| 25,379
|
[
"Gaussian"
] |
cdacbad7be157b8fcceccefdc74e580658ec425f635fd12c3da728115408bc1f
|
# Copyright 2022 The Scenic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COCO evaluation metrics based on pycocotools.
Implementation is based on
https://github.com/google/flax/blob/ac5e46ed448f4c6801c35d15eb15f4638167d8a1/examples/retinanet/coco_eval.py
"""
import collections
import contextlib
import functools
import io
import json
import os
import tempfile
from typing import Any, Dict, List, Optional, Set
import zipfile
from absl import logging
import jax
import numpy as np
import PIL
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tensorflow.io import gfile
# Default annotation/category files shipped in the 'data' directory next to
# this module.
COCO_ANNOTATIONS_PATH = os.path.join(
    os.path.dirname(__file__),
    'data',
    'instances_val2017.json')
PANOPTIC_ANNOTATIONS_PATH = os.path.join(
    os.path.dirname(__file__),
    'data',
    'panoptic_val2017.json')
PANOPTIC_CATEGORIES_PATH = os.path.join(
    os.path.dirname(__file__),
    'data',
    'panoptic_coco_categories.json')
PANOPTIC_ANNOTATIONS_DIR = (
    'panoptic_annotations_trainval2017')


@functools.lru_cache(maxsize=1)
def _load_json(path):
  # Cache the (large) annotation JSON so repeated reloads of the same file
  # are free.
  return json.load(gfile.GFile(path, 'r'))
class UniversalCOCO(COCO):
  """Extends the COCO API to (optionally) support panoptic annotations."""

  def __init__(self, annotation_file: Optional[str] = None):
    """Constructor of Microsoft COCO helper class.

    Args:
      annotation_file: path to annotation file.
    """
    self.annotation_file = annotation_file
    self.reload_ground_truth()

  def reload_ground_truth(self, included_image_ids: Optional[List[int]] = None):
    """Reload GT annotations, optionally just a subset.

    Args:
      included_image_ids: If not None, only annotations for these image IDs
        are kept; otherwise the full annotation file is loaded.
    """
    # Reinitialize the COCO index structures before loading.
    self.dataset, self.anns, self.cats, self.imgs = {}, {}, {}, {}
    self.imgToAnns = collections.defaultdict(list)  # pylint: disable=invalid-name
    self.catToImgs = collections.defaultdict(list)  # pylint: disable=invalid-name
    if self.annotation_file is not None:
      dataset = _load_json(self.annotation_file)
      assert isinstance(
          dataset, dict), 'annotation file format {} not supported'.format(
              type(dataset))
      if 'segments_info' in dataset['annotations'][0]:
        # Dataset is in panoptic format. Translate to standard format:
        dataset['annotations'] = _panoptic_to_standard_annotations(
            dataset['annotations'])
      if 'iscrowd' not in dataset['annotations'][0]:
        # Dataset is in LVIS format. Add missing 'iscrowd' field:
        for image_annotation in dataset['annotations']:
          image_annotation['iscrowd'] = 0
      # Subselect included image IDs:
      if included_image_ids is not None:
        included_image_ids = set(included_image_ids)
        # Fix: logging.warn is a deprecated alias of logging.warning.
        logging.warning('Using only a subset of validation set: %s of %s images.',
                        len(included_image_ids), len(dataset['images']))
        dataset['images'] = [
            a for a in dataset['images'] if a['id'] in included_image_ids]
        dataset['annotations'] = [
            a for a in dataset['annotations']
            if a['image_id'] in included_image_ids]
      self.dataset = dataset
      self.createIndex()
def _panoptic_to_standard_annotations(annotations):
"""Translates panoptic annotations to standard annotations.
Panoptic annotations have one extra level of nesting compared to
detection annotations (see https://cocodataset.org/#format-data), which
we remove here. Also see
pycocotools/panopticapi/converters/panoptic2detection_coco_format.py
for reference regarding the conversion. Here, we do not convert the
segmentation masks, since they are not required for the detection
metric.
Args:
annotations: Dict with panoptic annotations loaded from JSON.
Returns:
Updated annotations dict in standard COCO format.
"""
object_annotations = []
for image_annotation in annotations:
for object_annotation in image_annotation['segments_info']:
object_annotations.append({
'image_id': image_annotation['image_id'],
'id': object_annotation['id'],
'category_id': object_annotation['category_id'],
'iscrowd': object_annotation['iscrowd'],
'bbox': object_annotation['bbox'],
'area': object_annotation['area'],
})
return object_annotations
class DetectionEvaluator():
  """Main evaluator class.

  Accumulates per-image detections via add_annotation() and computes the
  standard COCO detection metrics over them via compute_coco_metrics().
  """

  def __init__(self,
               annotations_loc: Optional[str] = None,
               threshold: float = 0.05,
               disable_output: bool = True):
    """Initializes a DetectionEvaluator object.

    Args:
      annotations_loc: a path towards the .json files storing the COCO/2014
        ground truths for object detection. To get the annotations, please
        download the relevant files from https://cocodataset.org/#download
      threshold: a scalar which indicates the lower threshold (inclusive) for
        the scores. Anything below this value will be removed.
      disable_output: if True disables the output produced by the COCO API
    """
    self.annotations = []
    self.annotated_img_ids = []
    self.threshold = threshold
    self.disable_output = disable_output
    if annotations_loc is None:
      annotations_loc = COCO_ANNOTATIONS_PATH
    if self.disable_output:
      # Silence the verbose stdout prints the COCO API emits while loading.
      with open(os.devnull, 'w') as devnull:
        with contextlib.redirect_stdout(devnull):
          self.coco = UniversalCOCO(annotations_loc)
    else:
      self.coco = UniversalCOCO(annotations_loc)
    # Dict to translate model labels to COCO category IDs:
    self.label_to_coco_id = {
        i: cat['id'] for i, cat in enumerate(self.coco.dataset['categories'])}

  @staticmethod
  def construct_result_dict(coco_metrics):
    """Packs the COCOEval results into a dictionary.

    Args:
      coco_metrics: an array of length 12, as returned by `COCOeval.summarize()`

    Returns:
      A dictionary which contains all the COCO metrics. For more details,
      visit: https://cocodataset.org/#detection-eval.
    """
    return {
        'AP': coco_metrics[0],
        'AP_50': coco_metrics[1],
        'AP_75': coco_metrics[2],
        'AP_small': coco_metrics[3],
        'AP_medium': coco_metrics[4],
        'AP_large': coco_metrics[5],
        'AR_max_1': coco_metrics[6],
        'AR_max_10': coco_metrics[7],
        'AR_max_100': coco_metrics[8],
        'AR_small': coco_metrics[9],
        'AR_medium': coco_metrics[10],
        'AR_large': coco_metrics[11]
    }

  def clear_annotations(self):
    """Clears the annotations collected in this object.

    It is important to call this method either at the end or at the beginning
    of a new evaluation round (or both). Otherwise, previous model inferences
    will skew the results due to residual annotations.
    """
    self.annotations.clear()
    self.annotated_img_ids.clear()

  def extract_classifications(self, bboxes, scores):
    """Extracts the label for each bbox, and sorts the results by score.

    More specifically, after extracting each bbox's label, the bboxes and
    scores are sorted in descending order based on score. The scores which fall
    below `threshold` are removed.

    Args:
      bboxes: a matrix of the shape (|B|, 4), where |B| is the number of
        bboxes; each row contains the `[x1, y1, x2, y2]` of the bbox
      scores: a matrix of the shape (|B|, K), where `K` is the number of
        classes in the object detection task

    Returns:
      A tuple consisting of the bboxes, a vector of length |B| containing
      the label of each of the anchors, and a vector of length |B| containing
      the label score. All elements are sorted in descending order relative
      to the score.
    """
    # Extract the labels and max score for each anchor
    labels = np.argmax(scores, axis=1)
    # Get the score associated to each anchor's label
    scores = scores[np.arange(labels.shape[0]), labels]
    # Apply the threshold
    kept_idx = np.where(scores >= self.threshold)[0]
    scores = scores[kept_idx]
    labels = labels[kept_idx]
    bboxes = bboxes[kept_idx]
    # Sort everything in descending order and return
    sorted_idx = np.flip(np.argsort(scores, axis=0))
    scores = scores[sorted_idx]
    labels = labels[sorted_idx]
    bboxes = bboxes[sorted_idx]
    return bboxes, labels, scores

  def add_annotation(self, bboxes, scores, img_id):
    """Add a single inference example as COCO annotation for later evaluation.

    Labels should not include a background/padding class, but only valid object
    classes.

    Note that this method raises an exception if the `threshold` is too
    high and thus eliminates all detections.

    Args:
      bboxes: [num_objects, 4] array of bboxes in COCO format [x, y, w, h] in
        absolute image coorinates.
      scores: [num_objects, num_classes] array of scores (softmax outputs).
      img_id: scalar COCO image ID.
    """
    # Get the sorted bboxes, labels and scores (threshold is applied here):
    i_bboxes, i_labels, i_scores = self.extract_classifications(
        bboxes, scores)
    if not i_bboxes.size:
      raise ValueError('All objects were thresholded out.')
    # Iterate through the thresholded predictions and pack them in COCO format:
    for bbox, label, score in zip(i_bboxes, i_labels, i_scores):
      single_classification = {
          'image_id': img_id,
          'category_id': self.label_to_coco_id[label],
          'bbox': bbox.tolist(),
          'score': score
      }
      self.annotations.append(single_classification)
      self.annotated_img_ids.append(img_id)

  def get_annotations_and_ids(self):
    """Returns copies of `self.annotations` and `self.annotated_img_ids`.

    Returns:
      Copies of `self.annotations` and `self.annotated_img_ids`.
    """
    return self.annotations.copy(), self.annotated_img_ids.copy()

  def set_annotations_and_ids(self, annotations, ids):
    """Sets the `self.annotations` and `self.annotated_img_ids`.

    This method should only be used when trying to compute the metrics across
    hosts, where one host captures the data from everyone in an effort to
    produce the entire dataset metrics.

    Args:
      annotations: the new `annotations`
      ids: the new `annotated_img_ids`
    """
    self.annotations = annotations
    self.annotated_img_ids = ids

  def compute_coco_metrics(self, clear_annotations=False):
    """Compute the COCO metrics for the collected annotations.

    Args:
      clear_annotations: if True, clears the `self.annotations`
        parameter after obtaining the COCO metrics

    Returns:
      The COCO metrics as a dictionary, defining the following entries:
      ```
      Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ]
      Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ]
      Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ]
      Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]
      Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]
      Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]
      Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ]
      Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ]
      Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ]
      Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ]
      Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]
      Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ]
      ```
    """
    def _run_eval():
      # Create prediction object for producing mAP metric values
      pred_object = self.coco.loadRes(self.annotations)
      # Compute mAP
      coco_eval = COCOeval(self.coco, pred_object, 'bbox')
      # Restrict evaluation to the images we actually annotated.
      coco_eval.params.imgIds = self.annotated_img_ids
      coco_eval.evaluate()
      coco_eval.accumulate()
      coco_eval.summarize()
      return coco_eval

    if self.disable_output:
      # Swallow the COCO API's summary prints.
      with open(os.devnull, 'w') as devnull:
        with contextlib.redirect_stdout(devnull):
          coco_eval = _run_eval()
    else:
      coco_eval = _run_eval()
    # Clear annotations if requested
    if clear_annotations:
      self.clear_annotations()
    # Pack the results
    return self.construct_result_dict(coco_eval.stats)
|
google-research/scenic
|
scenic/dataset_lib/coco_dataset/coco_eval.py
|
Python
|
apache-2.0
| 12,814
|
[
"VisIt"
] |
1b5300b1f7ba03359828c478fd17f7e8398eaf93231e0fb6f86e1416afdc8ad1
|
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
'''
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instructions on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
"ansible_host": "XXX.XXX.XXX.XXX",
"computer_name": "computer_name2",
"fqdn": null,
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
"image": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "latest"
},
"location": "westus",
"mac_address": "00-0D-3A-31-2C-EC",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
"network_security_group": null,
"network_security_group_id": null,
"os_disk": {
"name": "object-name",
"operating_system_type": "Linux"
},
"plan": null,
"powerstate": "running",
"private_ip": "172.26.3.6",
"private_ip_alloc_method": "Static",
"provisioning_state": "Succeeded",
"public_ip": "XXX.XXX.XXX.XXX",
"public_ip_alloc_method": "Static",
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
"public_ip_name": "object-name",
"resource_group": "galaxy-production",
"security_group": "object-name",
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
"tags": {
"db": "database"
},
"type": "Microsoft.Compute/virtualMachines",
"virtual_machine_size": "Standard_DS4"
}
Groups
------
When run in --list mode, instances are grouped by the following categories:
- azure
- location
- resource_group
- security_group
- tag key
- tag key_value
Control groups using azure_rm.ini or set environment variables:
AZURE_GROUP_BY_RESOURCE_GROUP=yes
AZURE_GROUP_BY_LOCATION=yes
AZURE_GROUP_BY_SECURITY_GROUP=yes
AZURE_GROUP_BY_TAG=yes
Select hosts within specific resource groups by assigning a comma separated list to:
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
Select hosts for specific tag key by assigning a comma separated list of tag keys to:
AZURE_TAGS=key1,key2,key3
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
----------------------
As mentioned above you can control execution using environment variables or an .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. This provides you with the flexibility of copying and customizing this
script and having matching .ini files. Go forth and customize your Azure inventory!
Powerstate:
-----------
The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
up. If the value is anything other than 'running', the machine is down, and will be unreachable.
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm_inventory.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm_inventory.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm_inventory.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
Company: Ansible by Red Hat
Version: 1.0.0
'''
import argparse
import ConfigParser
import json
import os
import re
import sys
from os.path import expanduser
HAS_AZURE = True
HAS_AZURE_EXC = None
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.network_management_client import NetworkManagementClient,\
NetworkManagementClientConfiguration
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient,\
ResourceManagementClientConfiguration
from azure.mgmt.compute.compute_management_client import ComputeManagementClient,\
ComputeManagementClientConfiguration
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
# Maps each supported credential parameter to the environment variable that
# can supply it (command line args take precedence over these).
AZURE_CREDENTIAL_ENV_MAPPING = dict(
    profile='AZURE_PROFILE',
    subscription_id='AZURE_SUBSCRIPTION_ID',
    client_id='AZURE_CLIENT_ID',
    secret='AZURE_SECRET',
    tenant='AZURE_TENANT',
    ad_user='AZURE_AD_USER',
    password='AZURE_PASSWORD'
)
# Maps each inventory-behavior setting (grouping, filtering, powerstate) to
# its environment-variable override.
AZURE_CONFIG_SETTINGS = dict(
    resource_groups='AZURE_RESOURCE_GROUPS',
    tags='AZURE_TAGS',
    include_powerstate='AZURE_INCLUDE_POWERSTATE',
    group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
    group_by_location='AZURE_GROUP_BY_LOCATION',
    group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
    group_by_tag='AZURE_GROUP_BY_TAG'
)
# Minimum Azure compute API version required by this script.
AZURE_MIN_VERSION = "2016-03-30"
def azure_id_to_dict(id):
    """Parse an Azure resource id into a dict of adjacent path segments.

    The id '/a/b/c/d' yields {'a': 'b', 'b': 'c', 'c': 'd'}: every path
    segment maps to the segment that follows it, which supports lookups
    like result['resourceGroups'] or result['virtualMachines'].

    Args:
        id: Azure resource id string; a leading '/' is stripped. (The
            parameter name shadows the builtin id(); kept for interface
            compatibility.)

    Returns:
        Dict mapping each path segment to its successor.
    """
    pieces = re.sub(r'^\/', '', id).split('/')
    # Idiomatic replacement for the original manual index loop: pair each
    # segment with the one that follows it.
    return dict(zip(pieces, pieces[1:]))
class AzureRM(object):
    """Thin wrapper around the Azure SDK management clients.

    Resolves credentials (CLI args -> environment -> ~/.azure/credentials
    profile), authenticates with either a service principal or an AD
    user/password, and lazily constructs the network, resource and compute
    management clients.
    """

    def __init__(self, args):
        # `args` is the argparse.Namespace built by AzureInventory._parse_cli_args.
        self._args = args
        self._compute_client = None
        self._resource_client = None
        self._network_client = None
        self.debug = False
        if args.debug:
            self.debug = True

        self.credentials = self._get_credentials(args)
        if not self.credentials:
            self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                      "or define a profile in ~/.azure/credentials.")

        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        # Prefer service-principal auth when client_id/secret/tenant are all
        # present; otherwise fall back to AD user/password.
        if self.credentials.get('client_id') is not None and \
           self.credentials.get('secret') is not None and \
           self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'])
        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password.")

    def log(self, msg):
        # Debug-only logging to stdout.
        if self.debug:
            print (msg + u'\n')

    def fail(self, msg):
        # Fatal errors surface as plain exceptions; AzureInventory.__init__
        # converts them into sys.exit().
        raise Exception(msg)

    def _get_profile(self, profile="default"):
        """Read one [profile] section from ~/.azure/credentials; return a dict
        or None when no usable identity keys were found."""
        path = expanduser("~")
        path += "/.azure/credentials"
        try:
            config = ConfigParser.ConfigParser()
            config.read(path)
        except Exception as exc:
            self.fail("Failed to access {0}. Check that the file exists and you have read "
                      "access. {1}".format(path, str(exc)))
        credentials = dict()
        for key in AZURE_CREDENTIAL_ENV_MAPPING:
            try:
                credentials[key] = config.get(profile, key, raw=True)
            except:
                # Keys missing from the profile are simply omitted.
                pass

        if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
            return credentials

        return None

    def _get_env_credentials(self):
        """Build a credentials dict from the AZURE_* environment variables."""
        env_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
            env_credentials[attribute] = os.environ.get(env_variable, None)

        if env_credentials['profile'] is not None:
            # AZURE_PROFILE points at a section of ~/.azure/credentials.
            credentials = self._get_profile(env_credentials['profile'])
            return credentials

        if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
            return env_credentials

        return None

    def _get_credentials(self, params):
        # Get authentication credentials.
        # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
        self.log('Getting credentials')

        arg_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
            arg_credentials[attribute] = getattr(params, attribute)

        # try module params
        if arg_credentials['profile'] is not None:
            self.log('Retrieving credentials with profile parameter.')
            credentials = self._get_profile(arg_credentials['profile'])
            return credentials

        if arg_credentials['client_id'] is not None:
            self.log('Received credentials from parameters.')
            return arg_credentials

        # try environment
        env_credentials = self._get_env_credentials()
        if env_credentials:
            self.log('Received credentials from env.')
            return env_credentials

        # try default profile from ~./azure/credentials
        default_credentials = self._get_profile()
        if default_credentials:
            self.log('Retrieved default profile credentials from ~/.azure/credentials.')
            return default_credentials

        return None

    def _register(self, key):
        try:
            # We have to perform the one-time registration here. Otherwise, we receive an error the first
            # time we attempt to use the requested client.
            resource_client = self.rm_client
            resource_client.providers.register(key)
        except Exception as exc:
            self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))

    @property
    def network_client(self):
        # Lazily constructed; registers the Microsoft.Network provider once.
        self.log('Getting network client')
        if not self._network_client:
            self._network_client = NetworkManagementClient(
                NetworkManagementClientConfiguration(self.azure_credentials, self.subscription_id))
            self._register('Microsoft.Network')
        return self._network_client

    @property
    def rm_client(self):
        # Lazily constructed resource manager client (no provider registration).
        self.log('Getting resource manager client')
        if not self._resource_client:
            self._resource_client = ResourceManagementClient(
                ResourceManagementClientConfiguration(self.azure_credentials, self.subscription_id))
        return self._resource_client

    @property
    def compute_client(self):
        # Lazily constructed; registers the Microsoft.Compute provider once.
        self.log('Getting compute client')
        if not self._compute_client:
            self._compute_client = ComputeManagementClient(
                ComputeManagementClientConfiguration(self.azure_credentials, self.subscription_id))
            self._register('Microsoft.Compute')
        return self._compute_client
class AzureInventory(object):
    """Builds and prints an Ansible dynamic-inventory JSON document for the
    virtual machines of an Azure subscription.

    Construction does all the work: parse CLI args, authenticate via AzureRM,
    apply .ini/environment settings, fetch VMs, group them, print JSON and
    sys.exit(0).
    """

    def __init__(self):
        self._args = self._parse_cli_args()

        try:
            rm = AzureRM(self._args)
        except Exception as e:
            sys.exit("{0}".format(str(e)))

        self._compute_client = rm.compute_client
        self._network_client = rm.network_client
        self._resource_client = rm.rm_client
        # Lazy cache: resource_group -> {network_interface.id -> {name, id}}.
        self._security_groups = None

        # Defaults; may be overridden by the .ini file, environment variables
        # (_get_settings) and then by CLI arguments below.
        self.resource_groups = []
        self.tags = None
        self.replace_dash_in_groups = False
        self.group_by_resource_group = True
        self.group_by_location = True
        self.group_by_security_group = True
        self.group_by_tag = True
        self.include_powerstate = True

        # Skeleton of the Ansible dynamic-inventory structure.
        self._inventory = dict(
            _meta=dict(
                hostvars=dict()
            ),
            azure=[]
        )

        self._get_settings()

        if self._args.resource_groups:
            self.resource_groups = self._args.resource_groups.split(',')

        if self._args.tags:
            self.tags = self._args.tags.split(',')

        if self._args.no_powerstate:
            self.include_powerstate = False

        self.get_inventory()
        print (self._json_format_dict(pretty=self._args.pretty))
        sys.exit(0)

    def _parse_cli_args(self):
        # Parse command line arguments
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file for an Azure subscription')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--debug', action='store_true', default=False,
                            help='Send debug messages to STDOUT')
        parser.add_argument('--host', action='store',
                            help='Get all information about an instance')
        parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty print JSON output(default: False)')
        parser.add_argument('--profile', action='store',
                            help='Azure profile contained in ~/.azure/credentials')
        parser.add_argument('--subscription_id', action='store',
                            help='Azure Subscription Id')
        parser.add_argument('--client_id', action='store',
                            help='Azure Client Id ')
        parser.add_argument('--secret', action='store',
                            help='Azure Client Secret')
        parser.add_argument('--tenant', action='store',
                            help='Azure Tenant Id')
        parser.add_argument('--ad-user', action='store',
                            help='Active Directory User')
        parser.add_argument('--password', action='store',
                            help='password')
        parser.add_argument('--resource-groups', action='store',
                            help='Return inventory for comma separated list of resource group names')
        parser.add_argument('--tags', action='store',
                            help='Return inventory for comma separated list of tag key:value pairs')
        parser.add_argument('--no-powerstate', action='store_true', default=False,
                            help='Do not include the power state of each virtual host')
        return parser.parse_args()

    def get_inventory(self):
        """Fetch VMs (per requested resource group, or subscription-wide),
        optionally filter by --host / tags, and load them into the inventory."""
        if len(self.resource_groups) > 0:
            # get VMs for requested resource groups
            for resource_group in self.resource_groups:
                try:
                    virtual_machines = self._compute_client.virtual_machines.list(resource_group)
                except Exception as exc:
                    sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group,
                                                                                                    str(exc)))
                if self._args.host or self.tags:
                    selected_machines = self._selected_machines(virtual_machines)
                    self._load_machines(selected_machines)
                else:
                    self._load_machines(virtual_machines)
        else:
            # get all VMs within the subscription
            try:
                virtual_machines = self._compute_client.virtual_machines.list_all()
            except Exception as exc:
                sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))

            # NOTE(review): `self.tags > 0` compares a list/None against an int
            # (only legal in Python 2); the branch above uses the plain truth
            # test `self.tags` - presumably that was intended here too.
            if self._args.host or self.tags > 0:
                selected_machines = self._selected_machines(virtual_machines)
                self._load_machines(selected_machines)
            else:
                self._load_machines(virtual_machines)

    def _load_machines(self, machines):
        """Build the hostvars dict for each VM (network, IPs, OS, power state)
        and register it in the inventory via _add_host."""
        for machine in machines:
            id_dict = azure_id_to_dict(machine.id)

            #TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
            #       fixed, we should remove the .lower(). Opened Issue
            #       #574: https://github.com/Azure/azure-sdk-for-python/issues/574
            resource_group = id_dict['resourceGroups'].lower()

            if self.group_by_security_group:
                self._get_security_groups(resource_group)

            host_vars = dict(
                ansible_host=None,
                private_ip=None,
                private_ip_alloc_method=None,
                public_ip=None,
                public_ip_name=None,
                public_ip_id=None,
                public_ip_alloc_method=None,
                fqdn=None,
                location=machine.location,
                name=machine.name,
                type=machine.type,
                id=machine.id,
                tags=machine.tags,
                network_interface_id=None,
                network_interface=None,
                resource_group=resource_group,
                mac_address=None,
                plan=(machine.plan.name if machine.plan else None),
                virtual_machine_size=machine.hardware_profile.vm_size.value,
                computer_name=machine.os_profile.computer_name,
                provisioning_state=machine.provisioning_state,
            )

            host_vars['os_disk'] = dict(
                name=machine.storage_profile.os_disk.name,
                operating_system_type=machine.storage_profile.os_disk.os_type.value
            )

            if self.include_powerstate:
                host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)

            if machine.storage_profile.image_reference:
                host_vars['image'] = dict(
                    offer=machine.storage_profile.image_reference.offer,
                    publisher=machine.storage_profile.image_reference.publisher,
                    sku=machine.storage_profile.image_reference.sku,
                    version=machine.storage_profile.image_reference.version
                )

            # Add windows details
            if machine.os_profile.windows_configuration is not None:
                host_vars['windows_auto_updates_enabled'] = \
                    machine.os_profile.windows_configuration.enable_automatic_updates
                host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
                host_vars['windows_rm'] = None
                if machine.os_profile.windows_configuration.win_rm is not None:
                    host_vars['windows_rm'] = dict(listeners=None)
                    if machine.os_profile.windows_configuration.win_rm.listeners is not None:
                        host_vars['windows_rm']['listeners'] = []
                        for listener in machine.os_profile.windows_configuration.win_rm.listeners:
                            host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol,
                                                                             certificate_url=listener.certificate_url))

            # Only the primary NIC's details (and its IP configurations) are
            # recorded in hostvars.
            for interface in machine.network_profile.network_interfaces:
                interface_reference = self._parse_ref_id(interface.id)
                network_interface = self._network_client.network_interfaces.get(
                    interface_reference['resourceGroups'],
                    interface_reference['networkInterfaces'])
                if network_interface.primary:
                    if self.group_by_security_group and \
                       self._security_groups[resource_group].get(network_interface.id, None):
                        host_vars['security_group'] = \
                            self._security_groups[resource_group][network_interface.id]['name']
                        host_vars['security_group_id'] = \
                            self._security_groups[resource_group][network_interface.id]['id']
                    host_vars['network_interface'] = network_interface.name
                    host_vars['network_interface_id'] = network_interface.id
                    host_vars['mac_address'] = network_interface.mac_address
                    for ip_config in network_interface.ip_configurations:
                        host_vars['private_ip'] = ip_config.private_ip_address
                        host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method.value
                        if ip_config.public_ip_address:
                            public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
                            public_ip_address = self._network_client.public_ip_addresses.get(
                                public_ip_reference['resourceGroups'],
                                public_ip_reference['publicIPAddresses'])
                            host_vars['ansible_host'] = public_ip_address.ip_address
                            host_vars['public_ip'] = public_ip_address.ip_address
                            host_vars['public_ip_name'] = public_ip_address.name
                            host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method.value
                            host_vars['public_ip_id'] = public_ip_address.id
                            if public_ip_address.dns_settings:
                                host_vars['fqdn'] = public_ip_address.dns_settings.fqdn

            self._add_host(host_vars)

    def _selected_machines(self, virtual_machines):
        """Filter VMs by --host name match and/or tag key[:value] filters.

        NOTE(review): a machine matching both --host and the tags is appended
        twice - presumably harmless since _add_host is idempotent per name."""
        selected_machines = []
        for machine in virtual_machines:
            if self._args.host and self._args.host == machine.name:
                selected_machines.append(machine)
            if self.tags and self._tags_match(machine.tags, self.tags):
                selected_machines.append(machine)
        return selected_machines

    def _get_security_groups(self, resource_group):
        ''' For a given resource_group build a mapping of network_interface.id to security_group name '''
        if not self._security_groups:
            self._security_groups = dict()
        if not self._security_groups.get(resource_group):
            self._security_groups[resource_group] = dict()
            for group in self._network_client.network_security_groups.list(resource_group):
                if group.network_interfaces:
                    for interface in group.network_interfaces:
                        self._security_groups[resource_group][interface.id] = dict(
                            name=group.name,
                            id=group.id
                        )

    def _get_powerstate(self, resource_group, name):
        """Return the VM's power state (e.g. 'running') by expanding the
        instance view and stripping the 'PowerState/' code prefix."""
        try:
            vm = self._compute_client.virtual_machines.get(resource_group,
                                                           name,
                                                           expand='instanceview')
        except Exception as exc:
            sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))

        return next((s.code.replace('PowerState/', '')
                     for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)

    def _add_host(self, vars):
        """Register one host's vars and append it to every applicable group
        (resource group, location, security group, 'azure', tag key/value)."""
        host_name = self._to_safe(vars['name'])
        resource_group = self._to_safe(vars['resource_group'])
        security_group = None
        if vars.get('security_group'):
            security_group = self._to_safe(vars['security_group'])

        if self.group_by_resource_group:
            if not self._inventory.get(resource_group):
                self._inventory[resource_group] = []
            self._inventory[resource_group].append(host_name)

        if self.group_by_location:
            if not self._inventory.get(vars['location']):
                self._inventory[vars['location']] = []
            self._inventory[vars['location']].append(host_name)

        if self.group_by_security_group and security_group:
            if not self._inventory.get(security_group):
                self._inventory[security_group] = []
            self._inventory[security_group].append(host_name)

        self._inventory['_meta']['hostvars'][host_name] = vars
        self._inventory['azure'].append(host_name)

        if self.group_by_tag and vars.get('tags'):
            for key, value in vars['tags'].iteritems():
                # One group per tag key, and one per key_value combination.
                safe_key = self._to_safe(key)
                safe_value = safe_key + '_' + self._to_safe(value)
                if not self._inventory.get(safe_key):
                    self._inventory[safe_key] = []
                if not self._inventory.get(safe_value):
                    self._inventory[safe_value] = []
                self._inventory[safe_key].append(host_name)
                self._inventory[safe_value].append(host_name)

    def _json_format_dict(self, pretty=False):
        # convert inventory to json
        if pretty:
            return json.dumps(self._inventory, sort_keys=True, indent=2)
        else:
            return json.dumps(self._inventory)

    def _get_settings(self):
        # Load settings from the .ini, if it exists. Otherwise,
        # look for environment values.
        file_settings = self._load_settings()
        if file_settings:
            for key in AZURE_CONFIG_SETTINGS:
                if key in ('resource_groups', 'tags') and file_settings.get(key, None) is not None:
                    values = file_settings.get(key).split(',')
                    if len(values) > 0:
                        setattr(self, key, values)
                elif file_settings.get(key, None) is not None:
                    val = self._to_boolean(file_settings[key])
                    setattr(self, key, val)
        else:
            env_settings = self._get_env_settings()
            for key in AZURE_CONFIG_SETTINGS:
                if key in('resource_groups', 'tags') and env_settings.get(key, None) is not None:
                    values = env_settings.get(key).split(',')
                    if len(values) > 0:
                        setattr(self, key, values)
                elif env_settings.get(key, None) is not None:
                    val = self._to_boolean(env_settings[key])
                    setattr(self, key, val)

    def _parse_ref_id(self, reference):
        """Split an Azure reference id into {segment: value} pairs, taking
        every other segment as a key (unlike module-level azure_id_to_dict)."""
        response = {}
        keys = reference.strip('/').split('/')
        for index in range(len(keys)):
            if index < len(keys) - 1 and index % 2 == 0:
                response[keys[index]] = keys[index + 1]
        return response

    def _to_boolean(self, value):
        # NOTE(review): unrecognized values fall through to True, so only the
        # explicit "no" spellings can disable a setting.
        if value in ['Yes', 'yes', 1, 'True', 'true', True]:
            result = True
        elif value in ['No', 'no', 0, 'False', 'false', False]:
            result = False
        else:
            result = True
        return result

    def _get_env_settings(self):
        # Read every AZURE_CONFIG_SETTINGS variable from the environment.
        env_settings = dict()
        for attribute, env_variable in AZURE_CONFIG_SETTINGS.iteritems():
            env_settings[attribute] = os.environ.get(env_variable, None)
        return env_settings

    def _load_settings(self):
        # Load [azure] section from <script_basename>.ini sitting in the CWD.
        basename = os.path.splitext(os.path.basename(__file__))[0]
        path = basename + '.ini'
        config = None
        settings = None
        try:
            config = ConfigParser.ConfigParser()
            config.read(path)
        except:
            pass

        if config is not None:
            settings = dict()
            for key in AZURE_CONFIG_SETTINGS:
                try:
                    settings[key] = config.get('azure', key, raw=True)
                except:
                    pass

        return settings

    def _tags_match(self, tag_obj, tag_args):
        '''
        Return True if the tags object from a VM contains the requested tag values.

        :param tag_obj:  Dictionary of string:string pairs
        :param tag_args: List of strings in the form key=value
        :return: boolean
        '''
        if not tag_obj:
            return False

        matches = 0
        for arg in tag_args:
            arg_key = arg
            arg_value = None
            if re.search(r':', arg):
                arg_key, arg_value = arg.split(':')
            if arg_value and tag_obj.get(arg_key, None) == arg_value:
                matches += 1
            elif not arg_value and tag_obj.get(arg_key, None) is not None:
                matches += 1
        if matches == len(tag_args):
            # all tag args were matched
            return True

        return False

    def _to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
        regex = "[^A-Za-z0-9\_"
        if not self.replace_dash_in_groups:
            regex += "\-"
        return re.sub(regex + "]", "_", word)
def main():
    """Entry point: verify the Azure SDK is importable and recent enough,
    then build and print the inventory (AzureInventory exits the process)."""
    if not HAS_AZURE:
        sys.exit("The Azure python sdk is not installed (try 'pip install azure') - {0}".format(HAS_AZURE_EXC))

    # NOTE(review): lexicographic string comparison of the SDK package version
    # against an API-version-style date string - confirm it matches the
    # versioning scheme of the installed azure.mgmt.compute release.
    if azure_compute_version < AZURE_MIN_VERSION:
        sys.exit("Expecting azure.mgmt.compute.__version__ to be >= {0}. Found version {1} "
                 "Do you have Azure >= 2.0.0rc2 installed?".format(AZURE_MIN_VERSION, azure_compute_version))

    AzureInventory()
# Script entry point.
if __name__ == '__main__':
    main()
|
cmvelo/ansible
|
contrib/inventory/azure_rm.py
|
Python
|
gpl-3.0
| 32,093
|
[
"Galaxy"
] |
64c2f8b35c5f3054d5d491b0f3b74fed7ce22b55e6dd2b9beae79bc1d81d0cb6
|
#
import os
import numpy as np
import healpy as hp
import astropy.io.fits as pyfits
from multiprocessing import Pool
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from quicksipManera import *
import fitsio
from random import random
### ------------ A couple of useful conversions -----------------------
def zeropointToScale(zp):
    """Convert a photometric zero point to a linear flux scale (22.5-mag base)."""
    delta = zp - 22.5
    return 10. ** (delta / 2.5)
def nanomaggiesToMag(nm):
    """Convert a flux in nanomaggies to an AB magnitude (22.5 zero point)."""
    log10_flux = log(nm, 10.)
    return -2.5 * (log10_flux - 9.)
def Magtonanomaggies(m):
    """Inverse of nanomaggiesToMag: AB magnitude -> flux in nanomaggies."""
    exponent = -m / 2.5 + 9.
    return 10. ** exponent
def thphi2radec(theta, phi):
    """Convert HEALPix spherical angles (theta, phi) in radians to
    (ra, dec) in degrees."""
    rad2deg = 180. / pi
    return rad2deg * phi, -(rad2deg * theta - 90)
# --------- Definitions for polinomial footprints -------
def convertCoordsToPoly(ra, dec):
    """Pair up matching ra/dec entries into a list of (ra, dec) vertex tuples.

    Indexes by position over len(ra), so a shorter `dec` raises IndexError,
    matching the original loop's behaviour.
    """
    return [(ra[i], dec[i]) for i in range(len(ra))]
def point_in_poly(x, y, poly):
    """Ray-casting point-in-polygon test.

    Walks every edge of `poly` (a list of (x, y) vertex tuples, implicitly
    closed) and toggles `inside` each time a horizontal ray from (x, y)
    toward -x crosses an edge.
    """
    count = len(poly)
    inside = False
    ax, ay = poly[0]
    for k in range(1, count + 1):
        bx, by = poly[k % count]
        # Edge straddles the point's y and starts left of / at its x range.
        if min(ay, by) < y <= max(ay, by) and x <= max(ax, bx):
            if ay != by:
                x_cross = (y - ay) * (bx - ax) / (by - ay) + ax
            if ax == bx or x <= x_cross:
                inside = not inside
        ax, ay = bx, by
    return inside
### ------------ SHARED CLASS: HARDCODED INPUTS GO HERE ------------------------
### Please, add here your own harcoded values if any, so other may use them
class mysample(object):
    """
    This class maintains the basic information of the sample
    to minimize hardcoded parameters in the test functions

    Everyone is meant to call mysample to obtain information like
        - path to ccd-annotated files   : ccds
        - zero points                   : zp0
        - magnitude limits (recm)       : recm
        - photoz requirements           : phreq
        - extinction coefficient        : extc
        - extinction index              : be
        - mask var eqv. to blacklist_ok : maskname
        - predicted frac exposures      : FracExp
        - footprint poly (in some cases): polyFoot
    Current Inputs are: survey, DR, band, localdir
        survey: DECaLS, MZLS, BASS, DEShyb, NGCproxy
        DR:     DR3, DR4, DR5
        band:   g, r, z
        localdir: output directory
    (DEShyb is some DES proxy area; NGC is a proxy of North Gal Cap)
    """

    def __init__(self, survey, DR, band, localdir, verb):
        """
        Initialize image survey, data release, band, output path
        Calculate variables and paths
        """
        self.survey = survey
        self.DR = DR
        self.band = band
        self.localdir = localdir
        self.verbose = verb

        # Check bands
        if(self.band != 'g' and self.band != 'r' and self.band != 'z'):
            raise RuntimeError("Band seems wrong options are 'g' 'r' 'z'")

        # Check surveys
        if(self.survey != 'DECaLS' and self.survey != 'BASS' and self.survey != 'MZLS' and self.survey != 'DEShyb' and self.survey != 'NGCproxy'):
            raise RuntimeError("Survey seems wrong options are 'DECAaLS' 'BASS' MZLS' ")

        # Annotated CCD paths (hardcoded NERSC locations per data release).
        if(self.DR == 'DR3'):
            inputdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr3/'
            self.ccds = inputdir + 'ccds-annotated-decals.fits.gz'
            self.catalog = 'DECaLS_DR3'
            if(self.survey != 'DECaLS'): raise RuntimeError("Survey name seems inconsistent; only DECaLS accepted")

        elif(self.DR == 'DR4'):
            inputdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr4/'
            if (band == 'g' or band == 'r'):
                #self.ccds = inputdir+'ccds-annotated-dr4-90prime.fits.gz'
                self.ccds = inputdir + 'ccds-annotated-bass.fits.gz'
                self.catalog = 'BASS_DR4'
                if(self.survey != 'BASS'): raise RuntimeError("Survey name seems inconsistent")
            elif(band == 'z'):
                #self.ccds = inputdir+'ccds-annotated-dr4-mzls.fits.gz'
                self.ccds = inputdir + 'ccds-annotated-mzls.fits.gz'
                self.catalog = 'MZLS_DR4'
                if(self.survey != 'MZLS'): raise RuntimeError("Survey name seems inconsistent")
            else: raise RuntimeError("Input sample band seems inconsisent")

        elif(self.DR == 'DR5'):
            inputdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/'
            self.ccds = inputdir + 'ccds-annotated-dr5.fits.gz'
            if(self.survey == 'DECaLS'):
                self.catalog = 'DECaLS_DR5'
            elif(self.survey == 'DEShyb'):
                self.catalog = 'DEShyb_DR5'
            elif(self.survey == 'NGCproxy'):
                self.catalog = 'NGCproxy_DR5'
            else: raise RuntimeError("Survey name seems inconsistent")

        elif(self.DR == 'DR6'):
            inputdir = '/global/cscratch1/sd/dstn/dr6plus/'
            if (band == 'g'):
                self.ccds = inputdir + 'ccds-annotated-90prime-g.fits.gz'
                self.catalog = 'BASS_DR6'
                if(self.survey != 'BASS'): raise RuntimeError("Survey name seems inconsistent")
            elif (band == 'r'):
                self.ccds = inputdir + 'ccds-annotated-90prime-r.fits.gz'
                self.catalog = 'BASS_DR6'
                if(self.survey != 'BASS'): raise RuntimeError("Survey name seems inconsistent")
            elif(band == 'z'):
                self.ccds = inputdir + 'ccds-annotated-mosaic-z.fits.gz'
                self.catalog = 'MZLS_DR6'
                if(self.survey != 'MZLS'): raise RuntimeError("Survey name seems inconsistent")
            else: raise RuntimeError("Input sample band seems inconsisent")

        elif(self.DR == 'DR7'):
            inputdir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/'
            self.ccds = inputdir + 'ccds-annotated-dr7.fits.gz'
            if(self.survey == 'DECaLS'):
                self.catalog = 'DECaLS_DR7'
            elif(self.survey == 'DEShyb'):
                self.catalog = 'DEShyb_DR7'
            elif(self.survey == 'NGCproxy'):
                self.catalog = 'NGCproxy_DR7'
            else: raise RuntimeError("Survey name seems inconsistent")

        else: raise RuntimeError("Data Realease seems wrong")

        # Make directory for outputs if it doesn't exist
        dirplots = localdir + self.catalog
        try:
            os.makedirs(dirplots)
            print "creating directory for plots", dirplots
        except OSError:
            # Directory already existing is fine; re-raise anything else.
            if not os.path.isdir(dirplots):
                raise

        # Predicted survey exposure fractions
        if(self.survey == 'DECaLS' or self.survey == 'DEShyb' or self.survey == 'NGCproxy'):
            # DECALS final survey will be covered by
            # 1, 2, 3, 4, and 5 exposures in the following fractions:
            self.FracExp = [0.02, 0.24, 0.50, 0.22, 0.02]
        elif(self.survey == 'BASS'):
            # BASS coverage fractions for 1,2,3,4,5 exposures are:
            self.FracExp = [0.0014, 0.0586, 0.8124, 0.1203, 0.0054, 0.0019]
        elif(self.survey == 'MZLS'):
            # For MzLS fill factors of 100% with a coverage of at least 1,
            # 99.5% with a coverage of at least 2, and 85% with a coverage of 3.
            self.FracExp = [0.005, 0.145, 0.85, 0, 0]
        else:
            raise RuntimeError("Survey seems to have wrong options for fraction of exposures ")

        # Per-band inputs: extinction index (be), extinction coefficient
        # (extc), zero point (zp0), requirement depth (recm) and photo-z
        # uniformity requirement (phreq).
        if band == 'g':
            self.be = 1
            self.extc = 3.303  #/2.751
            self.zp0 = 25.08
            self.recm = 24.
            self.phreq = 0.01
        if band == 'r':
            self.be = 2
            self.extc = 2.285  #/2.751
            self.zp0 = 25.29
            self.recm = 23.4
            self.phreq = 0.01
        if band == 'z':
            self.be = 4
            self.extc = 1.263  #/2.751
            self.zp0 = 24.92
            self.recm = 22.5
            self.phreq = 0.02
# ------------------------------------------------------------------
# --- Footprints ----
# DES round13 footprint polygon, loaded at import time from a hardcoded
# NERSC home path (import fails if the file is absent).
polyDEScoord = np.loadtxt('/global/homes/m/manera/round13-poly-radec.dat')
polyDES = convertCoordsToPoly(polyDEScoord[:,0], polyDEScoord[:,1])
def InDEShybFootprint(RA, DEC):
    """True when (RA, DEC) falls inside the DES round13 polygon footprint."""
    # Wrap RA into the polygon's coordinate convention (RA in [-180, 180)).
    ra_wrapped = RA - 360 if RA > 180 else RA
    return point_in_poly(ra_wrapped, DEC, polyDES)
def InNGCproxyFootprint(RA):
    """Proxy North-Galactic-Cap footprint: True when the (wrapped) RA lies
    in [100, 300] degrees."""
    ra_wrapped = RA + 360 if RA < -180 else RA
    return 100 <= ra_wrapped <= 300
def plot_magdepth2D(sample, ral, decl, depth, mapfile, mytitle):
    """Scatter-plot a magnitude-depth map in (ra, dec) and save it to `mapfile`.

    sample : mysample instance; sample.recm sets the colour-scale maximum.
    ral, decl : lists of ra/dec in degrees.
    depth : per-point depth values used for colouring.
    """
    # Plot depth
    from matplotlib import pyplot as plt
    import matplotlib.cm as cm

    # Wrap ra > 300 deg to negative values so the footprint plots contiguously.
    ralB = [ra - 360 if ra > 300 else ra for ra in ral]

    # Colour scale spans the 2 mag below the survey requirement depth;
    # values above vmax are shown in green via set_over.
    vmax = sample.recm
    vmin = vmax - 2.0
    mapa = plt.scatter(ralB, decl, c=depth, cmap=cm.gnuplot, s=2., vmin=vmin, vmax=vmax, lw=0, edgecolors='none')
    mapa.cmap.set_over('lawngreen')
    cbar = plt.colorbar(mapa, extend='both')
    plt.xlabel('r.a. (degrees)')
    plt.ylabel('declination (degrees)')
    plt.title(mytitle)
    plt.xlim(-60, 300)
    plt.ylim(-30, 90)
    print 'saving plot to ', mapfile
    plt.savefig(mapfile)
    plt.close()
    return
# ------------------------------------------------------------------
# ------------ VALIDATION TESTS ------------------------------------
# ------------------------------------------------------------------
# Note: part of the name of the function should startw with number valXpX
def val3p4c_depthfromIvarOLD(sample):
    """
    Requirement V3.4
    90% filled to g=24, r=23.4 and z=22.5 and 95% and 98% at 0.3/0.6 mag shallower.

    Produces extinction corrected magnitude maps for visual inspection and
    prints the 90/95/98-percentile depths and the pass-count histogram.

    MARCM stable version, improved from AJR quick hack
    This now included extinction from the exposures
    Uses quicksip subroutines from Boris, corrected
    for a bug I found for BASS and MzLS ccd orientation
    """
    nside = 1024       # Resolution of output maps
    nsideSTR = '1024'  # same as nside but in string format
    nsidesout = None   # if you want full sky degraded maps to be written
    ratiores = 1       # Superresolution/oversampling ratio, simp mode doesn't allow anything other than 1
    mode = 1           # 1: fully sequential, 2: parallel then sequential, 3: fully parallel
    pixoffset = 0      # How many pixels are being removed on the edge of each CCD? 15 for DES.
    oversamp = '1'     # ratiores in string format

    band = sample.band
    catalogue_name = sample.catalog
    fname = sample.ccds
    localdir = sample.localdir
    extc = sample.extc

    #Read ccd file
    tbdata = pyfits.open(fname)[1].data

    # ------------------------------------------------------
    # Obtain indices of the CCDs to use for this DR/survey/band
    # (photometric and not blacklisted, plus footprint cuts for the proxies).
    auxstr = 'band_' + band
    sample_names = [auxstr]
    if(sample.DR == 'DR3'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
    elif(sample.DR == 'DR4'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['bitmask'] == 0))
    elif(sample.DR == 'DR5'):
        if(sample.survey == 'DECaLS'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
        elif(sample.survey == 'DEShyb'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True) & (map(InDEShybFootprint, tbdata['ra'], tbdata['dec'])))
        elif(sample.survey == 'NGCproxy'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True) & (map(InNGCproxyFootprint, tbdata['ra'])))
    elif(sample.DR == 'DR6'):
        inds = np.where((tbdata['filter'] == band))

    #Read data
    #obtain invnoisesq here, including extinction
    nmag = Magtonanomaggies(tbdata['galdepth'] - extc * tbdata['EBV']) / 5.
    ivar = 1. / nmag**2.
    hits = np.ones(np.shape(ivar))

    # What properties do you want mapped?
    # Each each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)]
    propertiesandoperations = [('ivar', '', 'total'), ('hits', '', 'total')]

    # What properties to keep when reading the images?
    # Should at least contain propertiesandoperations and the image corners.
    # MARCM - actually no need for ra dec image corners.
    # Only needs ra0 ra1 ra2 ra3 dec0 dec1 dec2 dec3 only if fast track appropriate quicksip subroutines were implemented
    #propertiesToKeep = [ 'filter', 'FWHM','mjd_obs'] \
    #    + ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']
    propertiesToKeep = ['filter', 'FWHM', 'mjd_obs'] \
        + ['RA', 'DEC', 'ra0', 'ra1', 'ra2', 'ra3', 'dec0', 'dec1', 'dec2', 'dec3']

    # Create big table with all relevant properties.
    tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [ivar] + [hits], names=propertiesToKeep + ['ivar', 'hits'])

    # Read the table, create Healtree, project it into healpix maps, and write these maps.
    # Done with Quicksip library, note it has quite a few hardcoded values (use new version by MARCM for BASS and MzLS)
    # project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside)
    #project_and_write_maps(mode, propertiesandoperations, tbdata, catalogue_name, localdir, sample_names, inds, nside, ratiores, pixoffset, nsidesout)
    project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, localdir, sample_names, inds, nside)

    # Read Healpix maps produced by quicksip
    prop = 'ivar'
    op = 'total'
    vmin = 21.0
    vmax = 24.0

    fname2 = localdir + catalogue_name + '/nside' + nsideSTR + '_oversamp' + oversamp + '/' + \
        catalogue_name + '_band_' + band + '_nside' + nsideSTR + '_oversamp' + oversamp + '_' + prop + '__' + op + '.fits.gz'
    f = fitsio.read(fname2)

    # HEALPIX DEPTH MAPS
    # convert ivar to depth
    import healpy as hp
    from healpix import pix2ang_ring, thphi2radec

    ral = []
    decl = []
    val = f['SIGNAL']
    pix = f['PIXEL']

    # Obtain values to plot; pixels shallower than vmin are counted but not drawn.
    if (prop == 'ivar'):
        myval = []
        mylabel = 'depth'

        below = 0
        for i in range(0, len(val)):
            depth = nanomaggiesToMag(sqrt(1. / val[i]) * 5.)
            if(depth < vmin):
                below = below + 1
            else:
                myval.append(depth)
                th, phi = hp.pix2ang(int(nside), pix[i])
                ra, dec = thphi2radec(th, phi)
                ral.append(ra)
                decl.append(dec)

    npix = len(pix)

    print 'Area is ', npix / (float(nside)**2. * 12) * 360 * 360. / pi, ' sq. deg.'
    print below, 'of ', npix, ' pixels are not plotted as their ', mylabel, ' < ', vmin
    print 'Within the plot, min ', mylabel, '= ', min(myval), ' and max ', mylabel, ' = ', max(myval)

    # Plot depth
    from matplotlib import pyplot as plt
    import matplotlib.cm as cm

    mapa = plt.scatter(ral, decl, c=myval, cmap=cm.rainbow, s=2., vmin=vmin, vmax=vmax, lw=0, edgecolors='none')
    cbar = plt.colorbar(mapa)
    plt.xlabel('r.a. (degrees)')
    plt.ylabel('declination (degrees)')
    plt.title('Map of ' + mylabel + ' for ' + catalogue_name + ' ' + band + '-band')
    plt.xlim(0, 360)
    plt.ylim(-30, 90)
    mapfile = localdir + mylabel + '_' + band + '_' + catalogue_name + str(nside) + '.png'
    print 'saving plot to ', mapfile
    plt.savefig(mapfile)
    plt.close()
    #plt.show()
    #cbar.set_label(r'5$\sigma$ galaxy depth', rotation=270,labelpad=1)
    #plt.xscale('log')

    # Statistics depths: percentiles give the depth reached by 90/95/98% of pixels.
    deptharr = np.array(myval)
    p90 = np.percentile(deptharr, 10)
    p95 = np.percentile(deptharr, 5)
    p98 = np.percentile(deptharr, 2)
    med = np.percentile(deptharr, 50)
    mean = sum(deptharr) / float(np.size(deptharr))  # 1M array, too long for precision
    std = sqrt(sum(deptharr**2.) / float(len(deptharr)) - mean**2.)
    ndrawn = np.size(deptharr)
    print "Total pixels", np.size(deptharr), "probably too many for exact mean and std"
    print "Mean = ", mean, "; Median = ", med, "; Std = ", std
    print "Results for 90% 95% and 98% are: ", p90, p95, p98

    # Statistics on number of passes (exposures) per pixel.
    prop = 'hits'
    op = 'total'
    fname2 = localdir + catalogue_name + '/nside' + nsideSTR + '_oversamp' + oversamp + '/' + \
        catalogue_name + '_band_' + band + '_nside' + nsideSTR + '_oversamp' + oversamp + '_' + prop + '__' + op + '.fits.gz'
    f = fitsio.read(fname2)

    hitsb = f['SIGNAL']
    hist, bin_edges = np.histogram(hitsb, bins=[-0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 100], density=True)
    #print hitsb[1000:10015]
    #hist, bin_edges =np.histogram(hitsb,density=True)
    print "Percentage of hits for 0,1,2., to >7 pases\n",
    #print bin_edges
    print hist
    #print 100*hist

    return mapfile
def val3p4c_depthfromIvar(sample,Nexpmin=1):
    """
    Requirement V3.4
    90% filled to g=24, r=23.4 and z=22.5 and 95% and 98% at 0.3/0.6 mag shallower.
    Produces extinction-corrected magnitude depth maps for visual inspection
    MARCM includes min number of exposures
    MARCM stable version, improved from AJR quick hack
    This now included extinction from the exposures
    Uses quicksip subroutines from Boris, corrected
    for a bug I found for BASS and MzLS ccd orientation

    Parameters:
        sample  : survey-sample object; provides band, catalog, ccds (FITS path),
                  localdir and extc (extinction coefficient for this band).
        Nexpmin : minimum number of exposures a healpix pixel must have to be
                  counted/plotted (default 1).

    Returns:
        Path of the PNG depth map written to disk.
    """
    nside = 1024 # Resolution of output maps
    nsideSTR='1024' # same as nside but in string format
    nsidesout = None # if you want full sky degraded maps to be written
    ratiores = 1 # Superresolution/oversampling ratio, simp mode doesn't allow anything other than 1
    mode = 1 # 1: fully sequential, 2: parallel then sequential, 3: fully parallel
    pixoffset = 0 # How many pixels are being removed on the edge of each CCD? 15 for DES.
    oversamp='1' # ratiores in string format
    band = sample.band
    catalogue_name = sample.catalog
    fname = sample.ccds
    localdir = sample.localdir
    extc = sample.extc
    #Read ccd file
    tbdata = pyfits.open(fname)[1].data
    # ------------------------------------------------------
    # Obtain indices of the CCDs to use; the quality cuts differ per data release.
    auxstr='band_'+band
    sample_names = [auxstr]
    if(sample.DR == 'DR3'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
    elif(sample.DR == 'DR4'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['bitmask'] == 0))
    elif(sample.DR == 'DR5'):
        # DR5 additionally restricts to a sub-footprint depending on sample.survey.
        if(sample.survey == 'DECaLS'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
        elif(sample.survey == 'DEShyb'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True) & (map(InDEShybFootprint,tbdata['ra'],tbdata['dec'])))
        elif(sample.survey == 'NGCproxy'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True) & (map(InNGCproxyFootprint,tbdata['ra'])))
    elif(sample.DR == 'DR6'):
        inds = np.where((tbdata['filter'] == band))
    #Read data
    #obtain invnoisesq here, including extinction
    # galdepth is a 5-sigma magnitude; subtracting extc*EBV applies the
    # extinction correction before converting to a flux noise.
    nmag = Magtonanomaggies(tbdata['galdepth']-extc*tbdata['EBV'])/5.
    ivar= 1./nmag**2.
    hits=np.ones(np.shape(ivar))
    # What properties do you want mapped?
    # Each each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)]
    propertiesandoperations = [ ('ivar', '', 'total'), ('hits','','total') ]
    # What properties to keep when reading the images?
    # Should at least contain propertiesandoperations and the image corners.
    # MARCM - actually no need for ra dec image corners.
    # Only needs ra0 ra1 ra2 ra3 dec0 dec1 dec2 dec3 only if fast track appropriate quicksip subroutines were implemented
    #propertiesToKeep = [ 'filter', 'FWHM','mjd_obs'] \
    #    + ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']
    propertiesToKeep = [ 'filter', 'FWHM','mjd_obs'] \
        + ['RA', 'DEC', 'ra0','ra1','ra2','ra3','dec0','dec1','dec2','dec3']
    # Create big table with all relevant properties.
    tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [ivar] + [hits], names = propertiesToKeep + [ 'ivar', 'hits'])
    # Read the table, create Healtree, project it into healpix maps, and write these maps.
    # Done with Quicksip library, note it has quite a few hardcoded values (use new version by MARCM for BASS and MzLS)
    # project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside)
    #project_and_write_maps(mode, propertiesandoperations, tbdata, catalogue_name, localdir, sample_names, inds, nside, ratiores, pixoffset, nsidesout)
    project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, localdir, sample_names, inds, nside)
    # Read Healpix maps back from quicksip output
    prop='ivar'
    op='total'
    # vmin is also the threshold below which pixels are counted as "below";
    # vmax is only used implicitly by the (commented-out) plotting code.
    vmin=21.0
    vmax=24.0
    fname2=localdir+catalogue_name+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+\
    catalogue_name+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
    f = fitsio.read(fname2)
    # HEALPIX DEPTH MAPS
    # convert ivar to depth
    import healpy as hp
    from healpix import pix2ang_ring,thphi2radec
    ral = []
    decl = []
    val = f['SIGNAL']
    pix = f['PIXEL']
    #get hits (number of exposures per pixel) from the companion quicksip map
    prop = 'hits'
    op = 'total'
    fname2=localdir+catalogue_name+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+\
    catalogue_name+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
    f = fitsio.read(fname2)
    hitsb=f['SIGNAL']
    # Obtain values to plot
    #if (prop == 'ivar'):
    myval = []
    mylabel='depth'
    below=0
    for i in range(0,len(val)):
        # total ivar -> 5-sigma flux noise -> magnitude depth
        depth=nanomaggiesToMag(sqrt(1./val[i]) * 5.)
        npases=hitsb[i]
        if(npases >= Nexpmin ):
            myval.append(depth)
            th,phi = hp.pix2ang(int(nside),pix[i])
            ra,dec = thphi2radec(th,phi)
            ral.append(ra)
            decl.append(dec)
            if(depth < vmin):
                below=below+1
    npix=len(myval)
    print 'Area is ', npix/(float(nside)**2.*12)*360*360./pi, ' sq. deg.'
    print below, 'of ', npix, ' pixels are not plotted as their ', mylabel,' < ', vmin
    print 'Within the plot, min ', mylabel, '= ', min(myval), ' and max ', mylabel, ' = ', max(myval)
    # Plot depth
    #from matplotlib import pyplot as plt
    #import matplotlib.cm as cm
    #ralB = [ ra-360 if ra > 300 else ra for ra in ral ]
    #vmax = sample.recm
    #vmin = vmax - 2.0
    #mapa = plt.scatter(ralB,decl,c=myval, cmap=cm.rainbow,s=2., vmin=vmin, vmax=vmax, lw=0,edgecolors='none')
    #mapa = plt.scatter(ralB,decl,c=myval, cmap=cm.gnuplot,s=2., vmin=vmin, vmax=vmax, lw=0,edgecolors='none')
    #mapa.cmap.set_over('yellowgreen')
    #cbar = plt.colorbar(mapa,extend='both')
    #plt.xlabel('r.a. (degrees)')
    #plt.ylabel('declination (degrees)')
    #plt.title('Map of '+ mylabel +' for '+catalogue_name+' '+band+'-band \n with 3 or more exposures')
    #plt.xlim(-60,300)
    #plt.ylim(-30,90)
    #mapfile=localdir+mylabel+'_'+band+'_'+catalogue_name+str(nside)+'.png'
    #print 'saving plot to ', mapfile
    #plt.savefig(mapfile)
    #plt.close()
    #plt.show()
    #cbar.set_label(r'5$\sigma$ galaxy depth', rotation=270,labelpad=1)
    #plt.xscale('log')
    mapfile=localdir+mylabel+'_'+band+'_'+catalogue_name+str(nside)+'.png'
    mytitle='Map of '+ mylabel +' for '+catalogue_name+' '+band+'-band \n with '+str(Nexpmin)+\
    ' or more exposures'
    plot_magdepth2D(sample,ral,decl,myval,mapfile,mytitle)
    # Statistics depths
    deptharr=np.array(myval)
    p90=np.percentile(deptharr,10)
    p95=np.percentile(deptharr,5)
    p98=np.percentile(deptharr,2)
    med=np.percentile(deptharr,50)
    mean = sum(deptharr)/float(np.size(deptharr)) # 1M array, too long for precision
    std = sqrt(sum(deptharr**2.)/float(len(deptharr))-mean**2.)
    ndrawn=np.size(deptharr)
    print "Total pixels", np.size(deptharr), "probably too many for exact mean and std"
    print "Mean = ", mean, "; Median = ", med ,"; Std = ", std
    print "Results for 90% 95% and 98% are: ", p90, p95, p98
    # Statistics pases: histogram of number of exposures per pixel
    #prop = 'hits'
    #op = 'total'
    #fname2=localdir+catalogue_name+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+\
    #catalogue_name+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
    #f = fitsio.read(fname2)
    #hitsb=f['SIGNAL']
    hist, bin_edges =np.histogram(hitsb,bins=[-0.5,0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,100],density=True)
    #print hitsb[1000:10015]
    #hist, bin_edges =np.histogram(hitsb,density=True)
    print "Percentage of hits for 0,1,2., to >7 pases\n",
    #print bin_edges
    print hist
    #print 100*hist
    return mapfile
def plot_magdepth2D(sample,ral,decl,depth,mapfile,mytitle):
# Plot depth
from matplotlib import pyplot as plt
import matplotlib.cm as cm
ralB = [ ra-360 if ra > 300 else ra for ra in ral ]
vmax = sample.recm
vmin = vmax - 2.0
mapa = plt.scatter(ralB,decl,c=depth, cmap=cm.gnuplot,s=2., vmin=vmin, vmax=vmax, lw=0,edgecolors='none')
mapa.cmap.set_over('yellowgreen')
cbar = plt.colorbar(mapa,extend='both')
plt.xlabel('r.a. (degrees)')
plt.ylabel('declination (degrees)')
plt.title(mytitle)
plt.xlim(-60,300)
plt.ylim(-30,90)
print 'saving plot to ', mapfile
plt.savefig(mapfile)
plt.close()
return mapfile
def val3p4b_maghist_pred(sample,ndraw=1e5, nbin=100, vmin=21.0, vmax=25.0, Nexpmin=1):
"""
Requirement V3.4
90% filled to g=24, r=23.4 and z=22.5 and 95% and 98% at 0.3/0.6 mag shallower.
MARCM
Makes histogram of predicted magnitudes
by MonteCarlo from exposures converving fraction of number of exposures
This produces the histogram for Dustin's processed galaxy depth
"""
# Check fraction of number of exposures adds to 1.
if( abs(sum(sample.FracExp) - 1.0) > 1e-5 ):
raise ValueError("Fration of number of exposures don't add to one")
# Survey inputs
rel = sample.DR
catalogue_name = sample.catalog
band = sample.band
be = sample.be
zp0 = sample.zp0
recm = sample.recm
verbose = sample.verbose
f = fitsio.read(sample.ccds)
#read in magnitudes including extinction
counts2014 = 0
counts20 = 0
nl = []
for i in range(0,len(f)):
#year = int(f[i]['date_obs'].split('-')[0])
#if (year <= 2014): counts2014 = counts2014 + 1
if f[i]['dec'] < -20 : counts20 = counts20 + 1
if(sample.DR == 'DR3'):
if f[i]['filter'] == sample.band and f[i]['photometric'] == True and f[i]['blacklist_ok'] == True :
magext = f[i]['galdepth'] - f[i]['decam_extinction'][be]
nmag = Magtonanomaggies(magext)/5. #total noise
nl.append(nmag)
if(sample.DR == 'DR4'):
if f[i]['filter'] == sample.band and f[i]['photometric'] == True and f[i]['bitmask'] == 0 :
magext = f[i]['galdepth'] - f[i]['decam_extinction'][be]
nmag = Magtonanomaggies(magext)/5. #total noise
nl.append(nmag)
if(sample.DR == 'DR5'):
if f[i]['filter'] == sample.band and f[i]['photometric'] == True and f[i]['blacklist_ok'] == True :
if(sample.survey == 'DEShyb'):
RA = f[i]['ra']
DEC = f[i]['dec'] # more efficient to put this inside the function directly
if(not InDEShybFootprint(RA,DEC) ): continue # skip if not in DEShyb
if(sample.survey == 'NGCproxy'):
RA = f[i]['ra'] # more efficient to put this inside the function directly
if(not InNGCproxyFootprint(RA) ): continue # skip if not in NGC
magext = f[i]['galdepth'] - f[i]['decam_extinction'][be]
nmag = Magtonanomaggies(magext)/5. #total noise
nl.append(nmag)
if(sample.DR == 'DR6'):
if f[i]['filter'] == sample.band :
magext = f[i]['galdepth'] - f[i]['decam_extinction'][be]
nmag = Magtonanomaggies(magext)/5. #total noise
nl.append(nmag)
ng = len(nl)
print "-----------"
if(verbose) : print "Number of objects = ", len(f)
#if(verbose) : print "Counts before or during 2014 = ", counts2014
if(verbose) : print "Counts with dec < -20 = ", counts20
print "Number of objects in the sample = ", ng
#Monte Carlo to predict magnitudes histogram
ndrawn = 0
nbr = 0
NTl = []
n = 0
for indx, f in enumerate(sample.FracExp,1) :
Nexp = indx # indx starts at 1 bc argument on enumearate :-), thus is the number of exposures
nd = int(round(ndraw * f))
if(Nexp < Nexpmin): nd=0
ndrawn=ndrawn+nd
for i in range(0,nd):
detsigtoti = 0
for j in range(0,Nexp):
ind = int(random()*ng)
detsig1 = nl[ind]
detsigtoti += 1./detsig1**2.
detsigtot = sqrt(1./detsigtoti)
m = nanomaggiesToMag(detsigtot * 5.)
if m > recm: # pass requirement
nbr += 1.
NTl.append(m)
n += 1.
# Run some statistics
NTl=np.array(NTl)
p90=np.percentile(NTl,10)
p95=np.percentile(NTl,5)
p98=np.percentile(NTl,2)
mean = sum(NTl)/float(len(NTl))
std = sqrt(sum(NTl**2.)/float(len(NTl))-mean**2.)
NTl.sort()
if len(NTl)/2. != len(NTl)/2:
med = NTl[len(NTl)/2+1]
else:
med = (NTl[len(NTl)/2+1]+NTl[len(NTl)/2])/2.
print "Total images drawn with either ", Nexpmin, " to 5 exposures", ndrawn
print "We are in fact runing with a minimum of", Nexpmin, "exposures"
print "Mean = ", mean, "; Median = ", med ,"; Std = ", std
print 'percentage better than requirements = '+str(nbr/float(ndrawn))
if(sample.band =='g'): print "Requirements are > 90%, 95% and 98% at 24, 23.7, 23.4"
if(sample.band =='r'): print "Requirements are > 90%, 95% and 98% at 23.4, 23.1, 22.8"
if(sample.band =='z'): print "Requirements are > 90%, 95% and 98% at 22.5, 22.2, 21.9"
print "Results are: ", p90, p95, p98
# Prepare historgram
minN = max(min(NTl),vmin)
maxN = max(NTl)+.0001
hl = np.zeros((nbin)) # histogram counts
lowcounts=0
for i in range(0,len(NTl)):
bin = int(nbin*(NTl[i]-minN)/(maxN-minN))
if(bin >= 0) :
hl[bin] += 1
else:
lowcounts +=1
Nl = [] # x bin centers
for i in range(0,len(hl)):
Nl.append(minN+i*(maxN-minN)/float(nbin)+0.5*(maxN-minN)/float(nbin))
NTl = np.array(NTl)
print "min,max depth = ",min(NTl), max(NTl)
print "counts below ", minN, " = ", lowcounts
#### Ploting histogram
fname=sample.localdir+'validationplots/'+sample.catalog+sample.band+'_pred_exposures.png'
print "saving histogram plot in", fname
#--- pdf version ---
#from matplotlib.backends.backend_pdf import PdfPages
#pp = PdfPages(fname)
plt.clf()
plt.plot(Nl,hl,'k-')
plt.xlabel(r'5$\sigma$ '+sample.band+ ' depth')
plt.ylabel('# of images')
plt.title('MC combined exposure depth '+str(mean)[:5]+r'$\pm$'+str(std)[:4]+r', $f_{\rm pass}=$'+str(nbr/float(ndrawn))[:5]+'\n '+catalogue_name)
#plt.xscale('log') # --- pdf ---
plt.savefig(fname) #pp.savefig()
plt.close #pp.close()
return fname
def v5p1e_photometricReqPlot(sample):
    """
    No region > 3deg will be based upon non-photometric observations
    Produces a plot of zero-point variations across the survey to inspect visually
    MARCM It can be extended to check the 3deg later if is necessary

    Parameters:
        sample : survey-sample object; provides DR, catalog, band, localdir,
                 verbose, ccds (FITS path) and phreq (photometric requirement
                 on the zero-point flux rms).

    Returns:
        Path of the min-zpt-rms PNG map written to disk.
    """
    nside = 1024 # Resolution of output maps
    nsideSTR = '1024' # String value for nside
    nsidesout = None # if you want full sky degraded maps to be written
    ratiores = 1 # Superresolution/oversampling ratio, simp mode doesn't allow anything other than 1
    oversamp = '1' # string oversaple value
    mode = 1 # 1: fully sequential, 2: parallel then sequential, 3: fully parallel
    pixoffset = 0 # How many pixels are being removed on the edge of each CCD
    # NOTE(review): mjd_max and mjdw are assigned but never used below — confirm.
    mjd_max = 10e10
    mjdw = ''
    rel = sample.DR
    catalogue_name = sample.catalog
    band = sample.band
    sample_names = ['band_'+band]
    localdir = sample.localdir
    verbose = sample.verbose
    tbdata = pyfits.open(sample.ccds)[1].data
    # Select CCDs per data release (photometric cut intentionally not applied here,
    # since the zero-point scatter itself is being mapped).
    if(sample.DR == 'DR3'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['blacklist_ok'] == True))
    if(sample.DR == 'DR4'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['bitmask'] == 0))
    elif(sample.DR == 'DR5'):
        if(sample.survey == 'DECaLS'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['blacklist_ok'] == True))
        elif(sample.survey == 'DEShyb'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['blacklist_ok'] == True) & (map(InDEShybFootprint,tbdata['ra'],tbdata['dec'])))
        elif(sample.survey == 'NGCproxy'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['blacklist_ok'] == True) & (map(InNGCproxyFootprint,tbdata['ra'])))
    if(sample.DR == 'DR6'):
        inds = np.where((tbdata['filter'] == band))
    #Read data
    #obtain invnoisesq here, including extinction
    # Zero-point variance per CCD: photometric rms squared over number of matches.
    zptvar = tbdata['CCDPHRMS']**2/tbdata['CCDNMATCH']
    zptivar = 1./zptvar
    nccd = np.ones(len(tbdata))
    # What properties do you want mapped?
    # Each each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)]
    quicksipVerbose(sample.verbose)
    propertiesandoperations = [('zptvar','','min')]
    #propertiesandoperations = [ ('zptvar', '', 'total') , ('zptvar','','min') , ('nccd','','total') , ('zptivar','','total')]
    # What properties to keep when reading the images?
    #Should at least contain propertiesandoperations and the image corners.
    # MARCM - actually no need for ra dec image corners.
    # Only needs ra0 ra1 ra2 ra3 dec0 dec1 dec2 dec3 only if fast track appropriate quicksip subroutines were implemented
    propertiesToKeep = [ 'filter', 'AIRMASS', 'FWHM','mjd_obs'] \
        + ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']
    # Create big table with all relevant properties.
    tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [zptvar,zptivar,nccd], names = propertiesToKeep + [ 'zptvar','zptivar','nccd'])
    # Read the table, create Healtree, project it into healpix maps, and write these maps.
    # Done with Quicksip library, note it has quite a few hardcoded values (use new version by MARCM for BASS and MzLS)
    # project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside)
    project_and_write_maps(mode, propertiesandoperations, tbdata, catalogue_name, localdir, sample_names, inds, nside, ratiores, pixoffset, nsidesout)
    # ----- Read healpix maps for first case [zptvar min]-------------------------
    prop='zptvar'
    op= 'min'
    fname=localdir+catalogue_name+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+catalogue_name+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
    f = fitsio.read(fname)
    ral = []
    decl = []
    val = f['SIGNAL']
    pix = f['PIXEL']
    # -------------- plot of values ------------------
    print 'Plotting min zpt rms'
    myval = []
    for i in range(0,len(val)):
        myval.append(1.086 * np.sqrt(val[i])) #1.086 converts d(mag) into d(flux)
        th,phi = hp.pix2ang(int(nside),pix[i])
        ra,dec = thphi2radec(th,phi)
        ral.append(ra)
        decl.append(dec)
    mylabel = 'min-zpt-rms-flux'
    vmin = 0.0 #min(myval)
    vmax = 0.03 #max(myval)
    npix = len(myval)
    below = 0
    print 'Min and Max values of ', mylabel, ' values is ', min(myval), max(myval)
    print 'Number of pixels is ', npix
    print 'Area is ', npix/(float(nside)**2.*12)*360*360./pi, ' sq. deg.'
    import matplotlib.cm as cm
    mapa = plt.scatter(ral,decl,c=myval, cmap=cm.rainbow,s=2., vmin=vmin, vmax=vmax, lw=0,edgecolors='none')
    fname = localdir+mylabel+'_'+band+'_'+catalogue_name+str(nside)+'.png'
    cbar = plt.colorbar(mapa)
    plt.xlabel('r.a. (degrees)')
    plt.ylabel('declination (degrees)')
    plt.title('Map of '+ mylabel +' for '+catalogue_name+' '+band+'-band')
    plt.xlim(0,360)
    plt.ylim(-30,90)
    plt.savefig(fname)
    plt.close()
    #-plot of status in udgrade maps: nside64 = 1.406 deg pix size
    phreq = sample.phreq
    # Obtain values to plot
    nside2 = 64 # 1.40625 deg per pixel
    npix2 = hp.nside2npix(nside2)
    myreq = np.zeros(npix2) # 0 off footprint, 1 at least one pass requirement, -1 none pass requirement
    ral = np.zeros(npix2)
    decl = np.zeros(npix2)
    mylabel = 'photometric-pixels'
    if(verbose) : print 'Plotting photometric requirement'
    # Downgrade: mark each coarse pixel as passing if any fine subpixel passes.
    for i in range(0,len(val)):
        th,phi = hp.pix2ang(int(nside),pix[i])
        ipix = hp.ang2pix(nside2,th,phi)
        dF= 1.086 * (sqrt(val[i])) # 1.086 converts d(magnitudes) into d(flux)
        if(dF < phreq):
            myreq[ipix]=1
        else:
            if(myreq[ipix] == 0): myreq[ipix]=-1
    below=sum( x for x in myreq if x < 0)
    below=-below
    print 'Number of udegrade pixels with ', mylabel,' > ', phreq, ' for all subpixels =', below
    print 'nside of udgraded pixels is : ', nside2
    return fname
def v3p5_Areas(sample1,sample2):
"""
450 sq deg overlap between all instruments (R3.5)
14000 sq deg filled (R3.1)
MARCM
Uses healpix pixels projected from ccd to count standing, join and overlap areas
Produces a map of the overlap
It can be extended to 3 samples if necessary
"""
nside = 1024 # Resolution of output maps
nsideSTR = '1024' # String value for nside
nsidesout = None # if you want full sky degraded maps to be written
ratiores = 1 # Superresolution/oversampling ratio, simp mode doesn't allow anything other than 1
oversamp = '1' # string oversaple value
mode = 1 # 1: fully sequential, 2: parallel then sequential, 3: fully parallel
pixoffset = 0 # How many pixels are being removed on the edge of each CCD
mjdw = ''
# ----- sample1 ------
tbdata = pyfits.open(sample1.ccds)[1].data
if(sample1.DR == 'DR3'):
inds = np.where((tbdata['filter'] == sample1.band) & (tbdata['blacklist_ok'] == True))
if(sample1.DR == 'DR4'):
inds = np.where((tbdata['filter'] == sample1.band) & (tbdata['bitmask'] == 0))
elif(sample.DR == 'DR5'):
if(sample.survey == 'DECaLS'):
inds = np.where((tbdata['filter'] == band) & (tbdata['blacklist_ok'] == True))
elif(sample.survey == 'DEShyb'):
inds = np.where((tbdata['filter'] == band) & (tbdata['blacklist_ok'] == True) & (map(InDEShybFootprint,tbdata['ra'],tbdata['dec'])))
elif(sample.survey == 'NGCproxy'):
inds = np.where((tbdata['filter'] == band) & (tbdata['blacklist_ok'] == True) & (map(InNGCproxyFootprint,tbdata['ra'])))
if(sample1.DR == 'DR6'):
inds = np.where((tbdata['filter'] == sample1.band))
#number of ccds at each point
nccd1=np.ones(len(tbdata))
catalogue_name=sample1.catalog
band = sample1.band
sample_names = ['band_'+band]
localdir=sample1.localdir
# What properties do you want mapped?
# Each each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)]
quicksipVerbose(sample1.verbose)
propertiesandoperations = [('nccd1','','total')]
# What properties to keep when reading the images?
#Should at least contain propertiesandoperations and the image corners.
# MARCM - actually no need for ra dec image corners.
# Only needs ra0 ra1 ra2 ra3 dec0 dec1 dec2 dec3 only if fast track appropriate quicksip subroutines were implemented
#propertiesToKeep = [ 'filter', 'AIRMASS', 'FWHM','mjd_obs'] \
# + ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']
propertiesToKeep = ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']
# Create big table with all relevant properties.
tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [nccd1], names = propertiesToKeep + [ 'nccd1'])
# Read the table, create Healtree, project it into healpix maps, and write these maps.
# Done with Quicksip library, note it has quite a few hardcoded values (use new version by MARCM for BASS and MzLS)
# project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside)
project_and_write_maps(mode, propertiesandoperations, tbdata, catalogue_name, localdir, sample_names, inds, nside, ratiores, pixoffset, nsidesout)
# ----- sample2 ------
tbdata = pyfits.open(sample2.ccds)[1].data
if(sample2.DR == 'DR3'):
inds = np.where((tbdata['filter'] == sample2.band) & (tbdata['blacklist_ok'] == True))
if(sample2.DR == 'DR4'):
inds = np.where((tbdata['filter'] == sample2.band) & (tbdata['bitmask'] == 0))
if(sample2.DR == 'DR5'):
inds = np.where((tbdata['filter'] == sample2.band) & (tbdata['blacklist_ok'] == True))
if(sample2.DR == 'DR6'):
inds = np.where((tbdata['filter'] == sample2.band))
#number of ccds at each point
nccd2=np.ones(len(tbdata))
catalogue_name=sample2.catalog
band = sample2.band
sample_names = ['band_'+band]
localdir=sample2.localdir
# Quick sip projections
quicksipVerbose(sample2.verbose)
propertiesandoperations = [('nccd2','','total')]
propertiesToKeep = ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']
tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [nccd2], names = propertiesToKeep + [ 'nccd2'])
project_and_write_maps(mode, propertiesandoperations, tbdata, catalogue_name, localdir, sample_names, inds, nside, ratiores, pixoffset, nsidesout)
# --- read ccd counts per pixel sample 1 ----
prop='nccd1'
op='total'
localdir=sample1.localdir
band=sample1.band
catalogue_name1=sample1.catalog
fname=localdir+catalogue_name1+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+catalogue_name1+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
f = fitsio.read(fname)
val1 = f['SIGNAL']
pix1 = f['PIXEL']
npix1 = np.size(pix1)
# ---- read ccd counts per pixel sample 2 ----
prop='nccd2'
opt='total'
localdir=sample2.localdir
band=sample2.band
catalogue_name2=sample2.catalog
fname=localdir+catalogue_name2+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+catalogue_name2+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
f = fitsio.read(fname)
val2 = f['SIGNAL']
pix2 = f['PIXEL']
npix2 = np.size(pix2)
# ---- compute common healpix pixels #this is python 2.7 use isin for 3.X
commonpix = np.in1d(pix1,pix2,assume_unique=True) #unique: no pixel indicies are repeated
npix=np.sum(commonpix) #sum of bolean in array size pix1
area1 = npix1/(float(nside)**2.*12)*360*360./pi
area2 = npix2/(float(nside)**2.*12)*360*360./pi
area = npix/(float(nside)**2.*12)*360*360./pi
print 'Area of sample', sample1.catalog,' is ', np.round(area1) , ' sq. deg.'
print 'Area of sample', sample2.catalog,' is ', np.round(area2) , ' sq. deg.'
print 'The total JOINT area is ', np.round(area1+area2-area) ,'sq. deg.'
print 'The INTERSECTING area is ', np.round(area) , ' sq. deg.'
# ----- plot join area using infomration from sample1 ---
ral = []
decl = []
myval = []
for i in range(0,len(pix1)):
if(commonpix[i]):
th,phi = hp.pix2ang(int(nside),pix1[i])
ra,dec = thphi2radec(th,phi)
ral.append(ra)
decl.append(dec)
myval.append(1)
mylabel = 'join-area'
import matplotlib.cm as cm
mapa = plt.scatter(ral,decl,c=myval, cmap=cm.rainbow,s=2.,lw=0,edgecolors='none')
fname = localdir+mylabel+'_'+band+'_'+catalogue_name1+'_'+catalogue_name2+str(nside)+'.png'
cbar = plt.colorbar(mapa)
plt.xlabel('r.a. (degrees)')
plt.ylabel('declination (degrees)')
plt.title('Map of '+ mylabel +' for '+catalogue_name1+' and '+catalogue_name2+' '+band+'-band; Area = '+str(area))
plt.xlim(0,360)
plt.ylim(-30,90)
plt.savefig(fname)
plt.close()
return fname
def val3p4c_seeing(sample,passmin=3,nbin=100,nside=1024):
    """
    Requirement V4.1: z-band image quality will be smaller than 1.3 arcsec FWHM in at least one pass.
    Produces FWHM maps and histograms for visual inspection
    H-JS's addition to MARCM stable version.
    MARCM stable version, improved from AJR quick hack
    This now included extinction from the exposures
    Uses quicksip subroutines from Boris, corrected
    for a bug I found for BASS and MzLS ccd orientation

    Parameters:
        sample  : survey-sample object; provides band, catalog, ccds, localdir, extc, DR.
        passmin : minimum number of passes a pixel must have to be included.
        nbin    : number of histogram bins.
        nside   : healpix resolution of the output maps.

    Returns:
        (mapfile, histofile) — paths of the FWHM map and histogram PNGs.
    """
    #nside = 1024 # Resolution of output maps
    nsideSTR=str(nside)
    #nsideSTR='1024' # same as nside but in string format
    nsidesout = None # if you want full sky degraded maps to be written
    ratiores = 1 # Superresolution/oversampling ratio, simp mode doesn't allow anything other than 1
    mode = 1 # 1: fully sequential, 2: parallel then sequential, 3: fully parallel
    pixoffset = 0 # How many pixels are being removed on the edge of each CCD? 15 for DES.
    oversamp='1' # ratiores in string format
    band = sample.band
    catalogue_name = sample.catalog
    fname = sample.ccds
    localdir = sample.localdir
    extc = sample.extc
    #Read ccd file
    tbdata = pyfits.open(fname)[1].data
    # ------------------------------------------------------
    # Obtain indices; quality cuts differ per data release.
    auxstr='band_'+band
    sample_names = [auxstr]
    if(sample.DR == 'DR3'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
    elif(sample.DR == 'DR4'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['bitmask'] == 0))
    elif(sample.DR == 'DR5'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
    elif(sample.DR == 'DR6'):
        inds = np.where((tbdata['filter'] == band))
    #Read data
    #obtain invnoisesq here, including extinction
    # NOTE(review): nmag and ivar are computed but never used in this function
    # (the projected quantity here is FWHM, not ivar) — candidates for removal.
    nmag = Magtonanomaggies(tbdata['galdepth']-extc*tbdata['EBV'])/5.
    ivar= 1./nmag**2.
    # What properties do you want mapped?
    # Each each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)]
    propertiesandoperations = [
        ('FWHM', '', 'min'),
        ('FWHM', '', 'num')]
    # What properties to keep when reading the images?
    # Should at least contain propertiesandoperations and the image corners.
    # MARCM - actually no need for ra dec image corners.
    # Only needs ra0 ra1 ra2 ra3 dec0 dec1 dec2 dec3 only if fast track appropriate quicksip subroutines were implemented
    propertiesToKeep = [ 'filter', 'AIRMASS', 'FWHM','mjd_obs'] \
        + ['RA', 'DEC', 'crval1', 'crval2', 'crpix1', 'crpix2', 'cd1_1', 'cd1_2', 'cd2_1', 'cd2_2','width','height']
    # Create big table with all relevant properties.
    #tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [ivar], names = propertiesToKeep + [ 'ivar'])
    tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep], names = propertiesToKeep)
    # Read the table, create Healtree, project it into healpix maps, and write these maps.
    # Done with Quicksip library, note it has quite a few hardcoded values (use new version by MARCM for BASS and MzLS)
    # project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalogue_name, outroot, sample_names, inds, nside)
    project_and_write_maps(mode, propertiesandoperations, tbdata, catalogue_name, localdir, sample_names, inds, nside, ratiores, pixoffset, nsidesout)
    # Read Healpix maps from quicksip: per-pixel minimum FWHM ...
    prop='FWHM'
    op='min'
    # NOTE(review): vmin/vmax below are never used (the plot uses minv/maxv) —
    # likely remnants from the depth-map version of this function.
    vmin=21.0
    vmax=24.0
    fname2=localdir+catalogue_name+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+\
    catalogue_name+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
    f = fitsio.read(fname2)
    # ... and per-pixel number of passes.
    prop='FWHM'
    op1='num'
    fname2=localdir+catalogue_name+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+\
    catalogue_name+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op1+'.fits.gz'
    f1 = fitsio.read(fname2)
    # HEALPIX DEPTH MAPS
    # convert ivar to depth
    import healpy as hp
    from healpix import pix2ang_ring,thphi2radec
    ral = []
    decl = []
    valf = []
    val = f['SIGNAL']
    npass = f1['SIGNAL']
    pixelarea = (180./np.pi)**2*4*np.pi/(12*nside**2)  # sq. deg. per healpix pixel
    pix = f['PIXEL']
    print min(val),max(val)
    print min(npass),max(npass)
    # Obtain values to plot: keep only pixels with at least passmin passes.
    j = 0
    for i in range(0,len(f)):
        if (npass[i] >= passmin):
            #th,phi = pix2ang_ring(4096,f[i]['PIXEL'])
            th,phi = hp.pix2ang(nside,f[i]['PIXEL'])
            ra,dec = thphi2radec(th,phi)
            ral.append(ra)
            decl.append(dec)
            valf.append(val[i])
    print len(val),len(valf)
    # print len(val),len(valf),valf[0],valf[1],valf[len(valf)-1]
    minv = np.min(valf)
    maxv = np.max(valf)
    print minv,maxv
    # Draw the map
    from matplotlib import pyplot as plt
    import matplotlib.cm as cm
    mylabel= prop + op
    mapa = plt.scatter(ral,decl,c=valf, cmap=cm.rainbow,s=2., vmin=minv, vmax=maxv, lw=0,edgecolors='none')
    cbar = plt.colorbar(mapa)
    plt.xlabel('r.a. (degrees)')
    plt.ylabel('declination (degrees)')
    plt.title('Map of '+ mylabel +' for '+catalogue_name+' '+band+'-band pass >='+str(passmin))
    plt.xlim(0,360)
    plt.ylim(-30,90)
    mapfile=localdir+mylabel+'pass'+str(passmin)+'_'+band+'_'+catalogue_name+str(nside)+'map.png'
    print 'saving plot to ', mapfile
    plt.savefig(mapfile)
    plt.close()
    #plt.show()
    #cbar.set_label(r'5$\sigma$ galaxy depth', rotation=270,labelpad=1)
    #plt.xscale('log')
    npix=len(pix)
    print 'The total area is ', npix/(float(nside)**2.*12)*360*360./pi, ' sq. deg.'
    # Draw the histogram of min FWHM over the selected pixels.
    minN = minv-0.0001
    maxN = maxv+.0001
    hl = np.zeros((nbin))
    for i in range(0,len(valf)):
        bin = int(nbin*(valf[i]-minN)/(maxN-minN))
        hl[bin] += 1
    Nl = []
    for i in range(0,len(hl)):
        Nl.append(minN+i*(maxN-minN)/float(nbin)+0.5*(maxN-minN)/float(nbin))
    #When an array object is printed or converted to a string, it is represented as array(typecode, initializer)
    NTl = np.array(valf)
    Nl = np.array(Nl)*0.262 # in arcsec (0.262 arcsec/pixel plate scale)
    hl = np.array(hl)*pixelarea  # counts -> sq. deg.
    hlcs = np.sum(hl)-np.cumsum(hl)  # cumulative area ABOVE each FWHM value
    print "A total of ",np.sum(hl),"squaredegrees with pass >=",str(passmin)
    # print "#FWHM Area(>FWHM)"
    # print np.column_stack((Nl,hlcs))
    idx = (np.abs(Nl-1.3)).argmin()  # bin closest to the 1.3 arcsec requirement
    print "#FWHM Area(>FWHM) Fractional Area(>FWHM)"
    print Nl[idx], hlcs[idx], hlcs[idx]/hlcs[0]
    mean = np.sum(NTl)/float(len(NTl))
    std = sqrt(np.sum(NTl**2.)/float(len(NTl))-mean**2.)
    print "#mean STD"
    print mean,std
    plt.plot(Nl,hl,'k-')
    #plt.xscale('log')
    plt.xlabel(op+' '+band+ ' seeing (")')
    plt.ylabel('Area (squaredegrees)')
    plt.title('Historam of '+mylabel+' for '+catalogue_name+' '+band+'-band: pass >='+str(passmin))
    ax2 = plt.twinx()
    ax2.plot(Nl,hlcs,'r')
    y0 = np.arange(0,10000, 100)
    x0 = y0*0+1.3  # vertical line at the 1.3 arcsec requirement
    shlcs = '%.2f' % (hlcs[idx])
    shlcsr = '%.2f' % (hlcs[idx]/hlcs[0])
    print "Area with FWHM greater than 1.3 arcsec fraction"
    print shlcs, shlcsr
    ax2.plot(x0,y0,'r--', label = r'$Area_{\rm FWHM > 1.3arcsec}= $'+shlcs+r'$deg^2$ ( $f_{\rm fail}=$'+ shlcsr+')')
    legend = ax2.legend(loc='upper center', shadow=True)
    ax2.set_ylabel('Cumulative Area sqdegrees', color='r')
    #plt.show()
    histofile = localdir+mylabel+'pass'+str(passmin)+'_'+band+'_'+catalogue_name+str(nside)+'histo.png'
    print 'saving plot to ', histofile
    plt.savefig(histofile)
    # fig.savefig(histofile)
    plt.close()
    return mapfile,histofile
def val3p4c_seeingplots(sample,passmin=3,nbin=100,nside=1024):
    """
    Requirement V4.1: z-band image quality will be smaller than 1.3 arcsec FWHM in at least one pass.
    Produces FWHM maps and histograms for visual inspection
    H-JS's addition to MARCM stable version.
    MARCM stable version, improved from AJR quick hack
    This now included extinction from the exposures
    Uses quicksip subroutines from Boris, corrected
    for a bug I found for BASS and MzLS ccd orientation
    The same as val3p4c_seeing, but this makes only plots without regenerating input files for the plots

    Parameters:
     - sample: object carrying survey metadata (band, catalog, ccds path,
       localdir, extc, DR, survey)
     - passmin: only HEALPix pixels observed in at least this many passes
       are plotted
     - nbin: number of histogram bins
     - nside: HEALPix resolution of the input maps

    Returns (mapfile, histofile): paths of the two PNGs written to disk.
    """
    #nside = 1024 # Resolution of output maps
    nsideSTR=str(nside)
    #nsideSTR='1024' # same as nside but in string format
    nsidesout = None # if you want full sky degraded maps to be written
    ratiores = 1 # Superresolution/oversampling ratio, simp mode doesn't allow anything other than 1
    mode = 1 # 1: fully sequential, 2: parallel then sequential, 3: fully parallel
    pixoffset = 0 # How many pixels are being removed on the edge of each CCD? 15 for DES.
    oversamp='1' # ratiores in string format
    band = sample.band
    catalogue_name = sample.catalog
    fname = sample.ccds
    localdir = sample.localdir
    extc = sample.extc
    #Read ccd file
    # NOTE(review): `pyfits` is assumed to be imported at module level
    # (not visible in this chunk) -- confirm.
    tbdata = pyfits.open(fname)[1].data
    # ------------------------------------------------------
    # Obtain indices
    auxstr='band_'+band
    sample_names = [auxstr]
    # Select photometric, non-blacklisted CCDs of the requested band;
    # the exact quality columns differ per data release (DR).
    if(sample.DR == 'DR3'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
    elif(sample.DR == 'DR4'):
        inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['bitmask'] == 0))
    elif(sample.DR == 'DR5'):
        if(sample.survey == 'DECaLS'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True))
        elif(sample.survey == 'DEShyb'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True) & (map(InDEShybFootprint,tbdata['ra'],tbdata['dec'])))
        elif(sample.survey == 'NGCproxy'):
            inds = np.where((tbdata['filter'] == band) & (tbdata['photometric'] == True) & (tbdata['blacklist_ok'] == True) & (map(InNGCproxyFootprint,tbdata['ra'])))
    elif(sample.DR == 'DR6'):
        inds = np.where((tbdata['filter'] == band))
    #Read data
    #obtain invnoisesq here, including extinction
    nmag = Magtonanomaggies(tbdata['galdepth']-extc*tbdata['EBV'])/5.
    ivar= 1./nmag**2.
    # What properties do you want mapped?
    # Each each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)]
    propertiesandoperations = [
        ('FWHM', '', 'min'),
        ('FWHM', '', 'num')]
    # Read Haelpix maps from quicksip
    print "Generating only plots using the existing intermediate outputs"
    prop='FWHM'
    op='min'
    # NOTE(review): vmin/vmax are set here but never used below.
    vmin=21.0
    vmax=24.0
    # Per-pixel minimum FWHM map produced earlier by quicksip.
    fname2=localdir+catalogue_name+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+\
        catalogue_name+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op+'.fits.gz'
    f = fitsio.read(fname2)
    prop='FWHM'
    op1='num'
    # Per-pixel pass-count map (how many exposures cover each pixel).
    fname2=localdir+catalogue_name+'/nside'+nsideSTR+'_oversamp'+oversamp+'/'+\
        catalogue_name+'_band_'+band+'_nside'+nsideSTR+'_oversamp'+oversamp+'_'+prop+'__'+op1+'.fits.gz'
    f1 = fitsio.read(fname2)
    # HEALPIX DEPTH MAPS
    # convert ivar to depth
    import healpy as hp
    from healpix import pix2ang_ring,thphi2radec
    ral = []
    decl = []
    valf = []
    val = f['SIGNAL']
    npass = f1['SIGNAL']
    # Area of a single HEALPix pixel in square degrees.
    pixelarea = (180./np.pi)**2*4*np.pi/(12*nside**2)
    pix = f['PIXEL']
    print min(val),max(val)
    print min(npass),max(npass)
    # Obtain values to plot
    j = 0
    # Keep only pixels covered by at least `passmin` passes.
    for i in range(0,len(f)):
        if (npass[i] >= passmin):
            #th,phi = pix2ang_ring(4096,f[i]['PIXEL'])
            th,phi = hp.pix2ang(nside,f[i]['PIXEL'])
            ra,dec = thphi2radec(th,phi)
            ral.append(ra)
            decl.append(dec)
            valf.append(val[i])
    print len(val),len(valf)
    # print len(val),len(valf),valf[0],valf[1],valf[len(valf)-1]
    minv = np.min(valf)
    maxv = np.max(valf)
    print minv,maxv
    # Draw the map
    from matplotlib import pyplot as plt
    import matplotlib.cm as cm
    mylabel= prop + op
    mapa = plt.scatter(ral,decl,c=valf, cmap=cm.rainbow,s=2., vmin=minv, vmax=maxv, lw=0,edgecolors='none')
    cbar = plt.colorbar(mapa)
    plt.xlabel('r.a. (degrees)')
    plt.ylabel('declination (degrees)')
    plt.title('Map of '+ mylabel +' for '+catalogue_name+' '+band+'-band pass >='+str(passmin))
    plt.xlim(0,360)
    plt.ylim(-30,90)
    mapfile=localdir+mylabel+'pass'+str(passmin)+'_'+band+'_'+catalogue_name+str(nside)+'map.png'
    print 'saving plot to ', mapfile
    plt.savefig(mapfile)
    plt.close()
    #plt.show()
    #cbar.set_label(r'5$\sigma$ galaxy depth', rotation=270,labelpad=1)
    #plt.xscale('log')
    npix=len(pix)
    # NOTE(review): `pi` (and `sqrt` below) are assumed to come from a
    # module-level `from math import *` or similar -- confirm, not visible here.
    print 'The total area is ', npix/(float(nside)**2.*12)*360*360./pi, ' sq. deg.'
    # Draw the histogram
    minN = minv-0.0001
    maxN = maxv+.0001
    hl = np.zeros((nbin))
    for i in range(0,len(valf)):
        bin = int(nbin*(valf[i]-minN)/(maxN-minN))
        hl[bin] += 1
    Nl = []
    # Bin centres for the histogram.
    for i in range(0,len(hl)):
        Nl.append(minN+i*(maxN-minN)/float(nbin)+0.5*(maxN-minN)/float(nbin))
    #When an array object is printed or converted to a string, it is represented as array(typecode, initializer)
    NTl = np.array(valf)
    # Convert FWHM from pixels to arcseconds (0.262 arcsec/pixel plate scale).
    Nl = np.array(Nl)*0.262 # in arcsec
    hl = np.array(hl)*pixelarea
    # Cumulative area with FWHM larger than each bin value.
    hlcs = np.sum(hl)-np.cumsum(hl)
    print "A total of ",np.sum(hl),"squaredegrees with pass >=",str(passmin)
    # print "#FWHM Area(>FWHM)"
    # print np.column_stack((Nl,hlcs))
    # Find the bin closest to the 1.3 arcsec requirement threshold.
    idx = (np.abs(Nl-1.3)).argmin()
    print "#FWHM Area(>FWHM) Fractional Area(>FWHM)"
    print Nl[idx], hlcs[idx], hlcs[idx]/hlcs[0]
    mean = np.sum(NTl)/float(len(NTl))
    std = sqrt(np.sum(NTl**2.)/float(len(NTl))-mean**2.)
    print "#mean STD"
    print mean,std
    plt.plot(Nl,hl,'k-')
    #plt.xscale('log')
    plt.xlabel(op+' '+band+ ' seeing (")')
    plt.ylabel('Area (squaredegrees)')
    plt.title('Historam of '+mylabel+' for '+catalogue_name+' '+band+'-band: pass >='+str(passmin))
    ax2 = plt.twinx()
    ax2.plot(Nl,hlcs,'r')
    y0 = np.arange(0,10000, 100)
    x0 = y0*0+1.3
    shlcs = '%.2f' % (hlcs[idx])
    shlcsr = '%.2f' % (hlcs[idx]/hlcs[0])
    print "Area with FWHM greater than 1.3 arcsec fraction"
    print shlcs, shlcsr
    ax2.plot(x0,y0,'r--', label = r'$Area_{\rm FWHM > 1.3arcsec}= $'+shlcs+r'$deg^2$ ( $f_{\rm fail}=$'+ shlcsr+')')
    legend = ax2.legend(loc='upper center', shadow=True)
    ax2.set_ylabel('Cumulative Area sqdegrees', color='r')
    #plt.show()
    # fig = plt.gcf()
    histofile = localdir+mylabel+'pass'+str(passmin)+'_'+band+'_'+catalogue_name+str(nside)+'histo.png'
    print 'saving plot to ', histofile
    plt.savefig(histofile)
    # fig.savefig(histofile)
    plt.close()
    return mapfile,histofile
|
legacysurvey/pipeline
|
validationtests/desi_image_validation.py
|
Python
|
gpl-2.0
| 60,916
|
[
"Galaxy"
] |
0ed8b575330b3b843bb51eba52f340d8e764b37743ac1e10187004ed5a25fccd
|
# -*- coding: utf-8 -*-
#
# Buildbot documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 10 15:13:31 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import textwrap
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'bbdocs.ext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Buildbot'
copyright = u'Buildbot Team Members'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
if 'VERSION' in os.environ:
    version = os.environ['VERSION']
else:
    # Pull the version out of the package without importing it.
    # NOTE(review): execfile() is Python-2-only.
    gl = {'__file__': '../buildbot/__init__.py'}
    execfile('../buildbot/__init__.py', gl)
    version = gl['version']
# The full version, including alpha/beta/rc tags.
release = version
# add a loud note for anyone looking at the latest docs
if release == 'latest':
    rst_prolog = textwrap.dedent("""\
        .. caution:: This page documents the latest, unreleased version of
        Buildbot. For documentation for released versions, see
        http://buildbot.net/buildbot/docs.
        """)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'release-notes/*.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
intersphinx_mapping = {
    'python': ('http://python.readthedocs.org/en/latest/', None),
    'sqlalchemy': ('http://sqlalchemy.readthedocs.org/en/latest/', None),
}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'agogo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'stickysidebar': 'true'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = os.path.join('_images', 'header-text-transparent.png')
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = os.path.join('_static', 'buildbot.ico')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
html_use_index = True
html_use_modindex = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'BuildBotdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '11pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'BuildBot.tex', u'BuildBot Documentation',
     u'Brian Warner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = os.path.join('_images', 'header-text-transparent.png')
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
latex_show_urls = True
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'buildbot', u'BuildBot Documentation',
     [u'Brian Warner'], 1)
]
# Monkey-patch Sphinx to treat unhiglighted code as error.
import sphinx
import sphinx.highlighting
from pkg_resources import parse_version
from sphinx.errors import SphinxWarning
# Versions of Sphinx below changeset 1860:19b394207746 (before v0.6.6 release)
# won't work due to different PygmentsBridge interface.
required_sphinx_version = '0.6.6'
sphinx_version_supported = \
    parse_version(sphinx.__version__) >= parse_version(required_sphinx_version)
# This simple monkey-patch allows either fail on first unhighlighted block or
# print all unhighlighted blocks and don't fail at all.
# First behaviour is useful for testing that all code is highlighted, second ---
# for fixing lots of unhighlighted code.
fail_on_first_unhighlighted = True
class UnhighlightedError(SphinxWarning):
    """Raised when a code block could not be syntax-highlighted."""
    pass
# PygmentsBridge.unhighlighted() added in Sphinx in changeset 574:f1c885fdd6ad
# (0.5 release).
def patched_unhighlighted(self, source):
    """
    Replacement for PygmentsBridge.unhighlighted.

    Depending on the module-level ``fail_on_first_unhighlighted`` flag,
    either raise UnhighlightedError for the first unhighlighted block, or
    report the block on stderr and fall back to the original behaviour.
    """
    # Indent the offending source so it reads as a quoted block in the message.
    indented_source = ' ' + '\n '.join(source.split('\n'))
    if fail_on_first_unhighlighted:
        msg = textwrap.dedent(u"""\
            Block not highlighted:
            %s
            If it should be unhighlighted, please specify explicitly language of
            this block as "none":
            .. code-block:: none
            ...
            If this block is Python example, then it probably contains syntax
            errors, such as unmatched brackets or invalid indentation.
            Note that in most places you can use "..." in Python code as valid
            anonymous expression.
            """) % indented_source
        raise UnhighlightedError(msg)
    else:
        # Best-effort mode: report and delegate to the saved original method.
        msg = textwrap.dedent(u"""\
            Unhighlighted block:
            %s
            """) % indented_source
        sys.stderr.write(msg.encode('ascii', 'ignore'))
        return orig_unhiglighted(self, source)
# Compatible with PygmentsBridge.highlight_block since Sphinx'
# 1860:19b394207746 changeset (v0.6.6 release)
def patched_highlight_block(self, *args, **kwargs):
    """
    Replacement for PygmentsBridge.highlight_block that routes the
    UnhighlightedError message through the caller-supplied ``warn``
    callback (if any) before re-raising.
    """
    try:
        return orig_highlight_block(self, *args, **kwargs)
    # NOTE(review): Python-2-only `except X, ex` syntax.
    except UnhighlightedError, ex:
        msg = ex.args[0]
        if 'warn' in kwargs:
            kwargs['warn'](msg)
        raise
# Install the monkey-patches only when the Sphinx version exposes the
# expected PygmentsBridge interface; otherwise warn and leave Sphinx alone.
if sphinx_version_supported:
    orig_unhiglighted = sphinx.highlighting.PygmentsBridge.unhighlighted
    orig_highlight_block = sphinx.highlighting.PygmentsBridge.highlight_block
    sphinx.highlighting.PygmentsBridge.unhighlighted = patched_unhighlighted
    sphinx.highlighting.PygmentsBridge.highlight_block = patched_highlight_block
else:
    msg = textwrap.dedent("""\
        WARNING: Your Sphinx version %s is too old and will not work with
        monkey-patch for checking unhighlighted code. Minimal required version
        of Sphinx is %s. Check disabled.
        """) % (sphinx.__version__, required_sphinx_version)
    sys.stderr.write(msg)
|
mitya57/debian-buildbot
|
docs/conf.py
|
Python
|
gpl-2.0
| 10,762
|
[
"Brian"
] |
79868484738f9f20fac6964b05cc6cbe4df738051e72d24d4885794d9c6ecab6
|
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import astropy.table as table
from astropy import units as u, constants as c
import ppxf_util as util
from ppxf import ppxf
from scipy import ndimage
import manga_tools as m
import os
import pickle as pk
# for sampling
import emcee
import george
from george import kernels
import triangle
import misc_tools
def BPT(NII, SII, OI, OIII):
    '''
    BPT diagram in all three main styles
    Inputs are 2d arrays
    '''
    # NOTE(review): unimplemented stub -- the docstring is the entire body,
    # so calling this function currently returns None and plots nothing.
def convolve_variable_width(a, sig, prec=1.):
'''
approximate convolution with a kernel that varies along the spectral
direction, by stretching the data by the inverse of the kernel's
width at a given position
N.B.: this is an approximation to the proper operation, which
involves iterating over each pixel of each template and
performing ~10^6 convolution operations
Parameters:
- a: N-D array; convolution will occur along the final axis
- sig: 1-D array (must have same length as the final axis of a);
describes the varying width of the kernel
- prec: precision argument. When higher, oversampling is more thorough
'''
assert (len(sig) == a.shape[-1]), '\tLast dimension of `a` must equal \
length of `sig` (each element of a must have a convolution width)'
sig0 = sig.max() # the "base" width that results in minimal blurring
# if prec = 1, just use sig0 as base.
n = np.rint(prec * sig0/sig).astype(int)
print n.min()
# print n
print '\tWarped array length: {}'.format(n.sum())
# define "warped" array a_w with n[i] instances of a[:,:,i]
a_w = np.repeat(a, n, axis=-1)
# now a "warped" array sig_w
sig_w = np.repeat(sig, n)
# define start and endpoints for each value
nl = np.cumsum(np.insert(n, 0, 0))[:-1]
nr = np.cumsum(n)
# now the middle of the interval
nm = np.rint(np.median(np.column_stack((nl, nr)), axis=1)).astype(int)
# print nm
# print a_w.shape, sig_w.shape # check against n.sum()
# now convolve the whole thing with a Gaussian of width sig0
print '\tCONVOLVE...'
# account for the increased precision required
a_w_f = np.empty_like(a_w)
# have to iterate over the rows and columns, to avoid MemoryError
c = 0 # counter (don't judge me, it was early in the morning)
for i in range(a_w_f.shape[0]):
for j in range(a_w_f.shape[1]):
'''c += 1
print '\t\tComputing convolution {} of {}...'.format(
c, a_w_f.shape[0] * a_w_f.shape[1])'''
a_w_f[i, j, :] = ndimage.gaussian_filter1d(
a_w[i, j, :], prec*sig0)
# print a_w_f.shape # should be the same as the original shape
# and downselect the pixels (take approximate middle of each slice)
# f is a mask that will be built and applied to a_w_f
# un-warp the newly-convolved array by selecting only the slices
# in dimension -1 that are in nm
a_f = a_w_f[:, :, nm]
return a_f
def setup_MaNGA_stellar_libraries(fname_ifu, fname_tem, z=0.01,
                                  plot=False):
    '''
    set up all the required stellar libraries for a MaNGA datacube
    this should only need to be run once.

    Parameters:
     - fname_ifu: path of the MaNGA LOGCUBE FITS file
     - fname_tem: path of the SSP template grid FITS file
     - z: redshift at which to degrade the template resolution
     - plot: if True, show a diagnostic plot of the smoothing sigma

    Side effect: writes stellar_libraries/st-<z>.fits (clobbering any
    existing file); returns None.
    '''
    print 'Reading drpall...'
    drpall = fits.open(m.drpall_loc + 'drpall-v1_3_3.fits')[0].data
    print 'Reading MaNGA HDU...'
    MaNGA_hdu = fits.open(fname_ifu)
    # open global MaNGA header
    glob_h = MaNGA_hdu[0].header
    print 'Constructing wavelength grid...'
    # read in some average value for wavelength solution and spectral res
    L_ifu = m.wave(MaNGA_hdu).data
    R_avg, l_avg, = m.res_over_plate('MPL-3', '7443', plot=plot)
    FWHM_avg = l_avg / R_avg  # FWHM of a galaxy in AA at some wavelength
    # now read in basic info about templates and
    # up-sample "real" spectral resolution to the model wavelength grid
    tems = fits.open(fname_tem)[0]
    htems = tems.header
    # template wavelength grid from the FITS header (natural log, base e)
    logL_tem = np.linspace(
        htems['CRVAL1'],
        htems['CRVAL1'] + (htems['NAXIS1'] - 1) * htems['CDELT1'],
        htems['NAXIS1'])  # base e
    L_tem = np.exp(logL_tem)
    dL_tem = np.empty_like(L_tem)
    dL_tem[:-1] = L_tem[1:] - L_tem[:-1]
    dL_tem[-1] = dL_tem[-2]  # this is not exact, but it's efficient
    # since the impulse-response of the templates is infinitely thin
    # approximate the FWHM as half the pixel width
    FWHM_tem = dL_tem/2.
    # interpolate the measured instrument FWHM onto the template grid
    FWHM_avg_s = np.interp(x=L_tem, xp=l_avg, fp=FWHM_avg)
    if plot == True:
        plt.close('all')
        fig = plt.figure(figsize=(8, 6))
        ax = fig.add_subplot(111)
        # NOTE(review): this loop variable shadows (and clobbers) the `z`
        # parameter -- after the loop z == 0.02 regardless of the argument.
        # Confirm whether that is intended before relying on `z` below.
        for z in [0.00, 0.01, 0.02]:
            # get sigma for a bunch of different redshifts
            FWHM_diff_ = np.sqrt(
                (FWHM_avg_s / (1. + z))**2. - FWHM_tem**2.)
            sigma_ = FWHM_diff_/2.355/dL_tem
            ax.plot(L_tem, sigma_,
                    label='z = {:.3f}'.format(z))
        ax.legend(loc='best')
        ax.set_xlabel(r'$\lambda[\AA]$')
        ax.set_ylabel(r'$\frac{R_{tem}}{R_{spec}}$')
        plt.tight_layout()
        plt.show()
    logL_ifu = np.log(L_ifu)
    dlogL_ifu = np.log(10.**MaNGA_hdu[0].header['CD3_3'])
    velscale_ifu = np.asarray((dlogL_ifu * c.c).to(u.km/u.s))
    print 'velscale:', velscale_ifu
    print 'Constructing spectral library files...'
    #
    # file format is st-<REDSHIFT>.fits
    # <REDSHIFT> is of form 0.XXXX
    # quadrature difference between the (redshifted) instrumental FWHM
    # and the intrinsic template FWHM; sigma is in template pixels
    FWHM_diff = np.sqrt(
        (FWHM_avg_s / (1. + z))**2. - FWHM_tem**2.)
    sigma = FWHM_diff/2.355/dL_tem
    print tems.data.shape
    a_f = convolve_variable_width(tems.data, sigma, prec=4.)
    # return a_f, dlogL_ifu, logL_tem
    # rebin once to learn the output wavelength grid, then rebin every model
    spec_ssp_new_sample, logL_ssp_new = m.ssp_rebin(
        logL_tem, a_f[0, 0, :], dlogL_ifu)
    spec_ssp_new = np.empty([a_f.shape[0], a_f.shape[1], len(logL_ssp_new)])
    for Ti in range(a_f.shape[0]):
        for Zi in range(a_f.shape[1]):
            spec_ssp_new[Ti, Zi, :] = m.ssp_rebin(
                logL_tem, a_f[Ti, Zi, :], dlogL_ifu)[0]
    fname2 = 'stellar_libraries/st-{0:.4f}.fits'.format(z)
    print '\tMaking HDU:', fname2
    blurred_hdu = fits.PrimaryHDU(spec_ssp_new)
    blurred_hdu.header = tems.header
    blurred_hdu.header['z'] = z
    blurred_hdu.header['NAXIS1'] = len(logL_ssp_new)
    blurred_hdu.header['CRVAL1'] = logL_ssp_new[0]
    blurred_hdu.header['CDELT1'] = dlogL_ifu
    blurred_hdu.writeto(fname2, clobber=True)
def kin_models(tems):
    '''
    Return the SSP models suitable for kinematic fitting: the
    solar-metallicity slice of the template grid, which best spans the
    spectral shape-space.

    Parameters:
     - tems: FITS HDUList whose primary HDU holds a (T, Z, lambda)
       template grid, with the metallicity axis described by the
       CRVAL2/CDELT2/NAXIS2 header keywords

    Returns a 2-D array of shape (n_T, n_lambda): one spectrum per age,
    at the metallicity bin closest to solar ([M/H] == 0).
    '''
    htems = tems[0].header
    dtems = tems[0].data
    # metallicity grid reconstructed from the WCS-style header keywords
    Zs = htems['CRVAL2'] + np.linspace(
        0., htems['CDELT2'] * (htems['NAXIS2'] - 1), htems['NAXIS2'])
    # pick the metallicity bin nearest to solar ([M/H] == 0)
    tems_sol = dtems[:, np.argmin(np.abs(Zs)), :]
    # BUG FIX: the original computed this slice but never returned it,
    # so callers (which do `kin_models(tems).shape`) received None.
    return tems_sol
def ppxf_run_MaNGA_galaxy(ifu, fname_tem, first_few=None, Tsample = 4,
                          over = True):
    """
    Run pPXF stellar-population fits on every good spaxel of one MaNGA
    galaxy, saving a diagnostic figure and a pickled result per spaxel.

    Parameters:
     - ifu: (plate, ifudsgn) pair identifying the galaxy
     - fname_tem: path of the pre-convolved SSP template FITS file
     - first_few: stop after this many good spaxels
       (NOTE(review): with the default None, the Python-2 comparison
       `i >= None` is always True, so the loop breaks after the FIRST
       good spaxel -- presumably not the intent; confirm)
     - Tsample: subsampling stride along the template age axis
     - over: if True, refit spaxels whose figure already exists
    """
    plt.close('all')
    plate = ifu[0]
    ifudsgn = ifu[1]
    fname_ifu = 'manga-{}-{}-LOGCUBE.fits.gz'.format(plate, ifudsgn)
    # now read in drpall
    drpall = table.Table.read(m.drpall_loc + 'drpall-v1_3_3.fits',
                              format='fits')
    objconds = drpall['plateifu'] == '{}-{}'.format(plate, ifudsgn)
    obj = drpall[objconds]
    c = 299792.458  # speed of light, km/s (shadows any module-level c)
    z = obj['nsa_redshift']
    if z == -9999.:
        # sentinel for "no NSA redshift"; fall back to a nominal value
        z = 0.1
    # now read in IFU
    ifu = fits.open(fname_ifu)
    ifu_flux = ifu['FLUX'].data
    ifu_ivar = ifu['ivar'].data
    ifu_mask = ifu['MASK'].data
    L_ifu = ifu['WAVE'].data
    # red end has scattered light; flagged here, masked per-spaxel below
    red_scattered_light = L_ifu > 9500.
    # shift to rest frame in log-wavelength (small-z approximation)
    logL_ifu = np.log(L_ifu) - z
    L_ifu = np.exp(logL_ifu)
    res_path = '{}-{}/'.format(plate, ifudsgn)
    try:
        os.makedirs(res_path)
    except OSError:
        # directory already exists
        pass
    # read in stellar templates
    # templates have already been convolved to the proper resolution
    tems = fits.open(fname_tem)
    htems = tems[0].header
    dtems = tems[0].data[::Tsample, :, :]
    # reduced (solar-metallicity) template set used for the kinematic fit
    dtems_red = kin_models(tems)
    print dtems_red.shape
    nT, nZ, nL = dtems.shape
    Zs = htems['CRVAL2'] + np.linspace(0.,
        htems['CDELT2'] * (htems['NAXIS2'] - 1), htems['NAXIS2'])
    Ts = htems['CRVAL3'] + np.linspace(0.,
        htems['CDELT3'] * (htems['NAXIS3'] - 1), htems['NAXIS3'])
    logL_tem = np.linspace(
        htems['CRVAL1'],
        htems['CRVAL1'] + (nL - 1) * htems['CDELT1'], nL)  # base e
    L_tem = np.exp(logL_tem)
    # normalize templates to a median of 1 (in place)
    dtems_med = np.median(dtems)
    dtems /= dtems_med
    # reshape template grids to the (n_lambda, n_templates) layout pPXF wants
    ssps = np.swapaxes(dtems, 0, 2)
    ssps = np.reshape(ssps, (nL, -1))
    ssps_red = np.swapaxes(dtems_red, 0, 2)
    ssps_red = np.reshape(ssps_red, (nL, -1))
    vel = z * c
    veldisp = obj['nsa_vdisp']
    if veldisp < 0.: # deal with non-measured veldisps
        veldisp = 300.
    velscale = (logL_ifu[1] - logL_ifu[0])*c
    dl = L_tem[0] - L_ifu[0]
    # velocity offset between template and galaxy wavelength zero-points
    dv = c * (logL_tem[0] - logL_ifu[0]) # km/s
    moments = 4
    regul_err = .01
    ebvgal = obj['ebvgal']
    start = [0., veldisp]
    reg_dim = [nZ, nT]
    # spaxels sorted inside-out from the galaxy centre
    spaxels = which_spaxels(fname_ifu)
    n_stellar_tems = nZ * nT
    moments0 = [2, ]   # kinematic pre-fit: V and sigma only
    moments1 = [-2, ]  # population fit: kinematics held fixed
    start = [0., veldisp]
    i = 0
    for spaxel in spaxels:
        gridx, gridy = spaxel['gridx'], spaxel['gridy']
        print 'Spaxel row {}; col {}'.format(gridx, gridy)
        figpath = res_path + '{}-{}_pp_fit.png'.format(gridx, gridy)
        fig_ex = os.path.exists(figpath)
        if (spaxel['good'] == True) and ((fig_ex == False) or (over == True)):
            # build the good-pixel mask from the DRP bitmask
            # NOTE(review): `mask & 10` / `mask & 8` return the masked bit
            # values (0, 2, 8, 10), not booleans, so `1 - (...)` can go
            # negative; downstream it is only compared to zero via
            # np.where, so the selection still works -- confirm intent.
            goodpixels = 1 - (ifu_mask[:, gridy, gridx] & 10)
            goodpixels *= (1 - (ifu_mask[:, gridy, gridx] & 8))
            goodpixels *= (np.isfinite(ifu_ivar[:, gridy, gridx]))
            # red end has some scattered light, so throw it out
            goodpixels *= (1 - red_scattered_light)
            # mask strong emission lines (+/- 800 km/s)
            emlines = line_mask(logL_ifu, 800.)
            goodpixels *= emlines
            goodpixels_i = np.where(goodpixels)[0]
            galaxy = ifu_flux[:, gridy, gridx]
            ivar = ifu_ivar[:, gridy, gridx]
            noise = 1./np.sqrt(ivar)
            # replace inf/nan noise with a huge value so pPXF ignores them
            noise = np.where(np.isfinite(noise), noise, 9999.)
            med = np.median(galaxy)
            try:
                # first pass: kinematics only, reduced template set
                pp0 = ppxf(templates=ssps_red,
                           galaxy=galaxy/med,
                           noise=noise/med,
                           goodpixels=goodpixels_i, start=start, vsyst=dv,
                           velScale=velscale, moments=moments0, degree=-1,
                           mdegree=-1, clean=False, lam=L_ifu, regul=None)
                # second pass: regularized population fit, kinematics fixed
                pp = ppxf(templates=ssps,
                          galaxy=galaxy/med,
                          noise=noise/med,
                          goodpixels=goodpixels_i, start=pp0.sol, vsyst=dv,
                          velScale=velscale, moments=moments1, degree=-1,
                          mdegree=-1, reddening=ebvgal,
                          clean=False, lam=L_ifu,
                          regul = 1./regul_err, reg_dim=reg_dim)
                #print('Desired Delta Chi^2: %.4g' % np.sqrt(2*galaxy.size))
                #print('Current Delta Chi^2: %.4g' % ((pp.chi2 - 1)*\
                #    galaxy.size))
            # NOTE(review): bare except hides the real failure mode;
            # consider catching the specific pPXF/linalg errors instead.
            except:
                print '\tFITTING PROBLEM'
                fit_success = False
            else:
                ppxf_fig(pp, (gridx, gridy), tems, (plate, ifudsgn), reg_dim)
                plt.savefig(figpath, dpi=300)
                # warm-start the next spaxel from this solution
                start = pp.sol[:2]
                pp_res = {'bestfit': pp.bestfit, 'chi2': pp.chi2,
                          'error': pp0.error, 'galaxy': pp.galaxy, 'lam': pp.lam,
                          'sol': pp0.sol, 'vsyst': pp.vsyst, 'noise': pp.noise,
                          'specres': ifu['SPECRES']}
                # NOTE(review): `file(...)` is Python-2-only; the handle is
                # never explicitly closed.
                pk.dump(pp_res, file(res_path + '{}-{}-pp.pickle'.format(
                    gridx, gridy), 'w'))
            finally:
                i += 1
                if i >= first_few:
                    break
def line_mask(logL_tem, dv=800.):
    """
    Build a multiplicative mask over a log-wavelength grid that zeroes
    out pixels within +/- ``dv`` km/s of strong optical emission lines
    (Balmer series, OIII, OII, SII, NII, SIII).

    Parameters:
     - logL_tem: 1-D array of ln(wavelength / Angstrom) grid points
     - dv: half-width of each masked window, in km/s

    Returns a float array the same length as ``logL_tem``
    (1. = keep, 0. = masked).
    """
    # mask out Ha - H12, OIII, OII, NII, SIII, SII
    # these will really disrupt fitting
    # Balmer series: H-alpha (n=3->2) up through n=16->2.
    # BUG FIX: the original passed n1=1, which yields the far-UV Lyman
    # series (912-1216 AA) instead of the optical Balmer lines the
    # comment advertises, so the Balmer lines were never masked.
    balmer = rydberg_formula(2 * np.ones(14).astype(int),
                             np.arange(3, 17, 1).astype(int))
    metal_lines = []
    # OIII doublet
    metal_lines += [5008.240, 4960.295]
    # OII doublet
    metal_lines += [3727.092, 3729.875]
    # SII doublet
    metal_lines += [6718.294, 6732.674]
    # NII doublet
    metal_lines += [6547.35, 6584.42]
    # SIII
    metal_lines += [9068.6, 9530.6]
    lines_logL = np.log(np.concatenate((balmer, metal_lines)))
    mask = np.ones(len(logL_tem))
    c = 299792.458  # speed of light, km/s
    # a velocity offset dv corresponds to d(ln lambda) = dv / c
    logL_tol = dv / c
    for l in lines_logL:
        mask *= (np.abs(logL_tem - l) >= logL_tol)
    return mask
def rydberg_formula(n1, n2):
    """
    Vacuum wavelength, in Angstroms, of the hydrogen n2 -> n1 transition
    from the Rydberg formula. Accepts scalars or numpy arrays.
    """
    rydberg_const = 1.097e7  # Rydberg constant, m^-1
    # inverse wavelength in m^-1, then invert and convert m -> Angstrom
    inv_wavelength = rydberg_const * (1. / n1 ** 2. - 1. / n2 ** 2.)
    wavelength_m = 1. / inv_wavelength
    return wavelength_m * 1.e10
def ppxf_fig(pp, spaxel, tems, galname, reg_dim):
    """
    Build the per-spaxel diagnostic figure for a pPXF fit: the observed
    spectrum with residuals and masked regions on top, a relative-error
    panel below, and the recovered age/metallicity weight map at bottom.

    Parameters:
     - pp: finished ppxf fit object
     - spaxel: (x, y) grid indices, used only in the title
     - tems: template FITS HDUList (for the age/metallicity axis ranges)
     - galname: (plate, ifudsgn) pair, used only in the title
     - reg_dim: (nZ, nT) shape to which pp.weights is reshaped

    Returns the matplotlib Figure.
    """
    nT, nZ = tems[0].data.shape[:-1]
    Z0 = tems[0].header['CRVAL2']
    dZ = tems[0].header['CDELT2']
    # convert the age axis from ln(age) to log10(age)
    LT0 = np.log10(np.exp(tems[0].header['CRVAL3']))
    dLT = tems[0].header['CDELT3'] / (np.log(10))
    Zrange = [Z0, Z0 + (nZ - 1) * dZ]
    logTrange = [LT0, LT0 + (nT - 1) * dLT]
    n_stellar_tems = np.array(reg_dim).prod()
    n_gas_tems = len(pp.matrix) - n_stellar_tems
    # print Zrange, logTrange
    galaxy = pp.galaxy
    noise = pp.noise
    NAXIS1 = len(galaxy)
    plt.close('all')
    fig = plt.figure(figsize=(8, 6), dpi=300)
    import matplotlib.gridspec as gridspec
    gs = gridspec.GridSpec(4, 1, height_ratios=[3, 1, 1.25, 2])
    ax1 = plt.subplot(gs[0])
    # first plot the input spectrum
    ax1.plot(pp.lam, pp.galaxy, c='k', linewidth=0.25, zorder=2,
             label='galaxy', drawstyle='steps-mid', alpha=0.5)
    # +/- 1-sigma noise band around the observed spectrum
    ax1.fill_between(
        pp.lam,
        (galaxy - noise),
        (galaxy + noise), edgecolor='#ff0000',
        facecolor='coral', zorder=1, linewidth=0.5, alpha=0.75)
    # residuals
    mn = 0.*np.min(pp.bestfit[pp.goodpixels])
    mx = np.max(pp.bestfit[pp.goodpixels])
    resid = mn + pp.galaxy - pp.bestfit
    mn1 = np.min(resid[pp.goodpixels])
    ax1.plot(pp.lam[pp.goodpixels], resid[pp.goodpixels],
             marker='.', markersize=2, c='cyan',
             markeredgecolor='cyan', linestyle='None', zorder=1)
    ax1.plot(pp.lam[pp.goodpixels], pp.goodpixels*0 + mn,
             marker=',', c='k', zorder=0)
    # gaps in goodpixels mark masked regions; draw their residuals separately
    w = np.where(np.diff(pp.goodpixels) > 1)[0]
    if w.size > 0:
        for wj in w:
            x = np.arange(pp.goodpixels[wj], pp.goodpixels[wj+1])
            ax1.plot(pp.lam[x], resid[x], 'indigo')
        w = np.hstack([0, w, w+1, -1])  # Add first and last point
    else:
        w = [0, -1]
    # vertical ticks marking the edges of each masked region
    for gj in pp.goodpixels[w]:
        ax1.plot([pp.lam[gj], pp.lam[gj]], [mn, pp.bestfit[gj]],
                 color='orange', linewidth=0.5)
    # turn off tick labels for x axis
    ax1.spines['bottom'].set_visible(False)
    plt.setp(ax1.get_xticklabels(), visible=False)
    ax1.set_ylabel("Counts", fontsize=16)
    #ax1.legend(loc='best')
    # set up a twin axis to display pixel positions
    # DO NOT CHANGE XLIMS OF ANYTHING!!!!
    ax1_pix = ax1.twiny()
    ax1_pix.plot(np.arange(NAXIS1), np.zeros(NAXIS1))
    ax1_pix.set_xlim([0, NAXIS1 - 1])
    ax1_pix.set_xlabel('Pixel')
    ax1.set_ylim([max(mn1 - .05, -.25), mx + .05])
    # set up an axis to display residuals vs noise
    ax1_res = plt.subplot(gs[1], sharex=ax1)
    ax1_res.plot(pp.lam[pp.goodpixels],
                 (noise/galaxy)[pp.goodpixels], marker='.',
                 c='coral', linestyle='None', markersize=0.5,
                 label='noise', alpha = 0.5)
    ax1_res.plot(pp.lam[pp.goodpixels],
                 (resid/galaxy)[pp.goodpixels], marker='.',
                 c='cyan', linestyle='None', markersize=0.5,
                 label='resid', alpha=0.5)
    ax1_res.set_xlabel(r'$\lambda_r ~ [\AA]$')
    ax1_res.legend(loc='best', prop={'size': 6})
    ax1_res.set_ylabel(r'$\Delta_{rel}$')
    ax1_res.set_yscale('log')
    ax1_res.set_ylim([10**-2.5, 1.])
    _ = [tick.label.set_fontsize(8) for tick in
         ax1_res.yaxis.get_major_ticks()]
    ax1.set_xlim([np.min(pp.lam), np.max(pp.lam)])
    # this is just a dummy axis for spacing purposes
    pad_ax = plt.subplot(gs[2])
    pad_ax.axis('off')
    ax2 = plt.subplot(gs[3])
    # extract the kinematics of the stars first
    # normalized template weights as a (Z, T) image
    weights = np.reshape(pp.weights, reg_dim)
    weights /= weights.sum()
    # print weights
    plt.imshow(
        weights, origin='lower', interpolation='nearest',
        cmap='cubehelix_r',
        vmin=0.0, extent=(LT0 - dLT/2., logTrange[1] + dLT/2.,
                          Z0 - dZ/2., Zrange[1] + dZ/2.))
    cb = plt.colorbar()
    for t in cb.ax.get_yticklabels():
        t.set_fontsize(14)
    plt.title("Mass Fraction", size=16)
    plt.xlabel(r'$\log_{10} \tau ~ [\mathrm{Gyr}]$', size=16)
    plt.ylabel(r'$[M/H]$', size=16)
    t = r'{}-{} pPXF fit: spaxel ({}, {})'.format(galname[0], galname[1],
                                                  spaxel[0], spaxel[1])
    plt.suptitle(t, size=18)
    plt.subplots_adjust(hspace=0.01, top=0.85)
    # plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    return fig
def which_spaxels(fname_ifu):
    '''
    list of spaxel indices in the order that they should be run
    (sorted by distance from the galaxy centre, then by angle)

    WILL NEED TO BE ALTERED FOR MPL-4, SINCE THE AT THAT POINT, THE
    NEEDED HEADER KEYWORDS WILL BE FIXED
    '''
    ifu = fits.open(fname_ifu)
    ifu_flux = ifu['FLUX'].data
    r_im = ifu['RIMG'].data  # reconstructed r-band image of the galaxy
    # now figure out where in the ifu the center of the galaxy is
    # and use that info to figure out where in the galaxy to start
    NL, NX, NY = ifu_flux.shape
    # print NX
    # print pixpos_x.shape, pixpos_y.shape, r_im.shape
    # coordinate grids centred on the middle of the cube
    # (0.5 units/pixel spacing assumed here — TODO confirm against header)
    pos_x = np.linspace(-0.5*NX/2., 0.5*NX/2., NX)
    pos_y = np.linspace(-0.5*NY/2., 0.5*NY/2., NY)
    pixpos_x, pixpos_y = np.meshgrid(pos_x, pos_y)
    # brightest r-band pixel is taken as the galaxy centre
    peak_inds = np.unravel_index(np.argmax(r_im), pixpos_x.shape)
    # print peak_inds
    peak_distance = np.sqrt((pixpos_x - pos_x[peak_inds[1]])**2. +
                            (pixpos_y - pos_y[peak_inds[0]])**2.)
    grid = np.indices(peak_distance.shape)
    gridx, gridy = grid[1], grid[0]
    # NOTE(review): these subtract *index* coordinates from *physical*
    # coordinates; the angle is still monotonic around the centre, so the
    # sort below works, but confirm the mixed units are intentional.
    xnew = pixpos_x - peak_inds[1]
    ynew = pixpos_y - peak_inds[0]
    theta_new = np.arctan2(ynew, xnew)
    # peak_distance and theta_new are coords relative to ctr of galaxy
    # plt.imshow(r_im, origin='lower', aspect='equal')
    # plt.scatter(peak_inds[1], peak_inds[0])
    # plt.show()
    # now make a table out of everything, so that it can get sorted
    pixels = table.Table()
    pixels['good'] = m.good_spaxels(ifu).flatten()
    pixels['gridx'] = gridx.flatten()
    pixels['gridy'] = gridy.flatten()
    pixels['r'] = peak_distance.flatten()
    pixels['theta'] = theta_new.flatten()
    pixels.sort(['r', 'theta'])
    pixels['order'] = range(len(pixels))
    # print pixels
    '''plt.scatter(pixels['gridx'], pixels['gridy'],
                   c=pixels['order']*pixels['good'], s=5)
    plt.colorbar()
    plt.show()'''
    return pixels
|
zpace/MaNGA-fitting
|
MaNGA_stars_gas_fit.py
|
Python
|
mit
| 19,438
|
[
"Galaxy",
"Gaussian"
] |
08cbcae06206dfae0acddaa0888f7b27403072e8e90f2952ae0aa6048d2cd6cb
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections
from matplotlib.patches import Circle
import networkx as nx
from skimage import img_as_float, morphology
from skimage.color import gray2rgb
def _normalise_image(image, *, image_cmap=None):
    """Return *image* as float RGB, colormapping 2-D input if requested."""
    image = img_as_float(image)
    if image.ndim != 2:
        return image
    if image_cmap is None:
        return gray2rgb(image)
    return plt.get_cmap(image_cmap)(image)[..., :3]
def pixel_perfect_figsize(image, dpi=80):
    """Compute the Matplotlib figsize (width, height), in inches, for *image*.

    Parameters
    ----------
    image : array, shape (M, N[, 3])
        The image to be displayed.
    dpi : int, optional
        Target figure resolution in dots per inch.

    Returns
    -------
    tuple of float
        ``(width, height)`` in inches such that one array element maps to
        exactly one screen pixel at the given dpi.

    Examples
    --------
    >>> image = np.empty((768, 1024))
    >>> pixel_perfect_figsize(image)
    (12.8, 9.6)
    """
    height_px, width_px = image.shape[:2]
    return width_px / dpi, height_px / dpi
def overlay_skeleton_2d(
        image,
        skeleton,
        *,
        image_cmap=None,
        color=(1, 0, 0),
        alpha=1,
        dilate=0,
        axes=None
):
    """Blend the skeleton pixels into *image* and display the result.

    Parameters
    ----------
    image : array, shape (M, N[, 3])
        Grayscale or RGB input image.
    skeleton : array, shape (M, N)
        The 1-pixel-wide input skeleton.

    Other Parameters
    ----------------
    image_cmap : matplotlib colormap name or object, optional
        Colormap applied to a grayscale input. Default is grayscale.
    color : tuple of float in [0, 1], optional
        RGB color used for the skeleton pixels.
    alpha : float, optional
        Blending weight for the skeleton color.
    dilate : int, optional
        Radius by which to thicken the skeleton before drawing; useful on
        large images where aliasing can hide one-pixel lines.
    axes : matplotlib Axes, optional
        Axes to draw on; a new figure/axes pair is created when omitted.

    Returns
    -------
    axes : matplotlib Axes
        The axes the overlay was drawn on.
    """
    image = _normalise_image(image, image_cmap=image_cmap)
    mask = skeleton.astype(bool)
    if dilate > 0:
        mask = morphology.binary_dilation(mask, morphology.disk(dilate))
    if axes is None:
        _, axes = plt.subplots()
    image[mask] = alpha * np.array(color) + (1 - alpha) * image[mask]
    axes.imshow(image)
    axes.axis('off')
    return axes
def overlay_euclidean_skeleton_2d(
        image,
        stats,
        *,
        image_cmap=None,
        skeleton_color_source='branch-type',
        skeleton_colormap='viridis',
        axes=None
):
    """Plot the image, and overlay the straight-line skeleton over it.

    Parameters
    ----------
    image : array, shape (M, N)
        The input image.
    stats : pandas DataFrame
        Skeleton statistics, one row per branch, with the
        ``image-coord-{src,dst}-{0,1}`` columns and the color-source column
        (as produced by ``skan.summarise``).

    Other Parameters
    ----------------
    image_cmap : matplotlib colormap name or object, optional
        The colormap to use for the input image. Defaults to grayscale.
    skeleton_color_source : string, optional
        The name of the column to use for the skeleton edge color. See the
        output of `skan.summarise` for valid choices. Most common choices
        would be:

        - skeleton-id: each individual skeleton (connected component) will
          have a different colour.
        - branch-type: each branch type (tip-tip, tip-junction,
          junction-junction, path-path). This is the default.
        - branch-distance: the curved length of the skeleton branch.
        - euclidean-distance: the straight-line length of the skeleton branch.
    skeleton_colormap : matplotlib colormap name or object, optional
        The colormap for the skeleton values.
    axes : matplotlib Axes object, optional
        An Axes object on which to draw. If `None`, a new one is created.

    Returns
    -------
    axes : matplotlib Axes object
        The Axes on which the plot is drawn.
    """
    image = _normalise_image(image, image_cmap=image_cmap)
    summary = stats
    # transforming from row, col to x, y
    coords_cols = (['image-coord-src-%i' % i for i in [1, 0]]
                   + ['image-coord-dst-%i' % i for i in [1, 0]])
    coords = summary[coords_cols].values.reshape((-1, 2, 2))
    if axes is None:
        fig, axes = plt.subplots()
    axes.imshow(image)
    axes.axis('off')
    color_values = summary[skeleton_color_source]
    cmap = plt.get_cmap(
        skeleton_colormap, min(len(np.unique(color_values)), 256)
    )
    # BUG FIX: when every branch shares one value, max == min and the old
    # normalisation divided by zero, producing NaN colors. Fall back to a
    # range of 1 so all branches then map to the colormap's lowest color.
    vmin = np.min(color_values)
    value_range = np.max(color_values) - vmin
    if value_range == 0:
        value_range = 1
    colormapped = cmap((color_values - vmin) / value_range)
    linecoll = collections.LineCollection(coords, colors=colormapped)
    axes.add_collection(linecoll)
    return axes
def overlay_skeleton_2d_class(
        skeleton,
        *,
        image_cmap='gray',
        skeleton_color_source='path_means',
        skeleton_colormap='viridis',
        vmin=None,
        vmax=None,
        axes=None
):
    """Plot the image, and overlay the skeleton over it.

    Parameters
    ----------
    skeleton : skan.Skeleton object
        The input skeleton, which contains both the skeleton and the source
        image.

    Other Parameters
    ----------------
    image_cmap : matplotlib colormap name or object, optional
        The colormap to use for the input image. Defaults to grayscale.
    skeleton_color_source : string or callable, optional
        The name of the method to use for the skeleton edge color. See the
        documentation of `skan.Skeleton` for valid choices. Most common
        choices would be:

        - path_means: the mean value of the skeleton along each path.
        - path_lengths: the length of each path.
        - path_stdev: the standard deviation of pixel values along the path.

        Alternatively, a callable can be provided that takes as input a
        Skeleton object and outputs a list of floating point values of the
        same length as the number of paths.
    skeleton_colormap : matplotlib colormap name or object, optional
        The colormap for the skeleton values.
    vmin, vmax : float, optional
        The minimum and maximum values for the colormap. Use this to pin the
        colormapped values to a certain range.
    axes : matplotlib Axes object, optional
        An Axes object on which to draw. If `None`, a new one is created.

    Returns
    -------
    axes : matplotlib Axes object
        The Axes on which the plot is drawn.
    mappable : matplotlib ScalarMappable object
        The mappable values corresponding to the line colors. This can be
        used to create a colorbar for the plot.
    """
    if axes is None:
        fig, axes = plt.subplots()
    if skeleton.source_image is not None:
        axes.imshow(skeleton.source_image, cmap=image_cmap)
    # resolve the per-path values used to colour the skeleton branches
    if callable(skeleton_color_source):
        values = skeleton_color_source(skeleton)
    elif hasattr(skeleton, skeleton_color_source):
        values = getattr(skeleton, skeleton_color_source)()
    else:
        raise ValueError(
            'Unknown skeleton color source: %s. Provide an '
            'attribute of skan.csr.Skeleton or a callable.'
            % skeleton_color_source
        )
    cmap = plt.get_cmap(skeleton_colormap, min(len(np.unique(values)), 256))
    if vmin is None:
        vmin = np.min(values)
    if vmax is None:
        vmax = np.max(values)
    mapping_values = (values-vmin) / (vmax-vmin)
    mappable = plt.cm.ScalarMappable(plt.Normalize(vmin, vmax), cmap)
    # NOTE(review): _A is a private matplotlib attribute; it is set here so
    # the mappable can feed plt.colorbar without an associated image —
    # confirm this still works across matplotlib upgrades.
    mappable._A = mapping_values
    colors = cmap(mapping_values)
    # path coordinates are (row, col); reverse to (x, y) for plotting
    coordinates = [
        skeleton.path_coordinates(i)[:, ::-1]
        for i in range(skeleton.n_paths)
    ]
    linecoll = collections.LineCollection(coordinates, colors=colors)
    axes.add_collection(linecoll)
    return axes, mappable
def sholl_shells(center, radii, *, axes=None, **kwargs):
    """Draw concentric circles of the given *radii* around *center*.

    Parameters
    ----------
    center : array of float, shape (2,)
        Circle centre, in NumPy-style (row, column) order.
    radii : array of float, shape (N,)
        Radii of the concentric circles.
    axes : matplotlib Axes, optional
        Axes to draw on; a new figure/axes pair is created when omitted.

    Returns
    -------
    axes : matplotlib Axes
        The axes the circles were drawn on.
    patches : list of matplotlib.patches.Circle
        The circle patches that were added.

    Notes
    -----
    Remaining keyword arguments are forwarded to
    ``matplotlib.patches.Circle`` (e.g. ``linestyle``, ``linewidth``).
    """
    row, col = center
    edge = kwargs.pop('edgecolor', 'cornflowerblue')
    patches = []
    for radius in radii:
        patches.append(
            Circle((col, row), radius=radius, fill=False, edgecolor=edge,
                   **kwargs)
        )
    if axes is None:
        _, axes = plt.subplots()
    for patch in patches:
        axes.add_patch(patch)
    return axes, patches
def pipeline_plot(
        image,
        thresholded,
        skeleton,
        stats,
        *,
        figure=None,
        axes=None,
        figsize=(9, 9)
):
    """Draw the image, the thresholded version, and its skeleton.

    Parameters
    ----------
    image : array, shape (M, N, ...[, 3])
        Input image, conformant with scikit-image data type
        specification [1]_.
    thresholded : array, same shape as image
        Binarized version of the input image.
    skeleton : array, same shape as image
        Skeletonized version of the input image.
    stats : pandas DataFrame
        Skeleton statistics from the input image/skeleton.

    Other Parameters
    ----------------
    figure : matplotlib Figure, optional
        If given, where to make the plots.
    axes : array of matplotlib Axes, optional
        If given, use these axes to draw the plots. Should have len 4.
    figsize : 2-tuple of float, optional
        The width and height of the figure.

    Returns
    -------
    fig : matplotlib Figure
        The Figure containing all the plots
    axes : array of matplotlib Axes
        The four axes containing the drawn images.

    References
    ----------
    .. [1] http://scikit-image.org/docs/dev/user_guide/data_types.html
    """
    if figure is None:
        # build a fresh 2x2 grid with linked pan/zoom across all panels
        fig, axes = plt.subplots(
            2, 2, figsize=figsize, sharex=True, sharey=True
        )
        axes = np.ravel(axes)
    else:
        fig = figure
        if axes is None:
            # build the 2x2 grid manually inside the caller's figure,
            # sharing axes limits with the first panel
            ax0 = fig.add_subplot(2, 2, 1)
            axes = [ax0] + [
                fig.add_subplot(2, 2, i, sharex=ax0, sharey=ax0)
                for i in range(2, 5)
            ]
        axes = np.ravel(axes)
    axes[0].imshow(image, cmap='gray')
    axes[0].axis('off')

    axes[1].imshow(thresholded, cmap='gray')
    axes[1].axis('off')

    overlay_skeleton_2d(image, skeleton, axes=axes[2])

    overlay_euclidean_skeleton_2d(image, stats, axes=axes[3])

    # remove all margins between and around the four panels
    fig.subplots_adjust(0, 0, 1, 1, 0, 0)

    return fig, axes
def _clean_positions_dict(d, g):
for k in list(d.keys()):
if k not in g:
del d[k]
elif g.degree(k) == 0:
g.remove_node(k)
def overlay_skeleton_networkx(
        csr_graph, coordinates, *, axis=None, image=None, cmap=None, **kwargs
):
    """Draw the skeleton as a NetworkX graph, optionally overlaid on an image.

    Due to the size of NetworkX drawing elements, this is only recommended
    for very small skeletons.

    Parameters
    ----------
    csr_graph : SciPy Sparse matrix
        The skeleton graph in SciPy CSR format.
    coordinates : array, shape (N_points, 2)
        The coordinates of each point in the skeleton. ``coordinates.shape[0]``
        should be equal to ``csr_graph.shape[0]``.

    Other Parameters
    ----------------
    axis : Matplotlib Axes object, optional
        The Axes on which to plot the data. If None, a new figure and axes
        will be created.
    image : array, shape (M, N[, 3])
        An image on which to overlay the skeleton. ``image.shape`` should be
        greater than ``np.max(coordinates, axis=0)``.
    **kwargs : keyword arguments
        Arguments passed on to `nx.draw_networkx`. Particularly useful ones
        include ``node_size=`` and ``font_size=``.

    Returns
    -------
    axis : Matplotlib Axes object
        The axes the graph was drawn on.
    """
    if axis is None:
        _, axis = plt.subplots()
    if image is not None:
        cmap = cmap or 'gray'
        axis.imshow(image, cmap=cmap)
    # NOTE(review): nx.from_scipy_sparse_matrix was renamed to
    # from_scipy_sparse_array in NetworkX 2.7 and removed in 3.0 — confirm
    # the supported networkx version range before upgrading.
    gnx = nx.from_scipy_sparse_matrix(csr_graph)
    # Note: we invert the positions because Matplotlib uses x/y for
    # scatterplot, but the coordinates are row/column NumPy indexing
    positions = dict(zip(range(coordinates.shape[0]), coordinates[:, ::-1]))
    _clean_positions_dict(positions, gnx)  # remove nodes not in Graph
    nx.draw_networkx(gnx, pos=positions, ax=axis, **kwargs)
    return axis
|
jni/skan
|
src/skan/draw.py
|
Python
|
bsd-3-clause
| 13,467
|
[
"Gaussian"
] |
a53ada6f0b1538c8d15281b5dce083a468d4daf8033d0bdbcd9ca70d1a8c2867
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Resolve the directory containing this script and make it the working
# directory so all relative paths used below are stable.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)

# Manifest keys that must be present for packaging to proceed.
# NOTE(review): 'copyright' appears twice in this list — harmless for the
# membership checks below, but presumably unintended.
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
# Placeholder manifest values; if a key still matches one of these, the
# packager warns the author to customise it.
module_defaults = {
    'description':'My module',
    'author': 'Your Name',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# Placeholder LICENSE text used to detect an un-customised license file.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from *config*, with ~ and $VARS expanded."""
    raw_path = config['TITANIUM_SDK']
    return os.path.expandvars(os.path.expanduser(raw_path))
def replace_vars(config, token):
    """Expand $(KEY) placeholders in *token* using values from *config*.

    Expansion proceeds left to right and stops at the first placeholder
    whose key is missing from *config* (remaining placeholders are left
    untouched), preserving the original behaviour.
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')', idx + 2)
        if idx2 == -1:
            break  # unterminated placeholder; leave the rest as-is
        key = token[idx + 2:idx2]
        # FIX: dict.has_key() is deprecated and removed in Python 3;
        # the `in` operator works identically on Python 2 and 3.
        if key not in config:
            break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    # Parse titanium.xcconfig into a {KEY: value} dict; values may reference
    # previously-defined keys via $(KEY), expanded by replace_vars().
    contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2]=='//': continue  # xcconfig comment line
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx+1:].strip()
            config[key] = replace_vars(config,value)
    return config
def generate_doc(config):
    # Render each markdown file under documentation/ (beside this script or
    # one level up) to HTML. Returns a list of {filename: html} dicts, or
    # None when no documentation directory exists.
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        docdir = os.path.join(cwd,'..','documentation')
    if not os.path.exists(docdir):
        print "Couldn't find documentation file at: %s" % docdir
        return None
    try:
        import markdown2 as markdown  # prefer markdown2 when installed
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    # Compile the module's CommonJS JavaScript assets and splice the
    # generated Objective-C lookup code into RebelFacebookModuleAssets.m so
    # the JS ships embedded in the binary. Also writes metadata.json with
    # the crawled symbol exports. No-op when the JS entry file is absent.
    js_file = os.path.join(cwd,'assets','rebel.facebook.js')
    if not os.path.exists(js_file):
        js_file = os.path.join(cwd,'..','assets','rebel.facebook.js')
    if not os.path.exists(js_file): return
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json  # fallback for very old Pythons
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # Objective-C bodies spliced into the assets router below.
    root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd,'Classes','RebelFacebookModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    # Print the error and abort the packaging run with a non-zero exit code.
    print msg
    sys.exit(1)
def warn(msg):
    # Non-fatal diagnostic; packaging continues.
    print "[WARN] %s" % msg
def validate_license():
    # Warn when the LICENSE file (beside this script or one level up) still
    # contains the placeholder text from the module template.
    license_file = os.path.join(cwd,'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd,'..','LICENSE')
    if os.path.exists(license_file):
        c = open(license_file).read()
        if c.find(module_license_default)!=-1:
            warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    # Parse and sanity-check the module manifest.
    #
    # Returns (manifest_dict, manifest_path). Dies when the file is missing
    # or a required key is absent; warns when a key still has its template
    # default value.
    path = os.path.join(cwd,'manifest')
    # FIX: check existence *before* opening. The original opened first, so a
    # missing manifest raised IOError instead of the intended die() message.
    if not os.path.exists(path): die("missing %s" % path)
    f = open(path)
    manifest = {}
    for line in f.readlines():
        line = line.strip()
        if line[0:1]=='#': continue  # comment line
        if line.find(':') < 0: continue  # not a key: value pair
        # FIX: split on the first ':' only, so values containing ':' (e.g.
        # URLs) no longer raise ValueError on unpacking.
        key,value = line.split(':', 1)
        manifest[key.strip()]=value.strip()
    f.close()  # FIX: close the file handle instead of leaking it
    for key in required_module_keys:
        # FIX: dict.has_key() is deprecated; `in` works on Python 2 and 3
        if key not in manifest: die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest,path
# File and directory names skipped when zipping module contents.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
    # Recursively add *dir* to the open ZipFile *zf*, rewriting each path so
    # it lives under *basepath* inside the archive. Skips ignored names,
    # compiled .pyc files, and (unless includeJSFiles) .js files.
    # NOTE(review): the *ignore* parameter is never consulted; filtering
    # uses the module-level ignoreFiles/ignoreDirs lists — confirm intent.
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name)	# don't visit ignored directories
        for file in files:
            if file in ignoreFiles: continue
            e = os.path.splitext(file)
            if len(e) == 2 and e[1] == '.pyc': continue
            if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
            from_ = os.path.join(root, file)
            to_ = from_.replace(dir, basepath, 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Return paths of all Release-configuration static libraries under build/."""
    return [lib for lib in glob.glob('build/**/*.a') if 'Release-' in lib]
def build_module(manifest,config):
    # Build the device and simulator static libraries via xcodebuild, then
    # merge them with lipo into one universal build/lib<moduleid>.a.
    from tools import ensure_dev_path
    ensure_dev_path()
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
    # Assemble the distributable <moduleid>-iphone-<version>.zip containing
    # the manifest, universal library, rendered docs, assets, example and
    # platform folders, LICENSE, module.xcconfig and the JS export metadata.
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    if os.path.exists(modulezip): os.remove(modulezip)  # always start fresh
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        for doc in docs:
            for file, html in doc.iteritems():
                # markdown sources are shipped as rendered .html
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    # assets may live beside this script or one level up
    p = os.path.join(cwd, 'assets')
    if not os.path.exists(p):
        p = os.path.join(cwd, '..', 'assets')
    if os.path.exists(p):
        zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
    for dn in ('example','platform'):
        p = os.path.join(cwd, dn)
        if not os.path.exists(p):
            p = os.path.join(cwd, '..', dn)
        if os.path.exists(p):
            # include .js files for example/platform folders
            zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
    license_file = os.path.join(cwd,'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd,'..','LICENSE')
    if os.path.exists(license_file):
        zf.write(license_file,'%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Packaging pipeline: validate metadata, locate the Titanium SDK,
    # compile JS assets, build the universal library, then zip the module.
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    sdk = find_sdk(config)
    # make the SDK's python helper modules importable
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
|
timanrebel/Facebook
|
iphone/build.py
|
Python
|
mit
| 7,584
|
[
"VisIt"
] |
0e878b305f810e59704147a3c917fd1ed267d38aa2dbf39cb78e6318d278b22c
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from .optproc import *
from .text import *
from .procutil import *
from .util import *
from .testing import *
from .exceptions import *
from .inpsight import *
from .numpy_helper import *
from .p4regex import *
from .python_helpers import *
from .solvers import *
from .prop_util import *
from .spectrum import spectrum
from . import writer
|
lothian/psi4
|
psi4/driver/p4util/__init__.py
|
Python
|
lgpl-3.0
| 1,255
|
[
"Psi4"
] |
0f623875a0e903053106a8d70fd22cbef671a0ced0c6c1857495c2b877f1af6e
|
import re
import sys
from circlator import program, common
from circlator import __version__ as circlator_version
import shutil
import pyfastaq
class Error (Exception): pass


# Environment variables that may override the executable used for a tool.
prog_to_env_var = {
    'samtools': 'CIRCLATOR_SAMTOOLS',
    'spades': 'CIRCLATOR_SPADES',
    'canu': 'CIRCLATOR_CANU',
}

# For each tool: (argument that makes it print its version,
#                 regex that extracts the version from that output).
prog_to_version_cmd = {
    'bwa': ('', re.compile(r'^Version: ([0-9\.]+)')),
    'nucmer': ('--version', re.compile(r'([0-9\.]+)')),
    'prodigal': ('-v', re.compile(r'^Prodigal V([0-9\.]+):')),
    'samtools': ('', re.compile(r'Version: (\d+\.\d+[\.\d]*)')),
    'spades': ('-v', re.compile(r'v.?([0-9][0-9\.]+)')),
    'canu': ('-version', re.compile(r'^Canu \D*([\d][\d\.]+)')),
}

# Minimum acceptable version of each tool.
min_versions = {
    'bwa': '0.7.12',
    'nucmer': '3.1',
    'prodigal': '2.6',
    'samtools': '0.1.19',
    'spades': '3.6.2', # this is the first version to support python3
    'canu': '0.0',
}

# Versions known to be incompatible with Circlator.
bad_versions = {
    'spades': '3.6.1'
}

# Default executable name looked up on PATH for each tool.
prog_name_to_default = {
    'bwa': 'bwa',
    'nucmer': 'nucmer',
    'prodigal': 'prodigal',
    'spades': 'spades.py',
    'samtools': 'samtools',
    'canu': 'canu',
}

# Assemblers are optional: only the one selected for a run is required.
not_required = {'spades', 'canu'}
def handle_error(message, raise_error=True):
    """Raise an Error for *message*, or merely print it when raise_error is False."""
    if not raise_error:
        print(message)
        return
    raise Error(message + '\nCannot continue')
def make_and_check_prog(name, verbose=False, raise_error=True, filehandle=None, debug=False, required=False):
    # Build a Program wrapper for *name*, verify it is on PATH and that its
    # version meets the minimum. Always returns the Program; problems are
    # routed through handle_error(), which raises or just prints depending
    # on the flags.
    p = program.Program(
        prog_name_to_default[name],
        prog_to_version_cmd[name][0],
        prog_to_version_cmd[name][1],
        environment_var=prog_to_env_var.get(name, None),
        debug=debug
    )

    if not p.in_path():
        # optional assemblers only abort the run when explicitly required
        if required:
            die = True
        else:
            die = raise_error and (name not in not_required)

        handle_error("WARNING: Didn't find " + name + " in path. Looked for:" + p.path, raise_error=die)
        return p

    version = p.version

    if version is None:
        handle_error('Found ' + name + " but couldn't get version.", raise_error=raise_error)
        return p

    if not p.version_at_least(min_versions[name]):
        handle_error('Version of ' + name + ' too low. I found ' + p.version + ', but must be at least ' + min_versions[name] + '. Found here:\n' + p.from_which, raise_error=raise_error)
        return p

    # SPAdes 3.6.1 is known-broken; 3.7.x is the recommended series
    if name == 'spades' and p.version == bad_versions['spades']:
        handle_error('ERROR! SPAdes version ' + bad_versions['spades'] + ' is incompatible with Circlator. Please use SPAdes 3.7.1', raise_error=raise_error)
        return p

    if name == 'spades' and not p.version.startswith('3.7.'):
        print('WARNING: SPAdes version', p.version, 'is being used. It will work, but better results are usually obtained from Circlator using SPAdes version 3.7.1. Although 3.7.1 is not the latest version, we recommend it for Circlator.', file=sys.stderr)

    if verbose:
        print(name, p.version, p.from_which, sep='\t')

    if filehandle:
        print(name, p.version, p.from_which, sep='\t', file=filehandle)

    return p
def check_all_progs(verbose=False, raise_error=False, filehandle=None, debug=False, assembler=None):
    """Check every external dependency; only the chosen *assembler* is strictly required."""
    for tool in sorted(prog_name_to_default):
        if debug:
            print('__________ checking', tool, '____________', flush=True)
        make_and_check_prog(
            tool,
            verbose=verbose,
            raise_error=raise_error,
            filehandle=filehandle,
            debug=debug,
            required=(tool == assembler),
        )
|
martinghunt/circlator
|
circlator/external_progs.py
|
Python
|
gpl-3.0
| 3,423
|
[
"BWA"
] |
d936af282c639d000f4536785488fae6810b9f91eedabf6003436577ad794c45
|
"""
Death Streams Addon
Copyright (C) 2017 Mr.Blamo
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import re
import urllib
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib.utils2 import i18n
from salts_lib import source_utils
from salts_lib import scraper_utils
from salts_lib import client
from salts_lib import debrid
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import SHORT_MONS
from salts_lib.constants import VIDEO_TYPES
import scraper
logger = log_utils.Logger.get_logger(__name__)

BASE_URL = 'http://ddl2.org'
# Site category paths used to distinguish movie posts from TV-show posts.
CATEGORIES = {VIDEO_TYPES.MOVIE: '/category/movies/', VIDEO_TYPES.TVSHOW: '/category/tv-shows/'}
# Hosts whose links are never returned as sources (ad shortener + the site itself).
EXCLUDE_LINKS = ['adf.ly', urlparse.urlparse(BASE_URL).hostname]
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return '2DDL'
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return hosters
html = self._http_get(source_url, require_debrid=True, cache_limit=.5)
if video.video_type == VIDEO_TYPES.MOVIE:
pattern = '<singlelink>(.*?)(?=<hr\s*/>|download>|thanks_button_div)'
else:
pattern = '<hr\s*/>\s*<strong>(.*?)</strong>.*?<singlelink>(.*?)(?=<hr\s*/>|download>|thanks_button_div)'
for match in re.finditer(pattern, html, re.DOTALL):
if video.video_type == VIDEO_TYPES.MOVIE:
links = match.group(1)
match = re.search('<h2>\s*<a[^>]+>(.*?)</a>', html)
if match:
title = match.group(1)
else:
title = ''
else:
title, links = match.groups()
for match in re.finditer('href="([^"]+)', links):
stream_url = match.group(1).lower()
if any(link in stream_url for link in EXCLUDE_LINKS): continue
host = urlparse.urlparse(stream_url).hostname
quality = scraper_utils.blog_get_quality(video, title, host)
hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
hosters.append(hoster)
return hosters
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings = scraper_utils.disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" %s" default="60" visible="eq(-3,true)"/>' % (name, i18n('filter_results_days')))
return settings
def _get_episode_url(self, show_url, video):
force_title = scraper_utils.force_title(video)
title_fallback = kodi.get_setting('title-fallback') == 'true'
norm_title = scraper_utils.normalize_title(video.ep_title)
page_url = [show_url]
too_old = False
while page_url and not too_old:
html = self._http_get(page_url[0], require_debrid=True, cache_limit=1)
for _attr, post in dom_parser2.parse_dom(html, 'div', {'id': re.compile('post-\d+')}):
if self.__too_old(post):
too_old = True
break
if CATEGORIES[VIDEO_TYPES.TVSHOW] in post and show_url in post:
match = dom_parser2.parse_dom(post, 'a', req='href')
if match:
url, title = match[0].attrs['href'], match[0].content
if not force_title:
if scraper_utils.release_check(video, title, require_title=False):
return scraper_utils.pathify_url(url)
else:
if title_fallback and norm_title:
match = re.search('</strong>(.*?)</p>', post)
if match and norm_title == scraper_utils.normalize_title(match.group(1)):
return scraper_utils.pathify_url(url)
page_url = dom_parser2.parse_dom(html, 'a', {'class': 'nextpostslink'}, req='href')
if page_url: page_url = [page_url[0].attrs['href']]
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = '/search/' + urllib.quote_plus(title)
html = self._http_get(search_url, require_debrid=True, cache_limit=1)
if video_type == VIDEO_TYPES.TVSHOW:
seen_urls = {}
for _attr, post in dom_parser2.parse_dom(html, 'div', {'id': re.compile('post-\d+')}):
if CATEGORIES[video_type] not in post: continue
match = re.search('<span>\s*TAGS:\s*</span>\s*<a\s+href="([^"]+)[^>]+>([^<]+)', post, re.I)
if match:
show_url, match_title = match.groups()
if show_url in seen_urls: continue
result = {'url': scraper_utils.pathify_url(show_url), 'title': scraper_utils.cleanse_title(match_title), 'year': ''}
seen_urls[show_url] = result
results.append(result)
elif video_type == VIDEO_TYPES.MOVIE:
norm_title = scraper_utils.normalize_title(title)
headings = re.findall('<h2>\s*<a\s+href="([^"]+)[^>]+>(.*?)</a>', html)
posts = [result.content for result in dom_parser2.parse_dom(html, 'div', {'id': re.compile('post-\d+')})]
for heading, post in zip(headings, posts):
if CATEGORIES[video_type] not in post or self.__too_old(post): continue
post_url, post_title = heading
meta = scraper_utils.parse_movie_link(post_title)
full_title = '%s [%s] (%sp)' % (meta['title'], meta['extra'], meta['height'])
match_year = meta['year']
match_norm_title = scraper_utils.normalize_title(meta['title'])
if (match_norm_title in norm_title or norm_title in match_norm_title) and (not year or not match_year or year == match_year):
result = {'url': scraper_utils.pathify_url(post_url), 'title': scraper_utils.cleanse_title(full_title), 'year': match_year}
results.append(result)
return results
def _http_get(self, url, params=None, data=None, multipart_data=None, headers=None, cookies=None, allow_redirect=True, method=None, require_debrid=True, read_error=False, cache_limit=8):
real_url = scraper_utils.urljoin(self.base_url, url)
html = super(self.__class__, self)._http_get(real_url, params=params, data=data, multipart_data=multipart_data, headers=headers, cookies=cookies,
allow_redirect=allow_redirect, method=method, require_debrid=True, read_error=read_error,
cache_limit=cache_limit)
if self.__update_base_url(html):
real_url = scraper_utils.urljoin(self.base_url, url)
html = super(self.__class__, self)._http_get(real_url, params=params, data=data, multipart_data=multipart_data, headers=headers,
cookies=cookies, allow_redirect=allow_redirect, method=method, require_debrid=True,
read_error=read_error, cache_limit=cache_limit)
return html
def __update_base_url(self, html):
    """Detect a 'new domain' notice in ``html`` and, if present, follow it to
    discover and persist the site's new base url.

    Returns True when self.base_url was changed (callers should re-fetch),
    False otherwise.
    """
    if re.search('new domain', html, re.I):
        # The notice page links to the new site via a rel=nofollow anchor.
        match = dom_parser2.parse_dom(html, 'a', {'rel': 'nofollow'}, req='href')
        if match:
            html = super(self.__class__, self)._http_get(match[0].attrs['href'], require_debrid=True, cache_limit=24)
            # The canonical <link> on the landing page gives the clean new domain.
            match = dom_parser2.parse_dom(html, 'link', {'rel': 'canonical'}, req='href')
            if match:
                new_base = match[0].attrs['href']
                # Keep only scheme + host; drop any path/query from the canonical url.
                parts = urlparse.urlparse(new_base)
                new_base = parts.scheme + '://' + parts.hostname
                if new_base not in self.base_url:
                    logger.log('Updating 2DDL Base Url from: %s to %s' % (self.base_url, new_base))
                    self.base_url = new_base
                    # Persist so the next addon run starts from the new domain.
                    kodi.set_setting('%s-base_url' % (self.get_name()), new_base)
                    return True
    return False
def __too_old(self, post):
    """Return True if ``post`` should be filtered because it is older than the
    user-configured maximum age (the '<name>-filter' addon setting, in days).

    Returns False when filtering is disabled (0 days), when the posting date
    cannot be found or parsed, or when the post is recent enough.
    """
    try:
        filter_days = datetime.timedelta(days=int(kodi.get_setting('%s-filter' % (self.get_name()))))
        if filter_days:
            today = datetime.date.today()
            match = re.search(r'<a[^>]+title="posting time[^"]*">(.*?)\s+(\d+)\s*(\d{2,4})<', post)
            # Fix: the original called match.groups() unconditionally; when the
            # posting-time markup was absent, the resulting AttributeError was
            # not caught by the ValueError handler below and escaped to callers.
            if not match:
                return False
            mon_name, post_day, post_year = match.groups()
            post_year = int(post_year)
            if post_year < 2000: post_year += 2000  # two-digit years are post-2000
            post_month = SHORT_MONS.index(mon_name) + 1
            post_date = datetime.date(post_year, post_month, int(post_day))
            if today - post_date > filter_days:
                return True
    except ValueError:
        # unparsable setting, unknown month abbreviation, or invalid date
        return False
    return False
|
repotvsupertuga/tvsupertuga.repository
|
script.vodextende/scrapers/2ddl_scraper.py
|
Python
|
gpl-2.0
| 10,345
|
[
"ADF"
] |
480f563ecebe798d0467b001c88822e8b7ef14855fb59e30ecfeeab1d55220ff
|
from __future__ import absolute_import
import numpy as np
from sklearn.base import BaseEstimator
from . import metrics
from .rank_correlation import spearman_correlation, kendalltau_correlation
def _init_coefs(X, method="corrcoef"):
if method == "corrcoef":
return np.corrcoef(X, rowvar=False), 1.0
elif method == "cov":
init_cov = np.cov(X, rowvar=False)
return init_cov, np.max(np.abs(np.triu(init_cov)))
elif method == "spearman":
return spearman_correlation(X, rowvar=False), 1.0
elif method == "kendalltau":
return kendalltau_correlation(X, rowvar=False), 1.0
elif callable(method):
return method(X)
else:
raise ValueError(
(
"initialize_method must be 'corrcoef' or 'cov', "
"passed '{}' .".format(method)
)
)
def _compute_error(comp_cov, covariance_, precision_, score_metric="frobenius"):
"""Computes the covariance error vs. comp_cov.
Parameters
----------
comp_cov : array-like, shape = (n_features, n_features)
The precision to compare with.
This should normally be the test sample covariance/precision.
scaling : bool
If True, the squared error norm is divided by n_features.
If False (default), the squared error norm is not rescaled.
score_metric : str
The type of norm used to compute the error between the estimated
self.precision, self.covariance and the reference `comp_cov`.
Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
- 'kl': kl-divergence
- 'quadratic': quadratic loss
- 'log_likelihood': negative log likelihood
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
"""
if score_metric == "frobenius":
return np.linalg.norm(np.triu(comp_cov - covariance_, 1), ord="fro")
elif score_metric == "spectral":
error = comp_cov - covariance_
return np.amax(np.linalg.svdvals(np.dot(error.T, error)))
elif score_metric == "kl":
return metrics.kl_loss(comp_cov, precision_)
elif score_metric == "quadratic":
return metrics.quadratic_loss(comp_cov, precision_)
elif score_metric == "log_likelihood":
return -metrics.log_likelihood(comp_cov, precision_)
else:
raise NotImplementedError(
("Must be frobenius, spectral, kl, " "quadratic, or log_likelihood")
)
def _validate_path(path):
"""Sorts path values from largest to smallest.
Will warn if path parameter was not already sorted.
"""
if path is None:
return None
new_path = np.array(sorted(set(path), reverse=True))
if new_path[0] != path[0]:
print("Warning: Path must be sorted largest to smallest.")
return new_path
class InverseCovarianceEstimator(BaseEstimator):
    """
    Base class for inverse covariance estimators.

    Provides initialization method, metrics, scoring function,
    and ebic model selection.

    Parameters
    -----------
    score_metric : one of 'log_likelihood' (default), 'frobenius', 'spectral',
        'kl', or 'quadratic'
        Used for computing self.score().

    init_method : one of 'corrcoef', 'cov', 'spearman', 'kendalltau',
        or a custom function.
        Computes initial covariance and scales lambda appropriately.
        Using the custom function extends graphical model estimation to
        distributions beyond the multivariate Gaussian.
        The `spearman` or `kendalltau` options extend inverse covariance
        estimation to nonparanormal and transelliptic graphical models.
        Custom function must return ((n_features, n_features) ndarray, float)
        where the scalar parameter will be used to scale the penalty lam.

    auto_scale : bool
        If True, will compute self.lam_scale_ = max off-diagonal value when
        init_method='cov'.
        If false, then self.lam_scale_ = 1.
        lam_scale_ is used to scale user-supplied self.lam during fit.

    Attributes
    ----------
    covariance_ : 2D ndarray, shape (n_features, n_features)
        Estimated covariance matrix.
        This can also be a len(path) list of
        2D ndarray, shape (n_features, n_features)
        (e.g., see mode='path' in QuicGraphLasso)

    precision_ : 2D ndarray, shape (n_features, n_features)
        Estimated pseudo-inverse matrix.
        This can also be a len(path) list of
        2D ndarray, shape (n_features, n_features)
        (e.g., see mode='path' in QuicGraphLasso)

    sample_covariance_ : 2D ndarray, shape (n_features, n_features)
        Estimated sample covariance matrix.

    lam_scale_ : (float)
        Additional scaling factor on lambda (due to magnitude of
        sample_covariance_ values).
    """

    def __init__(
        self, score_metric="log_likelihood", init_method="cov", auto_scale=True
    ):
        self.score_metric = score_metric
        self.init_method = init_method
        self.auto_scale = auto_scale

        # These must be updated upon self.fit().
        # The first 4 will be set if self.init_coefs is used:
        #   self.covariance_
        #   self.precision_
        #   self.sample_covariance_
        #   self.lam_scale_
        #   self.n_samples_
        #   self.n_features_
        #   self.is_fitted_
        super(InverseCovarianceEstimator, self).__init__()

    def init_coefs(self, X):
        """Compute and store initial sample statistics for X.

        Initializes:
            self.n_samples_, self.n_features_ : dimensions of X
            self.sample_covariance_ : per self.init_method
            self.lam_scale_ : penalty scaling (forced to 1.0 when
                self.auto_scale is False)
        """
        self.n_samples_, self.n_features_ = X.shape
        self.sample_covariance_, self.lam_scale_ = _init_coefs(
            X, method=self.init_method
        )

        if not self.auto_scale:
            self.lam_scale_ = 1.0

    def score(self, X_test, y=None):
        """Computes the score between cov/prec of sample covariance of X_test
        and X via 'score_metric'.

        Note: We want to maximize score so we return the negative error.

        Parameters
        ----------
        X_test : array-like, shape = [n_samples, n_features]
            Test data of which we compute the likelihood, where n_samples is
            the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution than
            the data used in fit (including centering).

        y : not used.

        Returns
        -------
        result : float or list of floats
            The negative of the min error between `self.covariance_` and
            the sample covariance of X_test.
        """
        if isinstance(self.precision_, list):
            print("Warning: returning a list of scores.")

        # Fix: the second element (lam scale of the test data) was an unused
        # local; it is now explicitly discarded.
        S_test, _ = _init_coefs(X_test, method=self.init_method)
        error = self.cov_error(S_test, score_metric=self.score_metric)

        # maximize score with -error
        return -error

    def cov_error(self, comp_cov, score_metric="frobenius"):
        """Computes the covariance error vs. comp_cov.

        May require self.path_

        Parameters
        ----------
        comp_cov : array-like, shape = (n_features, n_features)
            The precision to compare with.
            This should normally be the test sample covariance/precision.

        score_metric : str
            The type of norm used to compute the error between the estimated
            self.precision, self.covariance and the reference `comp_cov`.
            Available error types:
            - 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': max(singular values(A^t.A))
            - 'kl': kl-divergence
            - 'quadratic': quadratic loss
            - 'log_likelihood': negative log likelihood

        Returns
        -------
        The min error between `self.covariance_` and `comp_cov`.

        If self.precision_ is a list, returns errors for each matrix,
        otherwise returns a scalar.
        """
        if not isinstance(self.precision_, list):
            return _compute_error(
                comp_cov, self.covariance_, self.precision_, score_metric
            )

        # Path mode: one error per penalty value along self.path_.
        path_errors = []
        for lidx, lam in enumerate(self.path_):
            path_errors.append(
                _compute_error(
                    comp_cov,
                    self.covariance_[lidx],
                    self.precision_[lidx],
                    score_metric,
                )
            )

        return np.array(path_errors)

    def ebic(self, gamma=0):
        """Compute EBIC scores for each model. If model is not "path" then
        returns a scalar score value.

        May require self.path_

        See:
        Extended Bayesian Information Criteria for Gaussian Graphical Models
        R. Foygel and M. Drton
        NIPS 2010

        Parameters
        ----------
        gamma : (float) in (0, 1)
            Choice of gamma=0 leads to classical BIC.
            Positive gamma leads to stronger penalization of large graphs.

        Returns
        -------
        Scalar ebic score or list of ebic scores.
        """
        if not self.is_fitted_:
            return

        if not isinstance(self.precision_, list):
            return metrics.ebic(
                self.sample_covariance_,
                self.precision_,
                self.n_samples_,
                self.n_features_,
                gamma=gamma,
            )

        # Path mode: one EBIC score per penalty value along self.path_.
        ebic_scores = []
        for lidx, lam in enumerate(self.path_):
            ebic_scores.append(
                metrics.ebic(
                    self.sample_covariance_,
                    self.precision_[lidx],
                    self.n_samples_,
                    self.n_features_,
                    gamma=gamma,
                )
            )

        return np.array(ebic_scores)

    def ebic_select(self, gamma=0):
        """Uses Extended Bayesian Information Criteria for model selection.

        Can only be used in path mode (doesn't really make sense otherwise).

        See:
        Extended Bayesian Information Criteria for Gaussian Graphical Models
        R. Foygel and M. Drton
        NIPS 2010

        Parameters
        ----------
        gamma : (float) in (0, 1)
            Choice of gamma=0 leads to classical BIC.
            Positive gamma leads to stronger penalization of large graphs.

        Returns
        -------
        Lambda index with best ebic score.  When multiple ebic scores are the
        same, returns the smallest lambda (largest index) with minimum score.

        Raises
        ------
        ValueError
            If the estimator was not fit in path mode.
        """
        if not isinstance(self.precision_, list):
            # Fix: the original had unreachable `return` statements after this
            # raise; they have been removed.
            raise ValueError("EBIC requires multiple models to select from.")

        if not self.is_fitted_:
            return

        ebic_scores = self.ebic(gamma=gamma)
        min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
        return np.max(min_indices)
|
skggm/skggm
|
inverse_covariance/inverse_covariance.py
|
Python
|
mit
| 11,604
|
[
"Gaussian"
] |
c648682167e3e07477f0343875b76c98e2acb4cc981e71b50e9c5c6c1ec3101c
|
from __future__ import division, print_function, absolute_import
import unittest
import os
from math import floor
from src.utilities import split_file, count_lines
from src.parse_journal.parse_worker import JournalParsingWorker
import json
class TestParsing(unittest.TestCase):
    """Unit tests for JournalParsingWorker.

    Covers the per-record skip logic (drafts, deleted entries, the default
    placeholder post, missing body/date/siteId) and an end-to-end parse of a
    small synthetic journal dump written under ./test_parse_data.

    NOTE(review): assertItemsEqual exists only in Python 2's unittest; under
    Python 3 this would need assertCountEqual -- confirm target interpreter.
    """

    def create_test_data(self):
        """Write a synthetic journal JSON file covering all parser cases.

        4 sites x 6 entries; each j value exercises one case (see branches).
        """
        # create a file to load in and run tests on
        if not os.path.exists("./test_parse_data"):
            os.makedirs("./test_parse_data")
        # NOTE(review): file is opened in binary mode but written with str
        # concatenation -- presumably targets Python 2; verify before porting.
        with open("./test_parse_data/test_input.json", "wb") as f:
            for s in range(4):
                for j in range(6):
                    if j == 0:
                        # normal data, no delete flags, all fields exist
                        line = '{ "_id" : { "$oid" : "' + str(j + 3 * s) + '" }, "siteId" : ' + str(s) + ', "journalId" : ' + str(j) + ', "userId" : 0, "isDraft" : "0", "title" : "TITLE", "amps" : [], "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY' + str(s) + str(j) + '", "updatedAt" : { "$date" : 1371412342000 }, "createdAt" : { "$date" : 1371412342000 } }\n'
                    elif j == 1:
                        # no titles
                        line = '{ "_id" : { "$oid" : "' + str(j + 3 * s) + '" }, "siteId" : ' + str(s) + ', "journalId" : ' + str(j) + ', "userId" : 0, "isDraft" : "0", "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY' + str(s) + str(j) + '", "createdAt" : { "$date" : 1371412342000 } }\n'
                    elif j == 2:
                        # no journal id
                        line = '{ "_id" : { "$oid" : "' + str(j + 3 * s) + '" }, "siteId" : ' + str(s) + ', "userId" : 0, "isDraft" : "0", "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY' + str(s) + str(j) + '", "createdAt" : { "$date" : 1371412342000 } }\n'
                    elif j == 3:
                        # no user id
                        line = '{ "_id" : { "$oid" : "' + str(j + 3 * s) + '" }, "siteId" : ' + str(s) + ', "journalId" : ' + str(j) + ', "isDraft" : "0", "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY' + str(s) + str(j) + '", "createdAt" : { "$date" : 1371412342000 } }\n'
                    elif j == 4:
                        # default journal entry, should be removed
                        line = '{ "_id" : { "$oid" : "' + str(j + 3 * s) + '" }, "siteId" : ' + str(s) + ', "journalId" : ' + str(j) + ', "userId" : 0, "isDraft" : "0", "platform" : "iphone", "ip" : "65.128.152.3", "body" : "This CaringBridge site was created just recently. Please visit again soon for a journal update.", "createdAt" : { "$date" : 1371412342000 } }\n'
                    else:
                        # delete flag
                        line = '{ "_id" : { "$oid" : "' + str(j + 3 * s) + '" }, "siteId" : ' + str(s) + ', "journalId" : ' + str(j) + ', "userId" : 0, "isDraft" : "1", "title" : "TITLE' + str(s) + str(j) + '", "amps" : [], "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY' + str(s) + str(j) + '", "updatedAt" : { "$date" : 1371412342000 }, "createdAt" : { "$date" : 1371412342000 } }\n'
                    f.write(line)

    def remove_test_data(self):
        """Delete the synthetic input/output files and their directory."""
        os.remove('./test_parse_data/test_input.json')
        os.remove('./test_parse_data/parsed_test_input.txt')
        os.rmdir('./test_parse_data')

    def test_worker_check_skip(self):
        """Records that must be skipped: draft, deleted, placeholder body,
        missing body, missing date, missing siteId."""
        # create a few examples that should be skipped
        isdraft = """{ "_id" : { "$oid" : "x" }, "siteId" : 0, "journalId" : 0,
            "userId" : 0, "isDraft" : "1", "title" : "TITLE", "amps" : [],
            "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY.",
            "updatedAt" : { "$date" : 371412342000 }, "createdAt" : { "$date" : 1371412342000 } }"""
        isdraft = json.loads(isdraft)
        isdeleted = """{ "_id" : { "$oid" : "x" }, "siteId" : 0, "journalId" : 0,
            "userId" : 0, "isDeleted" : "1", "title" : "TITLE", "amps" : [],
            "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY.",
            "updatedAt" : { "$date" : 371412342000 }, "createdAt" : { "$date" : 1371412342000 } }"""
        isdeleted = json.loads(isdeleted)
        default = """{ "_id" : { "$oid" : "x" }, "siteId" : 0, "journalId" : 0,
            "userId" : 0, "isDeleted" : "0", "title" : "TITLE", "amps" : [],
            "platform" : "iphone", "ip" : "65.128.152.3", "body" : "This CaringBridge site was created just recently. Please visit again soon for a journal update.",
            "updatedAt" : { "$date" : 371412342000 }, "createdAt" : { "$date" : 1371412342000 } }"""
        default = json.loads(default)
        nobody = """{ "_id" : { "$oid" : "x" }, "siteId" : 0, "journalId" : 0,
            "userId" : 0, "isDeleted" : "0", "title" : "TITLE", "amps" : [],
            "platform" : "iphone", "ip" : "65.128.152.3",
            "updatedAt" : { "$date" : 371412342000 }, "createdAt" : { "$date" : 1371412342000 } }"""
        nobody = json.loads(nobody)
        nodate = """{"siteId" : 0, "journalId" : 0,
            "userId" : 0, "isDeleted" : "0", "title" : "TITLE", "amps" : [],
            "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY." }"""
        nodate = json.loads(nodate)
        nositeid = """{"journalId" : 0,
            "userId" : 0, "isDeleted" : "1", "title" : "TITLE", "amps" : [],
            "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY." }"""
        nositeid = json.loads(nositeid)
        # all six records above should be flagged for skipping
        # (check_skip presumably returns a truthy 1/0 -- summed here)
        worker = JournalParsingWorker(input_path=None, output_dir=None, verbose=False)
        expected = worker.check_skip(isdraft) + worker.check_skip(isdeleted) + worker.check_skip(default) + worker.check_skip(nobody) + worker.check_skip(nodate) + worker.check_skip(nositeid)
        self.assertEqual(expected, 6)

    def test_worker_check_no_skip(self):
        """Well-formed, non-draft, non-deleted records must not be skipped."""
        # give some examples that shouldn't be skipped
        text1 = """{ "_id" : { "$oid" : "x" }, "siteId" : 0, "journalId" : 0,
            "userId" : 0, "isDraft" : "0", "title" : "TITLE", "amps" : [],
            "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY.",
            "updatedAt" : { "$date" : 371412342000 }, "createdAt" : { "$date" : 1371412342000 } }"""
        json_dict1 = json.loads(text1)
        text2 = """{ "_id" : { "$oid" : "x" }, "siteId" : 0, "journalId" : 0,
            "userId" : 0, "isDeleted" : "0", "title" : "TITLE", "amps" : [],
            "platform" : "iphone", "ip" : "65.128.152.3", "body" : "BODY.",
            "updatedAt" : { "$date" : 371412342000 }, "createdAt" : { "$date" : 1371412342000 } }"""
        json_dict2 = json.loads(text2)
        worker = JournalParsingWorker(input_path=None, output_dir=None, verbose=False)
        expected = worker.check_skip(json_dict1) + worker.check_skip(json_dict2)
        self.assertEqual(expected, 0)

    def test_worker_parse_file_content(self):
        """End-to-end: parse the synthetic dump and compare the tab-separated
        output lines (siteId, userId, journalId, createdAt, text).

        Missing journalId/userId are expected to appear as -1; the j==4
        (placeholder) and j==5 (draft) records are expected to be dropped.
        """
        self.create_test_data()
        worker = JournalParsingWorker(input_path='./test_parse_data/test_input.json', output_dir='./test_parse_data/', verbose=False)
        worker.parse_file()
        expected = []
        for siteId in range(4):
            for journalId in range(4):
                if journalId == 0:
                    # title present: title is prepended to the body text
                    expected.append(str(siteId) + '\t0\t' + str(journalId) + '\t1371412342000\tTITLE BODY' + str(siteId) + str(journalId))
                elif journalId == 1:
                    expected.append(str(siteId) + '\t0\t' + str(journalId) + '\t1371412342000\tBODY' + str(siteId) + str(journalId))
                elif journalId == 2:
                    # missing journalId -> -1 placeholder
                    expected.append(str(siteId) + '\t0\t-1' + '\t1371412342000\tBODY' + str(siteId) + str(journalId))
                elif journalId == 3:
                    # missing userId -> -1 placeholder
                    expected.append(str(siteId) + '\t-1\t' + str(journalId) + '\t1371412342000\tBODY' + str(siteId) + str(journalId))
        actual = []
        with open('./test_parse_data/parsed_test_input.txt', 'r') as fin:
            for line in fin:
                actual.append(line.replace("\n", ""))
        # order-insensitive comparison (Python 2 API; see class docstring)
        self.assertItemsEqual(actual, expected)
        self.remove_test_data()
|
robert-giaquinto/text-analysis
|
src/tests/test_parse_worker.py
|
Python
|
mit
| 8,115
|
[
"VisIt"
] |
1d06184ae8600a44215b8b5c5b941193a7af246e1ffe1a73095aa6b3c6c1c550
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Metadata consumed by Ansible's documentation/validation tooling: this
# module's interface is 'preview' (may still change) and community-supported.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_l4policyset
author: Chaitanya Deshpande (@chaitanyaavi) <chaitanya.deshpande@avinetworks.com>
short_description: Module for setup of L4PolicySet Avi RESTful Object
description:
- This module is used to configure L4PolicySet object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.6"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
choices: ["add", "replace", "delete"]
created_by:
description:
- Creator name.
- Field introduced in 17.2.7.
description:
description:
- Field introduced in 17.2.7.
is_internal_policy:
description:
- Field introduced in 17.2.7.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
l4_connection_policy:
description:
- Policy to apply when a new transport connection is setup.
- Field introduced in 17.2.7.
name:
description:
- Name of the l4 policy set.
- Field introduced in 17.2.7.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.2.7.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Id of the l4 policy set.
- Field introduced in 17.2.7.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create L4PolicySet object
avi_l4policyset:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_l4policyset
"""
RETURN = '''
obj:
description: L4PolicySet (api/l4policyset) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
    """Entry point for the avi_l4policyset Ansible module."""
    # Module-specific options; the shared Avi connection options (controller,
    # username, password, ...) are merged in below via avi_common_argument_spec().
    argument_specs = {
        'state': dict(default='present',
                      choices=['absent', 'present']),
        'avi_api_update_method': dict(default='put',
                                      choices=['put', 'patch']),
        'avi_api_patch_op': dict(choices=['add', 'replace', 'delete']),
        'created_by': dict(type='str',),
        'description': dict(type='str',),
        'is_internal_policy': dict(type='bool',),
        'l4_connection_policy': dict(type='dict',),
        'name': dict(type='str', required=True),
        'tenant_ref': dict(type='str',),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
    }
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail cleanly when the Avi SDK import at the top of the file did not succeed.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate create/update/delete handling to the shared Avi API helper.
    return avi_ansible_api(module, 'l4policyset',
                           set([]))


if __name__ == '__main__':
    main()
|
simonwydooghe/ansible
|
lib/ansible/modules/network/avi/avi_l4policyset.py
|
Python
|
gpl-3.0
| 4,150
|
[
"VisIt"
] |
2c163015d6b7c9d461c358f1d1ce0bbf706f6290e8e0245676f96321e34f54fc
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible's documentation/validation tooling: this
# module has a stable interface and is maintained by the Ansible core team.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = '''
module: systemd
author:
- Ansible Core Team
version_added: "2.2"
short_description: Manage services
description:
- Controls systemd services on remote hosts.
options:
name:
description:
- Name of the service. When using in a chroot environment you always need to specify the full name i.e. (crond.service).
aliases: [ service, unit ]
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
choices: [ reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the service should start on boot. B(At least one of state and enabled are required.)
type: bool
force:
description:
- Whether to override existing symlinks.
type: bool
version_added: 2.6
masked:
description:
- Whether the unit should be masked or not, a masked unit is impossible to start.
type: bool
daemon_reload:
description:
- run daemon-reload before doing any other operations, to make sure systemd has read any changes.
type: bool
default: 'no'
aliases: [ daemon-reload ]
user:
description:
- run systemctl talking to the service manager of the calling user, rather than the service manager
of the system. This is deprecated and the scope paramater should be used instead.
type: bool
default: 'no'
scope:
description:
- run systemctl within a given service manager scope, either as the default system scope (system),
the current user's scope (user), or the scope of all users (global).
choices: [ system, user, global ]
default: 'system'
version_added: "2.7"
no_block:
description:
- Do not synchronously wait for the requested operation to finish.
Enqueued job will continue without Ansible blocking on its completion.
type: bool
default: 'no'
version_added: "2.3"
notes:
- Since 2.4, one of the following options is required 'state', 'enabled', 'masked', 'daemon_reload', and all except 'daemon_reload' also require 'name'.
- Before 2.4 you always required 'name'.
requirements:
- A system managed by systemd.
'''
EXAMPLES = '''
- name: Make sure a service is running
systemd:
state: started
name: httpd
- name: stop service cron on debian, if running
systemd:
name: cron
state: stopped
- name: restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
systemd:
state: restarted
daemon_reload: yes
name: crond
- name: reload service httpd, in all cases
systemd:
name: httpd
state: reloaded
- name: enable service httpd and ensure it is not masked
systemd:
name: httpd
enabled: yes
masked: no
- name: enable a timer for dnf-automatic
systemd:
name: dnf-automatic.timer
state: started
enabled: yes
- name: just force systemd to reread configs (2.4 and above)
systemd:
daemon_reload: yes
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
contains: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
''' # NOQA
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
from ansible.module_utils._text import to_native
def is_running_service(service_status):
    """Return True if the unit's ActiveState indicates it is running or starting.

    service_status is the property dict produced by parse_systemctl_show().
    Uses a tuple membership test instead of constructing a throwaway
    set([...]) on every call as the original did.
    """
    return service_status['ActiveState'] in ('active', 'activating')
def request_was_ignored(out):
    """Return True when systemctl ignored the request instead of emitting
    key=value property lines."""
    has_property_lines = '=' in out
    return not has_property_lines and 'ignoring request' in out
def parse_systemctl_show(lines):
    """Parse `systemctl show` output lines into a property dict.

    Values normally occupy a single `Key=value` line, but keys beginning with
    'Exec' (e.g. ExecStart=) may carry a brace-wrapped value spanning several
    lines.  Only Exec* keys are treated as potentially multi-line: a value
    such as Description= may legitimately start with '{' without ever closing
    it, and greedily consuming lines there previously made Ansible report,
    cryptically, that the service file could not be found.
    """
    parsed = {}
    pending_key = None
    pending_lines = []
    for line in lines:
        if pending_key is not None:
            # Inside a multi-line Exec* value: accumulate until the closing brace.
            pending_lines.append(line)
            if line.rstrip().endswith('}'):
                parsed[pending_key] = '\n'.join(pending_lines).strip()
                pending_key = None
                pending_lines = []
            continue
        if '=' not in line:
            # Not a property line; ignore (matches original behavior).
            continue
        key, value = line.split('=', 1)
        if (key.startswith('Exec') and value.lstrip().startswith('{')
                and not value.rstrip().endswith('}')):
            # Opening line of a multi-line Exec* value.
            pending_key = key
            pending_lines = [value]
        else:
            parsed[key] = value.strip()
    return parsed
# ===========================================
# Main control flow
def main():
    """Ansible module entry point: manage a systemd unit.

    Handles daemon-reload, masking, boot-time enablement, and runtime state
    for a unit, honouring check mode throughout.  Terminates via
    ``module.exit_json`` / ``module.fail_json``.
    """
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', aliases=['service', 'unit']),
            state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            force=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
            user=dict(type='bool', default=False),
            scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
            no_block=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
    )

    systemctl = module.get_bin_path('systemctl', True)

    # Translate module options into systemctl command-line switches.
    if module.params['user'] and module.params['scope'] == 'system':
        module.deprecate("The 'user' paramater is being renamed to 'scope'", version=2.8)
        systemctl = systemctl + " --user"
    if module.params['scope'] == 'user':
        systemctl = systemctl + " --user"
    if module.params['scope'] == 'global':
        systemctl = systemctl + " --global"
    if module.params['no_block']:
        systemctl = systemctl + " --no-block"
    if module.params['force']:
        systemctl = systemctl + " --force"

    unit = module.params['name']

    rc = 0
    out = err = ''
    result = dict(
        name=unit,
        changed=False,
        status=dict(),
    )

    # These operations act on a specific unit, so 'name' is mandatory for them.
    for requires in ('state', 'enabled', 'masked'):
        if module.params[requires] is not None and unit is None:
            module.fail_json(msg="name is also required when specifying %s" % requires)

    # Run daemon-reload first, if requested
    if module.params['daemon_reload'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))

    if unit:
        found = False
        is_initd = sysv_exists(unit)
        is_systemd = False

        # check service data, cannot error out on rc as it changes across versions, assume not found
        (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))

        if request_was_ignored(out) or request_was_ignored(err):
            # fallback list-unit-files as show does not work on some systems (chroot)
            # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
            (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
            if rc == 0:
                is_systemd = True

        elif rc == 0:
            # load return of systemctl show into dictionary for easy access and return
            if out:
                result['status'] = parse_systemctl_show(to_native(out).split('\n'))

                is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'

                is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'

                # Check for loading error
                if is_systemd and not is_masked and 'LoadError' in result['status']:
                    module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
        else:
            # Check for systemctl command
            module.run_command(systemctl, check_rc=True)

        # Does service exist?
        found = is_systemd or is_initd
        if is_initd and not is_systemd:
            module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)

        # mask/unmask the service, if requested, can operate on services before they are installed
        if module.params['masked'] is not None:
            # state is not masked unless systemd affirms otherwise
            masked = ('LoadState' in result['status'] and result['status']['LoadState'] == 'masked')

            if masked != module.params['masked']:
                result['changed'] = True
                if module.params['masked']:
                    action = 'mask'
                else:
                    action = 'unmask'

                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
                        fail_if_missing(module, found, unit, msg='host')

        # Enable/disable service startup at boot if requested
        if module.params['enabled'] is not None:

            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'

            fail_if_missing(module, found, unit, msg='host')

            # do we need to enable the service?
            enabled = False
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))

            # check systemctl result or if it is a init script
            if rc == 0:
                enabled = True
            elif rc == 1:
                # if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
                if module.params['scope'] == 'system' and \
                        not module.params['user'] and \
                        is_initd and \
                        (not out.strip().endswith('disabled') or sysv_is_enabled(unit)):
                    enabled = True

            # default to current state
            result['enabled'] = enabled

            # Change enable/disable if needed
            if enabled != module.params['enabled']:
                result['changed'] = True
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))

                result['enabled'] = not enabled

        # set service state if requested
        if module.params['state'] is not None:
            fail_if_missing(module, found, unit, msg="host")

            # default to desired state
            result['state'] = module.params['state']

            # What is current service state?
            if 'ActiveState' in result['status']:
                action = None
                if module.params['state'] == 'started':
                    if not is_running_service(result['status']):
                        action = 'start'
                elif module.params['state'] == 'stopped':
                    if is_running_service(result['status']):
                        action = 'stop'
                else:
                    # restarted/reloaded: start if down, otherwise restart/reload
                    if not is_running_service(result['status']):
                        action = 'start'
                    else:
                        action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
                    result['state'] = 'started'

                if action:
                    result['changed'] = True
                    if not module.check_mode:
                        (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                        if rc != 0:
                            module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
            else:
                # this should not happen?
                module.fail_json(msg="Service is in unknown state", status=result['status'])

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
skg-net/ansible
|
lib/ansible/modules/system/systemd.py
|
Python
|
gpl-3.0
| 19,072
|
[
"Brian"
] |
9230527ba9e834f8a2a46e5c97ee95dec318f21cd29b8cdc9dd7471b4d4af5ab
|
# -*- coding: utf-8 -*-
"""
Simple Counter
~~~~~~~~~~~~~~
Instrumentation example that gathers method invocation counts
and dumps the numbers when the program exists, in JSON format.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import sys
from equip import Program, \
Instrumentation, \
SimpleRewriter, \
MethodVisitor
import equip.utils.log as logutils
from equip.utils.log import logger
logutils.enableLogger(to_file='./equip.log')
# Declaration of the code to be injected in various places. This
# code is compiled to bytecode which is then added to the various
# code_objects (e.g., method, etc.) based on what the visitor specifies.
BEFORE_CODE = """
GlobalCounterInst.count(file='{file_name}',
class_name='{class_name}',
method='{method_name}',
lineno={lineno})
"""
# We need to inject a new import statement that contains the GlobalCounterInst
IMPORT_CODE = """
from counter import GlobalCounterInst
"""
ON_ENTER_CODE = """
print "Starting instrumented program"
"""
# When the instrumented code exits, we want to serialize the data
ON_EXIT_CODE = """
GlobalCounterInst.to_json('./data.json')
"""
# The visitor is called for each method in the program (function or method)
class CounterInstrumentationVisitor(MethodVisitor):
    """Visitor invoked for every function/method declaration in the program.

    For each declaration it injects the import that makes
    ``GlobalCounterInst`` available, then the counting callback
    (``BEFORE_CODE``) at method entry.
    """

    def __init__(self):
        MethodVisitor.__init__(self)

    def visit(self, meth_decl):
        # One rewriter per declaration; it compiles and splices the snippets.
        rewriter = SimpleRewriter(meth_decl)
        # Ensure we have imported our `GlobalCounterInst`
        rewriter.insert_import(IMPORT_CODE, module_import=True)
        # This is the main instrumentation code with a callback to
        # `GlobalCounterInst::count`
        rewriter.insert_before(BEFORE_CODE)
HELP_MESSAGE = """
1. Run counter_instrument.py on the code you want to instrument:
$ python counter_instrument.py <path/to/code>
2. Run your original program:
$ export PYTHONPATH=$PYTHONPATH:/path/to/counter
$ python start_my_program.pyc
"""
def main(argc, argv):
if argc < 2:
print HELP_MESSAGE
return
visitor = CounterInstrumentationVisitor()
instr = Instrumentation(argv[1])
instr.set_option('force-rebuild')
if not instr.prepare_program():
print "[ERROR] Cannot find program code to instrument"
return
# Add code at the very beginning of each module (only triggered if __main__ routine)
instr.on_enter(ON_ENTER_CODE)
# Add code at the end of each module (only triggered if __main__ routine)
instr.on_exit(ON_EXIT_CODE)
# Apply the instrumentation with the visitor, and when a change has been made
# it will overwrite the pyc file.
instr.apply(visitor, rewrite=True)
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
|
neuroo/equip
|
examples/counter/counter_instrument.py
|
Python
|
apache-2.0
| 2,808
|
[
"VisIt"
] |
9ce84285ac4c637f32aa909f0d3e64d8177e1cefe40e2639af322dafd7167476
|
from argparse import ArgumentParser

# Command-line interface: everything needed to locate one Spitzer AOR and
# control how its photometry is extracted.
ap = ArgumentParser()
ap.add_argument('-pn', '--planet_name', required=True, type=str, help='Directory Name for the Planet (i.e. GJ1214).')
ap.add_argument('-c', '--channel', required=True, type=str, help='Channel number string (i.e. ch1 or ch2).')
ap.add_argument('-ad', '--aor_dir', required=True, type=str, help='AOR director (i.e. r59217921).')
ap.add_argument('-sd', '--save_sub_dir', required=False, type=str, default='ExtracedData', help='Subdirectory inside Planet_Directory to store extracted outputs.')
ap.add_argument('-pd', '--planets_dir', required=False, type=str, default='/Research/Planets/', help='Location of planet directory name from $HOME.')
ap.add_argument('-ds', '--data_sub_dir', required=False, type=str, default='/data/raw/', help='Sub directory structure from $HOME/Planet_Name/THIS/aor_dir/..')
ap.add_argument('-dt', '--data_tail_dir', required=False, type=str, default='/big/', help='String inside AOR DIR.')
ap.add_argument('-ff', '--fits_format', required=False, type=str, default='bcd', help='Format of the fits files (i.e. bcd).')
ap.add_argument('-uf', '--unc_format', required=False, type=str, default='bunc', help='Format of the photometric noise files (i.e. bcd).')
ap.add_argument('-m', '--method', required=False, type=str, default='median', help='method for photmetric extraction (i.e. median).')
ap.add_argument('-t', '--telescope', required=False, type=str, default='Spitzer', help='Telescope: [Spitzer, Hubble, JWST].')
ap.add_argument('-ou', '--outputUnits', required=False, type=str, default='electrons', help='Units for the extracted photometry [electrons, muJ_per_Pixel, etc].')
ap.add_argument('-d', '--data_dir', required=False, type=str, default='', help='Set location of all `bcd` and `bunc` files: bypass previous setup.')
ap.add_argument('-v', '--verbose', required=False, type=bool, default=False, help='Print out normally irrelevent things.')
args = vars(ap.parse_args())

# Unpack the parsed arguments into module-level names used throughout the script.
planetName = args['planet_name']
channel = args['channel']
aor_dir = args['aor_dir']
planetDirectory = args['planets_dir']
save_sub_dir = args['save_sub_dir']
data_sub_dir = args['data_sub_dir']
data_tail_dir = args['data_tail_dir']
fits_format = args['fits_format']
unc_format = args['unc_format']
method = args['method']
telescope = args['telescope']
outputUnits = args['outputUnits']
data_dir = args['data_dir']
verbose = args['verbose']
# from astroML.plotting import hist
from astropy.io import fits
from astropy.modeling import models, fitting
from datetime import datetime
# from image_registration import cross_correlation_shifts
from glob import glob
from functools import partial
# from matplotlib.ticker import MaxNLocator
# from matplotlib import style
# from least_asymmetry.asym import actr, moments, fitgaussian
from multiprocessing import cpu_count, Pool
from numpy import min as npmin, max as npmax, zeros, arange, sum, float, isnan, hstack
from numpy import int32 as npint, round as npround, nansum as sum, nanstd as std
from os import environ, path, mkdir
from pandas import DataFrame, read_csv, read_pickle, scatter_matrix
from photutils import CircularAperture, CircularAnnulus, aperture_photometry, findstars
from numpy import sort, linspace, indices, median, mean, std, empty, transpose, ceil
from numpy import concatenate, pi, sqrt, ones, diag, inf, isnan, isfinite, array, nanmax
# from pylab import gcf, ion, figure, plot, imshow, scatter, legend, rcParams
# from seaborn import *
from scipy.special import erf
from scipy import stats
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
from socket import gethostname
from statsmodels.robust import scale
from statsmodels.nonparametric import kde
from sys import exit
from time import time, localtime
from tqdm import tqdm
from numpy import zeros, nanmedian as median, nanmean as mean, nan
from sys import exit
from sklearn.externals import joblib
import numpy as np
startFull = time()
print('\n\n**Initializing Master Class for Exoplanet Time Series Observation Photometry**\n\n')
from wanderer import wanderer
def clipOutlier2D(arr2D, nSig=10):
    """Return a copy of *arr2D* with >nSig outliers replaced by the inlier median.

    The robust scale is a single scalar: the quadrature sum of the per-column
    MAD values from statsmodels' ``scale.mad``, so the same threshold applies
    to every column.
    NOTE(review): ``median(arr2D[inliers],axis=0)`` reduces a 1-D boolean
    selection, so all flagged elements are replaced by one scalar median of
    all inliers — confirm that per-column replacement was not intended.
    """
    arr2D = arr2D.copy()  # never mutate the caller's array
    medArr2D = median(arr2D,axis=0)
    sclArr2D = np.sqrt(((scale.mad(arr2D)**2.).sum()))
    outliers = abs(arr2D - medArr2D) > nSig*sclArr2D
    inliers = abs(arr2D - medArr2D) <= nSig*sclArr2D
    arr2D[outliers] = median(arr2D[inliers],axis=0)
    return arr2D
# As an example, Spitzer data is expected to be store in the directory structure:
#
# `$HOME/PLANET_DIRECTORY/PLANETNAME/data/raw/AORDIR/CHANNEL/bcd/`
#
# EXAMPLE:
#
# 1. On a Linux machine
# 2. With user `tempuser`,
# 3. And all Spitzer data is store in `Research/Planets`
# 4. The planet named `Happy-5b`
# 5. Observed during AOR r11235813
# 6. In CH2 (4.5 microns)
#
# The `loadfitsdir` should read as: `/home/tempuser/Research/Planets/HAPPY5/data/raw/r11235813/ch2/bcd/`
# channel = 'ch2/'
dataSub = fits_format+'/'

# Default data location: $HOME/<planets_dir>/<planet>/<data_sub_dir>/<channel>/<data_tail_dir>,
# only used when --data_dir was not given on the command line.
# FIX: was `data_dir is ''` — identity comparison against a string literal is
# implementation-dependent (SyntaxWarning on CPython >= 3.8); use equality.
if data_dir == '': data_dir = environ['HOME'] + planetDirectory + planetName + data_sub_dir + channel + data_tail_dir

print('Current Data Dir: {}'.format(data_dir))

fileExt = '*{}.fits'.format(fits_format)
uncsExt = '*{}.fits'.format(unc_format)

loadfitsdir = data_dir + aor_dir + '/' + channel + '/' + dataSub

print('Directory to load fits files from: {}'.format(loadfitsdir))

nCores = cpu_count()
print('Found {} cores to process'.format(nCores))

# Discover the science frames and their per-pixel uncertainty frames.
fitsFilenames = glob(loadfitsdir + fileExt)
uncsFilenames = glob(loadfitsdir + uncsExt)

print('Found {} {}.fits files'.format(len(fitsFilenames), fits_format))
print('Found {} unc.fits files'.format(len(uncsFilenames)))

# Fail fast when the AOR directory does not contain the expected files.
if len(fitsFilenames) == 0: raise ValueError('There are NO `{}.fits` files in the directory {}'.format(fits_format, loadfitsdir))
if len(uncsFilenames) == 0: raise ValueError('There are NO `{}.fits` files in the directory {}'.format(unc_format, loadfitsdir))

do_db_scan = False# len(fitsFilenames*64) < 6e4
if do_db_scan:
    pass
else:
    print('There are too many images for a DB-Scan; i.e. >1e5 images')

# Sanity check: read one header to confirm the AOR label.
header_test = fits.getheader(fitsFilenames[0])
print('\n\nAORLABEL:\t{}\nNum Fits Files:\t{}\nNum Unc Files:\t{}\n\n'.format(header_test['AORLABEL'], len(fitsFilenames), len(uncsFilenames)))

if verbose: print(fitsFilenames)
if verbose: print(uncsFilenames)
# Necessary Constants Spitzer
ppm = 1e6
y,x = 0,1  # axis order convention: row (y) first, column (x) second

yguess, xguess = 15., 15. # Specific to Spitzer circa 2010 and beyond
filetype = '{}.fits'.format(fits_format) # Specific to Spitzer Basic Calibrated Data

print('Initialize an instance of `wanderer` as `example_wanderer_median`\n')
example_wanderer_median = wanderer(fitsFileDir=loadfitsdir, filetype=filetype, telescope=telescope,
                                   yguess=yguess, xguess=xguess, method=method, nCores=nCores)

# Tag the instance with this run's identifiers for later bookkeeping.
example_wanderer_median.AOR = aor_dir
example_wanderer_median.planetName = planetName
example_wanderer_median.channel = channel

print('Load Data From Fits Files in ' + loadfitsdir + '\n')
example_wanderer_median.spitzer_load_fits_file(outputUnits=outputUnits)

# Replace any NaNs in the image cube with the global median so that later
# centroiding/photometry does not propagate them.
print('**Double check for NaNs**')
example_wanderer_median.imageCube[np.where(isnan(example_wanderer_median.imageCube))] = np.nanmedian(example_wanderer_median.imageCube)

print('**Identifier Strong Outliers**')
print('Find, flag, and NaN the "Bad Pixels" Outliers' + '\n')
example_wanderer_median.find_bad_pixels()

print('Fit for All Centers: Flux Weighted, Gaussian Fitting, Gaussian Moments, Least Asymmetry' + '\n')
# example_wanderer_median.fit_gaussian_centering()
example_wanderer_median.fit_flux_weighted_centering()
# example_wanderer_median.fit_least_asymmetry_centering()
# example_wanderer_median.fit_all_centering() # calling this calls least_asymmetry, which does not work :(

start = time()
example_wanderer_median.mp_lmfit_gaussian_centering(subArraySize=6, recheckMethod=None, median_crop=False)
print('Operation took {} seconds with {} cores'.format(time()-start, example_wanderer_median.nCores))

# Optional outlier rejection of the Gaussian-fit centers via DBSCAN
# (disabled above via do_db_scan).
if do_db_scan:
    print('DBScanning Gaussian Fit Centers')
    from sklearn.cluster import DBSCAN
    dbs = DBSCAN(n_jobs=-1, eps=0.2, leaf_size=10)
    dbsPred = dbs.fit_predict(example_wanderer_median.centering_GaussianFit)
    dbs_options = [k for k in range(-1,100) if (dbsPred==k).sum()]
else:
    dbsPred = None
    dbs_options = []

npix = 3

# stillOutliers = np.where(abs(example_wanderer_median.centering_GaussianFit - medGaussCenters) > 4*sclGaussCenterAvg)[0]
# print('There are {} outliers remaining'.format(len(stillOutliers)))

if do_db_scan:
    dbsClean = 0
    dbsKeep = (dbsPred == dbsClean)

nCores = example_wanderer_median.nCores

# Time each of the four background-estimation strategies.
start = time()
example_wanderer_median.mp_measure_background_circle_masked()
print('CircleBG took {} seconds with {} cores'.format(time() - start, nCores))

start = time()
example_wanderer_median.mp_measure_background_annular_mask()
print('AnnularBG took {} seconds with {} cores'.format(time() - start, nCores))

start = time()
example_wanderer_median.mp_measure_background_KDE_Mode()
print('KDEUnivBG took {} seconds with {} cores'.format(time() - start, nCores))

start = time()
example_wanderer_median.mp_measure_background_median_masked()
print('MedianBG took {} seconds with {} cores'.format(time() - start, nCores))

example_wanderer_median.measure_effective_width()
print(example_wanderer_median.effective_widths.mean(), sqrt(example_wanderer_median.effective_widths).mean())

print('Pipeline took {} seconds thus far'.format(time() - startFull))
print('Iterating over Background Techniques, Centering Techniques, Aperture Radii' + '\n')
centering_choices = ['Gaussian_Fit']#, 'Gaussian_Mom', 'FluxWeighted']#, 'LeastAsymmetry']
background_choices = ['AnnularMask']#example_wanderer_median.background_df.columns
staticRads = np.arange(1, 6,0.5)#[1.0 ]# aperRads = np.arange(1, 6,0.5)
varRads = [0.0, 0.25, 0.50, 0.75, 1.0, 1.25, 1.50]#[None]#

# Quadrature-width offsets, clipped at 5 sigma.
# NOTE(review): vrad_dist is computed but never passed below — presumably
# mp_compute_flux_over_time_varRad derives it internally; confirm.
vrad_dist = example_wanderer_median.quadrature_widths - np.median(example_wanderer_median.quadrature_widths)
vrad_dist = clipOutlier2D(vrad_dist, nSig=5)

# Extract photometry for every (static radius, variable radius) combination.
for staticRad in tqdm(staticRads, total=len(staticRads), desc='Static'):
    for varRad in tqdm(varRads, total=len(varRads), desc='Variable'):
        startMPFlux = time()
        example_wanderer_median.mp_compute_flux_over_time_varRad(staticRad, varRad, centering_choices[0], background_choices[0], useTheForce=True)

print('**Create Beta Variable Radius**')
example_wanderer_median.mp_compute_flux_over_time_betaRad()# Gaussian_Fit_AnnularMask_rad_betaRad_0.0_0.0

print('Entire Pipeline took {} seconds'.format(time() - startFull))

if do_db_scan:
    print('DB_Scanning All Flux Vectors')
    example_wanderer_median.mp_DBScan_Flux_All()

print('Creating master Inliers Array')
# inlier_master = array(list(example_wanderer_median.inliers_Phots.values())).mean(axis=0) == 1.0

print('Extracting PLD Components')
example_wanderer_median.extract_PLD_components()

if do_db_scan:
    print('Running DBScan on the PLD Components')
    example_wanderer_median.mp_DBScan_PLD_All()

print('Saving `example_wanderer_median` to a set of pickles for various Image Cubes and the Storage Dictionary')

# Build the output directory piecewise, creating each level as needed.
savefiledir_parts = [environ['HOME']+planetDirectory, planetName+'/' , save_sub_dir + '/' , channel + '/' , aor_dir + '/']
savefiledir = ''
for sfpart in savefiledir_parts:
    savefiledir = savefiledir + sfpart
    if not path.exists(savefiledir): mkdir(savefiledir)

# savefiledir = environ['HOME']+planetDirectory+planetName+'/' + save_sub_dir + '/' + channel + '/' + aor_dir + '/'

saveFileNameHeader = planetName+'_'+ aor_dir +'_Median'
saveFileType = '.joblib.save'

if not path.exists(environ['HOME']+planetDirectory+planetName+'/'+save_sub_dir+'/'):
    mkdir(environ['HOME']+planetDirectory+planetName+'/'+save_sub_dir+'/')

if not path.exists(savefiledir):
    print('Creating ' + savefiledir)
    mkdir(savefiledir)

print()
print('Saving to ' + savefiledir + saveFileNameHeader + saveFileType)
print()

example_wanderer_median.save_data_to_save_files(savefiledir=savefiledir, saveFileNameHeader=saveFileNameHeader, saveFileType=saveFileType)

print('Entire Pipeline took {} seconds'.format(time() - startFull))
|
exowanderer/ExoplanetTSO
|
Exoplanet_TSO_-_Photometric_Extraction_Pipeline.py
|
Python
|
gpl-3.0
| 12,708
|
[
"Gaussian"
] |
d0db554448e3c67118ad9f7871151ffb2051ab0250bac91168c53831b0c4b101
|
# This file is part of BurpyHooves.
#
# BurpyHooves is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BurpyHooves is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the# GNU General Public License
# along with BurpyHooves. If not, see <http://www.gnu.org/licenses/>.
from modules import Module
import random
class BuhModule(Module):
    """IRC bot module replying with a randomly mIRC-colorized "buh" string.

    Registers the !bored and !boredx commands; both emit a 'b' and 'h'
    wrapped around a random-length run of colored 'u' characters.
    '\\x03' is the mIRC color-code escape; color numbers range 1-15.
    NOTE: this file is Python 2 (``xrange``).
    """

    def module_init(self, bot):
        random.seed()
        self.hook_command("bored", self.command_bored)
        self.hook_command("boredx", self.command_boredx)

    def gen_buh(self,length):
        # Two colors drawn for the outer letters, plus one per 'u'.
        # NOTE(review): colors[1] is never used — the trailing letter's
        # color is drawn fresh below instead.
        colors = [random.randint(1,15) for i in xrange(2)]
        uColors = [random.randint(1,15) for i in xrange(random.randint(1,length))]
        # 1-in-25 chance to swap the outer letters ('h'...'b' instead of 'b'...'h').
        blast = True if random.randint(1,25) == 1 else False
        msg = '\x03' + str(colors[0])
        if blast:
            msg += 'h'
        else:
            msg += 'b'
        msg += self.gen_u(uColors)
        msg += '\x03' + str(random.randint(1,15))
        if blast:
            msg += 'b'
        else:
            msg += 'h'
        return msg

    def gen_u(self, colors):
        # One colored 'u' per entry in `colors`.
        u = ""
        for i in colors:
            u += '\x03' + str(i) + 'u'
        return u

    def command_bored(self, bot, event_args):
        # !bored: up to 20 'u's.
        msg = self.gen_buh(random.randint(1,20))
        bot.reply(msg)

    def command_boredx(self, bot, event_args):
        # !boredx: longer variant, up to 50 'u's.
        msg = self.gen_buh(random.randint(1,50))
        bot.reply(msg)
|
SparklerPone/muckbot
|
modules/buh.py
|
Python
|
gpl-3.0
| 1,785
|
[
"BLAST"
] |
6f04cd5748a6d59d08701ba8f83555370a0f3e63c8ef921442d7d7633b88d28b
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" example train fit utility """
import logging
import os
import time
import re
import math
import mxnet as mx
import time
def get_epoch_size(args, kv):
    """Number of batches per epoch for this worker's shard of the data."""
    examples_per_worker = int(args.num_examples / kv.num_workers)
    return math.ceil(examples_per_worker / args.batch_size)
def _get_lr_scheduler(args, kv):
    """Build ``(base_lr, lr_scheduler)`` from the parsed CLI args.

    Returns ``(args.lr, None)`` when no decay is configured
    (``lr_factor >= 1``).  When --lr-step-epochs contains 'pow', a
    polynomial schedule is used; otherwise a multi-factor step schedule
    at the listed epochs.  When resuming from --load-epoch, the base lr
    is pre-decayed for the steps already passed.
    """
    if 'lr_factor' not in args or args.lr_factor >= 1:
        return (args.lr, None)
    epoch_size = get_epoch_size(args, kv)
    begin_epoch = args.load_epoch if args.load_epoch else 0
    if 'pow' in args.lr_step_epochs:
        # Polynomial decay over the full run, e.g. '--lr-step-epochs pow2'.
        lr = args.lr
        max_up = args.num_epochs * epoch_size
        pwr = float(re.sub('pow[- ]*', '', args.lr_step_epochs))
        poly_sched = mx.lr_scheduler.PolyScheduler(max_up, lr, pwr)
        return (lr, poly_sched)
    step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
    lr = args.lr
    # Apply the decay for every step boundary we have already passed.
    for s in step_epochs:
        if begin_epoch >= s:
            lr *= args.lr_factor
    if lr != args.lr:
        logging.info('Adjust learning rate to %e for epoch %d',
                     lr, begin_epoch)
    # Remaining step boundaries, expressed in batches relative to resume point.
    steps = [epoch_size * (x - begin_epoch)
             for x in step_epochs if x - begin_epoch > 0]
    if steps:
        return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor))
    else:
        return (lr, None)
def _load_model(args, rank=0):
    """Load the checkpoint named by --model-prefix / --load-epoch.

    Returns ``(None, None, None)`` when no --load-epoch was given.
    For worker ``rank > 0``, a per-rank checkpoint ("<prefix>-<rank>")
    is preferred when its symbol file exists on disk.
    """
    if 'load_epoch' not in args or args.load_epoch is None:
        return (None, None, None)
    assert args.model_prefix is not None
    model_prefix = args.model_prefix
    if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
        model_prefix += "-%d" % (rank)
    sym, arg_params, aux_params = mx.model.load_checkpoint(
        model_prefix, args.load_epoch)
    logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
    return (sym, arg_params, aux_params)
def _save_model(args, rank=0):
    """Checkpoint callback saving params every --save-period epochs.

    Returns ``None`` when no --model-prefix was given; rank > 0 workers
    write to a per-rank prefix.
    """
    if args.model_prefix is None:
        return None
    prefix = args.model_prefix if rank == 0 else "%s-%d" % (args.model_prefix, rank)
    return mx.callback.do_checkpoint(prefix, period=args.save_period)
def add_fit_args(parser):
    """Register the generic training options required by ``fit`` on *parser*.

    parser : argparse.ArgumentParser
    Returns the 'Training' argument group after adding all options to it.
    """
    train = parser.add_argument_group('Training', 'model training')
    train.add_argument('--network', type=str,
                       help='the neural network to use')
    train.add_argument('--num-layers', type=int,
                       help='number of layers in the neural network, \
                             required by some networks such as resnet')
    train.add_argument('--gpus', type=str,
                       help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
    train.add_argument('--kv-store', type=str, default='device',
                       help='key-value store type')
    train.add_argument('--num-epochs', type=int, default=100,
                       help='max num of epochs')
    train.add_argument('--lr', type=float, default=0.1,
                       help='initial learning rate')
    train.add_argument('--lr-factor', type=float, default=0.1,
                       help='the ratio to reduce lr on each step')
    train.add_argument('--lr-step-epochs', type=str,
                       help='the epochs to reduce the lr, e.g. 30,60')
    train.add_argument('--initializer', type=str, default='default',
                       help='the initializer type')
    train.add_argument('--optimizer', type=str, default='sgd',
                       help='the optimizer type')
    train.add_argument('--mom', type=float, default=0.9,
                       help='momentum for sgd')
    train.add_argument('--wd', type=float, default=0.0001,
                       help='weight decay for sgd')
    train.add_argument('--batch-size', type=int, default=128,
                       help='the batch size')
    train.add_argument('--disp-batches', type=int, default=20,
                       help='show progress for every n batches')
    train.add_argument('--model-prefix', type=str,
                       help='model prefix')
    train.add_argument('--save-period', type=int, default=1, help='params saving period')
    # FIX: was parser.add_argument — register --monitor on the Training group
    # like its siblings (same underlying parser; only help grouping changes).
    train.add_argument('--monitor', dest='monitor', type=int, default=0,
                       help='log network parameters every N iters if larger than 0')
    train.add_argument('--load-epoch', type=int,
                       help='load the model on an epoch using the model-load-prefix')
    train.add_argument('--top-k', type=int, default=0,
                       help='report the top-k accuracy. 0 means no report.')
    train.add_argument('--loss', type=str, default='',
                       help='show the cross-entropy or nll loss. ce strands for cross-entropy, nll-loss stands for likelihood loss')
    train.add_argument('--test-io', type=int, default=0,
                       help='1 means test reading speed without training')
    train.add_argument('--dtype', type=str, default='float32',
                       help='precision: float32 or float16')
    train.add_argument('--gc-type', type=str, default='none',
                       help='type of gradient compression to use, \
                             takes `2bit` or `none` for now')
    train.add_argument('--gc-threshold', type=float, default=0.5,
                       help='threshold for 2bit gradient compression')
    # additional parameters for large batch sgd
    train.add_argument('--macrobatch-size', type=int, default=0,
                       help='distributed effective batch size')
    train.add_argument('--warmup-epochs', type=int, default=5,
                       help='the epochs to ramp-up lr to scaled large-batch value')
    train.add_argument('--warmup-strategy', type=str, default='linear',
                       help='the ramping-up strategy for large batch sgd')
    return train
def fit(args, network, data_loader, **kwargs):
"""
train a model
args : argparse returns
network : the symbol definition of the nerual network
data_loader : function that returns the train and val data iterators
"""
# kvstore
kv = mx.kvstore.create(args.kv_store)
time.sleep(30)
if args.gc_type != 'none':
kv.set_gradient_compression({'type': args.gc_type,
'threshold': args.gc_threshold})
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
epoch_size = get_epoch_size(args, kv)
# data iterators
(train, val) = data_loader(args, kv)
if 'dist' in args.kv_store and not 'async' in args.kv_store:
logging.info('Resizing training data to %d batches per machine', epoch_size)
# resize train iter to ensure each machine has same number of batches per epoch
# if not, dist_sync can hang at the end with one machine waiting for other machines
train = mx.io.ResizeIter(train, epoch_size)
if args.test_io:
tic = time.time()
for i, batch in enumerate(train):
if isinstance(batch, list):
for b in batch:
for j in b.data:
j.wait_to_read()
else:
for j in batch.data:
j.wait_to_read()
if (i + 1) % args.disp_batches == 0:
logging.info('Batch [%d]\tSpeed: %.2f samples/sec', i,
args.disp_batches * args.batch_size / (time.time() - tic))
tic = time.time()
return
# load model
if 'arg_params' in kwargs and 'aux_params' in kwargs:
arg_params = kwargs['arg_params']
aux_params = kwargs['aux_params']
else:
sym, arg_params, aux_params = _load_model(args, kv.rank)
if sym is not None:
assert sym.tojson() == network.tojson()
# save model
checkpoint = _save_model(args, kv.rank)
# devices for training
local_rank = kv.local_rank
devs = mx.cpu() if args.gpus is None or args.gpus == "" else mx.gpu(local_rank)
# learning rate
lr, lr_scheduler = _get_lr_scheduler(args, kv)
# create model
model = mx.mod.Module(
context=devs,
symbol=network
)
lr_scheduler = lr_scheduler
optimizer_params = {
'learning_rate': lr,
'wd': args.wd,
'lr_scheduler': lr_scheduler,
'multi_precision': True}
# Only a limited number of optimizers have 'momentum' property
has_momentum = {'sgd', 'dcasgd', 'nag'}
if args.optimizer in has_momentum:
optimizer_params['momentum'] = args.mom
monitor = mx.mon.Monitor(
args.monitor, pattern=".*") if args.monitor > 0 else None
# A limited number of optimizers have a warmup period
has_warmup = {'lbsgd', 'lbnag'}
if args.optimizer in has_warmup:
nworkers = kv.num_workers
if epoch_size < 1:
epoch_size = 1
macrobatch_size = args.macrobatch_size
if macrobatch_size < args.batch_size * nworkers:
macrobatch_size = args.batch_size * nworkers
#batch_scale = round(float(macrobatch_size) / args.batch_size / nworkers +0.4999)
batch_scale = math.ceil(
float(macrobatch_size) / args.batch_size / nworkers)
optimizer_params['updates_per_epoch'] = epoch_size
optimizer_params['begin_epoch'] = args.load_epoch if args.load_epoch else 0
optimizer_params['batch_scale'] = batch_scale
optimizer_params['warmup_strategy'] = args.warmup_strategy
optimizer_params['warmup_epochs'] = args.warmup_epochs
optimizer_params['num_epochs'] = args.num_epochs
if args.initializer == 'default':
if args.network == 'alexnet':
# AlexNet will not converge using Xavier
initializer = mx.init.Normal()
# VGG will not trend to converge using Xavier-Gaussian
elif args.network and 'vgg' in args.network:
initializer = mx.init.Xavier()
else:
initializer = mx.init.Xavier(
rnd_type='gaussian', factor_type="in", magnitude=2)
# initializer = mx.init.Xavier(factor_type="in", magnitude=2.34),
elif args.initializer == 'xavier':
initializer = mx.init.Xavier()
elif args.initializer == 'msra':
initializer = mx.init.MSRAPrelu()
elif args.initializer == 'orthogonal':
initializer = mx.init.Orthogonal()
elif args.initializer == 'normal':
initializer = mx.init.Normal()
elif args.initializer == 'uniform':
initializer = mx.init.Uniform()
elif args.initializer == 'one':
initializer = mx.init.One()
elif args.initializer == 'zero':
initializer = mx.init.Zero()
# evaluation metrices
eval_metrics = ['accuracy']
if args.top_k > 0:
eval_metrics.append(mx.metric.create(
'top_k_accuracy', top_k=args.top_k))
supported_loss = ['ce', 'nll_loss']
if len(args.loss) > 0:
# ce or nll loss is only applicable to softmax output
loss_type_list = args.loss.split(',')
if 'softmax_output' in network.list_outputs():
for loss_type in loss_type_list:
loss_type = loss_type.strip()
if loss_type == 'nll':
loss_type = 'nll_loss'
if loss_type not in supported_loss:
logging.warning(loss_type + ' is not an valid loss type, only cross-entropy or ' \
'negative likelihood loss is supported!')
else:
eval_metrics.append(mx.metric.create(loss_type))
else:
logging.warning("The output is not softmax_output, loss argument will be skipped!")
# callbacks that run after each batch
batch_end_callbacks = [mx.callback.Speedometer(
args.batch_size, args.disp_batches)]
if 'batch_end_callback' in kwargs:
cbs = kwargs['batch_end_callback']
batch_end_callbacks += cbs if isinstance(cbs, list) else [cbs]
# run
model.fit(train,
begin_epoch=args.load_epoch if args.load_epoch else 0,
num_epoch=args.num_epochs,
eval_data=val,
eval_metric=eval_metrics,
kvstore=kv,
optimizer=args.optimizer,
optimizer_params=optimizer_params,
initializer=initializer,
arg_params=arg_params,
aux_params=aux_params,
batch_end_callback=batch_end_callbacks,
epoch_end_callback=checkpoint,
allow_missing=True,
monitor=monitor)
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/horovod/examples/mxnet/common/fit.py
|
Python
|
apache-2.0
| 13,466
|
[
"Gaussian"
] |
d1c6570ddf5547ddfa9b022a01a9a761752f9be0d8350872f0b941aed44d26d0
|
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
import six
from six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.py27compat import rmtree_safe
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
# Surface PEP 440 version-format warnings that pkg_resources would
# otherwise suppress, so users see non-conforming version strings.
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
# Public API of this module; everything else is an implementation detail.
__all__ = [
    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
    'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when this interpreter uses 64-bit pointers."""
    pointer_size = struct.calcsize("P")
    return pointer_size == 8
def samefile(p1, p2):
    """
    Determine if two paths reference the same file.

    Augments os.path.samefile to work on Windows and
    suppresses errors if the path doesn't exist.
    """
    # os.path.samefile is only reliable when both paths exist.
    if hasattr(os.path, 'samefile') and os.path.exists(p1) and os.path.exists(p2):
        return os.path.samefile(p1, p2)

    def canonical(p):
        return os.path.normpath(os.path.normcase(p))

    return canonical(p1) == canonical(p2)
if six.PY2:
    def _to_ascii(s):
        # On Python 2, str is already a byte string; nothing to encode.
        return s

    def isascii(s):
        """Return True if *s* decodes cleanly as ASCII."""
        try:
            six.text_type(s, 'ascii')
        except UnicodeError:
            return False
        return True
else:
    def _to_ascii(s):
        # On Python 3, produce the ASCII byte representation.
        return s.encode('ascii')

    def isascii(s):
        """Return True if *s* encodes cleanly as ASCII."""
        try:
            s.encode('ascii')
        except UnicodeError:
            return False
        return True
_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
class easy_install(Command):
    """Manage a download/build/install process"""
    description = "Find/get/install Python packages"
    # Tell distutils that positional arguments belong to this command.
    command_consumes_arguments = True
    # (long-option, short-option, help-text) triples consumed by distutils'
    # option parser; option names map to attributes with '-' -> '_'.
    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
         "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
         "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('no-find-links', None,
         "Don't load find-links defined in packages being installed")
    ]
    # Options that take no argument (flags).
    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version'
    ]
    # Only expose --user when per-user site-packages are enabled.
    if site.ENABLE_USER_SITE:
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')
    # --always-unzip is the logical negation of --zip-ok.
    negative_opt = {'always-unzip': 'zip-ok'}
    # Factory for the package index; subclasses may override.
    create_index = PackageIndex
    def initialize_options(self):
        """Set every option attribute to its pre-parse default."""
        # the --user option seems to be an opt-in one,
        # so the default should be False.
        self.user = 0
        self.zip_ok = self.local_snapshots_ok = None
        self.install_dir = self.script_dir = self.exclude_scripts = None
        self.index_url = None
        self.find_links = None
        self.build_directory = None
        self.args = None
        self.optimize = self.record = None
        self.upgrade = self.always_copy = self.multi_version = None
        self.editable = self.no_deps = self.allow_hosts = None
        self.root = self.prefix = self.no_report = None
        self.version = None
        self.install_purelib = None  # for pure module distributions
        self.install_platlib = None  # non-pure (dists w/ extensions)
        self.install_headers = None  # for C/C++ headers
        self.install_lib = None  # set to either purelib or platlib
        self.install_scripts = None
        self.install_data = None
        self.install_base = None
        self.install_platbase = None
        if site.ENABLE_USER_SITE:
            self.install_userbase = site.USER_BASE
            self.install_usersite = site.USER_SITE
        else:
            self.install_userbase = None
            self.install_usersite = None
        self.no_find_links = None
        # Options not specifiable via command line
        self.package_index = None
        self.pth_file = self.always_copy_from = None
        self.site_dirs = None
        self.installed_projects = {}
        self.sitepy_installed = False
        # Always read easy_install options, even if we are subclassed, or have
        # an independent instance created. This ensures that defaults will
        # always come from the standard configuration file(s)' "easy_install"
        # section, even if this is a "develop" or "install" command, or some
        # other embedding.
        self._dry_run = None
        self.verbose = self.distribution.verbose
        self.distribution._set_command_options(
            self, self.distribution.get_option_dict('easy_install')
        )
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = sys.version[:3]
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
    def finalize_options(self):
        """Resolve all options: expand paths, wire in defaults from related
        distutils commands, and build the package index / local environment."""
        self.version and self._render_version()
        py_version = sys.version.split()[0]
        prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
        # Variables available for $-substitution in directory options.
        # NOTE(review): py_version[0:3] / py_version_nodot assume a
        # single-digit minor version (breaks on 3.10+) -- upstream legacy.
        self.config_vars = {
            'dist_name': self.distribution.get_name(),
            'dist_version': self.distribution.get_version(),
            'dist_fullname': self.distribution.get_fullname(),
            'py_version': py_version,
            'py_version_short': py_version[0:3],
            'py_version_nodot': py_version[0] + py_version[2],
            'sys_prefix': prefix,
            'prefix': prefix,
            'sys_exec_prefix': exec_prefix,
            'exec_prefix': exec_prefix,
            # Only python 3.2+ has abiflags
            'abiflags': getattr(sys, 'abiflags', ''),
        }
        if site.ENABLE_USER_SITE:
            self.config_vars['userbase'] = self.install_userbase
            self.config_vars['usersite'] = self.install_usersite
        self._fix_install_dir_for_user_site()
        self.expand_basedirs()
        self.expand_dirs()
        self._expand(
            'install_dir', 'script_dir', 'build_directory',
            'site_dirs',
        )
        # If a non-default installation directory was specified, default the
        # script directory to match it.
        if self.script_dir is None:
            self.script_dir = self.install_dir
        if self.no_find_links is None:
            self.no_find_links = False
        # Let install_dir get set by install_lib command, which in turn
        # gets its info from the install command, and takes into account
        # --prefix and --home and all that other crud.
        self.set_undefined_options(
            'install_lib', ('install_dir', 'install_dir')
        )
        # Likewise, set default script_dir from 'install_scripts.install_dir'
        self.set_undefined_options(
            'install_scripts', ('install_dir', 'script_dir')
        )
        if self.user and self.install_purelib:
            self.install_dir = self.install_purelib
            self.script_dir = self.install_scripts
        # default --record from the install command
        self.set_undefined_options('install', ('record', 'record'))
        # Should this be moved to the if statement below? It's not used
        # elsewhere
        normpath = map(normalize_path, sys.path)
        self.all_site_dirs = get_site_dirs()
        # Validate any user-supplied --site-dirs entries before trusting them.
        if self.site_dirs is not None:
            site_dirs = [
                os.path.expanduser(s.strip()) for s in
                self.site_dirs.split(',')
            ]
            for d in site_dirs:
                if not os.path.isdir(d):
                    log.warn("%s (in --site-dirs) does not exist", d)
                elif normalize_path(d) not in normpath:
                    raise DistutilsOptionError(
                        d + " (in --site-dirs) is not on sys.path"
                    )
                else:
                    self.all_site_dirs.append(normalize_path(d))
        if not self.editable:
            self.check_site_dir()
        self.index_url = self.index_url or "https://pypi.python.org/simple"
        # shadow_path: search path that prefers our install/script dirs.
        self.shadow_path = self.all_site_dirs[:]
        for path_item in self.install_dir, normalize_path(self.script_dir):
            if path_item not in self.shadow_path:
                self.shadow_path.insert(0, path_item)
        if self.allow_hosts is not None:
            hosts = [s.strip() for s in self.allow_hosts.split(',')]
        else:
            hosts = ['*']
        if self.package_index is None:
            self.package_index = self.create_index(
                self.index_url, search_path=self.shadow_path, hosts=hosts,
            )
        self.local_index = Environment(self.shadow_path + sys.path)
        if self.find_links is not None:
            if isinstance(self.find_links, six.string_types):
                self.find_links = self.find_links.split()
        else:
            self.find_links = []
        if self.local_snapshots_ok:
            self.package_index.scan_egg_links(self.shadow_path + sys.path)
        if not self.no_find_links:
            self.package_index.add_find_links(self.find_links)
        self.set_undefined_options('install_lib', ('optimize', 'optimize'))
        # Coerce --optimize to an int in {0, 1, 2}.
        if not isinstance(self.optimize, int):
            try:
                self.optimize = int(self.optimize)
                if not (0 <= self.optimize <= 2):
                    raise ValueError
            except ValueError:
                raise DistutilsOptionError("--optimize must be 0, 1, or 2")
        if self.editable and not self.build_directory:
            raise DistutilsArgError(
                "Must specify a build directory (-b) when using --editable"
            )
        if not self.args:
            raise DistutilsArgError(
                "No urls, filenames, or requirements specified (see --help)")
        self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
]
self._expand_attrs(dirs)
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except Exception:
pid = random.randint(0, sys.maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
    def warn_deprecated_options(self):
        # Extension hook for warning about deprecated options; nothing is
        # currently deprecated, so this is intentionally a no-op.
        pass
    def check_site_dir(self):
        """Verify that self.install_dir is .pth-capable dir, if needed"""
        instdir = normalize_path(self.install_dir)
        pth_file = os.path.join(instdir, 'easy-install.pth')
        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
        is_site_dir = instdir in self.all_site_dirs
        if not is_site_dir and not self.multi_version:
            # No? Then directly test whether it does .pth file processing
            is_site_dir = self.check_pth_processing()
        else:
            # make sure we can write to target dir
            testfile = self.pseudo_tempname() + '.write-test'
            test_exists = os.path.exists(testfile)
            try:
                if test_exists:
                    os.unlink(testfile)
                open(testfile, 'w').close()
                os.unlink(testfile)
            except (OSError, IOError):
                self.cant_write_to_target()
        if not is_site_dir and not self.multi_version:
            # Can't install non-multi to non-site dir
            raise DistutilsError(self.no_default_version_msg())
        # Only site-capable directories get an easy-install.pth manager.
        if is_site_dir:
            if self.pth_file is None:
                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
        else:
            self.pth_file = None
        PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
        if instdir not in map(normalize_path, filter(None, PYTHONPATH)):
            # only PYTHONPATH dirs need a site.py, so pretend it's there
            self.sitepy_installed = True
        elif self.multi_version and not os.path.exists(pth_file):
            self.sitepy_installed = True  # don't need site.py in this case
            self.pth_file = None  # and don't create a .pth file
        self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://setuptools.readthedocs.io/en/latest/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
    def check_pth_processing(self):
        """Empirically verify whether .pth files are supported in inst. dir"""
        instdir = self.install_dir
        log.info("Checking .pth file support in %s", instdir)
        pth_file = self.pseudo_tempname() + ".pth"
        ok_file = pth_file + '.ok'
        ok_exists = os.path.exists(ok_file)
        # Payload for the probe .pth: a fresh interpreter that processes it
        # will create ok_file.  Interpolated via .format(**locals()) below,
        # so the name 'ok_file' is load-bearing.
        tmpl = _one_liner("""
            import os
            f = open({ok_file!r}, 'w')
            f.write('OK')
            f.close()
            """) + '\n'
        try:
            if ok_exists:
                os.unlink(ok_file)
            dirname = os.path.dirname(ok_file)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            f = open(pth_file, 'w')
        except (OSError, IOError):
            self.cant_write_to_target()
        else:
            try:
                f.write(tmpl.format(**locals()))
                f.close()
                f = None
                executable = sys.executable
                if os.name == 'nt':
                    dirname, basename = os.path.split(executable)
                    alt = os.path.join(dirname, 'pythonw.exe')
                    use_alt = (
                        basename.lower() == 'python.exe' and
                        os.path.exists(alt)
                    )
                    if use_alt:
                        # use pythonw.exe to avoid opening a console window
                        executable = alt
                from distutils.spawn import spawn
                # -E ignores PYTHON* env vars so only site processing runs.
                spawn([executable, '-E', '-c', 'pass'], 0)
                if os.path.exists(ok_file):
                    log.info(
                        "TEST PASSED: %s appears to support .pth files",
                        instdir
                    )
                    return True
            finally:
                # Always clean up the probe files.
                if f:
                    f.close()
                if os.path.exists(ok_file):
                    os.unlink(ok_file)
                if os.path.exists(pth_file):
                    os.unlink(pth_file)
        if not self.multi_version:
            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
        return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
@contextlib.contextmanager
def _tmpdir(self):
tmpdir = tempfile.mkdtemp(prefix=six.u("easy_install-"))
try:
# cast to str as workaround for #709 and #710 and #712
yield str(tmpdir)
finally:
os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
    def easy_install(self, spec, deps=False):
        """Resolve *spec* (Requirement, URL, or local path), fetch/build it
        in a temp dir, and install it; process dependencies when *deps*."""
        if not self.editable:
            self.install_site_py()
        with self._tmpdir() as tmpdir:
            # Strings may be URLs, local paths, or requirement expressions.
            if not isinstance(spec, Requirement):
                if URL_SCHEME(spec):
                    # It's a url, download it to tmpdir and process
                    self.not_editable(spec)
                    dl = self.package_index.download(spec, tmpdir)
                    return self.install_item(None, dl, tmpdir, deps, True)
                elif os.path.exists(spec):
                    # Existing file or directory, just process it directly
                    self.not_editable(spec)
                    return self.install_item(None, spec, tmpdir, deps, True)
                else:
                    spec = parse_requirement_arg(spec)
            self.check_editable(spec)
            dist = self.package_index.fetch_distribution(
                spec, tmpdir, self.upgrade, self.editable,
                not self.always_copy, self.local_index
            )
            if dist is None:
                msg = "Could not find suitable distribution for %r" % spec
                if self.always_copy:
                    msg += " (--always-copy skips system and development eggs)"
                raise DistutilsError(msg)
            elif dist.precedence == DEVELOP_DIST:
                # .egg-info dists don't need installing, just process deps
                self.process_distribution(spec, dist, deps, "Using")
                return dist
            else:
                return self.install_item(spec, dist.location, tmpdir, deps)
    def install_item(self, spec, download, tmpdir, deps, install_needed=False):
        """Install the artifact at *download*; return the dist matching
        *spec*, or None if nothing matched."""
        # Installation is also needed if file in tmpdir or is not an egg
        install_needed = install_needed or self.always_copy
        install_needed = install_needed or os.path.dirname(download) == tmpdir
        install_needed = install_needed or not download.endswith('.egg')
        install_needed = install_needed or (
            self.always_copy_from is not None and
            os.path.dirname(normalize_path(download)) ==
            normalize_path(self.always_copy_from)
        )
        if spec and not install_needed:
            # at this point, we know it's a local .egg, we just don't know if
            # it's already installed.
            for dist in self.local_index[spec.project_name]:
                if dist.location == download:
                    break
            else:
                install_needed = True  # it's not in the local index
        log.info("Processing %s", os.path.basename(download))
        if install_needed:
            dists = self.install_eggs(spec, download, tmpdir)
            for dist in dists:
                self.process_distribution(spec, dist, deps)
        else:
            dists = [self.egg_distribution(download)]
            self.process_distribution(spec, dists[0], deps, "Using")
        if spec is not None:
            for dist in dists:
                if dist in spec:
                    return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
    def process_distribution(self, requirement, dist, deps=True, *info):
        """Register *dist* as installed (indexes, .pth, scripts) and, when
        requested, resolve and install its dependencies."""
        self.update_pth(dist)
        self.package_index.add(dist)
        if dist in self.local_index[dist.key]:
            self.local_index.remove(dist)
        self.local_index.add(dist)
        self.install_egg_scripts(dist)
        self.installed_projects[dist.key] = dist
        log.info(self.installation_report(requirement, dist, *info))
        if (dist.has_metadata('dependency_links.txt') and
                not self.no_find_links):
            self.package_index.add_find_links(
                dist.get_metadata_lines('dependency_links.txt')
            )
        if not deps and not self.always_copy:
            return
        elif requirement is not None and dist.key != requirement.key:
            log.warn("Skipping dependencies for %s", dist)
            return  # XXX this is not the distribution we were looking for
        elif requirement is None or dist not in requirement:
            # if we wound up with a different version, resolve what we've got
            distreq = dist.as_requirement()
            requirement = Requirement(str(distreq))
        log.info("Processing dependencies for %s", requirement)
        try:
            distros = WorkingSet([]).resolve(
                [requirement], self.local_index, self.easy_install
            )
        except DistributionNotFound as e:
            raise DistutilsError(str(e))
        except VersionConflict as e:
            raise DistutilsError(e.report())
        if self.always_copy or self.always_copy_from:
            # Force all the relevant distros to be copied or activated
            for dist in distros:
                if dist.key not in self.installed_projects:
                    self.easy_install(dist.as_requirement())
        log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
    def maybe_move(self, spec, dist_filename, setup_base):
        """Move the unpacked source into --build-directory if possible;
        return the (possibly new) setup base directory."""
        dst = os.path.join(self.build_directory, spec.key)
        if os.path.exists(dst):
            msg = (
                "%r already exists in %s; build directory %s will not be kept"
            )
            log.warn(msg, spec.key, self.build_directory, setup_base)
            return setup_base
        if os.path.isdir(dist_filename):
            setup_base = dist_filename
        else:
            if os.path.dirname(dist_filename) == setup_base:
                os.unlink(dist_filename)  # get it out of the tmp dir
            contents = os.listdir(setup_base)
            if len(contents) == 1:
                dist_filename = os.path.join(setup_base, contents[0])
                if os.path.isdir(dist_filename):
                    # if the only thing there is a directory, move it instead
                    setup_base = dist_filename
        ensure_directory(dst)
        shutil.move(setup_base, dst)
        return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
    def install_script(self, dist, script_name, script_text, dev_path=None):
        """Generate a legacy script wrapper and install it"""
        spec = str(dist.as_requirement())
        is_script = is_python_script(script_text, script_name)
        if is_script:
            # NOTE: the template is %-interpolated against locals(), so the
            # local names here ('spec', 'dev_path', ...) are part of the
            # template contract -- do not rename them.
            body = self._load_template(dev_path) % locals()
            script_text = ScriptWriter.get_header(script_text) + body
        self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://github.com/pypa/setuptools/issues/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
    def install_eggs(self, spec, dist_filename, tmpdir):
        """Install *dist_filename* (egg, wininst .exe, sdist archive, or
        source dir); return the list of installed Distribution objects."""
        # .egg dirs or files are already built, so just return them
        if dist_filename.lower().endswith('.egg'):
            return [self.install_egg(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.exe'):
            return [self.install_exe(dist_filename, tmpdir)]
        # Anything else, try to extract and build
        setup_base = tmpdir
        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
        elif os.path.isdir(dist_filename):
            setup_base = os.path.abspath(dist_filename)
        if (setup_base.startswith(tmpdir)  # something we downloaded
                and self.build_directory and spec is not None):
            setup_base = self.maybe_move(spec, dist_filename, setup_base)
        # Find the setup.py file
        setup_script = os.path.join(setup_base, 'setup.py')
        if not os.path.exists(setup_script):
            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
            if not setups:
                raise DistutilsError(
                    "Couldn't find a setup script in %s" %
                    os.path.abspath(dist_filename)
                )
            if len(setups) > 1:
                raise DistutilsError(
                    "Multiple setup scripts in %s" %
                    os.path.abspath(dist_filename)
                )
            setup_script = setups[0]
        # Now run it, and return the result
        if self.editable:
            log.info(self.report_editable(spec, setup_script))
            return []
        else:
            return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
    def install_egg(self, egg_path, tmpdir):
        """Move/copy/extract the egg at *egg_path* into install_dir and
        return the resulting Distribution."""
        destination = os.path.join(
            self.install_dir,
            os.path.basename(egg_path),
        )
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)
        dist = self.egg_distribution(egg_path)
        if not samefile(egg_path, destination):
            # Clear whatever currently occupies the destination.
            if os.path.isdir(destination) and not os.path.islink(destination):
                dir_util.remove_tree(destination, dry_run=self.dry_run)
            elif os.path.exists(destination):
                self.execute(
                    os.unlink,
                    (destination,),
                    "Removing " + destination,
                )
            try:
                new_dist_is_zipped = False
                # Pick move vs copy (tmpdir contents can be moved) and
                # whether the egg must be extracted (see should_unzip).
                if os.path.isdir(egg_path):
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copytree, "Copying"
                elif self.should_unzip(dist):
                    self.mkpath(destination)
                    f, m = self.unpack_and_compile, "Extracting"
                else:
                    new_dist_is_zipped = True
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copy2, "Copying"
                self.execute(
                    f,
                    (egg_path, destination),
                    (m + " %s to %s") % (
                        os.path.basename(egg_path),
                        os.path.dirname(destination)
                    ),
                )
                update_dist_caches(
                    destination,
                    fix_zipimporter_caches=new_dist_is_zipped,
                )
            except Exception:
                # Keep import caches consistent even on failure.
                update_dist_caches(destination, fix_zipimporter_caches=False)
                raise
        self.add_output(destination)
        return self.egg_distribution(destination)
    def install_exe(self, dist_filename, tmpdir):
        """Convert a bdist_wininst .exe installer into an egg and install
        it; return the resulting Distribution."""
        # See if it's valid, get data
        cfg = extract_wininst_cfg(dist_filename)
        if cfg is None:
            raise DistutilsError(
                "%s is not a valid distutils Windows .exe" % dist_filename
            )
        # Create a dummy distribution object until we build the real distro
        dist = Distribution(
            None,
            project_name=cfg.get('metadata', 'name'),
            version=cfg.get('metadata', 'version'), platform=get_platform(),
        )
        # Convert the .exe to an unpacked egg
        egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
        dist.location = egg_path
        egg_tmp = egg_path + '.tmp'
        _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
        pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
        ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
        dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
        self.exe_to_egg(dist_filename, egg_tmp)
        # Write EGG-INFO/PKG-INFO
        if not os.path.exists(pkg_inf):
            f = open(pkg_inf, 'w')
            f.write('Metadata-Version: 1.0\n')
            for k, v in cfg.items('metadata'):
                if k != 'target_version':
                    f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
            f.close()
        script_dir = os.path.join(_egg_info, 'scripts')
        # delete entry-point scripts to avoid duping
        self.delete_blockers([
            os.path.join(script_dir, args[0])
            for args in ScriptWriter.get_args(dist)
        ])
        # Build .egg file from tmpdir
        bdist_egg.make_zipfile(
            egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
        )
        # install the .egg
        return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
    """Extract a bdist_wininst to the directories an egg would use"""
    # Check for .pth file and set up prefix translations
    prefixes = get_exe_prefixes(dist_filename)
    to_compile = []
    native_libs = []
    top_level = {}

    def process(src, dst):
        # unpack_archive filter: remap wininst layout onto egg layout.
        # Returns the destination path, or None to skip the member.
        s = src.lower()
        for old, new in prefixes:
            if s.startswith(old):
                src = new + src[len(old):]
                parts = src.split('/')
                dst = os.path.join(egg_tmp, *parts)
                dl = dst.lower()
                if dl.endswith('.pyd') or dl.endswith('.dll'):
                    # Extension modules: strip platform decoration and
                    # remember them for native_libs.txt / stub creation.
                    parts[-1] = bdist_egg.strip_module(parts[-1])
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    native_libs.append(src)
                elif dl.endswith('.py') and old != 'SCRIPTS/':
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    to_compile.append(dst)
                return dst
        if not src.endswith('.pth'):
            log.warn("WARNING: can't process %s", src)
        return None

    # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
    unpack_archive(dist_filename, egg_tmp, process)
    stubs = []
    for res in native_libs:
        if res.lower().endswith('.pyd'):  # create stubs for .pyd's
            parts = res.split('/')
            resource = parts[-1]
            parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
            pyfile = os.path.join(egg_tmp, *parts)
            to_compile.append(pyfile)
            stubs.append(pyfile)
            bdist_egg.write_stub(resource, pyfile)
    self.byte_compile(to_compile)  # compile .py's
    bdist_egg.write_safety_flag(
        os.path.join(egg_tmp, 'EGG-INFO'),
        bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag

    for name in 'top_level', 'native_libs':
        if locals()[name]:
            # Write EGG-INFO/top_level.txt and EGG-INFO/native_libs.txt
            # from the lists collected above (accessed via locals()).
            txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
            if not os.path.exists(txt):
                f = open(txt, 'w')
                f.write('\n'.join(locals()[name]) + '\n')
                f.close()
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
def installation_report(self, req, dist, what="Installed"):
    """Helpful installation message for display to package users

    Builds a short report of where *dist* was installed, adding guidance
    for --multi-version installs and for install dirs missing from
    sys.path.
    """
    msg = "\n%(what)s %(eggloc)s%(extras)s"
    if self.multi_version and not self.no_report:
        msg += '\n' + self.__mv_warning
        if self.install_dir not in map(normalize_path, sys.path):
            msg += '\n' + self.__id_warning

    # These locals look unused but are consumed by the message template(s)
    # through ``msg % locals()`` below.
    eggloc = dist.location
    name = dist.project_name
    version = dist.version
    extras = ''  # TODO: self.report_extras(req, dist)
    return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip()
def report_editable(self, spec, setup_script):
    """Return the message describing an --editable extraction of *spec*."""
    dirname = os.path.dirname(setup_script)
    python = sys.executable
    # 'spec', 'dirname' and 'python' are consumed by the template via
    # ``% locals()``.
    return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
    """Run a distribution's setup script in-process with extra *args*.

    This command's verbosity and dry-run settings are translated into the
    matching distutils command-line flags.  A SystemExit raised by the
    script is converted into a DistutilsError.
    """
    # Make sure the in-process script picks up our patched commands.
    sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
    sys.modules.setdefault('distutils.command.egg_info', egg_info)

    args = list(args)
    if self.verbose > 2:
        v = 'v' * (self.verbose - 1)
        args.insert(0, '-' + v)
    elif self.verbose < 2:
        args.insert(0, '-q')
    if self.dry_run:
        args.insert(0, '-n')
    log.info(
        "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
    )
    try:
        # The module-level (sandboxed) run_setup, not this method.
        run_setup(setup_script, args)
    except SystemExit as v:
        raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
    """Build egg(s) from a source dist's setup script, then install them.

    Runs ``bdist_egg`` into a temporary dist dir and installs every egg
    produced.  Returns the list of installed Distribution objects.  The
    temporary dir is always removed.
    """
    args = ['bdist_egg', '--dist-dir']

    dist_dir = tempfile.mkdtemp(
        prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
    )
    try:
        # Propagate our fetch options so setup_requires resolution in the
        # child setup script honors them.
        self._set_fetcher_options(os.path.dirname(setup_script))
        args.append(dist_dir)

        self.run_setup(setup_script, setup_base, args)
        all_eggs = Environment([dist_dir])
        eggs = []
        for key in all_eggs:
            for dist in all_eggs[key]:
                eggs.append(self.install_egg(dist.location, setup_base))
        if not eggs and not self.dry_run:
            log.warn("No eggs found in %s (setup script problem?)",
                     dist_dir)
        return eggs
    finally:
        rmtree(dist_dir)
        log.set_verbosity(self.verbose)  # restore our log verbosity
def _set_fetcher_options(self, base):
    """
    When easy_install is about to run bdist_egg on a source dist, that
    source dist might have 'setup_requires' directives, requiring
    additional fetching. Ensure the fetcher options given to easy_install
    are available to that command as well.
    """
    # find the fetch options from easy_install and write them out
    # to the setup.cfg file.
    ei_opts = self.distribution.get_option_dict('easy_install').copy()
    # Fetch-related option names to forward.  ('site_dirs' was previously
    # listed twice; a membership tuple only needs each name once.)
    fetch_directives = (
        'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
    )
    fetch_options = {}
    for key, val in ei_opts.items():
        if key not in fetch_directives:
            continue
        # distutils option dicts map name -> (source, value); keep value.
        fetch_options[key.replace('_', '-')] = val[1]
    # create a settings dictionary suitable for `edit_config`
    settings = dict(easy_install=fetch_options)
    cfg_filename = os.path.join(base, 'setup.cfg')
    setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
    """Record *dist* in easy-install.pth, dropping stale entries first.

    No-op when .pth handling is disabled (``self.pth_file is None``).
    Also rewrites setuptools.pth when installing setuptools itself.
    """
    if self.pth_file is None:
        return

    for d in self.pth_file[dist.key]:  # drop old entries
        if self.multi_version or d.location != dist.location:
            log.info("Removing %s from easy-install.pth file", d)
            self.pth_file.remove(d)
            if d.location in self.shadow_path:
                self.shadow_path.remove(d.location)

    if not self.multi_version:
        if dist.location in self.pth_file.paths:
            log.info(
                "%s is already the active version in easy-install.pth",
                dist,
            )
        else:
            log.info("Adding %s to easy-install.pth file", dist)
            self.pth_file.add(dist)  # add new entry
            if dist.location not in self.shadow_path:
                self.shadow_path.append(dist.location)

    if not self.dry_run:

        self.pth_file.save()

        if dist.key == 'setuptools':
            # Ensure that setuptools itself never becomes unavailable!
            # XXX should this check for latest version?
            filename = os.path.join(self.install_dir, 'setuptools.pth')
            if os.path.islink(filename):
                os.unlink(filename)
            f = open(filename, 'wt')
            f.write(self.pth_file.make_relative(dist.location) + '\n')
            f.close()
def unpack_progress(self, src, dst):
    """Progress filter for unpack_archive: log each member, keep it."""
    # Progress filter for unpacking
    log.debug("Unpacking %s to %s", src, dst)
    return dst  # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
    """Unpack an egg into *destination*, byte-compile sources, fix modes."""
    to_compile = []
    to_chmod = []

    def pf(src, dst):
        # unpack_archive filter: collect .py files for byte-compilation
        # and shared libraries for mode fixing; returning None skips the
        # file (used on dry runs).
        if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
            to_compile.append(dst)
        elif dst.endswith('.dll') or dst.endswith('.so'):
            to_chmod.append(dst)
        self.unpack_progress(src, dst)
        return not self.dry_run and dst or None

    unpack_archive(egg_path, destination, pf)
    self.byte_compile(to_compile)
    if not self.dry_run:
        for f in to_chmod:
            # Grant world read/execute while clearing unsafe mode bits.
            mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
            chmod(f, mode)
def byte_compile(self, to_compile):
    """Byte-compile *to_compile*, honoring optimize level and dry-run.

    Skips entirely when the interpreter was started with -B /
    PYTHONDONTWRITEBYTECODE.
    """
    if sys.dont_write_bytecode:
        self.warn('byte-compiling is disabled, skipping.')
        return

    from distutils.util import byte_compile

    try:
        # try to make the byte compile messages quieter
        log.set_verbosity(self.verbose - 1)

        byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
        if self.optimize:
            byte_compile(
                to_compile, optimize=self.optimize, force=1,
                dry_run=self.dry_run,
            )
    finally:
        log.set_verbosity(self.verbose)  # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
    """Return the 'bad install directory or PYTHONPATH' error message."""
    template = self.__no_default_msg
    return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
    """Make sure there's a site.py in the target dir, if needed"""
    if self.sitepy_installed:
        return  # already did it, or don't need to

    sitepy = os.path.join(self.install_dir, "site.py")
    source = resource_string("setuptools", "site-patch.py")
    source = source.decode('utf-8')
    current = ""

    if os.path.exists(sitepy):
        log.debug("Checking existing site.py in %s", self.install_dir)
        with io.open(sitepy) as strm:
            current = strm.read()

        if not current.startswith('def __boot():'):
            # Refuse to clobber a site.py that we did not generate.
            raise DistutilsError(
                "%s is not a setuptools-generated site.py; please"
                " remove it." % sitepy
            )

    if current != source:
        # Missing or outdated: (re)write our patched site.py.
        log.info("Creating %s", sitepy)
        if not self.dry_run:
            ensure_directory(sitepy)
            with io.open(sitepy, 'w', encoding='utf-8') as strm:
                strm.write(source)
        self.byte_compile([sitepy])

    self.sitepy_installed = True
def create_home_path(self):
    """Create directories under ~."""
    if not self.user:
        return
    home = convert_path(os.path.expanduser("~"))
    for name, path in six.iteritems(self.config_vars):
        # NOTE(review): assumes every config_vars value is a string path;
        # non-string values would break startswith() — confirm upstream.
        if path.startswith(home) and not os.path.isdir(path):
            # Per-user dirs are created private (0700).
            self.debug_print("os.makedirs('%s', 0o700)" % path)
            os.makedirs(path, 0o700)
# Per-os.name install layouts used by _expand() when --prefix is given;
# '$base' is substituted with the prefix.
INSTALL_SCHEMES = dict(
    posix=dict(
        install_dir='$base/lib/python$py_version_short/site-packages',
        script_dir='$base/bin',
    ),
)

# Fallback (Windows-style) layout for platforms without an entry above.
DEFAULT_SCHEME = dict(
    install_dir='$base/Lib/site-packages',
    script_dir='$base/Scripts',
)
def _expand(self, *attrs):
    """Expand $-variables in the named attributes in place.

    Uses the 'install' command's config vars; when --prefix was given,
    'base' is overridden with it and unset attributes get their scheme
    defaults from INSTALL_SCHEMES / DEFAULT_SCHEME.
    """
    config_vars = self.get_finalized_command('install').config_vars

    if self.prefix:
        # Set default install_dir/scripts from --prefix
        config_vars = config_vars.copy()
        config_vars['base'] = self.prefix
        scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
        for attr, val in scheme.items():
            if getattr(self, attr, None) is None:
                setattr(self, attr, val)

    from distutils.util import subst_vars

    for attr in attrs:
        val = getattr(self, attr)
        if val is not None:
            val = subst_vars(val, config_vars)
            if os.name == 'posix':
                val = os.path.expanduser(val)
            setattr(self, attr, val)
def get_site_dirs():
    """Return a list of 'site' directories for this interpreter.

    Includes PYTHONPATH entries, prefix-derived site-packages locations,
    sysconfig's purelib/platlib, the per-user site dir (when enabled) and
    anything ``site.getsitepackages()`` reports.  All paths normalized.
    """
    sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
                                            '').split(os.pathsep) if _f]

    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    # 'X.Y' for the running interpreter.  ``sys.version[:3]`` truncates the
    # minor version on Python >= 3.10 (yielding '3.1'), so derive the short
    # version from sys.version_info instead.
    py_version_short = '%d.%d' % sys.version_info[:2]
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                sitedirs.extend([
                    os.path.join(
                        prefix,
                        "lib",
                        "python" + py_version_short,
                        "site-packages",
                    ),
                    os.path.join(prefix, "lib", "site-python"),
                ])
            else:
                sitedirs.extend([
                    prefix,
                    os.path.join(prefix, "lib", "site-packages"),
                ])
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        home_sp = os.path.join(
                            home,
                            'Library',
                            'Python',
                            py_version_short,
                            'site-packages',
                        )
                        sitedirs.append(home_sp)
    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)

    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)

    try:
        sitedirs.extend(site.getsitepackages())
    except AttributeError:
        # Some embedded/virtualenv site modules lack getsitepackages().
        pass

    sitedirs = list(map(normalize_path, sitedirs))

    return sitedirs
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages

    For each input directory, yields ``(dirname, filenames)`` for the
    directory itself and for every existing directory referenced from its
    third-party .pth files (skipping easy_install's own .pth files and
    'import' lines).  Duplicates are yielded only once.
    """
    seen = {}

    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue

        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue

        files = os.listdir(dirname)
        yield dirname, files

        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue

            # Read the .pth file
            f = open(os.path.join(dirname, name))
            lines = list(yield_lines(f))
            f.close()

            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if not line.startswith("import"):
                    line = normalize_path(line.rstrip())
                    if line not in seen:
                        seen[line] = 1
                        if not os.path.isdir(line):
                            continue
                        yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a configparser.RawConfigParser, or None when *dist_filename*
    is not a valid wininst installer (no zip end record, no prepended
    config block, unknown tag, or missing/unparseable metadata sections).
    """
    f = open(dist_filename, 'rb')
    try:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None

        # Size of the data prepended before the zip archive proper.
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)

        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag

        f.seek(prepended - (12 + cfglen))
        init = {'version': '', 'target_version': ''}
        cfg = configparser.RawConfigParser(init)
        try:
            part = f.read(cfglen)
            # Read up to the first null byte.
            config = part.split(b'\0', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            # be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            # readfp() was deprecated in Python 3.2 and removed in 3.12;
            # prefer read_file() when available, falling back for old
            # Pythons whose RawConfigParser only has readfp().
            reader = getattr(cfg, 'read_file', None) or cfg.readfp
            reader(six.StringIO(config))
        except configparser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
    finally:
        f.close()
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file

    Scans the installer's zip contents for the .egg-info metadata dir and
    for single-directory .pth entries, then returns a list of
    ``(lowercased_source_prefix, egg_prefix)`` pairs sorted longest-first
    so the most specific prefix matches first in exe_to_egg().
    """

    prefixes = [
        ('PURELIB/', ''),
        ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    z = zipfile.ZipFile(exe_filename)
    try:
        for info in z.infolist():
            name = info.filename
            parts = name.split('/')
            if len(parts) == 3 and parts[2] == 'PKG-INFO':
                if parts[1].endswith('.egg-info'):
                    # Map the metadata directory onto EGG-INFO/ and stop.
                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
                    break
            if len(parts) != 2 or not name.endswith('.pth'):
                continue
            if name.endswith('-nspkg.pth'):
                continue
            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = z.read(name)
                if six.PY3:
                    contents = contents.decode()
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        # Each plain .pth entry becomes another prefix
                        # mapped to the egg root.
                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
    finally:
        z.close()
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # True when the in-memory path list differs from the file on disk.
    dirty = False

    def __init__(self, filename, sitedirs=()):
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        # Register every distribution found at the paths already listed.
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        """Read self.filename into self.paths, pruning dead/dup entries."""
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()

        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # Trim trailing blank lines.
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return

        rel_paths = list(map(self.make_relative, self.paths))
        if rel_paths:
            log.debug("Saving %s", self.filename)
            lines = self._wrap_lines(rel_paths)
            data = '\n'.join(lines) + '\n'

            # Replace a symlinked .pth with a real file before writing.
            if os.path.islink(self.filename):
                os.unlink(self.filename)
            with open(self.filename, 'wt') as f:
                f.write(data)

        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)

        self.dirty = False

    @staticmethod
    def _wrap_lines(lines):
        # Hook point: subclasses may add boilerplate around the path lines.
        return lines

    def add(self, dist):
        """Add `dist` to the distribution map"""
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        """Return *path* relative to the .pth file's dir when possible."""
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            # Not under basedir: keep the absolute path unchanged.
            return path
class RewritePthDistributions(PthDistributions):
    """PthDistributions variant that brackets the saved paths with code
    moving the new sys.path entries to a stable insertion point at
    interpreter startup."""

    @classmethod
    def _wrap_lines(cls, lines):
        yield cls.prelude
        for line in lines:
            yield line
        yield cls.postlude

    # Runs first: remember how long sys.path was before our entries.
    prelude = _one_liner("""
        import sys
        sys.__plen = len(sys.path)
        """)
    # Runs last: move our entries to the saved insertion point.
    postlude = _one_liner("""
        import sys
        new = sys.path[sys.__plen:]
        del sys.path[sys.__plen:]
        p = getattr(sys, '__egginsert', 0)
        sys.path[p:p] = new
        sys.__egginsert = p + len(new)
        """)
# Opt-in (via environment variable) to the sys.path-rewriting .pth flavor.
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
    PthDistributions = RewritePthDistributions
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        # Already a text pattern (Python < 3.1.4 / < 3.2.1): reuse as-is.
        return first_line_re
    # Newer Pythons compile first_line_re from bytes; recompile as text.
    return re.compile(pattern.decode())
def auto_chmod(func, arg, exc):
    """``shutil.rmtree`` onerror handler.

    On Windows, read-only files make os.unlink/os.remove fail; make the
    file writable and retry.  Otherwise re-raise the active exception,
    annotated with the failing call.
    """
    if func in [os.unlink, os.remove] and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, tb = sys.exc_info()
    # The previous implementation passed ``(ev[0], ev[1] + ...)`` to
    # six.reraise, which subscripts the exception *instance* and therefore
    # fails on Python 3.  Annotate the exception's args and re-raise it
    # with its original traceback instead.
    ev.args = ev.args + (" %s %s" % (func, arg),)
    six.reraise(et, ev, tb)
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix any globally cached `dist_path` related data

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution, are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter instance
    is used to load data from a new zipped egg archive, it may cause the
    operation to attempt to locate the requested data in the wrong location -
    one indicated by the original distribution's zip archive directory
    information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or even worse, it may fail silently &
    return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the new
    distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we can not catch every conceivable
    usage from here, and we clear only those that we know of and have found to
    cause problems if left alive. Any remaining caches should be updated by
    whomever is in charge of maintaining them, i.e. they should be ready to
    handle us replacing their zip archives with new distributions at runtime.

    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    # set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Here, even though we do not want to fix existing and now stale
        # zipimporter cache information, we still want to remove it. Related to
        # Python's zip archive directory information cache, we clear each of
        # its stale entries in two phases:
        # 1. Clear the entry so attempting to access zip archive information
        #    via any existing stale zipimport.zipimporter instances fails.
        # 2. Remove the entry from the cache so any newly constructed
        #    zipimport.zipimporter instances do not end up using old stale
        #    zip archive directory information.
        # This whole stale data removal step does not seem strictly necessary,
        # but has been left in because it was done before we started replacing
        # the zip archive directory information cache content if possible, and
        # there are no relevant unit tests that we can depend on to tell us if
        # this is really needed.
        _remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative path spellings (e.g. those using different character case or
    those using alternative path separators) related to the same path are
    included. Any sub-path entries are included as well, i.e. those
    corresponding to zip archives embedded in other zip archives.
    """
    boundary = len(normalized_path)

    def related(key):
        # A key matches when its normalized form is the path itself or a
        # sub-path of it (next character is a path separator).
        candidate = normalize_path(key)
        if not candidate.startswith(normalized_path):
            return False
        return candidate[boundary:boundary + 1] in (os.sep, '')

    return [key for key in cache if related(key)]
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path.

    Any sub-path entries are processed as well, i.e. those corresponding to zip
    archives embedded in other zip archives.

    Given updater is a callable taking a cache entry key and the original entry
    (after already removing the entry from the cache), and expected to update
    the entry and possibly return a new one to be inserted in its place.
    Returning None indicates that the entry should not be replaced with a new
    one. If no updater is given, the cache entries are simply removed without
    any additional processing, the same as if the updater simply returned None.

    """
    for p in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. pypy's custom zipimport._zip_directory_cache implementation does
        # not support the complete dict interface:
        # * Does not support item assignment, thus not allowing this function
        #    to be used only for removing existing cache entries.
        #  * Does not support the dict.pop() method, forcing us to use the
        #    get/del patterns instead. For more detailed information see the
        #    following links:
        # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
        # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
        old_entry = cache[p]
        del cache[p]
        new_entry = updater and updater(p, old_entry)
        if new_entry is not None:
            cache[p] = new_entry
def _uncache(normalized_path, cache):
    """Remove (without replacement) all cache entries related to the path."""
    _update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    """Empty and drop stale zip directory cache entries for the path."""
    def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
        # Empty the shared dict first so any stale zipimporter instances
        # holding it fail fast; returning None makes the caller drop the
        # cache entry itself.
        old_entry.clear()

    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:

    def _replace_zip_directory_cache_data(normalized_path):
        """Refresh cached zip directory info for the path in place."""
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            # Constructing a fresh zipimporter repopulates the shared cache
            # entry for this path as a side effect.
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry

        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
    "Is this string a valid Python script?"
    try:
        compile(text, filename, 'exec')
    except (SyntaxError, TypeError):
        # Not parseable as Python (or not even a string).
        return False
    return True
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)"""
    try:
        with io.open(executable, encoding='latin-1') as stream:
            head = stream.read(2)
    except (OSError, IOError):
        # Unreadable: preserve the historical fallback of returning the
        # path itself (a truthy value).
        return executable
    return head == '#!'
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules"""
    # list2cmdline implements the MS C runtime quoting rules for us.
    return subprocess.list2cmdline((arg,))
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
    """
    if filename.endswith(('.py', '.pyw')):
        # The extension alone marks it as Python.
        return True
    if is_python(script_text, filename):
        # Syntactically valid Python source.
        return True
    if script_text.startswith('#!'):
        # It begins with a '#!' line, so check if 'python' is in it somewhere
        shebang = script_text.splitlines()[0]
        return 'python' in shebang.lower()
    return False  # Not any Python I can recognize
try:
    # Prefer the platform's real chmod when available.
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility
    def _chmod(*args):
        # No-op stand-in: this platform provides no chmod.
        pass
def chmod(path, mode):
    """Best-effort chmod: log the change and swallow OS errors."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        # Non-fatal: e.g. filesystems that don't support mode changes.
        log.debug("chmod failed: %s", e)
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin to
    those passed to Popen.
    """

    # Extra interpreter options (e.g. ['-x']); replaced per-instance by
    # install_options().
    options = []
    # Keyword arguments forwarded to shlex.split in from_string().
    split_args = dict()

    @classmethod
    def best(cls):
        """
        Choose the best CommandSpec class based on environmental conditions.
        """
        return cls

    @classmethod
    def _sys_executable(cls):
        # Honor the macOS venv launcher override when present.
        _default = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', _default)

    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which may
        be None.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)

    @classmethod
    def from_environment(cls):
        # Default spec: just the current interpreter.
        return cls([cls._sys_executable()])

    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a command
        line parseable by shlex.split.
        """
        items = shlex.split(string, **cls.split_args)
        return cls(items)

    def install_options(self, script_text):
        """Adopt interpreter options from the script's own #! line."""
        self.options = shlex.split(self._extract_options(script_text))
        cmdline = subprocess.list2cmdline(self)
        if not isascii(cmdline):
            # Non-ASCII interpreter path: force -x so the #! line itself
            # is skipped by the interpreter.
            self.options[:0] = ['-x']

    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any options from the first line of the script.
        """
        first = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first)
        options = match.group(1) or '' if match else ''
        return options.strip()

    def as_header(self):
        """Render this spec (plus options) as a complete '#!...' line."""
        return self._render(self + list(self.options))

    @staticmethod
    def _strip_quotes(item):
        _QUOTES = '"\''
        for q in _QUOTES:
            if item.startswith(q) and item.endswith(q):
                return item[1:-1]
        return item

    @staticmethod
    def _render(items):
        cmdline = subprocess.list2cmdline(
            CommandSpec._strip_quotes(item.strip()) for item in items)
        return '#!' + cmdline + '\n'
# For pbr compat; will be removed in a future version.  (Module-level alias
# of the interpreter path computed by CommandSpec._sys_executable().)
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
    """CommandSpec that tokenizes with Windows (non-POSIX) shlex rules."""
    split_args = dict(posix=False)
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """

    # Generated script body; the %(spec)r/%(group)r/%(name)r placeholders
    # are filled via locals() in get_args().
    template = textwrap.dedent(r"""
        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
        __requires__ = %(spec)r
        import re
        import sys
        from pkg_resources import load_entry_point

        if __name__ == '__main__':
            sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
            sys.exit(
                load_entry_point(%(spec)r, %(group)r, %(name)r)()
            )
    """).lstrip()

    # Class used to build the #! header; overridden on Windows.
    command_spec_class = CommandSpec

    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", DeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)

    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", DeprecationWarning)
        if wininst:
            executable = "python.exe"
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()

    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's
        console_scripts and gui_scripts entry points.
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                cls._ensure_safe_name(name)
                # 'spec', 'group' and 'name' feed the template via locals().
                script_text = cls.template % locals()
                args = cls._get_script_args(type_, name, header, script_text)
                for res in args:
                    yield res

    @staticmethod
    def _ensure_safe_name(name):
        """
        Prevent paths in *_scripts entry point names.
        """
        has_path_sep = re.search(r'[\\/]', name)
        if has_path_sep:
            raise ValueError("Path separators not allowed in script names")

    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
            return WindowsScriptWriter.best()
        else:
            return cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)

    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
    # Use Windows (non-posix) splitting rules for shebang command lines.
    command_spec_class = WindowsCommandSpec
    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return cls.best()
    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        ext = dict(console='.pya', gui='.pyw')[type_]
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            # Message is filled via .format(**locals()); keep `ext` stable.
            msg = (
                "{ext} not listed in PATHEXT; scripts will not be "
                "recognized as executables."
            ).format(**locals())
            warnings.warn(msg, UserWarning)
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers
    @classmethod
    def _adjust_header(cls, type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        return new_header if cls._use_header(new_header) else orig_header
    @staticmethod
    def _use_header(new_header):
        """
        Should _adjust_header use the replaced header?
        On non-windows systems, always use. On
        Windows systems, only use the replaced header if it resolves
        to an executable on the system.
        """
        # Strip the leading '#!' and trailing newline, plus any quotes.
        clean_header = new_header[2:-1].strip('"')
        return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        # Emit the python stub script first, then the matching .exe launcher.
        yield (name + ext, hdr + script_text, 't', blockers)
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
# Module-level aliases kept for legacy callers of the pre-class API; the
# class methods themselves emit DeprecationWarnings pointing at get_args /
# get_header.
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.

    `type` should be either 'cli' or 'gui'; the matching 32- or 64-bit
    launcher is returned as a byte string.
    """
    suffix = "-64." if is_64bit() else "-32."
    launcher_fn = ('%s.exe' % type).replace(".", suffix)
    return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
    # The manifest template contains %(name)s placeholders filled from this
    # function's locals via vars() -- do not rename the `name` parameter.
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if six.PY2:
        return manifest % vars()
    else:
        return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """Remove a tree like shutil.rmtree, defaulting onerror to auto_chmod."""
    return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
    """Return the current process umask without permanently altering it."""
    # os.umask both sets and returns the previous mask: set a throwaway
    # value to read the old one, then immediately restore it.
    previous = os.umask(0o022)
    os.umask(previous)
    return previous
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools
    # Point argv at the egg itself: it becomes both the script name and the
    # requirement argument handed to easy_install's main().
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Run easy_install as a standalone command-line tool."""
    from setuptools import setup
    from setuptools.dist import Distribution
    class DistributionWithoutHelpCommands(Distribution):
        common_usage = ""
        def _show_help(self, *args, **kw):
            # Show help using the easy_install-specific usage text.
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)
    if argv is None:
        argv = sys.argv[1:]
    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands,
            **kw
        )
@contextlib.contextmanager
def _patch_usage():
    # Temporarily swap distutils.core.gen_usage for an easy_install-specific
    # usage renderer; the original is restored even if the body raises.
    import distutils.core
    USAGE = textwrap.dedent("""
        usage: %(script)s [options] requirement_or_url ...
        or: %(script)s --help
        """).lstrip()
    def gen_usage(script_name):
        return USAGE % dict(
            script=os.path.basename(script_name),
        )
    saved = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        distutils.core.gen_usage = saved
|
BitWriters/Zenith_project
|
zango/lib/python3.5/site-packages/setuptools/command/easy_install.py
|
Python
|
mit
| 85,933
|
[
"VisIt"
] |
0f977f4be88be71f24de422b14de41844eaac21a2dacc43e9be4058403d62bef
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# NOTE: raw string so that '\.' reaches the regex engine as a regex escape
# instead of being an invalid *string* escape (DeprecationWarning on py3.6+).
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
# Kubeconfig paths used by kubelet, kube-proxy and root's kubectl.
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
# Make the snap-installed binaries (kubectl, kubelet, ...) resolvable.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
# Charm-local key/value store (used e.g. for credentials and prev args).
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
    """Reset reactive state on charm upgrade so configuration re-runs."""
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()
    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')
    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    remove_state('kubernetes-worker.gpu.enabled')
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
    """Flag an upgrade when any attached snap resource has changed."""
    hookenv.status_set('maintenance', 'Checking resources')
    snap_resources = ('kubectl', 'kubelet', 'kube-proxy')
    resource_paths = [hookenv.resource_get(name) for name in snap_resources]
    if any_file_changed(resource_paths):
        set_upgrade_needed()
def set_upgrade_needed():
    """Mark the snaps as needing upgrade, honouring manual-upgrade config."""
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    # Auto-approve unless the operator requires manual upgrades; a fresh
    # install (no previous channel) is always auto-approved.
    if config.previous('channel') is None \
            or not config.get('require-manual-upgrade'):
        set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
    """Remove state, services and files left over from pre-snap installs."""
    # remove old states
    remove_state('kubernetes-worker.components.installed')
    # disable old services
    for old_service in ('kubelet', 'kube-proxy'):
        hookenv.log('Stopping {0} service.'.format(old_service))
        service_stop(old_service)
    # cleanup old files
    stale_paths = (
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes",
    )
    for path in stale_paths:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)
@when('config.changed.channel')
def channel_changed():
    """React to a snap channel change by flagging an upgrade."""
    set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
    """Block the unit until the operator runs the upgrade action."""
    hookenv.status_set('blocked',
                       'Needs manual upgrade, run the upgrade action')
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
    """Install (or refresh) the worker snaps from the configured channel."""
    check_resources_for_upgrade_needed()
    channel = hookenv.config('channel')
    # Same install order and status messages as before, just looped.
    for snap_name in ('kubectl', 'kubelet', 'kube-proxy'):
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    set_state('kubernetes-worker.restart-needed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
    ''' When this unit is destroyed:
    - delete the current node
    - stop the worker services
    '''
    try:
        # Only attempt deregistration if the worker was ever configured;
        # node names are registered lowercased.
        if os.path.isfile(kubeconfig_path):
            kubectl('delete', 'node', gethostname().lower())
    except CalledProcessError:
        # Best effort: the master may already be gone at destroy time.
        hookenv.log('Failed to unregister node.')
    service_stop('snap.kubelet.daemon')
    service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
    ''' Unpack the cni-plugins resource '''
    charm_dir = os.getenv('CHARM_DIR')
    # Get the resource via resource_get
    try:
        resource_name = 'cni-{}'.format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return
    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return
    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return
    hookenv.status_set('maintenance', 'Unpacking cni resource.')
    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)
    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]
    # install(1) with -D creates the destination directory as needed.
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)
    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)
    set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    raw = check_output(['kubelet', '--version'])
    # Output looks like b'Kubernetes v1.x.y'; keep only what follows ' v'.
    hookenv.application_version_set(raw.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    """Report that the unit is still converging while cluster DNS is absent."""
    # Kubelet initially starts without cluster DNS configured; until the
    # kube-control relation provides DNS details we stay in 'waiting'.
    hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
      'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
    """Refresh the Juju status line from the current kubelet state."""
    update_kubelet_status()
def update_kubelet_status():
    ''' There are different states that the kubelet can be in, where we are
    waiting for dns, waiting for cluster turnup, or ready to serve
    applications.'''
    failing = [svc for svc in ('kubelet', 'kube-proxy')
               if not _systemctl_is_active('snap.{}.daemon'.format(svc))]
    if failing:
        hookenv.status_set(
            'waiting', 'Waiting for {} to start.'.format(','.join(failing)))
    else:
        hookenv.status_set('active', 'Kubernetes worker running.')
def get_ingress_address(relation):
    """Return the best address for this unit on the given relation."""
    try:
        network_info = hookenv.network_get(relation.relation_name)
    except NotImplementedError:
        network_info = []
    if network_info and 'ingress-addresses' in network_info:
        # just grab the first one for now, maybe be more robust here?
        return network_info['ingress-addresses'][0]
    # Older juju without spaces support publishes no ingress-addresses;
    # fall back to the unit's private address.
    return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()
    # Create SANs that the tls layer will add to the server cert.
    sans = [
        common_name,
        get_ingress_address(kube_control),
        gethostname(),
    ]
    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']
    # NOTE(review): `or` short-circuits, so when an earlier key reports a
    # change the later data_changed() calls are skipped and their stored
    # snapshots are not refreshed this round; they will simply report the
    # change on a later invocation instead.
    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns) or
            data_changed('cluster-cidr', cluster_cidr)):
        set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
      'tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
      'tls_client.server.key.saved',
      'kube-control.dns.available', 'kube-control.auth.available',
      'cni.available', 'kubernetes-worker.restart-needed',
      'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()
    ingress_ip = get_ingress_address(kube_control)
    cluster_cidr = cni.get_config()['cidr']
    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return
    creds = db.get('credentials')
    # Record the credentials snapshot so later changes re-trigger handlers.
    data_changed('kube-control.creds', creds)
    # set --allow-privileged flag for kubelet
    set_privileged()
    create_config(random.choice(servers), creds)
    configure_kubelet(dns, ingress_ip)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    apply_node_labels()
    remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    """Tell the CNI subordinate that this unit is a worker (not a master)."""
    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
    """Drop ingress.available whenever the ingress toggle changes."""
    remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
    """Restart the kubernetes services after an SDN change on the host."""
    restart_unit_services()
    update_kubelet_status()
    remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    ''' If configuration has ingress daemon set enabled, launch the ingress load
    balancer and default http backend. Otherwise attempt deletion. '''
    # Guard clause: when ingress is enabled, just launch the controller.
    if hookenv.config().get('ingress'):
        launch_default_ingress_controller()
        return
    hookenv.log('Deleting the http backend and ingress.')
    kubectl_manifest('delete',
                     '/root/cdk/addons/default-http-backend.yaml')
    kubectl_manifest('delete',
                     '/root/cdk/addons/ingress-daemon-set.yaml')  # noqa
    hookenv.close_port(80)
    hookenv.close_port(443)
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the node.
    '''
    # scrub and try to format an array from the configuration option
    config = hookenv.config()
    user_labels = _parse_labels(config.get('labels'))
    # For diffing sake, iterate the previous label set
    if config.previous('labels'):
        previous_labels = _parse_labels(config.previous('labels'))
        hookenv.log('previous labels: {}'.format(previous_labels))
    else:
        # this handles first time run if there is no previous labels config
        previous_labels = _parse_labels("")
    # Calculate label removal
    # Labels present before but absent now are deleted first, so that the
    # overwrite pass below only has to (re)set the desired labels.
    for label in previous_labels:
        if label not in user_labels:
            hookenv.log('Deleting node label {}'.format(label))
            _apply_node_label(label, delete=True)
        # if the label is in user labels we do nothing here, it will get set
        # during the atomic update below.
    # Atomically set a label
    for label in user_labels:
        _apply_node_label(label, overwrite=True)
    # Set label for application name
    _apply_node_label('juju-application={}'.format(hookenv.service_name()),
                      overwrite=True)
@when_any('config.changed.kubelet-extra-args',
          'config.changed.proxy-extra-args')
def extra_args_changed():
    """Schedule a service restart when either extra-args option changes."""
    set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
    """Log in/out of docker registries per the 'docker-logins' config."""
    config = hookenv.config()
    previous_logins = config.previous('docker-logins')
    logins = config['docker-logins']
    logins = json.loads(logins)
    if previous_logins:
        previous_logins = json.loads(previous_logins)
        # Log out of any server no longer present in the new config.
        next_servers = {login['server'] for login in logins}
        previous_servers = {login['server'] for login in previous_logins}
        servers_to_logout = previous_servers - next_servers
        for server in servers_to_logout:
            cmd = ['docker', 'logout', server]
            subprocess.check_call(cmd)
    for login in logins:
        server = login['server']
        username = login['username']
        password = login['password']
        # NOTE(review): passing the password with -p exposes it in the
        # process list; consider `docker login --password-stdin` instead.
        cmd = ['docker', 'login', server, '-u', username, '-p', password]
        subprocess.check_call(cmd)
    set_state('kubernetes-worker.restart-needed')
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # Ask dpkg for this system's package architecture and return it as text.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
def create_config(server, creds):
    '''Create a kubernetes configuration for the worker unit.'''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
                      token=creds['client_token'], user='ubuntu')
    # Make the config dir readable by the ubuntu users so juju scp works.
    cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
    check_call(cmd)
    # Create kubernetes configuration in the default location for root.
    create_kubeconfig(kubeclientconfig_path, server, ca,
                      token=creds['client_token'], user='root')
    # Create kubernetes configuration for kubelet, and kube-proxy services.
    # Each service gets its own token so credentials can be rotated/revoked
    # independently.
    create_kubeconfig(kubeconfig_path, server, ca,
                      token=creds['kubelet_token'], user='kubelet')
    create_kubeconfig(kubeproxyconfig_path, server, ca,
                      token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
    """Parse a space-separated key=value config option into a dict.

    Bare tokens (no '=') are treated as boolean flags with value 'true'.
    """
    args = {}
    for token in hookenv.config().get(config_key, '').split():
        key, sep, value = token.partition('=')
        args[key] = value if sep else 'true'
    return args
def configure_kubernetes_service(service, base_args, extra_args_key):
    """Apply snap config for `service`, merging base args with user extras.

    Args set on a previous run but absent now are explicitly set to the
    string 'null' so the snap drops them.
    """
    db = unitdata.kv()
    prev_args_key = 'kubernetes-worker.prev_args.' + service
    prev_args = db.get(prev_args_key) or {}
    extra_args = parse_extra_args(extra_args_key)
    args = {}
    for arg in prev_args:
        # remove previous args by setting to null
        args[arg] = 'null'
    for k, v in base_args.items():
        args[k] = v
    for k, v in extra_args.items():
        args[k] = v
    cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
    check_call(cmd)
    # Remember what we set so the next run can null out removed args.
    db.set(prev_args_key, args)
def configure_kubelet(dns, ingress_ip):
    """Write kubelet snap options and re-render its arguments."""
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')
    kubelet_opts = {}
    kubelet_opts['require-kubeconfig'] = 'true'
    kubelet_opts['kubeconfig'] = kubeconfig_path
    kubelet_opts['network-plugin'] = 'cni'
    kubelet_opts['v'] = '0'
    kubelet_opts['address'] = '0.0.0.0'
    kubelet_opts['port'] = '10250'
    kubelet_opts['cluster-domain'] = dns['domain']
    kubelet_opts['anonymous-auth'] = 'false'
    kubelet_opts['client-ca-file'] = ca_cert_path
    kubelet_opts['tls-cert-file'] = server_cert_path
    kubelet_opts['tls-private-key-file'] = server_key_path
    kubelet_opts['logtostderr'] = 'true'
    kubelet_opts['fail-swap-on'] = 'false'
    kubelet_opts['node-ip'] = ingress_ip
    if (dns['enable-kube-dns']):
        kubelet_opts['cluster-dns'] = dns['sdn-ip']
    privileged = is_state('kubernetes-worker.privileged')
    kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
    if is_state('kubernetes-worker.gpu.enabled'):
        # GPU flag name changed across k8s releases; gate on kubelet version.
        if get_version('kubelet') < (1, 6):
            hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
            kubelet_opts['experimental-nvidia-gpus'] = '1'
        else:
            hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
            kubelet_opts['feature-gates'] = 'Accelerators=true'
    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
    """Write kube-proxy snap options and re-render its arguments."""
    kube_proxy_opts = {}
    kube_proxy_opts['cluster-cidr'] = cluster_cidr
    kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
    kube_proxy_opts['logtostderr'] = 'true'
    kube_proxy_opts['v'] = '0'
    # Pin each proxy to a randomly chosen API server to spread load.
    kube_proxy_opts['master'] = random.choice(api_servers)
    # Conntrack tuning is not possible inside an LXC container; disable it.
    if b'lxc' in check_output('virt-what', shell=True):
        kube_proxy_opts['conntrack-max-per-core'] = '0'
    configure_kubernetes_service('kube-proxy', kube_proxy_opts,
                                 'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.'''
    # NOTE(review): commands below are built by str.format then shlex.split;
    # arguments containing whitespace would split incorrectly -- confirm all
    # callers pass space-free paths/values.
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')
    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)
    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
@when_any('config.changed.default-backend-image',
          'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()
    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'
    context['defaultbackend_image'] = config.get('default-backend-image')
    # "" or "auto" means: pick the stock image for this architecture.
    if (context['defaultbackend_image'] == "" or
       context['defaultbackend_image'] == "auto"):
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "gcr.io/google_containers/defaultbackend-s390x:1.4"
        else:
            context['defaultbackend_image'] = \
                "gcr.io/google_containers/defaultbackend:1.4"
    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    # Render the ingress daemon set controller manifest
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        if context['arch'] == 's390x':
            context['ingress_image'] = \
                "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
        else:
            context['ingress_image'] = \
                "gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15"  # noqa
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return
    # Only open the ports once both manifests applied successfully.
    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def restart_unit_services():
    '''Restart worker services.'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    # Same order as before: proxy first, then kubelet.
    service_restart('snap.kube-proxy.daemon')
    service_restart('snap.kubelet.daemon')
def get_kube_api_servers(kube_api):
    '''Return the kubernetes api server address and port for this
    relationship.'''
    return ['https://{0}:{1}'.format(host['hostname'], host['port'])
            for service in kube_api.services()
            for host in service['hosts']]
def kubectl(*args):
    ''' Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails. '''
    cmd = ['kubectl', '--kubeconfig=' + kubeclientconfig_path]
    cmd.extend(args)
    hookenv.log('Executing {}'.format(cmd))
    return check_output(cmd)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if succesful, False if
    not. '''
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
def kubectl_manifest(operation, manifest):
    ''' Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    '''
    if operation == 'delete':
        # Deletions are a special case: remove immediately with --now.
        return kubectl_success(operation, '-f', manifest, '--now')
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        # The definition already exists; assume creation succeeded before
        # rather than erroring on a duplicate create.
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True
    # Execute the requested command that did not match any of the special
    # cases above.
    return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    """Run the first NRPE configuration pass and remember that it happened."""
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    """(Re)write the NRPE checks for the worker's systemd services."""
    monitored = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    hostname = nrpe.get_nagios_hostname()
    unit_name = nrpe.get_nagios_unit_name()
    checker = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(checker, monitored, unit_name)
    checker.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    """Drop the NRPE checks once the nagios relation goes away."""
    remove_state('nrpe-external-master.initial-config')
    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    checker = nrpe.NRPE(hostname=hostname)
    # Remove the check for each previously monitored systemd service.
    for svc in ('snap.kubelet.daemon', 'snap.kube-proxy.daemon'):
        checker.remove_check(shortname=svc)
def set_privileged():
    """Resolve the allow-privileged config value and record it as state.
    """
    value = hookenv.config('allow-privileged')
    if value == 'auto':
        # In auto mode, privileged containers track GPU availability.
        value = 'true' if is_state('kubernetes-worker.gpu.enabled') else 'false'
    if value == 'true':
        set_state('kubernetes-worker.privileged')
    else:
        remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.

    """
    # Flag a service restart and consume the config-changed event so this
    # handler does not re-fire on the same change.
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Turn on GPU support for this worker node.

    """
    if hookenv.config()['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Probe the driver first; without this, k8s reports the node as
        # having 0 gpus (visible in `kubectl get nodes -o yaml`).
        check_call(['nvidia-smi'])
    except CalledProcessError as error:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(error)
        return

    # Label the node so workloads can target GPU/CUDA capacity.
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
    """Turn off GPU support for this worker node.

    Fires while gpu mode is active but the operator has set
    allow-privileged="false". Privileged containers are no longer
    available, so gpu mode is rolled back here.
    """
    hookenv.log('Disabling gpu mode')

    # Strip the capability labels applied when gpu mode was enabled.
    _apply_node_label('gpu', delete=True)
    _apply_node_label('cuda', delete=True)

    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Notify kubernetes-master that we're gpu-enabled.

    """
    # Relayed over the kube-control relation so the master can schedule
    # accordingly.
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Notify kubernetes-master that we're not gpu-enabled.

    """
    # Counterpart of notify_master_gpu_enabled for the disabled case.
    kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
    """ Request kubelet node authorization with a well formed kubelet user.
    This also implies that we are requesting kube-proxy auth. """

    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the hostname
    # even if it will only be used when we enable RBAC
    nodeuser = 'system:node:{}'.format(gethostname().lower())
    kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = 'system:node:{}'.format(gethostname().lower())
    creds = kube_control.get_auth_credentials(nodeuser)
    if not creds:
        return
    # data_changed() both tests and records the new value, so it must keep
    # running before the user check, exactly as the combined condition did.
    if data_changed('kube-control.creds', creds) and creds['user'] == nodeuser:
        # Cache the credentials: a replacement master (e.g. after the
        # leader dies) has no recollection of the certs it never issued.
        db.set('credentials', creds)
        set_state('worker.auth.bootstrapped')
        set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading a
    a charm in a deployment that pre-dates the kube-control relation, it'll be
    missing.

    """
    # Blocked status surfaces the exact juju command the operator must run.
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
    """ Fix iptables FORWARD policy for Docker >=1.13
    https://github.com/kubernetes/kubernetes/issues/40182
    https://github.com/kubernetes/kubernetes/issues/39823
    """
    # -w 300: wait up to 300s for the iptables lock instead of failing
    # if another process holds it.
    cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
    check_call(cmd)
def _systemctl_is_active(application):
    ''' Poll systemctl to determine if the application is running.

    :param application: systemd unit name to query.
    :return: True only when the unit reports "active"; False otherwise,
        including when systemctl is unavailable or exits non-zero.
    '''
    cmd = ['systemctl', 'is-active', application]
    try:
        raw = check_output(cmd)
        # Compare the exact status token. The previous substring test
        # (b'active' in raw) would also match b'inactive' if the command
        # ever exited 0 with that output.
        return raw.strip() == b'active'
    except Exception:
        # Best-effort probe: any failure (non-zero exit status, missing
        # systemctl binary, ...) counts as "not running".
        return False
class GetNodeNameFailed(Exception):
    """Raised when this host's kubernetes node name cannot be determined."""
    pass
def get_node_name():
    """Return this host's node name as registered with the cluster.

    Polls ``kubectl get no`` for up to 60 seconds, then matches this
    machine's hostname against each node's 'Hostname' address.

    :raises GetNodeNameFailed: when kubectl keeps failing until the
        deadline, or when no node's Hostname address matches this host.
    """
    # Get all the nodes in the cluster
    cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path)
    cmd = cmd.split()
    deadline = time.time() + 60
    while time.time() < deadline:
        try:
            raw = check_output(cmd)
            break
        except CalledProcessError:
            hookenv.log('Failed to get node name for node %s.'
                        ' Will retry.' % (gethostname()))
            time.sleep(1)
    else:
        # The while-else fires only when the deadline expired without a
        # successful kubectl call (i.e. no break).
        msg = 'Failed to get node name for node %s' % gethostname()
        raise GetNodeNameFailed(msg)
    result = json.loads(raw.decode('utf-8'))
    if 'items' in result:
        for node in result['items']:
            if 'status' not in node:
                continue
            if 'addresses' not in node['status']:
                continue
            # find the hostname
            for address in node['status']['addresses']:
                if address['type'] == 'Hostname':
                    if address['address'] == gethostname():
                        return node['metadata']['name']
                    # if we didn't match, just bail to the next node
                    break
    msg = 'Failed to get node name for node %s' % gethostname()
    raise GetNodeNameFailed(msg)
class ApplyNodeLabelFailed(Exception):
    """Raised when a kubectl node label change keeps failing past the deadline."""
    pass
def _apply_node_label(label, delete=False, overwrite=False):
    ''' Invoke kubectl to apply node label changes.

    :param label: "key=value" to set, or just "key" when delete=True.
    :param delete: remove the label instead of setting it.
    :param overwrite: pass --overwrite so an existing value is replaced.
    :raises ApplyNodeLabelFailed: when kubectl keeps failing for 60s.
    '''
    nodename = get_node_name()

    # TODO: Make this part of the kubectl calls instead of a special string
    cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
    if delete is True:
        label_key = label.split('=')[0]
        cmd = cmd_base.format(kubeconfig_path, nodename, label_key)
        # A trailing '-' is kubectl's syntax for removing a label.
        cmd = cmd + '-'
    else:
        cmd = cmd_base.format(kubeconfig_path, nodename, label)
        if overwrite:
            cmd = '{} --overwrite'.format(cmd)
    cmd = cmd.split()

    deadline = time.time() + 60
    while time.time() < deadline:
        code = subprocess.call(cmd)
        if code == 0:
            break
        hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
            label, code))
        time.sleep(1)
    else:
        # Deadline expired without a successful kubectl exit (no break).
        msg = 'Failed to apply label %s' % label
        raise ApplyNodeLabelFailed(msg)
def _parse_labels(labels):
    ''' Parse labels from a whitespace separated key=value string.

    :param labels: string of "key=value" items separated by whitespace.
    :return: list of the well-formed "key=value" items; malformed items
        are logged and skipped.
    '''
    sanitized_labels = []
    # str.split() without an argument collapses runs of whitespace, so
    # consecutive spaces no longer produce empty "malformed" entries the
    # way split(' ') did.
    for item in labels.split():
        if '=' in item:
            sanitized_labels.append(item)
        else:
            hookenv.log('Skipping malformed option: {}'.format(item))
    return sanitized_labels
|
antoineco/kubernetes
|
cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
Python
|
apache-2.0
| 37,693
|
[
"CDK"
] |
88cc6c94174fbe966c5f3fe2115c10b5207239ad91919051a54b8e7a9a8ff5f3
|
import ConfigParser
import commands
from optparse import OptionParser
import os
import string
import sys
import time
import types
# Shell flavours for which profile example files are generated
# (see the createProfileExample calls in Installer.main).
SHELL_TYPE_BASH = "bash"
SHELL_TYPE_TCSH = "tcsh"
class Installer(object):
    def __init__(self, commandLine):
        """Collect default state for one installation run.

        commandLine is the argv-style argument list later handed to
        self.__parse() in main().
        """
        self.AZOrangeInstallDir = None       # final install target, resolved in readConfig()
        self.commandLineArguments = commandLine
        self.configFile = None               # path to setup.ini, set in main()
        self.currentDir = os.getcwd()
        self.localVars = {}                  # %VAR% substitutions from [LocalSetupVars]
        self.prepareOnly = False             # when True, unpack sources but do not build
        self.successInstall = True           # flipped to False on any fatal error
        self.verbosedLogging = False
    def main(self):
        """Top-level driver for the installation.

        Parses the command line, reads setup.ini, fetches all third-party
        sources, builds/installs (unless prepare-only), then wires up the
        desktop launcher and optionally runs the test suite.  Exits the
        process via sys.exit(1) on fatal errors.
        """
        options, args = self.__parse(self.commandLineArguments)
        if not os.path.isfile(options.file):
            print "Missing the configuration file. "+options.file+" not found.\n You may use the setupTemplate.ini file to create your setup.ini file."
            self.successInstall = False
            return
        self.configFile = options.file
        self.prepareOnly = options.prepare
        self.verbosedLogging = options.verbose
        self.readConfig()
        self.addLog("Log for AZOrange installation",True)
        os.system("clear")
        if not self.successInstall:
            print "Nothing Done!"
            sys.exit(1)
        #Sends the configuration used to the details only!
        self.printConfiguration(True)
        startInstallTime = time.time()
        # Check for some important requirements
        self.addLog("*Requirements and System Info")
        st,out = commands.getstatusoutput('python -c "import distutils.sysconfig; print distutils.sysconfig.get_python_inc()"; uname -a; lsb_release -a')
        self.addLog('#'+out)
        if not os.path.isfile("/bin/tcsh"):
            self.addLog("#/bin/tcsh is missing. tcsh is required for azorange to work properly.")
            self.successInstall = False
        #=====install procedures=====
        # Each step is guarded by successInstall so the first failure
        # short-circuits everything that follows.
        if self.successInstall:
            self.prepareInstallDirs()
        #Checkout any 3rd party software to the proper locations
        if self.successInstall:
            self.checkoutFTM()
        if self.successInstall:
            self.checkoutStructClust()
        if self.successInstall:
            self.checkoutFMINER()
        if self.successInstall:
            self.checkOutOpenAZO()
        if self.successInstall:
            self.checkOutCinfony()
        if self.successInstall:
            self.checkOutRdkit()
        if self.successInstall:
            self.checkOutCDK()
        if self.successInstall:
            self.checkOutOrange()
        if self.successInstall:
            self.checkOutAppspack()
        if self.successInstall:
            self.checkOutOpencv()
        if self.successInstall:
            self.checkOutBoost()
        if self.successInstall:
            self.checkOutPLearn()
        if self.successInstall:
            self.checkOutOasa()
        if self.successInstall:
            if self.prepareOnly:
                self.addLog("*Everything is now unpacked and in place ready for install!")
            else:
                self.install()
        if not self.successInstall:
            self.addLog("*ERROR: Installation aborted!")
        if self.successInstall and not self.prepareOnly:
            self.createProfileExample(SHELL_TYPE_TCSH)
            self.createProfileExample(SHELL_TYPE_BASH)
            self.InstallCacheCleaner()
            self.runAfterInstallScripts()
        #==========================
        self.addLog("*Finished")
        self.addLog("#The process spent " + str(int((time.time() - startInstallTime)/60)) + " minutes")
        #get back to the initial directory
        os.chdir(self.currentDir)
        if not self.successInstall:
            self.addLog("#ERRORS during installation!!")
            self.emailLog()
            sys.exit(1)
        elif not self.prepareOnly :
            # Post-install desktop integration: launcher script, Desktop
            # shortcut and MIME association.
            startAPPTemplate = os.path.join(self.trunkDir,'install/startAZOrange')
            startAPPTarget = os.path.join(self.AZOrangeInstallDir,'startAZOrange')
            AppLaucherTemplate = os.path.join(self.trunkDir,'install/AZOrange.desktop')
            AppLaucherTarget = None
            if "HOME" in os.environ:
                if os.path.isdir(os.path.join(os.getenv("HOME"),'Desktop')):
                    AppLaucherTarget = os.path.join(os.getenv("HOME"),'Desktop')
            #Create the Application GUI Starter script
            cmd = 'sed "s|AZO_INSTALL_DIR|'+self.AZOrangeInstallDir+'|g" ' + startAPPTemplate + ' > '+startAPPTarget
            self.__logAndExecute(cmd)
            self.__logAndExecute("chmod a+x " +startAPPTarget)
            #Create a Launcher in the Desktop
            thisOS = commands.getstatusoutput("uname -o")
            if "gnu/linux" in thisOS[1].lower() and AppLaucherTarget:
                cmd='sed "s|AZO_INSTALL_DIR|'+self.AZOrangeInstallDir+'|g" '+ AppLaucherTemplate + ' > ' + os.path.join(AppLaucherTarget,'AZOrange.desktop')
                self.__logAndExecute(cmd)
                self.__logAndExecute("chmod a+x " + os.path.join(AppLaucherTarget,'AZOrange.desktop'))
            elif AppLaucherTarget:
                self.__logAndExecute("ln -s " + startAPPTarget + " " + os.path.join(AppLaucherTarget,'AZOrange'))
                self.__logAndExecute("chmod a+x " + os.path.join(AppLaucherTarget,'AZOrange'))
            #associate Orange canvas files with AZOrange Application: Not critial if fails
            try:
                home = os.environ["HOME"]
                mimeStr = """
[Default Applications]
application/xml=AZOrange.desktop
[Added Associations]
application/xml=AZOrange.desktop;
"""
                appDir = os.path.join(home,".local/share/applications")
                if not os.path.isfile(os.path.join(appDir,"mimeapps.list")):
                    os.system("mkdir -p "+appDir)
                    fileh=open(os.path.join(appDir,"mimeapps.list"),"w")
                    fileh.write(mimeStr)
                    fileh.close()
                else:
                    # mimeapps.list exists: rewrite any application/xml
                    # line to point at AZOrange, keep everything else.
                    fileh=open(os.path.join(appDir,"mimeapps.list"),"r")
                    lines = fileh.readlines()
                    fileh.close()
                    fileh=open(os.path.join(appDir,"mimeapps.list"),"w")
                    for line in lines:
                        if "application/xml" in line:
                            fileh.write("application/xml=AZOrange.desktop\n")
                        else:
                            fileh.write(line)
                    fileh.close()
                cmd='sed "s|AZO_INSTALL_DIR|'+self.AZOrangeInstallDir+'|g" '+ AppLaucherTemplate + ' > ' + os.path.join(appDir,'AZOrange.desktop')
                self.__logAndExecute(cmd)
                self.__logAndExecute("chmod a+x " + os.path.join(appDir,'AZOrange.desktop'))
            except:
                print "Could not associate .ows files to AZOrange"
            self.addLog("#Installation done successfully!")
        else:
            self.addLog("#Preparation done successfully!")
        #Send the log if required in setup file
        self.emailLog()
        #Start the tests if required
        if self.runTests:
            os.system(os.path.join(self.currentDir,"runTests"))
            status,arch = commands.getstatusoutput('uname -i')
            if '_64' not in arch:
                print "AZOrange tests were done using 64 bits architecture."
                print "In 32 bits is expected that some tests fail although does not necessarily means that there is any problem on running AZOrange."
    # Needed for modules to be loaded.
    def module(self,command, *arguments):
        """Run the environment-modules helper: `modulecmd python <command> <args>`.

        modulecmd emits python code which is exec'd here — presumably to
        update this process' environment (TODO confirm against the
        environment-modules documentation).
        """
        # NOTE(review): this local 'commands' shadows the imported
        # 'commands' module inside this method.
        commands = os.popen('%s/bin/modulecmd python %s %s' % (os.environ['MODULESHOME'], command, string.join(arguments))).read()
        exec commands
def __logAndExecute(self, command):
if self.verbosedLogging:
self.addLog("#install - About to execute (in " + os.getcwd() + "): " + command)
status, output = commands.getstatusoutput(command)
self.addLog((status, "#install - Output from command: " + str(output)))
return status, output
    def __update_EnvVars(self, envFile):
        """Merge the NAME=value lines of *envFile* into self.EnvVars.

        Each value is a ':'-separated list.  For a variable that already
        exists, entries not yet present are prepended; note that
        insert(0, ...) per entry reverses their relative order with
        respect to the file.
        """
        envVars = {}
        if os.path.isfile(envFile):
            # NOTE(review): 'file' shadows the builtin of the same name.
            file = open(envFile,"r")
            lines = file.readlines()
            file.close()
            for line in lines:
                envVars[line.split("=")[0].strip()] = line.split("=")[1].strip()
        for varName in envVars:
            values = [var.strip() for var in envVars[varName].split(":")]
            if varName not in self.EnvVars:
                self.EnvVars[varName] = values
            else:
                for value in values:
                    if value not in self.EnvVars[varName]:
                        self.EnvVars[varName].insert(0,value)
def __getFromConfig(self,section,option):
#This method automatically pars any localVars present in the data got from config file
strToParse = self.config.get(section,option)
for var in self.localVars:
strToParse = strToParse.replace("%"+var+"%",self.localVars[var])
return strToParse
    def readConfig(self):
        """Populate installer attributes from the ini-style config file.

        Sections read: LocalSetupVars, Installation, Paths, Repository,
        Tests, FeedBack, Advanced, EnvVars and Dependencies.  On any
        validation failure, sets self.successInstall = False and returns
        early, leaving later attributes unset.
        """
        self.config = ConfigParser.ConfigParser()
        self.config.read(self.configFile)
        section = "LocalSetupVars"
        self.localVars={}
        if section in self.config.sections() and self.config.options(section):
            for option in self.config.options(section): #Read all localVars defined if any
                self.localVars[option.upper()] = self.config.get(section,option).strip()
        section = "Installation"
        if not self.__validateOptions(section,["logfile","templateprofilefile","detailslogfile"]):
            return
        self.logFile = os.path.realpath(self.__getFromConfig(section,"logfile"))
        if not self.logFile:
            self.logFile = os.path.join(self.currentDir,"install.log")
        self.detailsLogFile = os.path.realpath(self.__getFromConfig(section,"detailslogFile"))
        if not self.detailsLogFile:
            self.detailsLogFile = os.path.join(self.currentDir,"details.log")
        self.TemplateProfileFile = os.path.realpath(self.__getFromConfig(section,"templateprofilefile"))
        if not self.TemplateProfileFile:
            self.TemplateProfileFile = os.path.join(self.currentDir,"templateProfile")
        self.TemplateProfileFileBash = os.path.realpath(self.__getFromConfig(section,"templateprofilefilebash"))
        if not self.TemplateProfileFileBash:
            self.TemplateProfileFileBash = os.path.join(self.currentDir,"templateProfileBash")
        section = "Paths"
        if not self.__validateOptions(section,["builddir","installdir"]):
            return
        # trunkDir is hard-coded to the parent of the current directory.
        self.trunkDir = os.path.realpath("../")#os.path.realpath(self.__getFromConfig(section,"trunkdir"))
        self.DepSrcDir = os.path.join(self.trunkDir,"orangeDependencies","src")
        self.buildDir = os.path.realpath(self.__getFromConfig(section,"builddir"))
        self.installDir = os.path.realpath(self.__getFromConfig(section,"installdir"))
        section = "Repository"
        if not self.__validateOptions(section,["repository","repointeraction"]):
            return
        self.repo = self.__getFromConfig(section,"repository")
        self.repoInter= self.__getFromConfig(section,"repointeraction")
        if "no" in self.repoInter.lower():
            self.repoInter = "no"
        elif "yes" in self.repoInter.lower():
            self.repoInter = "yes"
        else:
            self.repoInter = None
        if not self.repoInter:
            print "Invalid repo interaction: ",self.repoInter
            print " Use 'yes' or 'no' "
            self.successInstall = False
            return
        section = "Tests"
        if not self.__validateOptions(section,["runtestsafterinstall"]):
            return
        self.runTests = "yes" in self.__getFromConfig(section,"runtestsafterinstall").lower()
        section = "Installation"
        if not self.__validateOptions(section,["installtype","openinstallation","cleaninstallcache","precleanbuilddir","precleaninstalldir"]):
            return
        self.installType = self.__getFromConfig(section,"installtype")
        self.openInstallation = self.__getFromConfig(section,"openinstallation")
        if "false" in self.openInstallation.lower():
            self.openInstallation = False
        else:
            self.openInstallation = True
        if "sys" in self.installType.lower():
            self.installType = "system"
            self.AZOrangeInstallDir = self.installDir
        else:
            # developer installs go back into the trunk itself
            self.installType = "developer"
            self.AZOrangeInstallDir = self.trunkDir
            if self.repoInter == "export":
                print "For developer installation you cannot use 'export' interaction with repository"
                self.successInstall = False
                return
        self.cleanInstallCache= self.__getFromConfig(section,"cleaninstallcache")
        if "yes" in self.cleanInstallCache.lower():
            self.cleanInstallCache = True
        else:
            self.cleanInstallCache = False
        self.preCleanTrunkDir = False #"yes" in self.__getFromConfig(section,"precleantrunkdir").lower()
        self.preCleanBuildDir = "yes" in self.__getFromConfig(section,"precleanbuilddir").lower()
        self.preCleanInstallDir = "yes" in self.__getFromConfig(section,"precleaninstalldir").lower()
        section = "FeedBack"
        if not self.__validateOptions(section,["supportemails"]):
            return
        self.supportEmails = self.__getFromConfig(section,"supportemails")
        section = "Advanced"
        if not self.__validateOptions(section,["platformtype"]):
            return
        self.platformType = self.__getFromConfig(section,"platformtype")
        #Set the InstallTimeModules to be the union of the modules and InstallTimeModules.
        #only the modules var will be used to compose the templateProfile
        #only the InstallTimeModules var will be sent to the setup script
        self.modules = []
        if "modules" in self.config.options(section):
            self.modules = self.__getFromConfig(section,"modules")
        if self.modules and self.modules.lower()!="none" and self.modules.lower()!="no":
            self.modules = [module.strip() for module in self.modules.split(",")]
        else:
            self.modules = []
        self.InstallTimeModules = []
        if "installtimemodules" in self.config.options(section):
            self.InstallTimeModules = self.__getFromConfig(section,"installtimemodules")
        if self.InstallTimeModules and self.InstallTimeModules.lower()!="none" and self.InstallTimeModules.lower()!="no":
            self.InstallTimeModules = [module.strip() for module in self.InstallTimeModules.split(",")]
        else:
            self.InstallTimeModules = []
        #Compose the InstallTimeModules and the modules vars properly
        self.InstallTimeModules = self.InstallTimeModules + self.modules
        if len(self.InstallTimeModules)>0:
            self.InstallTimeModules = str(self.InstallTimeModules)
        else:
            self.InstallTimeModules = "None"
        if len(self.modules)>0:
            self.modules = str(self.modules)
        else:
            self.modules = "None"
        self.RunTimeModules = "None"
        if "runtimemodules" in self.config.options(section):
            self.RunTimeModules = self.__getFromConfig(section,"runtimemodules")
        if self.RunTimeModules and self.RunTimeModules.lower()!="none" and self.RunTimeModules.lower()!="no":
            self.RunTimeModules = str([module.strip() for module in self.RunTimeModules.split(",")])
        else:
            self.RunTimeModules = "None"
        self.preInstallModules = None
        if "preinstallmodules" in self.config.options(section):
            self.preInstallModules = self.__getFromConfig(section,"preinstallmodules")
        if self.preInstallModules and self.preInstallModules.lower()!="none" and self.preInstallModules.lower()!="no":
            self.preInstallModules = [module.strip() for module in self.preInstallModules.split(",")]
        else:
            self.preInstallModules = None
        if self.preInstallModules:
            for moduleX in self.preInstallModules:
                self.module("load", moduleX)
        self.GCCmoduleForAppspackMPI = None
        if "gccmoduleforappspackmpi" in self.config.options(section):
            self.GCCmoduleForAppspackMPI = self.__getFromConfig(section,"gccmoduleforappspackmpi").strip()
        # NOTE(review): `.lower` below is missing its parentheses, so the
        # =="no" comparison is against the bound method and never matches.
        if not self.GCCmoduleForAppspackMPI or self.GCCmoduleForAppspackMPI.lower()=="none" or self.GCCmoduleForAppspackMPI.lower=="no":
            self.GCCmoduleForAppspackMPI = None
        self.sources = None
        if "sources" in self.config.options(section):
            self.sources = self.__getFromConfig(section,"sources")
        if self.sources and self.sources.lower()!="none" and self.sources.lower()!="no":
            self.sources = [source.strip() for source in self.sources.split(",")]
        else:
            self.sources = None
        section = "EnvVars"
        self.EnvVars={}
        if section in self.config.sections() and self.config.options(section):
            for option in self.config.options(section): #Read all envVard defined in the EnvVars section if any
                configValues = self.__getFromConfig(section,option).split(":") #For each envVar get a list of elements
                newValues = []
                if not configValues[0]: #If the envVar is empty, set it yo ""
                    os.environ[option.upper()] = ""
                    self.EnvVars[option.upper()] = []
                    continue
                for value in configValues:
                    if "$" in value[0]: #Load other envVars to this one if defined with $
                        newValues += os.environ[value[1:]].split(":")
                    else:
                        newValues.append(value)
                #Compose the string to assign to the envVar
                strValues = ""
                for idx,value in enumerate(newValues):
                    if idx: strValues += ":"
                    strValues += value
                #Assure that if something was added to PYTHONPATH, that is also in the sys.path
                # NOTE(review): 'value' here is the leftover from the loop
                # above, i.e. only the LAST element is inserted in sys.path.
                if option.upper()=="PYTHONPATH":
                    sys.path.insert(0,value)
                os.environ[option.upper()] = strValues
                self.EnvVars[option.upper()] = newValues
        section = "Dependencies"
        self.dependencies = {}
        if section in self.config.sections() and self.config.options(section):
            for option in self.config.options(section): #Read all dependencies if any
                self.dependencies[option] = self.__getFromConfig(section,option)
def __validateOptions(self,section,allOptions):
if section not in self.config.sections():
print "Missing section "+section+" in setup file!"
self.successInstall = False
return False
if sum([x in self.config.options(section) for x in allOptions])!=len(allOptions):
print "Some options of section "+section+" are missing!"
self.successInstall = False
return False
return True
    def prepareInstallDirs(self):
        """Create/clean trunk, build and install directories per config.

        Sets self.successInstall = False and returns early on invalid
        path configuration.
        """
        #trunkDir must already exist if repoInter is "" or "update"
        #if repoInter is checkout, clean if exists the trunkDir
        #Always clean the installDir
        #if installType is developer,the installDir will not be created and
        #   BuildDir is not deleted
        #if installType is system, we MUST create installDir
        self.addLog("*Preparing Directories")
        if (not self.trunkDir and self.repoInter != "export") or not self.buildDir:
            self.addLog("#ERROR: Missing the definition of some Paths in Setup file.")
            self.successInstall = False
            return
        #Prepare the Trunk Dir
        if (self.repoInter == "yes") and (not os.path.isdir(self.trunkDir)):
            self.addLog("#ERROR: TrunkDir must exist for the chosen RepositoryInteraction.")
            self.successInstall = False
            return
        elif self.repoInter == "checkout":
            if os.path.isdir(self.trunkDir) and self.preCleanTrunkDir:
                self.addLog("#Removing existing TrunkDir")
                self.__logAndExecute("chmod -R 777 " + self.trunkDir)
                self.__logAndExecute("rm -Rf " + self.trunkDir)
            if not os.path.isdir(self.trunkDir):
                self.addLog("#Creating TrunkDir")
                self.__logAndExecute("mkdir -p " + self.trunkDir)
        #elif self.repoInter!="export": #is "" or "update"
        #    if not os.path.isdir(os.path.join(self.trunkDir,".svn")):
        #        self.addLog("#WARNING: Seems that the trunkDir is not a subversion trunk!")
        #Prepare BuildDir
        if os.path.isdir(self.buildDir) and (self.preCleanBuildDir):
            self.addLog("#Removing existing buildDir")
            self.__logAndExecute("chmod -R 777 " + self.buildDir)
            self.__logAndExecute("rm -Rf " + self.buildDir)
        if not os.path.isdir(self.buildDir):
            self.addLog("#Creating buildDir")
            self.__logAndExecute("mkdir -p " + self.buildDir)
        #Prepare InstallDir
        #Create the installDir if is a system installation. If is a dev install, we will
        #  install back to trunk, so we do not need installDir
        if self.installType == "system":
            # Refuse install dirs nested inside the trunk for system installs.
            if self.trunkDir+"/" in self.installDir:
                self.addLog("#ERROR: Invalid installDir defined in setup file. The system installation needs a different install dir.")
                self.successInstall = False
                return
            if os.path.isdir(self.installDir) and self.preCleanInstallDir:
                self.addLog("#Removing existing installDir")
                self.__logAndExecute("chmod -R 777 " + self.installDir)
                self.__logAndExecute("rm -Rf " + self.installDir)
            if not os.path.isdir(self.installDir):
                self.addLog("#Creating installDir")
                self.__logAndExecute("mkdir -p " + self.installDir)
        if self.openInstallation:
            self.__logAndExecute("mkdir -p " + self.DepSrcDir)
    def checkOutOpenAZO(self):
        """Fetch the open AZOrange sources into the trunk.

        With repoInter == "yes": use current files (open installation) or
        clone from GIT.  Otherwise: unpack a tarball named in the
        [Dependencies] "openazo" entry (URL[,REV[,*]]; '*' third field
        means "use the installed copy").
        """
        USE_INSTALLED = False
        if self.repoInter=="yes":
            #Update the AZO source from GITHub
            os.chdir(self.trunkDir)
            if self.openInstallation:
                self.addLog("*OpenAZOrange: Using current files.")
                self.addLog("#trunk: "+self.trunkDir)
                #self.__logAndExecute("git pull")
            else:
                if self.installType == "developer":
                    if "openazo" in self.dependencies:
                        depCfg = self.dependencies["openazo"].split(",")
                        if len(depCfg)>=3 and depCfg[2] == "*":
                            USE_INSTALLED = True
                        else:
                            USE_INSTALLED = False
                if USE_INSTALLED:
                    self.addLog("*Not cloning openAZO from GIT")
                    return
                self.addLog("*Cloning from GIT")
                self.addLog("#trunk: "+self.trunkDir)
                self.__logAndExecute("rm -rf AZOrange")
                self.__logAndExecute("git clone "+ self.repo)
                self.__logAndExecute("cp -Rf AZOrange/* .")
                self.__logAndExecute("cp -Rf AZOrange/.git " + self.trunkDir)
                self.__logAndExecute("rm -rf AZOrange")
        else:
            if "openazo" not in self.dependencies:
                self.addLog((1,"Missing OpenAZO definition in Dependencies at setup.ini"))
            else:
                depCfg = self.dependencies["openazo"].split(",")
                URL = os.path.join(self.DepSrcDir,depCfg[0])
                if len(depCfg)>=3 and depCfg[2] == "*":
                    USE_INSTALLED = True
                else:
                    USE_INSTALLED = False
                if not URL or USE_INSTALLED or self.repoInter == "no":
                    self.addLog("*Not unpacking openAZO")
                    return
                self.addLog("*Extracting openAZO from " + URL)
                self.addLog("#trunk: "+self.trunkDir)
                os.chdir(self.DepSrcDir)
                self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,"AZOrange"))
                self.__logAndExecute("tar xfz " + os.path.split(URL)[-1])
                self.__logAndExecute("cp -Rf AZOrange/* " + self.trunkDir)
                self.__logAndExecute("cp -Rf AZOrange/.git " + self.trunkDir)
                self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,"AZOrange"))
                #Revert any files replaced from the Git repo. We want to assure that the AZO inside files prevail
                os.chdir(self.trunkDir)
                self.addLog("#Reverting full RepoDir to SVN version...")
                self.__logAndExecute("svn revert -R ./*")
        os.chdir(self.currentDir)
    def checkOutCDK(self):
        """Download the CDK jar into orangeDependencies/src/cdk.

        The [Dependencies] "cdk" entry has the form URL[,REV[,*]]; a '*'
        third field means "use the installed copy" and skips the download.
        """
        # Get the dependency Config
        name = "cdk"
        if name not in self.dependencies:
            URL = None
            REV = None
            USE_INSTALLED = True
        else:
            depCfg = self.dependencies[name].split(",")
            URL = depCfg[0]
            if len(depCfg)<2 or depCfg[1] == "":
                REV = "HEAD"
            else:
                REV = depCfg[1]
            if len(depCfg)>=3 and depCfg[2] == "*":
                USE_INSTALLED = True
            else:
                USE_INSTALLED = False
        if not URL or USE_INSTALLED or self.repoInter == "no":
            self.addLog("*Not downloading "+name)
            return
        # Start from a clean per-dependency directory.
        self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,name))
        self.__logAndExecute("mkdir " + os.path.join(self.DepSrcDir,name))
        os.chdir(os.path.join(self.DepSrcDir,"cdk"))
        jarFile = os.path.split(URL)[-1].strip()
        if self.openInstallation:
            self.addLog("*Downloading "+name+" to trunk ("+URL+":"+REV+")")
            self.__logAndExecute("rm -rf " + jarFile)
            self.__logAndExecute("wget " + URL )
        else:
            self.addLog("*Using "+name+" in SVN Repo (Not implemented)")
    def checkoutFMINER(self):
        """Clone the fminer dependency into orangeDependencies/src/fminer.

        The [Dependencies] "fminer" entry has the form URL[,REV[,*]];
        a '*' third field means "use the installed copy".  Sets
        self.successInstall = False when the clone looks incomplete
        (missing libbbrc subdirectory).
        """
        # Get the dependency Config
        name = "fminer"
        if name not in self.dependencies:
            URL = None
            REV = None
            USE_INSTALLED = True
        else:
            depCfg = self.dependencies[name].split(",")
            URL = depCfg[0]
            if len(depCfg)<2 or depCfg[1] == "":
                REV = "HEAD"
            else:
                REV = depCfg[1]
            if len(depCfg)>=3 and depCfg[2] == "*":
                USE_INSTALLED = True
            else:
                USE_INSTALLED = False
        if not URL or USE_INSTALLED or self.repoInter == "no":
            self.addLog("*Not downloading "+name)
            return
        self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,name))
        os.chdir(self.DepSrcDir)
        tarFile = os.path.split(URL)[-1].strip()
        if self.openInstallation:
            self.addLog("*Downloading "+name+" to trunk ("+URL+":"+REV+")")
            self.__logAndExecute("git clone " + URL + " " + os.path.join(self.DepSrcDir,name))
        else:
            self.addLog("*Using "+name+" in SVN Repo (Not implemented)")
            return
        # Sanity-check the clone by looking for an expected subdirectory.
        if not os.path.isdir(os.path.join(self.DepSrcDir,name,"libbbrc")):
            self.addLog("#ERROR: Could not fet fminer source code.")
            self.successInstall = False
            return
def checkoutStructClust(self):
# Get the dependency Config
name = "clustering"
if name not in self.dependencies:
self.addLog("Name " + str(name) + " not in dependencies")
URL = None
REV = None
USE_INSTALLED = True
else:
depCfg = self.dependencies[name].split(",")
URL = depCfg[0]
self.addLog(URL)
if len(depCfg)<2 or depCfg[1] == "":
REV = "HEAD"
else:
REV = depCfg[1]
if len(depCfg)>=3 and depCfg[2] == "*":
USE_INSTALLED = True
else:
USE_INSTALLED = False
if not URL or USE_INSTALLED or self.repoInter == "no":
self.addLog("*Not downloading "+name)
return
self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,name))
os.chdir(self.DepSrcDir)
if self.openInstallation:
tarFile = "structuralClustering.tar.gz"
dwnldFile = os.path.split(URL)[-1].strip()
self.addLog("*Downloading "+name+" to trunk ("+URL+":"+REV+")")
self.__logAndExecute("rm -rf " + tarFile)
self.__logAndExecute("rm -rf " + dwnldFile)
self.__logAndExecute("wget " + URL )
self.__logAndExecute("mv "+dwnldFile+" "+tarFile)
else:
self.addLog("*Using "+name+" in SVN Repo")
tarFile = URL
UnpackCmd = "tar "
if tarFile[-6:] == "tar.gz":
UnpackCmd += "xfz "
elif tarFile[-6:] == "tar.bz2":
UnpackCmd += "xfj "
else:
self.addLog("#ERROR: Not a known tar file.")
self.successInstall = False
return
self.__logAndExecute(UnpackCmd + tarFile)
def checkoutFTM(self):
# Get the dependency Config
name = "ftm"
if name not in self.dependencies:
URL = None
REV = None
USE_INSTALLED = True
else:
depCfg = self.dependencies[name].split(",")
URL = depCfg[0]
if len(depCfg)<2 or depCfg[1] == "":
REV = "HEAD"
else:
REV = depCfg[1]
if len(depCfg)>=3 and depCfg[2] == "*":
USE_INSTALLED = True
else:
USE_INSTALLED = False
if not URL or USE_INSTALLED or self.repoInter == "no":
self.addLog("*Not downloading "+name)
return
self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,name))
os.chdir(self.DepSrcDir)
tarFile = "ftm.tar.gz"
dwnldFile = os.path.split(URL)[-1].strip()
if self.openInstallation:
self.addLog("*Downloading "+name+" to trunk ("+URL+":"+REV+")")
self.__logAndExecute("rm -rf " + tarFile)
self.__logAndExecute("rm -rf " + dwnldFile)
self.__logAndExecute("wget " + URL )
self.__logAndExecute("mv "+dwnldFile+" "+tarFile)
else:
self.addLog("*Using "+name+" in SVN Repo (Not implemented yet)")
return
UnpackCmd = "tar "
if tarFile[-6:] == "tar.gz":
UnpackCmd += "xfz "
elif tarFile[-6:] == "tar.bz2":
UnpackCmd += "xfj "
else:
self.addLog("#ERROR: Not a known tar file.")
self.successInstall = False
return
self.__logAndExecute(UnpackCmd + tarFile)
def checkOutRdkit(self):
# Get the dependency Config
name = "rdkit"
if name not in self.dependencies:
URL = None
REV = None
USE_INSTALLED = True
else:
depCfg = self.dependencies[name].split(",")
URL = depCfg[0]
if len(depCfg)<2 or depCfg[1] == "":
REV = "HEAD"
else:
REV = depCfg[1]
if len(depCfg)>=3 and depCfg[2] == "*":
USE_INSTALLED = True
else:
USE_INSTALLED = False
if not URL or USE_INSTALLED or self.repoInter == "no":
self.addLog("*Not downloading "+name)
return
self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,name))
os.chdir(self.DepSrcDir)
tarFile = os.path.split(URL)[-1].strip()
if self.openInstallation:
self.addLog("*Downloading "+name+" to trunk ("+URL+":"+REV+")")
self.__logAndExecute("rm -rf " + tarFile)
self.__logAndExecute("wget " + URL )
else:
self.addLog("*Using "+name+" in SVN Repo (Not implemented)")
return
UnpackCmd = "tar "
if tarFile[-6:] == "tar.gz":
UnpackCmd += "xfz "
unpackDir = tarFile[0:tarFile.rfind(".tar")]
elif tarFile[-4:] == ".tgz":
UnpackCmd += "xfz "
unpackDir = tarFile[0:tarFile.rfind(".tgz")]
elif tarFile[-6:] == "tar.bz2":
UnpackCmd += "xfj "
unpackDir = tarFile[0:tarFile.rfind(".tar")]
else:
self.addLog("#ERROR: Not a known tar file.")
self.successInstall = False
return
self.__logAndExecute(UnpackCmd + tarFile)
self.__logAndExecute("mv " + unpackDir + " " + name )
def checkOutCinfony(self):
# Get the dependency Config
name = "cinfony"
if name not in self.dependencies:
URL = None
REV = None
USE_INSTALLED = True
else:
depCfg = self.dependencies[name].split(",")
URL = depCfg[0]
if len(depCfg)<2 or depCfg[1] == "":
REV = "HEAD"
else:
REV = depCfg[1]
if len(depCfg)>=3 and depCfg[2] == "*":
USE_INSTALLED = True
else:
USE_INSTALLED = False
if not URL or USE_INSTALLED or self.repoInter == "no":
self.addLog("*Not downloading "+name)
return
self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,name))
os.chdir(self.DepSrcDir)
tarFile = "cinfony_Download.tar.gz"
if self.openInstallation:
self.addLog("*Downloading "+name+" to trunk ("+URL+":"+REV+")")
self.__logAndExecute("rm -rf " + tarFile)
# Download the File
self.__logAndExecute("wget " + URL + " -O " + tarFile)
else:
self.addLog("*Using "+name+" in SVN Repo (Not implemented)")
UnpackCmd = "tar "
if tarFile[-6:] == "tar.gz":
UnpackCmd += "xfz "
elif tarFile[-6:] == "tar.bz2":
UnpackCmd += "xfj "
else:
self.addLog("#ERROR: Not a known tar file.")
self.successInstall = False
return
# This file has different folder name from the tar file
os.mkdir("./"+name)
UnpackCmd += " " + tarFile + " -C "+name
self.__logAndExecute(UnpackCmd)
folderName = os.listdir("./"+name)[0]
self.__logAndExecute("mv "+os.path.join(name,folderName,"*") + " " + name)
self.__logAndExecute("rmdir "+os.path.join(name,folderName))
#self.__logAndExecute("mv " + tarFile[0:tarFile.rfind(".tar")] + " " + name )
    def checkOutOrange(self):
        """Fetch the Orange sources into self.DepSrcDir/orange and patch them.

        The dependency entry has the form "URL[,REV[,*]]"; a trailing '*'
        means the pre-installed copy is used and nothing is fetched.  Open
        installations clone the mercurial repository from bitbucket; closed
        ones unpack a tarball.  applyOrangePatch.sh is then run on the
        checkout.  Returns with the process back in self.currentDir.
        """
        # Get the dependency Config
        if "orange" not in self.dependencies:
            URL = None
            REV = None
            USE_INSTALLED = True
        else:
            depCfg = self.dependencies["orange"].split(",")
            URL = depCfg[0]
            # Default to HEAD when no revision is given.
            if len(depCfg)<2 or depCfg[1] == "":
                REV = "HEAD"
            else:
                REV = depCfg[1]
            if len(depCfg)>=3 and depCfg[2] == "*":
                USE_INSTALLED = True
            else:
                USE_INSTALLED = False
        if not URL or USE_INSTALLED or self.repoInter == "no":
            self.addLog("*Not downloading/unpacking orange")
            return
        self.__logAndExecute("mkdir -p " + os.path.join(self.trunkDir,"orange"))
        self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,"orange"))
        os.chdir(self.DepSrcDir)
        if self.openInstallation:
            self.addLog("*Checking out from orange bitbucket to trunk ("+URL+":"+REV+")")
            #os.chdir(os.path.join(self.trunkDir,"orange"))
            self.__logAndExecute("hg clone " + URL + " ./orange")
            os.chdir("orange")
            self.__logAndExecute("hg update " + REV)
            # Drop the mercurial metadata; only the working copy is needed.
            self.__logAndExecute("rm -rf .hg")
        else:
            self.addLog("*Extracting orange from " + URL)
            self.__logAndExecute("tar xfz " + os.path.split(URL)[-1])
        os.chdir(self.DepSrcDir)
        # Apply Patch
        self.addLog("#Applying Patch...")
        os.chdir(os.path.join(self.currentDir,"Patches"))
        status,out = commands.getstatusoutput("./applyOrangePatch.sh %s" % (os.path.join(self.DepSrcDir,"orange")))
        if status != 0:
            # A non-zero status usually means the patch was applied on a
            # previous run, so this logs a warning instead of failing.
            self.addLog("#WARNING: Patch was not properly applied. It might be that it has been already applied.")
            self.addLog("#         Please check the next details.")
            self.addLog([0,out])
        else:
            self.addLog([status,out])
        os.chdir(self.currentDir)
def checkOutAppspack(self):
# Get the dependency Config
if "appspack" not in self.dependencies:
URL = None
REV = None
USE_INSTALLED = True
else:
depCfg = self.dependencies["appspack"].split(",")
URL = depCfg[0]
if len(depCfg)<2 or depCfg[1] == "":
REV = "HEAD"
else:
REV = depCfg[1]
if len(depCfg)>=3 and depCfg[2] == "*":
USE_INSTALLED = True
else:
USE_INSTALLED = False
if not URL or USE_INSTALLED or self.repoInter == "no":
self.addLog("*Not downloading AppsPack")
return
self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,"appspack"))
os.chdir(self.DepSrcDir)
if self.openInstallation:
self.addLog("*Downloading AppsPack to trunk ("+URL+":"+REV+")")
self.__logAndExecute("rm -rf " + os.path.split(URL)[-1])
self.__logAndExecute("wget " + URL )
else:
self.addLog("*Using APPSPACK in SVN Repo")
self.__logAndExecute("tar xfz " + os.path.split(URL)[-1])
self.__logAndExecute("mv " + os.path.split(URL)[-1][0:os.path.split(URL)[-1].rfind(".tar")] + " appspack" )
os.chdir(self.currentDir)
    def checkOutOpencv(self):
        """Fetch opencv into self.DepSrcDir/opencv and apply the local patch.

        The dependency entry has the form "URL[,REV[,*]]"; a trailing '*'
        means the pre-installed copy is used and nothing is fetched.
        Returns with the process back in self.currentDir.
        """
        # Get the dependency Config
        if "opencv" not in self.dependencies:
            URL = None
            REV = None
            USE_INSTALLED = True
        else:
            depCfg = self.dependencies["opencv"].split(",")
            URL = depCfg[0]
            # Default to HEAD when no revision is given.
            if len(depCfg)<2 or depCfg[1] == "":
                REV = "HEAD"
            else:
                REV = depCfg[1]
            if len(depCfg)>=3 and depCfg[2] == "*":
                USE_INSTALLED = True
            else:
                USE_INSTALLED = False
        if not URL or USE_INSTALLED or self.repoInter == "no":
            self.addLog("*Not downloading opencv")
            return
        self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,"opencv"))
        os.chdir(self.DepSrcDir)
        if self.openInstallation:
            self.addLog("*Downloading opencv to trunk ("+URL+":"+REV+")")
            self.__logAndExecute("rm -rf " + os.path.split(URL)[-1])
            self.__logAndExecute("wget " + URL )
        else:
            self.addLog("*Using opencv in SVN Repo")
        # The opencv archive is bzip2-compressed ("xfj"); the unpacked
        # versioned folder is renamed to plain "opencv".
        self.__logAndExecute("tar xfj " + os.path.split(URL)[-1])
        self.__logAndExecute("mv " + os.path.split(URL)[-1][0:os.path.split(URL)[-1].rfind(".tar")] + " opencv" )
        # Apply Patch
        self.addLog("#Applying Patch...")
        os.chdir(os.path.join(self.currentDir,"Patches"))
        status,out = commands.getstatusoutput("./applyOpenCVPatch.sh %s" % (os.path.join(self.DepSrcDir,"opencv")))
        if status != 0:
            # Non-zero usually means the patch was already applied earlier;
            # warn instead of failing the installation.
            self.addLog("#WARNING: Patch was not properly applied. It might be that it has been already applied.")
            self.addLog("#         Please check the next details.")
            self.addLog([0,out])
        else:
            self.addLog([status,out])
        os.chdir(self.currentDir)
    def checkOutOasa(self):
        """Place the oasa sources under self.DepSrcDir/oasa (closed installs only).

        The dependency entry has the form "URL[,REV[,*]]"; a trailing '*'
        means the pre-installed copy is used.  Open installations skip oasa
        entirely (see the commented-out download below); closed ones unpack
        the tarball from the SVN repo.
        """
        #Get the oasa dependency config
        if "oasa" not in self.dependencies:
            URL = None
            REV = None
            USE_INSTALLED = True
        else:
            depCfg = self.dependencies["oasa"].split(",")
            URL = depCfg[0]
            # Default to HEAD when no revision is given.
            if len(depCfg)<2 or depCfg[1] == "":
                REV = "HEAD"
            else:
                REV = depCfg[1]
            if len(depCfg)>=3 and depCfg[2] == "*":
                USE_INSTALLED = True
            else:
                USE_INSTALLED = False
        if not URL or USE_INSTALLED or self.repoInter == "no":
            self.addLog("*Not downloading Oasa")
            return
        self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,"oasa"))
        os.chdir(self.DepSrcDir)
        if self.openInstallation:
            self.addLog("*Oasa is not needed in this installation")
            return
            # To download and install oasa in the open installation, remove both 2 previous lines and uncomment next 3 lines
            #self.addLog("*Downloading Oasa to trunk ("+URL+":"+REV+")")
            #self.__logAndExecute("rm -rf " + os.path.split(URL)[-1])
            #self.__logAndExecute("wget " + URL )
        else:
            self.addLog("*Using oasa in SVN Repo")
            self.addLog("#Extracting oasa from " + URL )
            self.__logAndExecute("tar xfz " + os.path.split(URL)[-1])
            # Strip the version suffix from the unpacked folder name.
            self.__logAndExecute("mv " + os.path.split(URL)[-1][0:os.path.split(URL)[-1].rfind(".tar")] + " oasa" )
        os.chdir(self.currentDir)
def checkOutBoost(self):
# Get the dependency Config
if "boost" not in self.dependencies:
URL = None
REV = None
USE_INSTALLED = True
else:
depCfg = self.dependencies["boost"].split(",")
URL = depCfg[0]
if len(depCfg)<2 or depCfg[1] == "":
REV = "HEAD"
else:
REV = depCfg[1]
if len(depCfg)>=3 and depCfg[2] == "*":
USE_INSTALLED = True
else:
USE_INSTALLED = False
if not URL or USE_INSTALLED or self.repoInter == "no":
self.addLog("*Not downloading Boost")
return
self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,"boost"))
os.chdir(self.DepSrcDir)
if self.openInstallation:
self.addLog("*Downloading Boost to trunk ("+URL+":"+REV+")")
self.__logAndExecute("rm -rf " + os.path.split(URL)[-1])
self.__logAndExecute("wget " + URL )
else:
self.addLog("*Using boost in SVN Repo")
self.__logAndExecute("tar xfz " + os.path.split(URL)[-1])
self.__logAndExecute("mv " + os.path.split(URL)[-1][0:os.path.split(URL)[-1].rfind(".tar")] + " boost" )
os.chdir(self.currentDir)
    def checkOutPLearn(self):
        """Fetch PLearn into self.DepSrcDir/plearn and apply the local patch.

        The dependency entry has the form "URL[,REV[,*]]"; a trailing '*'
        means the pre-installed copy is used.  Open installations export
        from the PLearn SVN; closed ones unpack a tarball.  .svn folders
        are stripped and applyPLearnPatch.sh is run on the result.
        Returns with the process back in self.currentDir.
        """
        # Get the dependency Config
        if "plearn" not in self.dependencies:
            URL = None
            REV = None
            USE_INSTALLED = True
        else:
            depCfg = self.dependencies["plearn"].split(",")
            URL = depCfg[0]
            # Default to HEAD when no revision is given.
            if len(depCfg)<2 or depCfg[1] == "":
                REV = "HEAD"
            else:
                REV = depCfg[1]
            if len(depCfg)>=3 and depCfg[2] == "*":
                USE_INSTALLED = True
            else:
                USE_INSTALLED = False
        if not URL or USE_INSTALLED or self.repoInter == "no":
            self.addLog("*Not downloading plearn")
            return
        self.__logAndExecute("rm -rf " + os.path.join(self.DepSrcDir,"plearn"))
        if self.openInstallation:
            self.addLog("*Checking out from plearn SVN to trunk ("+URL+":"+REV+")")
            self.__logAndExecute("mkdir -p " + os.path.join(self.DepSrcDir,"plearn"))
            os.chdir(os.path.join(self.DepSrcDir,"plearn"))
            self.__logAndExecute("svn export" + " -r " + REV + " " + URL + " ./ --force")
        else:
            self.addLog("*Extracting PLearn from " + URL)
            os.chdir(self.DepSrcDir)
            self.__logAndExecute("tar xfz " + os.path.split(URL)[-1])
        # Remove any SVN metadata left in the exported tree.
        self.__logAndExecute('find '+os.path.join(self.DepSrcDir,"plearn")+' -name ".svn" | xargs rm -Rf')
        # Apply Patch
        self.addLog("*Applying Patch...")
        os.chdir(os.path.join(self.currentDir,"Patches"))
        status,out = commands.getstatusoutput("./applyPLearnPatch.sh %s" % (os.path.join(self.DepSrcDir,"plearn")))
        if status != 0:
            # Non-zero usually means the patch was already applied earlier;
            # warn instead of failing the installation.
            self.addLog("#WARNING: Patch was not properly applied. It might be that it has been already applied.")
            self.addLog("#         Please check the next details.")
            self.addLog([0,out])
        else:
            self.addLog([status,out])
        os.chdir(self.currentDir)
def install(self):
# Apply Patch
if not self.openInstallation:
self.addLog("*Applying AZInHouse Patch...")
os.chdir(os.path.join(self.currentDir,"Patches"))
status,out = commands.getstatusoutput("./applyAZInHousePatch.sh %s" % (self.trunkDir))
if status != 0:
self.addLog("#WARNING: Patch was not properly applyed. It might be that it has been already applyed.")
self.addLog("# Please check the next details.")
self.addLog([0,out])
else:
self.addLog([status,out])
self.addLog("*Compiling and Installing")
os.chdir(self.buildDir)
self.addLog("#Copying from Trunk to build dir")
self.addLog("#"+self.trunkDir+" -> "+self.buildDir)
#Export from the Trunk working copy
self.__logAndExecute("cp -Rf " + os.path.join(self.trunkDir,"*") + " ./ ")
# Remove all .svn and .git folders
self.__logAndExecute('find ./ -name ".svn" | xargs rm -Rf')
self.__logAndExecute('find ./ -name ".git" | xargs rm -Rf')
#The orange core setup.py that is placed in orange/install-scripts/linux must be copied to orange
#self.__logAndExecute("/bin/cp -f " + os.path.join(self.buildDir,"orange/install-scripts/linux/setup.py") + " " + os.path.join(self.buildDir,"orange"))
#In buildDir> python setup.py build ...
self.addLog("#Compiling by running setup.py")
self.addLog("#Installation will be to made to " + self.AZOrangeInstallDir)
envTmp = os.path.join(self.currentDir,"env.tmp")
if os.path.isfile(envTmp):
os.system("/bin/rm -f " + envTmp)
cmdFormat = 'python setup.py --action build --platform %s --builddir %s --installdir %s --modulestoload "%s"' + \
' --envfile %s --dependencies "%s" --appspackgccmodule "%s" --openinstall %s' + \
' --logfile "%s" --detailslogfile "%s" --verbose %s'
self.__logAndExecute( cmdFormat % (self.platformType, self.buildDir, self.AZOrangeInstallDir, self.InstallTimeModules,
envTmp , self.dependencies, self.GCCmoduleForAppspackMPI, self.openInstallation,
str(self.logFile), str(self.detailsLogFile), str(self.verbosedLogging)))
#Update the local self.EnvVars with the ones passed by setup.py which will be needed at runtime
self.__update_EnvVars(envTmp)
    def runAfterInstallScripts(self):
        """Run post-install housekeeping inside the installed tree."""
        # Invokes the installed azorange/bin/clean.sh once.
        # NOTE(review): an earlier comment here described running archive.sh
        # to create AZO_NFZ_scratchDir; the code actually runs clean.sh.
        self.addLog("#Running after-install scripts")
        self.__logAndExecute(os.path.join(self.AZOrangeInstallDir,'azorange/bin/clean.sh'))
def createProfileExample(self, shellType = SHELL_TYPE_TCSH):
# In addition to the place defined in setup.ini for the template profile, a file called templateProfile will always be
# placed in:
# - Install Dir ($AZORANGEHOME):
# - system installation: place Profile Template at root of installDir
# - developer installation : place Profile template at root of trunkDir
# - within this running install dir
# NOTE: The $AZORANGEHOME/templateProfile will be used by MPI calls. Do not remove it from there and
# do not rename it.
self.addLog("*Create a Template profile")
#Compose the template profile content
if shellType == SHELL_TYPE_BASH:
strFile = "#!/bin/bash\n"
else:
strFile = "#!/bin/tcsh\n"
localVars = {"installDir":self.AZOrangeInstallDir}
strFile += "# Template profile for azorange installation at %(installDir)s\n" % localVars
if self.sources:
strFile += "\n# Additional sources needed\n"
for source in self.sources:
if shellType == SHELL_TYPE_BASH:
if os.path.isfile(source):
strFile += ". "+ source + "\n"
else:
strFile += "#. "+ source + "\n"
self.addLog("#The specified source file does not exist: " + source)
else:
if os.path.isfile(source):
strFile += "source "+ source + "\n"
else:
strFile += "#source "+ source + "\n"
self.addLog("#The specified source file does not exist: " + source)
#Variables
if shellType == SHELL_TYPE_BASH:
strFile += "export AZORANGEHOME=%(installDir)s\n" % localVars
else:
strFile += "setenv AZORANGEHOME %(installDir)s\n" % localVars
# LD_LIBRARY_PATH space separated paths in tcsh!!
#LD_LIBPaths = [localVars["installDir"]+"/orange", localVars["installDir"]+"/orangeDependencies/bin"]
LD_LIBPaths = [os.path.join("$AZORANGEHOME", "orange")]
if "LD_LIBRARY_PATH" in self.EnvVars:
for value in self.EnvVars["LD_LIBRARY_PATH"]:
if value not in LD_LIBPaths: LD_LIBPaths.insert(0,value)
self.EnvVars["LD_LIBRARY_PATH"] = LD_LIBPaths
libPathsStr = ""
for idx,value in enumerate(self.EnvVars["LD_LIBRARY_PATH"]):
if idx: libPathsStr += ":"
if str(value).startswith(self.AZOrangeInstallDir):
value = "$AZORANGEHOME" + str(value)[len(self.AZOrangeInstallDir):]
libPathsStr += value
if shellType == SHELL_TYPE_BASH:
strFile += "if [[ ! -z $LD_LIBRARY_PATH ]] ; then\n"
strFile += " export LD_LIBRARY_PATH=\"%s:${LD_LIBRARY_PATH}\"\n" % libPathsStr
strFile += "else\n"
strFile += " export LD_LIBRARY_PATH=\"%s\"\n" % libPathsStr
strFile += "fi\n"
else:
strFile += "if ( $?LD_LIBRARY_PATH ) then\n"
strFile += " setenv LD_LIBRARY_PATH \"%s:${LD_LIBRARY_PATH}\"\n" % libPathsStr
strFile += "else\n"
strFile += " setenv LD_LIBRARY_PATH \"%s\"\n" % libPathsStr
strFile += "endif\n"
# PATH
PATHPaths = [os.path.join("$AZORANGEHOME", "orangeDependencies", "bin"),"${PATH}"]
if "PATH" in self.EnvVars:
for value in self.EnvVars["PATH"]:
if value not in PATHPaths: PATHPaths.insert(0,value)
self.EnvVars["PATH"] = PATHPaths
# PYTHONPATH
pythonPaths = [".",os.path.join("$AZORANGEHOME", "orange"), os.path.join("$AZORANGEHOME", "azorange"), os.path.join("$AZORANGEHOME", "tests")]
if "PYTHONPATH" in self.EnvVars:
for value in self.EnvVars["PYTHONPATH"]:
if value not in pythonPaths: pythonPaths.insert(0,value)
self.EnvVars["PYTHONPATH"] = pythonPaths
pythonPathsStr = ""
for idx,value in enumerate(self.EnvVars["PYTHONPATH"]):
if idx: pythonPathsStr += ":"
if str(value).startswith(self.AZOrangeInstallDir):
value = "$AZORANGEHOME" + str(value)[len(self.AZOrangeInstallDir):]
pythonPathsStr += value
if shellType == SHELL_TYPE_BASH:
strFile += "if [[ ! -z $PYTHONPATH ]] ; then\n"
strFile += " export PYTHONPATH=%s:${PYTHONPATH}\n" % pythonPathsStr
strFile += "else\n"
strFile += " export PYTHONPATH=%s\n" % pythonPathsStr
strFile += "fi\n"
else:
strFile += "if ( $?PYTHONPATH ) then\n"
strFile += " setenv PYTHONPATH %s:${PYTHONPATH}\n" % pythonPathsStr
strFile += "else\n"
strFile += " setenv PYTHONPATH %s\n" % pythonPathsStr
strFile += "endif\n"
#Update and assign ALL Variables but "AZORANGEHOME" and "PYTHONPATH"
for envVar in [x for x in self.EnvVars if x.upper() not in ["AZORANGEHOME" , "PYTHONPATH","LD_LIBRARY_PATH"]]:
strValues = ""
for idx,value in enumerate(self.EnvVars[envVar]):
if idx: strValues += ":"
strValues += value
if shellType == SHELL_TYPE_BASH:
strFile += "export %s=%s\n" % (envVar.upper() ,strValues)
else:
strFile += "setenv %s %s\n" % (envVar.upper() ,strValues)
#Aliases
strFile += "\n# AZOrange canvas alias\n"
if shellType == SHELL_TYPE_BASH:
strFile += "alias azorange='python " + os.path.join("$AZORANGEHOME", "orange", "OrangeCanvas", "orngCanvas.pyw") + "'\n"
else:
strFile += "alias azorange python " + os.path.join("$AZORANGEHOME", "orange", "OrangeCanvas", "orngCanvas.pyw") + "\n"
#Modules
if eval(self.modules):
strFile += "\n# AZOrange module dependencies\n"
if shellType == SHELL_TYPE_BASH:
strFile += ". /etc/profile.d/modules.sh\n\n"
else:
strFile += "source /etc/profile.d/modules.csh\n\n"
for mname in eval(self.modules):
strFile += "module load %s\n" % (mname)
else:
strFile += "\n# Using NO modules!\n"
#RunTimeModules
if eval(self.RunTimeModules):
strFile += "\n# AZOrange module dependencies needed at runtime only\n"
for mname in eval(self.RunTimeModules):
strFile += "module load %s\n" % (mname)
else:
strFile += "\n# Using NO specific modules for runtime only!\n"
#Scripts to run upon setting the envitonment or loading the respective module
strFile += "\n# Startup scripts\n"
strFile += "# " + os.path.join("$AZORANGEHOME", "azorange", "bin", "clean.sh") + "\n"
#strFile += os.path.join(self.AZOrangeInstallDir, "azorange/bin/ssh_testcfg.sh") # This will be uncommented when using local mpi for the optimizer
#Write the template file to current dir
try:
if shellType == SHELL_TYPE_BASH:
localTemplateFile = os.path.join(self.currentDir,"templateProfile.bash")
else:
localTemplateFile = os.path.join(self.currentDir,"templateProfile")
pyFile = open(localTemplateFile,"w")
pyFile.write(strFile)
pyFile.close()
except:
self.addLog((1,"Failed to create profile template file"))
return
# #Write the template file to the user defined place (defined in setup.ini)
if shellType == SHELL_TYPE_BASH:
if localTemplateFile != self.TemplateProfileFileBash:
self.__logAndExecute("cp -p" + localTemplateFile +" "+ self.TemplateProfileFileBash)
self.addLog("#Profile template file created in "+self.TemplateProfileFileBash)
else:
if localTemplateFile != self.TemplateProfileFile:
self.__logAndExecute("cp -p" + localTemplateFile +" "+ self.TemplateProfileFile)
self.addLog("#Profile template file created in "+self.TemplateProfileFile)
#Write the template file to the install dir depending on the installType
if self.installType == "system" or self.repoInter=="export":
self.addLog("#Profile template file copied into installDir")
self.__logAndExecute("cp " + localTemplateFile + " " + self.AZOrangeInstallDir)
else:
self.addLog("#Profile template file copied into trunkDir")
self.__logAndExecute("cp " + localTemplateFile + " " + self.trunkDir)
def InstallCacheCleaner(self):
if self.cleanInstallCache:
#For system installation, removes trunk and build dir
#For developer installation, removes build dir
self.addLog("*Cleaning install Cache")
self.addLog("#Removing BuildDir")
self.__logAndExecute("chmod -R 777 " + self.buildDir)
self.__logAndExecute("rm -Rf " + self.buildDir)
#export did not create trunkDir, so, there will be no trunk to delete
#if self.installType == "system" and self.repoInter != "export":
# self.addLog("#Removing TrunkDir")
# self.__logAndExecute("chmod -R 777 " + self.trunkDir)
# self.__logAndExecute("rm -Rf " + self.trunkDir)
def printConfiguration(self,sendOnlyToDetails = False):
logStr = ""
for section in self.config.sections():
logStr+="["+section+"]\n"
for option in self.config.options(section):
logStr+=" "+option+"="+self.__getFromConfig(section, option)+"\n"
self.addLog("#Setup Configuration:")
if sendOnlyToDetails:
self.addLog((0,logStr))
else:
self.addLog("#"+logStr)
    def addLog(self,status,create=False):
        """Append an entry to both the summary and the details log (Python 2).

        status may be:
          * a string starting with "#" -- plain line (leading "#" stripped);
          * a string starting with "*" -- new section header with timestamp;
          * any other string          -- treated like a section header;
          * a (code, output) pair as returned by commands.getstatusoutput --
            the summary log gets a short "Detail Code" line, the details log
            gets the full output; a non-zero code also marks the whole
            installation as failed (self.successInstall = False).
        With create=True both log files are (re)created and status is
        written as the opening banner.
        """
        if create:
            log = open(self.logFile,"w")
            detailsLog = open(self.detailsLogFile,"w")
            logStr = status + " (" +time.asctime() + ")\n" +"="*60
            log.write(logStr+"\n")
            detailsLog.write(logStr+"\n")
            print logStr
            log.close()
            detailsLog.close()
            return
        #status can be the output of comands.getstatusoutput: (0, "something")
        #If status is a string, it can be a new log section if started by "*" or a
        #  simply line if started by "#"
        # Files are reopened in append mode on every call so partial logs
        # survive a crash mid-installation.
        log = open(self.logFile,"a")
        detailsLog = open(self.detailsLogFile,"a")
        if type(status) in types.StringTypes:
            if status[0]=="#":
                logStr = status[1:]
            elif status[0]=="*":
                logStr = "-"*60 + "\n"+ status[1:] + " (" + time.asctime() + ")\n"+"-"*60
            else:
                logStr ="-"*60 + "\n"+ status + " (" + time.asctime() + ")\n"+"-"*60
            log.write(logStr+"\n")
            detailsLog.write(logStr+"\n")
            print logStr
        else:
            if status[0]==0:
                # A successful command with no output logs nothing at all.
                if not status[1]:
                    log.close()
                    detailsLog.close()
                    return
                else:
                    # The timestamp suffix makes the detail code unique so the
                    # summary entry can be matched to its details-log block.
                    logStr = "Detail Code: OK" + "_"+str(time.time())
                    detailsLogStr = logStr+"\n\t\t" + status[1].replace("\n","\n\t\t")
            else:
                self.successInstall = False
                logStr = "ERROR: Detail Code: " + str(status[0]) + "_"+str(time.time())
                detailsLogStr=logStr+"\n\t\t" + status[1].replace("\n","\n\t\t")
            log.write(logStr+"\n")
            detailsLog.write(detailsLogStr+"\n")
            print logStr
        log.close()
        detailsLog.close()
    def emailLog(self):
        """Mail the combined install logs to the configured support addresses.

        Concatenates the summary log and the details log into allLogs.log
        and sends it via the system "mail" command to every comma-separated
        address in self.supportEmails.  The subject reflects
        self.successInstall.  A no-op when no addresses are configured.
        """
        self.addLog("*Support FeedBack")
        if not self.supportEmails:
            self.addLog("#No emails specified for feedBack")
            return
        # Build one file with summary log + separator + details log.
        allLogs=open(os.path.join(self.currentDir,"allLogs.log"),"w")
        log=open(self.logFile,"r")
        allLogs.write(log.read())
        allLogs.write("\n\n"+"*"*80+"\n\n\n")
        log.close()
        log=open(self.detailsLogFile,"r")
        allLogs.write(log.read())
        log.close()
        allLogs.close()
        if self.successInstall:
            subject = "AZOrange Installation report: Success!"
        else:
            subject = "AZOrange Installation report: FAILED"
        # One mail per recipient; a failed send is logged but not fatal.
        for email in self.supportEmails.split(","):
            status = commands.getstatusoutput('mail -s "%s" %s < %s' % (subject, email.strip(), os.path.join(self.currentDir,"allLogs.log")) )
            if status[0] != 0:
                self.addLog(status)
                self.addLog("#WARNING: Could not send a report to %s.\nPlease contact support." % email.strip())
            else:
                self.addLog("#Report sent to "+email.strip())
def __parse(self, arguments):
opt = OptionParser(usage="%prog [options]")
opt.add_option("-f", "--file", default="./setup.ini", dest='file', help='Path to INI file.')
opt.add_option("-p", "--prepare", default=False, action="store_true", help="Prepare the install dir extracting/getting all 3rd party code into place. It does not start the installation procedure.")
opt.add_option("-v", "--verbose", default=False, action="store_true", help="Enable verbose logging.")
return opt.parse_args(arguments)
# Script entry point: build an Installer from the command-line arguments
# (see __parse for the supported options) and run its main() routine.
if __name__ == "__main__":
    import sys
    installer = Installer(sys.argv[1:])
    installer.main()
|
AZCompTox/AZOrange
|
install/install.py
|
Python
|
lgpl-3.0
| 62,982
|
[
"CDK",
"RDKit"
] |
1ec29cc4312a2000aedf7c5397139c30c276faf43ecbaa7e7bcaba6625ea6545
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD style
"""
A module that implements scalar Gaussian Process based prediction (also
known as Kriging).
Contains
--------
GaussianProcess: The main class of the module that implements the Gaussian
Process prediction theory.
regression_models: A submodule that contains the built-in regression models.
correlation_models: A submodule that contains the built-in correlation models.
Implementation details
----------------------
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [1].
References
----------
[1] S.N. Lophaven, H.B. Nielsen and J. Sondergaard (2002).
DACE - A MATLAB Kriging Toolbox.
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
[2] W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell, and M.D.
Morris (1992). Screening, predicting, and computer experiments.
Technometrics, 34(1) 15--25.
"""
from .gaussian_process import GaussianProcess
from . import correlation_models
from . import regression_models
|
ominux/scikit-learn
|
sklearn/gaussian_process/__init__.py
|
Python
|
bsd-3-clause
| 1,189
|
[
"Gaussian"
] |
6c146ca3c3c9fa705062d79545c7f5dac82f516ab2629ae338d8d9f5db44b4da
|
#%%#############################################################################
# filterAndAggregate.py
# Copyright (c) 2017, Joshua J Hamilton and Katherine D McMahon
# Affiliation: Department of Bacteriology
# University of Wisconsin-Madison, Madison, Wisconsin, USA
# URL: http://http://mcmahonlab.wisc.edu/
# All rights reserved.
################################################################################
# Count reads which map to each (genome, gene) pair and to each (clade, COG)
# pair.
################################################################################
#%%#############################################################################
### Import packages
################################################################################
import os
import pandas as pd
import subprocess
#%%#############################################################################
### Static folder structure
################################################################################
# Define fixed input and output files.
# All paths are relative to this script's working directory.
concatFolder = '../../data/refGenomes/concat'       # concatenated references (.fna, with matching .gff)
genomeFolder = '../../data/refGenomes/fna'          # individual reference genomes (.fna)
sampleFolder = '../../data/sequences'               # sample reads (.fastq)
mapFolder = '../../data/mapping'
bamFolder = '../../data/mapping/bamFiles'           # per sample/reference alignments (.sam)
coverageFolder = '../../data/mapping/coverage-pooled'
countFolder = '../../data/mapping/htseq'            # htseq-count output written here
cogTable = '../../data/orthoMCL/cogTable.csv'       # COG -> per-genome CDS table
taxonFile = '../../data/externalData/taxonomy.csv'  # genome -> Lineage/Clade table
# Check that the new output directory exists and create if it doesn't
if not os.path.exists(countFolder):
    print("Creating output directory\n")
    os.makedirs(countFolder)
#%%#############################################################################
### Read in sample and genome lists. Create DF to store read countDict.
################################################################################
# Collect the basenames (extension stripped) of the relevant files in each
# input folder: samples (.fastq), genomes (.fna) and concatenated
# references (.fna).
sampleList = [f.replace('.fastq', '') for f in os.listdir(sampleFolder)
              if f.endswith('.fastq')]
genomeList = [f.replace('.fna', '') for f in os.listdir(genomeFolder)
              if f.endswith('.fna')]
concatList = [f.replace('.fna', '') for f in os.listdir(concatFolder)
              if f.endswith('.fna')]
#%%#############################################################################
### Count the reads which align to each CDS
################################################################################
# Parameters for the HTSeq-Count runs.  FIX: these variables were defined
# before but the command string hard-coded the same values, leaving them
# silently unused; the command is now built from them (same values, so the
# invocation is unchanged).
minQual = 0                          # -a: minimum alignment quality
featureType = 'CDS'                  # -t: GFF feature type to count
idAttr = 'locus_tag'                 # -i: GFF attribute used as the feature ID
overlapMode = 'intersection-strict'  # -m: read/feature overlap resolution mode
for sample in sampleList:
    for concat in concatList:
        samFile = bamFolder+'/'+sample+'-'+concat+'.sam'
        gffFile = concatFolder+'/'+concat+'.gff'
        outFile = countFolder+'/'+sample+'-'+concat+'.CDS.out'
        # Position-sorted SAM input, non-stranded counting; stdout is
        # redirected to the per-(sample, reference) count file.
        subprocess.call('htseq-count -f sam -r pos -s no -a '+str(minQual)+
                        ' -t '+featureType+' -i '+idAttr+' -m '+overlapMode+
                        ' '+samFile+' '+gffFile+' > '+outFile, shell=True)
#%%#############################################################################
### Filtering. Sum read counts per CDS across all samples and drop coding
### sequences which do not recruit at least readCutoff (10, below) reads.
### (An earlier version of this header said 50 reads; the code uses 10.)
################################################################################
# First, read in the read counts for each CDS
# Create empty dataframe to merge into.  The first file is read untruncated,
# so its index still contains htseq-count's five trailing "__*" summary rows;
# they survive the outer joins below and are dropped by name afterwards.
tempDF = pd.read_csv(countFolder+'/'+sampleList[0]+'-'+concatList[0]+'.CDS.out', sep='\t', index_col=0, names=[sampleList[0]])
readCountDF = pd.DataFrame(index=tempDF.index)
# And read in the counts
for sample in sampleList:
    for concat in concatList:
        tempDF = pd.read_csv(countFolder+'/'+sample+'-'+concat+'.CDS.out', sep='\t', index_col=0, names=[sample])
        # Trim the five htseq-count summary rows from this file's counts.
        tempDF = tempDF[:-5]
        # Merge with readCountDF (outer join keeps the union of CDS ids)
        readCountDF = pd.concat([readCountDF, tempDF], axis=1, join='outer')
## Drop stats from the readCountsDF (present via the initial, untrimmed index)
readCountDF = readCountDF.drop(['__alignment_not_unique', '__ambiguous', '__no_feature', '__not_aligned', '__too_low_aQual'], axis=0)
# Collapse to one total count per CDS across all samples/references.
readCountDF = readCountDF.sum(axis=1)
readCountDF.to_csv(countFolder+'/readCounts.csv', sep=',')
# Filter the results by dropping all genes which don't recruit at least ten reads
readCutoff = 10
readCountDF = readCountDF.loc[readCountDF >= readCutoff]
readCountDF.to_csv(countFolder+'/filteredReadCounts.csv', sep=',')
#%%#############################################################################
### Integrate data from taxonomy and cog tables into a single data
### structure
################################################################################
# Read the taxonomy once -- it is loop-invariant, so the per-iteration
# re-read of the old code was redundant.  pd.read_csv replaces the
# deprecated pd.DataFrame.from_csv (removed in pandas 1.0); the rest of
# this script already uses read_csv.
taxonClass = pd.read_csv(taxonFile, sep=',', index_col=0)
taxonClass = taxonClass.dropna()
# Map each clade to the list of genomes it contains, lineage by lineage.
cladeToGenomeDict = {}
cladeList = []
for concat in concatList:
    # Restrict to the genomes belonging to this lineage (non-destructive
    # filter instead of the old drop-the-complement idiom).
    lineageClass = taxonClass[taxonClass['Lineage'] == concat]
    innerCladeList = pd.unique(lineageClass['Clade'].values)
    for clade in innerCladeList:
        cladeToGenomeDict[clade] = lineageClass[lineageClass['Clade'] == clade].index.tolist()
    cladeList = cladeList + innerCladeList.tolist()
# Read in the COG table
cogTableDF = pd.read_csv(cogTable, index_col=0)
# Create and populate the dataframe indexed by (clade, COG); each cell is
# the comma-joined list of CDS identifiers for that clade/COG pair.
cladeCogToCdsIndex = pd.MultiIndex.from_product([cladeList, cogTableDF.index.tolist()], names=['Clade', 'COG'])
cladeCogToCdsDF = pd.DataFrame(index=cladeCogToCdsIndex, columns=['CDS'])
for index in cladeCogToCdsDF.index:
    clade = index[0]
    cog = index[1]
    cdsList = []
    for innerGenome in cladeToGenomeDict[clade]:
        # Genomes lacking this COG have NaN in the table; skip them.
        if not pd.isnull(cogTableDF.loc[cog][innerGenome]):
            cdsList = cdsList + cogTableDF.loc[cog][innerGenome].split(';')
    cladeCogToCdsDF.loc[index] = ','.join(cdsList)
# Sort by the multi-index and write to file
cladeCogToCdsDF = cladeCogToCdsDF.sort_index(axis=0)
cladeCogToCdsDF.to_csv(mapFolder+'/cladesCogsToCDS.csv')
# Round-trip through a singly-indexed read to drop empty rows, then rewrite.
cladeCogToCdsDF = pd.read_csv(mapFolder+'/cladesCogsToCDS.csv', index_col=0)
cladeCogToCdsDF = cladeCogToCdsDF.dropna(axis=0, how='any')
cladeCogToCdsDF.to_csv(mapFolder+'/cladesCogsToCDS.csv')
|
joshamilton/Hamilton_acI_2017
|
code/mapping/05filterAndAggregate.py
|
Python
|
mit
| 6,718
|
[
"HTSeq"
] |
2b5d26d7a2e08d6e24328908990f8823dc05239fd91a47b0752e858ad4a933b0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import tempfile
import unittest
import warnings
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Orbital, Spin
from pymatgen.io.lobster import (
Bandoverlaps,
Charge,
Cohpcar,
Doscar,
Fatband,
Grosspop,
Icohplist,
Lobsterin,
Lobsterout,
Wavefunction,
)
from pymatgen.io.lobster.inputs import get_all_possible_basis_combinations
from pymatgen.io.vasp import Vasprun
from pymatgen.io.vasp.inputs import Incar, Kpoints, Potcar
from pymatgen.util.testing import PymatgenTest
# Module metadata.
__author__ = "Janine George, Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.2"
__email__ = "janine.george@uclouvain.be, esters@uoregon.edu"
__date__ = "Dec 10, 2017"
# Directories used to locate test fixture files.
test_dir_doscar = PymatgenTest.TEST_FILES_DIR
this_dir = os.path.dirname(os.path.abspath(__file__))
class CohpcarTest(PymatgenTest):
    """Tests for Cohpcar parsing of LOBSTER COHPCAR/COOPCAR output files."""

    def setUp(self):
        """Parse reference COHPCAR/COOPCAR fixtures (Lobster 2.x and 3.1)."""
        # Non-spin-polarized BiSe, as COHP and as COOP.
        self.cohp_bise = Cohpcar(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COHPCAR.lobster.BiSe"))
        self.coop_bise = Cohpcar(
            filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COOPCAR.lobster.BiSe"),
            are_coops=True,
        )
        # NOTE(review): cohp_fe reads a COOPCAR file without are_coops=True —
        # presumably intentional (only the numeric layout matters here); confirm.
        self.cohp_fe = Cohpcar(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COOPCAR.lobster"))
        self.coop_fe = Cohpcar(
            filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COOPCAR.lobster"),
            are_coops=True,
        )
        # Orbital-resolved fixtures (with and without the "total" columns).
        self.orb = Cohpcar(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COHPCAR.lobster.orbitalwise"))
        self.orb_notot = Cohpcar(
            filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COHPCAR.lobster.notot.orbitalwise")
        )
        # Lobster 3.1 (Test data is from prerelease of Lobster 3.1)
        self.cohp_KF = Cohpcar(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COHPCAR.lobster.KF"))
        self.coop_KF = Cohpcar(
            filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COHPCAR.lobster.KF"),
            are_coops=True,
        )
        # example with f electrons
        self.cohp_Na2UO4 = Cohpcar(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COHPCAR.lobster.Na2UO4"))
        self.coop_Na2UO4 = Cohpcar(
            filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "COOPCAR.lobster.Na2UO4"),
            are_coops=True,
        )

    def test_attributes(self):
        """Check are_coops/is_spin_polarized flags and array lengths."""
        self.assertFalse(self.cohp_bise.are_coops)
        self.assertTrue(self.coop_bise.are_coops)
        self.assertFalse(self.cohp_bise.is_spin_polarized)
        self.assertFalse(self.coop_bise.is_spin_polarized)
        self.assertFalse(self.cohp_fe.are_coops)
        self.assertTrue(self.coop_fe.are_coops)
        self.assertTrue(self.cohp_fe.is_spin_polarized)
        self.assertTrue(self.coop_fe.is_spin_polarized)
        self.assertEqual(len(self.cohp_bise.energies), 241)
        self.assertEqual(len(self.coop_bise.energies), 241)
        self.assertEqual(len(self.cohp_fe.energies), 301)
        self.assertEqual(len(self.coop_fe.energies), 301)
        self.assertEqual(len(self.cohp_bise.cohp_data), 12)
        self.assertEqual(len(self.coop_bise.cohp_data), 12)
        self.assertEqual(len(self.cohp_fe.cohp_data), 3)
        self.assertEqual(len(self.coop_fe.cohp_data), 3)
        # Lobster 3.1
        self.assertFalse(self.cohp_KF.are_coops)
        self.assertTrue(self.coop_KF.are_coops)
        self.assertFalse(self.cohp_KF.is_spin_polarized)
        self.assertFalse(self.coop_KF.is_spin_polarized)
        self.assertEqual(len(self.cohp_KF.energies), 6)
        self.assertEqual(len(self.coop_KF.energies), 6)
        self.assertEqual(len(self.cohp_KF.cohp_data), 7)
        self.assertEqual(len(self.coop_KF.cohp_data), 7)

    def test_energies(self):
        """Check Fermi energies and the energy-window limits of each fixture."""
        efermi_bise = 5.90043
        elim_bise = (-0.124679, 11.9255)
        efermi_fe = 9.75576
        elim_fe = (-0.277681, 14.7725)
        efermi_KF = -2.87475
        # KF limits are stored relative to the Fermi level in the file.
        elim_KF = (-11.25000 + efermi_KF, 7.5000 + efermi_KF)
        self.assertEqual(self.cohp_bise.efermi, efermi_bise)
        self.assertEqual(self.coop_bise.efermi, efermi_bise)
        self.assertEqual(self.cohp_fe.efermi, efermi_fe)
        self.assertEqual(self.coop_fe.efermi, efermi_fe)
        # Lobster 3.1
        self.assertEqual(self.cohp_KF.efermi, efermi_KF)
        self.assertEqual(self.coop_KF.efermi, efermi_KF)
        self.assertAlmostEqual(self.cohp_bise.energies[0] + self.cohp_bise.efermi, elim_bise[0], places=4)
        self.assertAlmostEqual(self.cohp_bise.energies[-1] + self.cohp_bise.efermi, elim_bise[1], places=4)
        self.assertAlmostEqual(self.coop_bise.energies[0] + self.coop_bise.efermi, elim_bise[0], places=4)
        self.assertAlmostEqual(self.coop_bise.energies[-1] + self.coop_bise.efermi, elim_bise[1], places=4)
        self.assertAlmostEqual(self.cohp_fe.energies[0] + self.cohp_fe.efermi, elim_fe[0], places=4)
        self.assertAlmostEqual(self.cohp_fe.energies[-1] + self.cohp_fe.efermi, elim_fe[1], places=4)
        self.assertAlmostEqual(self.coop_fe.energies[0] + self.coop_fe.efermi, elim_fe[0], places=4)
        self.assertAlmostEqual(self.coop_fe.energies[-1] + self.coop_fe.efermi, elim_fe[1], places=4)
        # Lobster 3.1
        self.assertAlmostEqual(self.cohp_KF.energies[0] + self.cohp_KF.efermi, elim_KF[0], places=4)
        self.assertAlmostEqual(self.cohp_KF.energies[-1] + self.cohp_KF.efermi, elim_KF[1], places=4)
        self.assertAlmostEqual(self.coop_KF.energies[0] + self.coop_KF.efermi, elim_KF[0], places=4)
        self.assertAlmostEqual(self.coop_KF.energies[-1] + self.coop_KF.efermi, elim_KF[1], places=4)

    def test_cohp_data(self):
        """Check bond lengths, site indices and data lengths per interaction."""
        # Expected {bond label: (bond length, (site index i, site index j))}.
        lengths_sites_bise = {
            "1": (2.882308829886294, (0, 6)),
            "2": (3.1014396233274444, (0, 9)),
            "3": (2.8823088298862083, (1, 7)),
            "4": (3.1014396233275434, (1, 8)),
            "5": (3.0500070394403904, (2, 9)),
            "6": (2.9167594580335807, (2, 10)),
            "7": (3.05000703944039, (3, 8)),
            "8": (2.9167594580335803, (3, 11)),
            "9": (3.3752173204052101, (4, 11)),
            "10": (3.0729354518345948, (4, 5)),
            "11": (3.3752173204052101, (5, 10)),
        }
        lengths_sites_fe = {
            "1": (2.8318907764979082, (7, 6)),
            "2": (2.4524893531900283, (7, 8)),
        }
        # Lobster 3.1
        lengths_sites_KF = {
            "1": (2.7119923200622269, (0, 1)),
            "2": (2.7119923200622269, (0, 1)),
            "3": (2.7119923576010501, (0, 1)),
            "4": (2.7119923576010501, (0, 1)),
            "5": (2.7119923200622269, (0, 1)),
            "6": (2.7119923200622269, (0, 1)),
        }
        for data in [self.cohp_bise.cohp_data, self.coop_bise.cohp_data]:
            for bond in data:
                if bond != "average":
                    self.assertEqual(data[bond]["length"], lengths_sites_bise[bond][0])
                    self.assertEqual(data[bond]["sites"], lengths_sites_bise[bond][1])
                    self.assertEqual(len(data[bond]["COHP"][Spin.up]), 241)
                    self.assertEqual(len(data[bond]["ICOHP"][Spin.up]), 241)
        for data in [self.cohp_fe.cohp_data, self.coop_fe.cohp_data]:
            for bond in data:
                if bond != "average":
                    self.assertEqual(data[bond]["length"], lengths_sites_fe[bond][0])
                    self.assertEqual(data[bond]["sites"], lengths_sites_fe[bond][1])
                    self.assertEqual(len(data[bond]["COHP"][Spin.up]), 301)
                    self.assertEqual(len(data[bond]["ICOHP"][Spin.up]), 301)
        # Lobster 3.1
        for data in [self.cohp_KF.cohp_data, self.coop_KF.cohp_data]:
            for bond in data:
                if bond != "average":
                    self.assertEqual(data[bond]["length"], lengths_sites_KF[bond][0])
                    self.assertEqual(data[bond]["sites"], lengths_sites_KF[bond][1])
                    self.assertEqual(len(data[bond]["COHP"][Spin.up]), 6)
                    self.assertEqual(len(data[bond]["ICOHP"][Spin.up]), 6)

    def test_orbital_resolved_cohp(self):
        """Check orbital-resolved COHPs, including d/f orbitals and sums."""
        # All (Orbital_i, Orbital_j) pairs for the first four orbitals.
        orbitals = [tuple((Orbital(i), Orbital(j))) for j in range(4) for i in range(4)]
        self.assertIsNone(self.cohp_bise.orb_res_cohp)
        self.assertIsNone(self.coop_bise.orb_res_cohp)
        self.assertIsNone(self.cohp_fe.orb_res_cohp)
        self.assertIsNone(self.coop_fe.orb_res_cohp)
        # "notot" fixture has orbital columns only — no total COHP/ICOHP.
        self.assertIsNone(self.orb_notot.cohp_data["1"]["COHP"])
        self.assertIsNone(self.orb_notot.cohp_data["1"]["ICOHP"])
        for orbs in self.orb.orb_res_cohp["1"]:
            orb_set = self.orb.orb_res_cohp["1"][orbs]["orbitals"]
            self.assertEqual(orb_set[0][0], 4)
            self.assertEqual(orb_set[1][0], 4)
            self.assertIn(tuple((orb_set[0][1], orb_set[1][1])), orbitals)
        # test d and f orbitals
        # Expected principal quantum numbers for bond "49" of Na2UO4,
        # in the sorted order of the orbital labels (parallel to comparelist2).
        comparelist = [
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            5,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            6,
            7,
            7,
            7,
            7,
        ]
        # Expected orbital labels, parallel to comparelist above.
        comparelist2 = [
            "f0",
            "f0",
            "f0",
            "f0",
            "f1",
            "f1",
            "f1",
            "f1",
            "f2",
            "f2",
            "f2",
            "f2",
            "f3",
            "f3",
            "f3",
            "f3",
            "f_1",
            "f_1",
            "f_1",
            "f_1",
            "f_2",
            "f_2",
            "f_2",
            "f_2",
            "f_3",
            "f_3",
            "f_3",
            "f_3",
            "dx2",
            "dx2",
            "dx2",
            "dx2",
            "dxy",
            "dxy",
            "dxy",
            "dxy",
            "dxz",
            "dxz",
            "dxz",
            "dxz",
            "dyz",
            "dyz",
            "dyz",
            "dyz",
            "dz2",
            "dz2",
            "dz2",
            "dz2",
            "px",
            "px",
            "px",
            "px",
            "py",
            "py",
            "py",
            "py",
            "pz",
            "pz",
            "pz",
            "pz",
            "s",
            "s",
            "s",
            "s",
            "s",
            "s",
            "s",
            "s",
        ]
        for iorb, orbs in enumerate(sorted(self.cohp_Na2UO4.orb_res_cohp["49"])):
            orb_set = self.cohp_Na2UO4.orb_res_cohp["49"][orbs]["orbitals"]
            self.assertEqual(orb_set[0][0], comparelist[iorb])
            self.assertEqual(str(orb_set[0][1]), comparelist2[iorb])
        # The sum of the orbital-resolved COHPs should be approximately
        # the total COHP. Due to small deviations in the LOBSTER calculation,
        # the precision is not very high though.
        cohp = self.orb.cohp_data["1"]["COHP"][Spin.up]
        icohp = self.orb.cohp_data["1"]["ICOHP"][Spin.up]
        tot = np.sum(
            [self.orb.orb_res_cohp["1"][orbs]["COHP"][Spin.up] for orbs in self.orb.orb_res_cohp["1"]],
            axis=0,
        )
        self.assertArrayAlmostEqual(tot, cohp, decimal=3)
        tot = np.sum(
            [self.orb.orb_res_cohp["1"][orbs]["ICOHP"][Spin.up] for orbs in self.orb.orb_res_cohp["1"]],
            axis=0,
        )
        self.assertArrayAlmostEqual(tot, icohp, decimal=3)
        # Lobster 3.1
        cohp_KF = self.cohp_KF.cohp_data["1"]["COHP"][Spin.up]
        icohp_KF = self.cohp_KF.cohp_data["1"]["ICOHP"][Spin.up]
        tot_KF = np.sum(
            [self.cohp_KF.orb_res_cohp["1"][orbs]["COHP"][Spin.up] for orbs in self.cohp_KF.orb_res_cohp["1"]],
            axis=0,
        )
        self.assertArrayAlmostEqual(tot_KF, cohp_KF, decimal=3)
        tot_KF = np.sum(
            [self.cohp_KF.orb_res_cohp["1"][orbs]["ICOHP"][Spin.up] for orbs in self.cohp_KF.orb_res_cohp["1"]],
            axis=0,
        )
        self.assertArrayAlmostEqual(tot_KF, icohp_KF, decimal=3)
        # d and f orbitals
        cohp_Na2UO4 = self.cohp_Na2UO4.cohp_data["49"]["COHP"][Spin.up]
        icohp_Na2UO4 = self.cohp_Na2UO4.cohp_data["49"]["ICOHP"][Spin.up]
        tot_Na2UO4 = np.sum(
            [
                self.cohp_Na2UO4.orb_res_cohp["49"][orbs]["COHP"][Spin.up]
                for orbs in self.cohp_Na2UO4.orb_res_cohp["49"]
            ],
            axis=0,
        )
        self.assertArrayAlmostEqual(tot_Na2UO4, cohp_Na2UO4, decimal=3)
        tot_Na2UO4 = np.sum(
            [
                self.cohp_Na2UO4.orb_res_cohp["49"][orbs]["ICOHP"][Spin.up]
                for orbs in self.cohp_Na2UO4.orb_res_cohp["49"]
            ],
            axis=0,
        )
        self.assertArrayAlmostEqual(tot_Na2UO4, icohp_Na2UO4, decimal=3)
class IcohplistTest(unittest.TestCase):
    """Tests for Icohplist parsing of LOBSTER ICOHPLIST/ICOOPLIST files."""

    def setUp(self):
        """Parse reference ICOHPLIST/ICOOPLIST fixtures."""
        self.icohp_bise = Icohplist(
            filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "ICOHPLIST.lobster.BiSe")
        )
        self.icoop_bise = Icohplist(
            filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "ICOOPLIST.lobster.BiSe"),
            are_coops=True,
        )
        self.icohp_fe = Icohplist(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "ICOHPLIST.lobster"))
        # allow gzipped files (construction itself is the check here; the
        # object is not asserted against later)
        self.icohp_gzipped = Icohplist(
            filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "ICOHPLIST.lobster.gz")
        )
        # NOTE(review): reads an ICOHPLIST file with are_coops=True —
        # presumably intentional (only the numeric layout matters); confirm.
        self.icoop_fe = Icohplist(
            filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "ICOHPLIST.lobster"),
            are_coops=True,
        )

    def test_attributes(self):
        """Check are_coops/is_spin_polarized flags and list lengths."""
        self.assertFalse(self.icohp_bise.are_coops)
        self.assertTrue(self.icoop_bise.are_coops)
        self.assertFalse(self.icohp_bise.is_spin_polarized)
        self.assertFalse(self.icoop_bise.is_spin_polarized)
        self.assertEqual(len(self.icohp_bise.icohplist), 11)
        self.assertEqual(len(self.icoop_bise.icohplist), 11)
        self.assertFalse(self.icohp_fe.are_coops)
        self.assertTrue(self.icoop_fe.are_coops)
        self.assertTrue(self.icohp_fe.is_spin_polarized)
        self.assertTrue(self.icoop_fe.is_spin_polarized)
        self.assertEqual(len(self.icohp_fe.icohplist), 2)
        self.assertEqual(len(self.icoop_fe.icohplist), 2)

    def test_values(self):
        """Compare parsed icohplist dicts against the expected reference data."""
        icohplist_bise = {
            "1": {
                "length": 2.88231,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -2.18042},
                "translation": [0, 0, 0],
            },
            "2": {
                "length": 3.10144,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -1.14347},
                "translation": [0, 0, 0],
            },
            "3": {
                "length": 2.88231,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -2.18042},
                "translation": [0, 0, 0],
            },
            "4": {
                "length": 3.10144,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -1.14348},
                "translation": [0, 0, 0],
            },
            "5": {
                "length": 3.05001,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -1.30006},
                "translation": [0, 0, 0],
            },
            "6": {
                "length": 2.91676,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -1.96843},
                "translation": [0, 0, 0],
            },
            "7": {
                "length": 3.05001,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -1.30006},
                "translation": [0, 0, 0],
            },
            "8": {
                "length": 2.91676,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -1.96843},
                "translation": [0, 0, 0],
            },
            "9": {
                "length": 3.37522,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -0.47531},
                "translation": [0, 0, 0],
            },
            "10": {
                "length": 3.07294,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -2.38796},
                "translation": [0, 0, 0],
            },
            "11": {
                "length": 3.37522,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -0.47531},
                "translation": [0, 0, 0],
            },
        }
        # NOTE(review): icooplist_bise is defined but never asserted below —
        # possibly a missing assertEqual against self.icoop_bise.icohplist; confirm.
        icooplist_bise = {
            "1": {
                "length": 2.88231,
                "number_of_bonds": 3,
                "icohp": {Spin.up: 0.14245},
                "translation": [0, 0, 0],
            },
            "2": {
                "length": 3.10144,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -0.04118},
                "translation": [0, 0, 0],
            },
            "3": {
                "length": 2.88231,
                "number_of_bonds": 3,
                "icohp": {Spin.up: 0.14245},
                "translation": [0, 0, 0],
            },
            "4": {
                "length": 3.10144,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -0.04118},
                "translation": [0, 0, 0],
            },
            "5": {
                "length": 3.05001,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -0.03516},
                "translation": [0, 0, 0],
            },
            "6": {
                "length": 2.91676,
                "number_of_bonds": 3,
                "icohp": {Spin.up: 0.10745},
                "translation": [0, 0, 0],
            },
            "7": {
                "length": 3.05001,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -0.03516},
                "translation": [0, 0, 0],
            },
            "8": {
                "length": 2.91676,
                "number_of_bonds": 3,
                "icohp": {Spin.up: 0.10745},
                "translation": [0, 0, 0],
            },
            "9": {
                "length": 3.37522,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -0.12395},
                "translation": [0, 0, 0],
            },
            "10": {
                "length": 3.07294,
                "number_of_bonds": 3,
                "icohp": {Spin.up: 0.24714},
                "translation": [0, 0, 0],
            },
            "11": {
                "length": 3.37522,
                "number_of_bonds": 3,
                "icohp": {Spin.up: -0.12395},
                "translation": [0, 0, 0],
            },
        }
        # Spin-polarized case: both Spin.up and Spin.down entries expected.
        icooplist_fe = {
            "1": {
                "length": 2.83189,
                "number_of_bonds": 2,
                "icohp": {Spin.up: -0.10218, Spin.down: -0.19701},
                "translation": [0, 0, 0],
            },
            "2": {
                "length": 2.45249,
                "number_of_bonds": 1,
                "icohp": {Spin.up: -0.28485, Spin.down: -0.58279},
                "translation": [0, 0, 0],
            },
        }
        self.assertEqual(icohplist_bise, self.icohp_bise.icohplist)
        self.assertEqual(icooplist_fe, self.icoop_fe.icohplist)
class DoscarTest(unittest.TestCase):
    """Tests for Doscar parsing of LOBSTER DOSCAR files (spin and non-spin)."""

    def setUp(self):
        """Parse spin-polarized and non-spin DOSCAR fixtures plus a reference structure."""
        # first for spin polarized version
        doscar = os.path.join(test_dir_doscar, "DOSCAR.lobster.spin")
        poscar = os.path.join(test_dir_doscar, "POSCAR.lobster.spin_DOS")
        # not spin polarized
        doscar2 = os.path.join(test_dir_doscar, "DOSCAR.lobster.nonspin")
        poscar2 = os.path.join(test_dir_doscar, "POSCAR.lobster.nonspin_DOS")
        self.DOSCAR_spin_pol = Doscar(doscar=doscar, structure_file=poscar)
        self.DOSCAR_nonspin_pol = Doscar(doscar=doscar2, structure_file=poscar2)
        # Reference KF structure for comparing coordinates/sites.
        with open(os.path.join(test_dir_doscar, "structure_KF.json"), "r") as f:
            data = json.load(f)
        self.structure = Structure.from_dict(data)

    def test_completedos(self):
        """Check the CompleteDos object: energies, total DOS, structure, and PDOS."""
        # first for spin polarized version
        energies_spin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
        tdos_up = [0.00000, 0.79999, 0.00000, 0.79999, 0.00000, 0.02577]
        tdos_down = [0.00000, 0.79999, 0.00000, 0.79999, 0.00000, 0.02586]
        fermi = 0.0
        # Expected projected DOS for the F site, per orbital and spin.
        PDOS_F_2s_up = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
        PDOS_F_2s_down = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
        PDOS_F_2py_up = [0.00000, 0.00160, 0.00000, 0.25801, 0.00000, 0.00029]
        PDOS_F_2py_down = [0.00000, 0.00161, 0.00000, 0.25819, 0.00000, 0.00029]
        PDOS_F_2pz_up = [0.00000, 0.00161, 0.00000, 0.25823, 0.00000, 0.00029]
        PDOS_F_2pz_down = [0.00000, 0.00160, 0.00000, 0.25795, 0.00000, 0.00029]
        PDOS_F_2px_up = [0.00000, 0.00160, 0.00000, 0.25805, 0.00000, 0.00029]
        PDOS_F_2px_down = [0.00000, 0.00161, 0.00000, 0.25814, 0.00000, 0.00029]
        self.assertListEqual(energies_spin, self.DOSCAR_spin_pol.completedos.energies.tolist())
        self.assertListEqual(tdos_up, self.DOSCAR_spin_pol.completedos.densities[Spin.up].tolist())
        self.assertListEqual(tdos_down, self.DOSCAR_spin_pol.completedos.densities[Spin.down].tolist())
        self.assertAlmostEqual(fermi, self.DOSCAR_spin_pol.completedos.efermi)
        # Compare fractional coordinates element-wise against the reference structure.
        for coords, coords2 in zip(
            self.DOSCAR_spin_pol.completedos.structure.frac_coords,
            self.structure.frac_coords,
        ):
            for xyz, xyz2 in zip(coords, coords2):
                self.assertAlmostEqual(xyz, xyz2)
        self.assertListEqual(
            self.DOSCAR_spin_pol.completedos.pdos[self.structure[0]]["2s"][Spin.up].tolist(),
            PDOS_F_2s_up,
        )
        self.assertListEqual(
            self.DOSCAR_spin_pol.completedos.pdos[self.structure[0]]["2s"][Spin.down].tolist(),
            PDOS_F_2s_down,
        )
        self.assertListEqual(
            self.DOSCAR_spin_pol.completedos.pdos[self.structure[0]]["2p_y"][Spin.up].tolist(),
            PDOS_F_2py_up,
        )
        self.assertListEqual(
            self.DOSCAR_spin_pol.completedos.pdos[self.structure[0]]["2p_y"][Spin.down].tolist(),
            PDOS_F_2py_down,
        )
        self.assertListEqual(
            self.DOSCAR_spin_pol.completedos.pdos[self.structure[0]]["2p_z"][Spin.up].tolist(),
            PDOS_F_2pz_up,
        )
        self.assertListEqual(
            self.DOSCAR_spin_pol.completedos.pdos[self.structure[0]]["2p_z"][Spin.down].tolist(),
            PDOS_F_2pz_down,
        )
        self.assertListEqual(
            self.DOSCAR_spin_pol.completedos.pdos[self.structure[0]]["2p_x"][Spin.up].tolist(),
            PDOS_F_2px_up,
        )
        self.assertListEqual(
            self.DOSCAR_spin_pol.completedos.pdos[self.structure[0]]["2p_x"][Spin.down].tolist(),
            PDOS_F_2px_down,
        )
        # Non-spin-polarized case: single (Spin.up) channel only.
        energies_nonspin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
        tdos_nonspin = [0.00000, 1.60000, 0.00000, 1.60000, 0.00000, 0.02418]
        PDOS_F_2s = [0.00000, 0.00320, 0.00000, 0.00017, 0.00000, 0.00060]
        PDOS_F_2py = [0.00000, 0.00322, 0.00000, 0.51635, 0.00000, 0.00037]
        PDOS_F_2pz = [0.00000, 0.00322, 0.00000, 0.51636, 0.00000, 0.00037]
        PDOS_F_2px = [0.00000, 0.00322, 0.00000, 0.51634, 0.00000, 0.00037]
        self.assertListEqual(energies_nonspin, self.DOSCAR_nonspin_pol.completedos.energies.tolist())
        self.assertListEqual(
            tdos_nonspin,
            self.DOSCAR_nonspin_pol.completedos.densities[Spin.up].tolist(),
        )
        self.assertAlmostEqual(fermi, self.DOSCAR_nonspin_pol.completedos.efermi)
        self.assertDictEqual(
            self.DOSCAR_nonspin_pol.completedos.structure.as_dict(),
            self.structure.as_dict(),
        )
        self.assertListEqual(
            self.DOSCAR_nonspin_pol.completedos.pdos[self.structure[0]]["2s"][Spin.up].tolist(),
            PDOS_F_2s,
        )
        self.assertListEqual(
            self.DOSCAR_nonspin_pol.completedos.pdos[self.structure[0]]["2p_y"][Spin.up].tolist(),
            PDOS_F_2py,
        )
        self.assertListEqual(
            self.DOSCAR_nonspin_pol.completedos.pdos[self.structure[0]]["2p_z"][Spin.up].tolist(),
            PDOS_F_2pz,
        )
        self.assertListEqual(
            self.DOSCAR_nonspin_pol.completedos.pdos[self.structure[0]]["2p_x"][Spin.up].tolist(),
            PDOS_F_2px,
        )

    def test_pdos(self):
        """Check the raw pdos attribute (indexed by site position, not Site object)."""
        # first for spin polarized version
        PDOS_F_2s_up = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
        PDOS_F_2s_down = [0.00000, 0.00159, 0.00000, 0.00011, 0.00000, 0.00069]
        PDOS_F_2py_up = [0.00000, 0.00160, 0.00000, 0.25801, 0.00000, 0.00029]
        PDOS_F_2py_down = [0.00000, 0.00161, 0.00000, 0.25819, 0.00000, 0.00029]
        PDOS_F_2pz_up = [0.00000, 0.00161, 0.00000, 0.25823, 0.00000, 0.00029]
        PDOS_F_2pz_down = [0.00000, 0.00160, 0.00000, 0.25795, 0.00000, 0.00029]
        PDOS_F_2px_up = [0.00000, 0.00160, 0.00000, 0.25805, 0.00000, 0.00029]
        PDOS_F_2px_down = [0.00000, 0.00161, 0.00000, 0.25814, 0.00000, 0.00029]
        self.assertListEqual(self.DOSCAR_spin_pol.pdos[0]["2s"][Spin.up].tolist(), PDOS_F_2s_up)
        self.assertListEqual(self.DOSCAR_spin_pol.pdos[0]["2s"][Spin.down].tolist(), PDOS_F_2s_down)
        self.assertListEqual(self.DOSCAR_spin_pol.pdos[0]["2p_y"][Spin.up].tolist(), PDOS_F_2py_up)
        self.assertListEqual(self.DOSCAR_spin_pol.pdos[0]["2p_y"][Spin.down].tolist(), PDOS_F_2py_down)
        self.assertListEqual(self.DOSCAR_spin_pol.pdos[0]["2p_z"][Spin.up].tolist(), PDOS_F_2pz_up)
        self.assertListEqual(self.DOSCAR_spin_pol.pdos[0]["2p_z"][Spin.down].tolist(), PDOS_F_2pz_down)
        self.assertListEqual(self.DOSCAR_spin_pol.pdos[0]["2p_x"][Spin.up].tolist(), PDOS_F_2px_up)
        self.assertListEqual(self.DOSCAR_spin_pol.pdos[0]["2p_x"][Spin.down].tolist(), PDOS_F_2px_down)
        # non spin
        PDOS_F_2s = [0.00000, 0.00320, 0.00000, 0.00017, 0.00000, 0.00060]
        PDOS_F_2py = [0.00000, 0.00322, 0.00000, 0.51635, 0.00000, 0.00037]
        PDOS_F_2pz = [0.00000, 0.00322, 0.00000, 0.51636, 0.00000, 0.00037]
        PDOS_F_2px = [0.00000, 0.00322, 0.00000, 0.51634, 0.00000, 0.00037]
        self.assertListEqual(self.DOSCAR_nonspin_pol.pdos[0]["2s"][Spin.up].tolist(), PDOS_F_2s)
        self.assertListEqual(self.DOSCAR_nonspin_pol.pdos[0]["2p_y"][Spin.up].tolist(), PDOS_F_2py)
        self.assertListEqual(self.DOSCAR_nonspin_pol.pdos[0]["2p_z"][Spin.up].tolist(), PDOS_F_2pz)
        self.assertListEqual(self.DOSCAR_nonspin_pol.pdos[0]["2p_x"][Spin.up].tolist(), PDOS_F_2px)

    def test_tdos(self):
        """Check the total-DOS (Dos) object for both fixtures."""
        # first for spin polarized version
        energies_spin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
        tdos_up = [0.00000, 0.79999, 0.00000, 0.79999, 0.00000, 0.02577]
        tdos_down = [0.00000, 0.79999, 0.00000, 0.79999, 0.00000, 0.02586]
        fermi = 0.0
        self.assertListEqual(energies_spin, self.DOSCAR_spin_pol.tdos.energies.tolist())
        self.assertListEqual(tdos_up, self.DOSCAR_spin_pol.tdos.densities[Spin.up].tolist())
        self.assertListEqual(tdos_down, self.DOSCAR_spin_pol.tdos.densities[Spin.down].tolist())
        self.assertAlmostEqual(fermi, self.DOSCAR_spin_pol.tdos.efermi)
        energies_nonspin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
        tdos_nonspin = [0.00000, 1.60000, 0.00000, 1.60000, 0.00000, 0.02418]
        fermi = 0.0
        self.assertListEqual(energies_nonspin, self.DOSCAR_nonspin_pol.tdos.energies.tolist())
        self.assertListEqual(tdos_nonspin, self.DOSCAR_nonspin_pol.tdos.densities[Spin.up].tolist())
        self.assertAlmostEqual(fermi, self.DOSCAR_nonspin_pol.tdos.efermi)

    def test_energies(self):
        """Check the energy grid of both fixtures."""
        # first for spin polarized version
        energies_spin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
        self.assertListEqual(energies_spin, self.DOSCAR_spin_pol.energies.tolist())
        energies_nonspin = [-11.25000, -7.50000, -3.75000, 0.00000, 3.75000, 7.50000]
        self.assertListEqual(energies_nonspin, self.DOSCAR_nonspin_pol.energies.tolist())

    def test_tdensities(self):
        """Check the raw total densities per spin channel."""
        # first for spin polarized version
        tdos_up = [0.00000, 0.79999, 0.00000, 0.79999, 0.00000, 0.02577]
        tdos_down = [0.00000, 0.79999, 0.00000, 0.79999, 0.00000, 0.02586]
        self.assertListEqual(tdos_up, self.DOSCAR_spin_pol.tdensities[Spin.up].tolist())
        self.assertListEqual(tdos_down, self.DOSCAR_spin_pol.tdensities[Spin.down].tolist())
        tdos_nonspin = [0.00000, 1.60000, 0.00000, 1.60000, 0.00000, 0.02418]
        self.assertListEqual(tdos_nonspin, self.DOSCAR_nonspin_pol.tdensities[Spin.up].tolist())

    def test_itdensities(self):
        """Check the integrated total densities per spin channel."""
        itdos_up = [1.99997, 4.99992, 4.99992, 7.99987, 7.99987, 8.09650]
        itdos_down = [1.99997, 4.99992, 4.99992, 7.99987, 7.99987, 8.09685]
        self.assertListEqual(itdos_up, self.DOSCAR_spin_pol.itdensities[Spin.up].tolist())
        self.assertListEqual(itdos_down, self.DOSCAR_spin_pol.itdensities[Spin.down].tolist())
        itdos_nonspin = [4.00000, 10.00000, 10.00000, 16.00000, 16.00000, 16.09067]
        self.assertListEqual(itdos_nonspin, self.DOSCAR_nonspin_pol.itdensities[Spin.up].tolist())

    def test_is_spin_polarized(self):
        """Check the spin-polarization flag for both fixtures."""
        # first for spin polarized version
        self.assertTrue(self.DOSCAR_spin_pol.is_spin_polarized)
        self.assertFalse(self.DOSCAR_nonspin_pol.is_spin_polarized)
class ChargeTest(PymatgenTest):
    """Tests for Charge parsing of LOBSTER CHARGE files."""

    def setUp(self):
        """Parse plain and gzipped CHARGE fixtures."""
        self.charge2 = Charge(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "CHARGE.lobster.MnO"))
        # gzipped file
        self.charge = Charge(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "CHARGE.lobster.MnO2.gz"))

    # Name has no underscore but still starts with "test", so unittest discovers it.
    def testattributes(self):
        """Check Mulliken/Loewdin charges, atom labels, types and atom count."""
        charge_Loewdin = [-1.25, 1.25]
        charge_Mulliken = [-1.30, 1.30]
        atomlist = ["O1", "Mn2"]
        types = ["O", "Mn"]
        num_atoms = 2
        self.assertArrayEqual(charge_Mulliken, self.charge2.Mulliken)
        self.assertArrayEqual(charge_Loewdin, self.charge2.Loewdin)
        self.assertArrayEqual(atomlist, self.charge2.atomlist)
        self.assertArrayEqual(types, self.charge2.types)
        self.assertArrayEqual(num_atoms, self.charge2.num_atoms)

    def test_get_structure_with_charges(self):
        """Check that charges are attached as site properties on the structure."""
        # Expected MnO structure dict with Loewdin/Mulliken charges per site.
        structure_dict2 = {
            "lattice": {
                "c": 3.198244,
                "volume": 23.132361565928807,
                "b": 3.1982447183003364,
                "gamma": 60.00000011873414,
                "beta": 60.00000401737447,
                "alpha": 60.00000742944491,
                "matrix": [
                    [2.769761, 0.0, 1.599122],
                    [0.923254, 2.611356, 1.599122],
                    [0.0, 0.0, 3.198244],
                ],
                "a": 3.1982443884113985,
            },
            "@class": "Structure",
            "sites": [
                {
                    "xyz": [1.846502883732, 1.305680611356, 3.198248797366],
                    "properties": {"Loewdin Charges": -1.25, "Mulliken Charges": -1.3},
                    "abc": [0.499998, 0.500001, 0.500002],
                    "species": [{"occu": 1, "element": "O"}],
                    "label": "O",
                },
                {
                    "xyz": [0.0, 0.0, 0.0],
                    "properties": {"Loewdin Charges": 1.25, "Mulliken Charges": 1.3},
                    "abc": [0.0, 0.0, 0.0],
                    "species": [{"occu": 1, "element": "Mn"}],
                    "label": "Mn",
                },
            ],
            "charge": None,
            "@module": "pymatgen.core.structure",
        }
        s2 = Structure.from_dict(structure_dict2)
        self.assertEqual(
            s2,
            self.charge2.get_structure_with_charges(os.path.join(this_dir, "../../tests/POSCAR.MnO")),
        )
class LobsteroutTest(PymatgenTest):
    """Tests for ``Lobsterout``, the parser for LOBSTER "lobsterout" log files."""

    # Every boolean availability flag a Lobsterout object exposes; used by
    # _check_flags to assert the full flag state in one place.
    _ALL_FLAGS = (
        "has_CHARGE",
        "has_COHPCAR",
        "has_COOPCAR",
        "has_DOSCAR",
        "has_Projection",
        "has_bandoverlaps",
        "has_density_of_energies",
        "has_fatbands",
        "has_grosspopulation",
    )

    def setUp(self):
        warnings.simplefilter("ignore")
        cohp_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp")
        self.lobsterout_normal = Lobsterout(filename=os.path.join(cohp_dir, "lobsterout.normal"))
        # make sure .gz files are also read correctly
        self.lobsterout_normal = Lobsterout(filename=os.path.join(cohp_dir, "lobsterout.normal2.gz"))
        self.lobsterout_fatband_grosspop_densityofenergies = Lobsterout(
            filename=os.path.join(cohp_dir, "lobsterout.fatband_grosspop_densityofenergy")
        )
        self.lobsterout_saveprojection = Lobsterout(filename=os.path.join(cohp_dir, "lobsterout.saveprojection"))
        self.lobsterout_skipping_all = Lobsterout(filename=os.path.join(cohp_dir, "lobsterout.skipping_all"))
        self.lobsterout_twospins = Lobsterout(filename=os.path.join(cohp_dir, "lobsterout.twospins"))
        self.lobsterout_GaAs = Lobsterout(filename=os.path.join(cohp_dir, "lobsterout.GaAs"))
        self.lobsterout_from_projection = Lobsterout(filename=os.path.join(cohp_dir, "lobsterout_from_projection"))
        self.lobsterout_onethread = Lobsterout(filename=os.path.join(cohp_dir, "lobsterout.onethread"))

    def tearDown(self):
        warnings.simplefilter("default")

    def _check_flags(self, out, true_flags):
        """Assert that exactly the flags named in *true_flags* are set on *out*."""
        for attr in self._ALL_FLAGS:
            self.assertEqual(bool(getattr(out, attr)), attr in true_flags, attr)

    def _check_run_metadata(self, out, spins=1, threads=8):
        """Assert version/spin/thread metadata shared by all fixture files."""
        self.assertEqual(out.lobster_version, "v3.1.0")
        self.assertEqual(out.number_of_spins, spins)
        self.assertEqual(out.number_of_threads, threads)

    @staticmethod
    def _timing(walltime, usertime, sys_time):
        """Build the expected timing dict from three (h, min, s, ms) string tuples."""
        keys = ("h", "min", "s", "ms")
        return {
            "walltime": dict(zip(keys, walltime)),
            "usertime": dict(zip(keys, usertime)),
            "sys_time": dict(zip(keys, sys_time)),
        }

    def testattributes(self):
        # shared expected values appearing in several fixture files
        basis_ti = ["3s", "4s", "3p_y", "3p_z", "3p_x", "3d_xy", "3d_yz", "3d_z^2", "3d_xz", "3d_x^2-y^2"]
        basis_no_3s = ["4s", "4p_y", "4p_z", "4p_x", "3d_xy", "3d_yz", "3d_z^2", "3d_xz", "3d_x^2-y^2"]
        ortho_147 = ["3 of 147 k-points could not be orthonormalized with an accuracy of 1.0E-5."]
        ortho_294 = ["60 of 294 k-points could not be orthonormalized with an accuracy of 1.0E-5."]
        # the warning block always repeats the orthonormalization line plus this advice
        warn_tail = [
            "Generally, this is not a critical error. But to help you analyze it,",
            "I dumped the band overlap matrices to the file bandOverlaps.lobster.",
            "Please check how much they deviate from the identity matrix and decide to",
            "use your results only, if you are sure that this is ok.",
        ]

        def paw_info(first_ignored_band):
            # standard LOBSTER info block about PAW bands exceeding the local basis
            return [
                "There are more PAW bands than local basis functions available.",
                "To prevent trouble in orthonormalization and Hamiltonian reconstruction",
                "the PAW bands from {} and upwards will be ignored.".format(first_ignored_band),
            ]

        out = self.lobsterout_normal
        self.assertListEqual(out.basis_functions, [basis_ti])
        self.assertListEqual(out.basis_type, ["pbeVaspFit2015"])
        self.assertListEqual(out.chargespilling, [0.0268])
        self.assertEqual(out.dftprogram, "VASP")
        self.assertListEqual(out.elements, ["Ti"])
        self._check_flags(out, {"has_CHARGE", "has_COHPCAR", "has_COOPCAR", "has_DOSCAR", "has_bandoverlaps"})
        self.assertListEqual(out.info_lines, paw_info(21))
        self.assertListEqual(out.info_orthonormalization, ortho_147)
        self.assertFalse(out.is_restart_from_projection)
        self._check_run_metadata(out)
        self.assertDictEqual(
            out.timing,
            self._timing(("0", "0", "2", "702"), ("0", "0", "20", "330"), ("0", "0", "0", "310")),
        )
        self.assertAlmostEqual(out.totalspilling[0], 0.044000000000000004)
        self.assertListEqual(out.warninglines, ortho_147 + warn_tail)

        out = self.lobsterout_fatband_grosspop_densityofenergies
        self.assertListEqual(out.basis_functions, [basis_ti])
        self.assertListEqual(out.basis_type, ["pbeVaspFit2015"])
        self.assertListEqual(out.chargespilling, [0.0268])
        self.assertEqual(out.dftprogram, "VASP")
        self.assertListEqual(out.elements, ["Ti"])
        self._check_flags(
            out,
            {"has_CHARGE", "has_bandoverlaps", "has_density_of_energies", "has_fatbands", "has_grosspopulation"},
        )
        self.assertListEqual(out.info_lines, paw_info(21))
        self.assertListEqual(out.info_orthonormalization, ortho_147)
        self.assertFalse(out.is_restart_from_projection)
        self._check_run_metadata(out)
        self.assertDictEqual(
            out.timing,
            self._timing(("0", "0", "4", "136"), ("0", "0", "18", "280"), ("0", "0", "0", "290")),
        )
        self.assertAlmostEqual(out.totalspilling[0], 0.044000000000000004)
        self.assertListEqual(out.warninglines, ortho_147 + warn_tail)

        out = self.lobsterout_saveprojection
        self.assertListEqual(out.basis_functions, [basis_ti])
        self.assertListEqual(out.basis_type, ["pbeVaspFit2015"])
        self.assertListEqual(out.chargespilling, [0.0268])
        self.assertEqual(out.dftprogram, "VASP")
        self.assertListEqual(out.elements, ["Ti"])
        self._check_flags(out, {"has_CHARGE", "has_Projection", "has_bandoverlaps", "has_density_of_energies"})
        self.assertListEqual(out.info_lines, paw_info(21))
        self.assertListEqual(out.info_orthonormalization, ortho_147)
        self.assertFalse(out.is_restart_from_projection)
        self._check_run_metadata(out)
        self.assertDictEqual(
            out.timing,
            self._timing(("0", "0", "2", "574"), ("0", "0", "18", "250"), ("0", "0", "0", "320")),
        )
        self.assertAlmostEqual(out.totalspilling[0], 0.044000000000000004)
        self.assertListEqual(out.warninglines, ortho_147 + warn_tail)

        out = self.lobsterout_skipping_all
        self.assertListEqual(out.basis_functions, [basis_ti])
        self.assertListEqual(out.basis_type, ["pbeVaspFit2015"])
        self.assertListEqual(out.chargespilling, [0.0268])
        self.assertEqual(out.dftprogram, "VASP")
        self.assertListEqual(out.elements, ["Ti"])
        self._check_flags(out, {"has_bandoverlaps"})
        self.assertListEqual(out.info_lines, paw_info(21))
        self.assertListEqual(out.info_orthonormalization, ortho_147)
        self.assertFalse(out.is_restart_from_projection)
        self._check_run_metadata(out)
        self.assertDictEqual(
            out.timing,
            self._timing(("0", "0", "2", "117"), ("0", "0", "16", "79"), ("0", "0", "0", "320")),
        )
        self.assertAlmostEqual(out.totalspilling[0], 0.044000000000000004)
        self.assertListEqual(out.warninglines, ortho_147 + warn_tail)

        out = self.lobsterout_twospins
        self.assertListEqual(out.basis_functions, [basis_no_3s])
        self.assertListEqual(out.basis_type, ["pbeVaspFit2015"])
        # spin-polarized run: one spilling value per spin channel
        self.assertAlmostEqual(out.chargespilling[0], 0.36619999999999997)
        self.assertAlmostEqual(out.chargespilling[1], 0.36619999999999997)
        self.assertEqual(out.dftprogram, "VASP")
        self.assertListEqual(out.elements, ["Ti"])
        self._check_flags(out, {"has_CHARGE", "has_COHPCAR", "has_COOPCAR", "has_DOSCAR", "has_bandoverlaps"})
        self.assertListEqual(out.info_lines, paw_info(19))
        self.assertListEqual(out.info_orthonormalization, ortho_294)
        self.assertFalse(out.is_restart_from_projection)
        self._check_run_metadata(out, spins=2)
        self.assertDictEqual(
            out.timing,
            self._timing(("0", "0", "3", "71"), ("0", "0", "22", "660"), ("0", "0", "0", "310")),
        )
        self.assertAlmostEqual(out.totalspilling[0], 0.2567)
        self.assertAlmostEqual(out.totalspilling[1], 0.2567)
        self.assertListEqual(out.warninglines, ortho_294 + warn_tail)

        out = self.lobsterout_from_projection
        # a restart from a stored projection carries no basis/element information
        self.assertListEqual(out.basis_functions, [])
        self.assertListEqual(out.basis_type, [])
        self.assertAlmostEqual(out.chargespilling[0], 0.0177)
        self.assertEqual(out.dftprogram, None)
        self.assertListEqual(out.elements, [])
        self._check_flags(out, {"has_CHARGE", "has_COHPCAR", "has_COOPCAR", "has_DOSCAR"})
        self.assertListEqual(out.info_lines, [])
        self.assertListEqual(out.info_orthonormalization, [])
        self.assertTrue(out.is_restart_from_projection)
        self._check_run_metadata(out)
        self.assertDictEqual(
            out.timing,
            self._timing(("0", "2", "1", "890"), ("0", "15", "10", "530"), ("0", "0", "0", "400")),
        )
        self.assertAlmostEqual(out.totalspilling[0], 0.1543)
        self.assertListEqual(out.warninglines, [])

        out = self.lobsterout_GaAs
        # two species with different basis sets (As: s+p only, Ga: s+p+d)
        self.assertListEqual(out.basis_functions, [["4s", "4p_y", "4p_z", "4p_x"], basis_no_3s])
        self.assertListEqual(out.basis_type, ["Bunge", "Bunge"])
        self.assertAlmostEqual(out.chargespilling[0], 0.0089)
        self.assertEqual(out.dftprogram, "VASP")
        self.assertListEqual(out.elements, ["As", "Ga"])
        self._check_flags(out, {"has_CHARGE", "has_COHPCAR", "has_COOPCAR", "has_DOSCAR"})
        self.assertListEqual(out.info_lines, paw_info(14))
        self.assertListEqual(out.info_orthonormalization, [])
        self.assertFalse(out.is_restart_from_projection)
        self._check_run_metadata(out)
        self.assertDictEqual(
            out.timing,
            self._timing(("0", "0", "2", "726"), ("0", "0", "12", "370"), ("0", "0", "0", "180")),
        )
        self.assertAlmostEqual(out.totalspilling[0], 0.0859)

        self.assertEqual(self.lobsterout_onethread.number_of_threads, 1)

    def test_get_doc(self):
        comparedict = {
            "restart_from_projection": False,
            "lobster_version": "v3.1.0",
            "threads": 8,
            "Dftprogram": "VASP",
            "chargespilling": [0.0268],
            "totalspilling": [0.044000000000000004],
            "elements": ["Ti"],
            "basistype": ["pbeVaspFit2015"],
            "basisfunctions": [
                ["3s", "4s", "3p_y", "3p_z", "3p_x", "3d_xy", "3d_yz", "3d_z^2", "3d_xz", "3d_x^2-y^2"]
            ],
            "timing": {
                "walltime": {"h": "0", "min": "0", "s": "2", "ms": "702"},
                "usertime": {"h": "0", "min": "0", "s": "20", "ms": "330"},
                "sys_time": {"h": "0", "min": "0", "s": "0", "ms": "310"},
            },
            "warnings": [
                "3 of 147 k-points could not be orthonormalized with an accuracy of 1.0E-5.",
                "Generally, this is not a critical error. But to help you analyze it,",
                "I dumped the band overlap matrices to the file bandOverlaps.lobster.",
                "Please check how much they deviate from the identity matrix and decide to",
                "use your results only, if you are sure that this is ok.",
            ],
            "orthonormalization": ["3 of 147 k-points could not be orthonormalized with an accuracy of 1.0E-5."],
            "infos": [
                "There are more PAW bands than local basis functions available.",
                "To prevent trouble in orthonormalization and Hamiltonian reconstruction",
                "the PAW bands from 21 and upwards will be ignored.",
            ],
            "hasDOSCAR": True,
            "hasCOHPCAR": True,
            "hasCOOPCAR": True,
            "hasCHARGE": True,
            "hasProjection": False,
            "hasbandoverlaps": True,
            "hasfatband": False,
            # NOTE(review): key spelling mirrors Lobsterout.get_doc(); "GrossPopuliation"
            # looks like a typo inherited from the implementation -- confirm before renaming.
            "hasGrossPopuliation": False,
            "hasDensityOfEnergies": False,
        }
        for key, item in self.lobsterout_normal.get_doc().items():
            if key in ("chargespilling", "totalspilling"):
                # float lists: compare with tolerance
                self.assertAlmostEqual(item[0], comparedict[key][0])
            elif isinstance(item, dict):
                self.assertDictEqual(item, comparedict[key])
            elif isinstance(item, list):
                self.assertListEqual(item, comparedict[key])
            else:
                # BUG FIX: the original called assertTrue(comparedict[key], item) for string
                # items; the second argument of assertTrue is the failure *message*, so only
                # the truthiness of comparedict[key] was checked and the actual values were
                # never compared. Compare for equality instead.
                self.assertEqual(comparedict[key], item)
class FatbandTest(PymatgenTest):
    """Tests for ``Fatband``, the parser for LOBSTER FATBAND_* output files."""

    def setUp(self):
        warnings.simplefilter("ignore")
        base = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp")
        self.fatband_SiO2_p_x = Fatband(
            filenames=os.path.join(base, "Fatband_SiO2/Test_p_x"),
            Kpointsfile=os.path.join(base, "Fatband_SiO2/Test_p_x/KPOINTS"),
            vasprun=os.path.join(base, "Fatband_SiO2/Test_p_x/vasprun.xml"),
        )
        self.vasprun_SiO2_p_x = Vasprun(filename=os.path.join(base, "Fatband_SiO2/Test_p_x/vasprun.xml"))
        self.bs_symmline = self.vasprun_SiO2_p_x.get_band_structure(line_mode=True, force_hybrid_mode=True)
        self.fatband_SiO2_p = Fatband(
            filenames=os.path.join(base, "Fatband_SiO2/Test_p"),
            Kpointsfile=os.path.join(base, "Fatband_SiO2/Test_p/KPOINTS"),
            vasprun=os.path.join(base, "Fatband_SiO2/Test_p/vasprun.xml"),
        )
        self.vasprun_SiO2_p = Vasprun(filename=os.path.join(base, "Fatband_SiO2/Test_p/vasprun.xml"))
        self.bs_symmline2 = self.vasprun_SiO2_p.get_band_structure(line_mode=True, force_hybrid_mode=True)
        self.fatband_SiO2_spin = Fatband(
            filenames=os.path.join(base, "Fatband_SiO2/Test_Spin"),
            Kpointsfile=os.path.join(base, "Fatband_SiO2/Test_Spin/KPOINTS"),
            vasprun=os.path.join(base, "Fatband_SiO2/Test_Spin/vasprun.xml"),
        )
        self.vasprun_SiO2_spin = Vasprun(filename=os.path.join(base, "Fatband_SiO2/Test_Spin/vasprun.xml"))
        # BUG FIX: this reference band structure was previously built from
        # self.vasprun_SiO2_p (copy-paste); the spin-polarized fatband must be
        # compared against the spin-polarized vasprun.
        self.bs_symmline_spin = self.vasprun_SiO2_spin.get_band_structure(line_mode=True, force_hybrid_mode=True)

    def tearDown(self):
        warnings.simplefilter("default")

    def _assert_same_lattice(self, lattice_a, lattice_b):
        """Assert the two (reciprocal) lattices agree element-wise."""
        matrix_a = lattice_a.as_dict()["matrix"]
        matrix_b = lattice_b.as_dict()["matrix"]
        for row_a, row_b in zip(matrix_a, matrix_b):
            for value_a, value_b in zip(row_a, row_b):
                self.assertAlmostEqual(value_a, value_b)

    def _check_fatband(self, fatband, vasprun, bs_symmline, spinpolarized):
        """Assert common attributes of a SiO2 fatband against its reference run."""
        for actual, expected in zip(list(fatband.label_dict["M"]), (0.5, 0.0, 0.0)):
            self.assertAlmostEqual(actual, expected)
        self.assertEqual(fatband.efermi, vasprun.efermi)
        self._assert_same_lattice(bs_symmline.lattice_rec, fatband.lattice)
        self.assertEqual(fatband.eigenvals[Spin.up][1][1] - fatband.efermi, -18.245)
        if spinpolarized:
            self.assertEqual(fatband.eigenvals[Spin.down][1][1] - fatband.efermi, -18.245)
        self.assertEqual(fatband.is_spinpolarized, spinpolarized)
        self.assertAlmostEqual(fatband.kpoints_array[3][0], 0.03409091)
        self.assertEqual(fatband.kpoints_array[3][1], 0.0)
        self.assertEqual(fatband.kpoints_array[3][2], 0.0)
        self.assertEqual(fatband.nbands, 36)
        site0 = fatband.structure[0]
        for actual, expected in zip(site0.frac_coords, (0.0, 0.47634315, 0.666667)):
            self.assertAlmostEqual(actual, expected)
        self.assertEqual(site0.species_string, "Si")
        for actual, expected in zip(site0.coords, (-1.19607309, 2.0716597, 3.67462144)):
            self.assertAlmostEqual(actual, expected)

    def _check_si_o_projections(self, bs, spin):
        """Assert the combined Si/O s+p projections at the first k-point/band."""
        proj = bs.get_projections_on_elements_and_orbitals({"Si": ["3s", "3p"], "O": ["2s", "2p"]})[spin][0][0]
        self.assertAlmostEqual(proj["Si"]["3s"], 0.192)
        self.assertAlmostEqual(proj["Si"]["3p"], 0.003)
        self.assertAlmostEqual(proj["O"]["2s"], 0.792)
        self.assertAlmostEqual(proj["O"]["2p"], 0.015)

    def test_attributes(self):
        self._check_fatband(self.fatband_SiO2_p_x, self.vasprun_SiO2_p_x, self.bs_symmline, False)
        self.assertEqual(self.fatband_SiO2_p_x.p_eigenvals[Spin.up][2][1]["Si1"]["3p_x"], 0.002)
        self._check_fatband(self.fatband_SiO2_p, self.vasprun_SiO2_p, self.bs_symmline2, False)
        self.assertEqual(self.fatband_SiO2_p.p_eigenvals[Spin.up][2][1]["Si1"]["3p"], 0.042)
        self._check_fatband(self.fatband_SiO2_spin, self.vasprun_SiO2_spin, self.bs_symmline_spin, True)
        self.assertEqual(self.fatband_SiO2_spin.p_eigenvals[Spin.up][2][1]["Si1"]["3p"], 0.042)

    def test_raises(self):
        kpoints = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "Fatband_SiO2/Test_p_x/KPOINTS")
        vasprun = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "Fatband_SiO2/Test_p_x/vasprun.xml")
        fatband_px = os.path.join(
            PymatgenTest.TEST_FILES_DIR, "cohp", "Fatband_SiO2/Test_p_x/FATBAND_si1_3p_x.lobster"
        )
        fatband_p = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "Fatband_SiO2/Test_p/FATBAND_si1_3p.lobster")
        # the same FATBAND file listed twice is rejected
        with self.assertRaises(ValueError):
            Fatband(filenames=[fatband_px, fatband_px], Kpointsfile=kpoints, vasprun=vasprun)
        # FATBAND files from different runs mixed together are rejected
        with self.assertRaises(ValueError):
            Fatband(filenames=[fatband_px, fatband_p], Kpointsfile=kpoints, vasprun=vasprun)
        # a directory containing no FATBAND files (presumably the cwd here) is rejected
        with self.assertRaises(ValueError):
            Fatband(filenames=".", Kpointsfile=kpoints, vasprun=vasprun)

    def test_get_bandstructure(self):
        bs_p = self.fatband_SiO2_p.get_bandstructure()
        atom1 = bs_p.structure[0]
        atom2 = self.bs_symmline2.structure[0]
        for value_a, value_b in zip(atom1.frac_coords, atom2.frac_coords):
            self.assertAlmostEqual(value_a, value_b)
        for value_a, value_b in zip(atom1.coords, atom2.coords):
            self.assertAlmostEqual(value_a, value_b)
        self.assertEqual(atom1.species_string, atom2.species_string)
        self.assertEqual(bs_p.efermi, self.bs_symmline2.efermi)
        branch1 = bs_p.branches[0]
        branch2 = self.bs_symmline2.branches[0]
        for key in ("name", "start_index", "end_index"):
            self.assertEqual(branch2[key], branch1[key])
        self.assertAlmostEqual(bs_p.distance[30], self.bs_symmline2.distance[30])
        self._assert_same_lattice(bs_p.lattice_rec, self.bs_symmline2.lattice_rec)
        # spot-check k-points in both fractional and cartesian coordinates
        for idx in (8, 50):
            for value_a, value_b in zip(bs_p.kpoints[idx].frac_coords, self.bs_symmline2.kpoints[idx].frac_coords):
                self.assertAlmostEqual(value_a, value_b)
            for value_a, value_b in zip(bs_p.kpoints[idx].cart_coords, self.bs_symmline2.kpoints[idx].cart_coords):
                self.assertAlmostEqual(value_a, value_b)
        self.assertAlmostEqual(
            bs_p.get_band_gap()["energy"],
            self.bs_symmline2.get_band_gap()["energy"],
            places=2,
        )
        self.assertAlmostEqual(bs_p.get_projection_on_elements()[Spin.up][0][0]["Si"], 3 * (0.001 + 0.064))
        self.assertAlmostEqual(
            bs_p.get_projections_on_elements_and_orbitals({"Si": ["3p"]})[Spin.up][0][0]["Si"]["3p"],
            0.003,
        )
        self.assertAlmostEqual(
            bs_p.get_projections_on_elements_and_orbitals({"O": ["2p"]})[Spin.up][0][0]["O"]["2p"],
            0.002 * 3 + 0.003 * 3,
        )
        self._check_si_o_projections(bs_p, Spin.up)

        bs_spin = self.fatband_SiO2_spin.get_bandstructure()
        for spin in (Spin.up, Spin.down):
            self.assertAlmostEqual(
                bs_spin.get_projection_on_elements()[Spin.up][0][0]["Si"],
                3 * (0.001 + 0.064),
            )
            self.assertAlmostEqual(
                bs_spin.get_projections_on_elements_and_orbitals({"Si": ["3p"]})[spin][0][0]["Si"]["3p"],
                0.003,
            )
            self.assertAlmostEqual(
                bs_spin.get_projections_on_elements_and_orbitals({"O": ["2p"]})[spin][0][0]["O"]["2p"],
                0.002 * 3 + 0.003 * 3,
            )
            self._check_si_o_projections(bs_spin, spin)

        bs_p_x = self.fatband_SiO2_p_x.get_bandstructure()
        # looser comparison (2 decimal places) for the single-orbital projection
        self.assertAlmostEqual(
            bs_p_x.get_projection_on_elements()[Spin.up][0][0]["Si"],
            3 * (0.001 + 0.064),
            2,
        )
class LobsterinTest(unittest.TestCase):
    """Tests for reading, writing and generating LOBSTER input ("lobsterin") files."""

    def setUp(self):
        # Silence warnings emitted while parsing the fixture files.
        warnings.simplefilter("ignore")
        self.Lobsterinfromfile = Lobsterin.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "lobsterin.1"))
        self.Lobsterinfromfile2 = Lobsterin.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "lobsterin.2"))
        self.Lobsterinfromfile3 = Lobsterin.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "lobsterin.3"))
        self.Lobsterinfromfile4 = Lobsterin.from_file(
            os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "lobsterin.4.gz")
        )

    def test_from_file(self):
        # test read from file
        self.assertAlmostEqual(self.Lobsterinfromfile["cohpstartenergy"], -15.0)
        self.assertAlmostEqual(self.Lobsterinfromfile["cohpendenergy"], 5.0)
        self.assertAlmostEqual(self.Lobsterinfromfile["basisset"], "pbeVaspFit2015")
        self.assertAlmostEqual(self.Lobsterinfromfile["gaussiansmearingwidth"], 0.1)
        self.assertEqual(self.Lobsterinfromfile["basisfunctions"][0], "Fe 3d 4p 4s")
        self.assertEqual(self.Lobsterinfromfile["basisfunctions"][1], "Co 3d 4p 4s")
        self.assertEqual(self.Lobsterinfromfile["skipdos"], True)
        self.assertEqual(self.Lobsterinfromfile["skipcohp"], True)
        self.assertEqual(self.Lobsterinfromfile["skipcoop"], True)
        self.assertEqual(self.Lobsterinfromfile["skippopulationanalysis"], True)
        self.assertEqual(self.Lobsterinfromfile["skipgrosspopulation"], True)
        # test if comments are correctly removed
        self.assertDictEqual(self.Lobsterinfromfile, self.Lobsterinfromfile2)

    def test_getitem(self):
        # tests implementation of getitem, should be case independent
        self.assertAlmostEqual(self.Lobsterinfromfile["COHPSTARTENERGY"], -15.0)

    def test_setitem(self):
        # test implementation of setitem
        self.Lobsterinfromfile["skipCOHP"] = False
        self.assertEqual(self.Lobsterinfromfile["skipcohp"], False)

    def test_initialize_from_dict(self):
        # initialize from dict
        lobsterin1 = Lobsterin(
            {
                "cohpstartenergy": -15.0,
                "cohpendenergy": 5.0,
                "basisset": "pbeVaspFit2015",
                "gaussiansmearingwidth": 0.1,
                "basisfunctions": ["Fe 3d 4p 4s", "Co 3d 4p 4s"],
                "skipdos": True,
                "skipcohp": True,
                "skipcoop": True,
                "skippopulationanalysis": True,
                "skipgrosspopulation": True,
            }
        )
        self.assertAlmostEqual(lobsterin1["cohpstartenergy"], -15.0)
        self.assertAlmostEqual(lobsterin1["cohpendenergy"], 5.0)
        self.assertAlmostEqual(lobsterin1["basisset"], "pbeVaspFit2015")
        self.assertAlmostEqual(lobsterin1["gaussiansmearingwidth"], 0.1)
        self.assertEqual(lobsterin1["basisfunctions"][0], "Fe 3d 4p 4s")
        self.assertEqual(lobsterin1["basisfunctions"][1], "Co 3d 4p 4s")
        self.assertEqual(lobsterin1["skipdos"], True)
        self.assertEqual(lobsterin1["skipcohp"], True)
        self.assertEqual(lobsterin1["skipcoop"], True)
        self.assertEqual(lobsterin1["skippopulationanalysis"], True)
        self.assertEqual(lobsterin1["skipgrosspopulation"], True)
        # duplicate keys that differ only in case must be rejected
        with self.assertRaises(IOError):
            lobsterin2 = Lobsterin({"cohpstartenergy": -15.0, "cohpstartEnergy": -20.0})
        lobsterin2 = Lobsterin({"cohpstartenergy": -15.0})
        # can only calculate nbands if basis functions are provided
        with self.assertRaises(IOError):
            lobsterin2._get_nbands(structure=Structure.from_file(os.path.join(test_dir_doscar, "POSCAR.Fe3O4")))

    def test_standard_settings(self):
        # test standard settings
        for option in [
            "standard",
            "standard_from_projection",
            "standard_with_fatband",
            "onlyprojection",
            "onlydos",
            "onlycohp",
            "onlycoop",
            "onlycohpcoop",
        ]:
            lobsterin1 = Lobsterin.standard_calculations_from_vasp_files(
                os.path.join(test_dir_doscar, "POSCAR.Fe3O4"),
                os.path.join(test_dir_doscar, "INCAR.lobster"),
                os.path.join(test_dir_doscar, "POTCAR.Fe3O4"),
                option=option,
            )
            # settings common to every option
            self.assertAlmostEqual(lobsterin1["cohpstartenergy"], -15.0)
            self.assertAlmostEqual(lobsterin1["cohpendenergy"], 5.0)
            self.assertAlmostEqual(lobsterin1["basisset"], "pbeVaspFit2015")
            self.assertAlmostEqual(lobsterin1["gaussiansmearingwidth"], 0.1)
            self.assertEqual(lobsterin1["basisfunctions"][0], "Fe 3d 4p 4s ")
            self.assertEqual(lobsterin1["basisfunctions"][1], "O 2p 2s ")
            # option-dependent settings
            if option in [
                "standard",
                "standard_with_fatband",
                "onlyprojection",
                "onlycohp",
                "onlycoop",
                "onlycohpcoop",
            ]:
                self.assertEqual(lobsterin1["saveProjectiontoFile"], True)
            if option in [
                "standard",
                "standard_with_fatband",
                "onlycohp",
                "onlycoop",
                "onlycohpcoop",
            ]:
                self.assertEqual(lobsterin1["cohpGenerator"], "from 0.1 to 6.0 orbitalwise")
            if option in ["standard"]:
                self.assertEqual("skipdos" not in lobsterin1, True)
                self.assertEqual("skipcohp" not in lobsterin1, True)
                self.assertEqual("skipcoop" not in lobsterin1, True)
            if option in ["standard_with_fatband"]:
                self.assertListEqual(lobsterin1["createFatband"], ["Fe 3d 4p 4s ", "O 2p 2s "])
                self.assertEqual("skipdos" not in lobsterin1, True)
                self.assertEqual("skipcohp" not in lobsterin1, True)
                self.assertEqual("skipcoop" not in lobsterin1, True)
            if option in ["standard_from_projection"]:
                self.assertTrue(lobsterin1["loadProjectionFromFile"], True)
            if option in ["onlyprojection", "onlycohp", "onlycoop", "onlycohpcoop"]:
                self.assertTrue(lobsterin1["skipdos"], True)
                self.assertTrue(lobsterin1["skipPopulationAnalysis"], True)
                self.assertTrue(lobsterin1["skipGrossPopulation"], True)
            if option in ["onlydos"]:
                self.assertTrue(lobsterin1["skipPopulationAnalysis"], True)
                self.assertTrue(lobsterin1["skipGrossPopulation"], True)
                self.assertTrue(lobsterin1["skipcohp"], True)
                self.assertTrue(lobsterin1["skipcoop"], True)
            if option in ["onlycohp"]:
                self.assertTrue(lobsterin1["skipcoop"], True)
            if option in ["onlycoop"]:
                self.assertTrue(lobsterin1["skipcohp"], True)
            if option in ["onlyprojection"]:
                self.assertTrue(lobsterin1["skipdos"], True)
        # test basis functions by dict
        lobsterin_new = Lobsterin.standard_calculations_from_vasp_files(
            os.path.join(test_dir_doscar, "POSCAR.Fe3O4"),
            os.path.join(test_dir_doscar, "INCAR.lobster"),
            dict_for_basis={"Fe": "3d 4p 4s", "O": "2s 2p"},
            option="standard",
        )
        self.assertListEqual(lobsterin_new["basisfunctions"], ["Fe 3d 4p 4s", "O 2s 2p"])
        # test gaussian smearing
        lobsterin_new = Lobsterin.standard_calculations_from_vasp_files(
            os.path.join(test_dir_doscar, "POSCAR.Fe3O4"),
            os.path.join(test_dir_doscar, "INCAR.lobster2"),
            dict_for_basis={"Fe": "3d 4p 4s", "O": "2s 2p"},
            option="standard",
        )
        self.assertTrue("gaussiansmearingwidth" not in lobsterin_new)
        # fatband and ISMEAR=-5 does not work together
        with self.assertRaises(ValueError):
            lobsterin_new = Lobsterin.standard_calculations_from_vasp_files(
                os.path.join(test_dir_doscar, "POSCAR.Fe3O4"),
                os.path.join(test_dir_doscar, "INCAR.lobster2"),
                dict_for_basis={"Fe": "3d 4p 4s", "O": "2s 2p"},
                option="standard_with_fatband",
            )

    def test_diff(self):
        # test diff
        self.assertDictEqual(self.Lobsterinfromfile.diff(self.Lobsterinfromfile2)["Different"], {})
        self.assertAlmostEqual(
            self.Lobsterinfromfile.diff(self.Lobsterinfromfile2)["Same"]["COHPSTARTENERGY"],
            -15.0,
        )
        # test diff in both directions
        for entry in self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Same"].keys():
            self.assertTrue(entry in self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Same"].keys())
        for entry in self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Same"].keys():
            self.assertTrue(entry in self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Same"].keys())
        for entry in self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Different"].keys():
            self.assertTrue(entry in self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Different"].keys())
        for entry in self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Different"].keys():
            self.assertTrue(entry in self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Different"].keys())
        self.assertEqual(
            self.Lobsterinfromfile.diff(self.Lobsterinfromfile3)["Different"]["SKIPCOHP"]["lobsterin1"],
            self.Lobsterinfromfile3.diff(self.Lobsterinfromfile)["Different"]["SKIPCOHP"]["lobsterin2"],
        )

    def test_get_basis(self):
        # get basis functions
        lobsterin1 = Lobsterin({})
        potcar = Potcar.from_file(os.path.join(test_dir_doscar, "POTCAR.Fe3O4"))
        Potcar_names = [name["symbol"] for name in potcar.spec]
        self.assertListEqual(
            lobsterin1.get_basis(
                Structure.from_file(os.path.join(test_dir_doscar, "Fe3O4.cif")),
                potcar_symbols=Potcar_names,
            ),
            ["Fe 3d 4p 4s ", "O 2p 2s "],
        )
        potcar = Potcar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "POTCAR.GaAs"))
        Potcar_names = [name["symbol"] for name in potcar.spec]
        self.assertListEqual(
            lobsterin1.get_basis(
                Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "POSCAR.GaAs")),
                potcar_symbols=Potcar_names,
            ),
            ["Ga 3d 4p 4s ", "As 4p 4s "],
        )

    def test_get_all_possible_basis_functions(self):
        potcar = Potcar.from_file(os.path.join(test_dir_doscar, "POTCAR.Fe3O4"))
        Potcar_names = [name["symbol"] for name in potcar.spec]
        result = Lobsterin.get_all_possible_basis_functions(
            Structure.from_file(os.path.join(test_dir_doscar, "Fe3O4.cif")),
            potcar_symbols=Potcar_names,
        )
        self.assertDictEqual(result[0], {"Fe": "3d 4s", "O": "2p 2s"})
        self.assertDictEqual(result[1], {"Fe": "3d 4s 4p", "O": "2p 2s"})
        potcar2 = Potcar.from_file(os.path.join(test_dir_doscar, "POT_GGA_PAW_PBE_54/POTCAR.Fe_pv.gz"))
        Potcar_names2 = [name["symbol"] for name in potcar2.spec]
        result2 = Lobsterin.get_all_possible_basis_functions(
            Structure.from_file(os.path.join(test_dir_doscar, "Fe.cif")),
            potcar_symbols=Potcar_names2,
        )
        self.assertDictEqual(result2[0], {"Fe": "3d 3p 4s"})

    def test_get_potcar_symbols(self):
        lobsterin1 = Lobsterin({})
        self.assertListEqual(
            lobsterin1._get_potcar_symbols(os.path.join(test_dir_doscar, "POTCAR.Fe3O4")),
            ["Fe", "O"],
        )
        self.assertListEqual(
            lobsterin1._get_potcar_symbols(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "POTCAR.GaAs")),
            ["Ga_d", "As"],
        )

    def test_write_lobsterin(self):
        # write lobsterin, read it and compare it
        outfile_path = tempfile.mkstemp()[1]
        lobsterin1 = Lobsterin.standard_calculations_from_vasp_files(
            os.path.join(test_dir_doscar, "POSCAR.Fe3O4"),
            os.path.join(test_dir_doscar, "INCAR.lobster"),
            os.path.join(test_dir_doscar, "POTCAR.Fe3O4"),
            option="standard",
        )
        lobsterin1.write_lobsterin(outfile_path)
        lobsterin2 = Lobsterin.from_file(outfile_path)
        self.assertDictEqual(lobsterin1.diff(lobsterin2)["Different"], {})

    def test_write_INCAR(self):
        # write INCAR and compare
        outfile_path = tempfile.mkstemp()[1]
        lobsterin1 = Lobsterin.standard_calculations_from_vasp_files(
            os.path.join(test_dir_doscar, "POSCAR.Fe3O4"),
            os.path.join(test_dir_doscar, "INCAR.lobster"),
            os.path.join(test_dir_doscar, "POTCAR.Fe3O4"),
            option="standard",
        )
        lobsterin1.write_INCAR(
            os.path.join(test_dir_doscar, "INCAR.lobster3"),
            outfile_path,
            os.path.join(test_dir_doscar, "POSCAR.Fe3O4"),
        )
        incar1 = Incar.from_file(os.path.join(test_dir_doscar, "INCAR.lobster3"))
        incar2 = Incar.from_file(outfile_path)
        # only the LOBSTER-specific keys are expected to differ
        self.assertDictEqual(
            incar1.diff(incar2)["Different"],
            {
                "ISYM": {"INCAR1": 2, "INCAR2": -1},
                "NBANDS": {"INCAR1": None, "INCAR2": 86},
                "NSW": {"INCAR1": 500, "INCAR2": 0},
                "LWAVE": {"INCAR1": False, "INCAR2": True},
            },
        )

    def test_write_KPOINTS(self):
        # line mode
        outfile_path = tempfile.mkstemp()[1]
        outfile_path2 = tempfile.mkstemp(prefix="POSCAR")[1]
        lobsterin1 = Lobsterin({})
        # test writing primitive cell
        lobsterin1.write_POSCAR_with_standard_primitive(
            POSCAR_input=os.path.join(test_dir_doscar, "POSCAR.Fe3O4"),
            POSCAR_output=outfile_path2,
        )
        lobsterin1.write_KPOINTS(
            POSCAR_input=outfile_path2,
            KPOINTS_output=outfile_path,
            kpoints_line_density=58,
        )
        kpoint = Kpoints.from_file(outfile_path)
        self.assertEqual(kpoint.num_kpts, 562)
        self.assertAlmostEqual(kpoint.kpts[-1][0], -0.5)
        self.assertAlmostEqual(kpoint.kpts[-1][1], 0.5)
        self.assertAlmostEqual(kpoint.kpts[-1][2], 0.5)
        self.assertEqual(kpoint.labels[-1], "T")
        kpoint2 = Kpoints.from_file(os.path.join(test_dir_doscar, "KPOINTS_band.lobster"))
        # collapse consecutive duplicate labels for both files, then compare
        labels = []
        number = 0
        for label in kpoint.labels:
            if label is not None:
                if number != 0:
                    if label != labels[number - 1]:
                        labels.append(label)
                        number += 1
                else:
                    labels.append(label)
                    number += 1
        labels2 = []
        number2 = 0
        for label in kpoint2.labels:
            if label is not None:
                if number2 != 0:
                    if label != labels2[number2 - 1]:
                        labels2.append(label)
                        number2 += 1
                else:
                    labels2.append(label)
                    number2 += 1
        self.assertListEqual(labels, labels2)
        # without line mode
        lobsterin1.write_KPOINTS(POSCAR_input=outfile_path2, KPOINTS_output=outfile_path, line_mode=False)
        kpoint = Kpoints.from_file(outfile_path)
        kpoint2 = Kpoints.from_file(os.path.join(test_dir_doscar, "IBZKPT.lobster"))
        for num_kpt, list_kpoint in enumerate(kpoint.kpts):
            self.assertAlmostEqual(list_kpoint[0], kpoint2.kpts[num_kpt][0])
            self.assertAlmostEqual(list_kpoint[1], kpoint2.kpts[num_kpt][1])
            self.assertAlmostEqual(list_kpoint[2], kpoint2.kpts[num_kpt][2])
        self.assertEqual(kpoint.num_kpts, 108)
        # without line mode, use grid instead of reciprocal density
        lobsterin1.write_KPOINTS(
            POSCAR_input=outfile_path2,
            KPOINTS_output=outfile_path,
            line_mode=False,
            from_grid=True,
            input_grid=[6, 6, 3],
        )
        kpoint = Kpoints.from_file(outfile_path)
        kpoint2 = Kpoints.from_file(os.path.join(test_dir_doscar, "IBZKPT.lobster"))
        for num_kpt, list_kpoint in enumerate(kpoint.kpts):
            self.assertAlmostEqual(list_kpoint[0], kpoint2.kpts[num_kpt][0])
            self.assertAlmostEqual(list_kpoint[1], kpoint2.kpts[num_kpt][1])
            self.assertAlmostEqual(list_kpoint[2], kpoint2.kpts[num_kpt][2])
        self.assertEqual(kpoint.num_kpts, 108)
        #
        # #without line mode, using a certain grid, isym=0 instead of -1
        lobsterin1.write_KPOINTS(
            POSCAR_input=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "POSCAR.Li"),
            KPOINTS_output=outfile_path,
            line_mode=False,
            from_grid=True,
            input_grid=[3, 3, 3],
            isym=0,
        )
        kpoint1 = Kpoints.from_file(outfile_path)
        kpoint2 = Kpoints.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "IBZKPT_3_3_3_Li"))
        # k-point lists may be ordered differently; check set equality both ways
        for ikpoint, kpoint in enumerate(kpoint1.kpts):
            self.assertTrue(
                self.is_kpoint_in_list(
                    kpoint,
                    kpoint2.kpts,
                    kpoint1.kpts_weights[ikpoint],
                    kpoint2.kpts_weights,
                )
            )
        for ikpoint, kpoint in enumerate(kpoint2.kpts):
            self.assertTrue(
                self.is_kpoint_in_list(
                    kpoint,
                    kpoint1.kpts,
                    kpoint2.kpts_weights[ikpoint],
                    kpoint1.kpts_weights,
                )
            )
        lobsterin1.write_KPOINTS(
            POSCAR_input=os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "POSCAR.Li"),
            KPOINTS_output=outfile_path,
            line_mode=False,
            from_grid=True,
            input_grid=[2, 2, 2],
            isym=0,
        )
        kpoint1 = Kpoints.from_file(outfile_path)
        kpoint2 = Kpoints.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "IBZKPT_2_2_2_Li"))
        for ikpoint, kpoint in enumerate(kpoint1.kpts):
            self.assertTrue(
                self.is_kpoint_in_list(
                    kpoint,
                    kpoint2.kpts,
                    kpoint1.kpts_weights[ikpoint],
                    kpoint2.kpts_weights,
                )
            )
        for ikpoint, kpoint in enumerate(kpoint2.kpts):
            self.assertTrue(
                self.is_kpoint_in_list(
                    kpoint,
                    kpoint1.kpts,
                    kpoint2.kpts_weights[ikpoint],
                    kpoint1.kpts_weights,
                )
            )

    def is_kpoint_in_list(self, kpoint, kpointlist, weight, weightlist):
        # Helper: True iff `kpoint` (or its inversion -k, equivalent under
        # time-reversal symmetry) appears exactly once in `kpointlist` with
        # the matching weight.
        found = 0
        for ikpoint2, kpoint2 in enumerate(kpointlist):
            if (
                np.isclose(kpoint[0], kpoint2[0])
                and np.isclose(kpoint[1], kpoint2[1])
                and np.isclose(kpoint[2], kpoint2[2])
            ):
                if weight == weightlist[ikpoint2]:
                    found += 1
            elif (
                np.isclose(-kpoint[0], kpoint2[0])
                and np.isclose(-kpoint[1], kpoint2[1])
                and np.isclose(-kpoint[2], kpoint2[2])
            ):
                if weight == weightlist[ikpoint2]:
                    found += 1
        if found == 1:
            return True
        else:
            return False

    def test_MSONable_implementation(self):
        # tests as dict and from dict methods
        newLobsterin = Lobsterin.from_dict(self.Lobsterinfromfile.as_dict())
        self.assertDictEqual(newLobsterin, self.Lobsterinfromfile)
        newLobsterin.to_json()

    def tearDown(self):
        warnings.simplefilter("default")
class BandoverlapsTest(unittest.TestCase):
    """Tests for parsing LOBSTER bandOverlaps files and their quality checks."""

    def setUp(self):
        warnings.simplefilter("ignore")
        # test spin polarlized calc and non spinpolarized calc
        self.bandoverlaps1 = Bandoverlaps(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "bandOverlaps.lobster.1"))
        self.bandoverlaps2 = Bandoverlaps(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "bandOverlaps.lobster.2"))

    def test_attributes(self):
        # bandoverlapsdict: keyed by spin, then by the k-point coordinate string
        self.assertAlmostEqual(
            self.bandoverlaps1.bandoverlapsdict[Spin.up]["0.5 0 0"]["maxDeviation"],
            0.000278953,
        )
        self.assertAlmostEqual(
            self.bandoverlaps1.bandoverlapsdict[Spin.up]["0.5 0 0"]["matrix"][-1][-1],
            0.0188058,
        )
        self.assertAlmostEqual(self.bandoverlaps1.bandoverlapsdict[Spin.up]["0.5 0 0"]["matrix"][0][0], 1)
        self.assertAlmostEqual(
            self.bandoverlaps1.bandoverlapsdict[Spin.down]["0.0261194 0.0261194 0.473881"]["maxDeviation"],
            4.31567e-05,
        )
        self.assertAlmostEqual(
            self.bandoverlaps1.bandoverlapsdict[Spin.down]["0.0261194 0.0261194 0.473881"]["matrix"][0][-1],
            4.0066e-07,
        )
        # maxDeviation: per-k-point list of maximal deviations
        self.assertAlmostEqual(self.bandoverlaps1.max_deviation[0], 0.000278953)
        self.assertAlmostEqual(self.bandoverlaps1.max_deviation[-1], 4.31567e-05)
        self.assertAlmostEqual(self.bandoverlaps2.max_deviation[0], 0.000473319)
        self.assertAlmostEqual(self.bandoverlaps2.max_deviation[-1], 1.48451e-05)

    def test_has_good_quality(self):
        # quality checks with varying deviation limits and band counts
        self.assertFalse(self.bandoverlaps1.has_good_quality_maxDeviation(limit_maxDeviation=0.1))
        self.assertFalse(
            self.bandoverlaps1.has_good_quality_check_occupied_bands(
                number_occ_bands_spin_up=9,
                number_occ_bands_spin_down=5,
                limit_deviation=0.1,
                spin_polarized=True,
            )
        )
        self.assertTrue(
            self.bandoverlaps1.has_good_quality_check_occupied_bands(
                number_occ_bands_spin_up=3,
                number_occ_bands_spin_down=0,
                limit_deviation=0.001,
                spin_polarized=True,
            )
        )
        self.assertFalse(
            self.bandoverlaps1.has_good_quality_check_occupied_bands(
                number_occ_bands_spin_up=1,
                number_occ_bands_spin_down=1,
                limit_deviation=0.000001,
                spin_polarized=True,
            )
        )
        self.assertFalse(
            self.bandoverlaps1.has_good_quality_check_occupied_bands(
                number_occ_bands_spin_up=1,
                number_occ_bands_spin_down=0,
                limit_deviation=0.000001,
                spin_polarized=True,
            )
        )
        self.assertFalse(
            self.bandoverlaps1.has_good_quality_check_occupied_bands(
                number_occ_bands_spin_up=0,
                number_occ_bands_spin_down=1,
                limit_deviation=0.000001,
                spin_polarized=True,
            )
        )
        self.assertFalse(
            self.bandoverlaps1.has_good_quality_check_occupied_bands(
                number_occ_bands_spin_up=4,
                number_occ_bands_spin_down=4,
                limit_deviation=0.001,
                spin_polarized=True,
            )
        )
        self.assertTrue(self.bandoverlaps1.has_good_quality_maxDeviation(limit_maxDeviation=100))
        self.assertTrue(self.bandoverlaps2.has_good_quality_maxDeviation())
        self.assertFalse(self.bandoverlaps2.has_good_quality_maxDeviation(limit_maxDeviation=0.0000001))
        self.assertFalse(
            self.bandoverlaps2.has_good_quality_check_occupied_bands(
                number_occ_bands_spin_up=10, limit_deviation=0.0000001
            )
        )
        self.assertTrue(
            self.bandoverlaps2.has_good_quality_check_occupied_bands(number_occ_bands_spin_up=1, limit_deviation=0.1)
        )
        self.assertFalse(
            self.bandoverlaps2.has_good_quality_check_occupied_bands(number_occ_bands_spin_up=1, limit_deviation=1e-8)
        )
        self.assertTrue(
            self.bandoverlaps2.has_good_quality_check_occupied_bands(number_occ_bands_spin_up=10, limit_deviation=0.1)
        )
        self.assertTrue(
            self.bandoverlaps2.has_good_quality_check_occupied_bands(number_occ_bands_spin_up=1, limit_deviation=0.1)
        )
class GrosspopTest(unittest.TestCase):
    """Tests for parsing LOBSTER GROSSPOP files (Mulliken/Loewdin gross populations)."""

    def setUp(self):
        self.grosspop1 = Grosspop(os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "GROSSPOP.lobster"))

    def testattributes(self):
        # per-atom, per-orbital Mulliken and Loewdin gross populations
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Mulliken GP"]["3s"], 0.52)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Mulliken GP"]["3p_y"], 0.38)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Mulliken GP"]["3p_z"], 0.37)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Mulliken GP"]["3p_x"], 0.37)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Mulliken GP"]["total"], 1.64)
        self.assertEqual(self.grosspop1.list_dict_grosspop[0]["element"], "Si")
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Loewdin GP"]["3s"], 0.61)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Loewdin GP"]["3p_y"], 0.52)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Loewdin GP"]["3p_z"], 0.52)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Loewdin GP"]["3p_x"], 0.52)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[0]["Loewdin GP"]["total"], 2.16)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[5]["Mulliken GP"]["2s"], 1.80)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[5]["Loewdin GP"]["2s"], 1.60)
        self.assertEqual(self.grosspop1.list_dict_grosspop[5]["element"], "O")
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[8]["Mulliken GP"]["2s"], 1.80)
        self.assertAlmostEqual(self.grosspop1.list_dict_grosspop[8]["Loewdin GP"]["2s"], 1.60)
        self.assertEqual(self.grosspop1.list_dict_grosspop[8]["element"], "O")

    def test_structure_with_grosspop(self):
        # reference SiO2 structure with total gross populations as site properties
        struct_dict = {
            "@module": "pymatgen.core.structure",
            "@class": "Structure",
            "charge": None,
            "lattice": {
                "matrix": [
                    [5.021897888834907, 4.53806e-11, 0.0],
                    [-2.5109484443388332, 4.349090983701526, 0.0],
                    [0.0, 0.0, 5.511929408565514],
                ],
                "a": 5.021897888834907,
                "b": 5.0218974974248045,
                "c": 5.511929408565514,
                "alpha": 90.0,
                "beta": 90.0,
                "gamma": 119.99999598960493,
                "volume": 120.38434608659402,
            },
            "sites": [
                {
                    "species": [{"element": "Si", "occu": 1}],
                    "abc": [-3e-16, 0.4763431475490085, 0.6666669999999968],
                    "xyz": [-1.1960730853096477, 2.0716596881533986, 3.674621443020128],
                    "label": "Si",
                    "properties": {"Total Mulliken GP": 1.64, "Total Loewdin GP": 2.16},
                },
                {
                    "species": [{"element": "Si", "occu": 1}],
                    "abc": [0.5236568524509936, 0.5236568524509926, 0.0],
                    "xyz": [1.3148758827683875, 2.277431295571896, 0.0],
                    "label": "Si",
                    "properties": {"Total Mulliken GP": 1.64, "Total Loewdin GP": 2.16},
                },
                {
                    "species": [{"element": "Si", "occu": 1}],
                    "abc": [0.4763431475490066, -1.2e-15, 0.3333330000000032],
                    "xyz": [
                        2.392146647037334,
                        2.1611518932482004e-11,
                        1.8373079655453863,
                    ],
                    "label": "Si",
                    "properties": {"Total Mulliken GP": 1.64, "Total Loewdin GP": 2.16},
                },
                {
                    "species": [{"element": "O", "occu": 1}],
                    "abc": [0.1589037798059321, 0.7440031622164922, 0.4613477252144715],
                    "xyz": [-1.0701550264153763, 3.235737444648381, 2.5429160941844473],
                    "label": "O",
                    "properties": {"Total Mulliken GP": 7.18, "Total Loewdin GP": 6.92},
                },
                {
                    "species": [{"element": "O", "occu": 1}],
                    "abc": [0.2559968377835071, 0.4149006175894398, 0.7946807252144676],
                    "xyz": [0.2437959189219816, 1.8044405351020447, 4.380224059729795],
                    "label": "O",
                    "properties": {"Total Mulliken GP": 7.18, "Total Loewdin GP": 6.92},
                },
                {
                    "species": [{"element": "O", "occu": 1}],
                    "abc": [0.5850993824105679, 0.8410962201940679, 0.1280147252144683],
                    "xyz": [0.8263601076506712, 3.6580039876980064, 0.7056081286390611],
                    "label": "O",
                    "properties": {"Total Mulliken GP": 7.18, "Total Loewdin GP": 6.92},
                },
                {
                    "species": [{"element": "O", "occu": 1}],
                    "abc": [0.7440031622164928, 0.1589037798059326, 0.5386522747855285],
                    "xyz": [3.337308710918233, 0.6910869960638374, 2.969013314381067],
                    "label": "O",
                    "properties": {"Total Mulliken GP": 7.18, "Total Loewdin GP": 6.92},
                },
                {
                    "species": [{"element": "O", "occu": 1}],
                    "abc": [0.4149006175894392, 0.2559968377835, 0.2053192747855324],
                    "xyz": [1.4407936739605638, 1.1133535390791505, 1.13170534883572],
                    "label": "O",
                    "properties": {"Total Mulliken GP": 7.18, "Total Loewdin GP": 6.92},
                },
                {
                    "species": [{"element": "O", "occu": 1}],
                    "abc": [0.841096220194068, 0.5850993824105675, 0.8719852747855317],
                    "xyz": [2.754744948452184, 2.5446504486493, 4.806321279926453],
                    "label": "O",
                    "properties": {"Total Mulliken GP": 7.18, "Total Loewdin GP": 6.92},
                },
            ],
        }
        newstructure = self.grosspop1.get_structure_with_total_grosspop(
            os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "POSCAR.SiO2")
        )
        # compare fractional coordinates element-wise against the reference
        for coords, coords2 in zip(newstructure.frac_coords, Structure.from_dict(struct_dict).frac_coords):
            for xyz, xyz2 in zip(coords, coords2):
                self.assertAlmostEqual(xyz, xyz2)
class TestUtils(PymatgenTest):
    """Tests for module-level helper functions."""

    def test_get_all_possible_basis_combinations(self):
        # this basis is just for testing (not correct)
        min_basis = ["Li 1s 2s ", "Na 1s 2s", "Si 1s 2s"]
        max_basis = ["Li 1s 2p 2s ", "Na 1s 2p 2s", "Si 1s 2s"]
        combinations_basis = get_all_possible_basis_combinations(min_basis, max_basis)
        self.assertListEqual(
            combinations_basis,
            [
                ["Li 1s 2s", "Na 1s 2s", "Si 1s 2s"],
                ["Li 1s 2s", "Na 1s 2s 2p", "Si 1s 2s"],
                ["Li 1s 2s 2p", "Na 1s 2s", "Si 1s 2s"],
                ["Li 1s 2s 2p", "Na 1s 2s 2p", "Si 1s 2s"],
            ],
        )
        # single element, two optional orbitals -> 4 combinations
        min_basis = ["Li 1s 2s"]
        max_basis = ["Li 1s 2s 2p 3s"]
        combinations_basis = get_all_possible_basis_combinations(min_basis, max_basis)
        self.assertListEqual(
            combinations_basis,
            [["Li 1s 2s"], ["Li 1s 2s 2p"], ["Li 1s 2s 3s"], ["Li 1s 2s 2p 3s"]],
        )
        # two elements, two optional orbitals each -> cartesian product (16)
        min_basis = ["Li 1s 2s", "Na 1s 2s"]
        max_basis = ["Li 1s 2s 2p 3s", "Na 1s 2s 2p 3s"]
        combinations_basis = get_all_possible_basis_combinations(min_basis, max_basis)
        self.assertListEqual(
            combinations_basis,
            [
                ["Li 1s 2s", "Na 1s 2s"],
                ["Li 1s 2s", "Na 1s 2s 2p"],
                ["Li 1s 2s", "Na 1s 2s 3s"],
                ["Li 1s 2s", "Na 1s 2s 2p 3s"],
                ["Li 1s 2s 2p", "Na 1s 2s"],
                ["Li 1s 2s 2p", "Na 1s 2s 2p"],
                ["Li 1s 2s 2p", "Na 1s 2s 3s"],
                ["Li 1s 2s 2p", "Na 1s 2s 2p 3s"],
                ["Li 1s 2s 3s", "Na 1s 2s"],
                ["Li 1s 2s 3s", "Na 1s 2s 2p"],
                ["Li 1s 2s 3s", "Na 1s 2s 3s"],
                ["Li 1s 2s 3s", "Na 1s 2s 2p 3s"],
                ["Li 1s 2s 2p 3s", "Na 1s 2s"],
                ["Li 1s 2s 2p 3s", "Na 1s 2s 2p"],
                ["Li 1s 2s 2p 3s", "Na 1s 2s 3s"],
                ["Li 1s 2s 2p 3s", "Na 1s 2s 2p 3s"],
            ],
        )
        min_basis = ["Si 1s 2s 2p", "Na 1s 2s"]
        max_basis = ["Si 1s 2s 2p 3s", "Na 1s 2s 2p 3s"]
        combinations_basis = get_all_possible_basis_combinations(min_basis, max_basis)
        self.assertListEqual(
            combinations_basis,
            [
                ["Si 1s 2s 2p", "Na 1s 2s"],
                ["Si 1s 2s 2p", "Na 1s 2s 2p"],
                ["Si 1s 2s 2p", "Na 1s 2s 3s"],
                ["Si 1s 2s 2p", "Na 1s 2s 2p 3s"],
                ["Si 1s 2s 2p 3s", "Na 1s 2s"],
                ["Si 1s 2s 2p 3s", "Na 1s 2s 2p"],
                ["Si 1s 2s 2p 3s", "Na 1s 2s 3s"],
                ["Si 1s 2s 2p 3s", "Na 1s 2s 2p 3s"],
            ],
        )
class WavefunctionTest(PymatgenTest):
    """Tests for parsing LOBSTER LCAO wavefunction files and exporting volumetric data."""

    def test_parse_file(self):
        grid, points, real, imaginary, distance = Wavefunction._parse_file(
            os.path.join(
                test_dir_doscar,
                "cohp",
                "LCAOWaveFunctionAfterLSO1PlotOfSpin1Kpoint1band1.gz",
            )
        )
        self.assertArrayEqual([41, 41, 41], grid)
        self.assertAlmostEqual(points[4][0], 0.0000)
        self.assertAlmostEqual(points[4][1], 0.0000)
        self.assertAlmostEqual(points[4][2], 0.4000)
        self.assertAlmostEqual(real[8], 1.38863e-01)
        self.assertAlmostEqual(imaginary[8], 2.89645e-01)
        # one value per grid point in each array
        self.assertEqual(len(imaginary), 41 * 41 * 41)
        self.assertEqual(len(real), 41 * 41 * 41)
        self.assertEqual(len(points), 41 * 41 * 41)
        self.assertAlmostEqual(distance[0], 0.0000)

    def test_set_volumetric_data(self):
        wave1 = Wavefunction(
            filename=os.path.join(
                test_dir_doscar,
                "cohp",
                "LCAOWaveFunctionAfterLSO1PlotOfSpin1Kpoint1band1" ".gz",
            ),
            structure=Structure.from_file(os.path.join(test_dir_doscar, "cohp", "POSCAR_O.gz")),
        )
        wave1.set_volumetric_data(grid=wave1.grid, structure=wave1.structure)
        self.assertTrue(hasattr(wave1, "volumetricdata_real"))
        self.assertTrue(hasattr(wave1, "volumetricdata_imaginary"))

    def test_get_volumetricdata_real(self):
        wave1 = Wavefunction(
            filename=os.path.join(
                test_dir_doscar,
                "cohp",
                "LCAOWaveFunctionAfterLSO1PlotOfSpin1Kpoint1band1.gz",
            ),
            structure=Structure.from_file(os.path.join(test_dir_doscar, "cohp", "POSCAR_O.gz")),
        )
        volumetricdata_real = wave1.get_volumetricdata_real()
        self.assertAlmostEqual(volumetricdata_real.data["total"][0, 0, 0], -3.0966)

    def test_get_volumetricdata_imaginary(self):
        wave1 = Wavefunction(
            filename=os.path.join(
                test_dir_doscar,
                "cohp",
                "LCAOWaveFunctionAfterLSO1PlotOfSpin1Kpoint1band1.gz",
            ),
            structure=Structure.from_file(os.path.join(test_dir_doscar, "cohp", "POSCAR_O.gz")),
        )
        volumetricdata_imaginary = wave1.get_volumetricdata_imaginary()
        self.assertAlmostEqual(volumetricdata_imaginary.data["total"][0, 0, 0], -6.45895e00)

    def test_get_volumetricdata_density(self):
        wave1 = Wavefunction(
            filename=os.path.join(
                test_dir_doscar,
                "cohp",
                "LCAOWaveFunctionAfterLSO1PlotOfSpin1Kpoint1band1.gz",
            ),
            structure=Structure.from_file(os.path.join(test_dir_doscar, "cohp", "POSCAR_O.gz")),
        )
        volumetricdata_density = wave1.get_volumetricdata_density()
        # density = real^2 + imaginary^2 at each grid point
        self.assertAlmostEqual(
            volumetricdata_density.data["total"][0, 0, 0],
            (-3.0966 * -3.0966) + (-6.45895 * -6.45895),
        )

    def test_write_file(self):
        wave1 = Wavefunction(
            filename=os.path.join(
                test_dir_doscar,
                "cohp",
                "LCAOWaveFunctionAfterLSO1PlotOfSpin1Kpoint1band1.gz",
            ),
            structure=Structure.from_file(os.path.join(test_dir_doscar, "cohp", "POSCAR_O.gz")),
        )
        wave1.write_file(filename=os.path.join("wavecar_test.vasp"), part="real")
        self.assertTrue(os.path.isfile("wavecar_test.vasp"))
        wave1.write_file(filename=os.path.join("wavecar_test.vasp"), part="imaginary")
        self.assertTrue(os.path.isfile("wavecar_test.vasp"))
        os.remove("wavecar_test.vasp")
        wave1.write_file(filename=os.path.join("density.vasp"), part="density")
        self.assertTrue(os.path.isfile("density.vasp"))
        os.remove("density.vasp")

    def tearDown(self):
        warnings.simplefilter("default")
if __name__ == "__main__":
    unittest.main()  # run all tests in this module when executed directly
|
gmatteo/pymatgen
|
pymatgen/io/lobster/tests/test_lobster.py
|
Python
|
mit
| 115,785
|
[
"Gaussian",
"VASP",
"pymatgen"
] |
6d732eb01aa8935f2a2d467fc05d61dde5c9196b91a778b36058cdcdd20bdc9b
|
#!/usr/bin/python
########################################################################
# 27 April 2015
# Patrick Lombard, Centre for Stem Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import subprocess
import sys, re, os
import ConfigParser
import itertools
import HTSeq
from multiprocessing import Pool, Manager
import argparse
import pysam
import numpy
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import pyatactools
import pkg_resources
import collections
from qcmodule import mystat
from itertools import islice
import tempfile
#def sam_size(bam):
# results= {}
# size = reduce(lambda x, y: x + y, [ int(l.rstrip('\n').split('\t')[2]) for l in pysam.flagstat(bam) ])
# return size
def sam_size(bam):
    """Return the number of mapped reads in *bam*.

    Shells out to ``samtools view -c -F 4`` (``-c`` = count only,
    ``-F 4`` = exclude unmapped reads) and parses its single-line output.
    Requires ``samtools`` on the PATH.
    """
    command = "samtools view -c -F 4 {}".format(bam)
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    out = proc.communicate()[0]
    # strip() (not the previous, pointless upper()): the count comes back
    # with a trailing newline that we discard before converting.
    return int(out.strip())
def read_tss_pysam(bam, position_dict, halfwinwidth, norm, return_dict):
    # Build an aggregated, strand-oriented coverage profile around TSS positions.
    # position_dict maps ids -> (chrom, tss, strand); the profile spans
    # tss +/- halfwinwidth.  Coverage is scaled to reads-per-million using
    # norm[bam] if supplied, otherwise the BAM's own mapped-read count,
    # averaged over all TSSs and stored in return_dict[bam] (for use with
    # multiprocessing.Manager dicts).
    profile = numpy.zeros( 2*halfwinwidth, dtype="f" )
    if norm:
        constant = 1e-6/float(norm[bam])
    else:
        constant = 1e-6/float(sam_size(bam))
    samfile = pysam.Samfile(bam, "rb")
    aggreagated_cvg = collections.defaultdict(int)
    count =0
    for chrom, tss, strand in position_dict.values():
        count += 1
        coverage = {}
        chrom_start = tss-halfwinwidth
        if chrom_start <0: chrom_start=0
        chrom_end = tss+ halfwinwidth
        try:
            # Probe the chromosome name; skip TSSs on contigs that are
            # absent from the BAM header.
            samfile.pileup(chrom, 1,2)
        except:
            continue
        # Pre-fill the window so positions with no pileup column stay 0.
        for i in range(1, 2*halfwinwidth):
            coverage[i] = 0.0
        for pileupcolumn in samfile.pileup(chrom, chrom_start, chrom_end, truncate=True):
            #ref_pos = pileupcolumn.pos
            if strand == "+":
                ref_pos = pileupcolumn.pos - chrom_start
            elif strand == "-":
                # Mirror the window for minus-strand TSSs.
                ref_pos = chrom_end - pileupcolumn.pos
            cover_read = 0
            # Count only usable reads, each contributing the RPM constant.
            for pileupread in pileupcolumn.pileups:
                if pileupread.is_del: continue
                if pileupread.alignment.is_qcfail:continue
                if pileupread.alignment.is_secondary:continue
                if pileupread.alignment.is_unmapped:continue
                if pileupread.alignment.is_duplicate:continue
                cover_read += constant
            coverage[ref_pos] = cover_read
        # Accumulate this TSS's window (scaled) into the running aggregate.
        tmp1 = [coverage[k] for k in sorted(coverage)]
        for i in range(0,len(tmp1)):
            aggreagated_cvg[i] += tmp1[i]*10000
    # Average over the number of TSSs processed.
    for key in aggreagated_cvg:
        profile[key] = aggreagated_cvg[key]/float(count)
    return_dict[bam] = profile
def read_tss_function(args):
    """Adapter for multiprocessing map: unpack one argument tuple for read_tss_pysam."""
    bam, position_dict, halfwinwidth, norm, return_dict = args
    return read_tss_pysam(bam, position_dict, halfwinwidth, norm, return_dict)
def read_gene_function(args):
    """Adapter for multiprocessing map: unpack one argument tuple for genebody_coverage."""
    bam, position_list, norm, return_dict = args
    return genebody_coverage(bam, position_list, norm, return_dict)
def genebody_percentile(anno, gene_filter, mRNA_len_cut = 100):
    '''
    Return percentile points of gene bodies.

    Parses a tab/whitespace-separated Ensembl-style annotation (columns:
    gene name, chromosome, start, end, strand flag) and returns a dict
    mapping a composite gene ID ("chrom_start_end_name_strand") to
    (chrom, strand, 100 percentile coordinates along the gene body).

    Genes with mRNA length < mRNA_len_cut are skipped.  If gene_filter is
    given (path to a file whose first column lists gene names), only those
    genes are kept.
    '''
    g_percentiles = {}
    # Optional whitelist of gene names, read from the first column of gene_filter.
    g_filter = set()
    if gene_filter:
        with open(gene_filter) as f:
            for line in f:
                word = line.rstrip().split("\t")
                g_filter.add(word[0])
    for line in open(anno,'r'):
        if line.startswith('Ensembl'):continue  # header line
        # Parse fields from gene table
        fields = line.split()
        # Map Ensembl chromosome names to UCSC-style names; skip scaffolds.
        if fields[1] == "MT": chrom = "chrM"
        elif fields[1] == "X": chrom = "chrX"
        elif fields[1] == "Y": chrom = "chrY"
        elif fields[1].isdigit(): chrom = "chr" + fields[1]
        else:
            continue
        tx_start = int( fields[2] )
        tx_end = int( fields[3] )
        geneName = fields[0]
        if fields[4] == "1":
            strand = "+"
        else:
            strand = "-"
        # Honour the whitelist, if one was supplied (replaces the previous
        # duplicated filter/no-filter branches with a single code path).
        if g_filter and geneName not in g_filter:
            continue
        geneID = '_'.join([str(j) for j in (chrom, tx_start, tx_end, geneName, strand)])
        gene_all_base = list(range(tx_start+1,tx_end+1)) #1-based coordinates on genome
        if len(gene_all_base) < mRNA_len_cut:
            continue
        g_percentiles[geneID] = (chrom, strand, mystat.percentile_list (gene_all_base)) #get 100 points from each gene's coordinates
    return g_percentiles
def genebody_coverage(bam, position_list, norm, return_dict):
	'''
	Aggregate normalised read coverage over gene bodies plus 1 kb flanks.

	position_list is the dict returned from genebody_percentile:
	geneID -> (chrom, strand, [100 percentile positions]); positions are
	1-based genome coordinates.

	bam         -- path to an indexed BAM file
	norm        -- optional {bam: read count} dict used as the per-million
	               normalisation denominator; falls back to sam_size(bam)
	return_dict -- shared dict (multiprocessing.Manager) that receives the
	               300-point averaged profile keyed by the BAM path
	'''
	if norm:
		constant = 1e-6/float(norm[bam])
	else:
		constant = 1e-6/float(sam_size(bam))
	samfile = pysam.Samfile(bam, "rb")
	aggreagated_cvg = collections.defaultdict(int)
	gene_finished = 0
	for chrom, strand, positions in position_list.values():
		# Build a 300-point grid: 100 upstream points spaced 10 bp apart,
		# the 100 gene-body percentile points, then 100 downstream points.
		before = positions[0]-1011
		if before ==0:
			# NOTE(review): this branch is a no-op; presumably `before < 0`
			# was intended to clamp negative coordinates -- confirm.
			before = 0
		after = positions[-1] + 1000
		before_list = range(before, positions[0]-1-10, 10)
		after_list = range(positions[-1]+10, after+10, 10)
		new_positions = before_list + positions + after_list
		if len(new_positions) != 300:
			# Skip genes whose combined flank/body grid is not exactly 300 points.
			continue
		coverage = {}
		for i in new_positions:
			coverage[i] = 0.0
		chrom_start = new_positions[0]
		if chrom_start <0: chrom_start=0
		chrom_end = new_positions[-1]
		try:
			# Probe the BAM for this contig; skip contigs missing from the index.
			samfile.pileup(chrom, 1,2)
		except:
			continue
		for pileupcolumn in samfile.pileup(chrom, chrom_start, chrom_end, truncate=True):
			ref_pos = pileupcolumn.pos+1  # pysam positions are 0-based
			# NOTE(review): membership test on a 300-element list is O(n)
			# per column; a set would be faster.
			if ref_pos not in new_positions:
				continue
			if pileupcolumn.n == 0:
				coverage[ref_pos] = 0
				continue
			cover_read = 0
			# Count only primary, mapped, non-duplicate, QC-passing reads;
			# each read contributes the per-million normalisation constant.
			for pileupread in pileupcolumn.pileups:
				if pileupread.is_del: continue
				if pileupread.alignment.is_qcfail:continue
				if pileupread.alignment.is_secondary:continue
				if pileupread.alignment.is_unmapped:continue
				if pileupread.alignment.is_duplicate:continue
				cover_read +=constant
			coverage[ref_pos] = cover_read
		tmp = [coverage[k] for k in sorted(coverage)]
		if strand == '-':
			# Flip so every profile reads 5' -> 3'.
			tmp = tmp[::-1]
		for i in range(0,len(tmp)):
			aggreagated_cvg[i] += tmp[i]
		gene_finished += 1
	tmp3 = numpy.zeros( 300, dtype='f' )
	for key in aggreagated_cvg:
		# Average over ALL annotated genes, not only the ones that passed
		# the 300-point check (gene_finished is unused here).
		tmp3[key] = aggreagated_cvg[key]/float(len(position_list.keys()))
	return_dict[bam] = tmp3
def read_tss_anno(anno, gene_filter):
	'''
	Read transcription start sites from an Ensembl-style annotation table.

	anno        -- tab-delimited table with a header row: gene name,
	               chromosome, start, end, strand flag ("1" for +)
	gene_filter -- optional path to a file with one gene name per line;
	               when given, only those genes are returned

	Returns {gene_name: (chrom, tss_position, strand)} where the TSS is
	the transcript start on '+' genes and the transcript end on '-' genes.
	'''
	positions = {}
	# Use a set for O(1) membership tests (was a list scanned per gene).
	g_filter = set()
	if gene_filter:
		with open(gene_filter) as f:
			for line in f:
				g_filter.add(line.rstrip().split("\t")[0])
	with open(anno) as f:
		next(f)  # skip header row
		for line in f:
			word = line.rstrip().split("\t")
			if word[1] == "MT":
				chrom = "chrM"
			elif word[1] == "X":
				chrom = "chrX"
			elif word[1] == "Y":
				chrom = "chrY"
			elif word[1].isdigit():
				chrom = "chr" + word[1]
			else:
				# Bugfix: unmatched contigs (e.g. unplaced scaffolds) used to
				# fall through and silently reuse the chromosome of the
				# previous line; skip them instead.
				continue
			# The filtered and unfiltered paths used to duplicate the
			# strand logic; a single guard keeps behaviour identical.
			if g_filter and word[0] not in g_filter:
				continue
			if word[4] == "1":
				positions[word[0]] = (chrom, int(word[2]), "+")
			else:
				positions[word[0]] = (chrom, int(word[3]), "-")
	return positions
def plot_tss_profile(conditions, anno, halfwinwidth, gene_filter, threads, comb, outname, norm):
	'''
	Plot aggregated read profiles around TSSs for a set of BAM files.

	conditions  -- {bam_path: condition_label} from the config file
	anno        -- annotation table parsed by read_tss_anno
	halfwinwidth-- half window width in bp around each TSS
	gene_filter -- optional gene-name filter file (see read_tss_anno)
	threads     -- worker processes, one BAM per task
	comb        -- if True, average profiles of BAMs sharing a condition
	outname     -- output file basename; ".pdf" is appended
	norm        -- optional {bam: read count} normalisation dict
	'''
	halfwinwidth = int(halfwinwidth)
	positions = read_tss_anno(anno, gene_filter)
	fname = None
	# Workers write their profiles into a Manager-backed shared dict.
	manager = Manager()
	return_dict = manager.dict()
	pool = Pool(threads)
	pool.map(read_tss_function, itertools.izip(list(conditions.keys()), itertools.repeat(positions), itertools.repeat(halfwinwidth),itertools.repeat(norm), itertools.repeat(return_dict)))
	pool.close()
	pool.join()
	if comb:
		# Average the per-BAM profiles within each condition label.
		combined_profiles = {}
		rev_conds = reverse_dict(conditions)
		for key in rev_conds:
			c = 0
			for bam in rev_conds[key]:
				if key not in combined_profiles:
					combined_profiles[key] = return_dict[bam]
				else:
					combined_profiles[key] += return_dict[bam]
				c+= 1
			combined_profiles[key] = combined_profiles[key]/float(c)
		pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green", "darkred", "skyblue"])
		for key in combined_profiles.keys():
			pyplot.plot( numpy.arange( -halfwinwidth, halfwinwidth ), combined_profiles[key], label=key)
		pyplot.legend(prop={'size':8})
		pyplot.savefig(outname+".pdf")
		pyplot.close()
	else:
		# One line per BAM; fall back to a continuous colormap when the
		# fixed 10-colour cycle would repeat.
		if len(list(conditions.keys())) < 11:
			pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green", "darkred", "skyblue"])
		else:
			colormap = pyplot.cm.gist_ncar
			pyplot.gca().set_color_cycle([colormap(i) for i in numpy.linspace(0, 0.9, len(list(conditions.keys())))])
		for key in return_dict.keys():
			pyplot.plot( numpy.arange( -halfwinwidth, halfwinwidth ), return_dict[key], label=conditions[key])
		pyplot.legend(prop={'size':8})
		pyplot.savefig(outname+".pdf")
		pyplot.close()
def plot_genebody_profile(conditions, anno, gene_filter, threads, comb, outname, norm):
	'''
	Plot scaled gene-body coverage profiles (TSS..TES plus 1 kb flanks).

	conditions  -- {bam_path: condition_label}
	anno        -- annotation table parsed by genebody_percentile
	gene_filter -- optional gene-name filter file
	threads     -- worker processes, one BAM per task
	comb        -- if True, average profiles of BAMs sharing a condition
	outname     -- output file basename; ".pdf" is appended
	norm        -- optional {bam: read count} normalisation dict
	'''
	positions = genebody_percentile(anno, gene_filter, mRNA_len_cut = 100)
	fname = None
	manager = Manager()
	return_dict = manager.dict()
	pool = Pool(threads)
	pool.map(read_gene_function, itertools.izip(list(conditions.keys()), itertools.repeat(positions), itertools.repeat(norm), itertools.repeat(return_dict)))
	pool.close()
	pool.join()
	if comb:
		pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green", "darkred", "skyblue"])
		# Average the per-BAM profiles within each condition label.
		combined_profiles = {}
		rev_conds = reverse_dict(conditions)
		for key in rev_conds:
			c = 0
			for bam in rev_conds[key]:
				if key not in combined_profiles:
					combined_profiles[key] = return_dict[bam]
				else:
					combined_profiles[key] += return_dict[bam]
				c+= 1
			combined_profiles[key] = combined_profiles[key]/float(c)
		for key in combined_profiles.keys():
			pyplot.plot( numpy.arange( -100, 200 ), combined_profiles[key], label=key)
	else:
		if len(list(conditions.keys())) < 11:
			pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green", "darkred", "skyblue"])
		else:
			colormap = pyplot.cm.gist_ncar
			pyplot.gca().set_color_cycle([colormap(i) for i in numpy.linspace(0, 0.9, len(list(conditions.keys())))])
		for key in return_dict.keys():
			pyplot.plot( numpy.arange( -100, 200 ), return_dict[key], label=conditions[key])
	# x axis: 100 flank points, 100 body points, 100 flank points.
	pyplot.xticks([-100, 0, 100, 200], ['-1000', 'TSS', 'TES', "+1000"])
	pyplot.legend(prop={'size':8})
	pyplot.savefig(outname+".pdf")
def ConfigSectionMap(section, Config):
	'''
	Return every option of *section* in *Config* as an {option: value} dict.

	Options whose value cannot be read are reported on stdout and stored
	as None (best-effort behaviour preserved from the original).
	'''
	dict1 = {}
	for option in Config.options(section):
		try:
			dict1[option] = Config.get(section, option)
		# Narrowed from a bare `except:`; the old `== -1` branch calling the
		# undefined DebugPrint was dead code (Config.get returns strings).
		except Exception:
			print("exception on %s!" % option)
			dict1[option] = None
	return dict1
def get_insert_fun(args):
	"""Adapter for multiprocessing.Pool.map: unpack one (bam, shared_dict)
	tuple and forward it to get_insert."""
	bam, shared_results = args
	return get_insert(bam, shared_results)
def get_insert(sam, return_dict):
	'''
	Histogram template insert sizes (TLEN, SAM column 9) for one BAM.

	Converts the BAM to SAM with samtools, reads the first 5,000,000 lines,
	and counts inserts of 1..649 bp, skipping headers, chrM and unaligned
	records.  The 650-bin histogram is stored in return_dict[sam].
	'''
	cvg = collections.defaultdict(int)
	# NOTE(review): delete=False and the file is never removed -- leaks a
	# temp file per call; consider os.unlink when done.
	test2 = tempfile.NamedTemporaryFile(delete=False)
	command = "samtools view -h {} -o {}".format(sam, test2.name)
	subprocess.call(command, shell=True)
	with open(test2.name) as f:
		# NOTE(review): raises StopIteration if the SAM has fewer than 5M
		# lines; xrange is Python 2 only.
		lines = [next(f) for x in xrange(5000000)]
	for line in lines:
		if line.startswith("@"):
			pass  # header line
		else:
			word = line.rstrip().split("\t")
			if len(word) < 9: #Presumes it removes unaligned reads
				pass
			else:
				if word[2] == "chrM" or word[2] == "M": #Filter because of not relevant
					pass
				else:
					# Only the positive-TLEN mate of each pair is counted.
					if int(word[8]) < 1:
						pass
					elif int(word[8]) > 649:
						pass
					else:
						cvg[int(word[8])] += 1
	profile = numpy.zeros( 650, dtype='i' )
	for key in cvg:
		profile[key] = cvg[key]
	return_dict[sam] = profile
def plot_insert_bed2(sam, bedfile, return_dict):
	'''
	Histogram absolute insert sizes (|TLEN| 1..100) of reads overlapping
	the regions in *bedfile*.

	The BAM is first intersected with the BED via bedtools, converted to
	SAM via samtools, then parsed line by line.  The 101-bin histogram is
	stored in return_dict[sam].
	'''
	cvg = collections.defaultdict(int)
	# NOTE(review): both temp files use delete=False and are never removed.
	test = tempfile.NamedTemporaryFile(delete = False)
	test2 = tempfile.NamedTemporaryFile(delete=False)
	command = "bedtools intersect -abam {} -b {} > {}".format(sam, bedfile, test.name)
	subprocess.call(command, shell=True)
	command = "samtools view -h {} -o {}".format(test.name, test2.name)
	subprocess.call(command, shell=True)
	with open(test2.name) as f:
		for line in f:
			if line.startswith("@"):
				pass  # header line
			else:
				word = line.rstrip().split("\t")
				if len(word) < 9: #Presumes it removes unaligned reads
					pass
				else:
					if word[2] == "chrM" or word[2] == "M": #Filter because of not relevant
						pass
					else:
						# abs() counts both mates of a pair, unlike get_insert.
						if abs(int(word[8])) < 1:
							pass
						elif abs(int(word[8])) > 100:
							pass
						else:
							cvg[abs(int(word[8]))] += 1
	profile = numpy.zeros( 101, dtype='i' )
	for i in range(0, 101):
		if cvg[i]:
			profile[i] = cvg[i]
		else:
			profile[i] = 0
	return_dict[sam] = profile
def plot_insert_bed_function(args):
	"""Adapter for multiprocessing.Pool.map: unpack one argument tuple and
	forward it to plot_insert_bed2."""
	sam, bedfile, shared_results = args
	return plot_insert_bed2(sam, bedfile, shared_results)
def plot_inserts(conditions, threads, output, bedfile, comb):
	'''
	Plot insert-size histograms for a set of BAM files.

	With *bedfile*: histograms of |TLEN| 1..100 restricted to BED regions,
	optionally averaged per condition (comb).  Without *bedfile*: whole
	genome histograms of TLEN 1..649 with dashed guides at mono/di/tri
	nucleosome spacing (147/294/441 bp).  Output is written to
	"<output>.pdf".
	'''
	if bedfile:
		positions = set()
		manager = Manager()
		return_dict = manager.dict()
		pool = Pool(int(threads))
		pool.map(plot_insert_bed_function, itertools.izip(list(conditions.keys()), itertools.repeat(bedfile), itertools.repeat(return_dict)))
		pool.close()
		pool.join()
		if comb:
			pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green", "darkred", "skyblue"])
			# Average the per-BAM histograms within each condition label.
			combined_profiles = {}
			rev_conds = reverse_dict(conditions)
			for key in rev_conds:
				c = 0
				for bam in rev_conds[key]:
					if key not in combined_profiles:
						combined_profiles[key] = return_dict[bam]
					else:
						combined_profiles[key] += return_dict[bam]
					c+= 1
				combined_profiles[key] = combined_profiles[key]/float(c)
			for key in combined_profiles.keys():
				pyplot.plot( numpy.arange( 0, 101), combined_profiles[key], label=key)
			pyplot.legend(prop={'size':8})
			pyplot.savefig(output+".pdf")
		else:
			for key in return_dict.keys():
				pyplot.plot( numpy.arange( 0, 101 ), return_dict[key], label=conditions[key])
			pyplot.legend(prop={'size':8})
			pyplot.savefig(output+".pdf")
	else:
		colormap = pyplot.cm.gist_ncar
		pyplot.gca().set_color_cycle([colormap(i) for i in numpy.linspace(0, 0.9, len(list(conditions.keys())))])
		manager = Manager()
		return_dict = manager.dict()
		pool = Pool(int(threads))
		pool.map(get_insert_fun, itertools.izip(list(conditions.keys()), itertools.repeat(return_dict)))
		pool.close()
		pool.join()
		for key in return_dict.keys():
			pyplot.plot( numpy.arange( 0, 650 ), return_dict[key], label=conditions[key])
		# Dashed guides at the expected mono-, di- and tri-nucleosome sizes.
		pyplot.axvline(x=147.,color='k',ls='dashed')
		pyplot.axvline(x=294.,color='k',ls='dashed')
		pyplot.axvline(x=441.,color='k',ls='dashed')
		pyplot.legend(prop={'size':8})
		pyplot.savefig(output+".pdf")
def reverse_dict(idict):
	'''
	Invert a mapping, grouping keys by value: {k: v} -> {v: [k, ...]}.

	Used to group BAM paths by their condition label when combining plots.
	'''
	inv_map = {}
	# .items() instead of the Python-2-only .iteritems(): behaviour is
	# identical and the function now also works on Python 3.
	for k, v in idict.items():
		inv_map.setdefault(v, []).append(k)
	return inv_map
def read_peaks(bed):
	'''
	Parse a BED file into {row_index: (chrom, peak_midpoint)}.

	The midpoint is round((start + end) / 2); rows are keyed by their
	zero-based position in the file.
	'''
	positions = {}
	with open(bed) as handle:
		for idx, raw in enumerate(handle):
			fields = raw.rstrip().split("\t")
			midpoint = round((int(fields[2]) + int(fields[1])) / float(2))
			positions[idx] = (fields[0], midpoint)
	return positions
def read_peak_pysam(bam, halfwinwidth, position_dict, norm, return_dict):
	'''
	Aggregate normalised read coverage in windows around peak summits.

	position_dict -- {index: (chrom, summit)} from read_peaks
	halfwinwidth  -- half window width in bp around each summit
	norm          -- optional {bam: read count} normalisation dict;
	                 falls back to sam_size(bam)
	return_dict   -- shared dict receiving the averaged window profile
	                 keyed by the BAM path
	'''
	# NOTE(review): halfwinwidth may arrive as a float (main passes
	# width/2.0); numpy.zeros with a float size is deprecated -- confirm.
	profile = numpy.zeros( 2*halfwinwidth, dtype="f" )
	if norm:
		constant = 1e-6/float(norm[bam])
	else:
		constant = 1e-6/float(sam_size(bam))
	samfile = pysam.Samfile(bam, "rb")
	aggreagated_cvg = collections.defaultdict(int)
	for chrom, summit in position_dict.values():
		coverage = {}
		chrom_start = summit - halfwinwidth
		if chrom_start <0: chrom_start=0
		chrom_end = summit + halfwinwidth
		try:
			# Probe the BAM for this contig; skip contigs missing from the index.
			samfile.pileup(chrom, 1,2)
		except:
			continue
		coverage = {}
		for i in range(1, 2*int(halfwinwidth)):
			coverage[i] = 0.0
		for pileupcolumn in samfile.pileup(chrom, chrom_start, chrom_end, truncate=True):
			#ref_pos = pileupcolumn.pos
			ref_pos = pileupcolumn.pos - chrom_start  # position relative to window start
			cover_read = 0
			# Count only primary, mapped, non-duplicate, QC-passing reads.
			for pileupread in pileupcolumn.pileups:
				if pileupread.is_del: continue
				if pileupread.alignment.is_qcfail:continue
				if pileupread.alignment.is_secondary:continue
				if pileupread.alignment.is_unmapped:continue
				if pileupread.alignment.is_duplicate:continue
				cover_read += constant
			coverage[ref_pos] = cover_read
		tmp = [coverage[k] for k in sorted(coverage)]
		for i in range(0,len(tmp)):
			aggreagated_cvg[i] += tmp[i]
	for key in aggreagated_cvg:
		# Average over all peaks in the BED file.
		profile[key] = aggreagated_cvg[key]/float(len(position_dict.keys()))
	return_dict[bam] = profile
def read_peak_function(args):
	"""Adapter for multiprocessing.Pool.map: unpack one argument tuple and
	forward it to read_peak_pysam."""
	bam, halfwinwidth, position_dict, norm, shared_results = args
	return read_peak_pysam(bam, halfwinwidth, position_dict, norm, shared_results)
def plot_peak_profile(conditions, bed, halfwinwidth, threads, comb, outname, norm):
	'''
	Plot aggregated coverage profiles around peak summits.

	conditions  -- {bam_path: condition_label}
	bed         -- peak BED file (summits computed by read_peaks)
	halfwinwidth-- half window width in bp
	threads     -- worker processes, one BAM per task
	comb        -- if True, average profiles of BAMs sharing a condition
	outname     -- output file basename; ".pdf" is appended
	norm        -- optional {bam: read count} normalisation dict
	'''
	positions = read_peaks(bed)
	fname = None
	manager = Manager()
	return_dict = manager.dict()
	pool = Pool(threads)
	pool.map(read_peak_function, itertools.izip(list(conditions.keys()), itertools.repeat(halfwinwidth), itertools.repeat(positions), itertools.repeat(norm), itertools.repeat(return_dict)))
	pool.close()
	pool.join()
	if comb:
		pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green", "darkred", "skyblue"])
		# Average the per-BAM profiles within each condition label.
		combined_profiles = {}
		rev_conds = reverse_dict(conditions)
		for key in rev_conds:
			c = 0
			for bam in rev_conds[key]:
				if key not in combined_profiles:
					combined_profiles[key] = return_dict[bam]
				else:
					combined_profiles[key] += return_dict[bam]
				c+= 1
			combined_profiles[key] = combined_profiles[key]/float(c)
		for key in combined_profiles.keys():
			pyplot.plot( numpy.arange( 0, halfwinwidth*2 ), combined_profiles[key], label=key)
	else:
		if len(list(conditions.keys())) < 11:
			pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green", "darkred", "skyblue"])
		else:
			colormap = pyplot.cm.gist_ncar
			pyplot.gca().set_color_cycle([colormap(i) for i in numpy.linspace(0, 0.9, len(list(conditions.keys())))])
		for key in return_dict.keys():
			pyplot.plot( numpy.arange( 0, halfwinwidth*2 ), return_dict[key], label=conditions[key])
	pyplot.legend(prop={'size':8})
	pyplot.savefig(outname+".pdf")
def main():
	'''
	Command-line entry point: dispatch to the tss / gene / insert / peak
	plotters based on the chosen subcommand.  The -c config file maps BAM
	paths to condition labels in a [Conditions] section and, with -n, per
	BAM normalisation counts in a [Norm] section.
	'''
	parser = argparse.ArgumentParser(description='Takes BED files and intersect them with regions, uses TSS regions by default\n')
	subparsers = parser.add_subparsers(help='Programs included',dest="subparser_name")
	tss_parser = subparsers.add_parser('tss', help='TSS plotter')
	tss_parser.add_argument('-c', '--config', help='BAM as keys', required=True)
	tss_parser.add_argument('-f', '--filter', help='Gene name per line, filters TSS regions', required=False)
	tss_parser.add_argument('-o', '--output', help='Output name of pdf file', required=True)
	tss_parser.add_argument('-d', action='store_true', help='Use combinations for plotting', required=False)
	tss_parser.add_argument('-w', '--width', help='Width of region, default=1000', default=1000, required=False)
	tss_parser.add_argument('-t', '--threads', help='Threads, default=8', default=8, required=False)
	tss_parser.add_argument('-n', action='store_true', help='Use [Norm] as constant from config', required=False)
	gene_parser = subparsers.add_parser('gene', help='Genebody plotter')
	gene_parser.add_argument('-c', '--config', help='BAM as keys', required=False)
	gene_parser.add_argument('-f', '--filter', help='Gene name per line, filters TSS regions', required=False)
	gene_parser.add_argument('-d', action='store_true', help='Use combinations for plotting', required=False)
	gene_parser.add_argument('-o', '--output', help='Output name of pdf file', required=False)
	gene_parser.add_argument('-t', '--threads', help='Threads, default=8', default=8, required=False)
	gene_parser.add_argument('-n', action='store_true', help='Use [Norm] as constant from config', required=False)
	insert_parser = subparsers.add_parser('insert', help='Insert histogram plotter')
	insert_parser.add_argument('-c', '--config', help='BAM as keys, must be bams!', required=False)
	insert_parser.add_argument('-o', '--output', help='Output name of pdf file', required=False)
	insert_parser.add_argument('-r', '--bedfile', help='Plot insert size of bed file regions', required=False)
	insert_parser.add_argument('-t', '--threads', help='Threads, default=8', default=8, required=False)
	insert_parser.add_argument('-d', action='store_true', help='Use combinations for plotting. Only works if supplying bed file', required=False)
	peak_parser = subparsers.add_parser('peak', help='Peak profiles plotter')
	peak_parser.add_argument('-c', '--config', help='BAM as keys', required=False)
	peak_parser.add_argument('-b', '--bed', help='Peak file, should be of standard width',required=False)
	peak_parser.add_argument('-w', '--width', help='Width of region, default=1000', default=1000, required=False)
	peak_parser.add_argument('-o', '--output', help='Output name of pdf file', required=False)
	peak_parser.add_argument('-t', '--threads', help='Threads, default=8', default=8, required=False)
	peak_parser.add_argument('-d', action='store_true', help='Use combinations for plotting', required=False)
	peak_parser.add_argument('-n', action='store_true', help='Use [Norm] as constant from config', required=False)
	if len(sys.argv)==1:
		# No arguments at all: show usage instead of an argparse error.
		parser.print_help()
		sys.exit(1)
	args = vars(parser.parse_args())
	# optionxform = str keeps option names (BAM paths) case sensitive.
	Config = ConfigParser.ConfigParser()
	Config.optionxform = str
	Config.read(args["config"])
	conditions = ConfigSectionMap("Conditions", Config)
	if args["subparser_name"] == "tss":
		if args["filter"]:
			filters = args["filter"]
		else:
			filters=None
		if args["n"]:
			norm = ConfigSectionMap("Norm", Config)
		else:
			norm = None
		# Bundled mouse (mm10, Ensembl 80) annotation table.
		data = pkg_resources.resource_filename('pyatactools', 'data/mm10_ensembl_80.txt')
		# width/2.0 yields a float half-window; plot_tss_profile casts to int.
		plot_tss_profile(conditions, data, int(args["width"])/2.0, filters, int(args["threads"]), args["d"], args["output"], norm)
	elif args["subparser_name"] == "gene":
		if args["filter"]:
			filters = args["filter"]
		else:
			filters=None
		if args["n"]:
			norm = ConfigSectionMap("Norm", Config)
		else:
			norm = None
		data = pkg_resources.resource_filename('pyatactools', 'data/mm10_ensembl_80.txt')
		plot_genebody_profile(conditions, data, filters, int(args["threads"]), args["d"], args["output"], norm)
	elif args["subparser_name"] == "insert":
		plot_inserts(conditions, int(args["threads"]), args["output"], args["bedfile"], args["d"])
	elif args["subparser_name"] == "peak":
		if args["n"]:
			norm = ConfigSectionMap("Norm", Config)
		else:
			norm = None
		plot_peak_profile(conditions, args["bed"], int(args["width"])/2.0, int(args["threads"]), args["d"], args["output"], norm)
|
pdl30/pyatactools
|
pyatactools/atac_profiler.py
|
Python
|
gpl-2.0
| 22,058
|
[
"HTSeq",
"pysam"
] |
3572f6da7c6e9ce111595f3979e9fe4991fe316e87144055c1b82cf35f90f9aa
|
# -*- coding: utf-8 -*-
#
# Moonstone is platform for processing of medical images (DICOM).
# Copyright (C) 2009-2011 by Neppo Tecnologia da Informação LTDA
# and Aevum Softwares LTDA
#
# This file is part of Moonstone.
#
# Moonstone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PySide import QtCore, QtGui
import logging
import vtk
from widget.textproperties_ui import Ui_TextProperties
import unicodedata
class TextProperties(QtGui.QWidget, Ui_TextProperties):
    """Qt widget that manages a set of text annotations and edits the
    properties (content, font, size, bold/italic, colour, visibility) of
    the currently selected one.

    Internal state:
      text        -- the currently selected text object
      textButton  -- the toolbar button associated with the selection
      texts       -- {button: text object}
      textButtons -- {text object: button} (inverse of texts)
    """
    def __init__(self, parent=None, text=None):
        """Build the UI; optionally start with *text* pre-selected."""
        super(TextProperties, self).__init__(parent)
        self.text = text
        self.textButton = None
        self.texts = {}
        self.textButtons = {}
        self.setupUi(self)
        self.buttonGrigLayout = QtGui.QGridLayout()
        self.buttonGrigLayout.setAlignment(QtCore.Qt.AlignLeft)
        self.buttonGroup = QtGui.QButtonGroup()
        self.textGroup.setLayout(self.buttonGrigLayout)
        self.createActions()
        if text:
            self._getPropertiesFromText()
    def addText(self, text):
        """Register *text*, create its selector button and select it."""
        self.text = text
        # Keep the widget in sync while the text is manipulated in the scene.
        self.text.AddObserver("StartInteractionEvent", self.slotSelectButtonByText)
        self.text.AddObserver("EndInteractionEvent", self.slotMeasure)
        self.textButton = QtGui.QPushButton()
        self.textButton.setCheckable(True)
        self.textButton.setChecked(True)
        self.textButton.setMinimumSize(30, 30)
        self.textButton.setMaximumSize(30, 30)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/static/default/icon/22x22/text.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.textButton.setIcon(icon)
        self.textButtons[self.text] = self.textButton
        self.texts[self.textButton] = self.text
        # Buttons are laid out on a 4-column grid.
        # NOTE(review): `/ 4` relies on Python 2 integer division.
        self.buttonGrigLayout.addWidget(self.textButton,(len(self.texts)-1)/4,(len(self.texts)-1)%4 )
        self.buttonGroup.addButton(self.textButton)
        self._getPropertiesFromText()
    def removeSelectedText(self):
        """Remove the selected text from scene and widget.

        Returns False when nothing is selected, True otherwise; selects
        another registered text if one remains.
        """
        if not self.text:
            return False
        self.text.Off()
        if self.text.scene:
            self.text.scene.renderer.RemoveActor(self.text.textActor)
            self.text.scene.window.Render()
        self.buttonGroup.removeButton(self.textButton)
        self.buttonGrigLayout.removeWidget(self.textButton)
        self.texts.pop(self.textButton)
        self.textButtons.pop(self.text)
        self.textButton.close()
        self.buttonGrigLayout.update()
        self.text.removeObservers()
        if self.texts:
            # Fall back to an arbitrary remaining text.
            self.text = self.texts.values()[0]
            self.textButton = self.textButtons[self.text]
            self.textButton.setChecked(True)
            self._getPropertiesFromText()
        else:
            self.text = None
            self.textButton = None
        return True
    def getText(self):
        """Return the currently selected text object (or None)."""
        return self.text
    def createActions(self):
        """Wire UI controls to their slots (old-style PySide signals)."""
        self.fontColor.mousePressEvent = self.slotFontColorClicked
        self.connect(self.buttonGroup, QtCore.SIGNAL(
            "buttonClicked ( QAbstractButton*)"),
            self.slotTextChoosed)
        self.connect(self.textField, QtCore.SIGNAL(
            "textChanged ( QString)"),
            self.slotTextChanged)
        self.connect(self.fontSize, QtCore.SIGNAL("valueChanged ( int)"),
                     self.slotFontSizeChanged)
        self.connect(self.fontCombo, QtCore.SIGNAL("currentIndexChanged ( QString )"), self.slotFontChanged)
        self.connect(self.bold, QtCore.SIGNAL("clicked ( bool)"), self.slotActionBold)
        self.connect(self.italic, QtCore.SIGNAL("clicked ( bool)"), self.slotActionItalic)
        self.connect(self.visible, QtCore.SIGNAL("clicked ( bool)"), self.slotActionVisible)
    def slotActionBold(self, bold):
        """Toggle bold on the selected text."""
        if self.text:
            self.text.setBold(bold)
    def slotFontChanged(self, font):
        """Apply the chosen font family to the selected text."""
        if self.text:
            self.text.setFont(font)
    def slotActionItalic(self, italic):
        """Toggle italic on the selected text."""
        if self.text:
            self.text.setItalic(italic)
    def slotActionVisible(self, visible):
        """Toggle visibility of the selected text."""
        if self.text:
            self.text.setVisible(visible)
    def slotFontSizeChanged(self, size):
        """Apply the chosen font size to the selected text."""
        if self.text:
            self.text.setFontSize(size)
    def slotTextChanged(self, newText):
        """Update the selected text's content from the line edit."""
        if self.text:
            if isinstance(newText, unicode):
                # Strip accents/non-ASCII: the renderer expects ASCII text.
                newText = unicodedata.normalize('NFKD', newText).encode('ascii','ignore')
            self.text.setText(newText)
    def slotTextChoosed(self, button):
        """Select the text associated with the clicked button."""
        self.textButton = button
        self.text = self.texts[button]
        self._getPropertiesFromText()
    def slotFontColorClicked(self, event):
        """Open a colour dialog for the font colour swatch."""
        self.colorDialog = QtGui.QColorDialog()
        self.connect(self.colorDialog, QtCore.SIGNAL("colorSelected ( QColor)"), self.changeFontColor)
        self.colorDialog.show()
    def changeFontColor(self, color):
        """Apply *color* to the selected text and update the swatch."""
        self.text.setFontColor(color.red()/255.0, color.green()/255.0, color.blue()/255.0)
        self.fontColor.setStyleSheet(
            "background-color : rgb(" + str(color.red()) + ","
            + str(color.green()) + "," + str(color.blue())
            + ");" )
    def _getPropertiesFromText(self):
        """Load all widget controls from the selected text's properties."""
        fontColor = self.text.getFontColor()
        self.fontColor.setStyleSheet(
            "background-color : rgb(" + str(fontColor[0]*255)+ ","
            + str(fontColor[1]*255) + "," + str(fontColor[2]*255)
            + ");" )
        self.textField.setText(self.text.getText())
        self.bold.setChecked(self.text.getBold())
        self.italic.setChecked(self.text.getItalic())
        self.visible.setChecked(self.text.getVisible())
        self.fontSize.setValue(self.text.getFontSize())
        font = self.text.getFont()
        for i in range(self.fontCombo.count()):
            if self.fontCombo.itemText(i) == font:
                self.fontCombo.setCurrentIndex(i)
                break
    def slotSelectButtonByText(self, obj, evt):
        """Observer callback: select the button for the interacted text."""
        self.text = obj
        self.textButton = self.textButtons[self.text]
        self.textButton.setChecked(True)
        self._getPropertiesFromText()
    def slotMeasure(self, obj, evt):
        """Observer callback: refresh the controls after interaction ends."""
        self._getPropertiesFromText()
    def removeScene(self, scene):
        """Remove every registered text belonging to *scene*."""
        texts = self.textButtons.keys()
        for text in texts:
            if text.scene == scene:
                self.slotSelectButtonByText(text, None)
                self.removeSelectedText()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
win = TextProperties()
win.show()
sys.exit(app.exec_())
|
aevum/moonstone
|
src/moonstone/ilsa/plugins/text/gui/qt/textproperties.py
|
Python
|
lgpl-3.0
| 7,252
|
[
"VTK"
] |
65ff2577c91c7b3b05e330e97db3dc496b0e736beafd80f08a450b5410345ea6
|
import fnmatch
import os
import pandas as pd
import cPickle as pickle
import csv
from collections import OrderedDict
# Collect per-sample htseq-count tables and picard CollectRnaSeqMetrics
# output from a directory tree of sorted BAMs, and write a combined count
# matrix, picard stats table and 3'->5' coverage-bias table.
#list of file paths with mapped hits
pats = ['/netapp/home/idriver/count-picard_combined_ips17_BU3']
#output path
path = '/netapp/home/idriver/count-picard_combined_ips17_BU3'
#base name for final output count matrix and picard metrics
base_name = 'combined_spc'
#initialize dictonaries for collected output
fpkm_matrix_dict_g = OrderedDict()
count_dict = OrderedDict()
norm_read_dict = OrderedDict()
picard_stats_dict = OrderedDict()
#collect gene_list once since it the same between all samples
st = 1
gene_list = []
for p in pats:
	for root, dirnames, filenames in os.walk(os.path.join(path,p)):
		for filename in fnmatch.filter(filenames, '*_sorted.bam'):
			#sorted file path; sample name is the containing directory
			cname = root.split('/')[-1]
			out = path
			sort_out = os.path.join(out, cname, cname+'_sorted')
			#fixmate file path
			# NOTE(review): str.strip removes a SET of characters from both
			# ends, not a suffix -- .strip('.bam') / .strip('sorted.bam')
			# only work by accident for these particular names.
			picard_fixmate_out = sort_out.strip('.bam')+'_FM.bam'
			#format htseq-count command to generate raw counts from sorted accepted hits
			hts_out = os.path.join(out,cname,cname+'_htseqcount.txt')
			#run picard CollectRnaSeqMetrics (http://broadinstitute.github.io/picard/command-line-overview.html) and generate matrix of 3' to 5' bias (norm_read_dict)
			picard_rnaseqmetric_out = sort_out.strip('sorted.bam')+'RNA_metric.txt'
			picard_rnaseqchart_out = sort_out.strip('sorted.bam')+'RNA_metric.pdf'
			g_counts = []
			# Gene names are taken from the first sample only (st flag);
			# counts are assumed to be in the same order for all samples.
			with open(hts_out, mode='r') as infile:
				hts_tab = csv.reader(infile, delimiter = '\t')
				print st
				for l in hts_tab:
					if st == 1:
						gene_list.append(l[0])
					g_counts.append(l[1])
				st = 2
				print len(g_counts)
				print len(g_counts)
				print len(gene_list)
			count_dict[cname] = g_counts
			norm_read_dict[cname] = []
			index3 = []
			# Parse the fixed picard RNA metrics layout: row 6 holds the
			# stat names, row 7 the values, rows 11..111 the normalised
			# position / coverage histogram.
			with open(picard_rnaseqmetric_out, mode='r') as infile:
				pic_tab = csv.reader(infile, delimiter = '\t')
				for i, l in enumerate(pic_tab):
					if i == 6:
						index1 = l
					if i == 7:
						num_stats = []
						for n in l:
							if n == '' or n == '?':
								num_stats.append(0.0)
							else:
								num_stats.append(float(n))
						picard_stats_dict[cname] = num_stats
					if i == 10:
						index2 = l
					if i > 10 and i <= 111:
						index3.append(int(l[0]))
						norm_read_dict[cname].append(float(l[1]))
# Samples with no coverage histogram get a flat zero profile.
for k, v in norm_read_dict.items():
	if len(v) == 0:
		norm_read_dict[k] = [0 for x in range(101)]
		print norm_read_dict[k], len(norm_read_dict[k])
#form pandas dataframe of each and save as tab delimited file
count_df = pd.DataFrame(count_dict, index = gene_list)
count_df.to_csv(os.path.join(path,base_name+'_count_table.txt'), sep = '\t')
with open(os.path.join(path,'htseq_count_'+base_name+'.p'), 'wb') as fp1:
	pickle.dump(count_df, fp1)
pic_stats_df = pd.DataFrame(picard_stats_dict, index = index1)
pic_stats_df.to_csv(os.path.join(path,base_name+'_picard_stats.txt'), sep = '\t')
# NOTE(review): index1/index3 come from the LAST sample processed.
norm_read_df = pd.DataFrame(norm_read_dict, index = index3)
norm_read_df.to_csv(os.path.join(path,base_name+'_read_bias.txt'), sep = '\t')
|
idbedead/RNA-sequence-tools
|
Count_Parsing/count_matrix_stats.py
|
Python
|
mit
| 3,657
|
[
"HTSeq"
] |
09928e96a1f3420b47e06e0131aea4cb959730c17e8eea343a40494eb2c54f88
|
""" This is a test of the FileCatalogDB
It supposes that the DB is present.
"""
# pylint: disable=invalid-name,wrong-import-position
import unittest
import itertools
import os
import sys
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.DataManagementSystem.DB.FileCatalogDB import FileCatalogDB
from DIRAC.Core.Security.Properties import FC_MANAGEMENT
seName = "mySE"
testUser = 'atsareg'
testGroup = 'dirac_user'
testDir = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir'
parentDir = '/vo.formation.idgrilles.fr/user/a/atsareg'
nonExistingDir = "/I/Dont/exist/dir"
testFile = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir/testfile'
nonExistingFile = "/I/Dont/exist"
x509Chain = "<X509Chain 3 certs [/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch]"
x509Chain += "[/DC=ch/DC=cern/CN=CERN Trusted Certification Authority][/DC=ch/DC=cern/CN=CERN Root CA]>"
credDict = {
'DN': '/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch',
'extraCredentials': 'hosts',
'group': 'visitor',
'CN': 'volhcb12.cern.ch',
'x509Chain': x509Chain,
'username': 'anonymous',
'isLimitedProxy': False,
'properties': [FC_MANAGEMENT],
'isProxy': False}
isAdmin = False
proxyUser = 'anonymous'
proxyGroup = 'visitor'
# TESTS WERE DESIGNED WITH THIS CONFIGURATION
# DATABASE_CONFIG = { 'UserGroupManager' : 'UserAndGroupManagerDB',
# 'SEManager' : 'SEManagerDB',
# 'SecurityManager' : 'NoSecurityManager',
# 'DirectoryManager' : 'DirectoryLevelTree',
# 'FileManager' : 'FileManager',
# 'DirectoryMetadata' : 'DirectoryMetadata',
# 'FileMetadata' : 'FileMetadata',
# 'DatasetManager' : 'DatasetManager',
# 'UniqueGUID' : False,
# 'GlobalReadAccess' : True,
# 'LFNPFNConvention' : 'Strong',
# 'ResolvePFN' : True,
# 'DefaultUmask' : 0775,
# 'ValidFileStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'ValidReplicaStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'VisibleFileStatus' : ['AprioriGood'],
# 'VisibleReplicaStatus': ['AprioriGood'] }
# Active FileCatalogDB plugin configuration used by setUp().
DATABASE_CONFIG = {
    'UserGroupManager': 'UserAndGroupManagerDB',  # UserAndGroupManagerDB, UserAndGroupManagerCS
    'SEManager': 'SEManagerDB',  # SEManagerDB, SEManagerCS
    # NoSecurityManager, DirectorySecurityManager, FullSecurityManager
    'SecurityManager': 'NoSecurityManager',
    # DirectorySimpleTree, DirectoryFlatTree, DirectoryNodeTree, DirectoryLevelTree
    'DirectoryManager': 'DirectoryLevelTree',
    'FileManager': 'FileManager',  # FileManagerFlat, FileManager
    'DirectoryMetadata': 'DirectoryMetadata',
    'FileMetadata': 'FileMetadata',
    'DatasetManager': 'DatasetManager',
    'UniqueGUID': True,
    'GlobalReadAccess': True,
    'LFNPFNConvention': 'Strong',
    'ResolvePFN': True,
    'DefaultUmask': 0o775,
    'ValidFileStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
    'ValidReplicaStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
    'VisibleFileStatus': ['AprioriGood'],
    'VisibleReplicaStatus': ['AprioriGood']}
# Every known implementation of each pluggable manager.
ALL_MANAGERS = {
    "UserGroupManager": [
        "UserAndGroupManagerDB", "UserAndGroupManagerCS"], "SEManager": [
        "SEManagerDB", "SEManagerCS"], "SecurityManager": [
        "NoSecurityManager", "DirectorySecurityManager", "FullSecurityManager"], "DirectoryManager": [
        "DirectorySimpleTree", "DirectoryFlatTree", "DirectoryNodeTree", "DirectoryLevelTree"], "FileManager": [
        "FileManagerFlat", "FileManager"], }
# Same as ALL_MANAGERS but without the CS-backed variants.
ALL_MANAGERS_NO_CS = {
    "UserGroupManager": ["UserAndGroupManagerDB"],
    "SEManager": ["SEManagerDB"],
    "SecurityManager": [
        "NoSecurityManager",
        "DirectorySecurityManager",
        "FullSecurityManager"],
    "DirectoryManager": [
        "DirectorySimpleTree",
        "DirectoryFlatTree",
        "DirectoryNodeTree",
        "DirectoryLevelTree"],
    "FileManager": [
        "FileManagerFlat",
        "FileManager"],
    }
# Recommended production manager set.
DEFAULT_MANAGER = {"UserGroupManager": ["UserAndGroupManagerDB"],
                   "SEManager": ["SEManagerDB"],
                   "SecurityManager": ["DirectorySecurityManagerWithDelete"],
                   "DirectoryManager": ["DirectoryClosure"],
                   "FileManager": ["FileManagerPs"],
                   }
# Legacy manager set the tests were originally designed against.
DEFAULT_MANAGER_2 = {"UserGroupManager": ["UserAndGroupManagerDB"],
                     "SEManager": ["SEManagerDB"],
                     "SecurityManager": ["NoSecurityManager"],
                     "DirectoryManager": ["DirectoryLevelTree"],
                     "FileManager": ["FileManager"],
                     }
# Which combination(s) of managers this run exercises.
MANAGER_TO_TEST = DEFAULT_MANAGER
class FileCatalogDBTestCase(unittest.TestCase):
  """ Base class for the FileCatalogDB test cases
  """
  def setUp(self):
    # Fresh catalog instance per test, configured with the module-level
    # DATABASE_CONFIG plugin selection.
    self.db = FileCatalogDB()
    # for table in self.db._query( "Show tables;" )["Value"]:
    # self.db.deleteEntries( table[0] )
    self.db.setConfig(DATABASE_CONFIG)
  def tearDown(self):
    # Intentionally empty: table cleanup is disabled (see commented code).
    pass
    # for table in self.db._query( "Show tables;" )["Value"]:
    # self.db.deleteEntries( table[0] )
class SECase (FileCatalogDBTestCase):
  def test_seOperations(self):
    """Testing SE related operation"""
    # create SE; outcome depends on the module-level isAdmin flag.
    ret = self.db.addSE(seName, credDict)
    if isAdmin:
      self.assertTrue(ret["OK"], "addSE failed when adding new SE: %s" % ret)
      seId = ret["Value"]
      # create it again: adding an existing SE must return the same id.
      ret = self.db.addSE(seName, credDict)
      self.assertEqual(ret["Value"], seId, "addSE failed when adding existing SE: %s" % ret)
    else:
      self.assertEqual(
          ret["OK"],
          False,
          "addSE should fail when adding new SE as non admin: %s" %
          ret)
    # remove it
    ret = self.db.deleteSE(seName, credDict)
    self.assertEqual(ret["OK"], True if isAdmin else False, "deleteE failed %s" % ret)
class UserGroupCase(FileCatalogDBTestCase):
  def test_userOperations(self):
    """Testing the user related operations"""
    # Expected outcome of every call depends on the isAdmin flag.
    expectedRes = None
    if isAdmin:
      print "Running UserTest in admin mode"
      expectedRes = True
    else:
      print "Running UserTest in non admin mode"
      expectedRes = False
    # Add the user
    result = self.db.addUser(testUser, credDict)
    self.assertEqual(result['OK'], expectedRes, "AddUser failed when adding new user: %s" % result)
    # Add an existing user (must behave the same as adding a new one)
    result = self.db.addUser(testUser, credDict)
    self.assertEqual(
        result['OK'],
        expectedRes,
        "AddUser failed when adding existing user: %s" %
        result)
    # Fetch the list of user
    result = self.db.getUsers(credDict)
    self.assertEqual(result['OK'], expectedRes, "getUsers failed: %s" % result)
    if isAdmin:
      # Check if our user is present
      self.assertEqual(testUser in result['Value'], expectedRes, "getUsers failed: %s" % result)
    # remove the user we created
    result = self.db.deleteUser(testUser, credDict)
    self.assertEqual(result['OK'], expectedRes, "deleteUser failed: %s" % result)
  def test_groupOperations(self):
    """Testing the group related operations"""
    expectedRes = None
    if isAdmin:
      print "Running UserTest in admin mode"
      expectedRes = True
    else:
      print "Running UserTest in non admin mode"
      expectedRes = False
    # Create new group, then re-create it (both must share the outcome)
    result = self.db.addGroup(testGroup, credDict)
    self.assertEqual(result['OK'], expectedRes, "AddGroup failed when adding new user: %s" % result)
    result = self.db.addGroup(testGroup, credDict)
    self.assertEqual(
        result['OK'],
        expectedRes,
        "AddGroup failed when adding existing user: %s" %
        result)
    result = self.db.getGroups(credDict)
    self.assertEqual(result['OK'], expectedRes, "getGroups failed: %s" % result)
    if isAdmin:
      self.assertEqual(testGroup in result['Value'], expectedRes)
    result = self.db.deleteGroup(testGroup, credDict)
    self.assertEqual(result['OK'], expectedRes, "deleteGroup failed: %s" % result)
class FileCase(FileCatalogDBTestCase):
    """Exercises the File related methods of the FileCatalogDB:
    addFile, exists, setFileStatus, isFile, changePath{Owner,Group,Mode},
    getFileSize, getFileMetadata, getSEDump and removeFile.

    NOTE: the assertions are order-dependent — each step relies on the
    state left in the catalog by the previous ones.
    """

    def test_fileOperations(self):
        """
        Tests the File related Operations
        this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
        """
        # Adding a new file
        result = self.db.addFile({testFile: {'PFN': 'testfile',
                                             'SE': 'testSE',
                                             'Size': 123,
                                             'GUID': '1000',
                                             'Checksum': '0'}}, credDict)
        self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)

        # exists() accepts a plain lfn, an lfn->guid mapping, or an lfn->dict mapping
        result = self.db.exists(testFile, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
                         testFile, "exists( testFile) should be the same lfn %s" % result)

        result = self.db.exists({testFile: '1000'}, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
                         testFile, "exists( testFile : 1000) should be the same lfn %s" % result)

        result = self.db.exists({testFile: {'GUID': '1000', 'PFN': 'blabla'}}, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
                         testFile, "exists( testFile : 1000) should be the same lfn %s" % result)

        # In fact, we don't check if the GUID is correct...
        result = self.db.exists({testFile: '1001'}, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
                         testFile, "exists( testFile : 1001) should be the same lfn %s" % result)

        # a different lfn carrying an already-registered GUID resolves to the original lfn
        result = self.db.exists({testFile + '2': '1000'}, credDict)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'].get('Successful', {}).get(testFile + '2'),
                         testFile, "exists( testFile2 : 1000) should return testFile %s" % result)

        # Re-adding the same file
        result = self.db.addFile({testFile: {'PFN': 'testfile',
                                             'SE': 'testSE',
                                             'Size': 123,
                                             'GUID': '1000',
                                             'Checksum': '0'}}, credDict)
        self.assertTrue(
            result["OK"],
            "addFile failed when adding existing file with same param %s" %
            result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "addFile failed: it should be possible to add an existing lfn with same param %s" %
            result)

        # Adding same file with different param
        result = self.db.addFile({testFile: {'PFN': 'testfile',
                                             'SE': 'testSE',
                                             'Size': 123,
                                             'GUID': '1000',
                                             'Checksum': '1'}}, credDict)
        self.assertTrue(
            result["OK"],
            "addFile failed when adding existing file with different parem %s" %
            result)
        self.assertTrue(
            testFile in result["Value"]["Failed"],
            "addFile failed: it should not be possible to add an existing lfn with different param %s" %
            result)

        # a brand-new lfn reusing an existing GUID must be refused
        result = self.db.addFile({testFile + '2': {'PFN': 'testfile',
                                                   'SE': 'testSE',
                                                   'Size': 123,
                                                   'GUID': '1000',
                                                   'Checksum': '0'}}, credDict)
        self.assertTrue(result["OK"], "addFile failed when adding existing file %s" % result)
        self.assertTrue(
            testFile +
            '2' in result["Value"]["Failed"],
            "addFile failed: it should not be possible to add a new lfn with existing GUID %s" %
            result)

        ##################################################################################
        # Setting existing status of existing file
        result = self.db.setFileStatus({testFile: "AprioriGood"}, credDict)
        self.assertTrue(
            result["OK"],
            "setFileStatus failed when setting existing status of existing file %s" %
            result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "setFileStatus failed: %s should be in successful (%s)" %
            (testFile,
             result))
        # Setting unexisting status of existing file
        result = self.db.setFileStatus({testFile: "Happy"}, credDict)
        self.assertTrue(
            result["OK"],
            "setFileStatus failed when setting un-existing status of existing file %s" %
            result)
        self.assertTrue(
            testFile in result["Value"]["Failed"],
            "setFileStatus should have failed %s" %
            result)
        # Setting existing status of unexisting file
        result = self.db.setFileStatus({nonExistingFile: "Trash"}, credDict)
        self.assertTrue(
            result["OK"],
            "setFileStatus failed when setting existing status of non-existing file %s" %
            result)
        self.assertTrue(
            nonExistingFile in result["Value"]["Failed"],
            "setFileStatus failed: %s should be in failed (%s)" %
            (nonExistingFile,
             result))

        ##################################################################################
        # isFile puts a non existing lfn in Successful with value False (not in Failed)
        result = self.db.isFile([testFile, nonExistingFile], credDict)
        self.assertTrue(result["OK"], "isFile failed: %s" % result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "isFile : %s should be in Successful %s" %
            (testFile,
             result))
        self.assertTrue(
            result["Value"]["Successful"][testFile],
            "isFile : %s should be seen as a file %s" %
            (testFile,
             result))
        self.assertTrue(
            nonExistingFile in result["Value"]["Successful"],
            "isFile : %s should be in Successful %s" %
            (nonExistingFile,
             result))
        self.assertTrue(result["Value"]["Successful"][nonExistingFile] is False,
                        "isFile : %s should be seen as a file %s" % (nonExistingFile, result))

        result = self.db.changePathOwner({testFile: "toto", nonExistingFile: "tata"}, credDict)
        self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "changePathOwner : %s should be in Successful %s" %
            (testFile,
             result))
        self.assertTrue(
            nonExistingFile in result["Value"]["Failed"],
            "changePathOwner : %s should be in Failed %s" %
            (nonExistingFile,
             result))

        result = self.db.changePathGroup({testFile: "toto", nonExistingFile: "tata"}, credDict)
        self.assertTrue(result["OK"], "changePathGroup failed: %s" % result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "changePathGroup : %s should be in Successful %s" %
            (testFile,
             result))
        self.assertTrue(
            nonExistingFile in result["Value"]["Failed"],
            "changePathGroup : %s should be in Failed %s" %
            (nonExistingFile,
             result))

        result = self.db.changePathMode({testFile: 0o44, nonExistingFile: 0o44}, credDict)
        self.assertTrue(result["OK"], "changePathMode failed: %s" % result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "changePathMode : %s should be in Successful %s" %
            (testFile,
             result))
        self.assertTrue(
            nonExistingFile in result["Value"]["Failed"],
            "changePathMode : %s should be in Failed %s" %
            (nonExistingFile,
             result))

        result = self.db.getFileSize([testFile, nonExistingFile], credDict)
        self.assertTrue(result["OK"], "getFileSize failed: %s" % result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "getFileSize : %s should be in Successful %s" %
            (testFile,
             result))
        self.assertEqual(
            result["Value"]["Successful"][testFile],
            123,
            "getFileSize got incorrect file size %s" %
            result)
        self.assertTrue(
            nonExistingFile in result["Value"]["Failed"],
            "getFileSize : %s should be in Failed %s" %
            (nonExistingFile,
             result))

        # metadata must reflect the owner and status changes performed above
        result = self.db.getFileMetadata([testFile, nonExistingFile], credDict)
        self.assertTrue(result["OK"], "getFileMetadata failed: %s" % result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "getFileMetadata : %s should be in Successful %s" %
            (testFile,
             result))
        self.assertEqual(
            result["Value"]["Successful"][testFile]["Owner"],
            "toto",
            "getFileMetadata got incorrect Owner %s" %
            result)
        self.assertEqual(
            result["Value"]["Successful"][testFile]["Status"],
            "AprioriGood",
            "getFileMetadata got incorrect status %s" %
            result)
        self.assertTrue(
            nonExistingFile in result["Value"]["Failed"],
            "getFileMetadata : %s should be in Failed %s" %
            (nonExistingFile,
             result))

        # DOES NOT FOLLOW THE SUCCESSFUL/FAILED CONVENTION
        # result = self.db.getFileDetails([testFile, nonExistingFile], credDict)
        # self.assertTrue(result["OK"], "getFileDetails failed: %s" % result)
        # self.assertTrue(
        #     testFile in result["Value"]["Successful"],
        #     "getFileDetails : %s should be in Successful %s" %
        #     (testFile,
        #      result))
        # self.assertEqual(
        #     result["Value"]["Successful"][testFile]["Owner"],
        #     "toto",
        #     "getFileDetails got incorrect Owner %s" %
        #     result)
        # self.assertTrue(
        #     nonExistingFile in result["Value"]["Failed"],
        #     "getFileDetails : %s should be in Failed %s" %
        #     (nonExistingFile,
        #      result))

        # ADD SOMETHING ABOUT FILE ANCESTORS AND DESCENDENTS

        result = self.db.getSEDump('testSE')
        self.assertTrue(result['OK'], "Error when getting SE dump %s" % result)
        self.assertEqual(result['Value'], ((testFile, '0', 123),),
                         "Did not get the expected SE Dump %s" % result['Value'])

        # removeFile reports a non existing lfn as Successful (True) as well
        result = self.db.removeFile([testFile, nonExistingFile], credDict)
        self.assertTrue(result["OK"], "removeFile failed: %s" % result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "removeFile : %s should be in Successful %s" %
            (testFile,
             result))
        self.assertTrue(
            result["Value"]["Successful"][testFile],
            "removeFile : %s should be in True %s" %
            (testFile,
             result))
        self.assertTrue(
            result["Value"]["Successful"][nonExistingFile],
            "removeFile : %s should be in True %s" %
            (nonExistingFile,
             result))
class ReplicaCase(FileCatalogDBTestCase):
    """Exercises the Replica related methods of the FileCatalogDB:
    addReplica, setReplicaStatus, getReplicaStatus, getReplicas and
    removeReplica.

    NOTE: the assertions are order-dependent — each step relies on the
    state left in the catalog by the previous ones.
    """

    def test_replicaOperations(self):
        """
        this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
        """
        # Adding a new file
        result = self.db.addFile({testFile: {'PFN': 'testfile',
                                             'SE': 'testSE',
                                             'Size': 123,
                                             'GUID': '1000',
                                             'Checksum': '0'}}, credDict)
        self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)

        # Adding new replica
        result = self.db.addReplica({testFile: {"PFN": "testFile", "SE": "otherSE"}}, credDict)
        self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
        self.assertTrue(
            testFile in result['Value']["Successful"],
            "addReplica failed when adding new Replica %s" %
            result)

        # Adding the same replica (must be idempotent)
        result = self.db.addReplica({testFile: {"PFN": "testFile", "SE": "otherSE"}}, credDict)
        self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
        self.assertTrue(
            testFile in result['Value']["Successful"],
            "addReplica failed when adding new Replica %s" %
            result)

        # Adding replica of a non existing file
        result = self.db.addReplica({nonExistingFile: {"PFN": "Idontexist", "SE": "otherSE"}}, credDict)
        self.assertTrue(
            result['OK'],
            "addReplica failed when adding Replica to non existing Replica %s" %
            result)
        self.assertTrue(
            nonExistingFile in result['Value']["Failed"],
            "addReplica for non existing file should go in Failed %s" %
            result)

        # Setting existing status of existing Replica
        result = self.db.setReplicaStatus({testFile: {"Status": "Trash", "SE": "otherSE"}}, credDict)
        self.assertTrue(
            result["OK"],
            "setReplicaStatus failed when setting existing status of existing Replica %s" %
            result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "setReplicaStatus failed: %s should be in successful (%s)" %
            (testFile,
             result))

        # Setting non existing status of existing Replica
        result = self.db.setReplicaStatus(
            {testFile: {"Status": "randomStatus", "SE": "otherSE"}}, credDict)
        self.assertTrue(
            result["OK"],
            "setReplicaStatus failed when setting non-existing status of existing Replica %s" %
            result)
        self.assertTrue(
            testFile in result["Value"]["Failed"],
            "setReplicaStatus failed: %s should be in Failed (%s)" %
            (testFile,
             result))

        # Setting existing status of non-existing Replica
        result = self.db.setReplicaStatus(
            {testFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
        self.assertTrue(
            result["OK"],
            "setReplicaStatus failed when setting existing status of non-existing Replica %s" %
            result)
        self.assertTrue(
            testFile in result["Value"]["Failed"],
            "setReplicaStatus failed: %s should be in Failed (%s)" %
            (testFile,
             result))

        # Setting existing status of non-existing File
        result = self.db.setReplicaStatus(
            {nonExistingFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
        self.assertTrue(
            result["OK"],
            "setReplicaStatus failed when setting existing status of non-existing File %s" %
            result)
        self.assertTrue(
            nonExistingFile in result["Value"]["Failed"],
            "setReplicaStatus failed: %s should be in Failed (%s)" %
            (nonExistingFile,
             result))

        # Getting existing status of existing Replica but not visible
        result = self.db.getReplicaStatus({testFile: "testSE"}, credDict)
        self.assertTrue(
            result["OK"],
            "getReplicaStatus failed when getting existing status of existing Replica %s" %
            result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "getReplicaStatus failed: %s should be in Successful (%s)" %
            (testFile,
             result))

        # Getting existing status of existing Replica but not visible
        result = self.db.getReplicaStatus({testFile: "otherSE"}, credDict)
        self.assertTrue(
            result["OK"],
            "getReplicaStatus failed when getting existing status of existing Replica but not visible %s" %
            result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "getReplicaStatus failed: %s should be in Successful (%s)" %
            (testFile,
             result))

        # Getting status of non-existing File but not visible
        result = self.db.getReplicaStatus({nonExistingFile: "testSE"}, credDict)
        self.assertTrue(
            result["OK"],
            "getReplicaStatus failed when getting status of non existing File %s" %
            result)
        self.assertTrue(
            nonExistingFile in result["Value"]["Failed"],
            "getReplicaStatus failed: %s should be in failed (%s)" %
            (nonExistingFile,
             result))

        # Getting replicas of existing File and non existing file, seeing all replicas
        result = self.db.getReplicas([testFile, nonExistingFile], allStatus=True, credDict=credDict)
        self.assertTrue(result["OK"], "getReplicas failed %s" % result)
        self.assertTrue(
            testFile in result["Value"]["Successful"],
            "getReplicas failed, %s should be in Successful %s" %
            (testFile,
             result))
        self.assertEqual(
            result["Value"]["Successful"][testFile], {
                "otherSE": "", "testSE": ""}, "getReplicas failed, %s should be in Successful %s" %
            (testFile, result))
        self.assertTrue(
            nonExistingFile in result["Value"]["Failed"],
            "getReplicas failed, %s should be in Failed %s" %
            (nonExistingFile,
             result))

        # removing master replica
        result = self.db.removeReplica({testFile: {"SE": "testSE"}}, credDict)
        self.assertTrue(result['OK'], "removeReplica failed when removing master Replica %s" % result)
        self.assertTrue(
            testFile in result['Value']["Successful"],
            "removeReplica failed when removing master Replica %s" %
            result)

        # removing non existing replica of existing File (reported as Successful)
        result = self.db.removeReplica({testFile: {"SE": "nonExistingSe2"}}, credDict)
        self.assertTrue(
            result['OK'],
            "removeReplica failed when removing non existing Replica %s" %
            result)
        self.assertTrue(
            testFile in result['Value']["Successful"],
            "removeReplica failed when removing new Replica %s" %
            result)

        # removing non existing replica of non existing file (also reported as Successful)
        result = self.db.removeReplica({nonExistingFile: {"SE": "nonExistingSe3"}}, credDict)
        self.assertTrue(
            result['OK'],
            "removeReplica failed when removing replica of non existing File %s" %
            result)
        self.assertTrue(
            nonExistingFile in result['Value']["Successful"],
            "removeReplica of non existing file, %s should be in Successful %s" %
            (nonExistingFile,
             result))

        # removing last replica
        result = self.db.removeReplica({testFile: {"SE": "otherSE"}}, credDict)
        self.assertTrue(result['OK'], "removeReplica failed when removing last Replica %s" % result)
        self.assertTrue(
            testFile in result['Value']["Successful"],
            "removeReplica failed when removing last Replica %s" %
            result)

        # Cleaning after us
        result = self.db.removeFile(testFile, credDict)
        self.assertTrue(result["OK"], "removeFile failed: %s" % result)
class DirectoryCase(FileCatalogDBTestCase):
    """Exercises the Directory related methods of the FileCatalogDB:
    createDirectory, isDirectory, getDirectorySize, listDirectory,
    changePath{Owner,Group,Mode} (recursive and non-recursive) and
    removeDirectory.

    NOTE: the assertions are order-dependent — each step relies on the
    state left in the catalog by the previous ones.
    """

    def test_directoryOperations(self):
        """
        Tests the Directory related Operations
        this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
        """
        # Adding a new directory
        result = self.db.createDirectory(testDir, credDict)
        self.assertTrue(result['OK'], "addDirectory failed when adding new directory %s" % result)

        result = self.db.addFile({testFile: {'PFN': 'testfile',
                                             'SE': 'testSE',
                                             'Size': 123,
                                             'GUID': '1000',
                                             'Checksum': '0'}}, credDict)
        self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)

        # Re-adding the same directory (CAUTION, different from addFile)
        result = self.db.createDirectory(testDir, credDict)
        self.assertTrue(result["OK"], "addDirectory failed when adding existing directory %s" % result)
        self.assertTrue(
            testDir in result["Value"]["Successful"],
            "addDirectory failed: it should be possible to add an existing lfn %s" %
            result)

        # isDirectory puts a non existing path in Successful with value False
        result = self.db.isDirectory([testDir, nonExistingDir], credDict)
        self.assertTrue(result["OK"], "isDirectory failed: %s" % result)
        self.assertTrue(
            testDir in result["Value"]["Successful"],
            "isDirectory : %s should be in Successful %s" %
            (testDir,
             result))
        self.assertTrue(
            result["Value"]["Successful"][testDir],
            "isDirectory : %s should be seen as a directory %s" %
            (testDir,
             result))
        self.assertTrue(
            nonExistingDir in result["Value"]["Successful"],
            "isDirectory : %s should be in Successful %s" %
            (nonExistingDir,
             result))
        self.assertTrue(
            result["Value"]["Successful"][nonExistingDir] is False,
            "isDirectory : %s should be seen as a directory %s" %
            (nonExistingDir,
             result))

        # size from the stored table (calc flag False)...
        result = self.db.getDirectorySize([testDir, nonExistingDir], False, False, credDict)
        self.assertTrue(result["OK"], "getDirectorySize failed: %s" % result)
        self.assertTrue(
            testDir in result["Value"]["Successful"],
            "getDirectorySize : %s should be in Successful %s" %
            (testDir,
             result))
        self.assertEqual(result["Value"]["Successful"][testDir],
                         {'LogicalFiles': 1,
                          'LogicalDirectories': 0,
                          'LogicalSize': 123},
                         "getDirectorySize got incorrect directory size %s" % result)
        self.assertTrue(
            nonExistingDir in result["Value"]["Failed"],
            "getDirectorySize : %s should be in Failed %s" %
            (nonExistingDir,
             result))

        # ...and recalculated on the fly (calc flag True) must agree
        result = self.db.getDirectorySize([testDir, nonExistingDir], False, True, credDict)
        self.assertTrue(result["OK"], "getDirectorySize (calc) failed: %s" % result)
        self.assertTrue(
            testDir in result["Value"]["Successful"],
            "getDirectorySize (calc): %s should be in Successful %s" %
            (testDir,
             result))
        self.assertEqual(result["Value"]["Successful"][testDir],
                         {'LogicalFiles': 1,
                          'LogicalDirectories': 0,
                          'LogicalSize': 123},
                         "getDirectorySize got incorrect directory size %s" % result)
        self.assertTrue(
            nonExistingDir in result["Value"]["Failed"],
            "getDirectorySize (calc) : %s should be in Failed %s" %
            (nonExistingDir,
             result))

        result = self.db.listDirectory([parentDir, testDir, nonExistingDir], credDict)
        self.assertTrue(result["OK"], "listDirectory failed: %s" % result)
        self.assertTrue(
            parentDir in result["Value"]["Successful"],
            "listDirectory : %s should be in Successful %s" %
            (parentDir,
             result))
        self.assertEqual(result["Value"]["Successful"][parentDir]["SubDirs"].keys(), [testDir],
                         "listDir : incorrect content for %s (%s)" % (parentDir, result))
        self.assertTrue(
            testDir in result["Value"]["Successful"],
            "listDirectory : %s should be in Successful %s" %
            (testDir,
             result))
        self.assertEqual(result["Value"]["Successful"][testDir]["Files"].keys(), [testFile.split("/")[-1]],
                         "listDir : incorrect content for %s (%s)" % (testDir, result))
        self.assertTrue(
            nonExistingDir in result["Value"]["Failed"],
            "listDirectory : %s should be in Failed %s" %
            (nonExistingDir,
             result))

        # We do it two times to make sure that
        # when updating something to the same value
        # returns a success if it is allowed
        for attempt in xrange(2):
            print "Attempt %s" % (attempt + 1)

            # First pass: non-recursive change on parentDir only.
            # Only admin can change path group
            resultM = self.db.changePathMode({parentDir: 0o777}, credDict)
            result = self.db.changePathOwner({parentDir: "toto"}, credDict)
            resultG = self.db.changePathGroup({parentDir: "toto"}, credDict)

            result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)

            self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
            self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
            self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)

            self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)

            # Since we were the owner we should have been able to do it in any case, admin or not
            self.assertTrue(
                parentDir in resultM["Value"]["Successful"],
                "changePathMode : %s should be in Successful %s" %
                (parentDir,
                 resultM))
            self.assertEqual(
                result2['Value'].get(
                    'Successful',
                    {}).get(
                    parentDir,
                    {}).get('Mode'),
                0o777,
                "parentDir should have mode %s %s" %
                (0o777,
                 result2))
            # non-recursive: the child directory keeps its original mode
            self.assertEqual(
                result2['Value'].get(
                    'Successful',
                    {}).get(
                    testDir,
                    {}).get('Mode'),
                0o775,
                "testDir should not have changed %s" %
                result2)

            if isAdmin:
                self.assertTrue(
                    parentDir in result["Value"]["Successful"],
                    "changePathOwner : %s should be in Successful %s" %
                    (parentDir,
                     result))
                self.assertEqual(
                    result2['Value'].get(
                        'Successful',
                        {}).get(
                        parentDir,
                        {}).get('Owner'),
                    'toto',
                    "parentDir should belong to %s %s" %
                    (proxyUser,
                     result2))
                self.assertEqual(
                    result2['Value'].get(
                        'Successful',
                        {}).get(
                        testDir,
                        {}).get('Owner'),
                    proxyUser,
                    "testDir should not have changed %s" %
                    result2)

                self.assertTrue(
                    parentDir in resultG["Value"]["Successful"],
                    "changePathGroup : %s should be in Successful %s" %
                    (parentDir,
                     resultG))
                self.assertEqual(
                    result2['Value'].get(
                        'Successful',
                        {}).get(
                        parentDir,
                        {}).get('OwnerGroup'),
                    'toto',
                    "parentDir should belong to %s %s" %
                    (proxyUser,
                     result2))
                self.assertEqual(
                    result2['Value'].get(
                        'Successful',
                        {}).get(
                        testDir,
                        {}).get('OwnerGroup'),
                    proxyGroup,
                    "testDir should not have changed %s" %
                    result2)
            else:
                # depends on the policy manager so I comment
                # self.assertTrue( parentDir in result["Value"]["Failed"], "changePathOwner : \
                # %s should be in Failed %s" % ( parentDir, result ) )
                # self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'Owner' ), \
                # proxyUser, "parentDir should not have changed %s" % result2 )
                # self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'Owner' ), \
                # proxyUser, "testDir should not have changed %s" % result2 )

                # self.assertTrue( parentDir in resultG["Value"]["Failed"], \
                # "changePathGroup : %s should be in Failed %s" % ( parentDir, resultG ) )
                # self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'OwnerGroup' ), \
                # proxyGroup, "parentDir should not have changed %s" % result2 )
                # self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'OwnerGroup' ), \
                # proxyGroup, "testDir should not have changed %s" % result2 )
                pass

            # Second pass: same changes but recursive (third positional argument True),
            # so the child directory and the file must be updated too.
            # Only admin can change path group
            resultM = self.db.changePathMode({parentDir: 0o777}, credDict, True)
            result = self.db.changePathOwner({parentDir: "toto"}, credDict, True)
            resultG = self.db.changePathGroup({parentDir: "toto"}, credDict, True)

            result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)
            result3 = self.db.getFileMetadata(testFile, credDict)

            self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
            self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
            self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)

            self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
            self.assertTrue(result3["OK"], "getFileMetadata failed: %s" % result)

            # Since we were the owner we should have been able to do it in any case, admin or not
            self.assertTrue(
                parentDir in resultM["Value"]["Successful"],
                "changePathGroup : %s should be in Successful %s" %
                (parentDir,
                 resultM))
            self.assertEqual(
                result2['Value'].get(
                    'Successful',
                    {}).get(
                    parentDir,
                    {}).get('Mode'),
                0o777,
                "parentDir should have mode %s %s" %
                (0o777,
                 result2))
            self.assertEqual(
                result2['Value'].get(
                    'Successful', {}).get(
                    testDir, {}).get('Mode'), 0o777, "testDir should have mode %s %s" %
                (0o777, result2))
            self.assertEqual(
                result3['Value'].get(
                    'Successful', {}).get(
                    testFile, {}).get('Mode'), 0o777, "testFile should have mode %s %s" %
                (0o777, result3))

            if isAdmin:
                self.assertTrue(
                    parentDir in result["Value"]["Successful"],
                    "changePathOwner : %s should be in Successful %s" %
                    (parentDir,
                     result))
                self.assertEqual(
                    result2['Value'].get(
                        'Successful',
                        {}).get(
                        parentDir,
                        {}).get('Owner'),
                    'toto',
                    "parentDir should belong to %s %s" %
                    (proxyUser,
                     result2))
                self.assertEqual(
                    result2['Value'].get(
                        'Successful', {}).get(
                        testDir, {}).get('Owner'), 'toto', "testDir should belong to %s %s" %
                    (proxyUser, result2))
                self.assertEqual(
                    result3['Value'].get(
                        'Successful',
                        {}).get(
                        testFile,
                        {}).get('Owner'),
                    'toto',
                    "testFile should belong to %s %s" %
                    (proxyUser,
                     result3))

                self.assertTrue(
                    parentDir in resultG["Value"]["Successful"],
                    "changePathGroup : %s should be in Successful %s" %
                    (parentDir,
                     resultG))
                self.assertEqual(
                    result2['Value'].get(
                        'Successful',
                        {}).get(
                        parentDir,
                        {}).get('OwnerGroup'),
                    'toto',
                    "parentDir should belong to %s %s" %
                    (proxyGroup,
                     result2))
                self.assertEqual(
                    result2['Value'].get(
                        'Successful',
                        {}).get(
                        testDir,
                        {}).get('OwnerGroup'),
                    'toto',
                    "testDir should belong to %s %s" %
                    (proxyGroup,
                     result2))
                self.assertEqual(
                    result3['Value'].get(
                        'Successful',
                        {}).get(
                        testFile,
                        {}).get('OwnerGroup'),
                    'toto',
                    "testFile should belong to %s %s" %
                    (proxyGroup,
                     result3))
            else:
                # depends on the policy manager so I comment
                # self.assertTrue( parentDir in result["Value"]["Failed"], \
                # "changePathOwner : %s should be in Failed %s" % ( parentDir, result ) )
                # self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'Owner' ), \
                # proxyUser, "parentDir should not have changed %s" % result2 )
                # self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'Owner' ), \
                # proxyUser, "testDir should not have changed %s" % result2 )
                # self.assertEqual( result3['Value'].get( 'Successful', {} ).get( testFile, {} ).get( 'Owner' ), \
                # proxyUser, "testFile should not have changed %s" % result3 )
                #
                # self.assertTrue( parentDir in resultG["Value"]["Failed"], \
                # "changePathGroup : %s should be in Failed %s" % ( parentDir, resultG ) )
                # self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'OwnerGroup' ), \
                # proxyGroup, "parentDir should not have changed %s" % result2 )
                # self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'OwnerGroup' ), \
                # proxyGroup, "testDir should not have changed %s" % result2 )
                # self.assertEqual( result3['Value'].get( 'Successful', {} ).get( testFile, {} ).get( 'OwnerGroup' ), \
                # proxyGroup, "testFile should not have changed %s" % result3 )
                pass

        # Cleaning after us
        result = self.db.removeFile(testFile, credDict)
        self.assertTrue(result["OK"], "removeFile failed: %s" % result)

        # remove every directory of the chain, deepest first
        pathParts = testDir.split('/')[1:]
        startDir = '/'
        pathToRemove = []
        for part in pathParts:
            startDir = os.path.join(startDir, part)
            pathToRemove.append(startDir)
        pathToRemove.reverse()

        for toRemove in pathToRemove:
            result = self.db.removeDirectory(toRemove, credDict)
            self.assertTrue(result["OK"], "removeDirectory failed: %s" % result)
class DirectoryUsageCase (FileCatalogDBTestCase):
  """Integration tests for the DirectoryUsage bookkeeping of the FileCatalog DB.

  Verifies that the per-directory physical (per-SE) and logical file
  counts/sizes stay consistent with the real catalog content while files
  and replicas are added and removed, by comparing the cached
  DirectoryUsage table against a recursive recalculation.
  """

  def getPhysicalSize(self, sizeDict, dirName, seName):
    """ Extract the information from a ret dictionary
        and return the tuple (files, size) for a given
        directory and a se
    """
    val = sizeDict[dirName]['PhysicalSize'][seName]
    files = val['Files']
    size = val['Size']
    return (files, size)

  def getLogicalSize(self, sizeDict, dirName):
    """ Extract the information from a ret dictionary
        and return the tuple (files, size) for a given
        directory (logical view, independent of any SE)
    """
    files = sizeDict[dirName]['LogicalFiles']
    size = sizeDict[dirName]['LogicalSize']
    return (files, size)

  def getAndCompareDirectorySize(self, dirList):
    """ Fetch the directory size from the DirectoryUsage table
        and calculate it, compare the results, and then return
        the values
    """
    # Third argument toggles recalculation: False reads the cached table,
    # True recomputes from the catalog content.
    retTable = self.db.getDirectorySize(dirList, True, False, credDict)
    retCalc = self.db.getDirectorySize(dirList, True, True, credDict)
    self.assertTrue(retTable["OK"])
    self.assertTrue(retCalc["OK"])
    succTable = retTable['Value']['Successful']
    succCalc = retCalc['Value']['Successful']
    # Since we have simple type, the == is recursive for dict :-)
    retEquals = (succTable == succCalc)
    self.assertTrue(retEquals, "Calc and table results different %s %s" % (succTable, succCalc))
    return retTable

  def test_directoryUsage(self):
    """Testing DirectoryUsage related operation"""
    # create SE
    # Only admin can run that
    if not isAdmin:
      return
    d1 = '/sizeTest/d1'
    d2 = '/sizeTest/d2'
    f1 = d1 + '/f1'
    f2 = d1 + '/f2'
    f3 = d2 + '/f3'
    f1Size = 3000000000
    f2Size = 3000000001
    f3Size = 3000000002
    # f1Size = 1
    # f2Size = 2
    # f3Size = 5
    for sen in ['se1', 'se2', 'se3']:
      ret = self.db.addSE(sen, credDict)
      self.assertTrue(ret["OK"])
    for din in [d1, d2]:
      ret = self.db.createDirectory(din, credDict)
      self.assertTrue(ret["OK"])

    # --- stage 1: f1 on se1, f2 on se2 ---
    ret = self.db.addFile({f1: {'PFN': 'f1se1',
                                'SE': 'se1',
                                'Size': f1Size,
                                'GUID': '1002',
                                'Checksum': '1'},
                           f2: {'PFN': 'f2se2',
                                'SE': 'se2',
                                'Size': f2Size,
                                'GUID': '1001',
                                'Checksum': '2'}}, credDict)
    self.assertTrue(ret["OK"])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    d1s1 = self.getPhysicalSize(val, d1, 'se1')
    d1s2 = self.getPhysicalSize(val, d1, 'se2')
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
    self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
    self.assertEqual(
        d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1l, (2, f1Size + f2Size)))

    # --- stage 2: add replicas (f1 also on se2, f2 also on se3) ---
    ret = self.db.addReplica({f1: {"PFN": "f1se2", "SE": "se2"},
                              f2: {"PFN": "f1se3", "SE": "se3"}},
                             credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    d1s1 = self.getPhysicalSize(val, d1, 'se1')
    d1s2 = self.getPhysicalSize(val, d1, 'se2')
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
    self.assertEqual(
        d1s2, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1s2, (2, f1Size + f2Size)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    # Logical size is unchanged: replicas do not add logical files.
    self.assertEqual(
        d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1l, (2, f1Size + f2Size)))

    # --- stage 3: remove f1 entirely ---
    ret = self.db.removeFile([f1], credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    # Here we should have the KeyError, since there are no files left on s1 in principle
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    d1s2 = self.getPhysicalSize(val, d1, 'se2')
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))

    # --- stage 4: remove f2's replica on se2 ---
    ret = self.db.removeReplica({f2: {"SE": "se2"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    # Here we should have the KeyError, since there are no files left on s1 in principle
    # NOTE(review): this queries 'se1' but stores into d1s2 and the comment/assert
    # suggest 'se2' was intended; both SEs are empty here so the test passes
    # either way -- confirm which SE was meant.
    try:
      d1s2 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s2 = (0, 0)
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))

    # --- stage 5: re-add f1 (se1) and add f3 (se3, in d2) ---
    ret = self.db.addFile({f1: {'PFN': 'f1se1',
                                'SE': 'se1',
                                'Size': f1Size,
                                'GUID': '1002',
                                'Checksum': '1'},
                           f3: {'PFN': 'f3se3',
                                'SE': 'se3',
                                'Size': f3Size,
                                'GUID': '1003',
                                'Checksum': '3'}}, credDict)
    self.assertTrue(ret["OK"])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    d1s1 = self.getPhysicalSize(val, d1, 'se1')
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d2s3 = self.getPhysicalSize(val, d2, 'se3')
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
    self.assertEqual(
        d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1l, (2, f1Size + f2Size)))
    self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))

    # --- stage 6: remove f1's only replica (file entry remains) ---
    ret = self.db.removeReplica({f1: {"SE": "se1"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d2s3 = self.getPhysicalSize(val, d2, 'se3')
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
    # This one is silly... there are no replicas of f1, but since the file is still there,
    # the logical size does not change
    self.assertEqual(
        d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
        (d1l, (2, f1Size + f2Size)))
    self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))

    # --- stage 7: remove the replica-less f1 -> logical size finally drops ---
    ret = self.db.removeFile([f1], credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    d1s3 = self.getPhysicalSize(val, d1, 'se3')
    d2s3 = self.getPhysicalSize(val, d2, 'se3')
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
    self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
    self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
    self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))

    # --- stage 8: strip the last replicas of f2 and f3 ---
    ret = self.db.removeReplica({f2: {"SE": "se3"},
                                 f3: {"SE": "se3"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    try:
      d1s3 = self.getPhysicalSize(val, d1, 'se3')
    except KeyError:
      d1s3 = (0, 0)
    try:
      d2s3 = self.getPhysicalSize(val, d2, 'se3')
    except KeyError:
      d2s3 = (0, 0)
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
    self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
    # This one is silly... there are no replicas of f1, but since the file is still there,
    # the logical size does not change
    self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
    self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))

    # --- stage 9: remove the remaining files -> everything back to zero ---
    ret = self.db.removeFile([f2, f3], credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1, d2])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    try:
      d1s3 = self.getPhysicalSize(val, d1, 'se3')
    except KeyError:
      d1s3 = (0, 0)
    try:
      d2s3 = self.getPhysicalSize(val, d2, 'se3')
    except KeyError:
      d2s3 = (0, 0)
    d1l = self.getLogicalSize(val, d1)
    d2l = self.getLogicalSize(val, d2)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
    self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
    # This one is silly... there are no replicas of f1, but since the file is still there,
    # the logical size does not change
    self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
    self.assertEqual(d2l, (0, 0), "Unexpected size %s, expected %s" % (d2l, (0, 0)))

    # Removing Replicas and Files from the same directory
    # NOTE(review): the addFile return value is not asserted here.
    ret = self.db.addFile({f1: {'PFN': 'f1se1',
                                'SE': 'se1',
                                'Size': f1Size,
                                'GUID': '1002',
                                'Checksum': '1'},
                           f2: {'PFN': 'f2se2',
                                'SE': 'se1',
                                'Size': f2Size,
                                'GUID': '1001',
                                'Checksum': '2'}}, credDict)
    ret = self.db.removeReplica({f1: {"SE": "se1"},
                                 f2: {"SE": "se1"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s1 = self.getPhysicalSize(val, d1, 'se1')
    except KeyError:
      d1s1 = (0, 0)
    self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
    ret = self.db.removeFile([f1, f2], credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    d1l = self.getLogicalSize(val, d1)
    self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))

    # Try removing a replica from a non existing SE
    # NOTE(review): the addFile return value is not asserted here.
    ret = self.db.addFile({f1: {'PFN': 'f1se1',
                                'SE': 'se1',
                                'Size': f1Size,
                                'GUID': '1002',
                                'Checksum': '1'}}, credDict)
    ret = self.db.removeReplica({f1: {"SE": "se2"}}, credDict)
    self.assertTrue(ret['OK'])
    ret = self.getAndCompareDirectorySize([d1])
    self.assertTrue(ret["OK"])
    val = ret['Value']['Successful']
    try:
      d1s2 = self.getPhysicalSize(val, d1, 'se2')
    except KeyError:
      d1s2 = (0, 0)
    self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
if __name__ == '__main__':
managerTypes = MANAGER_TO_TEST.keys()
all_combinations = list(itertools.product(*MANAGER_TO_TEST.values()))
numberOfManager = len(managerTypes)
for setup in all_combinations:
print "Running with:"
print ("".join(["\t %s : %s\n" % (managerTypes[i], setup[i]) for i in xrange(numberOfManager)]))
for i in xrange(numberOfManager):
DATABASE_CONFIG[managerTypes[i]] = setup[i]
suite = unittest.defaultTestLoader.loadTestsFromTestCase(SECase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(UserGroupCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FileCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ReplicaCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryUsageCase))
# Then run without admin privilege:
isAdmin = False
if FC_MANAGEMENT in credDict['properties']:
credDict['properties'].remove(FC_MANAGEMENT)
print "Running test without admin privileges"
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
# First run with admin privilege:
isAdmin = True
if FC_MANAGEMENT not in credDict['properties']:
credDict['properties'].append(FC_MANAGEMENT)
print "Running test with admin privileges"
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
|
andresailer/DIRAC
|
tests/Integration/DataManagementSystem/Test_FileCatalogDB.py
|
Python
|
gpl-3.0
| 56,699
|
[
"DIRAC"
] |
797022c2771e9932020f0705009bcc2602649dbcad3884d04ef88e246c23a795
|
from __future__ import print_function, division
import numpy as np
#########################################
### Generators ###
# Functions which return next value in a sequence upon each call
def squares():
    """Yield the squares of the positive integers: 1, 4, 9, 16, ...

    Infinite generator -- advance it with next().
    """
    n = 1
    while True:
        yield n * n
        n += 1
# Init the generator
sq = squares()
print(next(sq))
print(next(sq))
print(next(sq))
print(next(sq))
# Generator which exchanges between True and False upon every call
def truthGenerator():
    """Alternate between True and False forever, starting with True."""
    value = True
    while True:
        yield value
        value = not value
# Init the generator
truth = truthGenerator()
print(next(truth))
print(next(truth))
print(next(truth))
print(next(truth))
# Let's replace the lottery hostess with a robot...
import random
def lottery():
    """Yield a lottery draw: six main numbers (1-40), then one bonus (1-15)."""
    # Six main numbers first...
    for _ in range(6):
        yield random.randint(1, 40)
    # ...followed by the bonus number from the smaller pool.
    yield random.randint(1, 15)
for rand_num in lottery():
print("And the next number is...", rand_num)
# Now we can run our own illegal gambling den!
#########################################
### Introduction to object oriented programming ###
# Make a class which will describe a battleship
class Battleship:
    """A battleship with simple game statistics derived from its guns.

    Attributes:
        name: ship name
        operator: operating country
        displacement: displacement in metric tonnes (also the starting hp)
        armament: list of [count, caliber] gun batteries
        sunk: True once the ship has been destroyed
        battle_power: sum of count * caliber**2 over all batteries
        hp: remaining health points
    """

    def __init__(self, name, operator, displacement, sunk=False):
        self.name, self.operator = name, operator
        self.displacement = displacement
        self.sunk = sunk
        # No guns yet -- added one battery at a time via addArmament.
        self.armament = []
        self.battle_power = 0
        # Health points start out equal to the displacement.
        self.hp = displacement

    def addArmament(self, num, caliber):
        """Register num guns of the given caliber and update battle power."""
        self.armament.append([num, caliber])
        self.battle_power += num * caliber ** 2
# Init a battleship object for Austria-Hungary
vb = Battleship('SMS Viribus Unitis', 'Austria-Hungary', 20000)

# Add armament (num, caliber); note the call adds 12 guns of each caliber,
# not 4 as an earlier comment claimed.
vb.addArmament(12, 12)  # 12 x 12-inch guns
vb.addArmament(12, 15)
vb.addArmament(12, 7)

# Init a battleship for Italy
da = Battleship('Dante Alighieri', 'Italy', 19500)

# Add armament
da.addArmament(12, 12)
da.addArmament(20, 4.7)
def engage(ship1, ship2):
    """Confront two battleships until at least one is sunk.

    Each round both ships take damage simultaneously (hp reduced by the
    opponent's battle_power). A ship whose hp drops below 0 is marked as
    sunk and announced.

    Fix: if both ships drop below 0 hp in the same round, both are now
    marked sunk (previously only ship1 was checked first, so ship2 could
    end with hp < 0 but sunk == False).
    """
    while True:
        # Simultaneous volley: both ships take damage this round.
        ship1.hp -= ship2.battle_power
        ship2.hp -= ship1.battle_power

        any_sunk = False
        if ship1.hp < 0:
            ship1.sunk = True
            print(ship1.name, 'was sunk!')
            any_sunk = True
        if ship2.hp < 0:
            ship2.sunk = True
            print(ship2.name, 'was sunk!')
            any_sunk = True

        if any_sunk:
            return
# Confront Viribus Unitis and Dante Alighieri
engage(vb, da)
# Operator overloading and printable representation of an object
class Sphere:
    """A sphere characterised by its volume (m^3); the radius is derived.

    Adding two spheres (s1 + s2) yields a new Sphere whose volume is the
    sum of the operands' volumes.
    """

    def __init__(self, volume):
        self.volume = volume
        # Invert V = 4/3 * pi * r^3 to obtain the radius.
        self.radius = (self.volume / (4 / 3 * np.pi)) ** (1 / 3)

    def __add__(self, other):
        # Volumes (not radii) add when two spheres are merged.
        return Sphere(self.volume + other.volume)

    def __repr__(self):
        return ('Sphere of volume ' + str(self.volume) + ' m^3 and radius '
                + str(self.radius) + ' m')
# More on operator overloading: https://docs.python.org/3/library/operator.html
s1 = Sphere(10) # 10 m3 volume
s2 = Sphere(2) # 2 m3 volume
print(s1)
print(s2)
# Add to spheres together
s3 = s1 + s2
# Check the new radius
print(s3)
# Class inheritance
class AstroObj:
    """A celestial object located by equatorial coordinates.

    Args:
        name: object designation
        ra: right ascension in degrees
        dec: declination in degrees
    """

    def __init__(self, name, ra, dec):
        self.name = name
        self.ra = ra
        self.dec = dec

    def angDist(self, other):
        """Return the angular distance (in degrees) to another object,
        via the spherical law of cosines."""
        ra1, dec1, ra2, dec2 = map(
            np.radians, (self.ra, self.dec, other.ra, other.dec))
        cos_ang = (np.sin(dec1) * np.sin(dec2)
                   + np.cos(dec1) * np.cos(dec2) * np.cos(ra1 - ra2))
        return np.degrees(np.arccos(cos_ang))
class Star(AstroObj):
    """A star: an AstroObj carrying a stellar spectral type (e.g. 'DA2')."""

    def __init__(self, name, ra, dec, spec_type):
        # Store the spectral classification, then initialise the
        # positional attributes via the AstroObj base class.
        self.spec_type = spec_type
        AstroObj.__init__(self, name, ra, dec)
class Galaxy(AstroObj):
    """A galaxy: an AstroObj carrying a redshift z."""

    def __init__(self, name, ra, dec, z):
        # Store the redshift, then initialise the positional
        # attributes via the AstroObj base class.
        self.z = z
        AstroObj.__init__(self, name, ra, dec)
s1 = Star('Sirius', 101.2875, -16.7161, 'DA2')
g1 = Galaxy('NGC660', 25.7583, +13.645, 0.003)
print(s1.angDist(g1))
#########################################
# Everything in Python is an object!
# E.g. we can do something like this:
a = [1, 2, 3]
# This will give us the length of list 'a', because it is stored as its attribute
print(a.__len__)
#########################################

### List comprehension ###
# Transforming one list to another

# List of even numbers from 1 to 100
evens = [x for x in range(1, 101) if x%2 == 0]
# For easier understanding of the line above, let's convert it to words:
# "Take a number in the range from 1 to 100, only if it is divisible by 2."
print(evens)

### C/P to show the equivalent code
evens = []
for x in range(1, 101):
    if x%2 == 0:
        evens.append(x)
print(evens)
###

# Let's unravel a 2D list
a = [[1, 2], [3, 4]]
a = [x for row in a for x in row]
print(a)

# The classic "Mathematicians order pizzas" joke:
# An infinite number of mathematicians enter a pizzeria. The first mathematician orders 1 pizza. The second
# one orders 1/2 of a pizza, the third one orders 1/4, the fourth one orders 1/8, etc.
# The server quickly loses his temper and just brings them 2 pizzas. Was he right?
pizzas = [1.0/(2**x) for x in range(50)]

# The terms converge to 0 very quickly, so the first 50 are plenty.
print(pizzas)

# The sum of all pizzas
print('Infinite pizzas:', sum(pizzas))

#########################################

# Question:
# Describe what will the 'form' list contain
lst = [4.1756, 2.3412, 8.5754, 7.124531]
form = ["x[{:d}] = {:5.2f}".format(i, x) for i, x in enumerate(lst)]
print(form)

#########################################

### Dictionaries ###
# A collection of (key: value) pairs
num2word = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four'}

# Print 'zero'
print(num2word[0])

# Go through all keys in the dictionary and return values
for key in num2word:
    print(num2word[key])

#########################################

### Sets ###
# Lists of unique elements
a = [1, 1, 2, 2, 2, 3, 4, 5, 6, 6, 7]

# Convert a to a set
b = set(a)

# WARNING!
# We cannot index sets!
# This returns an ERROR:
# print(b[0])
# If you want it back as a list, you could do:
# b = list(set(a))

c = set([2, 3, 10])

# Get the difference of two sets
print(b.difference(c))

# Get the intersection of two sets
print(b.intersection(c))

# More: check if one set is a subset of another, check if they are disjoint (their intersection is null)
|
dvida/UWO-PA-Python-Course
|
Lecture 8/L8_lecture.py
|
Python
|
mit
| 7,179
|
[
"Galaxy"
] |
3f3b30db21d72503cbb74a1546e4d00c9dde5d88d0689750960ca04e23fc0fda
|
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
import numpy as np
from gpaw import debug
from gpaw.utilities import is_contiguous
import _gpaw
class Spline:
    """Python wrapper around the C-level ``_gpaw.Spline`` radial spline.

    Represents the radial part of a function f(r) * r^l * Y_lm on
    [0, rmax]; evaluation and grid expansion are delegated to the
    C extension object stored in ``self.spline``.
    """

    def __init__(self, l, rmax, f_g):
        """Spline(l, rcut, list) -> Spline object

        The integer l gives the angular momentum quantum number and
        the list contains the spline values from r=0 to r=rcut.

        The array f_g gives the radial part of the function on the grid.
        The radial function is multiplied by a real solid spherical harmonics
        (r^l * Y_lm).
        """
        assert 0.0 < rmax

        f_g = np.array(f_g, float)
        # Copy so we don't change the values of the input array
        # (the last grid value is forced to zero at the cutoff).
        f_g[-1] = 0.0
        self.spline = _gpaw.Spline(l, rmax, f_g)

    def get_cutoff(self):
        """Return the radial cutoff."""
        return self.spline.get_cutoff()

    def get_angular_momentum_number(self):
        """Return the angular momentum quantum number."""
        return self.spline.get_angular_momentum_number()

    def get_value_and_derivative(self, r):
        """Return the value and derivative."""
        return self.spline.get_value_and_derivative(r)

    def __call__(self, r):
        # Evaluate the radial spline at a single (non-negative) radius.
        assert r >= 0.0
        return self.spline(r)

    def map(self, r_x):
        # Vectorized evaluation over an array of radii.
        return np.vectorize(self, [float])(r_x)

    def get_functions(self, gd, start_c, end_c, spos_c):
        """Expand the spline onto the grid patch [start_c, end_c) of ``gd``.

        spos_c is the scaled position of the function's centre; returns an
        array of shape (nm,) + patch shape with the m-components filled in.
        """
        h_cv = gd.h_cv
        # start_c is the new origin so we translate gd.beg_c to start_c
        origin_c = np.array([0,0,0])
        pos_v = np.dot(spos_c, gd.cell_cv) - np.dot(start_c, h_cv)
        A_gm, G_b = _gpaw.spline_to_grid(self.spline, origin_c, end_c-start_c,
                                         pos_v, h_cv, end_c-start_c, origin_c)

        if debug:
            assert G_b.ndim == 1 and G_b.shape[0] % 2 == 0
            assert is_contiguous(G_b, np.intc)
            # NOTE(review): ``A_gm.shape[:-1]`` is a tuple while
            # ``np.sum(...)`` is a scalar, so this comparison looks like it
            # can never hold -- confirm whether ``A_gm.shape[0]`` (or a
            # 1-tuple on the right) was intended.
            assert A_gm.shape[:-1] == np.sum(G_b[1::2]-G_b[::2])

        indices_gm, ng, nm = self.spline.get_indices_from_zranges(start_c,
                                                                  end_c, G_b)
        shape = (nm,) + tuple(end_c-start_c)
        work_mB = np.zeros(shape, dtype=A_gm.dtype)
        np.put(work_mB, indices_gm, A_gm)
        return work_mB
## class rspline:
## def __init__(self, r_g, f_g, l=0):
## self.rcut = r_g[-1]
## self.l = l
## ...
## def __call__(self, r):
## return self.spline(r)*r**l
|
robwarm/gpaw-symm
|
gpaw/spline.py
|
Python
|
gpl-3.0
| 2,507
|
[
"GPAW"
] |
e6e0080040f4ae18101060a88b84257c7f2d8f1cc04d2b5dca603ff33527a2b2
|
#
# Copyright (C) 2012-2013 Aleabot
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Robot breakfast
"""
import kol.Error as Error
from kol.manager import FilterManager
from kol.manager import PatternManager
from kol.request.GenericRequest import GenericRequest
from kol.request.MeatBushRequest import MeatBushRequest
from kol.request.MeatOrchidRequest import MeatOrchidRequest
from kol.request.MeatTreeRequest import MeatTreeRequest
from kol.util import Report
from kol.util import ParseResponseUtils
class HippyProduceStandRequest(GenericRequest):
    "Goes to the hippy produce stand to retrieve daily meat."

    def __init__(self, session):
        super(HippyProduceStandRequest, self).__init__(session)
        # Store page for the hippy produce stand ('h').
        self.url = session.serverURL + 'store.php?whichstore=h'

    def parseResponse(self):
        # An empty response body means the store is not accessible to
        # this character yet.
        if len(self.responseText) == 0:
            raise Error.Error('You cannot visit that store yet.', Error.INVALID_LOCATION)
        # Record the meat gained (or lost) as reported on the page.
        self.responseData['meat'] = ParseResponseUtils.parseMeatGainedLost(self.responseText)
def breakfast(session):
    """Collect all daily meat bonuses for the session's character.

    Visits each meat-producing location in turn (hippy produce stand,
    potted meat bush, exotic hanging meat orchid, potted meat tree),
    accumulating the meat gained. A failure at one location is logged
    and does not prevent visiting the others (best-effort, as before).
    """
    Report.info('bot', 'Start of breakfast.')
    meatGained = 0

    # (human-readable location name, request class) -- the repeated
    # try/except blocks are now driven from this table.
    visits = [
        ('hippy produce stand', HippyProduceStandRequest),
        ('potted meat bush', MeatBushRequest),
        ('exotic hanging meat orchid', MeatOrchidRequest),
        ('potted meat tree', MeatTreeRequest),
    ]
    for locationName, requestClass in visits:
        Report.info('bot', 'Visiting %s.' % locationName)
        try:
            req = requestClass(session)
            response = req.doRequest()
            meatGained += response['meat']
        except Error.Error as err:
            Report.error('bot', 'Error while visiting %s: %s' % (locationName, str(err)))

    Report.info('bot', 'End of breakfast. Meat gained: ' + str(meatGained))
|
aleabot/aleabot
|
src/alea/breakfast.py
|
Python
|
gpl-3.0
| 2,991
|
[
"VisIt"
] |
1ae5d18c7c72c90132078140f404d8730a6d72e166862d26f1732ada8e5e3a9b
|
"""
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
proces for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
class UnivariateSpline(object):
    """
    One-dimensional smoothing spline fit to a given set of data points.

    Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. `s`
    specifies the number of knots by specifying a smoothing condition.

    Parameters
    ----------
    x : (N,) array_like
        1-D array of independent input data. Must be increasing.
    y : (N,) array_like
        1-D array of dependent input data, of the same length as `x`.
    w : (N,) array_like, optional
        Weights for spline fitting. Must be positive. If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline. Must be <= 5.
    s : float or None, optional
        Positive smoothing factor used to choose the number of knots. Number
        of knots will be increased until the smoothing condition is satisfied:

            sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s

        If None (default), s=len(w) which should be a good value if 1/w[i] is
        an estimate of the standard deviation of y[i]. If 0, spline will
        interpolate through all data points.

    See Also
    --------
    InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
    LSQUnivariateSpline : Subclass in which knots are user-selected instead of
        being set by smoothing condition
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Examples
    --------
    >>> from numpy import linspace,exp
    >>> from numpy.random import randn
    >>> from scipy.interpolate import UnivariateSpline
    >>> x = linspace(-3, 3, 100)
    >>> y = exp(-x**2) + randn(100)/10
    >>> s = UnivariateSpline(x, y, s=1)
    >>> xs = linspace(-3, 3, 1000)
    >>> ys = s(xs)

    xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y.

    """

    def __init__(self, x, y, w=None, bbox = [None]*2, k=3, s=None):
        """
        Input:
          x,y   - 1-d sequences of data points (x must be
                  in strictly ascending order)

        Optional input:
          w          - positive 1-d sequence of weights
          bbox       - 2-sequence specifying the boundary of
                       the approximation interval.
                       By default, bbox=[x[0],x[-1]]
          k=3        - degree of the univariate spline.
          s          - positive smoothing factor defined for
                       estimation condition:
                         sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s
                       Default s=len(w) which should be a good value
                       if 1/w[i] is an estimate of the standard
                       deviation of y[i].
        """
        #_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        data = dfitpack.fpcurf0(x,y,k,w=w,
                                xb=bbox[0],xe=bbox[1],s=s)
        if data[-1]==1:
            # nest too small, setting to maximum bound
            data = self._reset_nest(data)
        self._data = data
        self._reset_class()

    def _reset_class(self):
        # Inspect the FITPACK return code (ier) and, if the fit degenerated
        # into an interpolating or least-squares spline, retag the instance
        # with the corresponding subclass.
        data = self._data
        n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
        self._eval_args = t[:n],c[:n],k
        if ier==0:
            # the spline returned has a residual sum of squares fp
            # such that abs(fp-s)/s <= tol with tol a relative
            # tolerance set to 0.001 by the program
            pass
        elif ier==-1:
            # the spline returned is an interpolating spline
            self._set_class(InterpolatedUnivariateSpline)
        elif ier==-2:
            # the spline returned is the weighted least-squares
            # polynomial of degree k. In this extreme case fp gives
            # the upper bound fp0 for the smoothing factor s.
            self._set_class(LSQUnivariateSpline)
        else:
            # error
            if ier==1:
                self._set_class(LSQUnivariateSpline)
            message = _curfit_messages.get(ier,'ier=%s' % (ier))
            warnings.warn(message)

    def _set_class(self, cls):
        self._spline_class = cls
        if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
                              LSQUnivariateSpline):
            self.__class__ = cls
        else:
            # It's an unknown subclass -- don't change class. cf. #731
            pass

    def _reset_nest(self, data, nest=None):
        # Re-run the fit with a larger knot storage ("nest"); by default the
        # theoretical maximum m+k+1 is used.
        n = data[10]
        if nest is None:
            k,m = data[5],len(data[0])
            nest = m+k+1  # this is the maximum bound for nest
        else:
            if not n <= nest:
                raise ValueError("`nest` can only be increased")
        t, c, fpint, nrdata = [np.resize(data[n], nest) for n in [8,9,11,12]]

        args = data[:8] + (t,c,n,fpint,nrdata,data[13])
        data = dfitpack.fpcurf1(*args)
        return data

    def set_smoothing_factor(self, s):
        """ Continue spline computation with the given smoothing
        factor s and with the knots found at the last call.

        """
        data = self._data
        if data[6]==-1:
            warnings.warn('smoothing factor unchanged for'
                          'LSQ spline with fixed knots')
            return
        args = data[:6] + (s,) + data[7:]
        data = dfitpack.fpcurf1(*args)
        if data[-1]==1:
            # nest too small, setting to maximum bound
            data = self._reset_nest(data)
        self._data = data
        self._reset_class()

    def __call__(self, x, nu=0):
        """ Evaluate spline (or its nu-th derivative) at positions x.
        Note: x can be unordered but the evaluation is more efficient
        if x is (partially) ordered.

        """
        x = np.asarray(x)
        # empty input yields empty output
        if x.size == 0:
            return array([])
#        if nu is None:
#            return dfitpack.splev(*(self._eval_args+(x,)))
#        return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
        return fitpack.splev(x, self._eval_args, der=nu)

    def get_knots(self):
        """ Return positions of (boundary and interior) knots of the spline.
        """
        data = self._data
        k,n = data[5],data[7]
        return data[8][k:n-k]

    def get_coeffs(self):
        """Return spline coefficients."""
        data = self._data
        k,n = data[5],data[7]
        return data[9][:n-k-1]

    def get_residual(self):
        """Return weighted sum of squared residuals of the spline
        approximation: ``sum((w[i] * (y[i]-s(x[i])))**2, axis=0)``.

        """
        return self._data[10]

    def integral(self, a, b):
        """ Return definite integral of the spline between two given points.
        """
        return dfitpack.splint(*(self._eval_args+(a,b)))

    def derivatives(self, x):
        """ Return all derivatives of the spline at the point x."""
        d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
        if not ier == 0:
            raise ValueError("Error code returned by spalde: %s" % ier)
        return d

    def roots(self):
        """ Return the zeros of the spline.

        Restriction: only cubic splines are supported by fitpack.
        """
        k = self._data[5]
        if k==3:
            z,m,ier = dfitpack.sproot(*self._eval_args[:2])
            if not ier == 0:
                raise ValueError("Error code returned by spalde: %s" % ier)
            return z[:m]
        raise NotImplementedError('finding roots unsupported for '
                                  'non-cubic splines')
class InterpolatedUnivariateSpline(UnivariateSpline):
    """
    One-dimensional interpolating spline for a given set of data points.

    Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. Spline
    function passes through all provided points. Equivalent to
    `UnivariateSpline` with s=0.

    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be increasing
    y : (N,) array_like
        input dimension of data points
    w : (N,) array_like, optional
        Weights for spline fitting. Must be positive. If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), bbox=[x[0],x[-1]].
    k : int, optional
        Degree of the smoothing spline. Must be 1 <= `k` <= 5.

    See Also
    --------
    UnivariateSpline : Superclass -- allows knots to be selected by a
        smoothing condition
    LSQUnivariateSpline : spline for which knots are user-selected
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Examples
    --------
    >>> from numpy import linspace,exp
    >>> from numpy.random import randn
    >>> from scipy.interpolate import InterpolatedUnivariateSpline
    >>> x = linspace(-3, 3, 100)
    >>> y = exp(-x**2) + randn(100)/10
    >>> s = InterpolatedUnivariateSpline(x, y)
    >>> xs = linspace(-3, 3, 1000)
    >>> ys = s(xs)

    xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y

    """

    def __init__(self, x, y, w=None, bbox = [None]*2, k=3):
        """
        Input:
          x,y   - 1-d sequences of data points (x must be
                  in strictly ascending order)

        Optional input:
          w          - positive 1-d sequence of weights
          bbox       - 2-sequence specifying the boundary of
                       the approximation interval.
                       By default, bbox=[x[0],x[-1]]
          k=3        - degree of the univariate spline.
        """
        #_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        # s=0 forces exact interpolation through every data point.
        self._data = dfitpack.fpcurf0(x,y,k,w=w,
                                      xb=bbox[0],xe=bbox[1],s=0)
        self._reset_class()
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t: (M,) array_like
interior knots of the spline. Must be in ascending order
and bbox[0]<t[0]<...<t[-1]<bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), bbox=[x[0],x[-1]].
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> from numpy import linspace,exp
>>> from numpy.random import randn
>>> from scipy.interpolate import LSQUnivariateSpline
>>> x = linspace(-3,3,100)
>>> y = exp(-x**2) + randn(100)/10
>>> t = [-1,0,1]
>>> s = LSQUnivariateSpline(x,y,t)
>>> xs = linspace(-3,3,1000)
>>> ys = s(xs)
xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y
with knots [-3,-1,0,1,3]
"""
def __init__(self, x, y, t, w=None, bbox = [None]*2, k=3):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
t - 1-d sequence of the positions of user-defined
interior knots of the spline (t must be in strictly
ascending order and bbox[0]<t[0]<...<t[-1]<bbox[-1])
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
"""
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb=bbox[0]
xe=bbox[1]
if xb is None: xb = x[0]
if xe is None: xe = x[-1]
t = concatenate(([xb]*(k+1),t,[xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0,axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
data = dfitpack.fpcurfm1(x,y,k,t,w=w,xb=xb,xe=xe)
self._data = data[:-3] + (None,None,data[-1])
self._reset_class()
################ Bivariate spline ####################
class _BivariateSplineBase(object):
    """Common base for bivariate spline s(x,y) interpolation on the
    rectangle [xb,xe] x [yb, ye] calculated from a given set of data
    points (x,y,z).

    Subclasses are expected to populate ``self.fp`` (weighted residual
    sum), ``self.tck`` (knot vectors tx, ty and coefficients c) and
    ``self.degrees``.

    See Also
    --------
    bisplrep, bisplev : an older wrapping of FITPACK
    BivariateSpline :
        implementation of bivariate spline interpolation on a plane grid
    SphereBivariateSpline :
        implementation of bivariate spline interpolation on a spherical grid
    """

    def get_residual(self):
        """Return weighted sum of squared residuals of the spline
        approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
        """
        return self.fp

    def get_knots(self):
        """Return a tuple (tx,ty) with the knot positions of the spline
        with respect to the x- and y-variable, respectively.

        Interior knots are given by t[k+1:-k-1]; the additional boundary
        knots are t[:k+1]=b and t[-k-1:]=e.
        """
        return self.tck[:2]

    def get_coeffs(self):
        """Return spline coefficients."""
        return self.tck[2]
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
SphereBivariateSpline :
bivariate spline interpolation in spherical cooridinates
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
def __call__(self, x, y, mth='array'):
""" Evaluate spline at the grid points defined by the coordinate arrays
x,y."""
x = np.asarray(x)
y = np.asarray(y)
# empty input yields empty output
if (x.size == 0) and (y.size == 0):
return array([])
if mth=='array':
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
return z
raise NotImplementedError('unknown method mth=%s' % mth)
def ev(self, xi, yi):
"""
Evaluate spline at points (x[i], y[i]), i=0,...,len(x)-1
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
zi,ier = dfitpack.bispeu(tx,ty,c,kx,ky,xi,yi)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
return zi
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
LSQUnivariateSpline : to create a BivariateSpline using weighted
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
"""
    def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
                 eps=None):
        """Fit a weighted least-squares bivariate spline with the fixed
        interior knots tx, ty; see the class docstring for parameters.
        """
        # The full knot vectors carry kx+1 (resp. ky+1) boundary knots at
        # each end in addition to the user-supplied interior knots.
        nx = 2*kx+2+len(tx)
        ny = 2*ky+2+len(ty)
        tx1 = zeros((nx,),float)
        ty1 = zeros((ny,),float)
        tx1[kx+1:nx-kx-1] = tx
        ty1[ky+1:ny-ky-1] = ty
        # NOTE(review): the boundary entries of tx1/ty1 are left at zero
        # here; presumably surfit_lsq fills them from xb..ye -- TODO confirm.
        xb,xe,yb,ye = bbox
        tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,\
                                               xb,xe,yb,ye,\
                                               kx,ky,eps,lwrk2=1)
        if ier>10:
            # ier > 10 apparently encodes the workspace size the routine
            # needs (FITPACK convention -- TODO confirm), so retry once
            # with lwrk2 set to that value.
            tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,\
                                                   xb,xe,yb,ye,\
                                                   kx,ky,eps,lwrk2=ier)
        if ier in [0,-1,-2]: # normal return
            pass
        else:
            if ier<-2:
                # Rank-deficient system: include the deficiency in the
                # warning text (message -3 has a %i placeholder).
                deficiency = (nx-kx-1)*(ny-ky-1)+ier
                message = _surfit_messages.get(-3) % (deficiency)
            else:
                message = _surfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message)
        self.fp = fp
        self.tck = tx1, ty1, c
        self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not all(diff(x) > 0.0):
raise TypeError('x must be strictly increasing')
if not all(diff(y) > 0.0):
raise TypeError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise TypeError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise TypeError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise TypeError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise TypeError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if not ier in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothUnivariateSpline :
to create a BivariateSpline through the given points
LSQUnivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi):
""" Evaluate the spline at the grid ponts defined by the coordinate
arrays theta, phi. """
theta = np.asarray(theta)
phi = np.asarray(phi)
# empty input yields empty output
if (theta.size == 0) and (phi.size == 0):
return array([])
if theta.min() < 0. or theta.max() > np.pi:
raise ValueError("requested theta out of bounds.")
if phi.min() < 0. or phi.max() > 2. * np.pi:
raise ValueError("requested phi out of bounds.")
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
z, ier = dfitpack.bispev(tx, ty, c, kx, ky, theta, phi)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
return z
def ev(self, thetai, phii):
""" Evaluate the spline at the points (theta[i], phi[i]),
i=0,...,len(theta)-1
"""
thetai = np.asarray(thetai)
phii = np.asarray(phii)
# empty input yields empty output
if (thetai.size == 0) and (phii.size == 0):
return array([])
if thetai.min() < 0. or thetai.max() > np.pi:
raise ValueError("requested thetai out of bounds.")
if phii.min() < 0. or phii.max() > 2. * np.pi:
raise ValueError("requested phii out of bounds.")
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
zi, ier = dfitpack.bispeu(tx, ty, c, kx, ky, thetai, phii)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
return zi
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(),s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if not ier in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(),knotst,knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message)
elif not ier in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians, and must lie within (0, 2pi).
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
Chosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
>>> for ii in xrange(len(s)):
>>> lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
>>> ax = fig2.add_subplot(2, 2, ii+1)
>>> ax.imshow(data_interp, interpolation='nearest')
>>> ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
    def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
                 pole_exact=False, pole_flat=False):
        """Fit a smoothing spline to data on a rectangular spherical grid;
        see the class docstring for parameter details.
        """
        # iopt[0]=0 selects the smoothing-spline mode; iopt[1:] will hold
        # the continuity orders at the two poles.  ider[0]/ider[2] encode
        # how the u=0 / u=pi pole values are treated (-1: no value given),
        # ider[1]/ider[3] whether the derivatives vanish there.
        iopt = np.array([0, 0, 0], dtype=int)
        ider = np.array([-1, 0, -1, 0], dtype=int)
        # Normalize every pole option to a (u=0, u=pi) pair.
        if pole_values is None:
            pole_values = (None, None)
        elif isinstance(pole_values, (float, np.float32, np.float64)):
            pole_values = (pole_values, pole_values)
        if isinstance(pole_continuity, bool):
            pole_continuity = (pole_continuity, pole_continuity)
        if isinstance(pole_exact, bool):
            pole_exact = (pole_exact, pole_exact)
        if isinstance(pole_flat, bool):
            pole_flat = (pole_flat, pole_flat)
        r0, r1 = pole_values
        iopt[1:] = pole_continuity
        # A pole value switches ider to the exactness flag for that pole.
        if r0 is None:
            ider[0] = -1
        else:
            ider[0] = pole_exact[0]
        if r1 is None:
            ider[2] = -1
        else:
            ider[2] = pole_exact[1]
        ider[1], ider[3] = pole_flat
        u, v = np.ravel(u), np.ravel(v)
        # Validate monotonicity and that r is laid out as (u.size, v.size).
        if not np.all(np.diff(u) > 0.0):
            raise TypeError('u must be strictly increasing')
        if not np.all(np.diff(v) > 0.0):
            raise TypeError('v must be strictly increasing')
        if not u.size == r.shape[0]:
            raise TypeError('u dimension of r must have same number of '
                            'elements as u')
        if not v.size == r.shape[1]:
            raise TypeError('v dimension of r must have same number of '
                            'elements as v')
        # pole_flat=True requires pole_continuity=True at the same pole.
        if pole_continuity[1] is False and pole_flat[1] is True:
            raise TypeError('if pole_continuity is False, so must be '
                            'pole_flat')
        if pole_continuity[0] is False and pole_flat[0] is True:
            raise TypeError('if pole_continuity is False, so must be '
                            'pole_flat')
        r = np.ravel(r)
        nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
                        u.copy(), v.copy(), r.copy(), r0, r1, s)
        if not ier in [0, -1, -2]:
            msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(msg)
        self.fp = fp
        # Cubic (degree 3) in both directions: n-4 coefficients per axis.
        self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
        self.degrees = (3, 3)
|
Universal-Model-Converter/UMC3.0a
|
data/Python/x86/Lib/site-packages/scipy/interpolate/fitpack2.py
|
Python
|
mit
| 48,477
|
[
"Gaussian"
] |
423cca9b2d77da107d042b8178a72f3de860510d5240e4c14b383cf6e4c7f29f
|
"""Read and write notebook files as XML.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from base64 import encodestring, decodestring
import warnings
from xml.etree import ElementTree as ET
from .rwbase import NotebookReader, NotebookWriter
from .nbbase import (
new_code_cell, new_text_cell, new_worksheet, new_notebook, new_output,
new_metadata
)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def indent(elem, level=0):
    """Pretty-print helper: insert newline/indent whitespace into an
    ElementTree element, in place, so the serialized XML is readable."""
    pad = "\n" + level*" "
    children = list(elem)
    if children:
        # Open this element on its own line before the first child.
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = pad
        for child in children:
            indent(child, level+1)
        # Re-align the last child's tail with this element's closing tag.
        last = children[-1]
        if not last.tail or not last.tail.strip():
            last.tail = pad
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = pad
def _get_text(e, tag):
sub_e = e.find(tag)
if sub_e is None:
return None
else:
return sub_e.text
def _set_text(nbnode, attr, parent, tag):
if attr in nbnode:
e = ET.SubElement(parent, tag)
e.text = nbnode[attr]
def _get_int(e, tag):
sub_e = e.find(tag)
if sub_e is None:
return None
else:
return int(sub_e.text)
def _set_int(nbnode, attr, parent, tag):
if attr in nbnode:
e = ET.SubElement(parent, tag)
e.text = unicode(nbnode[attr])
def _get_bool(e, tag):
sub_e = e.find(tag)
if sub_e is None:
return None
else:
return bool(int(sub_e.text))
def _set_bool(nbnode, attr, parent, tag):
if attr in nbnode:
e = ET.SubElement(parent, tag)
if nbnode[attr]:
e.text = u'1'
else:
e.text = u'0'
def _get_binary(e, tag):
sub_e = e.find(tag)
if sub_e is None:
return None
else:
return decodestring(sub_e.text)
def _set_binary(nbnode, attr, parent, tag):
if attr in nbnode:
e = ET.SubElement(parent, tag)
e.text = encodestring(nbnode[attr])
class XMLReader(NotebookReader):
    """Reader converting the deprecated XML notebook format (nbformat v2)
    into an in-memory notebook node structure."""
    def reads(self, s, **kwargs):
        """Parse a notebook from the XML string ``s``."""
        root = ET.fromstring(s)
        return self.to_notebook(root, **kwargs)
    def to_notebook(self, root, **kwargs):
        """Convert a parsed XML ``root`` element into a notebook node.

        Walks the <worksheets>/<worksheet>/<cells> hierarchy, rebuilding
        code, HTML and markdown cells (including code-cell outputs).
        """
        warnings.warn('The XML notebook format is no longer supported, '
                      'please convert your notebooks to JSON.', DeprecationWarning)
        # Top-level notebook metadata.  Only ``nbname`` is used below;
        # the other fields are read but dropped (v2 metadata keeps only
        # the name).
        nbname = _get_text(root,u'name')
        nbauthor = _get_text(root,u'author')
        nbemail = _get_text(root,u'email')
        nblicense = _get_text(root,u'license')
        nbcreated = _get_text(root,u'created')
        nbsaved = _get_text(root,u'saved')
        worksheets = []
        for ws_e in root.find(u'worksheets').getiterator(u'worksheet'):
            wsname = _get_text(ws_e,u'name')
            cells = []
            # getiterator() with no tag visits every descendant element;
            # the tag checks below pick out the three known cell kinds.
            for cell_e in ws_e.find(u'cells').getiterator():
                if cell_e.tag == u'codecell':
                    input = _get_text(cell_e,u'input')
                    prompt_number = _get_int(cell_e,u'prompt_number')
                    collapsed = _get_bool(cell_e,u'collapsed')
                    language = _get_text(cell_e,u'language')
                    outputs = []
                    for output_e in cell_e.find(u'outputs').getiterator(u'output'):
                        # Each output may carry several MIME payloads;
                        # absent ones come back as None from the getters.
                        output_type = _get_text(output_e,u'output_type')
                        output_text = _get_text(output_e,u'text')
                        output_png = _get_binary(output_e,u'png')
                        output_jpeg = _get_binary(output_e,u'jpeg')
                        output_svg = _get_text(output_e,u'svg')
                        output_html = _get_text(output_e,u'html')
                        output_latex = _get_text(output_e,u'latex')
                        output_json = _get_text(output_e,u'json')
                        output_javascript = _get_text(output_e,u'javascript')
                        out_prompt_number = _get_int(output_e,u'prompt_number')
                        etype = _get_text(output_e,u'etype')
                        evalue = _get_text(output_e,u'evalue')
                        traceback = []
                        traceback_e = output_e.find(u'traceback')
                        if traceback_e is not None:
                            for frame_e in traceback_e.getiterator(u'frame'):
                                traceback.append(frame_e.text)
                        # Normalise "no traceback" to None rather than [].
                        if len(traceback) == 0:
                            traceback = None
                        output = new_output(output_type=output_type,output_png=output_png,
                            output_text=output_text, output_svg=output_svg,
                            output_html=output_html, output_latex=output_latex,
                            output_json=output_json, output_javascript=output_javascript,
                            output_jpeg=output_jpeg, prompt_number=out_prompt_number,
                            etype=etype, evalue=evalue, traceback=traceback
                        )
                        outputs.append(output)
                    cc = new_code_cell(input=input,prompt_number=prompt_number,
                        language=language,outputs=outputs,collapsed=collapsed)
                    cells.append(cc)
                if cell_e.tag == u'htmlcell':
                    source = _get_text(cell_e,u'source')
                    rendered = _get_text(cell_e,u'rendered')
                    cells.append(new_text_cell(u'html', source=source, rendered=rendered))
                if cell_e.tag == u'markdowncell':
                    source = _get_text(cell_e,u'source')
                    rendered = _get_text(cell_e,u'rendered')
                    cells.append(new_text_cell(u'markdown', source=source, rendered=rendered))
            ws = new_worksheet(name=wsname,cells=cells)
            worksheets.append(ws)
        md = new_metadata(name=nbname)
        nb = new_notebook(metadata=md,worksheets=worksheets)
        return nb
# Module-level singleton reader; these aliases let callers use
# ``reads``/``read``/``to_notebook`` as plain functions.
_reader = XMLReader()
reads = _reader.reads
read = _reader.read
to_notebook = _reader.to_notebook
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/IPython/nbformat/v2/nbxml.py
|
Python
|
apache-2.0
| 6,765
|
[
"Brian"
] |
919fbe71a5ed11dc07a8439c6635bb4bbcea4100fa301477f0451f767386b70e
|
# (C) British Crown Copyright 2014 - 2018, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.netcdf._load_cube` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import netCDF4
import numpy as np
from iris.coords import DimCoord
import iris.fileformats.cf
from iris.fileformats.netcdf import _load_cube
from iris.tests import mock
class TestCoordAttributes(tests.IrisTest):
    """Check that flag_*/valid_* CF attributes pass through onto the
    coordinates of a cube loaded via ``_load_cube``."""
    @staticmethod
    def _patcher(engine, cf, cf_group):
        # Replacement for _assert_case_specific_facts: instead of running
        # the loading rules, attach each coordinate in the CF group directly
        # to the cube under construction and record what was provided.
        coordinates = []
        for coord in cf_group:
            engine.cube.add_aux_coord(coord)
            coordinates.append((coord, coord.name()))
        engine.provides['coordinates'] = coordinates
    def setUp(self):
        # Patch the rules hook with our patcher for the duration of each test.
        this = 'iris.fileformats.netcdf._assert_case_specific_facts'
        patch = mock.patch(this, side_effect=self._patcher)
        patch.start()
        self.addCleanup(patch.stop)
        self.engine = mock.Mock()
        self.filename = 'DUMMY'
        # Sentinels: the tests only assert pass-through identity, never the
        # actual attribute values.
        self.flag_masks = mock.sentinel.flag_masks
        self.flag_meanings = mock.sentinel.flag_meanings
        self.flag_values = mock.sentinel.flag_values
        self.valid_range = mock.sentinel.valid_range
        self.valid_min = mock.sentinel.valid_min
        self.valid_max = mock.sentinel.valid_max
    def _make(self, names, attrs):
        # Build one DimCoord per name plus a mock CF variable whose cf_group
        # yields those coordinates, each reporting the matching "unused"
        # (i.e. not otherwise consumed) attributes.
        # NOTE: zip() truncates to the shorter sequence, so surplus ``attrs``
        # entries beyond len(names) are ignored.
        coords = [DimCoord(i, long_name=name) for i, name in enumerate(names)]
        shape = (1,)
        cf_group = {}
        for name, cf_attrs in zip(names, attrs):
            cf_attrs_unused = mock.Mock(return_value=cf_attrs)
            cf_group[name] = mock.Mock(cf_attrs_unused=cf_attrs_unused)
        cf = mock.Mock(cf_group=cf_group)
        cf_data = mock.Mock(_FillValue=None)
        cf_data.chunking = mock.MagicMock(return_value=shape)
        cf_var = mock.MagicMock(spec=iris.fileformats.cf.CFVariable,
                                dtype=np.dtype('i4'),
                                cf_data=cf_data,
                                cf_name='DUMMY_VAR',
                                cf_group=coords,
                                shape=shape)
        return cf, cf_var
    def test_flag_pass_thru(self):
        # Each flag_* attribute must land, alone, on its coordinate.
        items = [('masks', 'flag_masks', self.flag_masks),
                 ('meanings', 'flag_meanings', self.flag_meanings),
                 ('values', 'flag_values', self.flag_values)]
        for name, attr, value in items:
            names = [name]
            attrs = [[(attr, value)]]
            cf, cf_var = self._make(names, attrs)
            cube = _load_cube(self.engine, cf, cf_var, self.filename)
            self.assertEqual(len(cube.coords(name)), 1)
            coord = cube.coord(name)
            self.assertEqual(len(coord.attributes), 1)
            self.assertEqual(list(coord.attributes.keys()), [attr])
            self.assertEqual(list(coord.attributes.values()), [value])
    def test_flag_pass_thru_multi(self):
        # Mixed attribute sets: flag_*/valid_* and unknown attributes pass
        # through, while handled ones (add_offset) are consumed and must
        # not reappear on the coordinate.
        names = ['masks', 'meanings', 'values']
        attrs = [[('flag_masks', self.flag_masks),
                  ('wibble', 'wibble')],
                 [('flag_meanings', self.flag_meanings),
                  ('add_offset', 'add_offset')],
                 [('flag_values', self.flag_values)],
                 [('valid_range', self.valid_range)],
                 [('valid_min', self.valid_min)],
                 [('valid_max', self.valid_max)]]
        cf, cf_var = self._make(names, attrs)
        cube = _load_cube(self.engine, cf, cf_var, self.filename)
        self.assertEqual(len(cube.coords()), 3)
        self.assertEqual(set([c.name() for c in cube.coords()]), set(names))
        # attrs[1]'s add_offset is dropped by loading; everything else
        # survives unchanged.
        expected = [attrs[0],
                    [attrs[1][0]],
                    attrs[2],
                    attrs[3],
                    attrs[4],
                    attrs[5]]
        for name, expect in zip(names, expected):
            attributes = cube.coord(name).attributes
            self.assertEqual(set(attributes.items()), set(expect))
class TestCubeAttributes(tests.IrisTest):
    """Check that flag_*/valid_* CF attributes pass through onto the cube
    itself when loaded via ``_load_cube``."""
    def setUp(self):
        # Neutralise the rules hook; these tests never add coordinates.
        this = 'iris.fileformats.netcdf._assert_case_specific_facts'
        patch = mock.patch(this)
        patch.start()
        self.addCleanup(patch.stop)
        self.engine = mock.Mock()
        self.cf = None
        self.filename = 'DUMMY'
        # Sentinels: pass-through identity is all that is asserted.
        self.flag_masks = mock.sentinel.flag_masks
        self.flag_meanings = mock.sentinel.flag_meanings
        self.flag_values = mock.sentinel.flag_values
        self.valid_range = mock.sentinel.valid_range
        self.valid_min = mock.sentinel.valid_min
        self.valid_max = mock.sentinel.valid_max
    def _make(self, attrs):
        # Mock CF variable advertising ``attrs`` as its unused attributes
        # (those not consumed by the loading rules).
        shape = (1,)
        cf_attrs_unused = mock.Mock(return_value=attrs)
        cf_data = mock.Mock(_FillValue=None)
        cf_data.chunking = mock.MagicMock(return_value=shape)
        cf_var = mock.MagicMock(spec=iris.fileformats.cf.CFVariable,
                                dtype=np.dtype('i4'),
                                cf_data=cf_data,
                                cf_name='DUMMY_VAR',
                                cf_group=mock.Mock(),
                                cf_attrs_unused=cf_attrs_unused,
                                shape=shape)
        return cf_var
    def test_flag_pass_thru(self):
        # Each flag_* attribute must land, alone, on the cube.
        attrs = [('flag_masks', self.flag_masks),
                 ('flag_meanings', self.flag_meanings),
                 ('flag_values', self.flag_values)]
        for key, value in attrs:
            cf_var = self._make([(key, value)])
            cube = _load_cube(self.engine, self.cf, cf_var, self.filename)
            self.assertEqual(len(cube.attributes), 1)
            self.assertEqual(list(cube.attributes.keys()), [key])
            self.assertEqual(list(cube.attributes.values()), [value])
    def test_flag_pass_thru_multi(self):
        # Mixed attribute set: add_offset and standard_name are consumed by
        # loading (indices 3 and 5); everything else must survive.
        attrs = [('flag_masks', self.flag_masks),
                 ('wibble', 'wobble'),
                 ('flag_meanings', self.flag_meanings),
                 ('add_offset', 'add_offset'),
                 ('flag_values', self.flag_values),
                 ('standard_name', 'air_temperature'),
                 ('valid_range', self.valid_range),
                 ('valid_min', self.valid_min),
                 ('valid_max', self.valid_max)]
        # Expect everything from above to be returned except those
        # corresponding to exclude_ind.
        expected = set([attrs[ind] for ind in [0, 1, 2, 4, 6, 7, 8]])
        cf_var = self._make(attrs)
        cube = _load_cube(self.engine, self.cf, cf_var, self.filename)
        self.assertEqual(len(cube.attributes), len(expected))
        self.assertEqual(set(cube.attributes.items()), expected)
if __name__ == "__main__":
    # Run this test module's suite when executed directly.
    tests.main()
|
dkillick/iris
|
lib/iris/tests/unit/fileformats/netcdf/test__load_cube.py
|
Python
|
lgpl-3.0
| 7,516
|
[
"NetCDF"
] |
27f5d494841c6956dd719192d052acb22e2d8404a1cc4f47690a825ba1144930
|
# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""BioPython Pathway module.
Bio.Pathway is a lightweight class library designed to support the following tasks:
- Data interchange and preprocessing between pathway databases and analysis software.
- Quick prototyping of pathway analysis algorithms
The basic object in the Bio.Pathway model is Interaction, which represents an arbitrary
interaction between any number of biochemical species.
Network objects are used to represent the connectivity between species in pathways
and reaction networks.
For applications where it is not necessary to explicitly represent network connectivity,
the specialized classes Reaction and System should be used in place of Interaction and
Network.
The Bio.Pathway classes, especially Interaction, are intentionally
designed to be very flexible. Their intended use is as wrappers around database
specific records, such as BIND objects. The value-added in this module is a
framework for representing collections of reactions in a way that supports
graph theoretic and numeric analysis.
Note: This module should be regarded as a prototype only. API changes are likely.
Comments and feature requests are most welcome.
"""
from Bio.Pathway.Rep.MultiGraph import *
class Reaction(object):
    """Abstraction for a biochemical transformation.

    This class represents a (potentially reversible) biochemical
    transformation of the type:

        a S1 + b S2 + ... --> c P1 + d P2 + ...

    where a, b, c, d ... are positive numeric stoichiometric coefficients,
    S1, S2, ... are substrates and P1, P2, ... are products.

    A Reaction should be viewed as the net result of one or more individual
    reaction steps, where each step is potentially facilitated by a
    different catalyst.

    Attributes:
    reactants -- map of involved species to their stoichiometric
                 coefficients (negative for substrates, positive for
                 products): reactants[S] = stoichiometric constant for S
    catalysts -- sorted list of the distinct catalysts for this reaction
    reversible -- true iff reaction is reversible
    data -- reference to arbitrary additional data

    Invariants:
    for all S in reactants: reactants[S] != 0
    """
    def __init__(self, reactants = {}, catalysts = [],
                 reversible = 0, data = None):
        """Initializes a new Reaction object.

        Zero-coefficient entries are dropped from ``reactants`` to uphold
        the class invariant.  The default dict/list are never mutated
        (both are copied), so the shared-mutable-default pitfall does not
        apply here.
        """
        # enforce invariants on reactants: iterate the caller's dict while
        # editing our copy.  ``items()`` replaces the Python-2-only
        # ``iteritems()`` with identical behaviour on both Python versions.
        self.reactants = reactants.copy()
        for species, coefficient in reactants.items():
            if coefficient == 0:
                del self.reactants[species]
        # Deduplicate and order catalysts so __eq__ is order-insensitive.
        self.catalysts = sorted(set(catalysts))
        self.data = data
        self.reversible = reversible
    def __eq__(self, r):
        """Returns true iff self is equal to r."""
        return isinstance(r, Reaction) and \
               self.reactants == r.reactants and \
               self.catalysts == r.catalysts and \
               self.data == r.data and \
               self.reversible == r.reversible
    def __ne__(self, r):
        """Returns true iff self is not equal to r."""
        return not self.__eq__(r)
    def __hash__(self):
        """Returns a hashcode for self.

        BUGFIX: the original hashed ``tuple(self.species())``, whose order
        depends on dict key-insertion history, so two equal reactions could
        hash differently (breaking the __eq__/__hash__ contract).  A
        frozenset is order-independent.  Coefficients/catalysts are still
        deliberately excluded; collisions are resolved by __eq__.
        """
        return hash(frozenset(self.species()))
    def __repr__(self):
        """Returns a debugging string representation of self."""
        # NOTE: argument order here (data before reversible) differs from
        # the constructor's; preserved for output compatibility.
        return "Reaction(" + \
               ",".join(map(repr, [self.reactants,
                                   self.catalysts,
                                   self.data,
                                   self.reversible])) + ")"
    def __str__(self):
        """Returns a chemical-equation style string, e.g. 'A + 2 B --> C'."""
        substrates = ""
        products = ""
        all_species = sorted(self.reactants)
        for species in all_species:
            stoch = self.reactants[species]
            if stoch < 0:
                # species is a substrate:
                if substrates != "":
                    substrates = substrates + " + "
                if stoch != -1:
                    substrates = substrates + str(abs(stoch)) + " "
                substrates = substrates + str(species)
            elif stoch > 0:
                # species is a product:
                if products != "":
                    products = products + " + "
                if stoch != 1:
                    products = products + str(stoch) + " "
                products = products + str(species)
            else:
                raise AttributeError("Invalid 0 coefficient in Reaction.reactants")
        if self.reversible:
            return substrates + " <=> " + products
        else:
            return substrates + " --> " + products
    def reverse(self):
        """Returns a new Reaction that is the reverse of self.

        Every coefficient is negated, so substrates become products and
        vice versa; catalysts, reversibility and data carry over unchanged.
        """
        reactants = {}
        for r in self.reactants:
            reactants[r] = - self.reactants[r]
        return Reaction(reactants, self.catalysts,
                        self.reversible, self.data)
    def species(self):
        """Returns a list of all Species involved in self.

        Wrapped in ``list`` so the return type is a real list on both
        Python 2 and Python 3 (``dict.keys`` is a view on Python 3).
        """
        return list(self.reactants.keys())
class System(object):
"""Abstraction for a collection of reactions.
This class is used in the Bio.Pathway framework to represent an arbitrary
collection of reactions without explicitly defined links.
Attributes:
None
"""
def __init__(self, reactions = []):
"""Initializes a new System object."""
self.__reactions = set(reactions)
def __repr__(self):
"""Returns a debugging string representation of self."""
return "System(" + ",".join(map(repr,self.__reactions)) + ")"
def __str__(self):
"""Returns a string representation of self."""
return "System of " + str(len(self.__reactions)) + \
" reactions involving " + str(len(self.species())) + \
" species"
def add_reaction(self, reaction):
"""Adds reaction to self."""
self.__reactions.add(reaction)
def remove_reaction(self, reaction):
"""Removes reaction from self."""
self.__reactions.remove(reaction)
def reactions(self):
"""Returns a list of the reactions in this system.
Note the order is arbitrary!
"""
#TODO - Define __lt__ so that Reactions can be sorted on Python?
return list(self.__reactions)
def species(self):
"""Returns a list of the species in this system."""
return sorted(set(reduce(lambda s,x: s + x,
[x.species() for x in self.reactions()], [])))
def stochiometry(self):
"""Computes the stoichiometry matrix for self.
Returns (species, reactions, stoch) where
species = ordered list of species in this system
reactions = ordered list of reactions in this system
stoch = 2D array where stoch[i][j] is coef of the
jth species in the ith reaction, as defined
by species and reactions above
"""
# Note: This an inefficient and ugly temporary implementation.
# To be practical, stochiometric matrices should probably
# be implemented by sparse matrices, which would require
# NumPy dependencies.
#
# PS: We should implement automatic checking for NumPy here.
species = self.species()
reactions = self.reactions()
stoch = [] * len(reactions)
for i in range(len(reactions)):
stoch[i] = 0 * len(species)
for s in reactions[i].species():
stoch[species.index(s)] = reactions[i].reactants[s]
return (species, reactions, stoch)
class Interaction(object):
    """An arbitrary interaction between any number of species.

    This class definition is intended solely as a minimal wrapper interface
    that should be implemented and extended by more specific abstractions.

    Attributes:
    data -- reference to arbitrary additional data
    """
    def __init__(self, data=None):
        """Initializes a new Interaction object.

        BUGFIX: the constructor was misspelled ``__init_`` so it was never
        invoked; ``self.data`` was never set and every other method failed
        with AttributeError.  ``data`` defaults to None so the previously
        possible no-argument call ``Interaction()`` keeps working.
        """
        self.data = data
    def __hash__(self):
        """Returns a hashcode for self (``data`` must itself be hashable)."""
        return hash(self.data)
    def __repr__(self):
        """Returns a debugging string representation of self."""
        return "Interaction(" + repr(self.data) + ")"
    def __str__(self):
        """Returns a string representation of self."""
        return "<" + str(self.data) + ">"
class Network(object):
    """A set of species that are explicitly linked by interactions.

    Internally this is a directed multigraph with labeled edges: the nodes
    are the biochemical species involved, and each edge represents an
    interaction between two species, labeled with a reference to the
    associated Interaction object.

    Attributes:
    None
    """
    def __init__(self, species = []):
        """Initializes a new Network object."""
        self.__graph = MultiGraph(species)
    def __repr__(self):
        """Returns a debugging string representation of this network."""
        return "<Network: __graph: %r>" % (self.__graph,)
    def __str__(self):
        """Returns a string representation of this network."""
        counts = (len(self.species()), len(self.interactions()))
        return "Network of %s species and %s interactions." % counts
    def add_species(self, species):
        """Adds species as a node of this network."""
        self.__graph.add_node(species)
    def add_interaction(self, source, sink, interaction):
        """Records interaction as a labeled edge from source to sink."""
        self.__graph.add_edge(source, sink, interaction)
    def source(self, species):
        """Returns the list of unique sources for species."""
        return self.__graph.parents(species)
    def source_interactions(self, species):
        """Returns the list of (source, interaction) pairs for species."""
        return self.__graph.parent_edges(species)
    def sink(self, species):
        """Returns the list of unique sinks for species."""
        return self.__graph.children(species)
    def sink_interactions(self, species):
        """Returns the list of (sink, interaction) pairs for species."""
        return self.__graph.child_edges(species)
    def species(self):
        """Returns the list of the species in this network."""
        return self.__graph.nodes()
    def interactions(self):
        """Returns the list of the unique interactions in this network."""
        return self.__graph.labels()
|
bryback/quickseq
|
genescript/Bio/Pathway/__init__.py
|
Python
|
mit
| 10,881
|
[
"Biopython"
] |
bda20a16d89121fb0a3ee72083d6ad7198df10a30874d921075314244bacb9dd
|
""" JobMonitoringHandler is the implementation of the JobMonitoring service
in the DISET framework
The following methods are available in the Service interface
"""
from __future__ import print_function
__RCSID__ = "$Id$"
from datetime import timedelta
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
import DIRAC.Core.Utilities.Time as Time
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.ElasticJobDB import ElasticJobDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import JobPolicy, RIGHT_GET_INFO
# These are global instances of the DB classes
# (created by initializeJobMonitoringHandler; ``False`` means "not yet
# initialized").
gJobDB = False
gElasticJobDB = False
gJobLoggingDB = False
gTaskQueueDB = False
# NOTE(review): SUMMARY is assigned twice -- the attribute list below is
# immediately clobbered by the empty list that follows, so getJobSummary /
# getJobsSummary query JobDB with an empty attribute list.  Presumably an
# empty list means "all attributes" for JobDB.getJobAttributes; confirm
# before relying on (or deleting) the first assignment.
SUMMARY = ['JobType', 'Site', 'JobName', 'Owner', 'SubmissionTime',
           'LastUpdateTime', 'Status', 'MinorStatus', 'ApplicationStatus']
SUMMARY = []
PRIMARY_SUMMARY = []
# Job states considered terminal when computing current-job counters.
FINAL_STATES = ['Done', 'Completed', 'Stalled', 'Failed', 'Killed']
def initializeJobMonitoringHandler(serviceInfo):
  """DISET service initializer: create the shared JobDB, JobLoggingDB and
  TaskQueueDB instances used by all handler instances of this service.

  :param serviceInfo: standard DISET service information structure (unused)
  :return: S_OK() always (a DB constructor failure would raise instead)
  """
  global gJobDB, gJobLoggingDB, gTaskQueueDB
  gJobDB = JobDB()
  gJobLoggingDB = JobLoggingDB()
  gTaskQueueDB = TaskQueueDB()
  return S_OK()
class JobMonitoringHandler(RequestHandler):
def initialize(self):
"""
Flags useESForJobParametersFlag (in /Operations/[]/Services/JobMonitoring/) have bool value (True/False)
and determines the switching of backends from MySQL to ElasticSearch for the JobParameters DB table.
For version v7r0, the MySQL backend is (still) the default.
"""
credDict = self.getRemoteCredentials()
self.ownerDN = credDict['DN']
self.ownerGroup = credDict['group']
operations = Operations(group=self.ownerGroup)
self.globalJobsInfo = operations.getValue('/Services/JobMonitoring/GlobalJobsInfo', True)
self.jobPolicy = JobPolicy(self.ownerDN, self.ownerGroup, self.globalJobsInfo)
self.jobPolicy.jobDB = gJobDB
useESForJobParametersFlag = operations.getValue('/Services/JobMonitoring/useESForJobParametersFlag', False)
global gElasticJobDB
if useESForJobParametersFlag:
gElasticJobDB = ElasticJobDB()
self.log.verbose("Using ElasticSearch for JobParameters")
return S_OK()
##############################################################################
types_getApplicationStates = []
@staticmethod
def export_getApplicationStates():
""" Return Distinct Values of ApplicationStatus job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('ApplicationStatus')
##############################################################################
types_getJobTypes = []
@staticmethod
def export_getJobTypes():
""" Return Distinct Values of JobType job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('JobType')
##############################################################################
types_getOwners = []
@staticmethod
def export_getOwners():
"""
Return Distinct Values of Owner job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('Owner')
##############################################################################
types_getProductionIds = []
@staticmethod
def export_getProductionIds():
"""
Return Distinct Values of ProductionId job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('JobGroup')
##############################################################################
types_getJobGroups = []
@staticmethod
def export_getJobGroups(condDict=None, cutDate=None):
"""
Return Distinct Values of ProductionId job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('JobGroup', condDict,
newer=cutDate)
##############################################################################
types_getSites = []
@staticmethod
def export_getSites():
"""
Return Distinct Values of Site job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('Site')
##############################################################################
types_getStates = []
@staticmethod
def export_getStates():
"""
Return Distinct Values of Status job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('Status')
##############################################################################
types_getMinorStates = []
@staticmethod
def export_getMinorStates():
"""
Return Distinct Values of Minor Status job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes('MinorStatus')
##############################################################################
types_getJobs = []
@staticmethod
def export_getJobs(attrDict=None, cutDate=None):
"""
Return list of JobIds matching the condition given in attrDict
"""
# queryDict = {}
# if attrDict:
# if type ( attrDict ) != dict:
# return S_ERROR( 'Argument must be of Dict Type' )
# for attribute in self.queryAttributes:
# # Only those Attribute in self.queryAttributes can be used
# if attrDict.has_key(attribute):
# queryDict[attribute] = attrDict[attribute]
return gJobDB.selectJobs(attrDict, newer=cutDate)
##############################################################################
types_getCounters = [list]
@staticmethod
def export_getCounters(attrList, attrDict=None, cutDate=''):
"""
Retrieve list of distinct attributes values from attrList
with attrDict as condition.
For each set of distinct values, count number of occurences.
Return a list. Each item is a list with 2 items, the list of distinct
attribute values and the counter
"""
# Check that Attributes in attrList and attrDict, they must be in
# self.queryAttributes.
# for attr in attrList:
# try:
# self.queryAttributes.index(attr)
# except:
# return S_ERROR( 'Requested Attribute not Allowed: %s.' % attr )
#
# for attr in attrDict:
# try:
# self.queryAttributes.index(attr)
# except:
# return S_ERROR( 'Condition Attribute not Allowed: %s.' % attr )
cutDate = str(cutDate)
if not attrDict:
attrDict = {}
return gJobDB.getCounters('Jobs', attrList, attrDict, newer=cutDate, timeStamp='LastUpdateTime')
##############################################################################
types_getCurrentJobCounters = []
@staticmethod
def export_getCurrentJobCounters(attrDict=None):
""" Get job counters per Status with attrDict selection. Final statuses are given for
the last day.
"""
if not attrDict:
attrDict = {}
result = gJobDB.getCounters('Jobs', ['Status'], attrDict, timeStamp='LastUpdateTime')
if not result['OK']:
return result
last_update = Time.dateTime() - Time.day
resultDay = gJobDB.getCounters('Jobs', ['Status'], attrDict, newer=last_update,
timeStamp='LastUpdateTime')
if not resultDay['OK']:
return resultDay
resultDict = {}
for statusDict, count in result['Value']:
status = statusDict['Status']
resultDict[status] = count
if status in FINAL_STATES:
resultDict[status] = 0
for statusDayDict, ccount in resultDay['Value']:
if status == statusDayDict['Status']:
resultDict[status] = ccount
break
return S_OK(resultDict)
##############################################################################
types_getJobStatus = [int]
@staticmethod
def export_getJobStatus(jobID):
return gJobDB.getJobAttribute(jobID, 'Status')
##############################################################################
types_getJobOwner = [int]
@staticmethod
def export_getJobOwner(jobID):
return gJobDB.getJobAttribute(jobID, 'Owner')
##############################################################################
types_getJobSite = [int]
@staticmethod
def export_getJobSite(jobID):
return gJobDB.getJobAttribute(jobID, 'Site')
##############################################################################
types_getJobJDL = [int, bool]
@staticmethod
def export_getJobJDL(jobID, original):
return gJobDB.getJobJDL(jobID, original=original)
##############################################################################
types_getJobLoggingInfo = [int]
@staticmethod
def export_getJobLoggingInfo(jobID):
return gJobLoggingDB.getJobLoggingInfo(jobID)
##############################################################################
types_getJobsParameters = [list, list]
@staticmethod
def export_getJobsParameters(jobIDs, parameters):
if not (jobIDs and parameters):
return S_OK({})
return gJobDB.getAttributesForJobList(jobIDs, parameters)
##############################################################################
types_getJobsStatus = [list]
@staticmethod
def export_getJobsStatus(jobIDs):
if not jobIDs:
return S_OK({})
return gJobDB.getAttributesForJobList(jobIDs, ['Status'])
##############################################################################
types_getJobsMinorStatus = [list]
@staticmethod
def export_getJobsMinorStatus(jobIDs):
return gJobDB.getAttributesForJobList(jobIDs, ['MinorStatus'])
##############################################################################
types_getJobsApplicationStatus = [list]
@staticmethod
def export_getJobsApplicationStatus(jobIDs):
return gJobDB.getAttributesForJobList(jobIDs, ['ApplicationStatus'])
##############################################################################
types_getJobsSites = [list]
@staticmethod
def export_getJobsSites(jobIDs):
return gJobDB.getAttributesForJobList(jobIDs, ['Site'])
##############################################################################
types_getJobSummary = [int]
@staticmethod
def export_getJobSummary(jobID):
return gJobDB.getJobAttributes(jobID, SUMMARY)
##############################################################################
types_getJobPrimarySummary = [int]
@staticmethod
def export_getJobPrimarySummary(jobID):
return gJobDB.getJobAttributes(jobID, PRIMARY_SUMMARY)
##############################################################################
types_getJobsSummary = [list]
@staticmethod
def export_getJobsSummary(jobIDs):
if not jobIDs:
return S_ERROR('JobMonitoring.getJobsSummary: Received empty job list')
result = gJobDB.getAttributesForJobList(jobIDs, SUMMARY)
# return result
restring = str(result['Value'])
return S_OK(restring)
##############################################################################
types_getJobPageSummaryWeb = [dict, list, int, int]
def export_getJobPageSummaryWeb(self, selectDict, sortList, startItem, maxItems, selectJobs=True):
""" Get the summary of the job information for a given page in the
job monitor in a generic format
"""
resultDict = {}
startDate = selectDict.get('FromDate', None)
if startDate:
del selectDict['FromDate']
# For backward compatibility
if startDate is None:
startDate = selectDict.get('LastUpdate', None)
if startDate:
del selectDict['LastUpdate']
endDate = selectDict.get('ToDate', None)
if endDate:
del selectDict['ToDate']
result = self.jobPolicy.getControlledUsers(RIGHT_GET_INFO)
if not result['OK']:
return S_ERROR('Failed to evaluate user rights')
if result['Value'] != 'ALL':
selectDict[('Owner', 'OwnerGroup')] = result['Value']
# Sorting instructions. Only one for the moment.
if sortList:
orderAttribute = sortList[0][0] + ":" + sortList[0][1]
else:
orderAttribute = None
statusDict = {}
result = gJobDB.getCounters('Jobs', ['Status'], selectDict,
newer=startDate,
older=endDate,
timeStamp='LastUpdateTime')
nJobs = 0
if result['OK']:
for stDict, count in result['Value']:
nJobs += count
statusDict[stDict['Status']] = count
resultDict['TotalRecords'] = nJobs
if nJobs == 0:
return S_OK(resultDict)
resultDict['Extras'] = statusDict
if selectJobs:
iniJob = startItem
if iniJob >= nJobs:
return S_ERROR('Item number out of range')
result = gJobDB.selectJobs(selectDict, orderAttribute=orderAttribute,
newer=startDate, older=endDate, limit=(maxItems, iniJob))
if not result['OK']:
return S_ERROR('Failed to select jobs: ' + result['Message'])
summaryJobList = result['Value']
if not self.globalJobsInfo:
validJobs, _invalidJobs, _nonauthJobs, _ownJobs = self.jobPolicy.evaluateJobRights(summaryJobList,
RIGHT_GET_INFO)
summaryJobList = validJobs
result = gJobDB.getAttributesForJobList(summaryJobList, SUMMARY)
if not result['OK']:
return S_ERROR('Failed to get job summary: ' + result['Message'])
summaryDict = result['Value']
# Evaluate last sign of life time
for jobID, jobDict in summaryDict.items():
if jobDict['HeartBeatTime'] == 'None':
jobDict['LastSignOfLife'] = jobDict['LastUpdateTime']
else:
lastTime = Time.fromString(jobDict['LastUpdateTime'])
hbTime = Time.fromString(jobDict['HeartBeatTime'])
# Not only Stalled jobs but also Failed jobs because Stalled
if ((hbTime - lastTime) > timedelta(0) or
jobDict['Status'] == "Stalled" or
jobDict['MinorStatus'].startswith('Job stalled') or
jobDict['MinorStatus'].startswith('Stalling')):
jobDict['LastSignOfLife'] = jobDict['HeartBeatTime']
else:
jobDict['LastSignOfLife'] = jobDict['LastUpdateTime']
tqDict = {}
result = gTaskQueueDB.getTaskQueueForJobs(summaryJobList)
if result['OK']:
tqDict = result['Value']
# If no jobs can be selected after the properties check
if not summaryDict.keys():
return S_OK(resultDict)
# prepare the standard structure now
key = summaryDict.keys()[0]
paramNames = summaryDict[key].keys()
records = []
for jobID, jobDict in summaryDict.items():
jParList = []
for pname in paramNames:
jParList.append(jobDict[pname])
jParList.append(tqDict.get(jobID, 0))
records.append(jParList)
resultDict['ParameterNames'] = paramNames + ['TaskQueueID']
resultDict['Records'] = records
return S_OK(resultDict)
##############################################################################
types_getJobStats = [basestring, dict]
@staticmethod
def export_getJobStats(attribute, selectDict):
""" Get job statistics distribution per attribute value with a given selection
"""
startDate = selectDict.get('FromDate', None)
if startDate:
del selectDict['FromDate']
# For backward compatibility
if startDate is None:
startDate = selectDict.get('LastUpdate', None)
if startDate:
del selectDict['LastUpdate']
endDate = selectDict.get('ToDate', None)
if endDate:
del selectDict['ToDate']
result = gJobDB.getCounters('Jobs', [attribute], selectDict,
newer=startDate,
older=endDate,
timeStamp='LastUpdateTime')
resultDict = {}
if result['OK']:
for cDict, count in result['Value']:
resultDict[cDict[attribute]] = count
return S_OK(resultDict)
##############################################################################
  types_getJobsPrimarySummary = [list]

  @staticmethod
  def export_getJobsPrimarySummary(jobIDs):
    """ Return the primary summary attributes for the given jobs
        (pass-through to JobDB with the PRIMARY_SUMMARY attribute set).

        :param list jobIDs: job IDs to look up
    """
    return gJobDB.getAttributesForJobList(jobIDs, PRIMARY_SUMMARY)
##############################################################################
  types_getJobParameter = [[basestring, int, long], basestring]

  @staticmethod
  def export_getJobParameter(jobID, parName):
    """ Return a single parameter of a single job.

        The Elastic backend (when configured) is consulted first; if the job
        has no entry there, the MySQL JobDB is used as fallback.

        :param str/int/long jobID: one single Job ID
        :param str parName: one single parameter name
    """
    if gElasticJobDB:
      res = gElasticJobDB.getJobParameters(jobID, [parName])
      if not res['OK']:
        return res
      # only an actual (non-empty) entry for this job short-circuits the lookup
      if res['Value'].get(int(jobID)):
        return S_OK(res['Value'][int(jobID)])

    res = gJobDB.getJobParameters(jobID, [parName])
    if not res['OK']:
      return res
    return S_OK(res['Value'].get(int(jobID), {}))
##############################################################################
  types_getJobOptParameters = [int]

  @staticmethod
  def export_getJobOptParameters(jobID):
    """ Return the optimizer parameters of a job (pass-through to JobDB).

        :param int jobID: one single Job ID
    """
    return gJobDB.getJobOptParameters(jobID)
##############################################################################
  types_getJobParameters = [[basestring, int, long, list]]

  @staticmethod
  def export_getJobParameters(jobIDs, parName=None):
    """
    :param str/int/long/list jobIDs: one single job ID or a list of them
    :param str parName: one single parameter name, or None (meaning all of them)
    """
    if gElasticJobDB:
      if not isinstance(jobIDs, list):
        jobIDs = [jobIDs]
      # the Elastic backend is queried one job at a time
      parameters = {}
      for jobID in jobIDs:
        res = gElasticJobDB.getJobParameters(jobID, parName)
        if not res['OK']:
          return res
        parameters.update(res['Value'])

      # Need anyway to get also from JobDB, for those jobs with parameters registered in MySQL or in both backends
      res = gJobDB.getJobParameters(jobIDs, parName)
      if not res['OK']:
        return res
      parametersM = res['Value']

      # and now combine: start from the MySQL values, let the Elastic values
      # override them per-key, and add Elastic-only jobs
      final = dict(parametersM)
      for jobID in parametersM:
        final[jobID].update(parameters.get(jobID, {}))
      for jobID in parameters:
        if jobID not in final:
          final[jobID] = parameters[jobID]
      return S_OK(final)

    return gJobDB.getJobParameters(jobIDs, parName)
##############################################################################
  types_traceJobParameter = [basestring, [basestring, int, long, list],
                             basestring, [basestring, None],
                             [basestring, None]]

  @staticmethod
  def export_traceJobParameter(site, localID, parameter, date, until):
    """ Trace the history of a single job parameter (pass-through to JobDB).

        :param str site: site name
        :param str/int/long/list localID: local job ID(s) at the site
        :param str parameter: name of the parameter to trace
        :param str date: start of the time window (may be None)
        :param str until: end of the time window (may be None)
    """
    return gJobDB.traceJobParameter(site, localID, parameter, date, until)
##############################################################################
  types_traceJobParameters = [basestring, [basestring, int, long, list],
                              [list, None], [list, None],
                              [basestring, None], [basestring, None]]

  @staticmethod
  def export_traceJobParameters(site, localID, parameterList, attributeList, date, until):
    """ Trace the history of several job parameters/attributes (pass-through to JobDB).

        :param str site: site name
        :param str/int/long/list localID: local job ID(s) at the site
        :param list parameterList: parameter names to trace (may be None)
        :param list attributeList: attribute names to trace (may be None)
        :param str date: start of the time window (may be None)
        :param str until: end of the time window (may be None)
    """
    return gJobDB.traceJobParameters(site, localID, parameterList, attributeList, date, until)
##############################################################################
  types_getAtticJobParameters = [[int, long]]

  @staticmethod
  def export_getAtticJobParameters(jobID, parameters=None, rescheduleCycle=-1):
    """ Return attic (pre-reschedule) parameters of a job.

        :param int/long jobID: one single Job ID
        :param list parameters: parameter names to return (empty list when None)
        :param int rescheduleCycle: reschedule cycle to select
            (-1 presumably meaning all cycles — confirm in JobDB)
    """
    if not parameters:
      parameters = []
    return gJobDB.getAtticJobParameters(jobID, parameters, rescheduleCycle)
##############################################################################
  types_getJobAttributes = [int]

  @staticmethod
  def export_getJobAttributes(jobID):
    """ Return all attributes of a job (pass-through to JobDB).

        :param int jobID: one single Job ID
    """
    return gJobDB.getJobAttributes(jobID)
##############################################################################
  types_getJobAttribute = [int, basestring]

  @staticmethod
  def export_getJobAttribute(jobID, attribute):
    """ Return a single attribute of a job (pass-through to JobDB).

        :param int jobID: one single Job ID
        :param str attribute: one single attribute name
    """
    return gJobDB.getJobAttribute(jobID, attribute)
##############################################################################
  types_getSiteSummary = []

  @staticmethod
  def export_getSiteSummary():
    """ Return the per-site job summary (pass-through to JobDB). """
    return gJobDB.getSiteSummary()
##############################################################################
  types_getJobHeartBeatData = [int]

  @staticmethod
  def export_getJobHeartBeatData(jobID):
    """ Return the heart beat data recorded for a job (pass-through to JobDB).

        :param int jobID: one single Job ID
    """
    return gJobDB.getHeartBeatData(jobID)
##############################################################################
  types_getInputData = [[int, long]]

  @staticmethod
  def export_getInputData(jobID):
    """ Get input data for the specified jobs

        :param int/long jobID: one single Job ID
    """
    return gJobDB.getInputData(jobID)
##############################################################################
  types_getOwnerGroup = []

  @staticmethod
  def export_getOwnerGroup():
    """
    Return Distinct Values of OwnerGroup from the JobsDB
    """
    return gJobDB.getDistinctJobAttributes('OwnerGroup')
|
fstagni/DIRAC
|
WorkloadManagementSystem/Service/JobMonitoringHandler.py
|
Python
|
gpl-3.0
| 21,262
|
[
"DIRAC"
] |
954a5f640d4ff8b725210fbfe224f259df441023fd890cc5d1d006ef59e916d1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: find
author: Brian Coca (based on Ruggero Marchei's Tidy)
version_added: "2.0"
short_description: Return a list of files based on specific criteria
description:
- Return a list of files based on specific criteria. Multiple criteria are AND'd together.
options:
age:
description:
- Select files whose age is equal to or greater than the specified time.
Use a negative age to find files equal to or less than the specified time.
You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
patterns:
default: '*'
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
aliases: ['pattern']
contains:
description:
- One or more regex patterns which should be matched against the file content.
paths:
required: true
aliases: [ name, path ]
description:
- List of paths of directories to search. All paths must be fully qualified.
file_type:
description:
- Type of file to select.
- The 'link' and 'any' choices were added in version 2.3.
choices: [ any, directory, file, link ]
default: file
recurse:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- If target is a directory, recursively descend into the directory looking for files.
size:
description:
- Select files whose size is equal to or greater than the specified size.
Use a negative size to find files equal to or less than the specified size.
Unqualified values are in bytes, but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
Size is not evaluated for directories.
age_stamp:
default: mtime
choices: [ atime, ctime, mtime ]
description:
- Choose the file property against which we compare age.
hidden:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- Set this to true to include hidden files, otherwise they'll be ignored.
follow:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- Set this to true to follow symlinks in path for systems with python 2.6+.
get_checksum:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- Set this to true to retrieve a file's sha1 checksum.
use_regex:
default: 'no'
choices: [ 'no', 'yes' ]
description:
- If false the patterns are file globs (shell) if true they are python regexes.
'''
# BUGFIX: three of the examples had a stray '-' before 'find:', which split
# each task into two invalid list items; 'find:' must be a key of the same
# mapping that holds 'name:'.
EXAMPLES = r'''
- name: Recursively find /tmp files older than 2 days
  find:
    paths: /tmp
    age: 2d
    recurse: yes

- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
  find:
    paths: /tmp
    age: 4w
    size: 1m
    recurse: yes

- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
  find:
    paths: /var/tmp
    age: 3600
    age_stamp: atime
    recurse: yes

- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
  find:
    paths: /var/tmp
    patterns: '*.old,*.log.gz'
    size: 10m

# Note that YAML double quotes require escaping backslashes but yaml single quotes do not.
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
  find:
    paths: /var/tmp
    patterns: "^.*?\\.(?:old|log\\.gz)$"
    size: 10m
    use_regex: yes
'''
RETURN = r'''
files:
description: all matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list
sample: [
{ path: "/var/tmp/test1",
mode: "0644",
"...": "...",
checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path: "/var/tmp/test2",
"...": "..."
},
]
matched:
description: number of matches
returned: success
type: string
sample: 14
examined:
description: number of filesystem objects looked at
returned: success
type: string
sample: 34
'''
import fnmatch
import os
import re
import stat
import sys
import time
from ansible.module_utils.basic import AnsibleModule
def pfilter(f, patterns=None, use_regex=False):
    """Return True when basename *f* matches at least one pattern.

    With ``use_regex`` the patterns are python regexes (matched at the
    beginning of the name), otherwise shell globs. ``patterns is None``
    means "everything matches".
    """
    if patterns is None:
        return True
    if use_regex:
        return any(re.compile(p).match(f) for p in patterns)
    return any(fnmatch.fnmatch(f, p) for p in patterns)
def agefilter(st, now, age, timestamp):
    """Return True if the stat result *st* passes the age criterion.

    :param st: os.stat_result (or any object exposing st_atime/st_mtime/st_ctime)
    :param now: reference time, seconds since the epoch
    :param age: threshold in seconds; age >= 0 selects files at least that old,
                age < 0 selects files at most abs(age) old, None disables the filter
    :param timestamp: which timestamp to compare: 'atime', 'mtime' or 'ctime'
    """
    if age is None:
        return True
    # idiom fix: getattr() instead of calling __getattribute__ directly;
    # also computes the age delta once instead of per branch
    delta = now - getattr(st, "st_%s" % timestamp)
    if age >= 0:
        return delta >= abs(age)
    return delta <= abs(age)
def sizefilter(st, size):
    """Return True when st.st_size passes the size criterion.

    size >= 0 selects files of at least that many bytes, size < 0 selects
    files of at most abs(size) bytes, None disables the filter.
    """
    if size is None:
        return True
    if size >= 0:
        return st.st_size >= abs(size)
    return st.st_size <= abs(size)
def contentfilter(fsname, pattern):
    """Return True if any line of file *fsname* matches the regex *pattern*.

    ``pattern is None`` disables the filter. Unreadable/undecodable files and
    invalid patterns count as "no match", as before, but the file handle is
    now always closed (the old code leaked it when reading raised) and only
    the expected error types are swallowed instead of a bare ``except``.
    """
    if pattern is None:
        return True

    try:
        prog = re.compile(pattern)
        # 'with' guarantees the handle is closed even if iteration raises
        with open(fsname) as f:
            for line in f:
                if prog.match(line):
                    return True
    except (re.error, EnvironmentError, UnicodeDecodeError):
        return False

    return False
def statinfo(st):
    """Translate an os.stat_result into the flat dict returned per file."""
    mode = st.st_mode
    info = {
        'mode': "%04o" % stat.S_IMODE(mode),
        'isdir': stat.S_ISDIR(mode),
        'ischr': stat.S_ISCHR(mode),
        'isblk': stat.S_ISBLK(mode),
        'isreg': stat.S_ISREG(mode),
        'isfifo': stat.S_ISFIFO(mode),
        'islnk': stat.S_ISLNK(mode),
        'issock': stat.S_ISSOCK(mode),
        'uid': st.st_uid,
        'gid': st.st_gid,
        'size': st.st_size,
        'inode': st.st_ino,
        'dev': st.st_dev,
        'nlink': st.st_nlink,
        'atime': st.st_atime,
        'mtime': st.st_mtime,
        'ctime': st.st_ctime,
    }
    # permission / setuid bits, one boolean per flag
    for name, bit in (('wusr', stat.S_IWUSR), ('rusr', stat.S_IRUSR), ('xusr', stat.S_IXUSR),
                      ('wgrp', stat.S_IWGRP), ('rgrp', stat.S_IRGRP), ('xgrp', stat.S_IXGRP),
                      ('woth', stat.S_IWOTH), ('roth', stat.S_IROTH), ('xoth', stat.S_IXOTH),
                      ('isuid', stat.S_ISUID), ('isgid', stat.S_ISGID)):
        info[name] = bool(mode & bit)
    return info
def main():
    """Module entry point: walk the requested paths, apply all filters and
    exit with the list of matching filesystem objects."""
    module = AnsibleModule(
        argument_spec=dict(
            paths=dict(type='list', required=True, aliases=['name', 'path']),
            patterns=dict(type='list', default=['*'], aliases=['pattern']),
            contains=dict(type='str'),
            file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
            age=dict(type='str'),
            age_stamp=dict(type='str', default="mtime", choices=['atime', 'mtime', 'ctime']),
            size=dict(type='str'),
            recurse=dict(type='bool', default='no'),
            hidden=dict(type='bool', default='no'),
            follow=dict(type='bool', default='no'),
            get_checksum=dict(type='bool', default='no'),
            use_regex=dict(type='bool', default='no'),
        ),
        supports_check_mode=True,
    )

    params = module.params

    filelist = []

    if params['age'] is None:
        age = None
    else:
        # convert age to seconds: e.g. "2d" -> 172800; bare numbers are seconds
        m = re.match("^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
        if m:
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")

    if params['size'] is None:
        size = None
    else:
        # convert size to bytes: e.g. "10m" -> 10*1024**2; bare numbers are bytes
        m = re.match("^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        if m:
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")

    now = time.time()
    msg = ''
    looked = 0
    for npath in params['paths']:
        npath = os.path.expanduser(os.path.expandvars(npath))
        if os.path.isdir(npath):
            ''' ignore followlinks for python version < 2.6 '''
            # the and/or dance selects os.walk without the followlinks kwarg
            # on interpreters older than 2.6
            for root, dirs, files in (sys.version_info < (2, 6, 0) and os.walk(npath)) or os.walk(npath, followlinks=params['follow']):
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname = os.path.normpath(os.path.join(root, fsobj))

                    # dot-files are skipped unless 'hidden' was requested
                    if os.path.basename(fsname).startswith('.') and not params['hidden']:
                        continue

                    try:
                        st = os.lstat(fsname)
                    except:
                        msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                        continue

                    r = {'path': fsname}
                    if params['file_type'] == 'any':
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                        # regular files get the full filter chain: name, age,
                        # size and content
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and \
                           agefilter(st, now, age, params['age_stamp']) and \
                           sizefilter(st, size) and \
                           contentfilter(fsname, params['contains']):
                            r.update(statinfo(st))
                            if params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)
                    elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)

                # without 'recurse' only the first walk level is examined
                if not params['recurse']:
                    break
        else:
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath

    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)


if __name__ == '__main__':
    main()
|
napkindrawing/ansible
|
lib/ansible/modules/files/find.py
|
Python
|
gpl-3.0
| 12,849
|
[
"Brian"
] |
4ae4d4d7080105f9ba9629e2eab2d7b3795a8a4dbef5d573819a6cab34cc661a
|
# Copyright 2017 juramote contributors (see README)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import serial, time, sys, logging, argparse, json, codecs
from functools import wraps
from enum import IntEnum, Enum
from datetime import datetime, timedelta
from threading import Lock
from collections import namedtuple
from .decorator import locked
log = logging.getLogger(__name__)
class Raw:
    """
    Raw access to Jura coffee maker, no error-checking, minimal decoding

    Commands are ASCII lines terminated by CRLF; every byte on the wire is
    stretched to four bytes by _encodebyte/_decodebyte.
    """

    EEPROM_WORDLENGTH = 2 # bytes
    EEPROM_LINELENGTH = 32 # bytes (decoded)
    EEPROM_LINES = 64

    def __init__ (self, tty):
        # XXX: auto-detect machine type
        self.s = serial.Serial (tty, 9600, timeout=30)
        self._test ()
        self.machine = ImpressaXs90

    def _test (self):
        """
        Simple self-test
        """
        # round-trip every possible byte value through the codec
        for b in range (256):
            orig = bytes ([b])
            enc = self._encodebyte (orig)
            dec = self._decodebyte (enc)
            assert dec == orig, (orig, enc, dec)
        assert self._decode (self._encode (b'RE:1234')) == b'RE:1234'

    @staticmethod
    def _encodebyte (c):
        """
        Encode a single byte to Jura coding, i.e. stretched to 4 bytes with
        one bit of information distributed to 2nd and 5th output bit each.
        """
        assert len (c) == 1
        c = c[0]
        # 0xdb is the carrier pattern; bits 2 and 5 hold the payload
        out = [0xdb]*4
        for i in range (4):
            out[i] |= ((c>>(i*2))&1)<<2
            out[i] |= ((c>>(i*2+1))&1)<<5
        return bytes (out)

    @classmethod
    def _encode (cls, s):
        """
        Encode byte string to Jura coding

        :return: list of 4-byte groups, one per input byte
        """
        return list (map (lambda x: cls._encodebyte (bytes ([x])), s))

    @staticmethod
    def _decodebyte (b):
        """
        Decode byte received from Jura machine

        Inverse of _encodebyte: reassembles 8 payload bits from bits 2 and 5
        of four wire bytes.
        """
        assert len (b) == 4
        out = 0
        shift = 0
        for i in range (4):
            out |= ((b[i]>>2)&1)<<shift
            shift += 1
            out |= ((b[i]>>5)&1)<<shift
            shift += 1
        assert shift == 8
        return bytes ([out])

    @classmethod
    def _decode (cls, s):
        # s is an iterable of 4-byte groups (as produced by _encode)
        return b''.join (map (cls._decodebyte, s))

    def _send (self, command):
        """
        Send single command

        :param command: command bytes without the trailing CRLF
        """
        # if commands “time out” there may be a late anwer left behind in the
        # buffers
        self.s.reset_input_buffer ()
        self.s.reset_output_buffer ()
        log.debug ('← {}'.format (command))
        command += b'\r\n'
        enc = self._encode (command)
        for b in enc:
            self.s.write (b)

    def _receive (self):
        """
        Receive single command response

        :return: response line with the trailing CRLF stripped
        :raises ValueError: on a short read (serial timeout)
        """
        s = b''
        while True:
            b = self.s.read (4)
            if len (b) != 4:
                raise ValueError ('response too small/timeout')
            s += self._decodebyte (b)
            if s.endswith (b'\r\n'):
                break
        log.debug ('→ {}'.format (s))
        return s.rstrip (b'\r\n')

    def _receiveInt (self, expected):
        """
        Receive hex-encoded integer

        :param expected: required response prefix, e.g. b're:'
        :raises ValueError: when the prefix does not match
        """
        l = self._receive ()
        if not l.startswith (expected):
            raise ValueError ('invalid response')
        # response is big endian, so we are fine
        return int (l[len (expected):], 16)

    def _receiveBool (self):
        """
        Receive boolean response.

        Right now only ok: is recognized. Not sure if there actually is an error response…
        """
        l = self._receive ()
        return l == b'ok:'

    def _receiveBytes (self, expected):
        """
        Receive hex-encoded raw bytes

        :param expected: required response prefix
        """
        l = self._receive ()
        if not l.startswith (expected):
            raise ValueError ('invalid response')
        return codecs.decode (l[len (expected):], 'hex')

    def _receiveString (self, expected):
        """
        Receive latin1 string

        :param expected: required response prefix
        """
        l = self._receive ()
        if not l.startswith (expected):
            raise ValueError ('invalid response')
        return l[len (expected):].decode ('latin1')

    def readEeprom (self, address):
        """
        Read a single word from EEPROM.

        :param address: eeprom *word* address. Words are 16 bit. I.e. 0 ->
            first word, 1 -> second word, …
        """
        self._send ('RE:{:04X}'.format (address).encode ('ascii'))
        return self._receiveInt (b're:')

    def writeEeprom (self, address, value):
        """
        Write a single word into EEPROM.

        :param address: See readEeprom
        :param value: The value
        """
        self._send ('WE:{:04X},{:04X}'.format (address, value).encode ('ascii'))
        return self._receiveBool ()

    def readEepromLine (self, address):
        """
        Read 32 bytes from EEPROM.

        :param address: eeprom *word* start address. Can be any offset. Words are 16 bit.
        """
        self._send ('RT:{:04X}'.format (address).encode ('ascii'))
        return self._receiveBytes (b'rt:')

    def readInput (self):
        """
        Query the machine's IC: input value.
        """
        self._send (b'IC:')
        return self._receiveInt (b'ic:')

    def pressButton (self, i):
        """
        Press any button on the machine. Second press to abort item in progress
        works.

        :param i: button code (see ImpressaXs90Buttons)
        """
        self._send ('FA:{:02X}'.format (i).encode ('ascii'))
        return self._receiveBool ()

    def makeComponent (self, i):
        # FN: command; component semantics presumably machine-specific — confirm
        self._send ('FN:{:02X}'.format (i).encode ('ascii'))
        return self._receiveBool ()

    def getType (self):
        """
        Get machine type
        """
        self._send (b'TY:')
        return self._receiveString (b'ty:')

    def getLoader (self):
        """
        Get bootloader(?) version string
        """
        self._send (b'TL:')
        return self._receiveString (b'tl:')

    def getHeaterSensors (self):
        """
        Get heater and brewing sensor/status information

        :return: list of decoded fields; indices 0, 7 and 9 are parsed as
            binary numbers, the others as hexadecimal
        """
        self._send (b'HZ:')
        v = self._receiveString (b'hz:').split (',')
        for i in (0, 7, 9):
            v[i] = int (v[i], 2)
        for i in list (range (1, 7)) + [8]:
            v[i] = int (v[i], 16)
        return v

    def resetDisplay (self):
        """
        Reset display to default
        """
        self._send (b'DR:')
        return self._receiveBool ()

    def printDisplay (self, s):
        """
        Permanently display a message.

        Display supports ASCII and subset of latin1 (german umlauts)
        """
        self._send ('DA:{}'.format (s).encode ('latin1'))
        return self._receiveBool ()

    def printDisplayDefault (self, s):
        """
        Change the default selection message
        """
        self._send ('DT:{}'.format (s).encode ('latin1'))
        return self._receiveBool ()

    def raw (self, cmd):
        """
        Send raw command
        """
        self._send (cmd.encode ('latin1'))
        return self._receive ().decode ('latin1')
class State (Enum):
    """
    Current machine state
    """
    IDLE = 0
    GRINDING = 1
    BREWING = 2
    FOAMING = 3
    UNKNOWN = 99

# Decoded result of Stateful.getHeaterSensors: the State above, the flow
# meter reading (ml) and the coffee/milk heater temperatures (scaled values,
# see Stateful._decodeTemperature).
MachineState = namedtuple ('MachineState', ['state', 'flow', 'coffeetemp', 'milktemp'])
class Stateful (Raw):
    """
    Extends raw communnication by state: Thread-safety (locking), button press delay

    XXX: cache eeprom reads
    """

    # minimum pause enforced between two button presses
    BUTTON_DELAY = timedelta (milliseconds=100)

    # wrapped functions
    readEeprom = locked (Raw.readEeprom)
    writeEeprom = locked (Raw.writeEeprom)
    readEepromLine = locked (Raw.readEepromLine)
    readInput = locked (Raw.readInput)
    makeComponent = locked (Raw.makeComponent)
    getType = locked (Raw.getType)
    getLoader = locked (Raw.getLoader)
    # NOTE: this assignment is shadowed by the decorated getHeaterSensors
    # method defined further down
    getHeaterSensors = locked (Raw.getHeaterSensors)
    resetDisplay = locked (Raw.resetDisplay)
    printDisplay = locked (Raw.printDisplay)
    printDisplayDefault = locked (Raw.printDisplayDefault)

    def __init__ (self, tty, timeout=10):
        """
        :param tty: TTY connected to coffee maker
        :param timeout: Lock aquisition timeout
        """
        super ().__init__ (tty)
        self.lastButtonPress = datetime.now ()
        self.lock = Lock ()
        self.timeout = timeout

    @locked
    def pressButton (self, i):
        """
        Press button i, waiting if the last press was less than
        BUTTON_DELAY ago.
        """
        wait = (self.lastButtonPress + self.BUTTON_DELAY) - datetime.now ()
        if wait > timedelta (0):
            log.debug ('waiting for next button press {}'.format (wait))
            time.sleep (wait.total_seconds ())
        self.lastButtonPress = datetime.now ()
        return super ().pressButton (i)

    @locked
    def patchEeprom (self, address, f):
        """
        Atomic read-modify-write a single eeprom word
        """
        return Raw.writeEeprom (self, address, f (Raw.readEeprom (self, address)))

    def _decodeState (self, v):
        """
        Derive a State from the raw HZ: field list v.
        """
        # brewer is on when bit 6 of the first status word is clear;
        # foamer is on when bit 3 is set; v[3] is the flow meter field
        brewerOn = ((v[0] >> 6) & 1) == 0
        foamOn = ((v[0] >> 3) & 1) == 1
        flow = v[3]
        if flow == 0 and not brewerOn and not foamOn:
            return State.GRINDING
        elif brewerOn and not foamOn:
            return State.BREWING
        elif foamOn and not brewerOn:
            return State.FOAMING
        elif flow != 0 and not brewerOn and not foamOn:
            return State.IDLE
        return State.UNKNOWN

    def _decodeFlow (self, v):
        """
        Get flow meter value in ml
        """
        # XXX: based on observations
        scaler = 41/97
        return int (v[3]*scaler)

    def _decodeTemperature (self, v):
        """
        Decode the (coffee, milk) temperature pair from fields 4 and 5.
        """
        # XXX: based on observations, take it with a grain of salt
        scaler = 100/0x4c0
        return (int (v[4]*scaler), int (v[5]*scaler))

    @locked
    def getHeaterSensors (self):
        """
        Return the decoded machine status as a MachineState tuple.
        """
        v = Raw.getHeaterSensors (self)
        coffeetemp, milktemp = self._decodeTemperature (v)
        return MachineState (state=self._decodeState (v),
                flow=self._decodeFlow (v), coffeetemp=coffeetemp,
                milktemp=milktemp)

    def getState (self):
        """
        Convenience accessor for the State part of getHeaterSensors.
        """
        return self.getHeaterSensors ().state

    def getProductDefaults (self, product):
        """
        Read the current EEPROM defaults of a product (None for unsupported fields).
        """
        return ProductDefaults (*map (lambda x: x.get (self) if x else None, self.machine.products[product]))

    def setProductDefaults (self, product, defaults):
        """
        Write the non-None fields of a ProductDefaults tuple into the EEPROM.
        """
        for eeprom, v in zip (self.machine.products[product], defaults):
            if eeprom is not None and v is not None:
                eeprom.patch (self, v)

    def make (self, product, defaults=None):
        """
        Make product

        When defaults are given they are written to the EEPROM temporarily
        and restored after the button press.
        """
        prev = None
        if defaults:
            prev = self.getProductDefaults (product)
            self.setProductDefaults (product, defaults)
        self.pressButton (self.machine.buttons[product.name])
        # XXX: is this actually required?
        time.sleep (1)
        if defaults:
            self.setProductDefaults (product, prev)
class EepromValue:
    """
    Maps EEPROM locations to pythonic values and vice versa

    A value is a bitfield of ``mask`` bits stored at bit offset ``shift``
    inside the 16 bit EEPROM word ``word``; on the way out it is scaled by
    ``scale`` and converted with ``unit``.
    """

    def __init__ (self, word, shift=0, mask=0xffff, scale=1, unit=int):
        self.word = word
        self.shift = shift
        self.mask = mask
        self.scale = scale
        self.unit = unit

    def get (self, machine):
        """
        Retrieve value from machine
        """
        v = machine.readEeprom (self.word)
        return self.unit (((v>>self.shift)&self.mask)*self.scale)

    def patch (self, machine, value):
        """
        Read-modify-write value to machine's EEPROM
        """
        # BUGFIX: clear the bits actually occupied by this field — the mask
        # must be shifted into place — and clamp the new value to the field
        # width. Previously the unshifted mask was used, which cleared
        # unrelated low bits and OR'ed the new value over the stale field
        # contents (corrupting e.g. shifted nibble fields like aroma).
        invmask = 0xffff ^ (self.mask << self.shift)
        newbits = ((int (value) // self.scale) & self.mask) << self.shift
        f = lambda x: (x & invmask) | newbits
        return machine.patchEeprom (self.word, f)

    def __repr__ (self):
        return '<EepromValue {}(((@{}>>{})&{:x})*{})>'.format (self.unit, self.word, self.shift, self.mask, self.scale)
class Type (Enum):
    """
    Product presets
    """
    WATER = 1
    WATER_CUP = 2
    CAPPUCCINO = 3
    CAPPUCCINO_DOUBLE = 4
    ESPRESSO = 5
    ESPRESSO_DOUBLE = 6
    LATTE = 7
    MILK = 8
    MILK_CUP = 9
    COFFEE = 10
    COFFEE_DOUBLE = 11

class Temperature (IntEnum):
    # brew temperature setting as stored in the product's EEPROM field
    LOW = 0
    NORMAL = 1
    HIGH = 2

# Per-product EEPROM-backed settings; fields a product does not support are
# None (see ImpressaXs90.products).
ProductDefaults = namedtuple ('ProductDefaults', ['water', 'temperature', 'pause', 'milk', 'aroma'])
class ImpressaXs90Buttons (IntEnum):
    """
    Button mapping for Impressa Xs90, use with .pressButton()

    These names must match those in Type, see Stateful.make
    """
    ONOFF = 1
    CLEAN = 2 # ?
    ESPRESSO = 3
    ESPRESSO_DOUBLE = 4
    COFFEE = 5
    COFFEE_DOUBLE = 6
    A_LA_CARTE = 7 # only brings up the menu, needs more button presses
    INSTANT = 8
    CAPPUCCINO = 9
    LATTE = 10
    WATER_CUP = 11
    WATER = 12
    MILK_CUP = 13
    MILK = 14
    SUBMIT = 16
    MENU = 17 # main menu
    # left wheel
    BACK = 18
    FORWARD = 19
class ImpressaXs90Eeprom (IntEnum):
    """
    Counter EEPROM addresses for Impressa Xs90

    Values are word addresses for readEeprom/writeEeprom.
    """
    COUNT_ESPRESSO = 0
    COUNT_A_LA_CARTE = 1
    COUNT_COFFEE = 2
    COUNT_CAPPUCCINO = 4
    COUNT_LATTE = 5
    COUNT_INSTANT = 6
    COUNT_CLEAN = 8 # times cleaned
    COUNT_CCLEAN = 17 # „c-reinigen“
    COUNT_MILK = 19
    COUNT_WATER = 20
    COUNT_FILTER = 34
    COUNT_ESPRESSO_DOUBLE = 224 # ?
    COUNT_COFFEE_DOUBLE = 226 # ?
class ImpressaXs90Input (IntEnum):
    """
    Bit position in status word.
    """
    # no bit positions identified yet
    pass
class ImpressaXs90:
    """
    Machine description for the Jura Impressa Xs90: button codes, EEPROM
    counter addresses and the EEPROM-backed default settings of each product.
    """
    buttons = ImpressaXs90Buttons
    eeprom = ImpressaXs90Eeprom
    input = ImpressaXs90Input

    # Type -> ProductDefaults of EepromValue descriptors; None where the
    # product has no such setting
    products = {
        Type.COFFEE: ProductDefaults (
            aroma = EepromValue (214, 4, 0xf),
            temperature = EepromValue (214, 0, 0xf, unit=Temperature),
            water = EepromValue (220, 0, 0xff, 5),
            pause = None, milk = None),
        Type.ESPRESSO: ProductDefaults (
            aroma = EepromValue (212, 4, 0xf),
            temperature = EepromValue (212, 0, 0xf, unit=Temperature),
            water = EepromValue (218, 0, 0xff, 5),
            pause = None, milk = None),
        Type.LATTE: ProductDefaults (
            water = EepromValue (223, 0, 0xff, 5),
            pause = EepromValue (186, 8, 0xff),
            milk = EepromValue (186, 0, 0xff),
            aroma = None, temperature = None),
        Type.CAPPUCCINO: ProductDefaults (
            aroma = EepromValue (216, 4, 0xf),
            water = EepromValue (222, 0, 0xff, 5),
            milk = EepromValue (184, 0, 0xff),
            pause = EepromValue (184, 8, 0xff),
            temperature = None,
            ),
        }
|
PromyLOPh/juramote
|
juramote/com.py
|
Python
|
mit
| 15,556
|
[
"ESPResSo"
] |
586c192dad52a06dc463df24d62c272c77a5e4097b9529af07bacec07db2354e
|
#!/usr/bin/env python
""" Tests the MySQL class
"""
# FIXME: to bring back to life
import time
import DIRAC
from DIRAC.Core.Utilities.MySQL import MySQL
nThread = 3
nRetrieval = 100000
DIRAC.gLogger.initialize('test_MySQL', '/testSectionVerbose')
# DIRAC.gLogger.initialize('test_MySQL','/testSection')
class MyDB(MySQL):
  """ Small MySQL wrapper with a scratch table used by the threading test. """

  def __init__(self, *stArgs, **stKeyArgs):
    self.gLogger = DIRAC.gLogger.getSubLogger('MyDB')
    MySQL.__init__(self, *stArgs, **stKeyArgs)

  def checktable(self):
    """ Drop and (re)create the MyDB_testTable scratch table. """
    retDict = self._update('DROP TABLE IF EXISTS `MyDB_testTable`')
    if not retDict['OK']:
      return retDict
    retDict = self._update('CREATE TABLE `MyDB_testTable` ( '
                           '`ID` INTEGER NOT NULL AUTO_INCREMENT, '
                           '`LastUpdate` TIMESTAMP, '
                           '`Status` varchar(128), '
                           'PRIMARY KEY (`ID`) )')
    if not retDict['OK']:
      return retDict
    return DIRAC.S_OK()

  def filltable(self, entries):
    """ Insert `entries` rows; Status holds the 1-based row number. """
    for i in xrange(1, entries + 1):
      retDict = self.insertFields('MyDB_testTable',
                                  inFields=['Status'],
                                  inValues=[i])
      if not retDict['OK']:
        return retDict
    return DIRAC.S_OK(i)

  def listtable(self, entries):
    """ Select each of the first `entries` rows back, one query per row. """
    for i in xrange(1, entries + 1):
      retDict = self._getFields('MyDB_testTable', [],
                                inFields=['Status'],
                                inValues=[i])
      if not retDict['OK']:
        return retDict
    return DIRAC.S_OK(i)

  def insert(self, status):
    """ Insert one row with the given Status value. """
    return self.insertFields('MyDB_testTable',
                             inFields=['Status'],
                             inValues=[status])

  def retrieve(self, id):
    """ Return the Status of the row with the given ID. """
    return self._getFields('MyDB_testTable', ['Status'],
                           inFields=['ID'], inValues=[id])

  def droptable(self):
    """ Remove the scratch table. """
    retDict = self._update('DROP TABLE IF EXISTS `MyDB_testTable`')
    if not retDict['OK']:
      return retDict
    return DIRAC.S_OK()
# NOTE(review): hard-coded host/user/password — tolerable only because this
# whole test is disabled (see FIXME at the top); should come from local config
# if revived.
DB = MyDB('Ricardo', 'Dirac', 'CKM-best', 'test', nThread)

testMajorStatusTable = {'Table': 'State',
                        'Description': 'VARCHAR(128)'}
testMinorStatusTable = {'Table': 'State',
                        'Description': 'VARCHAR(128)'}

testTable = {'Id': 'INT NOT NULL AUTO_INCREMENT',
             'Name': 'VARCHAR(128)',
             'MajorState': testMajorStatusTable,
             # NOTE(review): reuses the Major table dict — probably meant
             # testMinorStatusTable (which is otherwise unused)
             'MinorState': testMajorStatusTable,
             'Site': 'VARCHAR(128)',
             }

testTableIndexDict = {'Site': ['`Site`']}
testKeys = ['Id']

# schema used by the commented-out _createTables experiment below
tableDict = {'test': {'Fields': {'ID': 'INT NOT NULL AUTO_INCREMENT',
                                 'Name': 'VARCHAR(128)',
                                 },
                      'ForeignKeys': {'Name': 'Site',
                                      'Major': 'Status',
                                      'Minor': 'Status',
                                      'Application': 'Status',
                                      },
                      'PrimaryKey': 'ID',
                      'Indexes': {'test': ['`Name`', '`ID`']}
                      },
             'Site': {'Fields': {'Name': 'VARCHAR(128)',
                                 },
                      },
             'Status': {'Fields': {'Major': 'VARCHAR(24)',
                                   'Minor': 'VARCHAR(24)',
                                   'Application': 'VARCHAR(128)',
                                   },
                        }
             }

# print DB._createTables( tableDict , force = True )
# DIRAC.exit()

import threading

# bounds the number of concurrent worker threads
semaphore = threading.Semaphore(nThread)
lock = threading.Lock()

# global counters updated by testRetrieve workers (guarded by `lock`)
Success = 0
Error = 0
def testMultiThreading(tries):
  """ Fill the scratch table with 10k rows, then issue `tries` random
      retrievals, each in its own thread (at most nThread in flight),
      and report the (Success, Error) counters.
  """
  import random
  DIRAC.gLogger.info('Testing MySQL MultiThreading')
  DIRAC.gLogger.info('First adding 10 K records')
  if not DB.checktable()['OK']:
    return DIRAC.S_ERROR()
  if not DB.filltable(10000)['OK']:
    return DIRAC.S_ERROR()

  i = 0
  # overthread = 0
  DIRAC.gLogger.info('Now querying 100 K in MultiThread mode')
  while i < tries:
    if not i % 1000:
      DIRAC.gLogger.info('Query:', i)
      overthread = 0
    i += 1
    id = int(random.uniform(0, 10000)) + 1
    t = threading.Thread(target=testRetrieve, args=(id, ))
    # acquired here, released by testRetrieve when the worker finishes
    semaphore.acquire()
    t.start()

  # busy-wait until only the main thread is left
  n = threading.activeCount()
  while n > 1:
    DIRAC.gLogger.info('Waiting for Treads to end:', n)
    n = threading.activeCount()
    time.sleep(0.1)

  DIRAC.gLogger.info('Total retrieved values', Success)
  DIRAC.gLogger.info('Total Errors', Error)
  return DIRAC.S_OK((Success, Error))
def testRetrieve(id):
  """Worker thread body: fetch record `id` and update the shared counters.

  Each failed query increments Error and the query is retried; a successful
  query whose value matches the expected single-row tuple increments
  Success. Always releases the module-level semaphore on completion so the
  producer loop can start another worker.
  """
  global Success
  global Error
  retDict = DB.retrieve(id)
  # NOTE(review): a permanently failing query loops here forever (each
  # failure bumps Error) — confirm that is acceptable for this test.
  while not retDict['OK']:
    lock.acquire()
    Error += 1
    lock.release()
    retDict = DB.retrieve(id)
  # DB.retrieve returns rows as a tuple of tuples; a correct hit is the
  # single row containing the stringified id.
  if retDict['Value'] == ((str(id), ), ):
    lock.acquire()
    Success += 1
    lock.release()
  else:
    DIRAC.gLogger.error(id)
  semaphore.release()
# Declarative list of test cases: each entry names a bound DB method, the
# arguments to call it with, and the exact result dict expected back.
# The insert/retrieve pairs feed quoting characters (backtick, double and
# single quote) through the DB layer to exercise SQL escaping.
testlist = [{'method': DB._connect,
             'arguments': (),
             'output': {'OK': True, 'Value': ''}
             },
            {'method': DB.checktable,
             'arguments': (),
             'output': {'OK': True, 'Value': ''}
             },
            {'method': DB.filltable,
             'arguments': (10, ),
             'output': {'OK': True, 'Value': 10}
             },
            {'method': DB.insert,
             'arguments': ('`', ),
             'output': {'OK': True, 'Value': 1}
             },
            {'method': DB.retrieve,
             'arguments': (11, ),
             'output': {'OK': True, 'Value': (('`', ), )}
             },
            {'method': DB.insert,
             'arguments': ('"', ),
             'output': {'OK': True, 'Value': 1}
             },
            {'method': DB.retrieve,
             'arguments': (12, ),
             'output': {'OK': True, 'Value': (('"', ), )}
             },
            {'method': DB.insert,
             'arguments': ('\'', ),
             'output': {'OK': True, 'Value': 1}
             },
            {'method': DB.retrieve,
             'arguments': (13, ),
             'output': {'OK': True, 'Value': (("'", ), )}
             },
            {'method': DB.insert,
             'arguments': ('`', ),
             'output': {'OK': True, 'Value': 1}
             },
            {'method': DB.retrieve,
             'arguments': (14, ),
             'output': {'OK': True, 'Value': (("`", ), )}
             },
            {'method': DB.listtable,
             'arguments': (10, ),
             'output': {'OK': True, 'Value': 10}
             },
            {'method': testMultiThreading,
             'arguments': (nRetrieval, ),
             'output': {'OK': True, 'Value': (nRetrieval, 0)}
             },
            # { 'method' : DB.droptable,
            #  'arguments' : ( ),
            #  'output' : {'OK': True, 'Value': ''}
            # },
            ]
testdict = {'DIRAC.MySQL': testlist}
# NOTE(review): DIRAC.exit() is reached unconditionally right after building
# testdict, so the test list above is never actually executed here — confirm
# this early exit is intentional.
DIRAC.exit()
|
fstagni/DIRAC
|
tests/Integration/FIXME_Test_MySQL.py
|
Python
|
gpl-3.0
| 7,059
|
[
"DIRAC"
] |
a22d839b2d486e8030c7b3eb549d04ed98ad1011689de1dc7c17c896b03a01fc
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import abc
import re
import os
import glob
import shutil
import warnings
from itertools import chain
from copy import deepcopy
import six
import numpy as np
from monty.serialization import loadfn
from monty.io import zopen
from pymatgen.core.periodic_table import Specie
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints
from pymatgen.io.vasp.outputs import Vasprun, Outcar
from monty.json import MSONable
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.sites import PeriodicSite
"""
This module defines the VaspInputSet abstract base class and a concrete
implementation for the parameters developed and tested by the core team
of pymatgen, including the Materials Virtual Lab, Materials Project and the MIT
high throughput project. The basic concept behind an input set is to specify
a scheme to generate a consistent set of VASP inputs from a structure
without further user intervention. This ensures comparability across
runs.
Read the following carefully before implementing new input sets:
1. 99% of what needs to be done can be done by specifying user_incar_settings
to override some of the defaults of various input sets. Unless there is an
extremely good reason to add a new set, DO NOT add one. E.g., if you want
to turn the hubbard U off, just set "LDAU": False as a user_incar_setting.
2. All derivative input sets should inherit from one of the usual MPRelaxSet or
MITRelaxSet, and proper superclass delegation should be used where possible.
In particular, you are not supposed to implement your own as_dict or
from_dict for derivative sets unless you know what you are doing.
   Improperly overriding the as_dict and from_dict protocols is the major
cause of implementation headaches. If you need an example, look at how the
MPStaticSet or MPNonSCFSets are constructed.
The above are recommendations. The following are UNBREAKABLE rules:
1. All input sets must take in a structure or list of structures as the first
argument.
2. user_incar_settings and user_kpoints_settings are absolute. Any new sets you
implement must obey this. If a user wants to override your settings,
you assume he knows what he is doing. Do not magically override user
supplied settings. You can issue a warning if you think the user is wrong.
3. All input sets must save all supplied args and kwargs as instance variables.
E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This
ensures the as_dict and from_dict work correctly.
"""
# Standard module metadata.
__author__ = "Shyue Ping Ong, Wei Chen, Will Richards, Geoffroy Hautier, Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 28 2016"
# Directory holding the *.yaml configuration files read by _load_yaml_config.
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class VaspInputSet(six.with_metaclass(abc.ABCMeta, MSONable)):
    """
    Base class representing a set of Vasp input parameters with a structure
    supplied as init parameters. Typically, you should not inherit from this
    class. Start from DictSet or MPRelaxSet or MITRelaxSet.
    """
    @property
    @abc.abstractmethod
    def incar(self):
        """Incar object"""
        pass
    @property
    @abc.abstractmethod
    def kpoints(self):
        """Kpoints object"""
        pass
    @property
    @abc.abstractmethod
    def poscar(self):
        """Poscar object"""
        pass
    @property
    def potcar_symbols(self):
        """
        List of POTCAR symbols.
        """
        elements = self.poscar.site_symbols
        potcar_symbols = []
        settings = self._config_dict["POTCAR"]
        # The POTCAR config maps an element either to a plain symbol string
        # or to a dict with a 'symbol' key; probe one entry to detect which
        # form this config uses. Elements absent from the config fall back
        # to their own symbol.
        if isinstance(settings[elements[-1]], dict):
            for el in elements:
                potcar_symbols.append(settings[el]['symbol']
                                      if el in settings else el)
        else:
            for el in elements:
                potcar_symbols.append(settings.get(el, el))
        return potcar_symbols
    @property
    def potcar(self):
        """
        Potcar object.
        """
        return Potcar(self.potcar_symbols, functional=self.potcar_functional)
    @property
    def all_input(self):
        """
        Returns all input files as a dict of {filename: vasp object}
        Returns:
            dict of {filename: object}, e.g., {'INCAR': Incar object, ...}
        """
        kpoints = self.kpoints
        incar = self.incar
        # With fewer than 4 total k-points, ISMEAR = -5 is replaced by
        # ISMEAR = 0 (too few points for the -5 scheme).
        if np.product(kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
            incar["ISMEAR"] = 0
        return {'INCAR': incar,
                'KPOINTS': kpoints,
                'POSCAR': self.poscar,
                'POTCAR': self.potcar}
    def write_input(self, output_dir,
                    make_dir_if_not_present=True, include_cif=False):
        """
        Writes a set of VASP input to a directory.
        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            include_cif (bool): Whether to write a CIF file in the output
                directory for easier opening by VESTA.
        """
        if make_dir_if_not_present and not os.path.exists(output_dir):
            os.makedirs(output_dir)
        for k, v in self.all_input.items():
            v.write_file(os.path.join(output_dir, k))
        if include_cif:
            s = self.all_input["POSCAR"].structure
            # Strip whitespace from the formula to get a clean filename.
            fname = os.path.join(output_dir, "%s.cif" % re.sub(r'\s', "",
                                                               s.formula))
            s.to(filename=fname)
    def as_dict(self, verbosity=2):
        """Return the MSONable dict; verbosity == 1 omits the (potentially
        large) structure entry."""
        d = MSONable.as_dict(self)
        if verbosity == 1:
            d.pop("structure", None)
        return d
def _load_yaml_config(fname):
    """Load "<fname>.yaml" from MODULE_DIR and fold the shared defaults from
    VASPIncarBase.yaml into its "INCAR" section."""
    cfg = loadfn(os.path.join(MODULE_DIR, "%s.yaml" % fname))
    base_incar = loadfn(os.path.join(MODULE_DIR, "VASPIncarBase.yaml"))
    cfg["INCAR"].update(base_incar)
    return cfg
class DictSet(VaspInputSet):
    """
    Concrete implementation of VaspInputSet that is initialized from a dict
    settings. This allows arbitrary settings to be input. In general,
    this is rarely used directly unless there is a source of settings in yaml
    format (e.g., from a REST interface). It is typically used by other
    VaspInputSets for initialization.
    Special consideration should be paid to the way the MAGMOM initialization
    for the INCAR is done. The initialization differs depending on the type of
    structure and the configuration settings. The order in which the magmom is
    determined is as follows:
    1. If the site itself has a magmom setting, that is used.
    2. If the species on the site has a spin setting, that is used.
    3. If the species itself has a particular setting in the config file, that
       is used, e.g., Mn3+ may have a different magmom than Mn4+.
    4. Lastly, the element symbol itself is checked in the config file. If
       there are no settings, VASP's default of 0.6 is used.
    Args:
        structure (Structure): The Structure to create inputs for.
        config_dict (dict): The config dictionary to use.
        files_to_transfer (dict): A dictionary of {filename: filepath}. This
            allows the transfer of files from a previous calculation.
        user_incar_settings (dict): User INCAR settings. This allows a user
            to override INCAR settings, e.g., setting a different MAGMOM for
            various elements or species. Note that in the new scheme,
            ediff_per_atom and hubbard_u are no longer args. Instead, the
            config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former
            scales with # of atoms, the latter does not. If both are
            present, EDIFF is preferred. To force such settings, just supply
            user_incar_settings={"EDIFF": 1e-5, "LDAU": False} for example.
            The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since
            pymatgen defines different values depending on what anions are
            present in the structure, so these keys can be defined in one
            of two ways, e.g. either {"LDAUU":{"O":{"Fe":5}}} to set LDAUU
            for Fe to 5 in an oxide, or {"LDAUU":{"Fe":5}} to set LDAUU to
            5 regardless of the input structure.
        user_kpoints_settings (dict or Kpoints): Allow user to override kpoints
            setting by supplying a dict E.g., {"reciprocal_density": 1000}.
            User can also supply Kpoints object. Default is None.
        user_potcar_settings (dict: Allow user to override POTCARs. E.g.,
            {"Gd": "Gd_3"}. This is generally not recommended. Default is None.
        constrain_total_magmom (bool): Whether to constrain the total magmom
            (NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
            species. Defaults to False.
        sort_structure (bool): Whether to sort the structure (using the
            default sort order of electronegativity) before generating input
            files. Defaults to True, the behavior you would want most of the
            time. This ensures that similar atomic species are grouped
            together.
        potcar_functional (str): Functional to use. Default (None) is to use
            the functional in Potcar.DEFAULT_FUNCTIONAL. Valid values:
            "PBE", "LDA", "PW91", "LDA_US"
        force_gamma (bool): Force gamma centered kpoint generation. Default
            (False) is to use the Automatic Density kpoint scheme, which
            will use the Gamma centered generation scheme for hexagonal
            cells, and Monkhorst-Pack otherwise.
        reduce_structure (None/str): Before generating the input files,
            generate the reduced structure. Default (None), does not
            alter the structure. Valid values: None, "niggli", "LLL".
        vdw: Adds default parameters for van-der-Waals functionals supported
            by VASP to INCAR. Supported functionals are: DFT-D2, undamped
            DFT-D3, DFT-D3 with Becke-Jonson damping, Tkatchenko-Scheffler,
            Tkatchenko-Scheffler with iterative Hirshfeld partitioning,
            MBD@rSC, dDsC, Dion's vdW-DF, DF2, optPBE, optB88, optB86b and
            rVV10.
    """
    def __init__(self, structure, config_dict,
                 files_to_transfer=None, user_incar_settings=None,
                 user_kpoints_settings=None, user_potcar_settings=None,
                 constrain_total_magmom=False, sort_structure=True,
                 potcar_functional="PBE", force_gamma=False,
                 reduce_structure=None, vdw=None):
        if reduce_structure:
            structure = structure.get_reduced_structure(reduce_structure)
        if sort_structure:
            structure = structure.get_sorted_structure()
        self.structure = structure
        # Deep-copy so per-instance mutations below (vdW tags, POTCAR
        # overrides) never leak back into the shared config dict.
        self._config_dict = deepcopy(config_dict)
        self.files_to_transfer = files_to_transfer or {}
        self.constrain_total_magmom = constrain_total_magmom
        self.sort_structure = sort_structure
        self.potcar_functional = potcar_functional
        self.force_gamma = force_gamma
        self.reduce_structure = reduce_structure
        self.user_incar_settings = user_incar_settings or {}
        self.user_kpoints_settings = user_kpoints_settings
        self.user_potcar_settings = user_potcar_settings
        self.vdw = vdw.lower() if vdw is not None else None
        if self.vdw:
            # Merge the INCAR tags for the requested vdW functional from the
            # packaged parameter file.
            vdw_par = loadfn(os.path.join(MODULE_DIR, "vdW_parameters.yaml"))
            try:
                self._config_dict["INCAR"].update(vdw_par[self.vdw])
            except KeyError:
                raise KeyError("Invalid or unsupported van-der-Waals "
                               "functional. Supported functionals are "
                               "%s." % vdw_par.keys())
        if self.user_potcar_settings:
            warnings.warn(
                "Overriding POTCARs is generally not recommended as it "
                "significantly affect the results of calculations and "
                "compatibility with other calculations done with the same "
                "input set. In many instances, it is better to write a "
                "subclass of a desired input set and override the POTCAR in "
                "the subclass to be explicit on the differences.")
            for k, v in self.user_potcar_settings.items():
                self._config_dict["POTCAR"][k] = v
    @property
    def incar(self):
        # User settings take precedence over the config defaults.
        settings = dict(self._config_dict["INCAR"])
        settings.update(self.user_incar_settings)
        structure = self.structure
        incar = Incar()
        comp = structure.composition
        elements = sorted([el for el in comp.elements if comp[el] > 0],
                          key=lambda e: e.X)
        most_electroneg = elements[-1].symbol
        poscar = Poscar(structure)
        hubbard_u = settings.get("LDAU", False)
        for k, v in settings.items():
            if k == "MAGMOM":
                # MAGMOM precedence (see class docstring): site magmom >
                # species spin > species config entry > element config entry
                # > 0.6 default.
                mag = []
                for site in structure:
                    if hasattr(site, 'magmom'):
                        mag.append(site.magmom)
                    elif hasattr(site.specie, 'spin'):
                        mag.append(site.specie.spin)
                    elif str(site.specie) in v:
                        mag.append(v.get(str(site.specie)))
                    else:
                        mag.append(v.get(site.specie.symbol, 0.6))
                incar[k] = mag
            elif k in ('LDAUU', 'LDAUJ', 'LDAUL'):
                if hubbard_u:
                    # Site-decorated values (e.g. site.ldauu) win if present.
                    if hasattr(structure[0], k.lower()):
                        m = dict([(site.specie.symbol, getattr(site, k.lower()))
                                  for site in structure])
                        incar[k] = [m[sym] for sym in poscar.site_symbols]
                    # lookup specific LDAU if specified for most_electroneg atom
                    elif most_electroneg in v.keys() and \
                            isinstance(v[most_electroneg], dict):
                        incar[k] = [v[most_electroneg].get(sym, 0)
                                    for sym in poscar.site_symbols]
                    # else, use fallback LDAU value if it exists
                    else:
                        incar[k] = [v.get(sym, 0)
                                    if isinstance(v.get(sym, 0), (float, int))
                                    else 0 for sym in poscar.site_symbols]
            elif k.startswith("EDIFF") and k != "EDIFFG":
                # EDIFF (absolute) is preferred over EDIFF_PER_ATOM when both
                # are supplied; EDIFF_PER_ATOM scales with the site count.
                if "EDIFF" not in settings and k == "EDIFF_PER_ATOM":
                    incar["EDIFF"] = float(v) * structure.num_sites
                else:
                    incar["EDIFF"] = float(settings["EDIFF"])
            else:
                incar[k] = v
        has_u = hubbard_u and sum(incar['LDAUU']) > 0
        if has_u:
            # modify LMAXMIX if LSDA+U and you have d or f electrons
            # note that if the user explicitly sets LMAXMIX in settings it will
            # override this logic.
            if 'LMAXMIX' not in settings.keys():
                # contains f-electrons
                if any([el.Z > 56 for el in structure.composition]):
                    incar['LMAXMIX'] = 6
                # contains d-electrons
                elif any([el.Z > 20 for el in structure.composition]):
                    incar['LMAXMIX'] = 4
        else:
            # +U disabled or all U values zero: strip every LDAU* tag.
            for key in list(incar.keys()):
                if key.startswith('LDAU'):
                    del incar[key]
        if self.constrain_total_magmom:
            # Magmoms at or below the 0.6 default are treated as zero when
            # summing for NUPDOWN.
            nupdown = sum([mag if abs(mag) > 0.6 else 0
                           for mag in incar['MAGMOM']])
            incar['NUPDOWN'] = nupdown
        if self.structure._charge:
            # NOTE(review): NELECT is computed by *adding* the structure
            # charge to the neutral electron count — confirm the sign
            # convention matches Structure._charge.
            incar["NELECT"] = self.nelect + self.structure._charge
        return incar
    @property
    def poscar(self):
        return Poscar(self.structure)
    @property
    def nelect(self):
        """
        Gets the default number of electrons for a given structure.
        """
        # Neutral electron count: sum of POTCAR ZVALs weighted by element
        # amounts, rounded to the nearest integer.
        return int(round(
            sum([self.structure.composition.element_composition[ps.element]
                 * ps.ZVAL
                 for ps in self.potcar])))
    @property
    def kpoints(self):
        """
        Writes out a KPOINTS file using the fully automated grid method. Uses
        Gamma centered meshes for hexagonal cells and Monk grids otherwise.
        Algorithm:
            Uses a simple approach scaling the number of divisions along each
            reciprocal lattice vector proportional to its length.
        """
        settings = self.user_kpoints_settings or self._config_dict["KPOINTS"]
        if isinstance(settings, Kpoints):
            return settings
        # If grid_density is in the kpoints_settings use
        # Kpoints.automatic_density
        if settings.get('grid_density'):
            return Kpoints.automatic_density(
                self.structure, int(settings['grid_density']),
                self.force_gamma)
        # If reciprocal_density is in the kpoints_settings use
        # Kpoints.automatic_density_by_vol
        elif settings.get('reciprocal_density'):
            return Kpoints.automatic_density_by_vol(
                self.structure, int(settings['reciprocal_density']),
                self.force_gamma)
        # If length is in the kpoints_settings use Kpoints.automatic
        elif settings.get('length'):
            return Kpoints.automatic(settings['length'])
        # Raise error. Unsure of which kpoint generation to use
        else:
            raise ValueError(
                "Invalid KPoint Generation algo : Supported Keys are "
                "grid_density: for Kpoints.automatic_density generation, "
                "reciprocal_density: for KPoints.automatic_density_by_vol "
                "generation, and length : for Kpoints.automatic generation")
    def __str__(self):
        return self.__class__.__name__
    def __repr__(self):
        return self.__class__.__name__
    def write_input(self, output_dir,
                    make_dir_if_not_present=True, include_cif=False):
        """Write the standard VASP inputs, then copy any files_to_transfer
        (e.g. a CHGCAR from a previous run) into the output directory."""
        super(DictSet, self).write_input(
            output_dir=output_dir,
            make_dir_if_not_present=make_dir_if_not_present,
            include_cif=include_cif)
        for k, v in self.files_to_transfer.items():
            # zopen handles transparently compressed sources/destinations.
            with zopen(v, "rb") as fin, \
                    zopen(os.path.join(output_dir, k), "wb") as fout:
                shutil.copyfileobj(fin, fout)
class MITRelaxSet(DictSet):
    """
    Standard implementation of VaspInputSet utilizing parameters in the MIT
    High-throughput project.
    The parameters are chosen specifically for a high-throughput project,
    which means in general pseudopotentials with fewer electrons were chosen.
    Please refer::
        A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,
        K. A. Persson, G. Ceder. A high-throughput infrastructure for density
        functional theory calculations. Computational Materials Science,
        2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023
    """
    # Shared YAML configuration, loaded once at import time.
    CONFIG = _load_yaml_config("MITRelaxSet")
    def __init__(self, structure, **kwargs):
        """
        Args:
            structure (Structure): Structure to compute.
            **kwargs: kwargs supported by DictSet.
        """
        super(MITRelaxSet, self).__init__(
            structure, MITRelaxSet.CONFIG, **kwargs)
        self.kwargs = kwargs
class MPRelaxSet(DictSet):
    """
    Implementation of VaspInputSet utilizing parameters in the public
    Materials Project. Typically, the pseudopotentials chosen contain more
    electrons than the MIT parameters, and the k-point grid is ~50% more dense.
    The LDAUU parameters are also different due to the different psps used,
    which result in different fitted values.
    """
    # Shared YAML configuration, loaded once at import time. (Fixes the
    # duplicated assignment "CONFIG = CONFIG = ..." in the original.)
    CONFIG = _load_yaml_config("MPRelaxSet")
    def __init__(self, structure, **kwargs):
        """
        Args:
            structure (Structure): Structure to compute.
            **kwargs: kwargs supported by DictSet.
        """
        super(MPRelaxSet, self).__init__(
            structure, MPRelaxSet.CONFIG, **kwargs)
        self.kwargs = kwargs
class MPHSERelaxSet(DictSet):
    """
    Same as the MPRelaxSet, but with HSE parameters.
    """
    # Shared YAML configuration, loaded once at import time.
    CONFIG = _load_yaml_config("MPHSERelaxSet")
    def __init__(self, structure, **kwargs):
        """
        Args:
            structure (Structure): Structure to compute.
            **kwargs: kwargs supported by DictSet.
        """
        super(MPHSERelaxSet, self).__init__(
            structure, MPHSERelaxSet.CONFIG, **kwargs)
        self.kwargs = kwargs
class MPStaticSet(MPRelaxSet):
    def __init__(self, structure, prev_incar=None, prev_kpoints=None,
                 lepsilon=False, lcalcpol=False, reciprocal_density=100,
                 **kwargs):
        """
        Run a static calculation.
        Args:
            structure (Structure): Structure from previous run.
            prev_incar (Incar): Incar file from previous run.
            prev_kpoints (Kpoints): Kpoints from previous run.
            lepsilon (bool): Whether to add static dielectric calculation
            lcalcpol (bool): Whether to set LCALCPOL (polarization) in the
                INCAR.
            reciprocal_density (int): For static calculations,
                we usually set the reciprocal density by volume. This is a
                convenience arg to change that, rather than using
                user_kpoints_settings. Defaults to 100, which is ~50% more than
                that of standard relaxation calculations.
            \\*\\*kwargs: kwargs supported by MPRelaxSet.
        """
        super(MPStaticSet, self).__init__(structure, **kwargs)
        # Accept either objects or file paths for the previous INCAR/KPOINTS.
        if isinstance(prev_incar, six.string_types):
            prev_incar = Incar.from_file(prev_incar)
        if isinstance(prev_kpoints, six.string_types):
            prev_kpoints = Kpoints.from_file(prev_kpoints)
        self.prev_incar = prev_incar
        self.prev_kpoints = prev_kpoints
        self.reciprocal_density = reciprocal_density
        # NOTE(review): this reassignment restores the *unsorted* input
        # structure, overwriting the sorted copy stored by DictSet.__init__
        # when sort_structure=True — confirm this is intended.
        self.structure = structure
        self.kwargs = kwargs
        self.lepsilon = lepsilon
        self.lcalcpol = lcalcpol
    @property
    def incar(self):
        """INCAR for the static run: previous-run INCAR (if given) with
        static-specific tags forced, user settings re-applied on top."""
        parent_incar = super(MPStaticSet, self).incar
        incar = Incar(self.prev_incar) if self.prev_incar is not None else \
            Incar(parent_incar)
        # Tags that define a static run; these overwrite the previous INCAR.
        incar.update(
            {"IBRION": -1, "ISMEAR": -5, "LAECHG": True, "LCHARG": True,
             "LORBIT": 11, "LVHAR": True, "LWAVE": False, "NSW": 0,
             "ICHARG": 0, "ALGO": "Normal"})
        if self.lepsilon:
            incar["IBRION"] = 8
            incar["LEPSILON"] = True
            # LPEAD=T: numerical evaluation of overlap integral prevents
            # LRF_COMMUTATOR errors and can lead to better expt. agreement
            # but produces slightly different results
            incar["LPEAD"] = True
            # Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
            # to output ionic.
            incar.pop("NSW", None)
            incar.pop("NPAR", None)
        if self.lcalcpol:
            incar["LCALCPOL"] = True
        for k in ["MAGMOM", "NUPDOWN"] + list(self.kwargs.get(
                "user_incar_settings", {}).keys()):
            # For these parameters as well as user specified settings, override
            # the incar settings.
            if parent_incar.get(k, None) is not None:
                incar[k] = parent_incar[k]
            else:
                incar.pop(k, None)
        # use new LDAUU when possible b/c the Poscar might have changed
        # representation
        if incar.get('LDAU'):
            u = incar.get('LDAUU', [])
            j = incar.get('LDAUJ', [])
            if sum([u[x] - j[x] for x, y in enumerate(u)]) > 0:
                for tag in ('LDAUU', 'LDAUL', 'LDAUJ'):
                    incar.update({tag: parent_incar[tag]})
            # ensure to have LMAXMIX for GGA+U static run
            if "LMAXMIX" not in incar:
                incar.update({"LMAXMIX": parent_incar["LMAXMIX"]})
        # Compare ediff between previous and staticinputset values,
        # choose the tighter ediff
        incar["EDIFF"] = min(incar.get("EDIFF", 1), parent_incar["EDIFF"])
        return incar
    @property
    def kpoints(self):
        """KPOINTS at self.reciprocal_density, preserving the k-point style
        (Monkhorst vs Gamma) of the previous run when one was supplied."""
        self._config_dict["KPOINTS"]["reciprocal_density"] = \
            self.reciprocal_density
        kpoints = super(MPStaticSet, self).kpoints
        # Prefer to use k-point scheme from previous run
        if self.prev_kpoints and self.prev_kpoints.style != kpoints.style:
            if self.prev_kpoints.style == Kpoints.supported_modes.Monkhorst:
                # Monkhorst meshes require even divisions here; round odd
                # divisions up.
                k_div = [kp + 1 if kp % 2 == 1 else kp
                         for kp in kpoints.kpts[0]]
                kpoints = Kpoints.monkhorst_automatic(k_div)
            else:
                kpoints = Kpoints.gamma_automatic(kpoints.kpts[0])
        return kpoints
    @classmethod
    def from_prev_calc(cls, prev_calc_dir, standardize=False, sym_prec=0.1,
                       international_monoclinic=True, reciprocal_density=100,
                       small_gap_multiply=None, **kwargs):
        """
        Generate a set of Vasp input files for static calculations from a
        directory of previous Vasp run.
        Args:
            prev_calc_dir (str): Directory containing the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            standardize (float): Whether to standardize to a primitive
                standard cell. Defaults to False.
            sym_prec (float): Tolerance for symmetry finding. If not 0,
                the final structure from the previous run will be symmetrized
                to get a primitive standard cell. Set to 0 if you don't want
                that.
            international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults True.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            \\*\\*kwargs: All kwargs supported by MPStaticSet,
                other than prev_incar and prev_structure and prev_kpoints which
                are determined from the prev_calc_dir.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        prev_incar = vasprun.incar
        prev_kpoints = vasprun.kpoints
        # We will make a standard structure for the given symprec.
        # (sym_prec is only applied when standardize is truthy.)
        prev_structure = get_structure_from_prev_run(
            vasprun, outcar, sym_prec=standardize and sym_prec,
            international_monoclinic=international_monoclinic)
        # multiply the reciprocal density if needed:
        if small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= small_gap_multiply[0]:
                reciprocal_density = reciprocal_density * small_gap_multiply[1]
        return cls(
            structure=prev_structure, prev_incar=prev_incar,
            prev_kpoints=prev_kpoints,
            reciprocal_density=reciprocal_density, **kwargs)
class MPHSEBSSet(MPHSERelaxSet):
    def __init__(self, structure, user_incar_settings=None, added_kpoints=None,
                 mode="Uniform", reciprocal_density=None,
                 kpoints_line_density=20, **kwargs):
        """
        Implementation of a VaspInputSet for HSE band structure computations.
        Remember that HSE band structures must be self-consistent in VASP. A
        band structure along symmetry lines for instance needs BOTH a uniform
        grid with appropriate weights AND a path along the lines with weight 0.
        Thus, the "Uniform" mode is just like regular static SCF but allows
        adding custom kpoints (e.g., corresponding to known VBM/CBM) to the
        uniform grid that have zero weight (e.g., for better gap estimate).
        The "Line" mode is just like Uniform mode, but additionally adds
        k-points along symmetry lines with zero weight.
        Args:
            structure (Structure): Structure to compute
            user_incar_settings (dict): A dict specifying additional incar
                settings
            added_kpoints (list): a list of kpoints (list of 3 number list)
                added to the run. The k-points are in fractional coordinates
            mode (str): "Line" - generate k-points along symmetry lines for
                bandstructure. "Uniform" - generate uniform k-points grid
            reciprocal_density (int): k-point density to use for uniform mesh
            kpoints_line_density (int): k-point density for high symmetry lines
            **kwargs (dict): Any other parameters to pass into DictVaspInputSet
        """
        super(MPHSEBSSet, self).__init__(structure, **kwargs)
        self.structure = structure
        self.user_incar_settings = user_incar_settings or {}
        # Force static-run tags appropriate for a self-consistent HSE band
        # structure computation.
        self._config_dict["INCAR"].update({
            "NSW": 0,
            "ISMEAR": 0,
            "SIGMA": 0.05,
            "ISYM": 3,
            "LCHARG": False,
            "NELMIN": 5
        })
        self.added_kpoints = added_kpoints if added_kpoints is not None else []
        self.mode = mode
        # BUG FIX: the original read self.kpoints_settings, an attribute that
        # is never defined in this class hierarchy (DictSet stores the kpoints
        # configuration in self._config_dict["KPOINTS"]), so passing
        # reciprocal_density=None raised AttributeError.
        self.reciprocal_density = reciprocal_density or \
            self._config_dict["KPOINTS"]['reciprocal_density']
        self.kpoints_line_density = kpoints_line_density
    @property
    def kpoints(self):
        """Explicit (Reciprocal-style) k-point list: the symmetry-reduced
        uniform mesh with standard weights, plus user-added points and (in
        Line mode) the high-symmetry path, both with zero weight."""
        kpts = []
        weights = []
        all_labels = []
        # for both modes, include the Uniform mesh w/standard weights
        grid = Kpoints.automatic_density_by_vol(self.structure,
                                                self.reciprocal_density).kpts
        ir_kpts = SpacegroupAnalyzer(self.structure, symprec=0.1) \
            .get_ir_reciprocal_mesh(grid[0])
        for k in ir_kpts:
            kpts.append(k[0])
            weights.append(int(k[1]))
            all_labels.append(None)
        # for both modes, include any user-added kpoints w/zero weight
        for k in self.added_kpoints:
            kpts.append(k)
            weights.append(0.0)
            all_labels.append("user-defined")
        # for line mode only, add the symmetry lines w/zero weight
        if self.mode.lower() == "line":
            kpath = HighSymmKpath(self.structure)
            frac_k_points, labels = kpath.get_kpoints(
                line_density=self.kpoints_line_density,
                coords_are_cartesian=False)
            for k in range(len(frac_k_points)):
                kpts.append(frac_k_points[k])
                weights.append(0.0)
                all_labels.append(labels[k])
        comment = ("HSE run along symmetry lines"
                   if self.mode.lower() == "line"
                   else "HSE run on uniform grid")
        return Kpoints(comment=comment,
                       style=Kpoints.supported_modes.Reciprocal,
                       num_kpts=len(kpts), kpts=kpts, kpts_weights=weights,
                       labels=all_labels)
    @classmethod
    def from_prev_calc(cls, prev_calc_dir, mode="gap",
                       reciprocal_density=50, copy_chgcar=True, **kwargs):
        """
        Generate a set of Vasp input files for HSE calculations from a
        directory of previous Vasp run. if mode=="gap", it explicitly adds VBM
        and CBM of the prev run to the k-point list of this run.
        Args:
            prev_calc_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of previous vasp run.
            mode (str): Either "uniform", "gap" or "line"
            reciprocal_density (int): density of k-mesh
            copy_chgcar (bool): whether to copy CHGCAR of previous run
            \\*\\*kwargs: All kwargs supported by MPHSEBSStaticSet,
                other than prev_structure which is determined from the previous
                calc dir.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        # note: don't standardize the cell because we want to retain k-points
        prev_structure = get_structure_from_prev_run(vasprun, outcar,
                                                     sym_prec=0)
        added_kpoints = []
        if mode.lower() == "gap":
            # Add the VBM/CBM k-points of the previous run (zero weight) for
            # a better gap estimate.
            bs = vasprun.get_band_structure()
            vbm, cbm = bs.get_vbm()["kpoint"], bs.get_cbm()["kpoint"]
            if vbm:
                added_kpoints.append(vbm.frac_coords)
            if cbm:
                added_kpoints.append(cbm.frac_coords)
        files_to_transfer = {}
        if copy_chgcar:
            # Pick the lexicographically last CHGCAR* (e.g. prefers a
            # relaxation-numbered variant when several are present).
            chgcars = sorted(glob.glob(os.path.join(prev_calc_dir, "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        return cls(
            structure=prev_structure,
            added_kpoints=added_kpoints, reciprocal_density=reciprocal_density,
            mode=mode, files_to_transfer=files_to_transfer, **kwargs)
class MPNonSCFSet(MPRelaxSet):
def __init__(self, structure, prev_incar=None,
mode="line", nedos=601, reciprocal_density=100, sym_prec=0.1,
kpoints_line_density=20, optics=False, **kwargs):
"""
Init a MPNonSCFSet. Typically, you would use the classmethod
from_prev_calc to initialize from a previous SCF run.
Args:
structure (Structure): Structure to compute
prev_incar (Incar/string): Incar file from previous run.
mode (str): Line or Uniform mode supported.
nedos (int): nedos parameter. Default to 601.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
sym_prec (float): Symmetry precision (for Uniform mode).
kpoints_line_density (int): Line density for Line mode.
optics (bool): whether to add dielectric function
\\*\\*kwargs: kwargs supported by MPVaspInputSet.
"""
super(MPNonSCFSet, self).__init__(structure, **kwargs)
if isinstance(prev_incar, six.string_types):
prev_incar = Incar.from_file(prev_incar)
self.prev_incar = prev_incar
self.kwargs = kwargs
self.nedos = nedos
self.reciprocal_density = reciprocal_density
self.sym_prec = sym_prec
self.kpoints_line_density = kpoints_line_density
self.optics = optics
self.mode = mode.lower()
if self.mode.lower() not in ["line", "uniform"]:
raise ValueError("Supported modes for NonSCF runs are 'Line' and "
"'Uniform'!")
if (self.mode.lower() != "uniform" or nedos < 2000) and optics:
warnings.warn("It is recommended to use Uniform mode with a high "
"NEDOS for optics calculations.")
@property
def incar(self):
incar = super(MPNonSCFSet, self).incar
if self.prev_incar is not None:
incar.update({k: v for k, v in self.prev_incar.items()})
# Overwrite necessary INCAR parameters from previous runs
incar.update({"IBRION": -1, "ISMEAR": 0, "SIGMA": 0.001,
"LCHARG": False, "LORBIT": 11, "LWAVE": False,
"NSW": 0, "ISYM": 0, "ICHARG": 11})
incar.update(self.kwargs.get("user_incar_settings", {}))
if self.mode.lower() == "uniform":
# Set smaller steps for DOS output
incar["NEDOS"] = self.nedos
if self.optics:
incar["LOPTICS"] = True
incar.pop("MAGMOM", None)
return incar
@property
def kpoints(self):
if self.mode == "line":
kpath = HighSymmKpath(self.structure)
frac_k_points, k_points_labels = kpath.get_kpoints(
line_density=self.kpoints_line_density,
coords_are_cartesian=False)
kpoints = Kpoints(
comment="Non SCF run along symmetry lines",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(frac_k_points),
kpts=frac_k_points, labels=k_points_labels,
kpts_weights=[1] * len(frac_k_points))
else:
kpoints = Kpoints.automatic_density_by_vol(self.structure,
self.reciprocal_density)
mesh = kpoints.kpts[0]
ir_kpts = SpacegroupAnalyzer(
self.structure,
symprec=self.sym_prec).get_ir_reciprocal_mesh(mesh)
kpts = []
weights = []
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
kpoints = Kpoints(comment="Non SCF run on uniform grid",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(ir_kpts),
kpts=kpts, kpts_weights=weights)
return kpoints
    @classmethod
    def from_prev_calc(cls, prev_calc_dir, copy_chgcar=True,
                       nbands_factor=1.2, standardize=False, sym_prec=0.1,
                       international_monoclinic=True, reciprocal_density=100,
                       kpoints_line_density=20, small_gap_multiply=None,
                       **kwargs):
        """
        Generate a set of Vasp input files for NonSCF calculations from a
        directory of previous static Vasp run.
        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
            nbands_factor (float): Multiplicative factor for NBANDS. Choose a
                higher number if you are doing an LOPTICS calculation.
            standardize (float): Whether to standardize to a primitive
                standard cell. Defaults to False.
            sym_prec (float): Tolerance for symmetry finding. If not 0,
                the final structure from the previous run will be symmetrized
                to get a primitive standard cell. Set to 0 if you don't want
                that.
            international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults True.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume in uniform mode (defaults to 100)
            kpoints_line_density (int): density of k-mesh in line mode
                (defaults to 20)
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            \\*\\*kwargs: All kwargs supported by MPNonSCFSet,
                other than structure, prev_incar and prev_chgcar which
                are determined from the prev_calc_dir.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        incar = vasprun.incar
        # Get a Magmom-decorated structure.
        # "standardize and sym_prec" evaluates to False/0 when standardize
        # is False, which disables symmetrization downstream.
        structure = get_structure_from_prev_run(
            vasprun, outcar, sym_prec=standardize and sym_prec,
            international_monoclinic=international_monoclinic)
        # Turn off spin when magmom for every site is smaller than 0.02.
        # Prefer the per-site OUTCAR magnetization; otherwise fall back to
        # the vasprun's spin setting.
        if outcar and outcar.magnetization:
            site_magmom = np.array([i['tot'] for i in outcar.magnetization])
            ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
        elif vasprun.is_spin:
            ispin = 2
        else:
            ispin = 1
        # Scale up NBANDS from the previous run (rounded up to an integer).
        nbands = int(np.ceil(vasprun.parameters["NBANDS"] * nbands_factor))
        incar.update({"ISPIN": ispin, "NBANDS": nbands})
        files_to_transfer = {}
        if copy_chgcar:
            # Take the last CHGCAR* file in sorted order, if any exist.
            chgcars = sorted(glob.glob(os.path.join(prev_calc_dir, "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        # multiply the reciprocal density if needed:
        # small-gap systems get denser k-meshes in both uniform and line mode.
        if small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= small_gap_multiply[0]:
                reciprocal_density = reciprocal_density * small_gap_multiply[1]
                kpoints_line_density = kpoints_line_density * \
                    small_gap_multiply[1]
        return cls(structure=structure, prev_incar=incar,
                   reciprocal_density=reciprocal_density,
                   kpoints_line_density=kpoints_line_density,
                   files_to_transfer=files_to_transfer, **kwargs)
class MPSOCSet(MPStaticSet):
    """
    Input set for spin-orbit coupling (SOC) calculations built on top of
    :class:`MPStaticSet`. Requires a structure decorated with 3-component
    (noncollinear) magnetic moments via the 'magmom' site property.
    """
    def __init__(self, structure, saxis=(0, 0, 1), prev_incar=None,
                 reciprocal_density=100, **kwargs):
        """
        Init a MPSOCSet.
        Args:
            structure (Structure): the structure must have the 'magmom' site
                property and each magnetic moment value must have 3
                components. eg:- magmom = [[0,0,2], ...]
            saxis (tuple): magnetic moment orientation
            prev_incar (Incar): Incar file from previous run.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            \\*\\*kwargs: kwargs supported by MPVaspInputSet.
        """
        # Bugfix: the original condition joined the two checks with "and",
        # which (a) raised an AttributeError (not the intended ValueError)
        # for structures lacking a magmom site property and (b) silently
        # accepted scalar collinear magmoms. "or" implements the documented
        # contract.
        if not hasattr(structure[0], "magmom") or \
                not isinstance(structure[0].magmom, list):
            raise ValueError(
                "The structure must have the 'magmom' site "
                "property and each magnetic moment value must have 3 "
                "components. eg:- magmom = [0,0,2]")
        self.saxis = saxis
        super(MPSOCSet, self).__init__(
            structure, prev_incar=prev_incar,
            reciprocal_density=reciprocal_density, **kwargs)
    @property
    def incar(self):
        """INCAR with SOC-specific flags (LSORBIT, SAXIS, ISYM=-1,
        ICHARG=11) forced on top of the parent/previous INCAR; user
        settings take final precedence."""
        incar = super(MPSOCSet, self).incar
        if self.prev_incar is not None:
            incar.update({k: v for k, v in self.prev_incar.items()})
        # Overwrite necessary INCAR parameters from previous runs
        incar.update({"ISYM": -1, "LSORBIT": "T", "ICHARG": 11,
                      "SAXIS": list(self.saxis)})
        incar.update(self.kwargs.get("user_incar_settings", {}))
        return incar
    @classmethod
    def from_prev_calc(cls, prev_calc_dir, copy_chgcar=True,
                       nbands_factor=1.2, standardize=False, sym_prec=0.1,
                       international_monoclinic=True, reciprocal_density=100,
                       small_gap_multiply=None, **kwargs):
        """
        Generate a set of Vasp input files for SOC calculations from a
        directory of previous static Vasp run. SOC calc requires all 3
        components for MAGMOM for each atom in the structure.
        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
            nbands_factor (float): Multiplicative factor for NBANDS. Choose a
                higher number if you are doing an LOPTICS calculation.
            standardize (float): Whether to standardize to a primitive
                standard cell. Defaults to False.
            sym_prec (float): Tolerance for symmetry finding. If not 0,
                the final structure from the previous run will be symmetrized
                to get a primitive standard cell. Set to 0 if you don't want
                that.
            international_monoclinic (bool): Whether to use international
                convention (vs Curtarolo) for monoclinic. Defaults True.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            \\*\\*kwargs: All kwargs supported by MPSOCSet,
                other than structure, prev_incar and prev_chgcar which
                are determined from the prev_calc_dir.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        incar = vasprun.incar
        # Get a magmom-decorated structure
        structure = get_structure_from_prev_run(
            vasprun, outcar, sym_prec=standardize and sym_prec,
            international_monoclinic=international_monoclinic)
        # override magmom if provided
        if kwargs.get("magmom", None):
            structure = structure.copy(
                site_properties={"magmom": kwargs["magmom"]})
            kwargs.pop("magmom", None)
        # magmom has to be 3D for SOC calculation.
        if hasattr(structure[0], "magmom"):
            if not isinstance(structure[0].magmom, list):
                # Promote collinear z-moments to 3-component vectors.
                structure = structure.copy(site_properties={
                    "magmom": [[0, 0, site.magmom] for site in structure]})
        else:
            # (Typo "mamgom" in the original message fixed.)
            raise ValueError("Neither the previous structure has magmom "
                             "property nor magmom provided")
        nbands = int(np.ceil(vasprun.parameters["NBANDS"] * nbands_factor))
        incar.update({"NBANDS": nbands})
        files_to_transfer = {}
        if copy_chgcar:
            chgcars = sorted(glob.glob(os.path.join(prev_calc_dir, "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        # multiply the reciprocal density if needed (small-gap systems get a
        # denser k-mesh):
        if small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= small_gap_multiply[0]:
                reciprocal_density = reciprocal_density * small_gap_multiply[1]
        return cls(structure, prev_incar=incar,
                   files_to_transfer=files_to_transfer,
                   reciprocal_density=reciprocal_density, **kwargs)
class MPNMRSet(MPStaticSet):
    """Input set for VASP NMR calculations (chemical shift or electric
    field gradient), built on :class:`MPStaticSet`."""
    def __init__(self, structure, mode="cs", isotopes=None,
                 prev_incar=None, reciprocal_density=100, **kwargs):
        """
        Init a MPNMRSet.
        Args:
            structure (Structure): Structure to compute
            mode (str): The NMR calculation to run
                "cs": for Chemical Shift
                "efg" for Electric Field Gradient
            isotopes (list): list of Isotopes for quadrupole moments
            prev_incar (Incar): Incar file from previous run.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            \\*\\*kwargs: kwargs supported by MPStaticSet.
        """
        self.mode = mode
        self.isotopes = isotopes or []
        super(MPNMRSet, self).__init__(
            structure, prev_incar=prev_incar,
            reciprocal_density=reciprocal_density, **kwargs)
    @property
    def incar(self):
        """INCAR with mode-specific NMR tags layered on top of the static
        set; user settings take final precedence."""
        incar = super(MPNMRSet, self).incar
        mode = self.mode.lower()
        if mode == "cs":
            incar.update({
                "LCHIMAG": True,
                "EDIFF": -1.0e-10,
                "ISYM": 0,
                "LCHARG": False,
                "LNMR_SYM_RED": True,
                "NELMIN": 10,
                "NSLPLINE": True,
                "PREC": "ACCURATE",
                "SIGMA": 0.01,
            })
        elif mode == "efg":
            # Map element symbol -> requested isotope (e.g. "O-17" -> "O").
            isotope_for = {iso.split("-")[0]: iso for iso in self.isotopes}
            quad_efg = []
            for symbol in self.poscar.site_symbols:
                quad_efg.append(Specie(symbol).get_nmr_quadrupole_moment(
                    isotope_for.get(symbol, None)))
            incar.update({
                "ALGO": "FAST",
                "EDIFF": -1.0e-10,
                "ISYM": 0,
                "LCHARG": False,
                "LEFG": True,
                "QUAD_EFG": quad_efg,
                "NELMIN": 10,
                "PREC": "ACCURATE",
                "SIGMA": 0.01,
            })
        incar.update(self.kwargs.get("user_incar_settings", {}))
        return incar
class MVLElasticSet(MPRelaxSet):
    """
    MVL denotes VASP input sets that are implemented by the Materials Virtual
    Lab (http://www.materialsvirtuallab.org) for various research.
    This input set is used to calculate elastic constants in VASP. It is used
    in the following work::
        Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.
        “Elastic Properties of Alkali Superionic Conductor Electrolytes
        from First Principles Calculations”, J. Electrochem. Soc.
        2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes
    To read the elastic constants, you may use the Outcar class which parses the
    elastic constants.
    Args:
        potim (float): POTIM parameter. The default of 0.015 is usually fine,
            but some structures may require a smaller step.
        user_incar_settings (dict): A dict specifying additional incar
            settings.
    """
    def __init__(self, structure, potim=0.015, **kwargs):
        super(MVLElasticSet, self).__init__(structure, **kwargs)
        # IBRION=6 with NFREE=2 selects VASP's finite-difference scheme for
        # the elastic tensor; POTIM sets the displacement step size.
        self._config_dict["INCAR"].update({"IBRION": 6, "NFREE": 2,
                                           "POTIM": potim})
        # NOTE(review): NPAR is removed here, presumably because VASP's
        # finite-difference runs do not support it — confirm against the
        # VASP manual.
        self._config_dict["INCAR"].pop("NPAR", None)
class MVLGWSet(DictSet):
    """
    MVL denotes VASP input sets that are implemented by the Materials Virtual
    Lab (http://www.materialsvirtuallab.org) for various research. This is a
    flexible input set for GW calculations.
    Note that unlike all other input sets in this module, the PBE_54 series of
    functional is set as the default. These have much improved performance for
    GW calculations.
    """
    # Base INCAR/KPOINTS/POTCAR settings loaded from the packaged YAML.
    CONFIG = _load_yaml_config("MVLGWSet")
    SUPPORTED_MODES = ("DIAG", "GW", "STATIC", "BSE")
    def __init__(self, structure, prev_incar=None, nbands=None,
                 potcar_functional="PBE_54",
                 reciprocal_density=100, mode="STATIC", **kwargs):
        """
        A typical sequence is mode="STATIC" -> mode="DIAG" -> mode="GW" ->
        mode="BSE". For all steps other than the first one (static), the
        recommendation is to use from_prev_calculation on the preceding run in
        the series.
        Args:
            structure (Structure): Input structure.
            prev_incar (Incar/string): Incar file from previous run.
            mode (str): Supported modes are "STATIC" (default), "DIAG", "GW",
                and "BSE".
            nbands (int): For subsequent calculations, it is generally
                recommended to perform NBANDS convergence starting from the
                NBANDS of the previous run for DIAG, and to use the exact same
                NBANDS for GW and BSE. This parameter is used by
                from_previous_calculation to set nband.
            potcar_functional (str): Defaults to "PBE_54".
            \\*\\*kwargs: All kwargs supported by DictSet. Typically,
                user_incar_settings is a commonly used option.
        """
        super(MVLGWSet, self).__init__(
            structure, MVLGWSet.CONFIG, **kwargs)
        self.prev_incar = prev_incar
        self.nbands = nbands
        self.potcar_functional = potcar_functional
        self.reciprocal_density = reciprocal_density
        # Mode is case-insensitive; validate against SUPPORTED_MODES.
        self.mode = mode.upper()
        if self.mode not in MVLGWSet.SUPPORTED_MODES:
            raise ValueError("%s not one of the support modes : %s" %
                             (self.mode, MVLGWSet.SUPPORTED_MODES))
        self.kwargs = kwargs
    @property
    def kpoints(self):
        """
        Generate gamma center k-points mesh grid for GW calc,
        which is requested by GW calculation.
        """
        return Kpoints.automatic_density_by_vol(self.structure,
                                                self.reciprocal_density,
                                                force_gamma=True)
    @property
    def incar(self):
        """INCAR for the selected mode. Note that a supplied prev_incar
        REPLACES (rather than merges with) the parent set's INCAR as the
        starting point; user settings are applied last."""
        parent_incar = super(MVLGWSet, self).incar
        incar = Incar(self.prev_incar) if self.prev_incar is not None else \
            Incar(parent_incar)
        if self.mode == "DIAG":
            # Default parameters for diagonalization calculation.
            incar.update({
                "ALGO": "Exact",
                "NELM": 1,
                "LOPTICS": True,
                "LPEAD": True
            })
        elif self.mode == "GW":
            # Default parameters for GW calculation.
            incar.update({
                "ALGO": "GW0",
                "NELM": 1,
                "NOMEGA": 80,
                "ENCUTGW": 250
            })
            # Drop tags carried over from the preceding DIAG step that do
            # not apply to the GW step.
            incar.pop("EDIFF", None)
            incar.pop("LOPTICS", None)
            incar.pop("LPEAD", None)
        elif self.mode == "BSE":
            # Default parameters for BSE calculation.
            incar.update({
                "ALGO": "BSE",
                "ANTIRES": 0,
                "NBANDSO": 20,
                "NBANDSV": 20
            })
        if self.nbands:
            incar["NBANDS"] = self.nbands
        # Respect user set INCAR.
        incar.update(self.kwargs.get("user_incar_settings", {}))
        return incar
    @classmethod
    def from_prev_calc(cls, prev_calc_dir, copy_wavecar=True, mode="DIAG",
                       nbands_factor=5, ncores=16, **kwargs):
        """
        Generate a set of Vasp input files for GW or BSE calculations from a
        directory of previous Exact Diag Vasp run.
        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml of previous vasp run.
            copy_wavecar: Whether to copy the old WAVECAR, WAVEDER and
                associated files. Defaults to True.
            mode (str): Supported modes are "STATIC", "DIAG" (default), "GW",
                and "BSE".
            nbands_factor (int): Multiplicative factor for NBANDS. Only applies
                if mode=="DIAG". Need to be tested for convergence.
            ncores (int): numbers of cores you do calculations. VASP will alter
                NBANDS if it was not dividable by ncores. Only applies
                if mode=="DIAG".
            \\*\\*kwargs: All kwargs supported by MVLGWSet,
                other than structure, prev_incar and mode, which
                are determined from the prev_calc_dir.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        prev_incar = vasprun.incar
        structure = vasprun.final_structure
        nbands = int(vasprun.parameters["NBANDS"])
        if mode.upper() == "DIAG":
            # Round the scaled NBANDS up to a multiple of ncores, since VASP
            # alters NBANDS that is not divisible by the core count.
            nbands = int(np.ceil(nbands * nbands_factor / ncores) * ncores)
        # copy WAVECAR, WAVEDER (derivatives)
        files_to_transfer = {}
        if copy_wavecar:
            for fname in ("WAVECAR", "WAVEDER", "WFULL"):
                w = sorted(glob.glob(os.path.join(prev_calc_dir, fname + "*")))
                if w:
                    if fname == "WFULL":
                        # WFULL output spans several files; transfer each one
                        # under its extension-stripped base name.
                        for f in w:
                            fname = os.path.basename(f)
                            fname = fname.split(".")[0]
                            files_to_transfer[fname] = f
                    else:
                        # Otherwise transfer only the last file in sorted
                        # order.
                        files_to_transfer[fname] = str(w[-1])
        return cls(structure=structure, prev_incar=prev_incar,
                   nbands=nbands, mode=mode,
                   files_to_transfer=files_to_transfer, **kwargs)
class MVLSlabSet(MPRelaxSet):
    """
    Input set for slab calculations (along the c direction) and the matching
    oriented unit cells (bulk), keeping the KPOINTS, POTCAR and INCAR
    criteria consistent between the two.
    Args:
        k_product: default to 50, kpoint number * length for a & b directions,
            also for c direction in bulk calculations
        bulk (bool): Set to True for bulk calculation. Defaults to False.
        **kwargs:
            Other kwargs supported by :class:`DictSet`.
    """
    def __init__(self, structure, k_product=50, bulk=False,
                 auto_dipole=False, set_mix=True, sort_structure=True, **kwargs):
        # Initialize the parent with the as-given structure first, then
        # (optionally) replace self.structure with a sorted copy.
        super(MVLSlabSet, self).__init__(structure, **kwargs)
        if sort_structure:
            structure = structure.get_sorted_structure()
        self.structure = structure
        self.k_product = k_product
        self.bulk = bulk
        self.auto_dipole = auto_dipole
        self.kwargs = kwargs
        self.set_mix = set_mix
        # Overrides shared by bulk and slab runs.
        overrides = {"EDIFF": 1e-4, "EDIFFG": -0.02, "ENCUT": 400,
                     "ISMEAR": 0, "SIGMA": 0.05, "ISIF": 3}
        if not self.bulk:
            # Slab runs: relax ions only and write the local potential.
            overrides["ISIF"] = 2
            overrides["LVTOT"] = True
            if self.set_mix:
                overrides["AMIN"] = 0.01
                overrides["AMIX"] = 0.2
                overrides["BMIX"] = 0.001
            overrides["NELMIN"] = 8
            if self.auto_dipole:
                # Center the dipole correction on the slab's center of mass.
                site_weights = [site.species_and_occu.weight
                                for site in structure]
                center_of_mass = np.average(structure.frac_coords,
                                            weights=site_weights, axis=0)
                overrides["IDIPOL"] = 3
                overrides["LDIPOL"] = True
                overrides["DIPOL"] = center_of_mass
        self._config_dict["INCAR"].update(overrides)
    @property
    def kpoints(self):
        """
        k_product, default to 50, is kpoint number * length for a & b
        directions, also for c direction in bulk calculations
        Automatic mesh & Gamma is the default setting.
        """
        # Slab-specific attributes don't affect the VASP inputs, so the
        # parent (Structure-based) kpoints are a valid starting point.
        kpt = super(MVLSlabSet, self).kpoints
        kpt.comment = "Automatic mesh"
        kpt.style = 'Gamma'
        # k_product = kpts[0][0] * a; one k-point along c for slabs.
        a, b, c = self.structure.lattice.abc
        kpt_calc = [int(self.k_product / a + 0.5),
                    int(self.k_product / b + 0.5), 1]
        self.kpt_calc = kpt_calc
        if self.bulk:
            # Bulk runs also sample along c.
            kpt_calc[2] = int(self.k_product / c + 0.5)
        kpt.kpts[0] = kpt_calc
        return kpt
    def as_dict(self, verbosity=2):
        """MSON dict; verbosity=1 omits the (potentially large) structure."""
        dct = MSONable.as_dict(self)
        if verbosity == 1:
            dct.pop("structure", None)
        return dct
class MVLGBSet(MPRelaxSet):
    """
    Input set for grain boundary calculations, either slab-like or bulk.
    Args:
        structure(Structure): provide the structure
        k_product: Kpoint number * length for a & b directions, also for c
            direction in bulk calculations. Default to 40.
        slab_mode (bool): Defaults to False. Use default (False) for a
            bulk supercell. Use True if you are performing calculations on a
            slab-like (i.e., surface) of the GB, for example, when you are
            calculating the work of separation.
        is_metal (bool): Defaults to True. This determines whether an ISMEAR of
            1 is used (for metals) or not (for insulators and semiconductors)
            by default. Note that it does *not* override user_incar_settings,
            which can be set by the user to be anything desired.
        **kwargs:
            Other kwargs supported by :class:`DictVaspInputSet`.
    """
    def __init__(self, structure, k_product=40, slab_mode=False, is_metal=True,
                 **kwargs):
        super(MVLGBSet, self).__init__(structure, **kwargs)
        self.structure = structure
        self.k_product = k_product
        self.slab_mode = slab_mode
        self.is_metal = is_metal
    @property
    def kpoints(self):
        """
        k_product, default to 40, is kpoint number * length for a & b
        directions, also for c direction in bulk calculations
        Automatic mesh & Gamma is the default setting.
        """
        kpt = super(MVLGBSet, self).kpoints
        kpt.comment = "Generated by pymatgen's MVLGBSet"
        kpt.style = 'Gamma'
        # k_product = kpts[0][0] * a, for each lattice direction.
        a, b, c = self.structure.lattice.abc
        kpt_calc = [int(self.k_product / a + 0.5),
                    int(self.k_product / b + 0.5),
                    int(self.k_product / c + 0.5)]
        if self.slab_mode:
            # Slab-like cells only need a single k-point along c.
            kpt_calc[2] = 1
        kpt.kpts[0] = kpt_calc
        return kpt
    @property
    def incar(self):
        """INCAR with GB relaxation defaults; the base settings target a
        metallic system and are adjusted for insulators/semiconductors
        (ISMEAR) and slab-like cells (ISIF)."""
        incar = super(MVLGBSet, self).incar
        incar.update({
            "EDIFF": 0.0001,
            "EDIFFG": -0.02,
            "ICHARG": 0,
            "LCHARG": False,
            "NELM": 60,
            "NSW": 200,
            "PREC": "Normal",
        })
        if self.is_metal:
            incar["ISMEAR"] = 1
            incar["LDAU"] = False
        if self.slab_mode:
            # Full cell optimization (ISIF=3) for clean GB/bulk relaxation;
            # ions-only relaxation (ISIF=2) for slab-like cells.
            incar["ISIF"] = 2
            incar["NELMIN"] = 8
        # User settings always win.
        incar.update(self.user_incar_settings)
        return incar
class MITNEBSet(MITRelaxSet):
    """
    Class for writing NEB inputs. Note that EDIFF is not on a per atom
    basis for this input set.
    Args:
        unset_encut (bool): Whether to unset ENCUT.
        \\*\\*kwargs: Other kwargs supported by :class:`DictSet`.
    """
    def __init__(self, structures, unset_encut=False, **kwargs):
        if len(structures) < 3:
            raise ValueError("You need at least 3 structures for an NEB.")
        # Site order must stay consistent across images.
        kwargs["sort_structure"] = False
        super(MITNEBSet, self).__init__(structures[0], **kwargs)
        self.structures = self._process_structures(structures)
        # Bugfix: this was hard-coded to False, so the attribute never
        # reflected the constructor argument (breaking e.g. MSONable
        # round-trips of sets created with unset_encut=True).
        self.unset_encut = unset_encut
        if unset_encut:
            self._config_dict["INCAR"].pop("ENCUT", None)
        if "EDIFF" not in self._config_dict["INCAR"]:
            # NEB uses an absolute EDIFF rather than a per-atom value.
            self._config_dict["INCAR"]["EDIFF"] = self._config_dict[
                "INCAR"].pop("EDIFF_PER_ATOM")
        # NEB specific defaults
        defaults = {'IMAGES': len(structures) - 2, 'IBRION': 1, 'ISYM': 0,
                    'LCHARG': False, "LDAU": False}
        self._config_dict["INCAR"].update(defaults)
    @property
    def poscar(self):
        """Poscar for the first (starting endpoint) structure."""
        return Poscar(self.structures[0])
    @property
    def poscars(self):
        """List of Poscars, one per image (endpoints included)."""
        return [Poscar(s) for s in self.structures]
    def _process_structures(self, structures):
        """
        Remove any atom jumps across the cell
        """
        input_structures = structures
        structures = [input_structures[0]]
        for s in input_structures[1:]:
            prev = structures[-1]
            for i in range(len(s)):
                # Translate by the nearest lattice vector so consecutive
                # images differ by at most half a cell in each direction.
                t = np.round(prev[i].frac_coords - s[i].frac_coords)
                if np.any(np.abs(t) > 0.5):
                    s.translate_sites([i], t, to_unit_cell=False)
            structures.append(s)
        return structures
    def write_input(self, output_dir, make_dir_if_not_present=True,
                    write_cif=False, write_path_cif=False,
                    write_endpoint_inputs=False):
        """
        NEB inputs have a special directory structure where inputs are in 00,
        01, 02, ....
        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            write_cif (bool): If true, writes a cif along with each POSCAR.
            write_path_cif (bool): If true, writes a cif for each image.
            write_endpoint_inputs (bool): If true, writes input files for
                running endpoint calculations.
        """
        if make_dir_if_not_present and not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # Shared inputs at the top level; per-image POSCARs in 00, 01, ...
        self.incar.write_file(os.path.join(output_dir, 'INCAR'))
        self.kpoints.write_file(os.path.join(output_dir, 'KPOINTS'))
        self.potcar.write_file(os.path.join(output_dir, 'POTCAR'))
        for i, p in enumerate(self.poscars):
            d = os.path.join(output_dir, str(i).zfill(2))
            if not os.path.exists(d):
                os.makedirs(d)
            p.write_file(os.path.join(d, 'POSCAR'))
            if write_cif:
                p.structure.to(filename=os.path.join(d, '{}.cif'.format(i)))
        if write_endpoint_inputs:
            # Endpoint (00 and NN) relaxations reuse MITRelaxSet with the
            # same user INCAR settings.
            end_point_param = MITRelaxSet(
                self.structures[0],
                user_incar_settings=self.user_incar_settings)
            for image in ['00', str(len(self.structures) - 1).zfill(2)]:
                end_point_param.incar.write_file(os.path.join(output_dir, image, 'INCAR'))
                end_point_param.kpoints.write_file(os.path.join(output_dir, image, 'KPOINTS'))
                end_point_param.potcar.write_file(os.path.join(output_dir, image, 'POTCAR'))
        if write_path_cif:
            # Overlay the sites of every image in one structure so the whole
            # path can be visualized at once.
            sites = set()
            lattice = self.structures[0].lattice
            for site in chain(*(s.sites for s in self.structures)):
                sites.add(PeriodicSite(site.species_and_occu, site.frac_coords,
                                       lattice))
            nebpath = Structure.from_sites(sorted(sites))
            nebpath.to(filename=os.path.join(output_dir, 'path.cif'))
class MITMDSet(MITRelaxSet):
    """
    Class for writing a vasp md run. This DOES NOT do multiple stage
    runs.
    """
    def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2,
                 spin_polarized=False, **kwargs):
        """
        Args:
            structure (Structure): Input structure.
            start_temp (int): Starting temperature.
            end_temp (int): Final temperature.
            nsteps (int): Number of time steps for simulations. The NSW parameter.
            time_step (int): The time step for the simulation. The POTIM
                parameter. Defaults to 2fs.
            spin_polarized (bool): Whether to do spin polarized calculations.
                The ISPIN parameter. Defaults to False.
            \\*\\*kwargs: Other kwargs supported by :class:`DictSet`.
        """
        ispin = 2 if spin_polarized else 1
        # Default INCAR settings for an ab initio MD run.
        md_defaults = {'TEBEG': start_temp, 'TEEND': end_temp, 'NSW': nsteps,
                       'EDIFF_PER_ATOM': 0.000001, 'LSCALU': False,
                       'LCHARG': False,
                       'LPLANE': False, 'LWAVE': True, 'ISMEAR': 0,
                       'NELMIN': 4, 'LREAL': True, 'BMIX': 1,
                       'MAXMIX': 20, 'NELM': 500, 'NSIM': 4, 'ISYM': 0,
                       'ISIF': 0, 'IBRION': 0, 'NBLOCK': 1, 'KBLOCK': 100,
                       'SMASS': 0, 'POTIM': time_step, 'PREC': 'Low',
                       'ISPIN': ispin,
                       "LDAU": False}
        super(MITMDSet, self).__init__(structure, **kwargs)
        self.start_temp = start_temp
        self.end_temp = end_temp
        self.nsteps = nsteps
        self.time_step = time_step
        self.spin_polarized = spin_polarized
        self.kwargs = kwargs
        # use VASP default ENCUT
        self._config_dict["INCAR"].pop('ENCUT', None)
        if ispin == 1:
            # MAGMOM is meaningless without spin polarization.
            self._config_dict["INCAR"].pop('MAGMOM', None)
        self._config_dict["INCAR"].update(md_defaults)
    @property
    def kpoints(self):
        """Gamma-only k-point sampling for MD."""
        return Kpoints.gamma_automatic()
class MVLNPTMDSet(MITMDSet):
    """
    Class for writing a vasp md run in NPT ensemble.
    """
    def __init__(self, structure, start_temp, end_temp, nsteps, time_step=2,
                 spin_polarized=False, **kwargs):
        """
        Notes:
            To eliminate Pulay stress, the default ENCUT is set to a rather
            large value of ENCUT, which is 1.5 * ENMAX.
        Args:
            structure (Structure): input structure.
            start_temp (int): Starting temperature.
            end_temp (int): Final temperature.
            nsteps(int): Number of time steps for simulations. The NSW parameter.
            time_step (int): The time step for the simulation. The POTIM
                parameter. Defaults to 2fs.
            spin_polarized (bool): Whether to do spin polarized calculations.
                The ISPIN parameter. Defaults to False.
            \\*\\*kwargs: Other kwargs supported by :class:`DictSet`.
        """
        # NPT-AIMD defaults (Langevin thermostat via MDALGO/LANGEVIN_GAMMA);
        # any user-specified settings win on conflict.
        npt_defaults = {"IALGO": 48,
                        "ISIF": 3,
                        "LANGEVIN_GAMMA": [10] * structure.ntypesp,
                        "LANGEVIN_GAMMA_L": 1,
                        "MDALGO": 3,
                        "PMASS": 10,
                        "PSTRESS": 0,
                        "SMASS": 0}
        npt_defaults.update(kwargs.get("user_incar_settings", {}))
        kwargs["user_incar_settings"] = npt_defaults
        super(MVLNPTMDSet, self).__init__(structure, start_temp, end_temp,
                                          nsteps, time_step, spin_polarized,
                                          **kwargs)
        # Set ENCUT to 1.5x the largest POTCAR ENMAX to reduce Pulay stress
        # in this variable-cell run.
        enmax_values = (self.potcar[i].keywords['ENMAX']
                        for i in range(structure.ntypesp))
        self._config_dict["INCAR"]["ENCUT"] = max(enmax_values) * 1.5
class MVLScanRelaxSet(MPRelaxSet):
    """
    Class for writing a relax input set using Strongly Constrained and
    Appropriately Normed (SCAN) semilocal density functional.
    """
    def __init__(self, structure, potcar_functional="PBE_52", **kwargs):
        """
        Notes:
            1. This functional is only available from VASP.5.4.3 upwards.
            2. Meta-GGA calculations require POTCAR files that include
            information on the kinetic energy density of the core-electrons,
            i.e. "PBE_52" or "PBE_54". Make sure the POTCAR including the
            following lines (see VASP wiki for more details):
                $ grep kinetic POTCAR
                kinetic energy-density
                mkinetic energy-density pseudized
                kinetic energy density (partial)
        Args:
            structure (Structure): input structure.
            potcar_functional (str): choose from "PBE_52" and "PBE_54".
            vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
                van der Waals density functional by combing the SCAN functional
                with the rVV10 non-local correlation functional.
            \\*\\*kwargs: Other kwargs supported by :class:`DictSet`.
        """
        # SCAN needs POTCARs carrying kinetic energy density information.
        if potcar_functional not in ("PBE_52", "PBE_54"):
            raise ValueError("SCAN calculations required PBE_52 or PBE_54!")
        super(MVLScanRelaxSet, self).__init__(
            structure, potcar_functional=potcar_functional,
            **kwargs)
        scan_overrides = {"ADDGRID": True,
                          "EDIFF": 1e-05,
                          "EDIFFG": -0.05,
                          "LASPH": True,
                          "LDAU": False,
                          "METAGGA": "SCAN",
                          "NELM": 200}
        self._config_dict["INCAR"].update(scan_overrides)
def get_vasprun_outcar(path, parse_dos=True, parse_eigen=True):
    """
    Locate and parse the vasprun.xml and OUTCAR of a previous calculation.
    Args:
        path (str): Directory containing the output files.
        parse_dos (bool): Whether to parse the DOS from vasprun.xml.
        parse_eigen (bool): Whether to parse eigenvalues from vasprun.xml.
    Returns:
        (Vasprun, Outcar) tuple.
    Raises:
        ValueError: If no vasprun.xml* or OUTCAR* file is found in path.
    """
    vruns = glob.glob(os.path.join(path, "vasprun.xml*"))
    outcars = glob.glob(os.path.join(path, "OUTCAR*"))
    if not vruns or not outcars:
        raise ValueError(
            "Unable to get vasprun.xml/OUTCAR from prev calculation in %s" %
            path)
    # Prefer the plain filenames; otherwise fall back to the last candidate
    # in sorted order (e.g. a suffixed copy such as vasprun.xml.gz).
    plain_vrun = os.path.join(path, "vasprun.xml")
    plain_outcar = os.path.join(path, "OUTCAR")
    vsfile = plain_vrun if plain_vrun in vruns else sorted(vruns)[-1]
    outcarfile = plain_outcar if plain_outcar in outcars \
        else sorted(outcars)[-1]
    return (Vasprun(str(vsfile), parse_dos=parse_dos,
                    parse_eigen=parse_eigen),
            Outcar(str(outcarfile)))
def get_structure_from_prev_run(vasprun, outcar=None, sym_prec=0.1,
                                international_monoclinic=True):
    """
    Process structure from previous run.
    Args:
        vasprun (Vasprun): Vasprun that contains the final structure
            from previous run.
        outcar (Outcar): Outcar that contains the magnetization info from
            previous run.
        sym_prec (float): Tolerance for symmetry finding for standardization. If
            no standardization is desired, set to 0 or a False.
        international_monoclinic (bool): Whether to use international
            convention (vs Curtarolo) for monoclinic. Defaults True.
    Returns:
        Returns the magmom-decorated structure that can be passed to get
        Vasp input files, e.g. get_kpoints.
    """
    structure = vasprun.final_structure
    site_properties = {}
    # magmom
    if vasprun.is_spin:
        if outcar and outcar.magnetization:
            # Prefer the per-site total magnetization reported in the OUTCAR.
            site_properties.update({"magmom": [i['tot']
                                               for i in outcar.magnetization]})
        else:
            # Fall back to the MAGMOM values parsed from the vasprun.
            site_properties.update({"magmom": vasprun.parameters['MAGMOM']})
    # ldau
    if vasprun.parameters.get("LDAU", False):
        for k in ("LDAUU", "LDAUJ", "LDAUL"):
            vals = vasprun.incar[k]
            m = {}
            l = []
            s = 0
            # LDAU values in the INCAR are given once per species, in the
            # order species first appear; expand them to one value per site.
            for site in structure:
                if site.specie.symbol not in m:
                    m[site.specie.symbol] = vals[s]
                    s += 1
                l.append(m[site.specie.symbol])
            if len(l) == len(structure):
                site_properties.update({k.lower(): l})
            else:
                raise ValueError("length of list {} not the same as"
                                 "structure".format(l))
    structure = structure.copy(site_properties=site_properties)
    if sym_prec:
        sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
        new_structure = sym_finder.get_primitive_standard_structure(
            international_monoclinic=international_monoclinic)
        # the primitive structure finding has had several bugs in the past
        # defend through validation: volume per atom should be invariant
        # under standardization (within 2%), and the two structures should
        # match.
        vpa_old = structure.volume / structure.num_sites
        vpa_new = new_structure.volume / new_structure.num_sites
        if abs(vpa_old - vpa_new) / vpa_old > 0.02:
            raise ValueError(
                "Standardizing cell failed! VPA old: {}, VPA new: {}".format(
                    vpa_old, vpa_new))
        sm = StructureMatcher()
        if not sm.fit(structure, new_structure):
            raise ValueError(
                "Standardizing cell failed! Old structure doesn't match new.")
        structure = new_structure
    return structure
def batch_write_input(structures, vasp_input_set=MPRelaxSet, output_dir=".",
                      make_dir_if_not_present=True, subfolder=None,
                      sanitize=False, include_cif=False, **kwargs):
    """
    Batch write vasp input for a sequence of structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.
    Args:
        structures ([Structure]): Sequence of Structures.
        vasp_input_set (VaspInputSet): VaspInputSet class that creates
            vasp input files from structures. Note that a class should be
            supplied. Defaults to MPRelaxSet.
        output_dir (str): Directory to output files. Defaults to current
            directory ".".
        make_dir_if_not_present (bool): Create the directory if not present.
            Defaults to True.
        subfolder (callable): Function to create subdirectory name from
            structure. Defaults to simply "formula_count".
        sanitize (bool): Boolean indicating whether to sanitize the
            structure before writing the VASP input files. Sanitized output
            are generally easier for viewing and certain forms of analysis.
            Defaults to False.
        include_cif (bool): Whether to output a CIF as well. CIF files are
            generally better supported in visualization programs.
        \\*\\*kwargs: Additional kwargs are passed to the vasp_input_set class
            in addition to structure.
    """
    for index, structure in enumerate(structures):
        # Whitespace-free formula (e.g. "Fe2O3") for the default folder name.
        formula = re.sub(r'\s+', "", structure.formula)
        if subfolder is not None:
            dirname = os.path.join(output_dir, subfolder(structure))
        else:
            dirname = os.path.join(output_dir,
                                   '{}_{}'.format(formula, index))
        if sanitize:
            structure = structure.copy(sanitize=True)
        vis = vasp_input_set(structure, **kwargs)
        vis.write_input(dirname,
                        make_dir_if_not_present=make_dir_if_not_present,
                        include_cif=include_cif)
|
nisse3000/pymatgen
|
pymatgen/io/vasp/sets.py
|
Python
|
mit
| 77,824
|
[
"VASP",
"pymatgen"
] |
bc211262c2660e347edd50e357cf51f05ad2f7bd762c3f75a4b5c1f72dd25645
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.