text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# (C) British Crown Copyright 2011 - 2020, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import print_function
import fnmatch
import os
import subprocess
import sys
import warnings
from collections import defaultdict
from distutils.spawn import find_executable
from distutils.sysconfig import get_config_var
from setuptools import Command, Extension, convert_path, setup
import versioneer
"""
Distribution definition for Cartopy.
"""
# The existence of a PKG-INFO directory is enough to tell us whether this is a
# source installation or not (sdist).
HERE = os.path.dirname(__file__)
IS_SDIST = os.path.exists(os.path.join(HERE, 'PKG-INFO'))
FORCE_CYTHON = os.environ.get('FORCE_CYTHON', False)
if not IS_SDIST or FORCE_CYTHON:
import Cython
if Cython.__version__ < '0.28':
raise ImportError(
"Cython 0.28+ is required to install cartopy from source.")
from Cython.Distutils import build_ext as cy_build_ext
try:
import numpy as np
except ImportError:
raise ImportError('NumPy 1.10+ is required to install cartopy.')
PY3 = (sys.version_info[0] == 3)
# Please keep in sync with INSTALL file.
GEOS_MIN_VERSION = (3, 3, 3)
PROJ_MIN_VERSION = (4, 9, 0)
def file_walk_relative(top, remove=''):
    """
    Return a generator of files from the top of the tree, removing
    the given prefix from the root/file result.

    Parameters
    ----------
    top : str
        Directory to walk; '/' separators are converted to the OS separator.
    remove : str
        Leading path prefix to strip from each yielded path; '/' separators
        are converted as for *top*.

    """
    top = top.replace('/', os.path.sep)
    remove = remove.replace('/', os.path.sep)
    for root, dirs, files in os.walk(top):
        for file in files:
            path = os.path.join(root, file)
            # Strip *remove* only when it is a true leading prefix.  The
            # previous str.replace() also deleted matches occurring deeper
            # inside the path (e.g. a repeated directory name), corrupting
            # the result.
            if path.startswith(remove):
                path = path[len(remove):]
            yield path
def find_package_tree(root_path, root_package):
    """
    Return the package and all its sub-packages.

    Automated package discovery - extracted/modified from Distutils Cookbook:
    https://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery
    """
    packages = [root_package]
    # Accept a root_path with Linux path separators.
    root_path = root_path.replace('/', os.path.sep)
    depth = len(root_path.split(os.path.sep))
    for dir_path, dir_names, _ in os.walk(convert_path(root_path)):
        # Keep only sub-directories that are real packages (i.e. contain an
        # __init__.py).  Assigning through the slice prunes dir_names
        # *in-place*, which stops os.walk from descending into anything else.
        dir_names[:] = [
            name for name in dir_names
            if os.path.isfile(os.path.join(dir_path, name, '__init__.py'))]
        if dir_names:
            prefix = dir_path.split(os.path.sep)[depth:]
            packages.extend(
                '.'.join([root_package] + prefix + [name])
                for name in dir_names)
    return packages
class MissingHeaderError(Exception):
    """
    Error raised when one or more checked files lack the required
    copyright and licence header.
    """
class HeaderCheck(Command):
    """
    Checks that all the necessary files have the copyright and licence
    header.
    """

    description = "check for copyright/licence headers"
    user_options = []

    # Glob patterns (relative to the cwd) that are exempt from the check.
    exclude_patterns = ('./setup.py',
                        './build/*',
                        './docs/build/*',
                        './dist/*',
                        './lib/cartopy/examples/*.py')

    def initialize_options(self):
        # No options to initialise for this command.
        pass

    def finalize_options(self):
        # No options to validate for this command.
        pass

    def run(self):
        # Gather every Python and C source file below the current directory.
        candidates = [os.path.join(root, name)
                      for root, _, names in os.walk('.')
                      for name in names
                      if name.endswith(('.py', '.c'))]
        # Drop anything matching one of the exclusion patterns.
        for pattern in self.exclude_patterns:
            candidates = [path for path in candidates
                          if not fnmatch.fnmatch(path, pattern)]
        offenders = [path for path in candidates if self._header_bad(path)]
        if offenders:
            raise MissingHeaderError(offenders)

    def _header_bad(self, path):
        """Return True when *path* lacks the expected copyright header."""
        target = '(C) British Crown Copyright 2011 - 2012, Met Office'
        with open(path, 'rt') as text_file:
            # Check for the header on the first line.
            first = text_file.readline().rstrip()
            missing = target not in first
            # Executable scripts may start with a shebang, in which case the
            # header is allowed on the second line.
            if missing and first == '#!/usr/bin/env python':
                missing = target not in text_file.readline().rstrip()
        return missing
# Dependency checks
# =================

# GEOS
try:
    # geos-config reports the installed GEOS version and build flags.
    geos_version = subprocess.check_output(['geos-config', '--version'])
    geos_version = tuple(int(v) for v in geos_version.split(b'.'))
    geos_includes = subprocess.check_output(['geos-config', '--includes'])
    geos_clibs = subprocess.check_output(['geos-config', '--clibs'])
except (OSError, ValueError, subprocess.CalledProcessError):
    # geos-config is missing or produced unparsable output: warn and fall
    # back to the default library name, letting the compiler search paths.
    warnings.warn(
        'Unable to determine GEOS version. Ensure you have %s or later '
        'installed, or installation may fail.' % (
            '.'.join(str(v) for v in GEOS_MIN_VERSION), ))
    geos_includes = []
    geos_library_dirs = []
    geos_libraries = ['geos_c']
else:
    if geos_version < GEOS_MIN_VERSION:
        print('GEOS version %s is installed, but cartopy requires at least '
              'version %s.' % ('.'.join(str(v) for v in geos_version),
                               '.'.join(str(v) for v in GEOS_MIN_VERSION)),
              file=sys.stderr)
        exit(1)

    # check_output returns bytes; decode on Python 3 before splitting.
    if PY3:
        geos_includes = geos_includes.decode()
        geos_clibs = geos_clibs.decode()

    geos_includes = geos_includes.split()
    geos_libraries = []
    geos_library_dirs = []
    # Split "-L<dir>" / "-l<name>" linker flags into library dirs and names.
    for entry in geos_clibs.split():
        if entry.startswith('-L'):
            geos_library_dirs.append(entry[2:])
        elif entry.startswith('-l'):
            geos_libraries.append(entry[2:])
# Proj
def find_proj_version_by_program(conda=None):
    """
    Locate the ``proj`` executable and return its version as an int tuple.

    Exits the interpreter when the executable is missing (or, if *conda* is
    given, not found inside that conda environment's prefix).  Returns
    (0, 0, 0) when the version output cannot be parsed.
    """
    proj = find_executable('proj')
    if proj is None:
        print(
            'Proj %s must be installed.' % (
                '.'.join(str(v) for v in PROJ_MIN_VERSION), ),
            file=sys.stderr)
        exit(1)

    if conda is not None and conda not in proj:
        print(
            'Proj %s must be installed in Conda environment "%s".' % (
                '.'.join(str(v) for v in PROJ_MIN_VERSION), conda),
            file=sys.stderr)
        exit(1)

    try:
        # Running `proj` with no arguments prints a banner whose second
        # whitespace-separated field is the version, e.g. "Rel. 5.2.0, ...".
        banner = subprocess.check_output([proj], stderr=subprocess.STDOUT)
        fields = banner.split()[1].split(b'.')
        version = tuple(int(field.strip(b',')) for field in fields)
    except (OSError, IndexError, ValueError, subprocess.CalledProcessError):
        warnings.warn(
            'Unable to determine Proj version. Ensure you have %s or later '
            'installed, or installation may fail.' % (
                '.'.join(str(v) for v in PROJ_MIN_VERSION), ))
        version = (0, 0, 0)
    return version
def get_proj_libraries():
    """
    This function gets the PROJ libraries to cythonize with
    """
    # On Windows, PROJ >= 6 ships a versioned import library such as
    # "proj_6_0"; every other platform links the plain "proj" library.
    if os.name == "nt" and proj_version >= (6, 0, 0):
        return ["proj_{}_{}".format(proj_version[0], proj_version[1])]
    return ["proj"]
conda = os.getenv('CONDA_DEFAULT_ENV')
if conda is not None and conda in sys.prefix:
    # Conda does not provide pkg-config compatibility, but the search paths
    # should be set up so that nothing extra is required. We'll still check
    # the version, though.
    proj_version = find_proj_version_by_program(conda)
    if proj_version < PROJ_MIN_VERSION:
        print(
            'Proj version %s is installed, but cartopy requires at least '
            'version %s.' % ('.'.join(str(v) for v in proj_version),
                             '.'.join(str(v) for v in PROJ_MIN_VERSION)),
            file=sys.stderr)
        exit(1)

    proj_includes = []
    proj_libraries = get_proj_libraries()
    proj_library_dirs = []
else:
    try:
        # Preferred route: ask pkg-config for the proj version and flags.
        proj_version = subprocess.check_output(['pkg-config', '--modversion',
                                                'proj'],
                                               stderr=subprocess.STDOUT)
        proj_version = tuple(int(v) for v in proj_version.split(b'.'))
        proj_includes = subprocess.check_output(['pkg-config', '--cflags',
                                                 'proj'])
        proj_clibs = subprocess.check_output(['pkg-config', '--libs', 'proj'])
    except (OSError, ValueError, subprocess.CalledProcessError):
        # pkg-config is unavailable or has no 'proj' entry: fall back to
        # interrogating the proj executable directly.
        proj_version = find_proj_version_by_program()
        if proj_version < PROJ_MIN_VERSION:
            print(
                'Proj version %s is installed, but cartopy requires at least '
                'version %s.' % ('.'.join(str(v) for v in proj_version),
                                 '.'.join(str(v) for v in PROJ_MIN_VERSION)),
                file=sys.stderr)
            exit(1)

        proj_includes = []
        proj_libraries = get_proj_libraries()
        proj_library_dirs = []
    else:
        if proj_version < PROJ_MIN_VERSION:
            print(
                'Proj version %s is installed, but cartopy requires at least '
                'version %s.' % ('.'.join(str(v) for v in proj_version),
                                 '.'.join(str(v) for v in PROJ_MIN_VERSION)),
                file=sys.stderr)
            exit(1)

        # check_output returns bytes; decode on Python 3 before splitting.
        if PY3:
            proj_includes = proj_includes.decode()
            proj_clibs = proj_clibs.decode()

        # Strip the "-I" from include flags; split "-L"/"-l" linker flags
        # into library dirs and library names.
        proj_includes = [
            proj_include[2:] if proj_include.startswith('-I') else
            proj_include for proj_include in proj_includes.split()]

        proj_libraries = []
        proj_library_dirs = []
        for entry in proj_clibs.split():
            if entry.startswith('-L'):
                proj_library_dirs.append(entry[2:])
            elif entry.startswith('-l'):
                proj_libraries.append(entry[2:])
# Python dependencies
# Build the extras_require mapping from the files in requirements/:
# each file becomes one extras section named after the file stem.
extras_require = {}
for name in os.listdir(os.path.join(HERE, 'requirements')):
    with open(os.path.join(HERE, 'requirements', name), 'r') as fh:
        section, ext = os.path.splitext(name)
        extras_require[section] = []
        for line in fh:
            # Skip comment lines and pip option lines (e.g. "-e ...").
            if line.startswith('#'):
                pass
            elif line.startswith('-'):
                pass
            else:
                extras_require[section].append(line.strip())
install_requires = extras_require.pop('default')
tests_require = extras_require.pop('tests', [])

# General extension paths
if sys.platform.startswith('win'):
    # Deliberately shadow distutils' get_config_var on Windows, where
    # INCLUDEDIR/LIBDIR are not meaningful; '.' keeps the lists valid.
    def get_config_var(name):
        return '.'
include_dir = get_config_var('INCLUDEDIR')
library_dir = get_config_var('LIBDIR')
extra_extension_args = defaultdict(list)
if not sys.platform.startswith('win'):
    # Embed the runtime library path so the built extensions can find
    # their shared libraries without LD_LIBRARY_PATH.
    extra_extension_args["runtime_library_dirs"].append(
        get_config_var('LIBDIR')
    )

# Description
# ===========
with open(os.path.join(HERE, 'README.md'), 'r') as fh:
    description = ''.join(fh.readlines())

cython_coverage_enabled = os.environ.get('CYTHON_COVERAGE', None)
# Proj 6 deprecated proj_api.h; this macro acknowledges its continued use.
if proj_version >= (6, 0, 0):
    extra_extension_args["define_macros"].append(
        ('ACCEPT_USE_OF_DEPRECATED_PROJ_API_H', '1')
    )
if cython_coverage_enabled:
    extra_extension_args["define_macros"].append(
        ('CYTHON_TRACE_NOGIL', '1')
    )
# The compiled extension modules, built against the GEOS and Proj
# include/library paths discovered above.
extensions = [
    Extension(
        'cartopy.trace',
        ['lib/cartopy/trace.pyx'],
        include_dirs=([include_dir, './lib/cartopy', np.get_include()] +
                      proj_includes + geos_includes),
        libraries=proj_libraries + geos_libraries,
        library_dirs=[library_dir] + proj_library_dirs + geos_library_dirs,
        language='c++',
        **extra_extension_args),
    Extension(
        'cartopy._crs',
        ['lib/cartopy/_crs.pyx'],
        include_dirs=[include_dir, np.get_include()] + proj_includes,
        libraries=proj_libraries,
        library_dirs=[library_dir] + proj_library_dirs,
        **extra_extension_args),
    # Requires proj v4.9
    Extension(
        'cartopy.geodesic._geodesic',
        ['lib/cartopy/geodesic/_geodesic.pyx'],
        include_dirs=[include_dir, np.get_include()] + proj_includes,
        libraries=proj_libraries,
        library_dirs=[library_dir] + proj_library_dirs,
        **extra_extension_args),
]
if cython_coverage_enabled:
    # We need to explicitly cythonize the extension in order
    # to control the Cython compiler_directives.
    # linetrace/binding enable line-by-line coverage of the Cython code.
    from Cython.Build import cythonize
    directives = {'linetrace': True,
                  'binding': True}
    extensions = cythonize(extensions, compiler_directives=directives)
def decythonize(extensions, **_ignore):
    """
    Replace each extension's ``.pyx`` sources with the corresponding
    generated C/C++ file so an sdist can be built without Cython.

    Note: even if there are changes to the pyx files, they will be ignored.
    """
    for ext_module in extensions:
        translated = []
        for source in ext_module.sources:
            base, suffix = os.path.splitext(source)
            if suffix == '.pyx':
                # C++ extensions generate .cpp files; plain C generates .c.
                suffix = '.cpp' if ext_module.language == 'c++' else '.c'
                source = base + suffix
            translated.append(source)
        # Mutate in place so distutils' own references see the change.
        ext_module.sources[:] = translated
    return extensions
cmdclass = versioneer.get_cmdclass()

if IS_SDIST and not FORCE_CYTHON:
    # An sdist ships pre-generated C/C++ sources, so build from those.
    extensions = decythonize(extensions)
else:
    # Otherwise compile the .pyx sources with Cython's build_ext.
    cmdclass.update({'build_ext': cy_build_ext})


# Main setup
# ==========
setup(
    name='Cartopy',
    version=versioneer.get_version(),
    url='https://scitools.org.uk/cartopy/docs/latest/',
    download_url='https://github.com/SciTools/cartopy',
    author='UK Met Office',
    description='A cartographic python library with Matplotlib support for '
                'visualisation',
    long_description=description,
    long_description_content_type='text/markdown',
    license="LGPLv3",
    keywords="cartography map transform projection proj proj.4 geos shapely "
             "shapefile",

    install_requires=install_requires,
    extras_require=extras_require,
    tests_require=tests_require,

    packages=find_package_tree('lib/cartopy', 'cartopy'),
    package_dir={'': 'lib'},
    # Non-Python data files shipped inside the cartopy package; the
    # 'lib/cartopy/' prefix is stripped so paths are package-relative.
    package_data={'cartopy': list(file_walk_relative('lib/cartopy/tests/'
                                                     'mpl/baseline_images/',
                                                     remove='lib/cartopy/')) +
                             list(file_walk_relative('lib/cartopy/data/raster',
                                                     remove='lib/cartopy/')) +
                             list(file_walk_relative('lib/cartopy/data/netcdf',
                                                     remove='lib/cartopy/')) +
                             list(file_walk_relative('lib/cartopy/data/'
                                                     'shapefiles/gshhs',
                                                     remove='lib/cartopy/')) +
                             list(file_walk_relative('lib/cartopy/tests/lakes_shapefile',
                                                     remove='lib/cartopy/')) +
                             ['io/srtm.npz']},

    # requires proj headers
    ext_modules=extensions,
    cmdclass=cmdclass,
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: GNU Lesser General Public License v3 '
        'or later (LGPLv3+)',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: AIX',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: C++',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: GIS',
        'Topic :: Scientific/Engineering :: Visualization',
    ],
)
|
ocefpaf/cartopy
|
setup.py
|
Python
|
lgpl-3.0
| 17,000
|
[
"NetCDF"
] |
6f3e971832133d3850da88736e9f5f4ece0d09321d2f252be787bcac311d294a
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 João Faria
# This file is part of OPEN which is licensed under the MIT license.
# You should have received a copy of the license along with OPEN. See LICENSE.
#
"""
This module defines the commands that are used as magics in OPEN.
"""
# standard library imports
import glob
from os.path import expanduser
from itertools import chain
# IPython imports
from IPython.core.magic import (Magics, magics_class, line_magic,
needs_local_scope)
from IPython.core.magic_arguments import argument
# other imports
from numpy import sqrt, mean, min, delete, take
# intra-package imports
from .docopt import docopt, DocoptExit
from .classes import rvSeries
from .utils import stdout_write, ask_yes_no, write_yorbit_macro
from .logger import clogger, logging
import core
import periodograms
################################################################################
################################################################################
## command usage patterns
# Each *_usage string below is a docopt specification; parse_arg_string
# feeds it to docopt to validate and parse a magic command's arguments.

read_usage = \
"""
Usage:
read <file>...
read <file>... [-d] [--skip=<sn>] [-v] [--quiet] [--nomps]
read -h | --help
Options:
-d Set this as default system.
-v --verbose Verbose output about data just read.
--quiet Do not print any output.
--skip=<sn> How many header lines to skip [default: 2].
--nomps Do not convert data to m/s
-h --help Show this help message.
"""

saverdb_usage = \
"""
Usage:
saverdb <file>
saverdb -n SYSTEM
Options:
-n SYSTEM Specify name of system (else use default)
"""

plot_usage = \
"""
Usage:
plot (obs|fwhm|rhk|s|bis|contrast|resid) [--together=q] [--save=filename]
plot -n SYSTEM
plot -h | --help
Options:
-n SYSTEM Specify name of system (else use default)
--together=q Plot together with another quantity
--save=filename Save figure as filename
-h --help Show this help message
"""

per_usage = \
"""
Usage:
per
per [-n SYSTEM] (obs|bis|fwhm|rhk|contrast|resid) [-g|-m|-b|-l|-z|-r] [-v] [-f] [--hifac=<hf>] [--ofac=<of>] [--fap] [--bfap] [--save=filename] [--noplot] [--describe]
per -h | --help
Options:
-n SYSTEM Specify name of system (else use default)
-g --gls Calculate the Generalized Lomb-Scargle periodogram (default)
-m --bgls Calculate the Bayesian Generalized Lomb-Scargle periodogram
-b --bayes Calculate the Bayesian LS periodogram
-l --ls Calculate the Lomb-Scargle periodogram with fast algorithm
-z --hoef Calculate the Hoeffding-test "periodogram" with Zucker's algorithm
-r --multiband Calculate the multiband periodogram; Vanderplas & Ivezic (2015)
-f --force Force recalculation
--hifac=<hf> hifac * Nyquist is lowest frequency used [default: 40]
--ofac=<of> Oversampling factor [default: 6]
--fap Plot false alarm probabilities
--bfap Plot false alarm probabilities calculated using bootstrap
--save=filename Save figure as filename
--noplot Don't plot the periodogram (just creates system.per* instance)
-v --verbose Verbose statistical output
--describe Show a very detailed help message
-h --help Show this help message
"""

wf_usage = \
"""
Usage:
wf
wf -n SYSTEM
wf [--dials] [--freq]
wf -h | --help
Options:
-n SYSTEM Specify name of system (else use default)
--dials Plot phase "dials" in largest (3) peaks
--freq Plot as a function of frequency
-h --help Show this help message
"""

dawfab_usage = \
"""
Usage:
dawfab
dawfab -n SYSTEM
dawfab -h | --help
Options:
-n SYSTEM Specify name of system (else use default)
-h --help Show this help message
"""

fit_usage = \
"""
Usage:
fit [-v]
Options:
-v --verbose Verbose statistical output
"""

correlate_usage = \
"""
Usage:
correlate <var1> <var2> [-v] [-r] [--chunks]
Options:
-v --verbose Verbose statistical output
-r --remove Remove linear dependence from RV
--chunks Remove linear dependence on individual chunks
"""

de_usage = \
"""
Usage:
de [--npop=<pop>] [--ngen=<gen>]
de -h | --help
Options:
--npop=<pop> Number of individuals in population [default: 100]
--ngen=<gen> Number of generations to evolve [default: 250]
-h --help Show this help message
"""

demc_usage = \
"""
Usage:
demc [<zipfile>]
demc -n SYSTEM
Options:
-n SYSTEM Specify name of system (else use default)
"""

gen_usage = \
"""
Usage:
gen [--npop=<pop>] [--ngen=<gen>]
gen -h | --help
Options:
--npop=<pop> Number of individuals in population [default: 100]
--ngen=<gen> Number of generations to evolve [default: 250]
-h --help Show this help message
"""

rrot_usage = \
"""
Usage:
remove_rotation [fwhm]
remove_rotation [-n SYSTEM] [fwhm | rhk] [--prot=<p>] [--nrem=<nr>]
remove_rotation -h | --help
Options:
-n SYSTEM Specify name of system (else use default)
--prot=<p>
--nrem=<nr> Number of harmonics to remove (including Prot) [default: 1]
-h --help Show this help message
"""

addnoise_usage = \
"""
Usage:
add_noise <number>
add_noise -n SYSTEM <number>
add_noise -h | --help
Options
<number> Add <number> m/s to the RV uncertainties.
-n SYSTEM Specify name of system (else use default)
-h --help Show this help message
"""

nest_usage = \
"""
Usage:
nest
nest [options]
Options:
-u User sets the namelist file
-r Resume from a previous MultiNest run
-v Be verbose on output and plots
--gp Perform model selection within Gaussian Process framework
--jitter Include a jitter parameter (incompatible with --gp)
--train=None Train the GP on quantity before using it in the RVs
--skip-mcmc Skip the training MCMC: the user sets the appropriate namelist options
--lin=None Include linear dependence on quantity in the model
--ncpu=<cpu> Number of threads to use; by default use all available
--noplot Do not produce result plots
--saveplot Save all plots (does nothing if --noplot is given)
--feed Force feedback on the progress of the evidence calculation
--MAPfeed Force feedback on the current MAP parameters
--maxp=mp Maximum number of planets to include in automatic run [default: 3]
--restart Fully restart a previous automatic model selection run
--nml=None Specify the `full` path to the namelist file
--startp=None Comma-separated list of planets to start from the beginning (overide -r)
"""

restrict_usage = \
"""
Usage:
restrict [-n SYSTEM]
restrict [(err <maxerr>)]
restrict [(sn <maxsn>)]
restrict [(jd <minjd> <maxjd>)]
restrict [(year <yr>)]
restrict [(years <yr1> <yr2>)]
restrict --gui
restrict --index=None [--noask] [-n SYSTEM]
Options:
-n SYSTEM Specify name of system (else use default)
--gui Restrict data using a graphical interface (experimental)
--index=None Remove specific data points, providing their indices [default:None]
--noask Do not confirm if removing observations
"""

rotation_usage = \
"""
Usage:
rotation
rotation -n SYSTEM
rotation -h | --help
Options
-n SYSTEM Specify name of system (else use default)
-h --help Show this help message
"""

tp_mps_usage = \
"""
Usage:
to_mps
to_mps -n SYSTEM
to_mps -h | --help
Options
-n SYSTEM Specify name of system (else use default)
-h --help Show this help message
"""

create_usage = \
"""
Usage:
create
create np(%d) [p(%f)] [e(%f)] [k(%f)] [N(%d)] [out(%s)] [sample(%s)]
create --gui
Options:
np(%d) [p(%f)] [e(%f)] [k(%f)] [N(%d)] [out(%s)] [sample(%s)] Batch processing
--gui Create data using a graphical interface (experimental)
"""

# Plain help text shown by the `listcommands` magic.
command_list = \
"""
read Read RV files.
plot Plot various quantities.
per Calculate periodograms.
mod Define the model that will be adjusted to the data.
de Fit the model using a Differential Evolution algorithm (somewhat experimental...)
restrict Select data based on date, SNR or RV accuracy.
rotation Calculate rotation period from activity-rotation relation.
create Generate synthetic RV data.
killall Close all plot windows
"""
# These are additional magics that are exposed (only?) in embedded shells.
@magics_class
class EmbeddedMagics(Magics):
    @line_magic
    def develop(self, parameter_s=''):
        """ Re-import any modified OPEN modules and rebuild the compiled
        code (development helper). """
        # reload(classes)
        import reimport, os
        # Find every module changed on disk since import and reload it.
        mod = reimport.modified()
        reimport.reimport(*mod)
        print 'Done re-importing'
        # reload(periodograms)
        # reload(core)
        # NOTE(review): hard-coded developer path; this only works on the
        # original author's machine -- confirm before relying on it.
        os.system('python scons/scons.py --gfortran=/home/joao/Software/mesasdk/bin/gfortran')
    @needs_local_scope
    @line_magic
    def read(self, parameter_s='', local_ns=None):
        """ Read files with RV measurements.
        Type 'read -h' for more help """
        try:
            args = parse_arg_string('read', parameter_s)
        except DocoptExit:
            # Bad command line: show the usage text and bail out.
            print read_usage.lstrip()
            return
        except SystemExit:
            return

        # take care of glob (and tilde) expansions
        files = args['<file>']

        # hack for metal-poor files
        # NOTE(review): hard-coded data path specific to the original
        # author's machine.
        if len(files) == 1 and files[0].startswith('HD'):
            files = ['/home/joao/phd/data/'+files[0]+'_harps_mean_corr.rdb']
        ##

        globs = [glob.glob(expanduser(f)) for f in files]
        filenames = list(chain.from_iterable(globs))  # some magic...

        # if 'default' system is already set, return the rvSeries class
        # this is useful when working with various systems simultaneously so
        # that we can do, e.g., HDXXXX = %read file1 file2
        if not args['-d']:
            try:
                return rvSeries(*filenames, skip=args['--skip'], verbose=not args['--quiet'])
            except AttributeError:
                # NOTE(review): if rvSeries raised here, `default` is unbound
                # below and the remainder of the method will NameError.
                pass
        else:
            try:
                local_ns['default'] = rvSeries(*filenames, skip=args['--skip'], verbose=not args['--quiet'])
            except IOError:
                return
            default = local_ns['default']

        if args['--verbose'] and not args['--quiet']:
            default.stats()

        # Data with smallest error below 0.01 is assumed to be in km/s and is
        # converted to m/s (keeping the mean RV) unless --nomps was given.
        if (min(default.error) < 0.01 and not args['--nomps']):
            from shell_colors import blue
            mean_vrad = mean(default.vrad)
            if not args['--quiet']:
                # msg = blue('INFO: ') + 'Converting to m/s and subtracting mean value of %f' % mean_vrad
                msg = blue('INFO: ') + 'Converting to m/s'
                clogger.info(msg)
            default.vrad = (default.vrad - mean_vrad)*1e3 + mean_vrad
            default.error *= 1e3
            default.vrad_full = (default.vrad_full - mean(default.vrad_full))*1e3 + mean(default.vrad_full)
            default.error_full *= 1e3
            default.units = 'm/s'
    @needs_local_scope
    @line_magic
    def saverdb(self, parameter_s='', local_ns=None):
        """ Save current system's RV in a file """
        try:
            args = parse_arg_string('saverdb', parameter_s)
        except DocoptExit:
            print saverdb_usage.lstrip()
            return
        except SystemExit:
            return

        # use default system or user defined
        try:
            if 'default' in local_ns and not args['-n']:
                system = local_ns['default']
            else:
                system_name = args['-n']
                system = local_ns[system_name]
        except KeyError:
            from shell_colors import red
            msg = red('ERROR: ') + 'Set a default system or provide a system '+\
                  'name with the -n option'
            clogger.fatal(msg)
            return

        # NOTE(review): with the "saverdb -n SYSTEM" form <file> is None,
        # so system.save(None) is called -- confirm that is intended.
        filename = args['<file>']
        system.save(filename)
    @needs_local_scope
    @line_magic
    def plot(self, parameter_s='', local_ns=None):
        """ Plot various quantities.
        Type 'plot -h' for more help """
        try:
            args = parse_arg_string('plot', parameter_s)
        except DocoptExit:
            print plot_usage.lstrip()
            return
        except SystemExit:
            return
        # print args

        # use default system or user defined
        try:
            if 'default' in local_ns and not args['-n']:
                system = local_ns['default']
            else:
                system_name = args['-n']
                system = local_ns[system_name]
        except KeyError:
            from shell_colors import red
            msg = red('ERROR: ') + 'Set a default system or provide a system '+\
                  'name with the -n option'
            clogger.fatal(msg)
            return

        # Optional second quantity to over-plot with the requested one.
        together = False
        if args['--together']:
            together = True
            second_quantity = args['--together']

        # plot the observed radial velocities
        if args['obs']:
            if together: system.do_plot_obs_together(q=second_quantity, save=args['--save'])
            else: system.do_plot_obs(save=args['--save'])
        # plot residuals from fit
        if args['resid']:
            system.do_plot_resid(save=args['--save'])

        # plot other quantities
        # Map each plottable attribute to the docopt flag that selects it;
        # several attributes share one flag (e.g. s_mw and sig_s -> 's').
        extras_available = ['fwhm', 'contrast', 'bis_span', 'noise',
                            's_mw', 'sig_s', 'rhk', 'sig_rhk',
                            'sn_CaII', 'sn10', 'sn50', 'sn60']
        extras_mapping = ['fwhm', 'contrast', 'bis', 'noise',
                          's', 's', 'rhk', 'rhk',
                          'sn', 'sn', 'sn', 'sn']
        for i, e in enumerate(extras_available):
            try:
                if args[extras_mapping[i]]:
                    if together: system.do_plot_extras_together(e, save=args['--save'])
                    else: system.do_plot_extras(e, save=args['--save'])
                    # NOTE(review): returns after the first matching
                    # quantity, so only one extra is plotted per call.
                    return
            except KeyError:
                # Flag not defined in this usage pattern; skip it.
                pass
    @needs_local_scope
    @line_magic
    def per(self, parameter_s='', local_ns=None):
        """ Calculate periodograms of various quantities.
        Type 'per -h' for more help. """
        from shell_colors import red
        try:
            args = parse_arg_string('per', parameter_s)
        except DocoptExit:
            print per_usage.lstrip()
            return
        except SystemExit:
            return
        # print args

        if args['--describe']:
            print periodograms.help_text
            return

        # use default system or user defined
        try:
            if 'default' in local_ns and not args['-n']:
                system = local_ns['default']
            else:
                system_name = args['-n']
                system = local_ns[system_name]
        except KeyError:
            msg = red('ERROR: ') + 'Set a default system or provide a system '+\
                  'name with the -n option'
            clogger.fatal(msg)
            return

        # verb = True if args['--verbose'] else False
        # Periodogram tuning parameters (docopt supplies string defaults).
        hf = float(args.pop('--hifac'))
        of = float(args.pop('--ofac'))
        fap = args['--fap']
        bfap = args['--bfap']
        showplot = not args['--noplot']

        # which periodogram should be calculated?
        per_fcn = None
        if args['--hoef']:
            per_fcn = periodograms.hoeffding
            name = 'Hoeffding'
        elif args['--bgls']:
            per_fcn = periodograms.bgls
            name = 'Bayesian Generalized Lomb-Scargle'
        elif args['--bayes']:
            per_fcn = periodograms.bls
            name = 'Bayesian Lomb-Scargle'
        elif args['--ls']:
            per_fcn = periodograms.ls_PressRybicki
            name = 'Lomb Scargle'
        elif args['--multiband']:
            per_fcn = periodograms.MultiBandGLS
            name = 'Multiband Lomb-Scargle'
            # NOTE(review): 57170 looks like a date (JD-like) cut separating
            # two instrument epochs -- confirm against the data format.
            tempmask = system.time > 57170
            if (~tempmask).all():
                msg = red('ERROR: ') + 'All observations are before 57170. Multiband periodogram is not appropriate'
                clogger.fatal(msg)
                return
        elif args['--gls']:
            per_fcn = periodograms.gls
            name = 'Generalized Lomb-Scargle'
        # this is the default if user did not specify arguments
        else:
            per_fcn = periodograms.gls
            name = 'Generalized Lomb-Scargle'

        if args['obs']:  # periodogram of the observed RVs
            try:
                # are we forced to recalculate it?
                if args['--force']: raise AttributeError
                # it was calculated already?
                system.per
                # the same periodogram?
                if system.per.name != name:
                    raise AttributeError
                # system.per._output(verbose=verb)  # not ready
                if showplot:
                    system.per._plot(doFAP=fap, dobFAP=bfap, save=args['--save'])
            except AttributeError:
                # (Re)calculate and cache the periodogram on the system.
                system.per = per_fcn(system, hifac=hf, ofac=of)
                # system.per._output(verbose=verb)  # not ready
                if showplot:
                    system.per._plot(doFAP=fap, dobFAP=bfap, save=args['--save'])

        if args['bis']:  # periodogram of the CCF's Bisector Inverse Slope
            system.bis_per = per_fcn(system, hifac=hf, ofac=of, quantity='bis')
            if showplot:
                system.bis_per._plot(doFAP=fap, dobFAP=bfap, save=args['--save'])

        if args['fwhm']:  # periodogram of the CCF's fwhm
            system.fwhm_per = per_fcn(system, hifac=hf, ofac=of, quantity='fwhm')
            if showplot:
                system.fwhm_per._plot(doFAP=fap, dobFAP=bfap, save=args['--save'])

        if args['rhk']:  # periodogram of rhk
            system.rhk_per = per_fcn(system, hifac=hf, ofac=of, quantity='rhk')
            if showplot:
                system.rhk_per._plot(doFAP=fap, dobFAP=bfap, save=args['--save'])

        if args['contrast']:  # periodogram of contrast
            system.contrast_per = per_fcn(system, hifac=hf, ofac=of, quantity='contrast')
            if showplot:
                system.contrast_per._plot(doFAP=fap, dobFAP=bfap, save=args['--save'])

        if args['resid']:  # periodogram of the residuals of the current fit
            system.resid_per = per_fcn(system, hifac=hf, ofac=of, quantity='resid')
            if showplot:
                # NOTE(review): unlike the other branches, --save is not
                # forwarded here -- confirm whether that is deliberate.
                system.resid_per._plot(doFAP=fap, dobFAP=bfap)
@needs_local_scope
@line_magic
def clean(self, parameter_s='', local_ns=None):
"""
Deconvolves the LS periodogram from the window function
using the CLEAN algorithm (Roberts et al. 1985)
"""
# use default system or user defined
try:
if 'default' in local_ns:
system = local_ns['default']
except KeyError:
from shell_colors import red
msg = red('ERROR: ') + 'Set a default system or provide a system '+\
'name with the -n option'
clogger.fatal(msg)
return
core.do_clean(system)
    @needs_local_scope
    @line_magic
    def wf(self, parameter_s='', local_ns=None):
        """ Calculate the spectral window function of the observations.
        Type 'wf -h' for more help. """
        args = parse_arg_string('wf', parameter_s)
        if args == 1: return
        # print args

        # use default system or user defined
        try:
            if 'default' in local_ns and not args['-n']:
                system = local_ns['default']
            else:
                system_name = args['-n']
                system = local_ns[system_name]
        except KeyError:
            from shell_colors import red
            msg = red('ERROR: ') + 'Set a default system or provide a system '+\
                  'name with the -n option'
            clogger.fatal(msg)
            return

        # The window function is evaluated on the periodogram's frequency
        # grid, so make sure a periodogram exists first.
        try:
            system.per
        except AttributeError:
            from shell_colors import green
            clogger.debug('Calculating periodogram to get frequencies')
            stdout_write('Calculating periodogram to get frequencies...')
            system.per = periodograms.gls(system, hifac=5)
            print green(' done')

        try:
            if args['--freq']:
                system.wf._plot_freq()
            else:
                system.wf._plot()
        except AttributeError:
            # NOTE(review): on the first call system.wf does not exist yet;
            # it is created here but nothing is plotted until the next call.
            # The --dials option is also accepted by the usage but unused.
            system.wf = periodograms.SpectralWindow(system.per.freq, system.time)
    @needs_local_scope
    @line_magic
    def dawfab(self, parameter_s='', local_ns=None):
        """ Run the Dawson Fabrycky algorithm to search for aliases.
        Type 'dawfab -h' for more help. """
        args = parse_arg_string('dawfab', parameter_s)
        if args == 1: return
        # print args

        # use default system or user defined
        try:
            if 'default' in local_ns and not args['-n']:
                system = local_ns['default']
            else:
                system_name = args['-n']
                system = local_ns[system_name]
        except KeyError:
            from shell_colors import red
            msg = red('ERROR: ') + 'Set a default system or provide a system '+\
                  'name with the -n option'
            clogger.fatal(msg)
            return

        # All the real work happens in core.
        core.do_Dawson_Fabrycky(system)
        return
    @line_magic
    def listcommands(self, parameter_s=''):
        """ List available commands """
        # Dump the command_list help text defined at module level.
        print command_list
    @needs_local_scope
    @line_magic
    def mod(self, parameter_s='', local_ns=None):
        """ Define the type of model that will be adjusted to the data.
        Type 'mod -h' for more help
        """
        from shell_colors import yellow, blue, red
        args = parse_arg_string('mod', parameter_s)
        if args == 1:  # called without arguments, show how it's done
            msg = yellow('Usage: ') + 'mod [k<n>] [d<n>]\n' + \
                  'Options: k<n> Number of keplerian signals\n' + \
                  ' d<n> Degree of polynomial drift'
            clogger.fatal(msg)
            return

        if 'default' in local_ns:
            system = local_ns['default']
            if system.model is None: system.model = {}
            # NOTE(review): args appears to be a token list like ['k1', 'd0'];
            # indexing args[0][1] assumes single-digit counts -- confirm in
            # parse_arg_string.
            system.model['k'] = k = int(args[0][1])
            system.model['d'] = d = int(args[1][1])
        else:
            msg = red('ERROR: ') + 'Set a default system or provide a system '+\
                  'name with the -n option'
            clogger.fatal(msg)
            return

        # this should be logged?
        print blue('Current model:'), k, 'kep,', d, 'drifts'
        # ... do someting with this ...
@needs_local_scope
@line_magic
def fit(self, parameter_s='', local_ns=None):
from shell_colors import red
args = parse_arg_string('fit', parameter_s)
if args == 1: return
#print args
verb = True if args['--verbose'] else False
if 'default' in local_ns:
system = local_ns['default']
result = core.do_fit(system, verb)
if result is not None:
system.model['drift'] = result
if verb: print 'Coeff:', result
system.do_plot_drift()
else:
msg = red('ERROR: ') + 'Set a default system or provide a system '+\
'name with the -n option'
clogger.fatal(msg)
return
system.do_plot_fit()
@needs_local_scope
@line_magic
def set_fit(self, parameter_s='', local_ns=None):
    """ Manually set the fit on the default system. """
    from shell_colors import red

    try:
        system = local_ns['default']
    except KeyError:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    core.do_set_fit(system)
@needs_local_scope
@line_magic
def correlate(self, parameter_s='', local_ns=None):
    """ Correlate two quantities of the default system. """
    from shell_colors import red

    args = parse_arg_string('correlate', parameter_s)
    if args == 1:
        return

    if 'default' not in local_ns:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return
    system = local_ns['default']

    result = core.do_correlate(system,
                               vars=(args['<var1>'], args['<var2>']),
                               verbose=args['--verbose'],
                               remove=args['--remove'],
                               chunks=args['--chunks'])
@needs_local_scope
@line_magic
def de(self, parameter_s='', local_ns=None):
    """ Run the differential evolution algorithm minimization - stub """
    from shell_colors import red

    # argument parsing; -h / malformed arguments abort quietly
    try:
        args = parse_arg_string('de', parameter_s)
    except DocoptExit:
        print(de_usage.lstrip())
        return
    except SystemExit:
        return

    ngen = int(args.pop('--ngen'))
    npop = int(args.pop('--npop'))

    # default system?
    if 'default' not in local_ns:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return
    system = local_ns['default']

    core.do_diffevol(system, npop=npop, ngen=ngen)
    system.do_plot_drift()
    system.do_plot_fit()
@needs_local_scope
@line_magic
def demc(self, parameter_s='', local_ns=None):
    """ Run the Differential Evolution MCMC. - stub"""
    from shell_colors import red

    try:
        args = parse_arg_string('demc', parameter_s)
    except DocoptExit:
        print(demc_usage.lstrip())
        return
    except SystemExit:
        return

    if 'default' not in local_ns:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return
    system = local_ns['default']

    print(args)
    zfile = args.pop('<zipfile>')
    return core.do_demc(system, zfile=zfile, burnin=0)
    # system.do_plot_fit()
@needs_local_scope
@line_magic
def gen(self, parameter_s='', local_ns=None):
    """ Run the genetic algorithm minimization - stub """
    from shell_colors import red
    ## take care of arguments
    try:
        args = parse_arg_string('gen', parameter_s)
    except DocoptExit:
        # BUG FIX: this previously printed `de_usage` (copy-paste from the
        # `de` magic); show the usage text for *gen* instead, matching the
        # gen_usage string used by parse_arg_string for this command.
        print(gen_usage.lstrip())
        return
    except SystemExit:
        return

    ngen = int(args.pop('--ngen'))
    npop = int(args.pop('--npop'))

    # use the default system
    if 'default' in local_ns:
        system = local_ns['default']
    else:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    core.do_genetic(system, npop=npop, ngen=ngen)
    system.do_plot_fit()
@needs_local_scope
@line_magic
def genyorbit(self, parameter_s='', local_ns=None):
    """ Run the genetic algorithm minimization - stub """
    from shell_colors import red

    try:
        system = local_ns['default']
    except KeyError:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    write_yorbit_macro(system)
@needs_local_scope
@line_magic
def remove_rotation(self, parameter_s='', local_ns=None):
    """ Remove rotation period and harmonics """
    from shell_colors import red
    try:
        args = parse_arg_string('rrot', parameter_s)
    except DocoptExit:
        print(rrot_usage.lstrip())
        return
    except SystemExit:
        return

    # use default system or user defined
    try:
        if 'default' in local_ns and not args['-n']:
            system = local_ns['default']
        else:
            system_name = args['-n']
            system = local_ns[system_name]
    except KeyError:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    print(args)
    prot = args['--prot']
    nrem = int(args['--nrem'])
    fwhm = args['fwhm']
    rhk = args['rhk']

    # BUG FIX: a second, redundant `if 'default' in local_ns` lookup used
    # to run here, silently replacing a system explicitly selected with -n
    # by the default one. The selection above is now the only one.

    core.do_remove_rotation(system, prot=prot, nrem=nrem, fwhm=fwhm, rhk=rhk)
@needs_local_scope
@line_magic
def add_noise(self, parameter_s='', local_ns=None):
    """ Add noise in quadrature to the system's uncertainties. """
    try:
        args = parse_arg_string('add_noise', parameter_s)
    except DocoptExit:
        print(addnoise_usage.lstrip())
        return
    except SystemExit:
        return

    # pick the default system unless another was named with -n
    try:
        if args['-n'] or 'default' not in local_ns:
            system = local_ns[args['-n']]
        else:
            system = local_ns['default']
    except KeyError:
        from shell_colors import red
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    noise = float(args['<number>'])
    # mean error below 0.01 is taken to mean the data is in km/s, so the
    # extra noise (given in m/s) is converted before adding in quadrature
    kms = system.error.mean() < 0.01
    if kms:
        system.error = sqrt(system.error**2 + (noise / 1000)**2)
    else:
        system.error = sqrt(system.error**2 + noise**2)
@needs_local_scope
@line_magic
def lowpass(self, parameter_s='', local_ns=None):
    """ Apply a low-pass filter to the default system. """
    from shell_colors import blue

    # use the default system
    try:
        system = local_ns['default']
    except KeyError:
        from shell_colors import red
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    core.do_lowpass_filter(system)
@needs_local_scope
@line_magic
def detection_limits(self, parameter_s='', local_ns=None):
    """ Compute detection limits for the default system. """
    try:
        system = local_ns['default']
    except KeyError:
        from shell_colors import red
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    core.do_detection_limits(system)
@needs_local_scope
@line_magic
def killall(self, parameter_s='', local_ns=None):
    """ Close all open matplotlib figure windows. """
    # FIX: the first parameter was misspelled `slef`; it is positional so
    # behavior was unchanged, but the name is now the conventional `self`.
    from matplotlib.pyplot import close
    close('all')
@needs_local_scope
@line_magic
def nest(self, parameter_s='', local_ns=None):
    """ Start the MultiNest analysis and handle data interaction and IO """
    from shell_colors import red

    try:
        args = parse_arg_string('nest', parameter_s)
    except DocoptExit:
        print(nest_usage.lstrip())
        return
    except SystemExit:
        return

    if 'default' not in local_ns:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return
    system = local_ns['default']

    # simple flags
    user = args['-u']
    resume = args['-r']
    verbose = args['-v']
    gp = args['--gp']
    jitter = args['--jitter']
    if gp and jitter:
        msg = red('ERROR: ') + '--gp and --jitter are incompatible'
        clogger.fatal(msg)
        return

    doplot = not args['--noplot']
    saveplot = args['--saveplot']
    dofeedback = args['--feed']
    doMAPfeedback = args['--MAPfeed']
    maxp = int(args['--maxp'])
    restart = args['--restart']
    nml_path = args['--nml']

    # starting planets: comma-separated list of integers, default empty
    startp = args['--startp']
    startp = [int(i) for i in startp.split(',')] if startp is not None else []

    try:
        ncpu = int(args['--ncpu'])
    except TypeError:
        # --ncpu not given
        ncpu = None

    train_quantity = args['--train'] if bool(args['--train']) else None
    skip_train_mcmc = args['--skip-mcmc']
    lin_quantity = args['--lin'] if bool(args['--lin']) else None
    if bool(args['--train']) and not system.is_in_extras(train_quantity):
        msg = red('ERROR: ') + 'The name "%s" is not available in extras.\n' % train_quantity
        clogger.fatal(msg)
        return

    core.do_multinest(system, user, gp, jitter, maxp=maxp,
                      resume=resume, ncpu=ncpu, verbose=verbose,
                      training=train_quantity,
                      skip_train_mcmc=skip_train_mcmc, lin=lin_quantity,
                      doplot=doplot, saveplot=saveplot,
                      feed=dofeedback, MAPfeed=doMAPfeedback,
                      restart=restart, nml=nml_path, startp=startp)
@needs_local_scope
@line_magic
def dnest(self, parameter_s='', local_ns=None):
    # NOTE(review): dead code -- this empty placeholder is immediately
    # shadowed by the second `dnest` definition that follows, so the
    # decorators end up registering that one. Safe to delete.
    pass
@needs_local_scope
@line_magic
def dnest(self, parameter_s='', local_ns=None):
    """ Run the RJ-DNest3 analysis on the default system. """
    from shell_colors import red
    # FIX: `dict.has_key` is deprecated (removed in Python 3); use the
    # `in` operator, as every other magic in this file already does.
    if 'default' in local_ns:
        system = local_ns['default']
    else:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return
    core.do_RJ_DNest3(system)
@needs_local_scope
@line_magic
def restrict(self, parameter_s='', local_ns=None):
    """ Select data based on date, SNR or radial velocity accuracy.
    Type 'restrict -h' for more help
    """
    from shell_colors import yellow, blue, red

    args = parse_arg_string('restrict', parameter_s)
    if args == DocoptExit:
        msg = yellow('Warning: ') + "I'm not doing anything. Type restrict -h for help"
        clogger.fatal(msg)
        return

    # use default system or user defined
    try:
        if args['-n'] or 'default' not in local_ns:
            system = local_ns[args['-n']]
        else:
            system = local_ns['default']
    except KeyError:
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    # each sub-command validates its own numeric arguments and then
    # delegates to core.do_restrict
    if args['err']:
        try:
            maxerr = float(args['<maxerr>'])
        except ValueError:
            clogger.fatal(red('ERROR: ') + 'maxerr shoud be a number!')
            return
        core.do_restrict(system, 'error', maxerr)

    if args['sn']:
        try:
            maxsn = float(args['<maxsn>'])
        except ValueError:
            clogger.fatal(red('ERROR: ') + 'maxsn shoud be a number!')
            return
        core.do_restrict(system, 'sn', maxsn)

    if args['jd']:
        try:
            maxjd = int(args['<maxjd>'])
            minjd = int(args['<minjd>'])
        except ValueError:
            clogger.fatal(red('ERROR: ') + 'minjd and maxjd shoud be integer numbers!')
            return
        core.do_restrict(system, 'date', minjd, maxjd)

    if args['year']:
        try:
            yr = int(args['<yr>'])
        except ValueError:
            clogger.fatal(red('ERROR: ') + 'yr shoud be a number!')
            return
        core.do_restrict(system, 'year', yr)

    if args['years']:
        try:
            yr1 = int(args['<yr1>'])
            yr2 = int(args['<yr2>'])
        except ValueError:
            clogger.fatal(red('ERROR: ') + 'yr1 and yr2 shoud be numbers!')
            return
        core.do_restrict(system, 'years', yr1, yr2)

    if args['--gui']:
        core.do_restrict(system, 'gui')
    if args['--index']:
        core.do_restrict(system, 'index', args['--index'], noask=args['--noask'])
@needs_local_scope
@line_magic
def rotation(self, parameter_s='', local_ns=None):
    """ Calculate rotation period from activity-rotation relation"""
    args = parse_arg_string('rotation', parameter_s)

    # use default system or user defined
    try:
        if args['-n'] or 'default' not in local_ns:
            # NOTE: the flag tested is -n but the value is read from SYSTEM
            system = local_ns[args['SYSTEM']]
        else:
            system = local_ns['default']
    except KeyError:
        from shell_colors import red
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    core.get_rotation_period(system)
@needs_local_scope
@line_magic
def create(self, parameter_s='', local_ns=None):
    """ Create simulated observations. """
    if '-h' in parameter_s:
        print(create_usage)
        return
    if '--gui' in parameter_s:
        core.load_plugin('create_GUI')
        return
    core.do_create_planets(parameter_s)
@needs_local_scope
@line_magic
def to_mps(self, parameter_s='', local_ns=None):
    """ Convert the data to meters per second, if it appears to be in km/s. """
    from shell_colors import blue

    args = parse_arg_string('to_mps', parameter_s)

    # use default system or user defined
    try:
        if args['-n'] or 'default' not in local_ns:
            system = local_ns[args['SYSTEM']]
        else:
            system = local_ns['default']
    except KeyError:
        from shell_colors import red
        msg = red('ERROR: ') + 'Set a default system or provide a system '+\
              'name with the -n option'
        clogger.fatal(msg)
        return

    # uncertainties below 0.01 are taken as evidence of km/s units
    if min(system.error) < 0.01:
        clogger.info(blue('INFO: ') + 'Converting to m/s')
        system.vrad = (system.vrad - mean(system.vrad)) * 1e3
        system.error *= 1e3
        system.units = 'm/s'
@needs_local_scope
@line_magic
def metalpoor(self, parameter_s='', local_ns=None):
    """ Load the 'metalpoor' plugin. """
    core.load_plugin('metalpoor')
def parse_arg_string(command, arg_string):
    """ Parse arguments for each of the commands.

    Returns the parsed arguments (usually a docopt dictionary) for
    `command`, or a sentinel (1, or the DocoptExit class) when usage
    should be shown instead. Unknown commands return None (previously
    this raised NameError on the final return).
    """
    # docopt does the heavy-lifting parsing, we just split the argument string
    # and catch the exceptions raised by -h or --help
    splitted = str(arg_string).split()

    # FIX: these comparisons previously used `command is '...'`, which
    # relies on CPython string interning and is not a guaranteed way to
    # compare string contents; `==` is the correct operator.
    args = None

    if command == 'read':
        args = docopt(read_usage, splitted)
    if command == 'saverdb':
        args = docopt(saverdb_usage, splitted)
    if command == 'plot':
        args = docopt(plot_usage, splitted)
    if command == 'per':
        args = docopt(per_usage, splitted)

    # this is a little different
    if command == 'mod':
        import re
        if arg_string == '':
            return 1  # mod needs arguments
        if arg_string in ('-h', '--help'):
            return 1  # explain what to do
        k = re.compile("k[0-9]").findall(arg_string)
        if k == []:  # if just drifts
            k = ['k0']
        d = re.compile("d[0-9]").findall(arg_string)
        if d == []:  # if just keplerians
            d = ['d0']
        args = k + d

    if command == 'fit':
        try:
            args = docopt(fit_usage, splitted)
        except SystemExit:
            return 1
    if command == 'correlate':
        try:
            args = docopt(correlate_usage, splitted)
        except SystemExit:
            return 1
    if command == 'restrict':
        if arg_string == '':
            return DocoptExit  # restrict needs arguments
        try:
            args = docopt(restrict_usage, splitted)
        except (SystemExit, DocoptExit) as e:
            return DocoptExit
    if command == 'wf':
        try:
            args = docopt(wf_usage, splitted)
        except SystemExit:
            return 1
    if command == 'dawfab':
        try:
            args = docopt(dawfab_usage, splitted)
        except SystemExit:
            return 1
    if command == 'rrot':
        args = docopt(rrot_usage, splitted)
    if command == 'de':
        args = docopt(de_usage, splitted)
    if command == 'gen':
        args = docopt(gen_usage, splitted)
    if command == 'demc':
        args = docopt(demc_usage, splitted)
    if command == 'add_noise':
        args = docopt(addnoise_usage, splitted)
    if command == 'rotation':
        args = docopt(rotation_usage, splitted)
    if command == 'to_mps':
        # NOTE(review): `tp_mps_usage` looks like a typo for `to_mps_usage`;
        # kept as-is because the module-level name is not visible here -- verify.
        args = docopt(tp_mps_usage, splitted)
    if command == 'nest':
        args = docopt(nest_usage, splitted)
    return args
|
j-faria/OPEN
|
OPEN/commandsOPEN.py
|
Python
|
mit
| 45,516
|
[
"Gaussian"
] |
9c89c1948a774c65a827044458ee1cd40b83c1d5a4e16c1512f83f1f2997cd32
|
import math
import operator
from csg.geom import *
from functools import reduce
class CSG(object):
"""
Constructive Solid Geometry (CSG) is a modeling technique that uses Boolean
operations like union and intersection to combine 3D solids. This library
implements CSG operations on meshes elegantly and concisely using BSP trees,
and is meant to serve as an easily understandable implementation of the
algorithm. All edge cases involving overlapping coplanar polygons in both
solids are correctly handled.
Example usage::
from csg.core import CSG
cube = CSG.cube();
sphere = CSG.sphere({'radius': 1.3});
polygons = cube.subtract(sphere).toPolygons();
## Implementation Details
All CSG operations are implemented in terms of two functions, `clipTo()` and
`invert()`, which remove parts of a BSP tree inside another BSP tree and swap
solid and empty space, respectively. To find the union of `a` and `b`, we
want to remove everything in `a` inside `b` and everything in `b` inside `a`,
then combine polygons from `a` and `b` into one solid::
a.clipTo(b);
b.clipTo(a);
a.build(b.allPolygons());
The only tricky part is handling overlapping coplanar polygons in both trees.
The code above keeps both copies, but we need to keep them in one tree and
remove them in the other tree. To remove them from `b` we can clip the
inverse of `b` against `a`. The code for union now looks like this::
a.clipTo(b);
b.clipTo(a);
b.invert();
b.clipTo(a);
b.invert();
a.build(b.allPolygons());
Subtraction and intersection naturally follow from set operations. If
union is `A | B`, subtraction is `A - B = ~(~A | B)` and intersection is
`A & B = ~(~A | ~B)` where `~` is the complement operator.
## License
Copyright (c) 2011 Evan Wallace (http://madebyevan.com/), under the MIT license.
Python port Copyright (c) 2012 Tim Knip (http://www.floorplanner.com), under the MIT license.
Additions by Alex Pletzer (Pennsylvania State University)
"""
def __init__(self):
self.polygons = []
@classmethod
def fromPolygons(cls, polygons):
csg = CSG()
csg.polygons = polygons
return csg
def clone(self):
csg = CSG()
csg.polygons = list(map(lambda p: p.clone(), self.polygons))
return csg
def toPolygons(self):
return self.polygons
def refine(self):
"""
Return a refined CSG. To each polygon, a middle point is added to each edge and to the center
of the polygon
"""
newCSG = CSG()
for poly in self.polygons:
verts = poly.vertices
numVerts = len(verts)
if numVerts == 0:
continue
midPos = reduce(operator.add, [v.pos for v in verts]) / float(numVerts)
midNormal = None
if verts[0].normal is not None:
midNormal = poly.plane.normal
midVert = Vertex(midPos, midNormal)
newVerts = verts + \
[verts[i].interpolate(verts[(i + 1)%numVerts], 0.5) for i in range(numVerts)] + \
[midVert]
i = 0
vs = [newVerts[i], newVerts[i+numVerts], newVerts[2*numVerts], newVerts[2*numVerts-1]]
newPoly = Polygon(vs, poly.shared)
newPoly.shared = poly.shared
newPoly.plane = poly.plane
newCSG.polygons.append(newPoly)
for i in range(1, numVerts):
vs = [newVerts[i], newVerts[numVerts+i], newVerts[2*numVerts], newVerts[numVerts+i-1]]
newPoly = Polygon(vs, poly.shared)
newCSG.polygons.append(newPoly)
return newCSG
def translate(self, disp):
"""
Translate Geometry.
disp: displacement (array of floats)
"""
d = Vector(disp[0], disp[1], disp[2])
for poly in self.polygons:
for v in poly.vertices:
v.pos = v.pos.plus(d)
# no change to the normals
def rotate(self, axis, angleDeg):
"""
Rotate geometry.
axis: axis of rotation (array of floats)
angleDeg: rotation angle in degrees
"""
ax = Vector(axis[0], axis[1], axis[2]).unit()
cosAngle = math.cos(math.pi * angleDeg / 180.)
sinAngle = math.sin(math.pi * angleDeg / 180.)
def newVector(v):
vA = v.dot(ax)
vPerp = v.minus(ax.times(vA))
vPerpLen = vPerp.length()
if vPerpLen == 0:
# vector is parallel to axis, no need to rotate
return v
u1 = vPerp.unit()
u2 = u1.cross(ax)
vCosA = vPerpLen*cosAngle
vSinA = vPerpLen*sinAngle
return ax.times(vA).plus(u1.times(vCosA).plus(u2.times(vSinA)))
for poly in self.polygons:
for vert in poly.vertices:
vert.pos = newVector(vert.pos)
normal = vert.normal
if normal.length() > 0:
vert.normal = newVector(vert.normal)
def toVerticesAndPolygons(self):
"""
Return list of vertices, polygons (cells), and the total
number of vertex indices in the polygon connectivity list
(count).
"""
offset = 1.234567890
verts = []
polys = []
vertexIndexMap = {}
count = 0
for poly in self.polygons:
verts = poly.vertices
cell = []
for v in poly.vertices:
p = v.pos
# use string key to remove degeneracy associated
# very close points. The format %.10e ensures that
# points differing in the 11 digits and higher are
# treated as the same. For instance 1.2e-10 and
# 1.3e-10 are essentially the same.
vKey = '%.10e,%.10e,%.10e' % (p[0] + offset,
p[1] + offset,
p[2] + offset)
if not vKey in vertexIndexMap:
vertexIndexMap[vKey] = len(vertexIndexMap)
index = vertexIndexMap[vKey]
cell.append(index)
count += 1
polys.append(cell)
# sort by index
sortedVertexIndex = sorted(vertexIndexMap.items(),
key=operator.itemgetter(1))
verts = []
for v, i in sortedVertexIndex:
p = []
for c in v.split(','):
p.append(float(c) - offset)
verts.append(tuple(p))
return verts, polys, count
def saveVTK(self, filename):
"""
Save polygons in VTK file.
"""
with open(filename, 'w') as f:
f.write('# vtk DataFile Version 3.0\n')
f.write('pycsg output\n')
f.write('ASCII\n')
f.write('DATASET POLYDATA\n')
verts, cells, count = self.toVerticesAndPolygons()
f.write('POINTS {0} float\n'.format(len(verts)))
for v in verts:
f.write('{0} {1} {2}\n'.format(v[0], v[1], v[2]))
numCells = len(cells)
f.write('POLYGONS {0} {1}\n'.format(numCells, count + numCells))
for cell in cells:
f.write('{0} '.format(len(cell)))
for index in cell:
f.write('{0} '.format(index))
f.write('\n')
def union(self, csg):
"""
Return a new CSG solid representing space in either this solid or in the
solid `csg`. Neither this solid nor the solid `csg` are modified.::
A.union(B)
+-------+ +-------+
| | | |
| A | | |
| +--+----+ = | +----+
+----+--+ | +----+ |
| B | | |
| | | |
+-------+ +-------+
"""
a = BSPNode(self.clone().polygons)
b = BSPNode(csg.clone().polygons)
a.clipTo(b)
b.clipTo(a)
b.invert()
b.clipTo(a)
b.invert()
a.build(b.allPolygons());
return CSG.fromPolygons(a.allPolygons())
def __add__(self, csg):
return self.union(csg)
def subtract(self, csg):
"""
Return a new CSG solid representing space in this solid but not in the
solid `csg`. Neither this solid nor the solid `csg` are modified.::
A.subtract(B)
+-------+ +-------+
| | | |
| A | | |
| +--+----+ = | +--+
+----+--+ | +----+
| B |
| |
+-------+
"""
a = BSPNode(self.clone().polygons)
b = BSPNode(csg.clone().polygons)
a.invert()
a.clipTo(b)
b.clipTo(a)
b.invert()
b.clipTo(a)
b.invert()
a.build(b.allPolygons())
a.invert()
return CSG.fromPolygons(a.allPolygons())
def __sub__(self, csg):
return self.subtract(csg)
def intersect(self, csg):
"""
Return a new CSG solid representing space both this solid and in the
solid `csg`. Neither this solid nor the solid `csg` are modified.::
A.intersect(B)
+-------+
| |
| A |
| +--+----+ = +--+
+----+--+ | +--+
| B |
| |
+-------+
"""
a = BSPNode(self.clone().polygons)
b = BSPNode(csg.clone().polygons)
a.invert()
b.clipTo(a)
b.invert()
a.clipTo(b)
b.clipTo(a)
a.build(b.allPolygons())
a.invert()
return CSG.fromPolygons(a.allPolygons())
def __mul__(self, csg):
return self.intersect(csg)
def inverse(self):
"""
Return a new CSG solid with solid and empty space switched. This solid is
not modified.
"""
csg = self.clone()
map(lambda p: p.flip(), csg.polygons)
return csg
@classmethod
def cube(cls, center=[0,0,0], radius=[1,1,1]):
"""
Construct an axis-aligned solid cuboid. Optional parameters are `center` and
`radius`, which default to `[0, 0, 0]` and `[1, 1, 1]`. The radius can be
specified using a single number or a list of three numbers, one for each axis.
Example code::
cube = CSG.cube(
center=[0, 0, 0],
radius=1
)
"""
c = Vector(0, 0, 0)
r = [1, 1, 1]
if isinstance(center, list): c = Vector(center)
if isinstance(radius, list): r = radius
else: r = [radius, radius, radius]
polygons = list(map(
lambda v: Polygon(
list(map(lambda i:
Vertex(
Vector(
c.x + r[0] * (2 * bool(i & 1) - 1),
c.y + r[1] * (2 * bool(i & 2) - 1),
c.z + r[2] * (2 * bool(i & 4) - 1)
),
None
), v[0]))),
[
[[0, 4, 6, 2], [-1, 0, 0]],
[[1, 3, 7, 5], [+1, 0, 0]],
[[0, 1, 5, 4], [0, -1, 0]],
[[2, 6, 7, 3], [0, +1, 0]],
[[0, 2, 3, 1], [0, 0, -1]],
[[4, 5, 7, 6], [0, 0, +1]]
]))
return CSG.fromPolygons(polygons)
@classmethod
def sphere(cls, **kwargs):
""" Returns a sphere.
Kwargs:
center (list): Center of sphere, default [0, 0, 0].
radius (float): Radius of sphere, default 1.0.
slices (int): Number of slices, default 16.
stacks (int): Number of stacks, default 8.
"""
center = kwargs.get('center', [0.0, 0.0, 0.0])
if isinstance(center, float):
center = [center, center, center]
c = Vector(center)
r = kwargs.get('radius', 1.0)
if isinstance(r, list) and len(r) > 2:
r = r[0]
slices = kwargs.get('slices', 16)
stacks = kwargs.get('stacks', 8)
polygons = []
def appendVertex(vertices, theta, phi):
d = Vector(
math.cos(theta) * math.sin(phi),
math.cos(phi),
math.sin(theta) * math.sin(phi))
vertices.append(Vertex(c.plus(d.times(r)), d))
dTheta = math.pi * 2.0 / float(slices)
dPhi = math.pi / float(stacks)
j0 = 0
j1 = j0 + 1
for i0 in range(0, slices):
i1 = i0 + 1
# +--+
# | /
# |/
# +
vertices = []
appendVertex(vertices, i0 * dTheta, j0 * dPhi)
appendVertex(vertices, i1 * dTheta, j1 * dPhi)
appendVertex(vertices, i0 * dTheta, j1 * dPhi)
polygons.append(Polygon(vertices))
j0 = stacks - 1
j1 = j0 + 1
for i0 in range(0, slices):
i1 = i0 + 1
# +
# |\
# | \
# +--+
vertices = []
appendVertex(vertices, i0 * dTheta, j0 * dPhi)
appendVertex(vertices, i1 * dTheta, j0 * dPhi)
appendVertex(vertices, i0 * dTheta, j1 * dPhi)
polygons.append(Polygon(vertices))
for j0 in range(1, stacks - 1):
j1 = j0 + 0.5
j2 = j0 + 1
for i0 in range(0, slices):
i1 = i0 + 0.5
i2 = i0 + 1
# +---+
# |\ /|
# | x |
# |/ \|
# +---+
verticesN = []
appendVertex(verticesN, i1 * dTheta, j1 * dPhi)
appendVertex(verticesN, i2 * dTheta, j2 * dPhi)
appendVertex(verticesN, i0 * dTheta, j2 * dPhi)
polygons.append(Polygon(verticesN))
verticesS = []
appendVertex(verticesS, i1 * dTheta, j1 * dPhi)
appendVertex(verticesS, i0 * dTheta, j0 * dPhi)
appendVertex(verticesS, i2 * dTheta, j0 * dPhi)
polygons.append(Polygon(verticesS))
verticesW = []
appendVertex(verticesW, i1 * dTheta, j1 * dPhi)
appendVertex(verticesW, i0 * dTheta, j2 * dPhi)
appendVertex(verticesW, i0 * dTheta, j0 * dPhi)
polygons.append(Polygon(verticesW))
verticesE = []
appendVertex(verticesE, i1 * dTheta, j1 * dPhi)
appendVertex(verticesE, i2 * dTheta, j0 * dPhi)
appendVertex(verticesE, i2 * dTheta, j2 * dPhi)
polygons.append(Polygon(verticesE))
return CSG.fromPolygons(polygons)
@classmethod
def cylinder(cls, **kwargs):
""" Returns a cylinder.
Kwargs:
start (list): Start of cylinder, default [0, -1, 0].
end (list): End of cylinder, default [0, 1, 0].
radius (float): Radius of cylinder, default 1.0.
slices (int): Number of slices, default 16.
"""
s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
if isinstance(s, list):
s = Vector(*s)
if isinstance(e, list):
e = Vector(*e)
r = kwargs.get('radius', 1.0)
slices = kwargs.get('slices', 16)
ray = e.minus(s)
axisZ = ray.unit()
isY = (math.fabs(axisZ.y) > 0.5)
axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
axisY = axisX.cross(axisZ).unit()
start = Vertex(s, axisZ.negated())
end = Vertex(e, axisZ.unit())
polygons = []
def point(stack, angle, normalBlend):
out = axisX.times(math.cos(angle)).plus(
axisY.times(math.sin(angle)))
pos = s.plus(ray.times(stack)).plus(out.times(r))
normal = out.times(1.0 - math.fabs(normalBlend)).plus(
axisZ.times(normalBlend))
return Vertex(pos, normal)
dt = math.pi * 2.0 / float(slices)
for i in range(0, slices):
t0 = i * dt
i1 = (i + 1) % slices
t1 = i1 * dt
polygons.append(Polygon([start.clone(),
point(0., t0, -1.),
point(0., t1, -1.)]))
polygons.append(Polygon([point(0., t1, 0.),
point(0., t0, 0.),
point(1., t0, 0.),
point(1., t1, 0.)]))
polygons.append(Polygon([end.clone(),
point(1., t1, 1.),
point(1., t0, 1.)]))
return CSG.fromPolygons(polygons)
@classmethod
def cone(cls, **kwargs):
""" Returns a cone.
Kwargs:
start (list): Start of cone, default [0, -1, 0].
end (list): End of cone, default [0, 1, 0].
radius (float): Maximum radius of cone at start, default 1.0.
slices (int): Number of slices, default 16.
"""
s = kwargs.get('start', Vector(0.0, -1.0, 0.0))
e = kwargs.get('end', Vector(0.0, 1.0, 0.0))
if isinstance(s, list):
s = Vector(*s)
if isinstance(e, list):
e = Vector(*e)
r = kwargs.get('radius', 1.0)
slices = kwargs.get('slices', 16)
ray = e.minus(s)
axisZ = ray.unit()
isY = (math.fabs(axisZ.y) > 0.5)
axisX = Vector(float(isY), float(not isY), 0).cross(axisZ).unit()
axisY = axisX.cross(axisZ).unit()
startNormal = axisZ.negated()
start = Vertex(s, startNormal)
polygons = []
taperAngle = math.atan2(r, ray.length())
sinTaperAngle = math.sin(taperAngle)
cosTaperAngle = math.cos(taperAngle)
def point(angle):
# radial direction pointing out
out = axisX.times(math.cos(angle)).plus(
axisY.times(math.sin(angle)))
pos = s.plus(out.times(r))
# normal taking into account the tapering of the cone
normal = out.times(cosTaperAngle).plus(axisZ.times(sinTaperAngle))
return pos, normal
dt = math.pi * 2.0 / float(slices)
for i in range(0, slices):
t0 = i * dt
i1 = (i + 1) % slices
t1 = i1 * dt
# coordinates and associated normal pointing outwards of the cone's
# side
p0, n0 = point(t0)
p1, n1 = point(t1)
# average normal for the tip
nAvg = n0.plus(n1).times(0.5)
# polygon on the low side (disk sector)
polyStart = Polygon([start.clone(),
Vertex(p0, startNormal),
Vertex(p1, startNormal)])
polygons.append(polyStart)
# polygon extending from the low side to the tip
polySide = Polygon([Vertex(p0, n0), Vertex(e, nAvg), Vertex(p1, n1)])
polygons.append(polySide)
return CSG.fromPolygons(polygons)
|
timknip/pycsg
|
csg/core.py
|
Python
|
mit
| 20,425
|
[
"VTK"
] |
b6ea4783fa7d9a2abfdad3de7d0f8cff704d567dfe378c05c10343ccbe377b46
|
## minerals.py -- Some tables for ISM abundances and depletion factors
## that are useful for calculating dust mass and dust-to-gas ratios
##
## 2016.01.22 - lia@space.mit.edu
##----------------------------------------------------------------
import numpy as np
# Atomic masses [amu], per element symbol
amu = {'H':1.008,'He':4.0026,'C':12.011,'N':14.007,'O':15.999,'Ne':20.1797, \
       'Na':22.989,'Mg':24.305,'Al':26.981,'Si':28.085,'P':30.973,'S':32.06, \
       'Cl':35.45,'Ar':39.948,'Ca':40.078,'Ti':47.867,'Cr':51.9961,'Mn':54.938, \
       'Fe':55.845,'Co':58.933,'Ni':58.6934}
amu_g = 1.661e-24 # g
mp = 1.673e-24 # g (proton mass)
# Wilms abundance table, stored as 12 + log A_z
wilms = {'H':12.0, 'He':10.99, 'C':8.38, 'N':7.88, 'O':8.69, 'Ne':7.94, \
         'Na':6.16, 'Mg':7.40, 'Al':6.33, 'Si':7.27, 'P':5.42, 'S':7.09, \
         'Cl':5.12, 'Ar':6.41, 'Ca':6.20, 'Ti':4.81, 'Cr':5.51, 'Mn':5.34, \
         'Fe':7.43, 'Co':4.92, 'Ni':6.05} # 12 + log A_z
# Fraction of elements still in gas form
wilms_1mbeta = {'H':1.0, 'He':1.0, 'C':0.5, 'N':1.0, 'O':0.6, 'Ne':1.0, 'Na':0.25, \
                'Mg':0.2, 'Al':0.02, 'Si':0.1, 'P':0.6, 'S':0.6, 'Cl':0.5, 'Ar':1.0, \
                'Ca':0.003, 'Ti':0.002, 'Cr':0.03, 'Mn':0.07, 'Fe':0.3, 'Co':0.05, \
                'Ni':0.04}
class Mineral(object):
    """
    Mineral object
    -------------------
    Use a dictionary to define the composition.
    e.g. Olivines of pure MgFe^{2+}SiO_4 composition would be
    olivine_halfMg = Mineral( {'Mg':1.0, 'Fe':1.0, 'Si':1.0, 'O':4.0} )
    -------------------
    self.composition : dictionary containing elements and their weights

    Properties (docstring fixed: these were listed as `self._weight_amu`,
    a name that does not exist on the class)
    self.weight_amu : amu weight of unit crystal
    self.weight_g   : g weight of unit crystal
    """
    def __init__(self, comp):
        # comp: dict mapping element symbol (str) -> number of atoms
        self.composition = comp

    @property
    def weight_amu(self):
        # Sum of (atom count * atomic mass) over the composition, using
        # the module-level `amu` table.
        result = 0.0
        for atom in self.composition.keys():
            result += self.composition[atom] * amu[atom]
        return result

    @property
    def weight_g(self):
        # Convert amu -> grams via the module-level `amu_g` constant.
        return self.weight_amu * amu_g
def calc_mass_conversion( elem, mineral ):
    """
    calc_mass_conversion( elem, mineral )

    Returns the number of atoms of `elem` per gram of a particular mineral
    Useful for converting mass column to a number density column for an element

    Parameters
    ----------
    elem : str
        Element symbol; must be a key of ``mineral.composition``.
    mineral : Mineral
        Mineral whose unit-crystal composition and weight are used.

    Returns
    -------
    float : atoms of `elem` per gram of mineral [g^{-1}]
    """
    # isinstance instead of `type(x) == T`: also accepts subclasses of
    # Mineral/str and is the idiomatic type check. Still raises
    # AssertionError on bad input, as before.
    assert isinstance(mineral, Mineral)
    assert isinstance(elem, str)
    return mineral.composition[elem] / mineral.weight_g  # g^{-1}
def calc_element_column( NH, fmineral, atom, mineral, d2g=0.009 ):
    """
    Calculate the column density of an element for a particular NH value,
    assuming a dust-to-gas ratio (d2g) and
    the fraction of dust in that particular mineral species (fmineral)
    --------------------------------------------------------------------
    calc_element_column( NH, fmineral, atom, mineral, d2g=0.009 )
    """
    # Mass column of dust carried by this mineral species [g cm^{-2}]:
    # gas mass (NH * mp) scaled by the dust-to-gas ratio and the
    # fraction of the dust locked in this mineral.
    dust_mass = NH * mp * d2g * fmineral
    print('Dust mass = %.3e g cm^-2' % (dust_mass))
    # Convert mass column to an atom column for the requested element
    atoms_per_gram = calc_mass_conversion(atom, mineral)
    return atoms_per_gram * dust_mass  # cm^{-2}
def get_ISM_abund(elem, abund_table=wilms):
    """
    get_ISM_abund( elem, abund_table )
    ----
    Given an abundance table, calculate the number per H atom of a
    given element in any ISM form

    Parameters
    ----------
    elem : str
        Element symbol; must be a key of `abund_table`.
    abund_table : dict
        Abundances in the 12 + log A_z convention (default: `wilms`).

    Returns
    -------
    float : number of atoms of `elem` per H atom
    """
    # isinstance instead of `type(x) == T`: accepts subclasses and is
    # the idiomatic check; the AssertionError contract is unchanged.
    assert isinstance(elem, str)
    assert isinstance(abund_table, dict)
    # Invert the logarithmic convention: A_z = 10^(entry - 12)
    return np.power(10.0, abund_table[elem] - 12.0)  # number per H atom
def get_dust_abund(elem, abund_table=wilms, gas_ratio=wilms_1mbeta):
    """
    get_dust_abund( elem, abund_table, gas_ratio)
    ----
    Given an abundance table (dict) and a table of gas ratios (dict),
    calculate the number per H atom of a given ISM element in *solid* form

    Parameters
    ----------
    elem : str
        Element symbol; must be a key of both tables.
    abund_table : dict
        Abundances in the 12 + log A_z convention (default: `wilms`).
    gas_ratio : dict
        Fraction of each element remaining in the gas phase
        (default: `wilms_1mbeta`).

    Returns
    -------
    float : number of atoms of `elem` per H atom locked in dust
    """
    # isinstance instead of `type(x) == T` (see get_ISM_abund); the
    # AssertionError contract is unchanged.
    assert isinstance(elem, str)
    assert isinstance(abund_table, dict)
    assert isinstance(gas_ratio, dict)
    # Total ISM abundance times the depleted (solid) fraction
    return get_ISM_abund(elem, abund_table) * (1.0 - gas_ratio[elem])  # number per H atom
|
eblur/dust
|
astrodust/distlib/composition/minerals.py
|
Python
|
bsd-2-clause
| 3,836
|
[
"CRYSTAL"
] |
b42f24fd9fe711676f66dd736ab4e6b1827286a82d3e1c34211e5ee0bee892b9
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import mms
# Run the spatial MMS convergence study at 4 refinement levels with the
# default (first-order) discretization...
df1 = mms.run_spatial('ex14.i', 4, executable='./ex14-opt')
# ...and again with a second-order mesh and second-order 'forced' variable.
df2 = mms.run_spatial('ex14.i', 4, 'Mesh/second_order=true', 'Variables/forced/order=SECOND', executable='./ex14-opt')
# Plot L2 error vs. element size for both runs and save the figure.
fig = mms.ConvergencePlot(xlabel='Element Size ($h$)', ylabel='$L_2$ Error')
fig.plot(df1, label='1st Order', marker='o', markersize=8)
fig.plot(df2, label='2nd Order', marker='o', markersize=8)
fig.save('ex14_mms.png')
|
nuclear-wizard/moose
|
examples/ex14_pps/mms_spatial.py
|
Python
|
lgpl-2.1
| 743
|
[
"MOOSE"
] |
1d044ba9ad40c8c129563534feb7cbe8ad96df067cb1fb5a703db90af3fb116e
|
#!/usr/bin/env python
"""
fsclean.py
Faraday synthesis using 3D CLEAN deconvolution
*******************************************************************************
Copyright 2012 Michael Bell
This file is part of fsclean.
fsclean is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
fsclean is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with fsclean. If not, see <http://www.gnu.org/licenses/>.
*******************************************************************************
Software for imaging the Faraday spectrum, i.e. the 3D distribution of
polarized intensity as a function of Faraday depth and position on the sky.
Imaging is performed using the Faraday synthesis technique (see Bell and
Ensslin (2012) for details) and therefore inherently in 3D. Deconvolution is
carried out using a 3D CLEAN algorithm.
Data is read from MeasurementSet files of the type used by CASA. Images are
written to FITS files.
"""
# leave here while testing
#import sys
#sys.path.append('/home/mrbell/Work/code/')
import os
import datetime
import numpy as np
from optparse import OptionParser
from FSCData import FSCData, FSCPolData
from FSCImage import FSCImage
from FSCleanPM import FSCleanPM
import pyrat.Messenger as M
from pyrat.RAImage import GridParams
from pyrat.RAData import read_data_from_ms
from pyrat.Constants import *
VERSION = '0.1.0.0'
class FSCoords(object):
    """
    Image-plane coordinate definitions for the Faraday synthesis imager.

    Derived from the user parset: per-axis (cell size, pixel count) pairs
    in (phi, dec, ra) order, plus the gridding-kernel parameters.
    """
    def __init__(self, pm):
        """
        Compute all coordinate values required for imaging.

        Args:
            pm: parset manager instance with the parset dict loaded.
        Returns:
            Nothing.
        """
        # Angular cell size is given in arcsec; convert to radians once
        # and reuse for both sky axes.
        cell_rad = pm.parset['cellsize'] * ARCSEC_TO_RAD
        # Requested image plane grid: (delta, npix) per axis,
        # ordered (phi, dec, ra).
        self.grid_def = [(pm.parset['dphi'], pm.parset['nphi']),
                         (cell_rad, pm.parset['ndec']),
                         (cell_rad, pm.parset['nra'])]
        # Gridding (convolution kernel) parameters
        self.grid_params = GridParams(pm.parset['grid_alpha'],
                                      pm.parset['grid_w'])
class FSClean(object):
    """
    Faraday synthesis imager with 3D CLEAN deconvolution.

    Transforms weighted polarized visibilities into a Faraday spectrum
    cube and deconvolves it with either a Clark-style or Hogbom-style
    3D CLEAN, as selected in the parset.
    """
    # CLEAN algorithm types (values of the 'clean_type' parset entry)
    CLARK = 0
    HOGBOM = 1

    def __init__(self, pm=None):
        """
        Initialize the FSClean imager. Sets common values and inits the
        messenger class.
        Args:
            pm: FSCleanPM class instance, with the parset dict already
                loaded. If None, only a default Messenger is created.
        Returns:
            Nothing
        """
        if pm is None:
            self.m = M.Messenger()
            return
        self.pm = pm
        # Internal verbosity level convention
        # -1 off
        # 0 Warnings, Errors, Headers, Basic information
        # 1 Useful diagnostic information for most users
        # 2 Detailed diagnostic information for users
        # 3 Developer diagnostics
        # 4 Temporary print statements
        self.m = M.Messenger(self.pm.parset['verbosity'], use_color=True,
                             use_structure=False, add_timestamp=True)
        self.coords = FSCoords(pm)
        self.K = 1.  # Normalization constant for data to image transform
        self.Kinv = 1.  # Normalization constant for image to data transform
        self._scratch_files = []  # temp files removed by clean_up()
        self.do_clean = False
        if self.pm.parset['niter'] > 0:
            self.do_clean = True

    def condense_cc_list(self, cc):
        """
        Merge CLEAN components that share the same (phi, m, l) pixel by
        summing their fluxes.
        Args:
            cc: list of [phi, m, l, flux] CLEAN components.
        Returns:
            Reduced list with one component per unique pixel location.
        """
        tcc = list(cc)
        cc_redux = []
        while len(tcc) > 0:
            temp = tcc.pop()
            topop = []
            # Accumulate the flux of every remaining component at the
            # same location into `temp`, remembering which to remove.
            for i in range(len(tcc)):
                if temp[0] == tcc[i][0] and temp[1] == tcc[i][1] \
                        and temp[2] == tcc[i][2]:
                    temp2 = tcc[i]
                    topop.append(i)
                    temp[3] += temp2[3]
            cc_redux.append(temp)
            # Pop from the back so earlier indices stay valid.
            topop.sort(reverse=True)
            for i in range(len(topop)):
                tcc.pop(topop[i])
        return cc_redux

    def run(self, msfn, outfn_base):
        """
        The main routine: read data, image, CLEAN, and write results.
        Args:
            msfn: MeasurementSet file name
            outfn_base: base name for the output files
        Returns:
            Nothing.
        """
        # Dispatch table from parset 'clean_type' to the implementation
        clean_funcs = {self.CLARK: self.clark_clean,
                       self.HOGBOM: self.hogbom_clean}
        self.ofnbase = outfn_base
        self.sfnbase = os.path.join(self.pm.parset['scratch_dir'],
                                    os.path.basename(outfn_base))
        self.m.set_logfile(self.ofnbase + ".log")
        imfn = self.ofnbase + '_im.hdf5'
        dbfn = self.ofnbase + '_db.hdf5'
        self.m.header1("Starting FSCLEAN v." + VERSION)
        self.m.message("Requested parameters:", 0)
        if self.m.verbosity >= 0:
            self.pm.print_parset()
        self.m.message("Initializing data objects...", 1)
        weights = FSCData(self.sfnbase + '_weights.hdf5',
                          np.dtype('float64'),
                          m=self.m)
        self.register_scratch_files([weights.fn, weights.coords.fn])
        vis = FSCPolData(self.sfnbase + '_vis.hdf5',
                         coords=weights.coords,
                         m=self.m)
        self.register_scratch_files([vis.Q.fn, vis.U.fn])
        # `im` holds the (residual) image, `db` the dirty beam
        im = FSCImage(imfn, np.dtype('complex128'),
                      self.coords.grid_def, self.coords.grid_params, m=self.m)
        self.register_scratch_files([im.osim.fn, im.fourier_grid.fn])
        db = FSCImage(dbfn, np.dtype('complex128'),
                      self.coords.grid_def, self.coords.grid_params, m=self.m,
                      grid_dtype=np.dtype('float64'))
        self.register_scratch_files([db.osim.fn, db.fourier_grid.fn])
        read_data_from_ms(msfn, vis, weights, self.pm.parset['ms_column'],
                          'WEIGHT', mode='pol')
        self.m.message("Setting l2min", 3)
        # Pad the minimum lambda^2 downward by the gridding kernel
        # half-support so the kernel does not run off the grid.
        l2min = vis.coords.get_min_freq()
        l2min = l2min - \
            self.coords.grid_params.W * 2. * im.fourier_grid.deltas[0]
        # if l2min < 0.:
        #     l2min = 0.
        im.fourier_grid.set_mincoord(0, l2min)
        db.fourier_grid.set_mincoord(0, l2min)
        self.m.message("l2min set to " + str(l2min) + " m^2", 3)
        self.m.message("Setting normalization...", 2)
        self.set_normalizations(weights, db)
        self.m.message("K is " + str(self.K), 3)
        self.m.message("Kinv is " + str(self.Kinv), 3)
        # Hand off data to the appropriate CLEAN function
        #[cc, resim] = self.clark_clean(vis, weights, im, db)
        [cc, resim] = clean_funcs[self.pm.parset['clean_type']](vis, weights,
                                                                im, db)
        # Write images and CC list to disk
        self.m.message("Writing metadata to image files.", 2)
        self.write_image_metadata(im, msfn)
        self.write_image_metadata(db, msfn)
        if resim is not None:
            self.write_image_metadata(resim, msfn)
        self.write_cclist(self.ofnbase + "_cclist.txt", cc)
        self.clean_up()

    def register_scratch_files(self, fns):
        """
        Record one or more file names for later removal by clean_up().
        Args:
            fns: a single file name (str) or an iterable of file names.
        Returns:
            Nothing.
        Raises:
            TypeError: if fns is neither a string nor iterable.
        """
        if isinstance(fns, str):
            self._scratch_files.append(fns)
        elif np.iterable(fns):
            self._scratch_files += fns
        else:
            raise TypeError('Cannot add the requested data type to ' +
                            'the scratch files list.')

    def write_cclist(self, fn, cc):
        """
        Write the CLEAN component list to a plain text file, one
        "phi m l real imag" record per line.
        Args:
            fn: output file name.
            cc: list of [phi, m, l, complex flux] components (may be None).
        Returns:
            Nothing.
        """
        if not np.iterable(cc):
            self.m.warn("No clean components to write.")
            return
        self.m.message("Writing CLEAN component list to file.", 2)
        f = open(fn, 'w')
        for i in range(len(cc)):
            c = cc[i]
            line = "%d %d %d %f %f\n" % (c[0], c[1], c[2],
                                         c[3].real, c[3].imag)
            f.write(line)
        f.close()

    def set_normalizations(self, weights, db):
        """
        Set normalizations for transform and inverse transforms. Resets the
        class attributes K and Kinv.
        Args:
            weights: An FSCData object containing the weights for each
                visibility.
            db: An FSCImage object that will be used to store the dirty beam.
        Returns:
            Nothing.
        """
        # Kinv is only needed by the Clark algorithm (model inversion
        # back to visibility space during major cycles).
        if self.do_clean and self.pm.parset['clean_type'] == self.CLARK:
            self.m.message("Computing Kinv", 3)
            temp = FSCData(self.sfnbase + '_tempdata.hdf5',
                           coords=weights.coords,
                           dtype=np.dtype('float64'), m=self.m,
                           template=weights)
            self.register_scratch_files(temp.fn)
            [nphi, ndec, nra] = db.im.shape
            db.multiplywith(0.)
            # Unit point source at the image center, transformed to the
            # data domain; Kinv is the reciprocal of its mean amplitude.
            # NOTE(review): `nphi / 2` etc. assume Python 2 integer
            # division; under Python 3 these are floats -- confirm.
            db.im[nphi / 2, ndec / 2, nra / 2] = complex(1., 0.)
            db.transform(temp)
            nchan = 0.
            val = 0.
            for i in temp.iterkeys():
                freqs = temp.coords.get_freqs(i)
                for j in range(len(freqs)):
                    nchan += 1
                    val += np.mean(abs(temp.get_records(i, j)))
            self.Kinv = 1. / (val / nchan)
        # K normalizes the dirty beam (transform of the weights) to a
        # unit peak.
        self.m.message("Computing K", 3)
        weights.transform(db)
        self.K = 1. / db.find_max(abs)

    def clean_up(self):
        """
        Deletes all temp files created during imaging (those registered
        via register_scratch_files), unless disabled in the parset.
        Args:
            None.
        Returns:
            Nothing.
        """
        if self.pm.parset['clear_scratch'] != 0:
            self.m.header2("Removing scratch files...")
            for i in range(len(self._scratch_files)):
                os.remove(self._scratch_files[i])

    def write_image_metadata(self, im, msfn):
        """
        Writes important parameters to the header of the image.
        Args:
            im: FSCImage object pointing to the file to write metadata to.
            msfn: Filename of the MeasurementSet containing the visibility data
                that has been imaged.
        Returns:
            Nothing.
        """
        # Imported here so pyrap is only required when metadata is written.
        from pyrap import tables
        # Pull the pointing direction and source name from the MS SOURCE
        # subtable when present; fall back to zeros otherwise.
        if tables.tableexists(os.path.join(msfn, 'SOURCE')):
            pt = tables.table(os.path.join(msfn, 'SOURCE'))
            crval = pt.getcol('DIRECTION')[0]
            source_name = pt.getcol('NAME')[0]
        else:
            crval = [0., 0.]
            source_name = ''
        im.f.attrs['origin'] = 'fsclean v. ' + VERSION
        im.f.attrs['date'] = str(datetime.date.today())
        im.f.attrs['source'] = source_name
        im.f.attrs['axis_desc'] = ['Faraday Depth', 'Dec.', 'RA']
        im.f.attrs['axis_units'] = ['rad/m/m', 'rad', 'rad']
        im.f.attrs['image_units'] = 'Jy/beam'
        # NOTE(review): Python 2 integer division assumed for the
        # reference pixel -- confirm.
        im.f.attrs['crpix'] = [im.im.shape[0] / 2, im.im.shape[1] / 2,
                               im.im.shape[2] / 2]
        im.f.attrs['cdelt'] = [im.deltas[0], im.deltas[1], im.deltas[2]]
        im.f.attrs['crval'] = [0., crval[1], crval[0]]

    def hogbom_clean(self, vis, weights, im, db):
        """
        The 3D Hogbom CLEAN algorithm.
        Args:
            vis: An FSCPolData object containing the stokes Q and U visibility
                data to be cleaned.
            weights: An FSCData object containing the weights for each
                visibility.
            im: An FSCImage object in which to store the cleaned image.
            db: An FSCImage object in which to store the dirty beam image.
                Should have 2x the image volume of im.
        Returns:
            A list of clean components. Each list entry contains a tuple of
            model locations (phi, dec, ra) defined in pixels, and the model
            flux.
        """
        self.m.header2("Started the Hogbom CLEAN routine...")
        # Works fine
        self.m.message("Computing dirty image...", 1)
        vis.multiplywith(weights)  # vis now contains the weighted data!
        vis.transform(im)
        # im will contain the residual image going forward
        im.multiplywith(self.K)
        if not self.do_clean:
            return None, None
        # contains the oversized dirty beam (8x larger than normal one by vol)
        # so a beam patch can be shifted to any peak without wrapping.
        self.m.message("Computing oversized dirty beam...", 1)
        grid_def = self.coords.grid_def
        big_grid_def = list()
        for i in range(3):
            big_grid_def.append((grid_def[i][0], grid_def[i][1] * 2))
        bigdb = FSCImage(self.sfnbase + '_bigdb.hdf5', np.dtype('complex128'),
                         big_grid_def, self.coords.grid_params,
                         m=self.m, grid_dtype=np.dtype('float64'))
        self.register_scratch_files([bigdb.fn, bigdb.osim.fn,
                                     bigdb.fourier_grid.fn])
        weights.transform(bigdb)
        Kbig = 1. / bigdb.find_max(abs)
        bigdb.multiplywith(Kbig)
        # object for holding the model point source image
        pointim = FSCImage(self.sfnbase + '_pointim.hdf5',
                           np.dtype('complex128'),
                           self.coords.grid_def, self.coords.grid_params,
                           m=self.m)
        self.register_scratch_files([pointim.fn, pointim.osim.fn,
                                     pointim.fourier_grid.fn])
        [nphi, nm, nl] = im.im.shape
        cutoff = self.pm.parset['cutoff']
        niter = self.pm.parset['niter']
        gain = self.pm.parset['gain']
        # will contain the shifted beam image scaled by the residual peak value
        tdb = FSCImage(self.sfnbase + '_tdb.hdf5', np.dtype('complex128'),
                       self.coords.grid_def, self.coords.grid_params,
                       m=self.m, grid_dtype=np.dtype('float64'))
        self.register_scratch_files([tdb.fn, tdb.osim.fn, tdb.fourier_grid.fn])
        cclist = list()
        N = 0
        total_flux = complex(0, 0)
        # Classic Hogbom loop: find the residual peak, subtract a scaled,
        # shifted dirty beam, repeat until cutoff or niter.
        while True:
            [pphi, pm, pl] = im.find_argmax(abs)
            pval = im.im[pphi, pm, pl]
            if abs(pval) < cutoff:
                self.m.success("Stopping! Cutoff has been reached.")
                break
            total_flux = total_flux + pval * gain
            N += 1
            self.m.message(". Iteration " + str(N), 2)
            self.m.message(". CLEAN Component info:", 2)
            self.m.message(". . value: " + str(pval * gain), 2)
            self.m.message(". . abs. value: " +
                           str(abs(pval * gain)), 2)
            self.m.message(". . phi: " + str(pphi), 2)
            self.m.message(". . m: " + str(pm), 2)
            self.m.message(". . l: " + str(pl), 2)
            self.m.message(". . total pol. flux: " +
                           str(abs(total_flux)), 2)
            cclist.append([pphi, pm, pl, pval * gain])
            # Shift the oversized beam so its center lands on the peak,
            # scale it by gain*peak, and subtract from the residual.
            bigdb.copy_patch_to(tdb, (pphi, pm, pl))
            tdb.multiplywith(gain * pval)
            im.subtractoff(tdb)
            if N >= niter:
                self.m.success("Stopping! Maximum iterations reached.")
                break
        self.m.message("Adding CLEAN model to image...", 1)
        cclist = self.condense_cc_list(cclist)  # OK
        self.make_cclist_image(cclist, pointim)  # OK
        self.m.message("Convolving with CLEAN beam...", 1)
        self.make_beam_image(tdb)  # OK
        pointim.convolve_with(tdb)  # OK
        # Keep a copy of the residual before the model is added back in.
        resim = FSCImage(self.sfnbase + '_resim.hdf5',
                         np.dtype('complex128'),
                         self.coords.grid_def, self.coords.grid_params,
                         m=self.m)
        im.copy_to(resim)
        self.register_scratch_files([resim.osim.fn, resim.fourier_grid.fn])
        im.addto(pointim)
        return cclist, resim

    def clark_clean(self, vis, weights, im, db):
        """
        The 3D Clark CLEAN algorithm.
        Args:
            vis: An FSCPolData object containing the stokes Q and U visibility
                data to be cleaned.
            weights: An FSCData object containing the weights for each
                visibility.
            im: An FSCImage object in which to store the cleaned image.
            db: An FSCImage object in which to store the dirty beam image.
        Returns:
            A list of clean components. Each list entry contains a tuple of
            model locations (phi, dec, ra) defined in pixels, and the model
            flux.
        """
        self.m.header2("Started the Clark CLEAN routine...")
        # Works fine
        self.m.message("Computing dirty beam...", 1)
        weights.transform(db)
        db.multiplywith(self.K)
        # Works fine
        self.m.message("Computing dirty image...", 1)
        vis.multiplywith(weights)  # vis now contains the weighted data!
        if not self.do_clean:
            # im will contain the residual image going forward
            vis.transform(im)
            im.multiplywith(self.K)
            return None, None
        # object for holding the model point source image
        pointim = FSCImage(self.sfnbase + '_pointim.hdf5',
                           np.dtype('complex128'),
                           self.coords.grid_def, self.coords.grid_params,
                           m=self.m)
        self.register_scratch_files([pointim.fn, pointim.osim.fn,
                                     pointim.fourier_grid.fn])
        # object for holding the model visibilities
        modelvis = FSCPolData(self.sfnbase + '_modelvis.hdf5',
                              coords=weights.coords, m=self.m, template=vis)
        self.register_scratch_files([modelvis.Q.fn, modelvis.U.fn])
        [nphi, nm, nl] = im.im.shape
        PFRAC = self.pm.parset['beam_patch_frac']
        cutoff = self.pm.parset['cutoff']
        niter = self.pm.parset['niter']
        gain = self.pm.parset['gain']
        # number of pixels along each axis of the beam patch
        # NOTE(review): integer division assumes Python 2 semantics; under
        # Python 3 these become floats and later slicing fails -- confirm.
        pnphi = nphi / PFRAC
        pnm = nm / PFRAC
        pnl = nl / PFRAC
        self.m.message("Extracting beam patch and computing highest " +
                       "external sidelobe...", 2)
        tdb = FSCImage(self.sfnbase + '_tdb.hdf5', np.dtype('complex128'),
                       self.coords.grid_def, self.coords.grid_params,
                       m=self.m, grid_dtype=np.dtype('float64'))
        self.register_scratch_files([tdb.fn, tdb.osim.fn, tdb.fourier_grid.fn])
        db.copy_to(tdb)
        # Central patch of the dirty beam used for minor-cycle subtraction
        patch = tdb.im[nphi / 2 - pnphi / 2:nphi / 2 + pnphi / 2,
                       nm / 2 - pnm / 2:nm / 2 + pnm / 2,
                       nl / 2 - pnl / 2:nl / 2 + pnl / 2]
        # get only the rest of the beam outside of the patch
        tdb.im[nphi / 2 - pnphi / 2:nphi / 2 + pnphi / 2,
               nm / 2 - pnm / 2:nm / 2 + pnm / 2,
               nl / 2 - pnl / 2:nl / 2 + pnl / 2] = np.zeros((pnphi, pnm, pnl),
                                                             dtype=np.dtype('complex128'))
        # find the largest sidelobe external to the patch
        extsl = tdb.find_max(abs)
        # for test dataset, extsl should be 0.112
        self.m.message("Largest sidelobe level outside beam patch: " +
                       str(extsl), 3)
        cclist = list()
        stop = False
        N = 1
        total_flux = complex(0, 0)
        while True:
            # Major cycle: recompute the exact residual from the
            # (model-subtracted) visibilities.
            self.m.message("Begin Major Cycle", 1)
            # im will contain the residual image going forward
            vis.transform(im)
            im.multiplywith(self.K)
            [pphi, pm, pl] = im.find_argmax(abs)
            pval = im.im[pphi, pm, pl]
            self.m.message("Initial residual map peak: " + str(abs(pval)), 1)
            if abs(pval) < cutoff:
                self.m.success("Stopping! Cutoff has been reached.")
                stop = True
                break
            # Minor cycles run down to the highest external sidelobe
            # level, inflated by the loop factor F.
            slim = extsl * abs(pval)
            F = 1. + 1. / N
            tcclist = list()
            if abs(pval) < slim * F:
                slim = abs(pval) / F
            self.m.message("Initial minor cycle stop level: " +
                           str(slim * F), 2)
            while abs(pval) >= slim * F:
                # Minor cycle: subtract only the central beam patch.
                total_flux = total_flux + pval * gain
                self.m.message(". Starting minor cycle " + str(N), 2)
                self.m.message(". CLEAN Component info:", 2)
                self.m.message(". . value: " + str(pval * gain), 2)
                self.m.message(". . abs. value: " +
                               str(abs(pval * gain)), 2)
                self.m.message(". . phi: " + str(pphi), 2)
                self.m.message(". . m: " + str(pm), 2)
                self.m.message(". . l: " + str(pl), 2)
                self.m.message(". . total pol. flux: " +
                               str(abs(total_flux)), 2)
                tcclist.append([pphi, pm, pl, pval * gain])
                # find phimin/max, lmin/max, mmin/max, accounting for map edges
                # crop the patch if necessary (because it runs off the edge)
                phimax = pphi + pnphi / 2
                phimin = pphi - pnphi / 2
                pc_phi_low = 0
                pc_phi_high = pnphi
                if phimin < 0:
                    phimin = 0
                    # lower index of the cropped patch
                    pc_phi_low = pnphi / 2 - pphi
                if phimax > nphi:
                    phimax = nphi
                    # upper index of the cropped patch
                    pc_phi_high = pnphi / 2 + (nphi - pphi)
                mmax = pm + pnm / 2
                mmin = pm - pnm / 2
                pc_m_low = 0
                pc_m_high = pnm
                if mmin < 0:
                    mmin = 0
                    # lower index of the cropped patch
                    pc_m_low = pnm / 2 - pm
                if mmax > nm:
                    mmax = nm
                    # upper index of the cropped patch
                    pc_m_high = pnm / 2 + (nm - pm)
                lmax = pl + pnl / 2
                lmin = pl - pnl / 2
                pc_l_low = 0
                pc_l_high = pnl
                if lmin < 0:
                    lmin = 0
                    # lower index of the cropped patch
                    pc_l_low = pnl / 2 - pl
                if lmax > nl:
                    lmax = nl
                    # upper index of the cropped patch
                    pc_l_high = pnl / 2 + (nl - pl)
                tpatch = patch[pc_phi_low:pc_phi_high,
                               pc_m_low:pc_m_high,
                               pc_l_low:pc_l_high].copy()
                im.im[phimin:phimax, mmin:mmax, lmin:lmax] = \
                    im.im[phimin:phimax, mmin:mmax, lmin:lmax] - \
                    gain * pval * tpatch
                [pphi, pm, pl] = im.find_argmax(abs)
                pval = im.im[pphi, pm, pl]
                N += 1
                F += 1. / N
                if abs(pval) < cutoff or N > niter:
                    # Is this true? Does the peak value found during the minor
                    # cycle count for the stop condition? The residual image
                    # here is kind of meaningless
                    self.m.success("Stopping! Cutoff or niter " +
                                   "has been reached.")
                    stop = True
                    break
            if not stop:
                self.m.message("Minor cycle stop condition reached.", 1)
            # Invert the minor-cycle model to visibility space and
            # subtract it from the data for the next major cycle.
            self.m.message("Inverting model to vis. space and " +
                           "subtracting...", 1)
            tcclist = self.condense_cc_list(tcclist)
            self.make_cclist_image(tcclist, pointim)
            pointim.transform(modelvis)
            modelvis.multiplywith(self.Kinv)
            modelvis.multiplywith(weights)
            vis.subtractoff(modelvis)
            self.m.message("Done.", 1)
            cclist = cclist + tcclist
            if stop:
                break
        self.m.message("Inverting CLEAN model...", 1)
        del patch
        cclist = self.condense_cc_list(cclist)  # OK
        self.make_cclist_image(cclist, pointim)  # OK
        self.m.message("Convolving with CLEAN beam...", 1)
        self.make_beam_image(tdb)  # OK
        pointim.convolve_with(tdb)  # OK
        # construct residual image
        vis.transform(im)
        im.multiplywith(self.K)
        resim = FSCImage(self.sfnbase + '_resim.hdf5',
                         np.dtype('complex128'),
                         self.coords.grid_def, self.coords.grid_params,
                         m=self.m)
        im.copy_to(resim)
        self.register_scratch_files([resim.osim.fn, resim.fourier_grid.fn])
        im.addto(pointim)
        return cclist, resim

    def make_beam_image(self, beamim):
        """
        Fill `beamim` with a 3D Gaussian restoring ("CLEAN") beam.

        The beam FWHMs come from the parset (bmaj/bmin in arcsec, bphi in
        rad m^-2) and are converted to pixels; the Gaussian is evaluated
        only on a central patch (1/beam_patch_frac of each axis) and is
        zero elsewhere.
        Args:
            beamim: FSCImage object to overwrite with the beam.
        Returns:
            Nothing.
        """
        beamim.multiplywith(0.)
        ln2 = 0.693147181
        # Convert arcsec to pixels which are used below
        bmaj = self.pm.parset['bmaj'] / self.pm.parset['cellsize']
        bmin = self.pm.parset['bmin'] / self.pm.parset['cellsize']
        bphi = self.pm.parset['bphi'] / self.pm.parset['dphi']
        # 1/sigma^2 from the FWHM: FWHM^2 = 8 ln2 sigma^2
        invsigmal2 = 8 * ln2 * bmaj ** -2.
        invsigmam2 = 8 * ln2 * bmin ** -2.
        invsigmaphi2 = 8 * ln2 * bphi ** -2.
        # the size of the image over which to compute the gaussian
        # zero outside
        denom = self.pm.parset['beam_patch_frac']
        [nphi, nm, nl] = beamim.im.shape
        # NOTE(review): Python 2 integer division assumed for the patch
        # shape and center indices -- confirm.
        patch = np.zeros((nphi / denom, nm / denom, nl / denom),
                         dtype=beamim.im.dtype)
        phic = patch.shape[0] / 2
        mc = patch.shape[1] / 2
        lc = patch.shape[2] / 2
        philow = nphi / 2 - phic
        phihigh = nphi / 2 + phic
        mlow = nm / 2 - mc
        mhigh = nm / 2 + mc
        llow = nl / 2 - lc
        lhigh = nl / 2 + lc
        for i in range(patch.shape[0]):
            for j in range(patch.shape[1]):
                for k in range(patch.shape[2]):
                    patch[i, j, k] = np.exp(-0.5 * (invsigmaphi2 *
                                                    (i - phic) ** 2 +
                                                    invsigmam2 * (j - mc) ** 2 +
                                                    invsigmal2 * (k - lc) ** 2))
        beamim.im[philow:phihigh, mlow:mhigh, llow:lhigh] = patch

    def make_cclist_image(self, cclist, im):
        """
        Render a CLEAN component list into a point-source model image.
        Args:
            cclist: list of [phi, m, l, flux] components.
            im: FSCImage object to overwrite with the model.
        Returns:
            Nothing.
        """
        im.multiplywith(0.)
        # list entries... [phi, m, l, val]
        for i in range(len(cclist)):
            [phi, m, l, val] = cclist[i]
            # there must be a better way...
            im.im[phi, m, l] = im.im[phi, m, l] + val
if __name__ == '__main__':
    """
    Handle all parsing here if started from the command line, then pass off to
    the main routine.
    """
    desc = "Software for reconstructing the Faraday spectrum, i.e. the 3D " + \
        "distribution of polarized intensity as a function of Faraday depth" +\
        " and position on the sky, from full-polarization, multi-frequency " +\
        "visibility data. Imaging is conducted using the Faraday " + \
        "synthesis technique (for details see Bell and Ensslin, 2012). " + \
        "Deconvolution is " + \
        "carried out using a 3D CLEAN algorithm. " + \
        "Data is read from MeasurementSet files of the type used by CASA. " + \
        "Images are written to HDF5 image files."
    parser = OptionParser(usage="%prog <parset file> <in file> <out file>",
                          description=desc, version="%prog " + VERSION)
    parser.add_option("-p", "--parset_desc", action="store_true",
                      help="show parameter set file description and exit",
                      default=False)
    (options, args) = parser.parse_args()
    pm = FSCleanPM()
    if options.parset_desc:
        # Only describe the parset format, then exit without imaging.
        pm.print_help()
    else:
        # Expect exactly: <parset file> <in file> <out file>
        if len(args) != 3:
            parser.error("Incorrect number of arguments.")
        pm.parse_file(args[0])
        fsc = FSClean(pm)
        fsc.run(args[1], args[2])
|
mrbell/fsclean
|
fsclean.py
|
Python
|
gpl-3.0
| 28,709
|
[
"Gaussian"
] |
1456c33b4a0ace8b28f0de7b1007127754b8bee06cc5ca11c6b6dbe54a1fe2f6
|
# Copyright 2013-2015 James S Blachly, MD and The Ohio State University
#
# This file is part of Mucor.
#
# Mucor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mucor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mucor. If not, see <http://www.gnu.org/licenses/>.
# mucorfeature.py
import HTSeq
# mucor modules
from variant import Variant
class MucorFeature(HTSeq.GenomicFeature):
    '''Specific Genomic Feature. For example, gene SF3B1, band 13q13.1, or chromosome X'''

    def __init__(self, name, type_, interval):
        # Validate inputs before delegating to the HTSeq base class.
        if name == '': raise NameError('name was an empty string')
        if type_ == '': raise NameError('type_ was an empty string')
        if not isinstance(interval, HTSeq.GenomicInterval): raise TypeError('interval must be of type HTSeq.GenomicInterval')
        self.variants = set() # empty set to be filled with objects of class Variant
        HTSeq.GenomicFeature.__init__(self, name, type_, interval)

    def numVariants(self):
        '''Return the total number of variants recorded for this feature.'''
        return len(self.variants)

    def weightedVariants(self):
        '''Instead of returning the number of variants, return the sum of tumor_f for all variants'''
        tumor_f_sum = 0.0
        for var in self.variants:
            tumor_f_sum += float(var.frac)
        return tumor_f_sum

    def uniqueVariants(self):
        '''Return the set of unique variants from the set of all variants (for this feature)'''
        # exploit the hashtable and uniqueness of sets to quickly find
        # unique tuples (contig, pos, ref, alt) of variant info
        # sorted by chrom, pos
        uniqueVariantsTemp = set()
        for var in self.variants:
            candidate = (var.pos.chrom, var.pos.pos, var.ref, var.alt)
            uniqueVariantsTemp.add(candidate)
        # sort by chr, then position
        # TO DO: python sorted() will sort as: chr1, chr10, chr2, chr20, chrX. Fix.
        uniqueVariantsTemp = sorted(uniqueVariantsTemp, key=lambda varx: ( varx[0] + str(varx[1]) ) )
        # Now construct a returnable set of Variant objects,
        # specifying multiple "sources" in the source field
        # this loop's inner-product is #unique variants * #total variants, times #features
        # and is a major inefficiency
        uniqueVariants = set()
        for uniqueVarTup in uniqueVariantsTemp:
            # Comma-joined per-source values, accumulated as strings
            source = ""
            frac = ""
            dp = ""
            eff = ""
            fc = ""
            #annot = ""
            for varClass in self.variants:
                if (varClass.pos.chrom, varClass.pos.pos, varClass.ref, varClass.alt) == uniqueVarTup:
                    source += varClass.source + ", "
                    frac += str(varClass.frac) + ", "
                    dp += str(varClass.dp) + ", "
                    eff += str(varClass.eff) + ", "
                    fc += str(varClass.fc) + ", "
                    #annot += str(varClass.annot) + ", "
            pos = HTSeq.GenomicPosition(uniqueVarTup[0], uniqueVarTup[1] )
            # strip(", ") drops the trailing separator left by the loop above
            uniqueVar = Variant(source.strip(", "), pos, ref=uniqueVarTup[2], alt=uniqueVarTup[3], frac=str(frac).strip(", "), dp=str(dp).strip(", "), eff=str(eff).strip(", "), fc=str(fc).strip(", ")) ######## Karl Modified ##############
            uniqueVariants.add(uniqueVar)
        return uniqueVariants

    def numUniqueVariants(self):
        '''Return the number of unique variants from the set of all variants (for this feature)'''
        return len(self.uniqueVariants())

    def numUniqueSamples(self):
        '''Return the number of distinct variant sources (samples) seen for this feature.'''
        sources = set()
        for var in self.variants:
            sources.add(var.source)
        return len(sources)
|
blachlylab/mucor
|
mucorfeature.py
|
Python
|
gpl-3.0
| 3,693
|
[
"HTSeq"
] |
93b223ba1a1066728c3b38a155b660e9d341c562204a91efd6671db1878d974c
|
# Library of simple models.
import moose
def simple_model_a():
    """
    Build a minimal MOOSE kinetic model: one reversible reaction
    a <-> b + c in a cubic compartment, with a Table2 recorder attached
    to each pool's concentration.

    Returns:
        [tabA, tabB, tabC]: the three Table2 objects recording the
        concentrations of pools a, b, and c respectively.
    """
    compt = moose.CubeMesh( '/compt' )
    r = moose.Reac( '/compt/r' )
    # Pools with initial concentrations
    a = moose.Pool( '/compt/a' )
    a.concInit = 1
    b = moose.Pool( '/compt/b' )
    b.concInit = 2
    c = moose.Pool( '/compt/c' )
    c.concInit = 0.5
    # Wire the reaction: a is the substrate, b and c are products
    moose.connect( r, 'sub', a, 'reac' )
    moose.connect( r, 'prd', b, 'reac' )
    moose.connect( r, 'prd', c, 'reac' )
    r.Kf = 0.1
    r.Kb = 0.01
    # Tables are deliberately parented at different depths
    # (under a pool, under the compartment, under another table)
    tabA = moose.Table2( '/compt/a/tab' )
    tabB = moose.Table2( '/compt/tabB' )
    tabC = moose.Table2( '/compt/tabB/tabC' )
    # NOTE(review): debug print left in; writes the table ids to stdout
    print(tabA, tabB, tabC)
    # Record each pool's concentration into its table
    moose.connect( tabA, 'requestOut', a, 'getConc' )
    moose.connect( tabB, 'requestOut', b, 'getConc' )
    moose.connect( tabC, 'requestOut', c, 'getConc' )
    return [tabA, tabB, tabC]
|
upibhalla/moose-core
|
tests/python/models.py
|
Python
|
gpl-3.0
| 798
|
[
"MOOSE"
] |
202a080eab7cf9d0328f2129f638a3e095e9a623edf8656536d14841f8dbd26c
|
# -*- coding: utf-8 -*-
"""
End-to-end tests related to the cohort management on the LMS Instructor Dashboard
"""
from datetime import datetime
from pymongo import MongoClient
from pytz import UTC, utc
from bok_choy.promise import EmptyPromise
from .helpers import CohortTestMixin
from ..helpers import UniqueCourseTest, create_user_partition_json
from xmodule.partitions.partitions import Group
from ...fixtures.course import CourseFixture
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.instructor_dashboard import InstructorDashboardPage, DataDownloadPage
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
import uuid
class CohortConfigurationTest(UniqueCourseTest, CohortTestMixin):
"""
Tests for cohort management on the LMS Instructor Dashboard
"""
def setUp(self):
"""
Set up a cohorted course
"""
super(CohortConfigurationTest, self).setUp()
self.event_collection = MongoClient()["test"]["events"]
# create course with cohorts
self.manual_cohort_name = "ManualCohort1"
self.auto_cohort_name = "AutoCohort1"
self.course_fixture = CourseFixture(**self.course_info).install()
self.setup_cohort_config(self.course_fixture, auto_cohort_groups=[self.auto_cohort_name])
self.manual_cohort_id = self.add_manual_cohort(self.course_fixture, self.manual_cohort_name)
# create a non-instructor who will be registered for the course and in the manual cohort.
self.student_name = "student_user"
self.student_id = AutoAuthPage(
self.browser, username=self.student_name, email="student_user@example.com",
course_id=self.course_id, staff=False
).visit().get_user_id()
self.add_user_to_cohort(self.course_fixture, self.student_name, self.manual_cohort_id)
# create a user with unicode characters in their username
self.unicode_student_id = AutoAuthPage(
self.browser, username="Ωπ", email="unicode_student_user@example.com",
course_id=self.course_id, staff=False
).visit().get_user_id()
# login as an instructor
self.instructor_name = "instructor_user"
self.instructor_id = AutoAuthPage(
self.browser, username=self.instructor_name, email="instructor_user@example.com",
course_id=self.course_id, staff=True
).visit().get_user_id()
# go to the membership page on the instructor dashboard
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
self.instructor_dashboard_page.visit()
membership_page = self.instructor_dashboard_page.select_membership()
self.cohort_management_page = membership_page.select_cohort_management_section()
def verify_cohort_description(self, cohort_name, expected_description):
"""
Selects the cohort with the given name and verifies the expected description is presented.
"""
self.cohort_management_page.select_cohort(cohort_name)
self.assertEquals(self.cohort_management_page.get_selected_cohort(), cohort_name)
self.assertIn(expected_description, self.cohort_management_page.get_cohort_group_setup())
def test_cohort_description(self):
"""
Scenario: the cohort configuration management in the instructor dashboard specifies whether
students are automatically or manually assigned to specific cohorts.
Given I have a course with a manual cohort and an automatic cohort defined
When I view the manual cohort in the instructor dashboard
There is text specifying that students are only added to the cohort manually
And when I view the automatic cohort in the instructor dashboard
There is text specifying that students are automatically added to the cohort
"""
self.verify_cohort_description(
self.manual_cohort_name,
'Students are added to this cohort only when you provide '
'their email addresses or usernames on this page',
)
self.verify_cohort_description(
self.auto_cohort_name,
'Students are added to this cohort automatically',
)
def test_no_content_groups(self):
"""
Scenario: if the course has no content groups defined (user_partitions of type cohort),
the settings in the cohort management tab reflect this
Given I have a course with a cohort defined but no content groups
When I view the cohort in the instructor dashboard and select settings
Then the cohort is not linked to a content group
And there is text stating that no content groups are defined
And I cannot select the radio button to enable content group association
And there is a link I can select to open Group settings in Studio
"""
self.cohort_management_page.select_cohort(self.manual_cohort_name)
self.assertIsNone(self.cohort_management_page.get_cohort_associated_content_group())
self.assertEqual(
"Warning:\nNo content groups exist. Create a content group",
self.cohort_management_page.get_cohort_related_content_group_message()
)
self.assertFalse(self.cohort_management_page.select_content_group_radio_button())
self.cohort_management_page.select_studio_group_settings()
group_settings_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_settings_page.wait_for_page()
def test_link_to_studio(self):
"""
Scenario: a link is present from the cohort configuration in the instructor dashboard
to the Studio Advanced Settings.
Given I have a course with a cohort defined
When I view the cohort in the LMS instructor dashboard
There is a link to take me to the Studio Advanced Settings for the course
"""
self.cohort_management_page.select_cohort(self.manual_cohort_name)
self.cohort_management_page.select_edit_settings()
advanced_settings_page = AdvancedSettingsPage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
advanced_settings_page.wait_for_page()
def test_add_students_to_cohort_success(self):
"""
Scenario: When students are added to a cohort, the appropriate notification is shown.
Given I have a course with two cohorts
And there is a user in one cohort
And there is a user in neither cohort
When I add the two users to the cohort that initially had no users
Then there are 2 users in total in the cohort
And I get a notification that 2 users have been added to the cohort
And I get a notification that 1 user was moved from the other cohort
And the user input field is empty
And appropriate events have been emitted
"""
start_time = datetime.now(UTC)
self.cohort_management_page.select_cohort(self.auto_cohort_name)
self.assertEqual(0, self.cohort_management_page.get_selected_cohort_count())
self.cohort_management_page.add_students_to_selected_cohort([self.student_name, self.instructor_name])
# Wait for the number of users in the cohort to change, indicating that the add operation is complete.
EmptyPromise(
lambda: 2 == self.cohort_management_page.get_selected_cohort_count(), 'Waiting for added students'
).fulfill()
confirmation_messages = self.cohort_management_page.get_cohort_confirmation_messages()
self.assertEqual(
[
"2 students have been added to this cohort",
"1 student was removed from " + self.manual_cohort_name
],
confirmation_messages
)
self.assertEqual("", self.cohort_management_page.get_cohort_student_input_field_value())
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_added",
"time": {"$gt": start_time},
"event.user_id": {"$in": [int(self.instructor_id), int(self.student_id)]},
"event.cohort_name": self.auto_cohort_name,
}).count(),
2
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_removed",
"time": {"$gt": start_time},
"event.user_id": int(self.student_id),
"event.cohort_name": self.manual_cohort_name,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_add_requested",
"time": {"$gt": start_time},
"event.user_id": int(self.instructor_id),
"event.cohort_name": self.auto_cohort_name,
"event.previous_cohort_name": None,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.user_add_requested",
"time": {"$gt": start_time},
"event.user_id": int(self.student_id),
"event.cohort_name": self.auto_cohort_name,
"event.previous_cohort_name": self.manual_cohort_name,
}).count(),
1
)
def test_add_students_to_cohort_failure(self):
"""
Scenario: When errors occur when adding students to a cohort, the appropriate notification is shown.
Given I have a course with a cohort and a user already in it
When I add the user already in a cohort to that same cohort
And I add a non-existing user to that cohort
Then there is no change in the number of students in the cohort
And I get a notification that one user was already in the cohort
And I get a notification that one user is unknown
And the user input field still contains the incorrect email addresses
"""
self.cohort_management_page.select_cohort(self.manual_cohort_name)
self.assertEqual(1, self.cohort_management_page.get_selected_cohort_count())
self.cohort_management_page.add_students_to_selected_cohort([self.student_name, "unknown_user"])
# Wait for notification messages to appear, indicating that the add operation is complete.
EmptyPromise(
lambda: 2 == len(self.cohort_management_page.get_cohort_confirmation_messages()), 'Waiting for notification'
).fulfill()
self.assertEqual(1, self.cohort_management_page.get_selected_cohort_count())
self.assertEqual(
[
"0 students have been added to this cohort",
"1 student was already in the cohort"
],
self.cohort_management_page.get_cohort_confirmation_messages()
)
self.assertEqual(
[
"There was an error when trying to add students:",
"Unknown user: unknown_user"
],
self.cohort_management_page.get_cohort_error_messages()
)
self.assertEqual(
self.student_name + ",unknown_user,",
self.cohort_management_page.get_cohort_student_input_field_value()
)
def test_add_new_cohort(self):
"""
Scenario: A new manual cohort can be created, and a student assigned to it.
Given I have a course with a user in the course
When I add a new manual cohort to the course via the LMS instructor dashboard
Then the new cohort is displayed and has no users in it
And when I add the user to the new cohort
Then the cohort has 1 user
And appropriate events have been emitted
"""
start_time = datetime.now(UTC)
new_cohort = str(uuid.uuid4().get_hex()[0:20])
self.assertFalse(new_cohort in self.cohort_management_page.get_cohorts())
self.cohort_management_page.add_cohort(new_cohort)
# After adding the cohort, it should automatically be selected
EmptyPromise(
lambda: new_cohort == self.cohort_management_page.get_selected_cohort(), "Waiting for new cohort to appear"
).fulfill()
self.assertEqual(0, self.cohort_management_page.get_selected_cohort_count())
self.cohort_management_page.add_students_to_selected_cohort([self.instructor_name])
# Wait for the number of users in the cohort to change, indicating that the add operation is complete.
EmptyPromise(
lambda: 1 == self.cohort_management_page.get_selected_cohort_count(), 'Waiting for student to be added'
).fulfill()
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.created",
"time": {"$gt": start_time},
"event.cohort_name": new_cohort,
}).count(),
1
)
self.assertEqual(
self.event_collection.find({
"name": "edx.cohort.creation_requested",
"time": {"$gt": start_time},
"event.cohort_name": new_cohort,
}).count(),
1
)
def test_link_to_data_download(self):
"""
Scenario: a link is present from the cohort configuration in
the instructor dashboard to the Data Download section.
Given I have a course with a cohort defined
When I view the cohort in the LMS instructor dashboard
There is a link to take me to the Data Download section of the Instructor Dashboard.
"""
self.cohort_management_page.select_data_download()
data_download_page = DataDownloadPage(self.browser)
data_download_page.wait_for_page()
    def test_cohort_by_csv_both_columns(self):
        """
        Scenario: the instructor can upload a file with user and cohort assignments, using both emails and usernames.
        Given I have a course with two cohorts defined
        When I go to the cohort management section of the instructor dashboard
        I can upload a CSV file with assignments of users to cohorts via both usernames and emails
        Then I can download a file with results
        And appropriate events have been emitted
        """
        # Delegates all verification to the shared helper.
        # cohort_users_both_columns.csv adds instructor_user to ManualCohort1 via username and
        # student_user to AutoCohort1 via email
        self._verify_csv_upload_acceptable_file("cohort_users_both_columns.csv")
    def test_cohort_by_csv_only_email(self):
        """
        Scenario: the instructor can upload a file with user and cohort assignments, using only emails.
        Given I have a course with two cohorts defined
        When I go to the cohort management section of the instructor dashboard
        I can upload a CSV file with assignments of users to cohorts via only emails
        Then I can download a file with results
        And appropriate events have been emitted
        """
        # Delegates all verification to the shared helper.
        # cohort_users_only_email.csv adds instructor_user to ManualCohort1 and student_user to AutoCohort1 via email
        self._verify_csv_upload_acceptable_file("cohort_users_only_email.csv")
    def test_cohort_by_csv_only_username(self):
        """
        Scenario: the instructor can upload a file with user and cohort assignments, using only usernames.
        Given I have a course with two cohorts defined
        When I go to the cohort management section of the instructor dashboard
        I can upload a CSV file with assignments of users to cohorts via only usernames
        Then I can download a file with results
        And appropriate events have been emitted
        """
        # Delegates all verification to the shared helper.
        # cohort_users_only_username.csv adds instructor_user to ManualCohort1 and
        # student_user to AutoCohort1 via username
        self._verify_csv_upload_acceptable_file("cohort_users_only_username.csv")
    def _verify_csv_upload_acceptable_file(self, filename):
        """
        Helper method to verify cohort assignments after a successful CSV upload.

        Uploads ``filename``, asserts that the expected add/remove tracking
        events were emitted, and verifies that a results report can be
        downloaded from the Data Download section. All CSV fixtures used with
        this helper are expected to produce the same three assignments (see
        the inline comments below).
        """
        start_time = datetime.now(UTC)
        self.cohort_management_page.upload_cohort_file(filename)
        self._verify_cohort_by_csv_notification(
            "Your file '{}' has been uploaded. Allow a few minutes for processing.".format(filename)
        )
        # student_user is moved from manual cohort to auto cohort
        self.assertEqual(
            self.event_collection.find({
                "name": "edx.cohort.user_added",
                "time": {"$gt": start_time},
                "event.user_id": {"$in": [int(self.student_id)]},
                "event.cohort_name": self.auto_cohort_name,
            }).count(),
            1
        )
        self.assertEqual(
            self.event_collection.find({
                "name": "edx.cohort.user_removed",
                "time": {"$gt": start_time},
                "event.user_id": int(self.student_id),
                "event.cohort_name": self.manual_cohort_name,
            }).count(),
            1
        )
        # instructor_user (previously unassigned) is added to manual cohort
        self.assertEqual(
            self.event_collection.find({
                "name": "edx.cohort.user_added",
                "time": {"$gt": start_time},
                "event.user_id": {"$in": [int(self.instructor_id)]},
                "event.cohort_name": self.manual_cohort_name,
            }).count(),
            1
        )
        # unicode_student_user (previously unassigned) is added to manual cohort
        self.assertEqual(
            self.event_collection.find({
                "name": "edx.cohort.user_added",
                "time": {"$gt": start_time},
                "event.user_id": {"$in": [int(self.unicode_student_id)]},
                "event.cohort_name": self.manual_cohort_name,
            }).count(),
            1
        )
        # Verify the results can be downloaded.
        data_download = self.instructor_dashboard_page.select_data_download()
        EmptyPromise(
            lambda: 1 == len(data_download.get_available_reports_for_download()), 'Waiting for downloadable report'
        ).fulfill()
        report = data_download.get_available_reports_for_download()[0]
        base_file_name = "cohort_results_"
        self.assertIn("{}_{}".format(
            '_'.join([self.course_info['org'], self.course_info['number'], self.course_info['run']]), base_file_name
        ), report)
        # The report name ends with a UTC timestamp; parse it and check it is not
        # older than the start of this test. Seconds/microseconds are truncated
        # for the comparison because the filename only has minute resolution.
        report_datetime = datetime.strptime(
            report[report.index(base_file_name) + len(base_file_name):-len(".csv")],
            "%Y-%m-%d-%H%M"
        )
        self.assertLessEqual(start_time.replace(second=0, microsecond=0), utc.localize(report_datetime))
    def test_cohort_by_csv_wrong_file_type(self):
        """
        Scenario: if the instructor uploads a non-csv file, an error message is presented.
        Given I have a course with cohorting enabled
        When I go to the cohort management section of the instructor dashboard
        And I upload a file without the CSV extension
        Then I get an error message stating that the file must have a CSV extension
        """
        # A JPEG is used as a representative non-CSV upload.
        self.cohort_management_page.upload_cohort_file("image.jpg")
        self._verify_cohort_by_csv_notification("The file must end with the extension '.csv'.")
    def test_cohort_by_csv_missing_cohort(self):
        """
        Scenario: if the instructor uploads a csv file with no cohort column, an error message is presented.
        Given I have a course with cohorting enabled
        When I go to the cohort management section of the instructor dashboard
        And I upload a CSV file that is missing the cohort column
        Then I get an error message stating that the file must have a cohort column
        """
        self.cohort_management_page.upload_cohort_file("cohort_users_missing_cohort_column.csv")
        self._verify_cohort_by_csv_notification("The file must contain a 'cohort' column containing cohort names.")
    def test_cohort_by_csv_missing_user(self):
        """
        Scenario: if the instructor uploads a csv file with no username or email column, an error message is presented.
        Given I have a course with cohorting enabled
        When I go to the cohort management section of the instructor dashboard
        And I upload a CSV file that is missing both the username and email columns
        Then I get an error message stating that the file must have either a username or email column
        """
        self.cohort_management_page.upload_cohort_file("cohort_users_missing_user_columns.csv")
        self._verify_cohort_by_csv_notification(
            "The file must contain a 'username' column, an 'email' column, or both."
        )
def _verify_cohort_by_csv_notification(self, expected_message):
"""
Helper method to check the CSV file upload notification message.
"""
# Wait for notification message to appear, indicating file has been uploaded.
EmptyPromise(
lambda: 1 == len(self.cohort_management_page.get_csv_messages()), 'Waiting for notification'
).fulfill()
messages = self.cohort_management_page.get_csv_messages()
self.assertEquals(expected_message, messages[0])
class CohortContentGroupAssociationTest(UniqueCourseTest, CohortTestMixin):
    """
    Tests for linking between content groups and cohort in the instructor dashboard.
    """

    def setUp(self):
        """
        Set up a cohorted course with a user_partition of scheme "cohort".
        """
        super(CohortContentGroupAssociationTest, self).setUp()

        # Create a course with a single cohort and two content groups
        # (a user_partition of type "cohort").
        self.cohort_name = "OnlyCohort"
        self.course_fixture = CourseFixture(**self.course_info).install()
        self.setup_cohort_config(self.course_fixture)
        self.cohort_id = self.add_manual_cohort(self.course_fixture, self.cohort_name)
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Apples, Bananas',
                        'Content Group Partition',
                        [Group("0", 'Apples'), Group("1", 'Bananas')],
                        scheme="cohort"
                    )
                ],
            },
        })

        # Log in as an instructor and navigate to the cohort management section
        # on the membership page of the instructor dashboard.
        self.instructor_name = "instructor_user"
        self.instructor_id = AutoAuthPage(
            self.browser, username=self.instructor_name, email="instructor_user@example.com",
            course_id=self.course_id, staff=True
        ).visit().get_user_id()
        self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
        self.instructor_dashboard_page.visit()
        membership_page = self.instructor_dashboard_page.select_membership()
        self.cohort_management_page = membership_page.select_cohort_management_section()

    def test_no_content_group_linked(self):
        """
        Scenario: In a course with content groups, cohorts are initially not linked to a content group
        Given I have a course with a cohort defined and content groups defined
        When I view the cohort in the instructor dashboard and select settings
        Then the cohort is not linked to a content group
        And there is no text stating that content groups are undefined
        And the content groups are listed in the selector
        """
        self.cohort_management_page.select_cohort(self.cohort_name)
        self.assertIsNone(self.cohort_management_page.get_cohort_associated_content_group())
        self.assertIsNone(self.cohort_management_page.get_cohort_related_content_group_message())
        # FIX: assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(["Apples", "Bananas"], self.cohort_management_page.get_all_content_groups())

    def test_link_to_content_group(self):
        """
        Scenario: In a course with content groups, cohorts can be linked to content groups
        Given I have a course with a cohort defined and content groups defined
        When I view the cohort in the instructor dashboard and select settings
        And I link the cohort to one of the content groups and save
        Then there is a notification that my cohort has been saved
        And when I reload the page
        And I view the cohort in the instructor dashboard and select settings
        Then the cohort is still linked to the content group
        """
        self._link_cohort_to_content_group(self.cohort_name, "Bananas")
        self.assertEqual("Bananas", self.cohort_management_page.get_cohort_associated_content_group())

    def test_unlink_from_content_group(self):
        """
        Scenario: In a course with content groups, cohorts can be unlinked from content groups
        Given I have a course with a cohort defined and content groups defined
        When I view the cohort in the instructor dashboard and select settings
        And I link the cohort to one of the content groups and save
        Then there is a notification that my cohort has been saved
        And I reload the page
        And I view the cohort in the instructor dashboard and select settings
        And I unlink the cohort from any content group and save
        Then there is a notification that my cohort has been saved
        And when I reload the page
        And I view the cohort in the instructor dashboard and select settings
        Then the cohort is not linked to any content group
        """
        self._link_cohort_to_content_group(self.cohort_name, "Bananas")
        # Passing None clears the association.
        self.cohort_management_page.set_cohort_associated_content_group(None)
        self._verify_settings_saved_and_reload(self.cohort_name)
        self.assertEqual(None, self.cohort_management_page.get_cohort_associated_content_group())

    def test_create_new_cohort_linked_to_content_group(self):
        """
        Scenario: In a course with content groups, a new cohort can be linked to a content group
        at time of creation.
        Given I have a course with a cohort defined and content groups defined
        When I create a new cohort and link it to a content group
        Then when I select settings I see that the cohort is linked to the content group
        And when I reload the page
        And I view the cohort in the instructor dashboard and select settings
        Then the cohort is still linked to the content group
        """
        new_cohort = "correctly linked cohort"
        self._create_new_cohort_linked_to_content_group(new_cohort, "Apples")
        self.browser.refresh()
        self.cohort_management_page.wait_for_page()
        self.cohort_management_page.select_cohort(new_cohort)
        self.assertEqual("Apples", self.cohort_management_page.get_cohort_associated_content_group())

    def test_missing_content_group(self):
        """
        Scenario: In a course with content groups, if a cohort is associated with a content group that no longer
        exists, a warning message is shown
        Given I have a course with a cohort defined and content groups defined
        When I create a new cohort and link it to a content group
        And I delete that content group from the course
        And I reload the page
        And I view the cohort in the instructor dashboard and select settings
        Then the settings display a message that the content group no longer exists
        And when I select a different content group and save
        Then the error message goes away
        """
        new_cohort = "linked to missing content group"
        self._create_new_cohort_linked_to_content_group(new_cohort, "Apples")
        # Replace the partition's groups so that "Apples" (group 0) disappears.
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Apples, Bananas',
                        'Content Group Partition',
                        [Group("2", 'Pears'), Group("1", 'Bananas')],
                        scheme="cohort"
                    )
                ],
            },
        })
        self.browser.refresh()
        self.cohort_management_page.wait_for_page()
        self.cohort_management_page.select_cohort(new_cohort)
        self.assertEqual("Deleted Content Group", self.cohort_management_page.get_cohort_associated_content_group())
        # FIX: assertEqual, not the deprecated assertEquals alias (here and below).
        self.assertEqual(
            ["Bananas", "Pears", "Deleted Content Group"],
            self.cohort_management_page.get_all_content_groups()
        )
        self.assertEqual(
            "Warning:\nThe previously selected content group was deleted. Select another content group.",
            self.cohort_management_page.get_cohort_related_content_group_message()
        )
        # Re-linking to an existing content group clears the warning.
        self.cohort_management_page.set_cohort_associated_content_group("Pears")
        confirmation_messages = self.cohort_management_page.get_cohort_settings_messages()
        self.assertEqual(["Saved cohort"], confirmation_messages)
        self.assertIsNone(self.cohort_management_page.get_cohort_related_content_group_message())
        self.assertEqual(["Bananas", "Pears"], self.cohort_management_page.get_all_content_groups())

    def _create_new_cohort_linked_to_content_group(self, new_cohort, cohort_group):
        """
        Creates a new cohort linked to a content group.
        """
        self.cohort_management_page.add_cohort(new_cohort, content_group=cohort_group)
        # After adding the cohort, it should automatically be selected
        EmptyPromise(
            lambda: new_cohort == self.cohort_management_page.get_selected_cohort(), "Waiting for new cohort to appear"
        ).fulfill()
        self.assertEqual(cohort_group, self.cohort_management_page.get_cohort_associated_content_group())

    def _link_cohort_to_content_group(self, cohort_name, content_group):
        """
        Links a cohort to a content group. Saves the changes and verifies the cohort updated properly.
        Then refreshes the page and selects the cohort.
        """
        self.cohort_management_page.select_cohort(cohort_name)
        self.cohort_management_page.set_cohort_associated_content_group(content_group)
        self._verify_settings_saved_and_reload(cohort_name)

    def _verify_settings_saved_and_reload(self, cohort_name):
        """
        Verifies the confirmation message indicating that a cohort's settings have been updated.
        Then refreshes the page and selects the cohort.
        """
        confirmation_messages = self.cohort_management_page.get_cohort_settings_messages()
        self.assertEqual(["Saved cohort"], confirmation_messages)
        self.browser.refresh()
        self.cohort_management_page.wait_for_page()
        self.cohort_management_page.select_cohort(cohort_name)
|
olexiim/edx-platform
|
common/test/acceptance/tests/discussion/test_cohort_management.py
|
Python
|
agpl-3.0
| 31,431
|
[
"VisIt"
] |
49c6f70befa75a4d80489261b1a17d7de9e7209912f0b0d37e478e7477c91f03
|
# $HeadURL: $
''' ResourceManagementDB
Module that provides basic methods to access the ResourceManagementDB.
'''
from datetime import datetime
import sys
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.ResourceStatusSystem.Utilities import MySQLWrapper
__RCSID__ = '$Id: $'
class ResourceManagementDB( object ):
  '''
  Class that defines the tables for the ResourceManagementDB on a python dictionary.

  Each entry of `_tablesDB` maps a table name to a dict with:
    - 'Fields'     : column name -> MySQL column definition
    - 'PrimaryKey' : list of column names forming the primary key
  `_tablesLike` holds reusable table templates; `_likeToTable` maps concrete
  table names to the template they are created from.
  '''

  # Written PrimaryKey as list on purpose !!
  _tablesDB = {}

  # Cached accounting plot results, one row per ( Name, PlotType, PlotName ).
  _tablesDB[ 'AccountingCache' ] = { 'Fields' :
    {
      #'AccountingCacheID' : 'INT UNSIGNED AUTO_INCREMENT NOT NULL',
      'Name' : 'VARCHAR(64) NOT NULL',
      'PlotType' : 'VARCHAR(16) NOT NULL',
      'PlotName' : 'VARCHAR(64) NOT NULL',
      'Result' : 'TEXT NOT NULL',
      'DateEffective' : 'DATETIME NOT NULL',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'Name', 'PlotType', 'PlotName' ]
  }

  # Cached downtime announcements, keyed by their external DowntimeID.
  _tablesDB[ 'DowntimeCache' ] = { 'Fields' :
    {
      'DowntimeID' : 'VARCHAR(64) NOT NULL',
      'Element' : 'VARCHAR(32) NOT NULL',
      'Name' : 'VARCHAR(64) NOT NULL',
      'StartDate' : 'DATETIME NOT NULL',
      'EndDate' : 'DATETIME NOT NULL',
      'Severity' : 'VARCHAR(32) NOT NULL',
      'Description' : 'VARCHAR(512) NOT NULL',
      'Link' : 'VARCHAR(255) NOT NULL',
      'DateEffective' : 'DATETIME NOT NULL',
      'LastCheckTime' : 'DATETIME NOT NULL',
      'GOCDBServiceType' : 'VARCHAR(32) NOT NULL'
    },
    'PrimaryKey' : [ 'DowntimeID' ]
  }

  # Cached GGUS ticket counts per GOC site.
  _tablesDB[ 'GGUSTicketsCache' ] = { 'Fields' :
    {
      'GocSite' : 'VARCHAR(64) NOT NULL',
      'Link' : 'VARCHAR(1024) NOT NULL',
      'OpenTickets' : 'INTEGER NOT NULL DEFAULT 0',
      'Tickets' : 'VARCHAR(1024) NOT NULL',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'GocSite' ]
  }

  # Job status counters per site, one row per ( Site, Timespan ).
  _tablesDB[ 'JobCache' ] = { 'Fields' :
    {
      'Site' : 'VARCHAR(64) NOT NULL',
      'Timespan' : 'INTEGER NOT NULL',
      'Checking' : 'INTEGER NOT NULL DEFAULT 0',
      'Completed' : 'INTEGER NOT NULL DEFAULT 0',
      'Done' : 'INTEGER NOT NULL DEFAULT 0',
      'Failed' : 'INTEGER NOT NULL DEFAULT 0',
      'Killed' : 'INTEGER NOT NULL DEFAULT 0',
      'Matched' : 'INTEGER NOT NULL DEFAULT 0',
      'Received' : 'INTEGER NOT NULL DEFAULT 0',
      'Running' : 'INTEGER NOT NULL DEFAULT 0',
      'Staging' : 'INTEGER NOT NULL DEFAULT 0',
      'Stalled' : 'INTEGER NOT NULL DEFAULT 0',
      'Waiting' : 'INTEGER NOT NULL DEFAULT 0',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'Site', 'Timespan' ]
  }

  # Pilot status counters per computing element, one row per ( CE, Timespan ).
  _tablesDB[ 'PilotCache' ] = { 'Fields' :
    {
      'CE' : 'VARCHAR(64) NOT NULL',
      'Timespan' : 'INTEGER NOT NULL',
      'Scheduled' : 'INTEGER NOT NULL DEFAULT 0',
      'Waiting' : 'INTEGER NOT NULL DEFAULT 0',
      'Submitted' : 'INTEGER NOT NULL DEFAULT 0',
      'Running' : 'INTEGER NOT NULL DEFAULT 0',
      'Done' : 'INTEGER NOT NULL DEFAULT 0',
      'Aborted' : 'INTEGER NOT NULL DEFAULT 0',
      'Cancelled' : 'INTEGER NOT NULL DEFAULT 0',
      'Deleted' : 'INTEGER NOT NULL DEFAULT 0',
      'Failed' : 'INTEGER NOT NULL DEFAULT 0',
      'Held' : 'INTEGER NOT NULL DEFAULT 0',
      'Killed' : 'INTEGER NOT NULL DEFAULT 0',
      'Stalled' : 'INTEGER NOT NULL DEFAULT 0',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'CE', 'Timespan' ]
  }

  # Latest policy evaluation result per ( Element, Name, StatusType, PolicyName ).
  _tablesDB[ 'PolicyResult' ] = { 'Fields' :
    {
      'Element' : 'VARCHAR(32) NOT NULL',
      'Name' : 'VARCHAR(64) NOT NULL',
      'PolicyName' : 'VARCHAR(64) NOT NULL',
      'StatusType' : 'VARCHAR(16) NOT NULL DEFAULT ""',
      'Status' : 'VARCHAR(16) NOT NULL',
      'Reason' : 'VARCHAR(512) NOT NULL DEFAULT "Unspecified"',
      'DateEffective' : 'DATETIME NOT NULL',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'Element', 'Name', 'StatusType', 'PolicyName' ]
  }

  # Storage space-token occupancy per ( Endpoint, Token ).
  _tablesDB[ 'SpaceTokenOccupancyCache' ] = { 'Fields' :
    {
      'Endpoint' : 'VARCHAR( 64 ) NOT NULL',
      'Token' : 'VARCHAR( 64 ) NOT NULL',
      'Total' : 'DOUBLE NOT NULL DEFAULT 0',
      'Guaranteed' : 'DOUBLE NOT NULL DEFAULT 0',
      'Free' : 'DOUBLE NOT NULL DEFAULT 0',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'Endpoint', 'Token' ]
  }

  # Transfer metric values per ( SourceName, DestinationName, Metric ).
  _tablesDB[ 'TransferCache' ] = { 'Fields' :
    {
      'SourceName' : 'VARCHAR( 64 ) NOT NULL',
      'DestinationName' : 'VARCHAR( 64 ) NOT NULL',
      'Metric' : 'VARCHAR( 16 ) NOT NULL',
      'Value' : 'DOUBLE NOT NULL DEFAULT 0',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'SourceName', 'DestinationName', 'Metric' ]
  }

  # Cached user registry entries, keyed by login.
  _tablesDB[ 'UserRegistryCache' ] = { 'Fields' :
    {
      'Login' : 'VARCHAR(16)',
      'Name' : 'VARCHAR(64) NOT NULL',
      'Email' : 'VARCHAR(64) NOT NULL',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'Login' ]
  }

  # VOBOX service/machine status flags per ( Site, System ).
  _tablesDB[ 'VOBOXCache' ] = { 'Fields' :
    {
      'Site' : 'VARCHAR( 64 ) NOT NULL',
      'System' : 'VARCHAR( 64 ) NOT NULL',
      'ServiceUp' : 'INTEGER NOT NULL DEFAULT 0',
      'MachineUp' : 'INTEGER NOT NULL DEFAULT 0',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'Site', 'System' ]
  }

  # Buffer of reported errors, auto-increment primary key.
  _tablesDB[ 'ErrorReportBuffer' ] = { 'Fields' :
    {
      'ID' : 'INT UNSIGNED AUTO_INCREMENT NOT NULL',
      'Name' : 'VARCHAR(64) NOT NULL',
      'ElementType' : 'VARCHAR(32) NOT NULL',
      'Reporter' : 'VARCHAR(64) NOT NULL',
      'ErrorMessage' : 'VARCHAR(512) NOT NULL',
      'Operation' : 'VARCHAR(64) NOT NULL',
      'Arguments' : 'VARCHAR(512) NOT NULL DEFAULT ""',
      'DateEffective' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'ID' ]
  }

  # Table templates: PolicyResult plus an auto-increment ID, used for the
  # log/history tables declared in `_likeToTable` below.
  _tablesLike = {}
  _tablesLike[ 'PolicyResultWithID' ] = { 'Fields' :
    {
      'ID' : 'INT UNSIGNED AUTO_INCREMENT NOT NULL',
      'Element' : 'VARCHAR(32) NOT NULL',
      'Name' : 'VARCHAR(64) NOT NULL',
      'PolicyName' : 'VARCHAR(64) NOT NULL',
      'StatusType' : 'VARCHAR(16) NOT NULL DEFAULT ""',
      'Status' : 'VARCHAR(8) NOT NULL',
      'Reason' : 'VARCHAR(512) NOT NULL DEFAULT "Unspecified"',
      'DateEffective' : 'DATETIME NOT NULL',
      'LastCheckTime' : 'DATETIME NOT NULL'
    },
    'PrimaryKey' : [ 'ID' ]
  }

  # Concrete table name -> template it is created from.
  _likeToTable = {
    'PolicyResultLog' : 'PolicyResultWithID',
    'PolicyResultHistory' : 'PolicyResultWithID',
  }
def __init__( self, mySQL = None, checkTables = False ):
'''
Constructor, accepts any DB or mySQL connection, mostly used for testing
purposes.
'''
self._tableDict = self.__generateTables()
if mySQL is not None:
self.database = mySQL
else:
self.database = DB( 'ResourceManagementDB', 'ResourceStatus/ResourceManagementDB' )
if checkTables:
result = self._createTables( self._tablesDict )
if not result['OK']:
error = 'Failed to check/create tables'
self.log.fatal( 'ResourceManagementDB: %s' % error )
sys.exit( error )
if result['Value']:
self.log.info( "ResourceManagementDB: created tables %s" % result['Value'] )
## SQL Methods ###############################################################
def insert( self, params, meta ):
'''
Inserts args in the DB making use of kwargs where parameters such as
the 'table' are specified ( filled automatically by the Client). Typically you
will not pass kwargs to this function, unless you know what are you doing
and you have a very special use case.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
utcnow = datetime.utcnow().replace( microsecond = 0 )
# We force lastCheckTime to utcnow if it is not present on the params
#if not( 'lastCheckTime' in params and not( params[ 'lastCheckTime' ] is None ) ):
if 'lastCheckTime' in params and params[ 'lastCheckTime' ] is None:
params[ 'lastCheckTime' ] = utcnow
if 'dateEffective' in params and params[ 'dateEffective' ] is None:
params[ 'dateEffective' ] = utcnow
return MySQLWrapper.insert( self, params, meta )
def update( self, params, meta ):
'''
Updates row with values given on args. The row selection is done using the
default of MySQLMonkey ( column.primary or column.keyColumn ). It can be
modified using kwargs. The 'table' keyword argument is mandatory, and
filled automatically by the Client. Typically you will not pass kwargs to
this function, unless you know what are you doing and you have a very
special use case.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
# We force lastCheckTime to utcnow if it is not present on the params
#if not( 'lastCheckTime' in params and not( params[ 'lastCheckTime' ] is None ) ):
if 'lastCheckTime' in params and params[ 'lastCheckTime' ] is None:
params[ 'lastCheckTime' ] = datetime.utcnow().replace( microsecond = 0 )
return MySQLWrapper.update( self, params, meta )
  def select( self, params, meta ):
    '''
    Uses arguments to build conditional SQL statement ( WHERE ... ). If the
    sql statement desired is more complex, you can use kwargs to interact with
    the MySQL buildCondition parser and generate a more sophisticated query.

    :Parameters:
      **params** - `dict`
        arguments for the mysql query ( must match table columns ! ).
      **meta** - `dict`
        metadata for the mysql query. It must contain, at least, `table` key
        with the proper table name.

    :return: S_OK() || S_ERROR()
    '''
    # Thin delegation: all query building is handled by the wrapper.
    return MySQLWrapper.select( self, params, meta )
  def delete( self, params, meta ):
    '''
    Uses arguments to build conditional SQL statement ( WHERE ... ). If the
    sql statement desired is more complex, you can use kwargs to interact with
    the MySQL buildCondition parser and generate a more sophisticated query.
    There is only one forbidden query, with all parameters None ( this would
    mean a query of the type `DELETE * from TableName` ). The usage of kwargs
    is the same as in the get function.

    :Parameters:
      **params** - `dict`
        arguments for the mysql query ( must match table columns ! ).
      **meta** - `dict`
        metadata for the mysql query. It must contain, at least, `table` key
        with the proper table name.

    :return: S_OK() || S_ERROR()
    '''
    # Thin delegation: the all-None safety check lives in the wrapper.
    return MySQLWrapper.delete( self, params, meta )
## Extended SQL methods ######################################################
def addOrModify( self, params, meta ):
  '''
  Using the PrimaryKeys of the table, it looks for the record in the database.
  If it is there, it is updated, if not, it is inserted as a new entry.
  :Parameters:
    **params** - `dict`
      arguments for the mysql query ( must match table columns ! ).
    **meta** - `dict`
      metadata for the mysql query. It must contain, at least, `table` key
      with the proper table name.
  :return: S_OK() || S_ERROR()
  '''
  # One SELECT with the given params decides between update and insert.
  selectQuery = self.select( params, meta )
  if not selectQuery[ 'OK' ]:
    return selectQuery
  isUpdate = False
  if selectQuery[ 'Value' ]:
    # Pseudo - code
    # for all column not being PrimaryKey and not a time column:
    #   if one or more column different than params if not None:
    #     we update dateTime as well
    columns = selectQuery[ 'Columns' ]
    values = selectQuery[ 'Value' ]
    if len( values ) != 1:
      return S_ERROR( 'More than one value returned on addOrModify, please report !!' )
    selectDict = dict( zip( columns, values[ 0 ] ) )
    newDateEffective = None
    for key, value in params.items():
      # Time columns and unset params never trigger a dateEffective refresh.
      if key in ( 'lastCheckTime', 'dateEffective' ):
        continue
      if value is None:
        continue
      # NOTE(review): column names are assumed to be the params keys with
      # the first letter upper-cased — confirm against the table schema.
      if value != selectDict[ key[0].upper() + key[1:] ]:
        newDateEffective = datetime.utcnow().replace( microsecond = 0 )
        break
    # When nothing changed this resets dateEffective to None — presumably so
    # the wrapper leaves the stored value untouched. TODO confirm.
    if 'dateEffective' in params:
      params[ 'dateEffective' ] = newDateEffective
    userQuery = self.update( params, meta )
    isUpdate = True
  else:
    userQuery = self.insert( params, meta )
  # This part only applies to PolicyResult table
  logResult = self._logRecord( params, meta, isUpdate )
  if not logResult[ 'OK' ]:
    return logResult
  return userQuery
# FIXME: this method looks unused. Maybe can be removed from the code.
def addIfNotThere( self, params, meta ):
  '''
  Inserts params as a new record, unless a record matching the table's
  PrimaryKeys already exists, in which case nothing is written.
  :Parameters:
    **params** - `dict`
      arguments for the mysql query ( must match table columns ! ).
    **meta** - `dict`
      metadata for the mysql query. It must contain, at least, `table` key
      with the proper table name.
  :return: S_OK() || S_ERROR()
  '''
  existing = self.select( params, meta )
  # On query failure, and also when the record is already present, the
  # select result itself is what the caller gets back.
  if not existing[ 'OK' ] or existing[ 'Value' ]:
    return existing
  return self.insert( params, meta )
## Auxiliar methods ##########################################################
def getTable( self, tableName ):
  '''
  Returns a table dictionary description given its name
  '''
  # EAFP variant of the membership check: a missing key means the table is
  # not part of the schema.
  try:
    return S_OK( self._tableDict[ tableName ] )
  except KeyError:
    return S_ERROR( '%s is not on the schema' % tableName )
def getTablesList( self ):
  '''
  Returns a list of the table names in the schema.
  '''
  # NOTE(review): under Python 3 dict.keys() is a view, not a list — confirm
  # callers only iterate over the result.
  return S_OK( self._tableDict.keys() )
## Protected methods #########################################################
def _logRecord( self, params, meta, isUpdate ):
  '''
  Method that records every change on a LogTable.
  Writes a copy of the record to the <table>Log table ( only applies to the
  PolicyResult table; any other table is a no-op ). For updates, the full
  row is re-read first, since the update may have been issued with a partial
  set of parameters.
  :Parameters:
    **params** - `dict`
      arguments of the original insert / update.
    **meta** - `dict`
      metadata of the original query; must contain the `table` key. This
      dict is NOT modified.
    **isUpdate** - `bool`
      whether the logged change was an update ( True ) or an insert ( False ).
  :return: S_OK() || S_ERROR()
  '''
  if not ( 'table' in meta and meta[ 'table' ] == 'PolicyResult' ):
    return S_OK()
  if isUpdate:
    # If we were updating, we may have been passed an incomplete set of
    # parameters, so the whole row has to be fetched back from the
    # database. It costs us one more query.
    updateRes = self.select( params, meta )
    if not updateRes[ 'OK' ]:
      return updateRes
    params = dict( zip( updateRes[ 'Columns' ], updateRes[ 'Value' ][ 0 ] ) )
  # Writes to PolicyResult"Log". Work on a copy of meta so the caller's dict
  # is not mutated ( the previous code rebound meta[ 'table' ] in place,
  # leaking the 'PolicyResultLog' name back to the caller ).
  logMeta = dict( meta )
  logMeta[ 'table' ] += 'Log'
  return self.insert( params, logMeta )
## Private methods ###########################################################
def __createTables( self, tableName = None ):
  '''
  Writes the schema in the database. If no tableName is given, all tables
  are written in the database. If a table is already in the schema, it is
  skipped to avoid problems trying to create a table that already exists.
  '''
  if tableName is None:
    tables = dict( self._tableDict )
  elif tableName in self._tableDict:
    tables = { tableName : self._tableDict[ tableName ] }
  else:
    return S_ERROR( '"%s" is not a known table' % tableName )
  res = self.database._createTables( tables )
  if not res[ 'OK' ]:
    return res
  # Replace the numeric result with a human readable S_OK message.
  if res[ 'Value' ] == 0:
    res[ 'Value' ] = 'No tables created'
  else:
    res[ 'Value' ] = 'Tables created: %s' % ','.join( tables.keys() )
  return res
def __generateTables( self ):
  '''
  Method used to transform the class variables into instance variables,
  for safety reasons.
  '''
  # Shallow copy so the class level dictionary itself is never handed out.
  tables = dict( self._tablesDB )
  # Resolve the "like" tables to their template definitions.
  for likeName, templateName in self._likeToTable.items():
    tables[ likeName ] = self._tablesLike[ templateName ]
  return tables
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
coberger/DIRAC
|
ResourceStatusSystem/DB/ResourceManagementDB.py
|
Python
|
gpl-3.0
| 20,405
|
[
"DIRAC"
] |
ba420e2a57126ed4aee28bc25cefe2ecbea361bfbc2c0c6d9832ebd836dd74b2
|
"""Definitions of the free parameters.
The free parameters are meant to be used for parameters that one wants to optimize. They can be fixed to a certain
value to disable them from being optimized in a given situation, but they remain classified as 'optimizable' parameters.
"""
from mdt import FreeParameterTemplate
from mdt.component_templates.parameters import PolarAngleParameterTemplate, AzimuthAngleParameterTemplate, \
RotationalAngleParameterTemplate
from mdt.model_building.parameter_functions.priors import UniformWithinBoundsPrior, ARDBeta, ARDGaussian
from mdt.model_building.parameter_functions.transformations import ScaleTransform
# Module authorship metadata.
__author__ = 'Robbert Harms'
__date__ = "2015-12-12"
__maintainer__ = "Robbert Harms"
__email__ = "robbert@xkls.nl"
class s0(FreeParameterTemplate):
    # Presumably the unweighted (b=0) baseline signal amplitude — hence the
    # very wide positive range and large proposal std. TODO confirm.
    init_value = 1e4
    lower_bound = 0
    upper_bound = 1e10
    sampling_proposal_std = 10.0
class w(FreeParameterTemplate):
    # Weight restricted to [0, 1] — presumably a compartment volume fraction.
    init_value = 0.5
    lower_bound = 0
    upper_bound = 1
    sampling_proposal_std = 0.01
    # Flat prior over the allowed interval for MCMC sampling.
    sampling_prior = UniformWithinBoundsPrior()
    numdiff_info = {'scale_factor': 10}
class w_ard_beta(w):
    """Subclasses the weight to add a Beta prior for use with Automatic Relevance Detection during sample."""
    sampling_prior = ARDBeta()
class w_ard_gaussian(w):
    """Subclasses the weight to add a Gaussian prior for use with Automatic Relevance Detection during sample."""
    sampling_prior = ARDGaussian()
class T1(FreeParameterTemplate):
    # Longitudinal relaxation time; bounds suggest units of seconds — TODO
    # confirm units.
    init_value = 0.05
    lower_bound = 1e-5
    upper_bound = 4.0
    parameter_transform = ScaleTransform(1e4)
class T2(FreeParameterTemplate):
    # Transverse relaxation time; bounds suggest units of seconds — TODO
    # confirm units.
    init_value = 0.05
    lower_bound = 1e-5
    upper_bound = 2.0
    parameter_transform = ScaleTransform(1e4)
class R1(FreeParameterTemplate):
    """R1 = 1/T1, for linear T1Dec or other models. """
    init_value = 2
    lower_bound = 0.25
    upper_bound = 100.0
    parameter_transform = ScaleTransform(1e2)
class R2(FreeParameterTemplate):
    """R2 = 1/T2, for linear T2Dec or other models."""
    init_value = 5
    lower_bound = 0.5
    upper_bound = 500.0
    parameter_transform = ScaleTransform(1e2)
class R2s(FreeParameterTemplate):
    """R2s = 1/T2s, for linear T2sDec or other models."""
    init_value = 10
    lower_bound = 1
    upper_bound = 50.0
    parameter_transform = ScaleTransform(1e2)
class theta(PolarAngleParameterTemplate):
    """The polar/inclination angle for spherical coordinates.
    We subclass from a special spherical coordinate template class to signal to the composite model we
    want to restrict this parameter between [0, pi], together with phi.
    """
class phi(AzimuthAngleParameterTemplate):
    """The azimuth angle for spherical coordinates.
    We subclass from a special spherical coordinate template class to signal to the composite model we
    want to restrict this parameter between [0, pi], together with theta.
    """
class psi(RotationalAngleParameterTemplate):
    """The rotation angle for use in cylindrical models.
    This parameter can be used to rotate a vector around another vector, as is for example done in the Tensor model.
    This parameter is not part of the spherical coordinate parameters.
    """
class d(FreeParameterTemplate):
    # Diffusivity; magnitudes suggest units of m^2/s with a typical in-vivo
    # initial value of 1.7e-9 — TODO confirm units.
    init_value = 1.7e-9
    lower_bound = 1e-12
    upper_bound = 1.0e-8
    parameter_transform = ScaleTransform(1e10)
    sampling_proposal_std = 1e-10
    numdiff_info = {'scale_factor': 1e10, 'use_upper_bound': False}
class dperp0(FreeParameterTemplate):
    # First perpendicular diffusivity (by name) — same scaling scheme as d.
    # TODO confirm semantics.
    init_value = 1.7e-10
    lower_bound = 0
    upper_bound = 1.0e-8
    parameter_transform = ScaleTransform(1e10)
    sampling_proposal_std = 1e-10
    numdiff_info = {'scale_factor': 1e10, 'use_upper_bound': False}
class dperp1(FreeParameterTemplate):
    # Second perpendicular diffusivity (by name) — same scaling scheme as d.
    # TODO confirm semantics.
    init_value = 1.7e-11
    lower_bound = 0
    upper_bound = 1.0e-8
    parameter_transform = ScaleTransform(1e10)
    sampling_proposal_std = 1e-10
    numdiff_info = {'scale_factor': 1e10, 'use_upper_bound': False}
class R(FreeParameterTemplate):
    # Radius parameter; the 1e-7..2e-5 range suggests meters (micron scale)
    # — TODO confirm units.
    init_value = 1.0e-6
    lower_bound = 1e-7
    upper_bound = 20e-6
    parameter_transform = ScaleTransform(1e7)
    sampling_proposal_std = 1e-7
class kappa(FreeParameterTemplate):
    """The kappa parameter used in the NODDI Watson model.
    The NODDI-Watson model computes the spherical harmonic (SH) coefficients of the Watson distribution with the
    concentration parameter k (kappa) up to the 12th order.
    Truncating at the 12th order gives good approximation for kappa up to 64, as such we define kappa to be between
    zero and 64.
    """
    init_value = 1
    lower_bound = 0
    upper_bound = 64
    sampling_proposal_std = 0.01
    numdiff_info = {'use_upper_bound': False}
    parameter_transform = ScaleTransform(1/64.)
class k1(FreeParameterTemplate):
    """The kappa parameter for the Ball&Racket and NODDI Bingham model"""
    init_value = 1
    lower_bound = 0
    upper_bound = 64
    sampling_proposal_std = 0.01
    numdiff_info = {'use_upper_bound': False}
    parameter_transform = ScaleTransform(1/64.)
class kw(FreeParameterTemplate):
    """We optimize the ratio w = k1/k2 in the Ball&Racket and NODDI Bingham model"""
    init_value = 2
    lower_bound = 1
    upper_bound = 64
    sampling_proposal_std = 0.01
    numdiff_info = {'use_upper_bound': False}
    parameter_transform = ScaleTransform(1 / 64.)
class d_exvivo(FreeParameterTemplate):
    """For use in ExpT1DecSTEAM model. It assumes ex-vivo values. For in-vivo use ``d`` instead."""
    init_value = 5.0e-10
    lower_bound = 0.0
    upper_bound = 1.0e-8
    parameter_transform = ScaleTransform(1e10)
    sampling_proposal_std = 1e-11
    numdiff_info = {'scale_factor': 1e10, 'use_upper_bound': False}
class d_bulk(FreeParameterTemplate):
    # Bulk diffusivity (by name); note the zero initial value (0.e-9) — same
    # scaling scheme as d. TODO confirm semantics.
    init_value = 0.e-9
    lower_bound = 0
    upper_bound = 1.0e-8
    parameter_transform = ScaleTransform(1e10)
    sampling_proposal_std = 1e-10
    numdiff_info = {'scale_factor': 1e10, 'use_upper_bound': False}
|
cbclab/MDT
|
mdt/data/components/standard/parameters/free.py
|
Python
|
lgpl-3.0
| 6,030
|
[
"Gaussian"
] |
93e825e0b6efda01ac83269dc93e6156706c617873e8970dde2092358efc76a1
|
# Copyright (C) 2021 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
import scipy.signal
# Import the tutorial script (path substituted at configure time) and obtain
# the feature-gating decorator for the test class below.
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
    filepath="@TUTORIALS_DIR@/error_analysis/error_analysis_part1.py")
@skipIfMissingFeatures
class Tutorial(ut.TestCase):

    def ar_1_process(self, n, c, phi, eps):
        # Reference implementation of an AR(1) process, used to cross-check
        # the tutorial's own ar_1_process().
        # First sample drawn from the stationary distribution of the process.
        y0 = np.random.normal(loc=c / (1 - phi),
                              scale=np.sqrt(eps**2 / (1 - phi**2)))
        y = c + np.random.normal(loc=0.0, scale=eps, size=n - 1)
        y = np.insert(y, 0, y0)
        # get an AR(1) process from an ARMA(p,q) process with p=1 and q=0
        y = scipy.signal.lfilter([1.], [1., -phi], y)
        return y

    def test_ar1_implementation(self):
        # |phi| > 1 makes the process non-stationary; the tutorial must
        # reject it with a ValueError.
        with self.assertRaises(ValueError):
            tutorial.ar_1_process(10, 1.0, 1.1, 3.0)
        with self.assertRaises(ValueError):
            tutorial.ar_1_process(10, 1.0, -1.1, 3.0)
        # With identical RNG seeds the tutorial's sequence must match the
        # reference implementation sample-for-sample.
        for seed in range(5):
            for eps in [0.5, 1., 2.]:
                for phi in [0.1, 0.8, 0.999, -0.3]:
                    c = eps / 2.
                    np.random.seed(seed)
                    seq = tutorial.ar_1_process(10, c, phi, eps)
                    np.random.seed(seed)
                    ref = self.ar_1_process(10, c, phi, eps)
                    np.testing.assert_allclose(seq, ref, atol=1e-12, rtol=0)

    def test(self):
        self.assertLess(abs(tutorial.PHI_1), 1.0)
        self.assertLess(abs(tutorial.PHI_2), 1.0)
        # Test manual binning analysis
        ref_bin_avgs = np.mean(
            tutorial.time_series_1[:tutorial.N_BINS * tutorial.BIN_SIZE].reshape((tutorial.N_BINS, -1)), axis=1)
        np.testing.assert_allclose(
            tutorial.bin_avgs,
            ref_bin_avgs,
            atol=1e-12,
            rtol=0)
        self.assertAlmostEqual(
            tutorial.avg,
            np.mean(ref_bin_avgs),
            delta=1e-10)
        # NOTE(review): ddof=1.5 mirrors the tutorial's SEM estimator —
        # confirm this correction factor is intended.
        self.assertAlmostEqual(
            tutorial.sem,
            np.std(ref_bin_avgs, ddof=1.5) / np.sqrt(tutorial.N_BINS),
            delta=1e-10)
        # Test binning analysis function
        for bin_size in [2, 10, 76, 100]:
            data = np.random.random(500)
            n_bins = 500 // bin_size
            sem = tutorial.do_binning_analysis(data, bin_size)
            ref_bin_avgs = np.mean(
                data[:n_bins * bin_size].reshape((n_bins, -1)), axis=1)
            ref_sem = np.std(ref_bin_avgs, ddof=1.5) / np.sqrt(n_bins)
            self.assertAlmostEqual(sem, ref_sem, delta=1e-10)
        # The analytic expressions for the AR(1) process are taken from
        # https://en.wikipedia.org/wiki/Autoregressive_model#Example:_An_AR(1)_process
        # (accessed June 2021)
        SIGMA_1 = np.sqrt(tutorial.EPS_1 ** 2 / (1 - tutorial.PHI_1 ** 2))
        TAU_EXP_1 = -1 / np.log(tutorial.PHI_1)
        # The autocorrelation is exponential, thus tau_exp = tau_int, and
        # therefore
        SEM_1 = np.sqrt(2 * SIGMA_1 ** 2 * TAU_EXP_1 / tutorial.N_SAMPLES)
        self.assertAlmostEqual(
            tutorial.fit_params[2],
            SEM_1,
            delta=0.1 * SEM_1)
        self.assertAlmostEqual(tutorial.AN_SEM_1, SEM_1, delta=1e-10 * SEM_1)
        SIGMA_2 = np.sqrt(tutorial.EPS_2 ** 2 / (1 - tutorial.PHI_2 ** 2))
        TAU_EXP_2 = -1 / np.log(tutorial.PHI_2)
        SEM_2 = np.sqrt(2 * SIGMA_2 ** 2 * TAU_EXP_2 / tutorial.N_SAMPLES)
        self.assertAlmostEqual(tutorial.AN_SEM_2, SEM_2, delta=1e-10 * SEM_2)
# Allow running this test file directly.
if __name__ == "__main__":
    ut.main()
|
espressomd/espresso
|
testsuite/scripts/tutorials/test_error_analysis_part1.py
|
Python
|
gpl-3.0
| 4,252
|
[
"ESPResSo"
] |
1d1f662f6da300e7c7bb5196f190ca828c31f0eb1aeed47a3624a767da95d066
|
from Bio.Blast import NCBIXML as nx
import sys
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
import argparse
# Command line interface; arguments are parsed at import time, so this module
# is meant to be executed as a script.
parser = argparse.ArgumentParser(description='Parse & plot blast results')
parser.add_argument('--bed', help='bed file of SV regions', type=str)
parser.add_argument('--show_plot', help='Set to 1 if you want to see interactive plot after generation of eash plot', default=0, type=int)
parser.add_argument('-o', help='Path for working folder', default=os.getcwd(), type=str)
parser.add_argument('--overhang', help='overhang at left and right flanks', default=1000, type=int)
args = parser.parse_args()
# Filtering thresholds. NOTE(review): minCoverage and minHspLength appear
# unused below (minHspLength only in a commented-out check) — confirm.
minContigSize = 150
minCoverage = 3
minHspLength = 26
minBitScore = 1.7
def ParseQueryTitle(title, assembler_name):
    """Extract contig name, coverage and length from a blast query title.

    The title layout depends on which assembler produced the contig; an
    unknown assembler name yields an empty dict.
    """
    info = {}
    if assembler_name in ('spades', 'velvet'):
        fields = title.split('_')
        info["contigName"] = fields[1]
        info["coverage"] = float(fields[5])
        info["length"] = int(fields[3])
    elif assembler_name == 'dipspades':
        fields = title.split('_')
        info["contigName"] = fields[0]
        info["coverage"] = 1.0
        info["length"] = int(fields[2])
    elif assembler_name == 'abyss':
        fields = title.split(' ')
        info["contigName"] = fields[0]
        info["coverage"] = int(fields[2])/int(fields[1])
        info["length"] = int(fields[1])
    return info
def parseblastout(line,assembler_name):
    # Parse one bed line, load the corresponding blast output for the given
    # assembler, plot every accepted HSP against the reference region and
    # write a <title>_<assembler>_svmap.out report with inferred SV events.
    array = line.rstrip('\n').split('\t')
    title = '_'.join(array)
    folder = os.path.join(args.o, array[4], array[5], title)
    file = os.path.join(folder, assembler_name + ".blastout")
    if not os.path.exists(file):
        sys.stderr.write(file + " does not exist\n")
        return
    bfile = open(file,'r')
    blast_records = nx.parse(bfile)
    blast_records = list(blast_records)
    sys.stderr.write("Blast output file is parsed successfully!\nThere are " + str(len(blast_records)) + " Blast records in the file\n")
    # Reference region extended by --overhang on both flanks.
    region = (array[0],int(array[1])-args.overhang,int(array[2])+args.overhang)
    ymax = 0
    xLabels = []
    yLabels = []
    contigList = []
    # One colour per contig, cycling through 7 matplotlib colour codes.
    colorCodes = "bgrcmyk"
    colorCount = 0
    for b in blast_records:
        colorCount += 1
        cn = colorCodes[colorCount % 7]
        if len(b.alignments) > 0 and b.query_length >= minContigSize:
            contigInfo = ParseQueryTitle(b.query,assembler_name)
            # Per-base best bit score ( bits per aligned column ) of the contig.
            contigBits = [0.0] * b.query_length
            if b.query_length > ymax: ymax = b.query_length + 20
            contigHsps = []
            sys.stderr.write("There are " + str(len(b.alignments)) + " alignments for " + b.query + "\n")
            for a in b.alignments:
                chromosome = a.title.split(' ')[1]
                sys.stderr.write("There are " + str(len(a.hsps)) + " hsps for chromosome " + chromosome + "\n")
                for hsp in a.hsps:
                    # Accept an HSP only if its per-column bit score beats both
                    # the global threshold and what already covers that span.
                    tmpContigBits = contigBits[hsp.query_start-1:hsp.query_start+hsp.align_length-1]
                    avgTmpContigBits = (sum(tmpContigBits)/len(tmpContigBits))
                    hspBits = [hsp.bits / hsp.align_length] * hsp.align_length
                    avgHspBits = (sum(hspBits)/len(hspBits))
                    if avgHspBits > minBitScore and avgTmpContigBits < avgHspBits-0.1:
                        contigHsps.append((chromosome,avgHspBits,hsp))
                        for i in range(hsp.query_start-1,hsp.query_start+hsp.align_length-1):
                            if i >= 0 and i < len(contigBits):
                                contigBits[i] = avgHspBits
            # Contig score: covered-bits average weighted by assembler coverage.
            contigScore = (sum(contigBits)/b.query_length) * contigInfo["coverage"]
            contigHsps = sorted(contigHsps, key=lambda x: x[2].query_start)
            contigList.append((contigHsps,contigScore,contigInfo))
            for index in range(len(contigHsps)):
                chromosome, avgHspBits, hsp = contigHsps[index]
                hspPlotLabel, ctrl = '', False
                if len(contigHsps) > 1: hspPlotLabel = contigInfo['contigName']+'_'+str(index); ctrl = True
                else: hspPlotLabel = contigInfo['contigName']
                # if hsp.bits / hsp.align_length > minBitScore and hsp.align_length >= minHspLength:
                # Only plot HSPs falling entirely inside the extended region.
                if avgHspBits > minBitScore and chromosome == region[0] and hsp.sbjct_start >= region[1] and (hsp.sbjct_start + (hsp.align_length * hsp.frame[1])) <= region[2]:
                    sys.stderr.write("plotting " + str(hsp.query_start) +" "+ str(hsp.sbjct_start) +" "+ str(hsp.query_start + (hsp.align_length * hsp.frame[0]))+" "+str(hsp.sbjct_start + (hsp.align_length * hsp.frame[1]))+"\n")
                    plt.plot([hsp.sbjct_start,hsp.sbjct_start + (hsp.align_length * hsp.frame[1])],[hsp.query_start,hsp.query_start + (hsp.align_length * hsp.frame[0])],'k-',lw=3,c=cn)
                    if ctrl: xLabels.append(hsp.sbjct_start); xLabels.append(hsp.sbjct_start + (hsp.align_length * hsp.frame[1])); yLabels.append(hsp.query_start); yLabels.append(hsp.query_start + (hsp.align_length * hsp.frame[0]));
                    plt.annotate(hspPlotLabel,xy=((hsp.sbjct_start + (hsp.sbjct_start + (hsp.align_length * hsp.frame[1])))/2,(hsp.query_start+(hsp.query_start + (hsp.align_length * hsp.frame[0])))/2),
                                 xytext=((hsp.sbjct_start + (hsp.sbjct_start + (hsp.align_length * hsp.frame[1])))/2,(hsp.query_start+(hsp.query_start + (hsp.align_length * hsp.frame[0])))/2))
        else:
            sys.stderr.write("Length of " + b.query + " is less than MinContigSize: " + str(minContigSize) + "\t Skipping...\n")
    plt.xticks(rotation=45)
    plt.xlabel('chr' + region[0])
    plt.ylim(0,ymax+50)
    plt.xlim(region[1]-50,region[2]+50)
    ax = plt.gca()
    ax.tick_params(pad=25)
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
    ax.set_xticks(xLabels,minor=False)
    ax.set_yticks(yLabels,minor=False)
    ax.yaxis.grid(True,which='major')
    ax.xaxis.grid(True,which='major')
    pngname = os.path.join(args.o, array[4], array[5], title + "_" + assembler_name + ".png")
    plt.savefig(pngname,dpi=300,bbox_inches='tight')
    if args.show_plot: plt.show()
    plt.close()
    # Best scoring contigs first in the report.
    contigList = sorted(contigList,reverse=True ,key=lambda x:x[1])
    outname = os.path.join(args.o, array[4], array[5],title + "_" + assembler_name + "_svmap.out")
    outfile = open(outname,'w')
    for c in contigList:
        outfile.write("##contig_ID:" + c[2]["contigName"] +
                      " length:"+ str(c[2]["length"]) +
                      " coverage:"+str(c[2]["coverage"]) +
                      " contig_score:"+str(c[1]) +
                      " number_of_hits:"+str(len(c[0])) + "\n")
        #if len(c[0]) > 1:
        previousHsp = None
        previousChr = None
        for i in range(len(c[0])):
            hsp = c[0][i][2]
            if previousHsp:
                # Infer SV events from the discontinuity between consecutive
                # HSPs of the same contig.
                prevQueryEnd = (previousHsp.query_start + (previousHsp.align_length * previousHsp.frame[0]) - 1)
                if prevQueryEnd > hsp.query_start: prevQueryEnd = hsp.query_start - 1 #left align breakpoints
                events = []
                if c[0][i][0] != previousChr:
                    events.append("inter_chromosomal_translocation")
                else:
                    if previousHsp.frame[1] != hsp.frame[1]:
                        events.append("inversion")
                    if hsp.query_start - prevQueryEnd > 1:
                        events.append("insertion:Query:" + str(prevQueryEnd) +"-"+ str(hsp.query_start))
                    # NOTE(review): abs(...) is used as a truthiness test, so
                    # any non-zero reference gap is reported as a deletion —
                    # confirm a minimum-size threshold is not intended here.
                    if abs(hsp.sbjct_start - (previousHsp.sbjct_start + (previousHsp.align_length * previousHsp.frame[1]))):
                        events.append("deletion:" + c[0][i][0] + ":" +
                                      str(min(hsp.sbjct_start , (previousHsp.sbjct_start + (previousHsp.align_length * previousHsp.frame[1])))) + "-" +
                                      str(max(hsp.sbjct_start , (previousHsp.sbjct_start + (previousHsp.align_length * previousHsp.frame[1])))))
                outfile.write("\tEVENTS: " + ','.join(events) + "\n")
            strand = ''
            if hsp.frame[1] < 0: strand= 'Rev'
            else: strand = 'Fwd'
            # Count alignment gaps ( '-' in either sequence ) and mismatches
            # ( spaces in the blast match line ).
            gaps = 0
            mismatches = 0
            for n in hsp.query:
                if n == '-': gaps += 1
            for n in hsp.sbjct:
                if n == '-': gaps += 1
            for m in hsp.match:
                if m == ' ': mismatches += 1
            outfile.write("\t#hit:" +c[2]["contigName"] +"_"+ str(i) + " strand:" + strand +
                          " contig_pos:" + str(hsp.query_start) +"-"+ str(hsp.query_start + (hsp.align_length * hsp.frame[0]) - 1) +
                          " ref_pos:"+ c[0][i][0] + ":" + str(hsp.sbjct_start) + "-" + str(hsp.sbjct_start + (hsp.align_length * hsp.frame[1]) - 1) +
                          " aligned_length:" + str(hsp.align_length) +
                          " gaps:"+ str(gaps) + " mismatches:" + str(mismatches) + "\n")
            outfile.write("\tquery: " + str(hsp.query) + "\n\tmatch: " + str(hsp.match) + "\n\tsbjct: " + str(hsp.sbjct) + "\n")
            previousHsp = hsp
            previousChr = c[0][i][0]
def main():
    """Run the blast parser for every assembler on each region of the bed file."""
    bed = open(args.bed, 'r')
    line = bed.readline()
    # Stop at the first (near-)empty line; each valid bed line drives all
    # four assemblers in turn.
    while len(line) > 3:
        for assembler in ('dipspades', 'spades', 'abyss', 'velvet'):
            sys.stderr.write("Parsing %s\n" % assembler)
            parseblastout(line, assembler)
        line = bed.readline()
# Script entry point.
if __name__ == '__main__':
    main()
|
berguner/svmap
|
svmap.py
|
Python
|
gpl-3.0
| 9,738
|
[
"BLAST"
] |
de17bf799ddb7659109a4e67a99e147db429bda6b2fa2ba0e1fe7916c015ec13
|
# Version: 0.16
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.3, 3.4, 3.5, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `style`: the style of version string to be produced. See "Styles" below for
details. Defaults to "pep440", which looks like
`TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py setup_versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py setup_versioneer` command (described below)
will append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None`. To actually use the computed version string,
your `setup.py` will need to override `distutils.command.build_scripts`
with a subclass that explicitly inserts a copy of
`versioneer.get_version()` into your script file. See
`test/demoapp-script-only/setup.py` for an example.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string, using either `tag_prefix=` or `tag_prefix=''`.
* `parentdir_prefix`:
an optional string, frequently the same as tag_prefix, which appears at the
start of all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
just omit the field from your `setup.cfg`.
This tool provides one script, named `versioneer`. That script has one mode,
"install", which writes a copy of `versioneer.py` into the current directory
and runs `versioneer.py setup` to finish the installation.
To versioneer-enable your project:
* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
populating it with the configuration values you decided earlier (note that
the option names are not case-sensitive):
````
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
````
* 2: Run `versioneer install`. This will do the following:
* copy `versioneer.py` into the top of your source tree
* create `_version.py` in the right place (`versionfile_source`)
* modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`)
* modify your `MANIFEST.in` to include both `versioneer.py` and the
generated `_version.py` in sdist tarballs
`versioneer install` will complain about any problems it finds with your
`setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
the problems.
* 3: add a `import versioneer` to your setup.py, and add the following
arguments to the setup() call:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: commit these changes to your VCS. To make sure you won't forget,
`versioneer install` will mark everything it touched for addition using
`git add`. Don't forget to add `setup.py` and `setup.cfg` too.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
least one tag in its history.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
### Upgrading to 0.16
Nothing special.
### Upgrading to 0.15
Starting with this version, Versioneer is configured with a `[versioneer]`
section in your `setup.cfg` file. Earlier versions required the `setup.py` to
set attributes on the `versioneer` module immediately after import. The new
version will refuse to run (raising an exception during import) until you
have provided the necessary `setup.cfg` section.
In addition, the Versioneer package provides an executable named
`versioneer`, and the installation process is driven by running `versioneer
install`. In 0.14 and earlier, the executable was named
`versioneer-installer` and was run without an argument.
### Upgrading to 0.14
0.14 changes the format of the version string. 0.13 and earlier used
hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
plus-separated "local version" section strings, with dot-separated
components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
format, but should be ok with the new one.
### Upgrading from 0.11 to 0.12
Nothing special.
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py setup_versioneer`. This will enable the use of additional
version-control systems (SVN, etc) in the future.
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    A plain attribute bag: get_config_from_root() populates instances
    field-by-field (VCS, style, versionfile_source, versionfile_build,
    tag_prefix, parentdir_prefix, verbose); nothing is declared here.
    """
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    """
    def _looks_like_root(candidate):
        # A directory qualifies if it holds either setup.py or versioneer.py.
        return (os.path.exists(os.path.join(candidate, "setup.py")) or
                os.path.exists(os.path.join(candidate, "versioneer.py")))

    root = os.path.realpath(os.path.abspath(os.getcwd()))
    if not _looks_like_root(root):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        if not _looks_like_root(root):
            err = ("Versioneer was unable to run the project root directory. "
                   "Versioneer requires setup.py to be executed from "
                   "its immediate directory (like 'python setup.py COMMAND'), "
                   "or in a way that lets it use sys.argv[0] to find the root "
                   "(like 'python path/to/setup.py COMMAND').")
            raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        versioneer_py = os.path.join(root, "versioneer.py")
        if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    *root* is the directory containing setup.cfg.  Returns a populated
    VersioneerConfig.  This might raise EnvironmentError (if setup.cfg is
    missing), or configparser.NoSectionError (if it lacks a [versioneer]
    section), or configparser.NoOptionError (if it lacks "VCS="). See the
    docstring at the top of versioneer.py for instructions on writing
    your setup.cfg .
    """
    setup_cfg = os.path.join(root, "setup.cfg")
    # SafeConfigParser and readfp() were deprecated aliases and were removed
    # in Python 3.12; use the modern names, falling back only on Python 2,
    # whose ConfigParser module lacks read_file().
    parser = configparser.ConfigParser()
    with open(setup_cfg) as f:
        if hasattr(parser, "read_file"):
            parser.read_file(f)
        else:  # Python 2 fallback
            parser.readfp(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def get(parser, name):
        # Optional keys: return None when absent instead of raising.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    # setup.cfg cannot express an empty value directly, so a quoted empty
    # string ('' or "") means "no tag prefix".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    The version-discovery helpers (e.g. versions_from_parentdir,
    versions_from_file) raise this to mean "try the next strategy",
    not a hard failure.
    """
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Call the given command(s).

    Try each executable name in *commands* in turn, running it with
    *args*.  Returns the command's stripped stdout (decoded on Python 3)
    on success, or None if no command could be started or the one that
    ran exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # executable not found: try the next candidate name
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None
    else:
        if verbose:
            # %-formatting, not an f-string: the rest of this module keeps
            # Python 2 compatibility (print_function import, ConfigParser
            # fallback), and a single f-string breaks the file's parse there.
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.16 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "main".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Returns a dict with "refnames" and/or "full" keys scraped from the
    git_refnames/git_full assignment lines; empty dict if the file is
    unreadable or contains neither.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if a read fails
        # part-way through (the original leaked it on such errors).
        with open(versionfile_abs) as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except OSError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {name.strip() for name in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {name[len(TAG):] for name in refs if name.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "main".
        tags = {name for name in refs if re.search(r'\d', name)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % r)
        return {"version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None
                }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys long, short, error, dirty, and either
    closest-tag/distance (a matching tag was found) or closest-tag=None with
    distance set to the total commit count.  Raises NotThisMethod when root
    is not a git checkout or the git commands cannot be run at all.
    run_command is overridable (presumably for testing — default is the
    module-level helper; TODO confirm).
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        # run_command uses shell=False, so the Windows wrapper names must be
        # tried explicitly.
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long",
                                      "--match", "%s*" % tag_prefix],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-time keyword substitution.  The touched files (manifest_in,
    versionfile_source, ipy if given, versioneer.py itself, and
    .gitattributes when newly written) are staged with 'git add'.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # frozen/embedded interpreters may not define __file__
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # 'with' guarantees the handle is closed even if a read fails
        # part-way through (the original leaked it on such errors).
        with open(".gitattributes") as f:
            for line in f:
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except OSError:
        # missing .gitattributes is fine; we will create it below
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False, "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.16) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as fobj:
            source = fobj.read()
    except OSError:
        raise NotThisMethod("unable to read _version.py")
    match = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
                      source, re.M | re.S)
    if match is None:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(match.group(1))
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    # remove the old file first; SHORT_VERSION_PY is rendered fresh
    os.unlink(filename)
    payload = json.dumps(versions, sort_keys=True,
                         indent=1, separators=(",", ": "))
    with open(filename, "w") as fobj:
        fobj.write(SHORT_VERSION_PY % payload)
    print("set {} to '{}'".format(filename, versions["version"]))
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    # pieces["closest-tag"] can be *present but None* (no tags found), and
    # dict.get's default only covers a missing key — so "+" in None would
    # raise TypeError.  Coerce None to "" before the containment test
    # (the same fix later versioneer releases adopted).
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing is tagged anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # Append a local version segment describing distance/commit.
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: fall back to a zero base version
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            # Record the commit hash in a local version segment.
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
        return rendered
    # exception #1: no tag, start from a zero base version
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        suffix = ""
        if pieces["distance"] or pieces["dirty"]:
            suffix = ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                suffix += ".dev0"
        return tag + suffix
    # exception #1: no tag, start from a zero base version
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    parts = []
    if pieces["closest-tag"]:
        parts.append(pieces["closest-tag"])
        if pieces["distance"]:
            parts.append("-%d-g%s" % (pieces["distance"], pieces["short"]))
    else:
        # exception #1: no tag anywhere, fall back to the bare hash
        parts.append(pieces["short"])
    if pieces["dirty"]:
        parts.append("-dirty")
    return "".join(parts)
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    parts = []
    if pieces["closest-tag"]:
        # Distance and hash are always included in the long form.
        parts.append("%s-%d-g%s" % (pieces["closest-tag"],
                                    pieces["distance"], pieces["short"]))
    else:
        # exception #1: no tag anywhere, fall back to the bare hash
        parts.append(pieces["short"])
    if pieces["dirty"]:
        parts.append("-dirty")
    return "".join(parts)
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # VCS interrogation failed; propagate the error verbatim.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table mapping style name to renderer.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""
    # NOTE(review): presumably raised during project-root discovery
    # (get_root, defined elsewhere in this file) — confirm against it.
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Returns dict with two keys: 'version' and 'full'.

    NOTE(review): in this implementation the returned dict actually carries
    the keys 'version', 'full-revisionid', 'dirty' and 'error'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)

    # Fail fast on an incomplete [versioneer] section in setup.cfg.
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"

    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.

    # Source 1: expanded VCS keywords (e.g. from 'git archive' substitution).
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    # Source 2: a previously generated _version.py file.
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print(f"got version from file {versionfile_abs} {ver}")
        return ver
    except NotThisMethod:
        pass

    # Source 3: interrogate the VCS directly and render the result.
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    # Source 4: parse the version out of the unpacked directory name.
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    # Every strategy failed; report a placeholder version plus the error.
    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version"}
def get_version():
    """Get the short version string for this project."""
    info = get_versions()
    return info["version"]
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer."""
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52

    cmds = {}

    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        # Reports the computed version without building anything.
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?

    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py

    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe

        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                # Freeze a static _version.py, build, then restore the
                # original template afterwards.
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        # cx_Freeze handles build_py itself; our override would conflict.
        del cmds["build_py"]

    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist

    return cmds
# Help text printed to stderr when setup.cfg lacks a usable
# [versioneer] section (see do_setup below).
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""

# Commented-out [versioneer] skeleton appended to setup.cfg when no
# configuration is found at all.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""

# Snippet appended to the package __init__.py so the installed package
# exposes __version__ from the generated _version.py.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer."""
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
        # No usable configuration: append a sample section (when the file
        # or section is missing) and tell the user what to fill in.
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1

    # Generate the project's _version.py from the per-VCS template.
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })

    # Ensure the package __init__.py exposes __version__.
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy) as f:
                old = f.read()
        except OSError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None

    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in) as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except OSError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (https://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-time keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    found = set()
    setters = False
    errors = 0
    # Map each marker string to the feature it proves setup.py has.
    markers = {
        "import versioneer": "import",
        "versioneer.get_cmdclass()": "cmdclass",
        "versioneer.get_version()": "get_version",
    }
    with open("setup.py") as f:
        for line in f:
            for needle, token in markers.items():
                if needle in line:
                    found.add(token)
            # Old-style configuration assigned versioneer module
            # attributes directly from setup.py.
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
if __name__ == "__main__":
    # Minimal CLI: 'python versioneer.py setup' installs/refreshes the
    # Versioneer files and then validates setup.py.
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            # Non-zero exit when installation or validation found problems.
            sys.exit(1)
|
jakirkham/dask
|
versioneer.py
|
Python
|
bsd-3-clause
| 65,581
|
[
"Brian"
] |
2751332cc6684c5de41c4234d1ab9daf08f7705f7f291a6a51eefcde35f7f1d8
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Provides the capability to load netCDF files and interpret them
according to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
References:
[CF] NetCDF Climate and Forecast (CF) Metadata conventions, Version 1.5, October, 2010.
[NUG] NetCDF User's Guide, http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
"""
from abc import ABCMeta, abstractmethod
from collections.abc import Iterable, MutableMapping
import os
import re
import warnings
import netCDF4
import numpy as np
import numpy.ma as ma
import iris.util
#
# CF parse pattern common to both formula terms and measure CF variables.
#
_CF_PARSE = re.compile(
r"""
\s*
(?P<lhs>[\w_]+)
\s*:\s*
(?P<rhs>[\w_]+)
\s*
""",
re.VERBOSE,
)
# NetCDF variable attributes handled by the netCDF4 module and
# therefore automatically classed as "used" attributes.
_CF_ATTRS_IGNORE = set(
["_FillValue", "add_offset", "missing_value", "scale_factor",]
)
#: Supported dimensionless vertical coordinate reference surface/phenomenon
#: formula terms. Ref: [CF] Appendix D.
#: Maps each standard_name to the formula term name(s) whose variables
#: provide the reference surface/phenomenon.
reference_terms = dict(
    atmosphere_sigma_coordinate=["ps"],
    atmosphere_hybrid_sigma_pressure_coordinate=["ps"],
    atmosphere_hybrid_height_coordinate=["orog"],
    atmosphere_sleve_coordinate=["zsurf1", "zsurf2"],
    ocean_sigma_coordinate=["eta", "depth"],
    ocean_s_coordinate=["eta", "depth"],
    ocean_sigma_z_coordinate=["eta", "depth"],
    ocean_s_coordinate_g1=["eta", "depth"],
    ocean_s_coordinate_g2=["eta", "depth"],
)
# NetCDF returns a different type for strings depending on Python version.
def _is_str_dtype(var):
return np.issubdtype(var.dtype, np.bytes_)
################################################################################
class CFVariable(metaclass=ABCMeta):
    """Abstract base class wrapper for a CF-netCDF variable."""

    #: Name of the netCDF variable attribute that identifies this
    #: CF-netCDF variable.
    cf_identity = None

    def __init__(self, name, data):
        # Accessing the list of netCDF attributes is surprisingly slow.
        # Since it's used repeatedly, caching the list makes things
        # quite a bit faster.
        self._nc_attrs = data.ncattrs()
        #: NetCDF variable name.
        self.cf_name = name
        #: NetCDF4 Variable data instance.
        self.cf_data = data
        #: Collection of CF-netCDF variables associated with this variable.
        self.cf_group = None
        #: CF-netCDF formula terms that this variable participates in.
        self.cf_terms_by_root = {}
        # Start with a clean attribute access history.
        self.cf_attrs_reset()

    @staticmethod
    def _identify_common(variables, ignore, target):
        # Normalise the identify() 'ignore' and 'target' arguments:
        # default 'ignore' to an empty list, and reduce 'target' to a
        # {name: variable} mapping (the whole of 'variables' by default).
        if ignore is None:
            ignore = []
        if target is None:
            target = variables
        elif isinstance(target, str):
            if target not in variables:
                raise ValueError(
                    "Cannot identify unknown target CF-netCDF variable %r"
                    % target
                )
            target = {target: variables[target]}
        else:
            raise TypeError("Expect a target CF-netCDF variable name")
        return (ignore, target)

    @abstractmethod
    def identify(self, variables, ignore=None, target=None, warn=True):
        """
        Identify all variables that match the criterion for this
        CF-netCDF variable class.

        Args:

        * variables:
            Dictionary of netCDF4.Variable instance by variable name.

        Kwargs:

        * ignore:
            List of variable names to ignore.
        * target:
            Name of a single variable to check.
        * warn:
            Issue a warning if a missing variable is referenced.

        Returns:
            Dictionary of CFVariable instance by variable name.

        """
        pass

    def spans(self, cf_variable):
        """
        Determine whether the dimensionality of this variable
        is a subset of the specified target variable.

        Note that, by default scalar variables always span the
        dimensionality of the target variable.

        Args:

        * cf_variable:
            Compare dimensionality with the :class:`CFVariable`.

        Returns:
            Boolean.

        """
        result = set(self.dimensions).issubset(cf_variable.dimensions)
        return result

    def __eq__(self, other):
        # CF variable names are unique.
        return self.cf_name == other.cf_name

    def __ne__(self, other):
        # CF variable names are unique.
        return self.cf_name != other.cf_name

    def __hash__(self):
        # CF variable names are unique.
        return hash(self.cf_name)

    def __getattr__(self, name):
        # Accessing netCDF attributes is surprisingly slow. Since
        # they're often read repeatedly, caching the values makes things
        # quite a bit faster.
        if name in self._nc_attrs:
            # Record the access for the cf_attrs_used/unused bookkeeping.
            self._cf_attrs.add(name)
        value = getattr(self.cf_data, name)
        # Cache on the instance so __getattr__ is bypassed next time.
        setattr(self, name, value)
        return value

    def __getitem__(self, key):
        # Delegate indexing to the underlying netCDF4 variable.
        return self.cf_data.__getitem__(key)

    def __len__(self):
        return self.cf_data.__len__()

    def __repr__(self):
        return "%s(%r, %r)" % (
            self.__class__.__name__,
            self.cf_name,
            self.cf_data,
        )

    def cf_attrs(self):
        """Return a list of all attribute name and value pairs of the CF-netCDF variable."""
        return tuple(
            (attr, self.getncattr(attr)) for attr in sorted(self._nc_attrs)
        )

    def cf_attrs_ignored(self):
        """Return a list of all ignored attribute name and value pairs of the CF-netCDF variable."""
        return tuple(
            (attr, self.getncattr(attr))
            for attr in sorted(set(self._nc_attrs) & _CF_ATTRS_IGNORE)
        )

    def cf_attrs_used(self):
        """Return a list of all accessed attribute name and value pairs of the CF-netCDF variable."""
        return tuple(
            (attr, self.getncattr(attr)) for attr in sorted(self._cf_attrs)
        )

    def cf_attrs_unused(self):
        """Return a list of all non-accessed attribute name and value pairs of the CF-netCDF variable."""
        return tuple(
            (attr, self.getncattr(attr))
            for attr in sorted(set(self._nc_attrs) - self._cf_attrs)
        )

    def cf_attrs_reset(self):
        """Reset the history of accessed attribute names of the CF-netCDF variable."""
        # Attributes consumed by netCDF4 itself always count as "used".
        self._cf_attrs = set([item[0] for item in self.cf_attrs_ignored()])

    def add_formula_term(self, root, term):
        """
        Register the participation of this CF-netCDF variable in a CF-netCDF formula term.

        Args:

        * root (string):
            The name of CF-netCDF variable that defines the CF-netCDF formula_terms attribute.
        * term (string):
            The associated term name of this variable in the formula_terms definition.

        Returns:
            None.

        """
        self.cf_terms_by_root[root] = term

    def has_formula_terms(self):
        """
        Determine whether this CF-netCDF variable participates in a CF-netcdf formula term.

        Returns:
            Boolean.

        """
        return bool(self.cf_terms_by_root)
class CFAncillaryDataVariable(CFVariable):
    """
    A CF-netCDF ancillary data variable provides metadata about the
    individual values of another data variable.

    Identified by the CF-netCDF variable attribute 'ancillary_variables'.

    Ref: [CF] Section 3.4. Ancillary Data.

    """

    cf_identity = "ancillary_variables"

    @classmethod
    def identify(cls, variables, ignore=None, target=None, warn=True):
        result = {}
        ignore, target = cls._identify_common(variables, ignore, target)

        # Scan the target variables for ancillary data references.
        for nc_var_name, nc_var in target.items():
            referenced = getattr(nc_var, cls.cf_identity, None)
            if referenced is None:
                continue
            for name in referenced.split():
                if name in ignore:
                    continue
                if name in variables:
                    result[name] = CFAncillaryDataVariable(
                        name, variables[name]
                    )
                elif warn:
                    message = "Missing CF-netCDF ancillary data variable %r, referenced by netCDF variable %r"
                    warnings.warn(message % (name, nc_var_name))
        return result
class CFAuxiliaryCoordinateVariable(CFVariable):
    """
    A CF-netCDF auxiliary coordinate variable is any netCDF variable that
    carries coordinate data but is not a CF-netCDF coordinate variable by
    definition. There is no relationship between the name of an auxiliary
    coordinate variable and the name(s) of its dimension(s).

    Identified by the CF-netCDF variable attribute 'coordinates'.
    Also see :class:`iris.fileformats.cf.CFLabelVariable`.

    Ref: [CF] Chapter 5. Coordinate Systems.
         [CF] Section 6.2. Alternative Coordinates.

    """

    cf_identity = "coordinates"

    @classmethod
    def identify(cls, variables, ignore=None, target=None, warn=True):
        result = {}
        ignore, target = cls._identify_common(variables, ignore, target)

        # Scan for references made via the 'coordinates' attribute.
        for nc_var_name, nc_var in target.items():
            referenced = getattr(nc_var, cls.cf_identity, None)
            if referenced is None:
                continue
            for name in referenced.split():
                if name in ignore:
                    continue
                if name not in variables:
                    if warn:
                        message = "Missing CF-netCDF auxiliary coordinate variable %r, referenced by netCDF variable %r"
                        warnings.warn(message % (name, nc_var_name))
                elif not _is_str_dtype(variables[name]):
                    # String-typed variables are CFLabelVariables instead.
                    result[name] = CFAuxiliaryCoordinateVariable(
                        name, variables[name]
                    )
        return result
class CFBoundaryVariable(CFVariable):
    """
    A CF-netCDF boundary variable describes the extent of the cells defined
    by an associated CF-netCDF coordinate (or auxiliary coordinate)
    variable, for data values that apply to a region of space/time or some
    other dimension.

    A CF-netCDF boundary variable will have one more dimension than its
    associated coordinate variable.

    Identified by the CF-netCDF variable attribute 'bounds'.

    Ref: [CF] Section 7.1. Cell Boundaries.

    """

    cf_identity = "bounds"

    @classmethod
    def identify(cls, variables, ignore=None, target=None, warn=True):
        result = {}
        ignore, target = cls._identify_common(variables, ignore, target)

        # Scan the target variables for 'bounds' references.
        for nc_var_name, nc_var in target.items():
            referenced = getattr(nc_var, cls.cf_identity, None)
            if referenced is None:
                continue
            name = referenced.strip()
            if name in ignore:
                continue
            if name in variables:
                result[name] = CFBoundaryVariable(name, variables[name])
            elif warn:
                message = "Missing CF-netCDF boundary variable %r, referenced by netCDF variable %r"
                warnings.warn(message % (name, nc_var_name))
        return result

    def spans(self, cf_variable):
        """
        Determine whether the dimensionality of this variable
        is a subset of the specified target variable.

        Note that, by default scalar variables always span the
        dimensionality of the target variable.

        Args:

        * cf_variable:
            Compare dimensionality with the :class:`CFVariable`.

        Returns:
            Boolean.

        """
        # Scalar variables always span the target variable.
        if not self.dimensions:
            return True
        target_dims = set(cf_variable.dimensions)
        # The bounds extent dimension may come first or last; ignore it.
        return (set(self.dimensions[:-1]) <= target_dims
                or set(self.dimensions[1:]) <= target_dims)
class CFClimatologyVariable(CFVariable):
    """
    A CF-netCDF climatology variable provides a climatological description
    of the extent of the cells defined by an associated CF-netCDF
    coordinate variable, for data values that apply to a region of
    space/time or some other dimension.

    A CF-netCDF climatology variable will have one more dimension than its
    associated coordinate variable.

    Identified by the CF-netCDF variable attribute 'climatology'.

    Ref: [CF] Section 7.4. Climatological Statistics

    """

    cf_identity = "climatology"

    @classmethod
    def identify(cls, variables, ignore=None, target=None, warn=True):
        result = {}
        ignore, target = cls._identify_common(variables, ignore, target)

        # Scan the target variables for 'climatology' references.
        for nc_var_name, nc_var in target.items():
            referenced = getattr(nc_var, cls.cf_identity, None)
            if referenced is None:
                continue
            name = referenced.strip()
            if name in ignore:
                continue
            if name in variables:
                result[name] = CFClimatologyVariable(name, variables[name])
            elif warn:
                message = "Missing CF-netCDF climatology variable %r, referenced by netCDF variable %r"
                warnings.warn(message % (name, nc_var_name))
        return result

    def spans(self, cf_variable):
        """
        Determine whether the dimensionality of this variable
        is a subset of the specified target variable.

        Note that, by default scalar variables always span the
        dimensionality of the target variable.

        Args:

        * cf_variable:
            Compare dimensionality with the :class:`CFVariable`.

        Returns:
            Boolean.

        """
        # Scalar variables always span the target variable.
        if not self.dimensions:
            return True
        target_dims = set(cf_variable.dimensions)
        # The climatology extent dimension may come first or last; ignore it.
        return (set(self.dimensions[:-1]) <= target_dims
                or set(self.dimensions[1:]) <= target_dims)
class CFCoordinateVariable(CFVariable):
    """
    A CF-netCDF coordinate variable is a one-dimensional variable with the
    same name as its dimension, defined as a numeric data type with values
    that are ordered monotonically. Missing values are not allowed in
    CF-netCDF coordinate variables. Also see [NUG] Section 2.3.1.

    Identified by the above criterion; there is no associated CF-netCDF
    variable attribute.

    Ref: [CF] 1.2. Terminology.

    """

    @classmethod
    def identify(
        cls, variables, ignore=None, target=None, warn=True, monotonic=False
    ):
        result = {}
        ignore, target = cls._identify_common(variables, ignore, target)

        for nc_var_name, nc_var in target.items():
            if nc_var_name in ignore:
                continue
            # String variables can't be coordinates.
            if _is_str_dtype(nc_var):
                continue
            # Must be one-dimensional over a dimension of the same name.
            if nc_var.ndim != 1 or nc_var_name not in nc_var.dimensions:
                continue
            if monotonic:
                points = nc_var[:]
                # Gracefully fill a masked coordinate.
                if ma.isMaskedArray(points):
                    points = ma.filled(points)
                if (nc_var.shape not in [(), (1,)]
                        and not iris.util.monotonic(points)):
                    continue
            result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
        return result
class CFDataVariable(CFVariable):
    """
    A CF-netCDF variable containing data pay-load that maps to an Iris
    :class:`iris.cube.Cube`.

    """

    @classmethod
    def identify(cls, variables, ignore=None, target=None, warn=True):
        # Data variables carry no identifying CF attribute (cf_identity is
        # None), so they cannot be detected with the usual attribute scan.
        raise NotImplementedError
class _CFFormulaTermsVariable(CFVariable):
    """
    A CF-netCDF formula terms variable corresponds to a term in a formula
    that allows dimensional vertical coordinate values to be computed from
    dimensionless vertical coordinate values and associated variables at
    specific grid points.

    Identified by the CF-netCDF variable attribute 'formula_terms'.

    Ref: [CF] Section 4.3.2. Dimensional Vertical Coordinate.
         [CF] Appendix D. Dimensionless Vertical Coordinates.

    """

    cf_identity = "formula_terms"

    def __init__(self, name, data, formula_root, formula_term):
        CFVariable.__init__(self, name, data)
        # Register the formula root and term relationship.
        self.add_formula_term(formula_root, formula_term)

    @classmethod
    def identify(cls, variables, ignore=None, target=None, warn=True):
        result = {}
        ignore, target = cls._identify_common(variables, ignore, target)

        # Scan the target variables for 'formula_terms' declarations.
        for nc_var_name, nc_var in target.items():
            declaration = getattr(nc_var, cls.cf_identity, None)
            if declaration is None:
                continue
            for match in _CF_PARSE.finditer(declaration):
                groups = match.groupdict()
                # Ensure that term name is lower case, as expected.
                term_name = groups["lhs"].lower()
                variable_name = groups["rhs"]
                if variable_name in ignore:
                    continue
                if variable_name not in variables:
                    if warn:
                        message = "Missing CF-netCDF formula term variable %r, referenced by netCDF variable %r"
                        warnings.warn(
                            message % (variable_name, nc_var_name)
                        )
                    continue
                if variable_name in result:
                    # Already wrapped: just register the extra term.
                    result[variable_name].add_formula_term(
                        nc_var_name, term_name
                    )
                else:
                    result[variable_name] = _CFFormulaTermsVariable(
                        variable_name,
                        variables[variable_name],
                        nc_var_name,
                        term_name,
                    )
        return result

    def __repr__(self):
        return "%s(%r, %r, %r)" % (
            self.__class__.__name__,
            self.cf_name,
            self.cf_data,
            self.cf_terms_by_root,
        )
class CFGridMappingVariable(CFVariable):
    """
    A CF-netCDF grid mapping variable contains a list of specific
    attributes that define a particular grid mapping; it must carry the
    attribute 'grid_mapping_name'. Based on that attribute's value there
    are associated standard names of CF-netCDF coordinate variables that
    contain the mapping's independent variables.

    Identified by the CF-netCDF variable attribute 'grid_mapping'.

    Ref: [CF] Section 5.6. Horizontal Coordinate Reference Systems, Grid
              Mappings, and Projections.
         [CF] Appendix F. Grid Mappings.

    """

    cf_identity = "grid_mapping"

    @classmethod
    def identify(cls, variables, ignore=None, target=None, warn=True):
        result = {}
        ignore, target = cls._identify_common(variables, ignore, target)

        # Scan the target variables for 'grid_mapping' references.
        for nc_var_name, nc_var in target.items():
            referenced = getattr(nc_var, cls.cf_identity, None)
            if referenced is None:
                continue
            name = referenced.strip()
            if name in ignore:
                continue
            if name in variables:
                result[name] = CFGridMappingVariable(name, variables[name])
            elif warn:
                message = "Missing CF-netCDF grid mapping variable %r, referenced by netCDF variable %r"
                warnings.warn(message % (name, nc_var_name))
        return result
class CFLabelVariable(CFVariable):
    """
    A CF-netCDF CF label variable is any netCDF variable that contain string
    textual information, or labels.
    Identified by the CF-netCDF variable attribute 'coordinates'.
    Also see :class:`iris.fileformats.cf.CFAuxiliaryCoordinateVariable`.
    Ref: [CF] Section 6.1. Labels.
    """
    cf_identity = "coordinates"
    @classmethod
    def identify(cls, variables, ignore=None, target=None, warn=True):
        """Identify all CF label variables referenced by the target variables."""
        result = {}
        ignore, target = cls._identify_common(variables, ignore, target)
        # Identify all CF label variables.
        for nc_var_name, nc_var in target.items():
            # Check for label variable references.
            nc_var_att = getattr(nc_var, cls.cf_identity, None)
            if nc_var_att is not None:
                # The 'coordinates' attribute is a whitespace separated list.
                for name in nc_var_att.split():
                    if name not in ignore:
                        if name not in variables:
                            if warn:
                                message = "Missing CF-netCDF label variable %r, referenced by netCDF variable %r"
                                warnings.warn(message % (name, nc_var_name))
                        else:
                            # Register variable, but only allow string type.
                            var = variables[name]
                            if _is_str_dtype(var):
                                result[name] = CFLabelVariable(name, var)
        return result
    def cf_label_data(self, cf_data_var):
        """
        Return the associated CF-netCDF label variable strings.
        Args:
        * cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
            The CF-netCDF data variable which the CF-netCDF label variable describes.
        Returns:
            String labels.
        """
        if not isinstance(cf_data_var, CFDataVariable):
            raise TypeError(
                "cf_data_var argument should be of type CFDataVariable. Got %r."
                % type(cf_data_var)
            )
        # Determine the name of the label string (or length) dimension by
        # finding the dimension name that doesn't exist within the data dimensions.
        str_dim_name = list(set(self.dimensions) - set(cf_data_var.dimensions))
        if len(str_dim_name) != 1:
            raise ValueError(
                "Invalid string dimensions for CF-netCDF label variable %r"
                % self.cf_name
            )
        str_dim_name = str_dim_name[0]
        label_data = self[:]
        # Fill any masked entries so the byte-join below cannot fail.
        if ma.isMaskedArray(label_data):
            label_data = label_data.filled()
        # Determine whether we have a string-valued scalar label
        # i.e. a character variable that only has one dimension (the length of the string).
        if self.ndim == 1:
            # Single label: join the characters into one decoded string.
            label_string = b"".join(label_data).strip()
            label_string = label_string.decode("utf8")
            data = np.array([label_string])
        else:
            # Determine the index of the string dimension.
            str_dim = self.dimensions.index(str_dim_name)
            # Calculate new label data shape (without string dimension) and create payload array.
            new_shape = tuple(
                dim_len for i, dim_len in enumerate(self.shape) if i != str_dim
            )
            string_basetype = "|U%d"
            string_dtype = string_basetype % self.shape[str_dim]
            data = np.empty(new_shape, dtype=string_dtype)
            for index in np.ndindex(new_shape):
                # Create the slice for the label data.
                # NOTE(review): this assumes the string dimension is either
                # the first or the last dimension of the variable — confirm.
                if str_dim == 0:
                    label_index = (slice(None, None),) + index
                else:
                    label_index = index + (slice(None, None),)
                label_string = b"".join(label_data[label_index]).strip()
                label_string = label_string.decode("utf8")
                data[index] = label_string
        return data
    def cf_label_dimensions(self, cf_data_var):
        """
        Return the name of the associated CF-netCDF label variable data dimensions.
        Args:
        * cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
            The CF-netCDF data variable which the CF-netCDF label variable describes.
        Returns:
            Tuple of label data dimension names.
        """
        if not isinstance(cf_data_var, CFDataVariable):
            raise TypeError(
                "cf_data_var argument should be of type CFDataVariable. Got %r."
                % type(cf_data_var)
            )
        # Keep only the dimensions shared with the data variable, in order.
        return tuple(
            [
                dim_name
                for dim_name in self.dimensions
                if dim_name in cf_data_var.dimensions
            ]
        )
    def spans(self, cf_variable):
        """
        Determine whether the dimensionality of this variable
        is a subset of the specified target variable.
        Note that, by default scalar variables always span the
        dimensionality of the target variable.
        Args:
        * cf_variable:
            Compare dimensionality with the :class:`CFVariable`.
        Returns:
            Boolean.
        """
        # Scalar variables always span the target variable.
        result = True
        if self.dimensions:
            source = self.dimensions
            target = cf_variable.dimensions
            # Ignore label string length dimension.
            # The string dimension may be first or last, so accept either.
            result = set(source[:-1]).issubset(target) or set(
                source[1:]
            ).issubset(target)
        return result
class CFMeasureVariable(CFVariable):
    """
    A CF-netCDF measure variable provides cell areas or volumes.

    Such variables are referenced through the CF-netCDF variable
    attribute 'cell_measures'.

    Ref: [CF] Section 7.2. Cell Measures.
    """
    cf_identity = "cell_measures"
    def __init__(self, name, data, measure):
        """Wrap netCDF variable *data* as the cell measure *measure*."""
        CFVariable.__init__(self, name, data)
        #: Associated cell measure of the cell variable
        self.cf_measure = measure
    @classmethod
    def identify(cls, variables, ignore=None, target=None, warn=True):
        """Return a dict of all cell measure variables referenced by *target*."""
        result = {}
        ignore, target = cls._identify_common(variables, ignore, target)
        # Inspect each candidate variable for 'cell_measures' references.
        for nc_var_name, nc_var in target.items():
            nc_var_att = getattr(nc_var, cls.cf_identity, None)
            if nc_var_att is None:
                continue
            for match_item in _CF_PARSE.finditer(nc_var_att):
                match_group = match_item.groupdict()
                measure = match_group["lhs"]
                variable_name = match_group["rhs"]
                # A variable must not provide a cell measure for itself.
                if variable_name == nc_var_name or variable_name in ignore:
                    continue
                if variable_name in variables:
                    result[variable_name] = CFMeasureVariable(
                        variable_name,
                        variables[variable_name],
                        measure,
                    )
                elif warn:
                    # Referenced measure variable is absent from the file.
                    message = "Missing CF-netCDF measure variable %r, referenced by netCDF variable %r"
                    warnings.warn(
                        message % (variable_name, nc_var_name)
                    )
        return result
################################################################################
class CFGroup(MutableMapping):
    """
    A mapping of CF-netCDF variable names to their :class:`CFVariable`
    wrappers, together with the netCDF global attributes of the file
    ('NetCDF Climate and Forecast (CF) Metadata Conventions').
    """
    def __init__(self):
        #: Collection of CF-netCDF variables
        self._cf_variables = {}
        #: Collection of netCDF global attributes
        self.global_attributes = {}
        #: Collection of CF-netCDF variables promoted to a CFDataVariable.
        self.promoted = {}
    def _cf_getter(self, cls):
        # Select only the managed variables of the requested CF type.
        selected = {}
        for cf_name, cf_var in self._cf_variables.items():
            if isinstance(cf_var, cls):
                selected[cf_name] = cf_var
        return selected
    @property
    def ancillary_variables(self):
        """Collection of CF-netCDF ancillary variables."""
        return self._cf_getter(CFAncillaryDataVariable)
    @property
    def auxiliary_coordinates(self):
        """Collection of CF-netCDF auxiliary coordinate variables."""
        return self._cf_getter(CFAuxiliaryCoordinateVariable)
    @property
    def bounds(self):
        """Collection of CF-netCDF boundary variables."""
        return self._cf_getter(CFBoundaryVariable)
    @property
    def climatology(self):
        """Collection of CF-netCDF climatology variables."""
        return self._cf_getter(CFClimatologyVariable)
    @property
    def coordinates(self):
        """Collection of CF-netCDF coordinate variables."""
        return self._cf_getter(CFCoordinateVariable)
    @property
    def data_variables(self):
        """Collection of CF-netCDF data pay-load variables."""
        return self._cf_getter(CFDataVariable)
    @property
    def formula_terms(self):
        """Collection of CF-netCDF variables that participate in a CF-netCDF formula term."""
        return {
            name: var
            for name, var in self._cf_variables.items()
            if var.has_formula_terms()
        }
    @property
    def grid_mappings(self):
        """Collection of CF-netCDF grid mapping variables."""
        return self._cf_getter(CFGridMappingVariable)
    @property
    def labels(self):
        """Collection of CF-netCDF label variables."""
        return self._cf_getter(CFLabelVariable)
    @property
    def cell_measures(self):
        """Collection of CF-netCDF measure variables."""
        return self._cf_getter(CFMeasureVariable)
    def keys(self):
        """Return the names of all the CF-netCDF variables in the group."""
        return self._cf_variables.keys()
    def __len__(self):
        return len(self._cf_variables)
    def __iter__(self):
        # Iterating a dict yields its keys: the CF variable names.
        yield from self._cf_variables
    def __setitem__(self, name, variable):
        # Only CFVariable instances may be stored, keyed by their own name.
        if not isinstance(variable, CFVariable):
            raise TypeError(
                "Attempted to add an invalid CF-netCDF variable to the %s"
                % self.__class__.__name__
            )
        if name != variable.cf_name:
            raise ValueError(
                "Mismatch between key name %r and CF-netCDF variable name %r"
                % (str(name), variable.cf_name)
            )
        self._cf_variables[name] = variable
    def __getitem__(self, name):
        if name not in self._cf_variables:
            raise KeyError(
                "Cannot get unknown CF-netCDF variable name %r" % str(name)
            )
        return self._cf_variables[name]
    def __delitem__(self, name):
        if name not in self._cf_variables:
            raise KeyError(
                "Cannot delete unknown CF-netcdf variable name %r" % str(name)
            )
        del self._cf_variables[name]
    def __repr__(self):
        # Summarise the sizes of the three collections held by the group.
        counts = ", ".join(
            [
                "variables:%d" % len(self._cf_variables),
                "global_attributes:%d" % len(self.global_attributes),
                "promoted:%d" % len(self.promoted),
            ]
        )
        return "<%s of %s>" % (self.__class__.__name__, counts)
################################################################################
class CFReader:
    """
    This class allows the contents of a netCDF file to be interpreted according
    to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
    """
    def __init__(self, filename, warn=False, monotonic=False):
        """
        Open the given netCDF file and classify its variables.

        Args:

        * filename (string):
            Name of the netCDF file to read (user directories expanded).

        Kwargs:

        * warn (bool):
            If True, issue a load-optimisation warning for NetCDF3 files.
        * monotonic (bool):
            Passed to CFCoordinateVariable.identify to control whether
            coordinate variables must be monotonic.

        """
        self._filename = os.path.expanduser(filename)
        # All CF variable types EXCEPT for the "special cases" of
        # CFDataVariable, CFCoordinateVariable and _CFFormulaTermsVariable.
        self._variable_types = (
            CFAncillaryDataVariable,
            CFAuxiliaryCoordinateVariable,
            CFBoundaryVariable,
            CFClimatologyVariable,
            CFGridMappingVariable,
            CFLabelVariable,
            CFMeasureVariable,
        )
        #: Collection of CF-netCDF variables associated with this netCDF file
        self.cf_group = CFGroup()
        self._dataset = netCDF4.Dataset(self._filename, mode="r")
        # Issue load optimisation warning.
        if warn and self._dataset.file_format in [
            "NETCDF3_CLASSIC",
            "NETCDF3_64BIT",
        ]:
            warnings.warn(
                "Optimise CF-netCDF loading by converting data from NetCDF3 "
                'to NetCDF4 file format using the "nccopy" command.'
            )
        self._check_monotonic = monotonic
        # Classify the variables, build the relationships, then clear the
        # attribute access history recorded during classification.
        self._translate()
        self._build_cf_groups()
        self._reset()
    def __repr__(self):
        """Return a developer-oriented representation of this reader."""
        return "%s(%r)" % (self.__class__.__name__, self._filename)
    def _translate(self):
        """Classify the netCDF variables into CF-netCDF variables."""
        netcdf_variable_names = list(self._dataset.variables.keys())
        # Identify all CF coordinate variables first. This must be done
        # first as, by CF convention, the definition of a CF auxiliary
        # coordinate variable may include a scalar CF coordinate variable,
        # whereas we want these two types of variables to be mutually exclusive.
        coords = CFCoordinateVariable.identify(
            self._dataset.variables, monotonic=self._check_monotonic
        )
        self.cf_group.update(coords)
        coordinate_names = list(self.cf_group.coordinates.keys())
        # Identify all CF variables EXCEPT for the "special cases".
        for variable_type in self._variable_types:
            # Prevent grid mapping variables being mis-identified as CF coordinate variables.
            ignore = (
                None
                if issubclass(variable_type, CFGridMappingVariable)
                else coordinate_names
            )
            self.cf_group.update(
                variable_type.identify(self._dataset.variables, ignore=ignore)
            )
        # Identify global netCDF attributes.
        attr_dict = {
            attr_name: _getncattr(self._dataset, attr_name, "")
            for attr_name in self._dataset.ncattrs()
        }
        self.cf_group.global_attributes.update(attr_dict)
        # Identify and register all CF formula terms.
        formula_terms = _CFFormulaTermsVariable.identify(
            self._dataset.variables
        )
        for cf_var in formula_terms.values():
            for cf_root, cf_term in cf_var.cf_terms_by_root.items():
                # Ignore formula terms owned by a bounds variable.
                if cf_root not in self.cf_group.bounds:
                    cf_name = cf_var.cf_name
                    # A formula terms variable not yet classified is
                    # registered as an auxiliary coordinate variable.
                    if cf_var.cf_name not in self.cf_group:
                        self.cf_group[cf_name] = CFAuxiliaryCoordinateVariable(
                            cf_name, cf_var.cf_data
                        )
                    self.cf_group[cf_name].add_formula_term(cf_root, cf_term)
        # Determine the CF data variables.
        # Any variable not claimed by another CF category becomes a
        # data (pay-load) variable.
        data_variable_names = (
            set(netcdf_variable_names)
            - set(self.cf_group.ancillary_variables)
            - set(self.cf_group.auxiliary_coordinates)
            - set(self.cf_group.bounds)
            - set(self.cf_group.climatology)
            - set(self.cf_group.coordinates)
            - set(self.cf_group.grid_mappings)
            - set(self.cf_group.labels)
            - set(self.cf_group.cell_measures)
        )
        for name in data_variable_names:
            self.cf_group[name] = CFDataVariable(
                name, self._dataset.variables[name]
            )
    def _build_cf_groups(self):
        """Build the first order relationships between CF-netCDF variables."""
        def _build(cf_variable):
            """Attach to *cf_variable* a CFGroup of its related variables."""
            coordinate_names = list(self.cf_group.coordinates.keys())
            cf_group = CFGroup()
            # Build CF variable relationships.
            for variable_type in self._variable_types:
                # Prevent grid mapping variables being mis-identified as
                # CF coordinate variables.
                if issubclass(variable_type, CFGridMappingVariable):
                    ignore = None
                else:
                    ignore = coordinate_names
                match = variable_type.identify(
                    self._dataset.variables,
                    ignore=ignore,
                    target=cf_variable.cf_name,
                    warn=False,
                )
                # Sanity check dimensionality coverage.
                for cf_name, cf_var in match.items():
                    if cf_var.spans(cf_variable):
                        cf_group[cf_name] = self.cf_group[cf_name]
                    else:
                        # Register the ignored variable.
                        # N.B. 'ignored' variable from enclosing scope.
                        ignored.add(cf_name)
                        msg = (
                            "Ignoring variable {!r} referenced "
                            "by variable {!r}: Dimensions {!r} do not "
                            "span {!r}".format(
                                cf_name,
                                cf_variable.cf_name,
                                cf_var.dimensions,
                                cf_variable.dimensions,
                            )
                        )
                        warnings.warn(msg)
            # Build CF data variable relationships.
            if isinstance(cf_variable, CFDataVariable):
                # Add global netCDF attributes.
                cf_group.global_attributes.update(
                    self.cf_group.global_attributes
                )
                # Add appropriate "dimensioned" CF coordinate variables.
                cf_group.update(
                    {
                        cf_name: self.cf_group[cf_name]
                        for cf_name in cf_variable.dimensions
                        if cf_name in self.cf_group.coordinates
                    }
                )
                # Add appropriate "dimensionless" CF coordinate variables.
                coordinates_attr = getattr(cf_variable, "coordinates", "")
                cf_group.update(
                    {
                        cf_name: self.cf_group[cf_name]
                        for cf_name in coordinates_attr.split()
                        if cf_name in self.cf_group.coordinates
                    }
                )
                # Add appropriate formula terms.
                for cf_var in self.cf_group.formula_terms.values():
                    for cf_root in cf_var.cf_terms_by_root:
                        if (
                            cf_root in cf_group
                            and cf_var.cf_name not in cf_group
                        ):
                            # Sanity check dimensionality.
                            if cf_var.spans(cf_variable):
                                cf_group[cf_var.cf_name] = cf_var
                            else:
                                # Register the ignored variable.
                                # N.B. 'ignored' variable from enclosing scope.
                                ignored.add(cf_var.cf_name)
                                msg = (
                                    "Ignoring formula terms variable {!r} "
                                    "referenced by data variable {!r} via "
                                    "variable {!r}: Dimensions {!r} do not "
                                    "span {!r}".format(
                                        cf_var.cf_name,
                                        cf_variable.cf_name,
                                        cf_root,
                                        cf_var.dimensions,
                                        cf_variable.dimensions,
                                    )
                                )
                                warnings.warn(msg)
            # Add the CF group to the variable.
            cf_variable.cf_group = cf_group
        # Ignored variables are those that cannot be attached to a
        # data variable as the dimensionality of that variable is not
        # a subset of the dimensionality of the data variable.
        ignored = set()
        for cf_variable in self.cf_group.values():
            _build(cf_variable)
        # Determine whether there are any formula terms that
        # may be promoted to a CFDataVariable and restrict promotion to only
        # those formula terms that are reference surface/phenomenon.
        for cf_var in self.cf_group.formula_terms.values():
            for cf_root, cf_term in cf_var.cf_terms_by_root.items():
                cf_root_var = self.cf_group[cf_root]
                name = cf_root_var.standard_name or cf_root_var.long_name
                terms = reference_terms.get(name, [])
                # Normalise a single term to a one-element list.
                if isinstance(terms, str) or not isinstance(terms, Iterable):
                    terms = [terms]
                cf_var_name = cf_var.cf_name
                if (
                    cf_term in terms
                    and cf_var_name not in self.cf_group.promoted
                ):
                    data_var = CFDataVariable(cf_var_name, cf_var.cf_data)
                    self.cf_group.promoted[cf_var_name] = data_var
                    _build(data_var)
                    break
        # Promote any ignored variables.
        # Promotion may itself ignore further variables, so loop until
        # no un-promoted ignored variables remain.
        promoted = set()
        not_promoted = ignored.difference(promoted)
        while not_promoted:
            cf_name = not_promoted.pop()
            if (
                cf_name not in self.cf_group.data_variables
                and cf_name not in self.cf_group.promoted
            ):
                data_var = CFDataVariable(
                    cf_name, self.cf_group[cf_name].cf_data
                )
                self.cf_group.promoted[cf_name] = data_var
                _build(data_var)
            # Determine whether there are still any ignored variables
            # yet to be promoted.
            promoted.add(cf_name)
            not_promoted = ignored.difference(promoted)
    def _reset(self):
        """Reset the attribute touch history of each variable."""
        for nc_var_name in self._dataset.variables.keys():
            self.cf_group[nc_var_name].cf_attrs_reset()
    def __del__(self):
        # Explicitly close dataset to prevent file remaining open.
        self._dataset.close()
def _getncattr(dataset, attr, default=None):
    """
    Simple wrapper round `netCDF4.Dataset.getncattr` to make it behave
    more like `getattr`.
    """
    try:
        return dataset.getncattr(attr)
    except AttributeError:
        # Attribute not present on the dataset: fall back to the default.
        return default
|
pp-mo/iris
|
lib/iris/fileformats/cf.py
|
Python
|
lgpl-3.0
| 45,520
|
[
"NetCDF"
] |
e235156e7f89f300ee5a7464a16819ccd2fd0f54474ac554664193a3f678d057
|
"""
Acceptance tests for Studio related to the container page.
"""
from ..pages.studio.auto_auth import AutoAuthPage
from ..pages.studio.overview import CourseOutlinePage
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from .helpers import UniqueCourseTest
class ContainerBase(UniqueCourseTest):
    """
    Base class for tests that do operations on the container page.

    Installs a course fixture containing nested verticals ("groups") with
    html children, and provides helpers to open the container page, run an
    action on it, and verify the resulting xblock ordering.
    """
    __test__ = False

    def setUp(self):
        """
        Create a unique identifier for the course used in this test.
        """
        # Ensure that the superclass sets up
        super(ContainerBase, self).setUp()
        self.auth_page = AutoAuthPage(self.browser, staff=True)
        self.outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Display names of the groups/items created by setup_fixtures.
        self.container_title = ""
        self.group_a = "Expand or Collapse\nGroup A"
        self.group_b = "Expand or Collapse\nGroup B"
        self.group_empty = "Expand or Collapse\nGroup Empty"
        self.group_a_item_1 = "Group A Item 1"
        self.group_a_item_2 = "Group A Item 2"
        self.group_b_item_1 = "Group B Item 1"
        self.group_b_item_2 = "Group B Item 2"
        # Index of each element's drag handle, in page order.
        self.group_a_handle = 0
        self.group_a_item_1_handle = 1
        self.group_a_item_2_handle = 2
        self.group_empty_handle = 3
        self.group_b_handle = 4
        self.group_b_item_1_handle = 5
        self.group_b_item_2_handle = 6
        # Index of each item's action (duplicate/delete) control.
        self.group_a_item_1_action_index = 0
        self.group_a_item_2_action_index = 1
        self.duplicate_label = "Duplicate of '{0}'"
        self.discussion_label = "Discussion"
        self.setup_fixtures()
        self.auth_page.visit()

    def setup_fixtures(self):
        """Install the course fixture with the nested group structure."""
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('vertical', 'Test Container').add_children(
                            XBlockFixtureDesc('vertical', 'Group A').add_children(
                                XBlockFixtureDesc('html', self.group_a_item_1),
                                XBlockFixtureDesc('html', self.group_a_item_2)
                            ),
                            XBlockFixtureDesc('vertical', 'Group Empty'),
                            XBlockFixtureDesc('vertical', 'Group B').add_children(
                                XBlockFixtureDesc('html', self.group_b_item_1),
                                XBlockFixtureDesc('html', self.group_b_item_2)
                            )
                        )
                    )
                )
            )
        ).install()

    def go_to_container_page(self, make_draft=False):
        """
        Navigate to the container page, optionally putting the unit into
        draft state first.
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.toggle_expand().unit('Test Unit').go_to()
        if make_draft:
            unit.edit_draft()
        container = unit.components[0].go_to_container()
        return container

    def verify_ordering(self, container, expected_orderings):
        """
        Verify the ordering of xblocks on the container page.

        *expected_orderings* is a list of single-entry dicts, each mapping
        a parent name to the ordered list of its expected child names.
        """
        xblocks = container.xblocks
        for expected_ordering in expected_orderings:
            for xblock in xblocks:
                # Each ordering dict holds exactly one parent name.
                # ``dict.keys()[0]`` is Python 2-only (dict views are not
                # subscriptable on Python 3); list() is portable.
                parent = list(expected_ordering.keys())[0]
                if xblock.name == parent:
                    children = xblock.children
                    expected_length = len(expected_ordering.get(parent))
                    self.assertEqual(
                        expected_length, len(children),
                        "Number of children incorrect for group {0}. Expected {1} but got {2}.".format(parent, expected_length, len(children)))
                    for idx, expected in enumerate(expected_ordering.get(parent)):
                        self.assertEqual(expected, children[idx].name)
                    break

    def do_action_and_verify(self, action, expected_ordering):
        """
        Run *action* against a draft container page, verify the ordering,
        then reload the page and verify the change persisted.
        """
        container = self.go_to_container_page(make_draft=True)
        action(container)
        self.verify_ordering(container, expected_ordering)
        # Reload the page to see that the change was persisted.
        container = self.go_to_container_page()
        self.verify_ordering(container, expected_ordering)
class DragAndDropTest(ContainerBase):
    """
    Tests of reordering within the container page.
    """
    __test__ = True

    def drag_and_verify(self, source, target, expected_ordering):
        """Drag handle *source* onto handle *target* and verify the ordering."""
        self.do_action_and_verify(
            # Plain single-argument lambda: the parenthesized form
            # ``lambda (container):`` is Python 2-only tuple-parameter
            # syntax (removed by PEP 3113).
            lambda container: container.drag(source, target),
            expected_ordering
        )

    def test_reorder_in_group(self):
        """
        Drag Group A Item 2 before Group A Item 1.
        """
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_2, self.group_a_item_1]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_a_item_2_handle, self.group_a_item_1_handle, expected_ordering)

    def test_drag_to_top(self):
        """
        Drag Group A Item 1 to top level (outside of Group A).
        """
        expected_ordering = [{self.container_title: [self.group_a_item_1, self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_a_item_1_handle, self.group_a_handle, expected_ordering)

    def test_drag_into_different_group(self):
        """
        Drag Group B Item 1 into Group A (first element).
        """
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_b_item_1, self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_b_item_1_handle, self.group_a_item_1_handle, expected_ordering)

    def test_drag_group_into_group(self):
        """
        Drag Group B into Group A (first element).
        """
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty]},
                             {self.group_a: [self.group_b, self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_b_handle, self.group_a_item_1_handle, expected_ordering)

    def test_drag_after_addition(self):
        """
        Add some components and then verify that drag and drop still works.
        """
        group_a_menu = 0

        def add_new_components_and_rearrange(container):
            # Add a discussion component to Group A.
            container.add_discussion(group_a_menu)
            # Duplicate the first item in Group A
            container.duplicate(self.group_a_item_1_action_index)
            first_handle = self.group_a_item_1_handle
            # Drag newly added discussion component to top.
            container.drag(first_handle + 3, first_handle)
            # Drag duplicated component to top.
            container.drag(first_handle + 2, first_handle)

        duplicate_label = self.duplicate_label.format(self.group_a_item_1)
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [duplicate_label, self.discussion_label, self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.do_action_and_verify(add_new_components_and_rearrange, expected_ordering)
class AddComponentTest(ContainerBase):
    """
    Tests of adding a component to the container page.
    """
    __test__ = True

    def add_and_verify(self, menu_index, expected_ordering):
        """Add a discussion via menu *menu_index* and verify the ordering."""
        self.do_action_and_verify(
            # Plain single-argument lambda: ``lambda (container):`` is
            # Python 2-only tuple-parameter syntax (removed by PEP 3113).
            lambda container: container.add_discussion(menu_index),
            expected_ordering
        )

    def test_add_component_in_group(self):
        group_b_menu = 2
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2, self.discussion_label]},
                             {self.group_empty: []}]
        self.add_and_verify(group_b_menu, expected_ordering)

    def test_add_component_in_empty_group(self):
        group_empty_menu = 1
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: [self.discussion_label]}]
        self.add_and_verify(group_empty_menu, expected_ordering)

    def test_add_component_in_container(self):
        container_menu = 3
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b, self.discussion_label]},
                             {self.group_a: [self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.add_and_verify(container_menu, expected_ordering)
class DuplicateComponentTest(ContainerBase):
    """
    Tests of duplicating a component on the container page.
    """
    __test__ = True

    def duplicate_and_verify(self, source_index, expected_ordering):
        """Duplicate the item at *source_index* and verify the ordering."""
        self.do_action_and_verify(
            # Plain single-argument lambda: ``lambda (container):`` is
            # Python 2-only tuple-parameter syntax (removed by PEP 3113).
            lambda container: container.duplicate(source_index),
            expected_ordering
        )

    def test_duplicate_first_in_group(self):
        duplicate_label = self.duplicate_label.format(self.group_a_item_1)
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, duplicate_label, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.duplicate_and_verify(self.group_a_item_1_action_index, expected_ordering)

    def test_duplicate_second_in_group(self):
        duplicate_label = self.duplicate_label.format(self.group_a_item_2)
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, self.group_a_item_2, duplicate_label]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.duplicate_and_verify(self.group_a_item_2_action_index, expected_ordering)

    def test_duplicate_the_duplicate(self):
        first_duplicate_label = self.duplicate_label.format(self.group_a_item_1)
        second_duplicate_label = self.duplicate_label.format(first_duplicate_label)
        expected_ordering = [
            {self.container_title: [self.group_a, self.group_empty, self.group_b]},
            {self.group_a: [self.group_a_item_1, first_duplicate_label, second_duplicate_label, self.group_a_item_2]},
            {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
            {self.group_empty: []}
        ]

        def duplicate_twice(container):
            # The first duplicate is inserted directly after the original,
            # so the second duplication targets index + 1.
            container.duplicate(self.group_a_item_1_action_index)
            container.duplicate(self.group_a_item_1_action_index + 1)

        self.do_action_and_verify(duplicate_twice, expected_ordering)
class DeleteComponentTest(ContainerBase):
    """
    Tests of deleting a component from the container page.
    """
    __test__ = True

    def delete_and_verify(self, source_index, expected_ordering):
        """Delete the item at *source_index* and verify the ordering."""
        self.do_action_and_verify(
            # Plain single-argument lambda: ``lambda (container):`` is
            # Python 2-only tuple-parameter syntax (removed by PEP 3113).
            lambda container: container.delete(source_index),
            expected_ordering
        )

    def test_delete_first_in_group(self):
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.delete_and_verify(self.group_a_item_1_action_index, expected_ordering)
|
nanolearning/edx-platform
|
common/test/acceptance/tests/test_studio_container.py
|
Python
|
agpl-3.0
| 13,333
|
[
"VisIt"
] |
10bf9e54bfbb65523edb41f7418a0eeab6bede6b287e2cbc30b0108014cfc348
|
"""Hyperion config flow."""
from __future__ import annotations
import asyncio
from contextlib import suppress
import logging
from typing import Any
from urllib.parse import urlparse
from hyperion import client, const
import voluptuous as vol
from homeassistant.components.ssdp import ATTR_SSDP_LOCATION, ATTR_UPNP_SERIAL
from homeassistant.config_entries import (
SOURCE_REAUTH,
ConfigEntry,
ConfigFlow,
OptionsFlow,
)
from homeassistant.const import (
CONF_BASE,
CONF_HOST,
CONF_ID,
CONF_PORT,
CONF_SOURCE,
CONF_TOKEN,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
import homeassistant.helpers.config_validation as cv
from . import create_hyperion_client
from .const import (
CONF_AUTH_ID,
CONF_CREATE_TOKEN,
CONF_EFFECT_HIDE_LIST,
CONF_EFFECT_SHOW_LIST,
CONF_PRIORITY,
DEFAULT_ORIGIN,
DEFAULT_PRIORITY,
DOMAIN,
)
# Module-level logger for this config flow.
_LOGGER = logging.getLogger(__name__)
# NOTE(review): forcing DEBUG at import time overrides whatever level the
# application's logging configuration has chosen for this module —
# presumably a development leftover; confirm before keeping it.
_LOGGER.setLevel(logging.DEBUG)
# +------------------+ +------------------+ +--------------------+ +--------------------+
# |Step: SSDP | |Step: user | |Step: import | |Step: reauth |
# | | | | | | | |
# |Input: <discovery>| |Input: <host/port>| |Input: <import data>| |Input: <entry_data> |
# +------------------+ +------------------+ +--------------------+ +--------------------+
# v v v v
# +-------------------+-----------------------+--------------------+
# Auth not | Auth |
# required? | required? |
# | v
# | +------------+
# | |Step: auth |
# | | |
# | |Input: token|
# | +------------+
# | Static |
# v token |
# <------------------+
# | |
# | | New token
# | v
# | +------------------+
# | |Step: create_token|
# | +------------------+
# | |
# | v
# | +---------------------------+ +--------------------------------+
# | |Step: create_token_external|-->|Step: create_token_external_fail|
# | +---------------------------+ +--------------------------------+
# | |
# | v
# | +-----------------------------------+
# | |Step: create_token_external_success|
# | +-----------------------------------+
# | |
# v<------------------+
# |
# v
# +-------------+ Confirm not required?
# |Step: Confirm|---------------------->+
# +-------------+ |
# | |
# v SSDP: Explicit confirm |
# +------------------------------>+
# |
# v
# +----------------+
# | Create/Update! |
# +----------------+
# A note on choice of discovery mechanisms: Hyperion supports both Zeroconf and SSDP out
# of the box. This config flow needs two port numbers from the Hyperion instance, the
# JSON port (for the API) and the UI port (for the user to approve dynamically created
# auth tokens). With Zeroconf the port numbers for both are in different Zeroconf
# entries, and as Home Assistant only passes a single entry into the config flow, we can
# only conveniently 'see' one port or the other (which means we need to guess one port
# number). With SSDP, we get the combined block including both port numbers, so SSDP is
# the favored discovery implementation.
class HyperionConfigFlow(ConfigFlow, domain=DOMAIN):
"""Handle a Hyperion config flow."""
VERSION = 1
    def __init__(self) -> None:
        """Instantiate config flow."""
        # Accumulated config entry data (host, port, token, ...).
        self._data: dict[str, Any] = {}
        # Background task used while requesting a new auth token.
        self._request_token_task: asyncio.Task | None = None
        # Identifier associated with a pending token request
        # (presumably shown to the user — see the create_token steps).
        self._auth_id: str | None = None
        # Whether an explicit confirmation step is required (set for SSDP).
        self._require_confirm: bool = False
        # Port of the Hyperion UI, used to approve dynamically created
        # auth tokens.
        self._port_ui: int = const.DEFAULT_PORT_UI
def _create_client(self, raw_connection: bool = False) -> client.HyperionClient:
"""Create and connect a client instance."""
return create_hyperion_client(
self._data[CONF_HOST],
self._data[CONF_PORT],
token=self._data.get(CONF_TOKEN),
raw_connection=raw_connection,
)
async def _advance_to_auth_step_if_necessary(
self, hyperion_client: client.HyperionClient
) -> FlowResult:
"""Determine if auth is required."""
auth_resp = await hyperion_client.async_is_auth_required()
# Could not determine if auth is required.
if not auth_resp or not client.ResponseOK(auth_resp):
return self.async_abort(reason="auth_required_error")
auth_required = auth_resp.get(const.KEY_INFO, {}).get(const.KEY_REQUIRED, False)
if auth_required:
return await self.async_step_auth()
return await self.async_step_confirm()
    async def async_step_reauth(
        self,
        config_data: dict[str, Any],
    ) -> FlowResult:
        """Handle a reauthentication flow."""
        # Re-seed the flow with the existing entry's data, then re-probe
        # the server to decide whether the auth step is needed.
        self._data = dict(config_data)
        async with self._create_client(raw_connection=True) as hyperion_client:
            if not hyperion_client:
                return self.async_abort(reason="cannot_connect")
            return await self._advance_to_auth_step_if_necessary(hyperion_client)
async def async_step_ssdp(self, discovery_info: dict[str, Any]) -> FlowResult:
"""Handle a flow initiated by SSDP."""
# Sample data provided by SSDP: {
# 'ssdp_location': 'http://192.168.0.1:8090/description.xml',
# 'ssdp_st': 'upnp:rootdevice',
# 'deviceType': 'urn:schemas-upnp-org:device:Basic:1',
# 'friendlyName': 'Hyperion (192.168.0.1)',
# 'manufacturer': 'Hyperion Open Source Ambient Lighting',
# 'manufacturerURL': 'https://www.hyperion-project.org',
# 'modelDescription': 'Hyperion Open Source Ambient Light',
# 'modelName': 'Hyperion',
# 'modelNumber': '2.0.0-alpha.8',
# 'modelURL': 'https://www.hyperion-project.org',
# 'serialNumber': 'f9aab089-f85a-55cf-b7c1-222a72faebe9',
# 'UDN': 'uuid:f9aab089-f85a-55cf-b7c1-222a72faebe9',
# 'ports': {
# 'jsonServer': '19444',
# 'sslServer': '8092',
# 'protoBuffer': '19445',
# 'flatBuffer': '19400'
# },
# 'presentationURL': 'index.html',
# 'iconList': {
# 'icon': {
# 'mimetype': 'image/png',
# 'height': '100',
# 'width': '100',
# 'depth': '32',
# 'url': 'img/hyperion/ssdp_icon.png'
# }
# },
# 'ssdp_usn': 'uuid:f9aab089-f85a-55cf-b7c1-222a72faebe9',
# 'ssdp_ext': '',
# 'ssdp_server': 'Raspbian GNU/Linux 10 (buster)/10 UPnP/1.0 Hyperion/2.0.0-alpha.8'}
# SSDP requires user confirmation.
self._require_confirm = True
self._data[CONF_HOST] = urlparse(discovery_info[ATTR_SSDP_LOCATION]).hostname
try:
self._port_ui = urlparse(discovery_info[ATTR_SSDP_LOCATION]).port
except ValueError:
self._port_ui = const.DEFAULT_PORT_UI
try:
self._data[CONF_PORT] = int(
discovery_info.get("ports", {}).get(
"jsonServer", const.DEFAULT_PORT_JSON
)
)
except ValueError:
self._data[CONF_PORT] = const.DEFAULT_PORT_JSON
hyperion_id = discovery_info.get(ATTR_UPNP_SERIAL)
if not hyperion_id:
return self.async_abort(reason="no_id")
# For discovery mechanisms, we set the unique_id as early as possible to
# avoid discovery popping up a duplicate on the screen. The unique_id is set
# authoritatively later in the flow by asking the server to confirm its id
# (which should theoretically be the same as specified here)
await self.async_set_unique_id(hyperion_id)
self._abort_if_unique_id_configured()
async with self._create_client(raw_connection=True) as hyperion_client:
if not hyperion_client:
return self.async_abort(reason="cannot_connect")
return await self._advance_to_auth_step_if_necessary(hyperion_client)
async def async_step_user(
self,
user_input: dict[str, Any] | None = None,
) -> FlowResult:
"""Handle a flow initiated by the user."""
errors = {}
if user_input:
self._data.update(user_input)
async with self._create_client(raw_connection=True) as hyperion_client:
if hyperion_client:
return await self._advance_to_auth_step_if_necessary(
hyperion_client
)
errors[CONF_BASE] = "cannot_connect"
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Optional(CONF_PORT, default=const.DEFAULT_PORT_JSON): int,
}
),
errors=errors,
)
async def _cancel_request_token_task(self) -> None:
"""Cancel the request token task if it exists."""
if self._request_token_task is not None:
if not self._request_token_task.done():
self._request_token_task.cancel()
with suppress(asyncio.CancelledError):
await self._request_token_task
self._request_token_task = None
async def _request_token_task_func(self, auth_id: str) -> None:
"""Send an async_request_token request."""
auth_resp: dict[str, Any] | None = None
async with self._create_client(raw_connection=True) as hyperion_client:
if hyperion_client:
# The Hyperion-py client has a default timeout of 3 minutes on this request.
auth_resp = await hyperion_client.async_request_token(
comment=DEFAULT_ORIGIN, id=auth_id
)
await self.hass.config_entries.flow.async_configure(
flow_id=self.flow_id, user_input=auth_resp
)
def _get_hyperion_url(self) -> str:
"""Return the URL of the Hyperion UI."""
# If this flow was kicked off by SSDP, this will be the correct frontend URL. If
# this is a manual flow instantiation, then it will be a best guess (as this
# flow does not have that information available to it). This is only used for
# approving new dynamically created tokens, so the complexity of asking the user
# manually for this information is likely not worth it (when it would only be
# used to open a URL, that the user already knows the address of).
return f"http://{self._data[CONF_HOST]}:{self._port_ui}"
async def _can_login(self) -> bool | None:
"""Verify login details."""
async with self._create_client(raw_connection=True) as hyperion_client:
if not hyperion_client:
return None
return bool(
client.LoginResponseOK(
await hyperion_client.async_login(token=self._data[CONF_TOKEN])
)
)
async def async_step_auth(
self,
user_input: dict[str, Any] | None = None,
) -> FlowResult:
"""Handle the auth step of a flow."""
errors = {}
if user_input:
if user_input.get(CONF_CREATE_TOKEN):
return await self.async_step_create_token()
# Using a static token.
self._data[CONF_TOKEN] = user_input.get(CONF_TOKEN)
login_ok = await self._can_login()
if login_ok is None:
return self.async_abort(reason="cannot_connect")
if login_ok:
return await self.async_step_confirm()
errors[CONF_BASE] = "invalid_access_token"
return self.async_show_form(
step_id="auth",
data_schema=vol.Schema(
{
vol.Required(CONF_CREATE_TOKEN): bool,
vol.Optional(CONF_TOKEN): str,
}
),
errors=errors,
)
async def async_step_create_token(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Send a request for a new token."""
if user_input is None:
self._auth_id = client.generate_random_auth_id()
return self.async_show_form(
step_id="create_token",
description_placeholders={
CONF_AUTH_ID: self._auth_id,
},
)
# Cancel the request token task if it's already running, then re-create it.
await self._cancel_request_token_task()
# Start a task in the background requesting a new token. The next step will
# wait on the response (which includes the user needing to visit the Hyperion
# UI to approve the request for a new token).
assert self._auth_id is not None
self._request_token_task = self.hass.async_create_task(
self._request_token_task_func(self._auth_id)
)
return self.async_external_step(
step_id="create_token_external", url=self._get_hyperion_url()
)
async def async_step_create_token_external(
self, auth_resp: dict[str, Any] | None = None
) -> FlowResult:
"""Handle completion of the request for a new token."""
if auth_resp is not None and client.ResponseOK(auth_resp):
token = auth_resp.get(const.KEY_INFO, {}).get(const.KEY_TOKEN)
if token:
self._data[CONF_TOKEN] = token
return self.async_external_step_done(
next_step_id="create_token_success"
)
return self.async_external_step_done(next_step_id="create_token_fail")
async def async_step_create_token_success(
self, _: dict[str, Any] | None = None
) -> FlowResult:
"""Create an entry after successful token creation."""
# Clean-up the request task.
await self._cancel_request_token_task()
# Test the token.
login_ok = await self._can_login()
if login_ok is None:
return self.async_abort(reason="cannot_connect")
if not login_ok:
return self.async_abort(reason="auth_new_token_not_work_error")
return await self.async_step_confirm()
async def async_step_create_token_fail(
self, _: dict[str, Any] | None = None
) -> FlowResult:
"""Show an error on the auth form."""
# Clean-up the request task.
await self._cancel_request_token_task()
return self.async_abort(reason="auth_new_token_not_granted_error")
async def async_step_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Get final confirmation before entry creation."""
if user_input is None and self._require_confirm:
return self.async_show_form(
step_id="confirm",
description_placeholders={
CONF_HOST: self._data[CONF_HOST],
CONF_PORT: self._data[CONF_PORT],
CONF_ID: self.unique_id,
},
)
async with self._create_client() as hyperion_client:
if not hyperion_client:
return self.async_abort(reason="cannot_connect")
hyperion_id = await hyperion_client.async_sysinfo_id()
if not hyperion_id:
return self.async_abort(reason="no_id")
entry = await self.async_set_unique_id(hyperion_id, raise_on_progress=False)
if self.context.get(CONF_SOURCE) == SOURCE_REAUTH and entry is not None:
self.hass.config_entries.async_update_entry(entry, data=self._data)
# Need to manually reload, as the listener won't have been installed because
# the initial load did not succeed (the reauth flow will not be initiated if
# the load succeeds)
await self.hass.config_entries.async_reload(entry.entry_id)
return self.async_abort(reason="reauth_successful")
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=f"{self._data[CONF_HOST]}:{self._data[CONF_PORT]}", data=self._data
)
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> HyperionOptionsFlow:
"""Get the Hyperion Options flow."""
return HyperionOptionsFlow(config_entry)
class HyperionOptionsFlow(OptionsFlow):
    """Hyperion options flow."""
    def __init__(self, config_entry: ConfigEntry) -> None:
        """Initialize a Hyperion options flow."""
        self._config_entry = config_entry
    def _create_client(self) -> client.HyperionClient:
        """Create and connect a client instance."""
        entry_data = self._config_entry.data
        return create_hyperion_client(
            entry_data[CONF_HOST],
            entry_data[CONF_PORT],
            token=entry_data.get(CONF_TOKEN),
        )
    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Manage the options."""
        # Start from the built-in external sources, then merge in the effects
        # the server itself reports.
        effects = {source: source for source in const.KEY_COMPONENTID_EXTERNAL_SOURCES}
        async with self._create_client() as hyperion_client:
            if not hyperion_client:
                return self.async_abort(reason="cannot_connect")
            effects.update(
                {
                    effect[const.KEY_NAME]: effect[const.KEY_NAME]
                    for effect in hyperion_client.effects or []
                    if const.KEY_NAME in effect
                }
            )
        # Hyperion may gain new effects at any time and those should show by
        # default, so the entry stores a hide-list. The form asks which effects
        # to *show*, so invert the selection before saving.
        if user_input is not None:
            shown = user_input.pop(CONF_EFFECT_SHOW_LIST)
            user_input[CONF_EFFECT_HIDE_LIST] = sorted(set(effects) - set(shown))
            return self.async_create_entry(title="", data=user_input)
        hidden = set(self._config_entry.options.get(CONF_EFFECT_HIDE_LIST, []))
        default_effect_show_list = list(set(effects) - hidden)
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_PRIORITY,
                        default=self._config_entry.options.get(
                            CONF_PRIORITY, DEFAULT_PRIORITY
                        ),
                    ): vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
                    vol.Optional(
                        CONF_EFFECT_SHOW_LIST,
                        default=default_effect_show_list,
                    ): cv.multi_select(effects),
                }
            ),
        )
|
sander76/home-assistant
|
homeassistant/components/hyperion/config_flow.py
|
Python
|
apache-2.0
| 20,066
|
[
"VisIt"
] |
032059127990749aec641ee9ddc79288de1535b0cc71287ee0984fc838650a22
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This software (including its Debian packaging) is available to you under the terms of the GPL-3,
# see "/usr/share/common-licenses/GPL-3".
# Software is created and maintained by Laboratory of Biomolecular Systems Simulation at University of Gdansk.
# Contributors:
# - Tomasz Makarewicz (makson96@gmail.com)
# - Ajit B. Datta (ajit@jcbose.ac.in)
# - Sara Boch Kminikowska
# - Manish Sud (msud@san.rr.com; URL: www.MayaChemTools.org)
# - Thomas Holder
#
from __future__ import print_function
# --Import libraries--
# Import nativ python libraries
import os
import pickle
import shutil
import subprocess
import sys
import time
import tarfile
# This is actually needed if Tk will be removed.
import re
# Import libraries for tk graphic interface
if sys.version_info[0] < 3:
import thread
import Queue
from Tkinter import *
import tkMessageBox
import tkFileDialog
from ttk import Progressbar, Scrollbar
else:
import _thread as thread
import queue as Queue
from tkinter import *
from tkinter import messagebox as tkMessageBox
from tkinter import filedialog as tkFileDialog
from tkinter.ttk import Progressbar, Scrollbar
# Import libraries for Qt graphic interface
try:
from pymol import Qt
from pymol.Qt import QtWidgets
except ImportError:
import PyQt5
from PyQt5 import QtWidgets
# Import libraries from PyMOL specific work.
from pymol import cmd, cgo, parsing, plugins, CmdException
# TODO: It seams that stored is removed from PyMOL API. We need to handle it correctly
try:
from pymol import stored
except ImportError:
stored = False
# Check for ProDy
try:
import prody
except ModuleNotFoundError:
prody = False
# Plugin Version
plugin_ver = " 3.0.0pre"
# Default GROMACS .mdp parameters for the energy-minimization (em) stage.
EM_INIT_CONFIG = """define = -DFLEX_SPC
constraints = none
integrator = steep
nsteps = 10000
nstlist = 10
ns_type = simple
rlist = 1.5
rcoulomb = 1.5
rvdw = 1.5
emtol = 1000.0
emstep = 0.01
implicit-solvent = no
;gb-algorithm = Still
;pbc = no
;rgbradii = 0
cutoff-scheme = Verlet
coulombtype = PME"""
# Default GROMACS .mdp parameters for the position-restrained (pr) MD stage.
PR_INIT_CONFIG = """define = -DPOSRES
constraints = all-bonds
integrator = md-vv
dt = 0.002
nsteps = 5000
nstcomm = 1
nstxout = 100
nstvout = 100
nstfout = 0
nstlog = 10
nstenergy = 10
nstlist = 10
ns_type = simple
rlist = 1.5
rcoulomb = 1.5
rvdw = 1.5
Tcoupl = v-rescale
tau_t = 0.1 0.1
tc-grps = protein Non-Protein
ref_t = 298 298
Pcoupl = no
tau_p = 0.5
compressibility = 4.5e-5
ref_p = 1.0
gen_vel = yes
gen_temp = 298.0
gen_seed = 173529
cutoff-scheme = Verlet
coulombtype = PME"""
# Default GROMACS .mdp parameters for the final (md) simulation stage.
MD_INIT_CONFIG = """;define = -DPOSRES
integrator = md-vv
dt = 0.002
nsteps = 5000
nstcomm = 1
nstxout = 50
nstvout = 50
nstfout = 0
nstlist = 10
ns_type = simple
rlist = 1.5
rcoulomb = 1.5
rvdw = 1.5
Tcoupl = v-rescale
tau_t = 0.1 0.1
tc-grps = protein Non-Protein
ref_t = 298 298
Pcoupl = no
tau_p = 0.5
compressibility = 4.5e-5
ref_p = 1.0
gen_vel = yes
gen_temp = 298.0
gen_seed = 173529
constraints = all-bonds
constraint-algorithm = Lincs
continuation = no
shake-tol = 0.0001
lincs-order = 4
lincs-warnangle = 30
morse = no
implicit-solvent = no
;gb-algorithm = Still
;pbc = no
;rgbradii = 0
;comm_mode = ANGULAR
cutoff-scheme = Verlet
coulombtype = PME"""
# This function will initialize all plugin stuff
def init_function(travis_ci=False, gui_library="qt", parent=False):
    """Set up plugin directories, locate GROMACS, and optionally build the GUI.

    Returns a tuple (status, simulation_parameters) where status is
    ["ok"|"fail", message] and simulation_parameters is False on failure.
    """
    # Fallback to tk, till qt is ready
    # NOTE(review): this unconditionally overrides the gui_library argument.
    gui_library = "tk"
    status = ["ok", ""]
    # Make sure HOME environment variable is defined before setting up directories...
    home_dir = os.path.expanduser('~')
    if home_dir:
        os.chdir(home_dir)
    else:
        print("HOME environment variable not defined")
        status = ["fail", "HOME environment variable not defined. Please set its value and try again."]
    dynamics_dir = get_dynamics_dir()
    project_dir = get_project_dirs()
    # Clean up any temporary project directory...
    if os.path.isdir(project_dir):
        shutil.rmtree(project_dir)
    # Create temporary project directory along with any subdirectories...
    if not os.path.isdir(project_dir):
        os.makedirs(project_dir)
    print("Searching for GROMACS installation")
    os.chdir(dynamics_dir)
    gmx_exe, gmx_version, gmx_build_arch, gmx_on_cygwin = get_gromacs_exe_info()
    os.chdir(home_dir)
    supported_gmx_versions = ["2016", "2018"]
    if not len(gmx_exe):
        print("GROMACS 2016 or newer not detected.")
        status = ["fail",
                  "GROMACS not detected. Please install and setup GROMACS 2016 or newer correctly for your platform."
                  " Check '~/.dynamics/test_gromacs.txt' for more details. Don't forget to add GROMACS bin directory"
                  " to your PATH"]
    elif gmx_version[0:4] not in supported_gmx_versions:
        # Unsupported versions are a warning only; the plugin still proceeds.
        print("Warning. Unsupported GROMACS Version")
    if status[0] == "ok":
        simulation_parameters = SimulationParameters()
    else:
        simulation_parameters = False
    if not travis_ci:
        create_gui(gui_library, status, simulation_parameters, parent)
    return status, simulation_parameters
class SimulationParameters:
    """Aggregates all per-session state used by the simulation plugin."""
    # Class-level placeholders; real values are assigned in __init__ or later.
    gmx_output = ""
    gmx_input = ""
    vectors_prody = False
    stop = False
    project_name = "nothing"
    progress = ""
    em_file = ""
    pr_file = ""
    md_file = ""
    def __init__(self):
        self.gmx_output = GromacsOutput()
        self.gmx_input = GromacsInput()
        print("Found GROMACS VERSION {}".format(self.gmx_output.version))
        if prody:
            # ProDy is optional; only build the vectors helper when importable.
            self.vectors_prody = Vectors()
            print("ProDy correctly imported")
        self.progress = ProgressStatus()
    def create_cfg_files(self):
        """Generate the em/pr/md configuration files for the current project."""
        cfg_files = create_config_files(self.project_name)
        self.em_file, self.pr_file, self.md_file = cfg_files
    def change_stop_value(self, value):
        """Coerce *value* to a boolean stop flag."""
        self.stop = bool(value)
    def change_project_name(self, name):
        """Switch to project *name*, creating its directory if missing."""
        self.project_name = name
        target_dir = get_project_dirs(self.project_name)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)
# This class is responsible for interface to GROMACS. It will read all important data from GROMACS tools.
class GromacsOutput:
    """Locates the gmx executable and parses force fields, water models and groups from its output."""
    # NOTE(review): these class-level mutable defaults are shared across
    # instances until reassigned by __init__/init2.
    version = "GROMACS not found"
    command = ""
    force_list = []
    water_list = []
    group_list = []
    restraints = []
    def __init__(self):
        """Clean stale files, detect gmx, and harvest its capabilities."""
        # Remove garbage
        dynamics_dir = get_dynamics_dir()
        garbage_files = next(os.walk(dynamics_dir))[2]
        for garbage in garbage_files:
            # GROMACS backup files start with '#'.
            if garbage[0] == "#":
                os.remove(dynamics_dir + garbage)
        gmx_exe, gmx_version, gmx_build_arch, gmx_on_cygwin = get_gromacs_exe_info()
        self.version = gmx_version
        self.command = gmx_exe
        # Track current directory and switch to dynamics_dir before invoking gmx...
        current_dir = os.getcwd()
        os.chdir(dynamics_dir)
        self.init2()
        # Switch back to current directory...
        os.chdir(current_dir)
    def init2(self):
        """Probe gmx with a one-atom PDB to list force fields, water models and groups."""
        print("Reading available force fields and water models")
        # NOTE(review): PDB is a fixed-column format; the single-space layout of
        # this record looks collapsed -- verify against the original upstream file.
        fo = open("test_gromacs.pdb", "wb")
        fo.write(b"ATOM 1 N LYS 1 24.966 -0.646 22.314 1.00 32.74 1SRN 99\n")
        fo.close()
        gmx_stdin_file_path = "gromacs_stdin.txt"
        fo = open(gmx_stdin_file_path, "w")
        fo.write("1\n")
        fo.write("1")
        fo.close()
        gmx_stdout_file_path = "test_gromacs.txt"
        cmd = "{} pdb2gmx -f test_gromacs.pdb -o test_gromacs.gro -p test_gromacs.top".format(self.command)
        execute_subprocess(cmd, gmx_stdin_file_path, gmx_stdout_file_path)
        lista_gromacs = read_text_lines(gmx_stdout_file_path)
        # Reading available force fields
        force_start_line = 0
        while lista_gromacs[force_start_line] != "Select the Force Field:\n":
            force_start_line = force_start_line + 1
        force_start_line = force_start_line + 2
        force_end_line = force_start_line
        while lista_gromacs[force_end_line] != "\n":
            force_end_line = force_end_line + 1
        force_list = lista_gromacs[force_start_line:force_end_line]
        force_list2 = []
        number = 1
        for force in force_list:
            # Store as [index, name-without-trailing-newline].
            force_list2.append([number, force[:-1]])
            number = number + 1
        self.force_list = force_list2
        # Reading available water models
        self.water_list = get_water_models_info(lista_gromacs)
        print("Reading available groups")
        gmx_stdin_file_path = "gromacs_stdin.txt"
        fo = open(gmx_stdin_file_path, "w")
        fo.write("1")
        fo.close()
        gmx_stdout_file_path = "test_gromacs.txt"
        cmd = "{} trjconv -f test_gromacs.pdb -s test_gromacs.pdb -o test_gromacs2.pdb".format(self.command)
        execute_subprocess(cmd, gmx_stdin_file_path, gmx_stdout_file_path)
        group_test_list = read_text_lines(gmx_stdout_file_path)
        # Reading available groups
        group_start_line = 0
        while group_test_list[group_start_line] != "Will write pdb: Protein data bank file\n":
            group_start_line = group_start_line + 1
        group_start_line = group_start_line + 1
        group_end_line = group_start_line + 1
        while group_test_list[group_end_line][0:14] != "Select a group":
            group_end_line = group_end_line + 1
        group_list = group_test_list[group_start_line:group_end_line]
        group_list2 = []
        number = 0
        for group in group_list:
            # Lines look like "Group N ( ...) has ..."; keep the part after "Group ".
            group1 = group.split(' has')
            group2 = group1[0].split('Group ')
            if len(group2) == 2:
                group_list2.append([number, group2[1]])
                number = number + 1
        self.group_list = group_list2
    # This function will update water list if force field is changed.
    def water_update(self, force_number):
        """Re-run pdb2gmx with *force_number* and refresh the water model list."""
        # Track current directory and switch to dynamics_dir before invoking gmx...
        current_dir = os.getcwd()
        os.chdir(get_dynamics_dir())
        print("Updating available water models")
        gmx_stdin_file_path = "gromacs_stdin.txt"
        fo = open(gmx_stdin_file_path, "w")
        fo.write("%d\n" % force_number)
        fo.write("1")
        fo.close()
        gmx_stdout_file_path = "test_gromacs.txt"
        cmd = "{} pdb2gmx -f test_gromacs.pdb -o test_gromacs.gro -p test_gromacs.top".format(self.command)
        execute_subprocess(cmd, gmx_stdin_file_path, gmx_stdout_file_path)
        lista_gromacs = read_text_lines(gmx_stdout_file_path)
        self.water_list = get_water_models_info(lista_gromacs)
        # Switch back to current directory...
        os.chdir(current_dir)
        # save_options()
        return self.water_list
    # This function will read atoms group for restraints for current molecule.
    def restraints_index(self, project_name):
        """Parse make_ndx output into self.restraints as [header, atoms] pairs."""
        self.restraints = []
        current_dir = os.getcwd()
        os.chdir(get_project_dirs(project_name))
        fo = open("gromacs_stdin.txt", "w")
        fo.write("q")
        fo.close()
        cmd = "{} make_ndx -f {}.pdb -o index.ndx".format(self.command, project_name)
        execute_subprocess(cmd, "gromacs_stdin.txt", "restraints.log")
        index_list = read_text_lines("restraints.log")
        index_position = 0
        atoms = ""
        for line in index_list:
            # A '[' line starts a new group header; accumulated atom lines
            # belong to the previous group.
            if line[0] == "[":
                self.restraints.append([])
                self.restraints[index_position].append(line)
                if index_position != 0:
                    self.restraints[index_position - 1].append(atoms)
                index_position = index_position + 1
                atoms = ""
            else:
                atoms = atoms + line
        self.restraints[index_position - 1].append(atoms)
        os.chdir(current_dir)
# This class is responsible for performing molecular dynamics simulation with GROMACS tools.
class GromacsInput:
    """Holds user-selected GROMACS options and drives each simulation stage.

    Each stage method (pdb2top, waterbox, saltadd, em, pr, md, trjconv)
    writes any interactive answers to 'gromacs_stdin.txt', invokes the gmx
    tool via execute_and_monitor_subprocess, and returns a ``[state, message]``
    status list where state is "ok" (or "failed"/"fail") plus a user message.
    """
    force = 1
    water = 1
    group = 1
    box_type = "triclinic"
    explicit = 1
    # variable to choose heavy hydrogen
    hydro = "noheavyh"
    box_distance = "0.8"
    box_density = "1000"
    restraints_nr = 1
    # four variables salt, positive, negative and neutral
    neutrality = "neutral"
    salt_conc = "0.15"
    positive_ion = "NA"
    negative_ion = "CL"
    command_distinction = "\n!************************!\n"
    # Option keys accepted by update(); unknown keys are silently ignored,
    # matching the original if/elif chain's behavior.
    _OPTION_KEYS = ("force", "water", "group", "box_type", "hydro",
                    "box_distance", "box_density", "restraints_nr",
                    "neutrality", "salt_conc", "positive_ion",
                    "negative_ion", "explicit")
    # This function will change given variables stored by the class (needed for lambda statements)
    def update(self, gmx_options):
        """Store the given option values on the instance; unknown keys are ignored."""
        for key, value in gmx_options.items():
            if key in self._OPTION_KEYS:
                setattr(self, key, value)
        # save_options()
        print("gromacs updated")
    # This function will create initial topology and trajectory using pdb file and chosen force field
    def pdb2top(self, s_params):
        """Create the initial topology (.top) and coordinates (.gro) from the PDB.

        Retries with -ignh (ignore hydrogens) when the first pdb2gmx run
        fails to produce a .gro file.
        """
        status = ["ok", "Calculating topology using Force fields"]
        status_update(status)
        hh = "-" + self.hydro
        gmx_cmd = s_params.gmx_output.command
        project_name = s_params.project_name
        try:
            os.remove("{}.gro".format(project_name))
            os.remove("{}.top".format(project_name))
        except FileNotFoundError:
            pass
        # Answer pdb2gmx's interactive force-field and water-model prompts.
        with open("gromacs_stdin.txt", "w") as fo:
            fo.write("%s\n" % str(self.force))
            fo.write("%s" % str(self.water))
        command = "{0} pdb2gmx -f {1}.pdb -o {1}.gro -p {1}.top {2}".format(gmx_cmd, project_name, hh)
        execute_and_monitor_subprocess(command, 'gromacs_stdin.txt', 'log1.txt', 'log.txt')
        if os.path.isfile("{}.gro".format(project_name)):
            status = ["ok", ""]
        else:
            status = ["fail", "Warning. Trying to ignore unnecessary hydrogen atoms."]
            command = "{0} pdb2gmx -ignh -f {1}.pdb -o {1}.gro -p {1}.top {2}".format(gmx_cmd, project_name, hh)
            execute_and_monitor_subprocess(command, 'gromacs_stdin.txt', 'log1.txt', 'log.txt')
        status_update(status)
        stop = s_params.stop
        if os.path.isfile("{}.gro".format(project_name)) and not stop:
            status = ["ok", "Calculated topology using Force fields"]
        else:
            status = ["fail", "Force field unable to create topology file"]
        return status
    # This is alternative function to create initial topology and trajectory using pdb file
    @staticmethod
    def x2top(s_params):
        """Alternative topology generation via x2top, then trjconv for coordinates."""
        status = ["ok", "Calculating topology using Force fields"]
        status_update(status)
        gmx_cmd = s_params.gmx_output.command
        project_name = s_params.project_name
        try:
            os.remove("{}.gro".format(project_name))
            os.remove("{}.top".format(project_name))
        except FileNotFoundError:
            pass
        command = "{0} x2top -f {1}.pdb -o {1}.top".format(gmx_cmd, project_name)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        stop = s_params.stop
        if os.path.isfile("{}.top".format(project_name)) and not stop:
            status = ["ok", "Calculating structure using trjconv."]
        else:
            status = ["fail", "Unable to create topology file."]
        status_update(status)
        if status[0] == "ok":
            # Answer trjconv's group prompt.
            with open("gromacs_stdin.txt", "w") as fo:
                fo.write("0")
            command = "{0} trjconv -f {1}.pdb -s {1}.pdb -o {1}.gro".format(gmx_cmd, project_name)
            execute_and_monitor_subprocess(command, 'gromacs_stdin.txt', 'log1.txt', 'log.txt')
            stop = s_params.stop
            if os.path.isfile("{}.gro".format(project_name)) and not stop:
                status = ["ok", "Calculated structure using trjconv."]
            else:
                status = ["fail", "Unable to create structure file."]
        return status
    # This function will create and add waterbox.
    def waterbox(self, s_params):
        """Build the simulation box (editconf) and solvate it (solvate)."""
        status = ["ok", "Generating waterbox"]
        box_type = "-bt {}".format(self.box_type)
        distance = "-d {}".format(self.box_distance)
        density = "-density {}".format(self.box_density)
        gmx_cmd = s_params.gmx_output.command
        project_name = s_params.project_name
        try:
            os.remove("{}1.gro".format(project_name))
            os.remove("{}_solv.gro".format(project_name))
        except FileNotFoundError:
            pass
        status_update(status)
        command = "{0} editconf -f {1}.gro -o {1}1.gro -c {2} {3} {4}".format(gmx_cmd, project_name, box_type, distance,
                                                                              density)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        # Pick the solvent coordinate file matching the chosen water model.
        water_name = s_params.gmx_output.water_list[self.water - 1][1][4:8].lower()
        print(water_name)
        if water_name == "tip4":
            water_gro = "tip4p.gro"
        elif water_name == "tip5":
            water_gro = "tip5p.gro"
        else:
            water_gro = "spc216.gro"
        command = "{0} solvate -cp {1}1.gro -cs {2} -o {1}_solv.gro -p {1}.top".format(gmx_cmd, project_name, water_gro)
        status = ["ok", "Adding Water Box"]
        status_update(status)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        stop = s_params.stop
        if os.path.isfile("{}1.gro".format(project_name)) and not stop:
            status = ["ok", "Water Box Added"]
        else:
            status = ["fail", "Unable to add water box"]
        return status
    # This function will add ions/salts to the protein in waterbox
    def saltadd(self, s_params):
        """Add counter-ions/salt to the solvated system via grompp + genion."""
        status = ["ok", "Preparing to add ions or salt"]
        salt = "-conc {}".format(self.salt_conc)
        positive = "-pname {}".format(self.positive_ion)
        negative = "-nname {}".format(self.negative_ion)
        neu = "-{}".format(self.neutrality)
        gmx_cmd = s_params.gmx_output.command
        project_name = s_params.project_name
        try:
            os.remove(project_name + "_b4em.gro")
            os.remove(project_name + "_ions.tpr")
        except FileNotFoundError:
            pass
        command = "{0} grompp -f em -c {1}_solv.gro -o {1}_ions.tpr -p {1}.top -maxwarn 1".format(gmx_cmd, project_name)
        status_update(status)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        # Answer genion's solvent-group prompt.
        with open("gromacs_stdin.txt", "w") as fo:
            fo.write("13")
        status = ["ok", "Adding salts and ions"]
        status_update(status)
        command = "{0} genion -s {1}_ions.tpr -o {1}_b4em.gro {2} {3} {4} {5} -p {1}.top".format(gmx_cmd,
                                                                                                 project_name, positive,
                                                                                                 negative, salt, neu)
        execute_and_monitor_subprocess(command, 'gromacs_stdin.txt', 'log1.txt', 'log.txt')
        stop = s_params.stop
        if os.path.isfile("{}_b4em.gro".format(project_name)) and not stop:
            status = ["ok", "Ions added successfully"]
        elif stop == 0:
            # NOTE(review): reached when the output file is missing but no stop
            # was requested; reports "ok" with a diagnostic message -- confirm
            # this is the intended behavior.
            status = ["ok", "Find out what's wrong!"]
        else:
            status = ["failed", "Unable to add ions"]
        return status
    # This function will perform energy minimization
    @staticmethod
    def em(s_params):
        """Run the energy-minimization stage (grompp + mdrun)."""
        status = ["ok", "Energy Minimization"]
        gmx_cmd = s_params.gmx_output.command
        project_name = s_params.project_name
        try:
            os.remove("{}_em.tpr".format(project_name))
            os.remove("{}_em.trr".format(project_name))
            os.remove("{}_b4pr.gro".format(project_name))
        except FileNotFoundError:
            pass
        # Check if waterbox was added and adjust accordingly.
        if not os.path.isfile("{}_b4em.gro".format(project_name)):
            if os.path.isfile("{}_solv.gro".format(project_name)):
                shutil.copy("{}_solv.gro".format(project_name), "{}_b4em.gro".format(project_name))
            elif os.path.isfile("{}.gro".format(project_name)):
                # Bug fix: the original tested project_name + "{}.gro".format(project_name)
                # (doubling the name), so this fallback could never trigger.
                shutil.copy("{}.gro".format(project_name), "{}_b4em.gro".format(project_name))
        status_update(status)
        command = "{0} grompp -f em -c {1}_b4em -p {1} -o {1}_em".format(gmx_cmd, project_name)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        command = "{0} mdrun -nice 4 -s {1}_em -o {1}_em -c {1}_b4pr -v".format(gmx_cmd, project_name)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        stop = s_params.stop
        if os.path.isfile("{}_em.tpr".format(project_name)) and os.path.isfile("{}_b4pr.gro".format(project_name)) and \
                not stop:
            status = ["ok", "Energy Minimized"]
        else:
            status = ["fail", "Unable to perform Energy Minimization"]
        return status
    # This function will perform position restrained MD
    @staticmethod
    def pr(s_params):
        """Run the position-restrained MD stage (grompp + mdrun)."""
        status = ["ok", "Position Restrained MD"]
        gmx_cmd = s_params.gmx_output.command
        project_name = s_params.project_name
        try:
            os.remove("{}_pr.tpr".format(project_name))
            os.remove("{}_pr.trr".format(project_name))
            os.remove("{}_b4md.gro".format(project_name))
        except FileNotFoundError:
            pass
        status_update(status)
        command = "{0} grompp -f pr -c {1}_b4pr -r {1}_b4pr -p {1} -o {1}_pr".format(gmx_cmd, project_name)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        command = "{0} mdrun -nice 4 -s {1}_pr -o {1}_pr -c {1}_b4md -v".format(gmx_cmd, project_name)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        stop = s_params.stop
        if os.path.isfile("{}_pr.tpr".format(project_name)) and not stop:
            status = ["ok", "Position Restrained MD finished"]
        else:
            status = ["fail", "Unable to perform Position Restrained"]
        return status
    # This function will create posre.itp file for molecular dynamics simulation with chosen atoms if
    # restraints were selected
    @staticmethod
    def restraints(s_params):
        """Create posre_2.itp from the selected atoms and install it as posre.itp."""
        status = ["ok", "Adding Restraints"]
        gmx_cmd = s_params.gmx_output.command
        project_name = s_params.project_name
        try:
            os.remove("posre_2.itp")
        except FileNotFoundError:
            pass
        with open("gromacs_stdin.txt", "w") as fo:
            fo.write("0")
        status_update(status)
        command = "{0} genrestr -f {1}.pdb -o posre_2.itp -n index_dynamics.ndx".format(gmx_cmd, project_name)
        execute_and_monitor_subprocess(command, 'gromacs_stdin.txt', 'log1.txt', 'log.txt')
        stop = s_params.stop
        if os.path.isfile("posre_2.itp") and not stop:
            status = ["ok", "Added Restraints"]
            if os.path.isfile("posre.itp"):
                os.remove("posre.itp")
            shutil.copy("posre_2.itp", "posre.itp")
        else:
            status = ["fail", "Unable to create restraints file"]
        return status
    # This function will perform position final molecular dynamics simulation
    @staticmethod
    def md(s_params):
        """Run the final MD stage (grompp + mdrun)."""
        status = ["ok", "Molecular Dynamics Simulation"]
        gmx_cmd = s_params.gmx_output.command
        project_name = s_params.project_name
        try:
            os.remove("{}_md.tpr".format(project_name))
            os.remove("{}_md.trr".format(project_name))
        except FileNotFoundError:
            pass
        # Check if em and/or pr was done and adjust accordingly.
        if not os.path.isfile("{}_b4md.gro".format(project_name)):
            if not os.path.isfile("{}_b4pr.gro".format(project_name)):
                # No em and pr
                shutil.copy("{}_b4em.gro".format(project_name), "{}_b4md.gro".format(project_name))
            else:
                # No pr
                shutil.copy("{}_b4pr.gro".format(project_name), "{}_b4md.gro".format(project_name))
        status_update(status)
        command = "{0} grompp -f md -c {1}_b4md -p {1} -o {1}_md".format(gmx_cmd, project_name)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        command = "{0} mdrun -nice 4 -s {1}_md -o {1}_md -c {1}_after_md -v".format(gmx_cmd, project_name)
        execute_and_monitor_subprocess(command, None, 'log1.txt', 'log.txt')
        stop = s_params.stop
        if os.path.isfile("{}_md.tpr".format(project_name)) and not stop:
            status = ["ok", "Molecular Dynamics Simulation finished"]
        else:
            status = ["fail", "Unable to perform Molecular Dynamics Simulation"]
        return status
    # This function will convert final results to multimodel pdb file
    def trjconv(self, s_params):
        """Convert the MD trajectory into a multimodel PDB file."""
        status = ["ok", "Creating Multimodel PDB"]
        gmx_cmd = s_params.gmx_output.command
        project_name = s_params.project_name
        # A single guarded remove suffices; the original removed the file twice.
        try:
            os.remove("{}_multimodel.pdb".format(project_name))
        except FileNotFoundError:
            pass
        # Answer trjconv's output-group prompt with the configured group.
        with open("gromacs_stdin.txt", "w") as fo:
            fo.write("%s" % str(self.group))
        status_update(status)
        command = "{0} trjconv -f {1}_md.trr -s {1}_md.tpr -o {1}_multimodel.pdb".format(gmx_cmd, project_name)
        execute_and_monitor_subprocess(command, 'gromacs_stdin.txt', 'log1.txt', 'log.txt')
        stop = s_params.stop
        if os.path.isfile("{}_multimodel.pdb".format(project_name)) and not stop:
            status = ["ok", "Finished!"]
        else:
            status = ["fail", "Unable to generate multimodel PDB file"]
        return status
# This class handles PCA/NMA via the ProDy python library and shows vectors from an NMD file.
class Vectors:
    # Values parsed out of the project's .nmd file (filled by nmd_format()).
    nmd_name = []
    nmd_atomnames = []
    nmd_resnames = []
    nmd_resids = []
    nmd_bfactors = []
    nmd_coordinates = []
    nmd_mode = []
    nmd_scale_mode = []
    # Drawing options for the mode vectors.
    color = "grey"
    scale = 1.0
    mode_nr = 0
    # 0 = ANM, 1 = PCA, 2 = GNM (see prody() below).
    calculation_type = 0
    contact_map = 0
    block_contact_map = 0
    # Last computed elastic network model (ANM/GNM); stays 0 for PCA runs,
    # so contact-map features only work after an ANM/GNM calculation.
    enm = 0

    # Change a multimodel PDB file into an NMD vector file.
    def prody(self, project_name):
        """Build an ensemble from <project>_multimodel.pdb and write <project>.nmd."""
        # Silence ProDy and create logs
        prody.confProDy(verbosity='none')
        prody.startLogfile("log_prody.log")
        # Prepare ensemble
        model = prody.parsePDB(project_name + "_multimodel.pdb", subset='calpha')
        ensemble = prody.Ensemble(project_name + ' ensemble')
        ensemble.setCoords(model.getCoords())
        ensemble.addCoordset(model.getCoordsets())
        ensemble.iterpose()
        # ANM calculations
        if self.calculation_type == 0:
            anm = prody.ANM(project_name)
            anm.buildHessian(ensemble)
            anm.calcModes()
            write_nmd = anm
            self.enm = anm
        # PCA calculations
        elif self.calculation_type == 1:
            pca = prody.PCA(project_name)
            pca.buildCovariance(ensemble)
            pca.calcModes()
            write_nmd = pca
        # GNM calculations
        elif self.calculation_type == 2:
            gnm = prody.GNM(project_name)
            gnm.buildKirchhoff(ensemble)
            gnm.calcModes()
            write_nmd = gnm
            self.enm = gnm
        # Write the first three modes to the NMD file.
        prody.writeNMD(project_name + '.nmd', write_nmd[:3], model)
        prody.closeLogfile("log_prody.log")

    # Read the NMD file and split its sections into the nmd_* attributes.
    def nmd_format(self, project_name):
        """Parse <project>.nmd; each header keyword keeps its trailing tokens."""
        file_nmd = open('{}.nmd'.format(project_name), "r")
        list_nmd = file_nmd.readlines()
        self.nmd_mode = []
        self.nmd_scale_mode = []
        for line in list_nmd:
            split_line = line.split()
            if split_line[0] == "name":
                self.nmd_name = split_line
                self.nmd_name.pop(0)
            elif split_line[0] == "atomnames":
                self.nmd_atomnames = split_line
                self.nmd_atomnames.pop(0)
            elif split_line[0] == "resnames":
                self.nmd_resnames = split_line
                self.nmd_resnames.pop(0)
            elif split_line[0] == "resids":
                self.nmd_resids = split_line
                self.nmd_resids.pop(0)
            elif split_line[0] == "bfactors":
                self.nmd_bfactors = split_line
                self.nmd_bfactors.pop(0)
            elif split_line[0] == "coordinates":
                self.nmd_coordinates = split_line
                self.nmd_coordinates.pop(0)
            elif split_line[0] == "mode":
                # A mode line carries: "mode", mode index, scale factor,
                # then the flat list of displacement components.
                pre_mode = split_line
                self.nmd_mode.append(pre_mode[3:])
                self.nmd_scale_mode.append(pre_mode[2])

    # Show the contact map on the PyMOL screen.
    def show_contact_map(self, sensitivity, project_name):
        """Draw a PyMOL distance object between C-alpha pairs in contact.

        Uses the Kirchhoff matrix of the last ANM/GNM model; off-diagonal
        entries below *sensitivity* are treated as contacts.
        """
        contact_matrix = self.enm.getKirchhoff()
        print(contact_matrix)
        c_alpha_nr = 0
        for c_alpha_list in contact_matrix:
            c_alpha_nr = c_alpha_nr + 1
            c_alpha_target_nr = 0
            for c_alpha_1 in c_alpha_list:
                c_alpha_target_nr = c_alpha_target_nr + 1
                if c_alpha_nr != c_alpha_target_nr and float(c_alpha_1) < float(sensitivity):
                    cmd.select("sele1",
                               "n. ca and {}_multimodel and i. {}".format(project_name, str(c_alpha_nr)))  # PyMOL API
                    cmd.select("sele2", "n. ca and {}_multimodel and i. {}".format(project_name,
                                                                                  str(c_alpha_target_nr)))  # PyMOL API
                    cmd.distance("contact_map", "sele1", "sele2")  # PyMOL API
                    # Best effort: older PyMOL versions may lack these calls.
                    try:
                        cmd.hide("labels", "contact_map")  # PyMOL API
                        cmd.delete("sele1")  # PyMOL API
                        cmd.delete("sele2")  # PyMOL API
                    except:
                        pass

    # Show the contact map / cross correlation as a matplotlib graph.
    def graph_contact_map(self, plot_type):
        if plot_type == "contact":
            # matplotlib
            prody.showContactMap(self.enm)
        elif plot_type == "cross":
            # matplotlib
            prody.showCrossCorr(self.enm)

    # Show vectors of the currently selected mode from the NMD file.
    def show_vectors(self):
        """Draw one CGO cone per C-alpha, from its coordinate to the displaced point."""
        color1 = cmd.get_color_tuple(self.color)  # PyMOL API
        color2 = cmd.get_color_tuple(self.color)  # PyMOL API
        if color1:
            color1 = list(color1)
        # Fallback to grey in case of unrecognized color
        else:
            color1 = [0.5, 0.5, 0.5]
        if color2:
            color2 = list(color2)
        # Fallback to grey in case of unrecognized color
        else:
            color2 = [0.5, 0.5, 0.5]
        arrow_head_radius = 0.15
        # Split the flat coordinate list into x/y/z arrow start points.
        x1 = []
        y1 = []
        z1 = []
        coor = "x"
        for coordinate in self.nmd_coordinates:
            if coor == "x":
                x1.append(float(coordinate))
                coor = "y"
            elif coor == "y":
                y1.append(float(coordinate))
                coor = "z"
            elif coor == "z":
                z1.append(float(coordinate))
                coor = "x"
        x2 = []
        y2 = []
        z2 = []
        # This factor is provided to make vector length more like in NMWiz.
        # More investigation is needed to get exact formula.
        approximation_factor = 16.6
        # Arrow end points: start point plus the scaled mode displacement.
        coor = "x"
        coor_nr = 0
        round_nr = 0
        for mode in self.nmd_mode[self.mode_nr]:
            if coor == "x":
                x2.append(
                    float(mode) * float(self.nmd_scale_mode[self.mode_nr]) * approximation_factor * self.scale + x1[
                        coor_nr])
                coor = "y"
            elif coor == "y":
                y2.append(
                    float(mode) * float(self.nmd_scale_mode[self.mode_nr]) * approximation_factor * self.scale + y1[
                        coor_nr])
                coor = "z"
            elif coor == "z":
                z2.append(
                    float(mode) * float(self.nmd_scale_mode[self.mode_nr]) * approximation_factor * self.scale + z1[
                        coor_nr])
                coor = "x"
            # Advance the atom index after each x/y/z triple.
            round_nr = round_nr + 1
            if round_nr == 3:
                round_nr = 0
                coor_nr = coor_nr + 1
        coor_nr = 0
        for position in x1:
            # Replace any previously drawn vector object for this atom.
            try:
                cmd.delete("Mode_Vector_" + str(coor_nr))
            except:
                pass
            cone = [cgo.CONE, x1[coor_nr], y1[coor_nr], z1[coor_nr], x2[coor_nr], y2[coor_nr], z2[coor_nr],
                    arrow_head_radius, 0.0] + color1 + color2 + [1.0, 0.0]
            cmd.load_cgo(cone, "Mode_Vector_" + str(coor_nr))  # PyMOL API
            coor_nr = coor_nr + 1
        # Another workaround for PyMOL 1.8 with TravisCI
        try:
            cam_possition = cmd.get_view(quiet=1)  # PyMOL API
            cmd.set_view(cam_possition)  # PyMOL API
        except TypeError:
            pass

    def change_vectors_color(self, color):
        # Redraw the vectors with a new color.
        self.color = color
        self.show_vectors()

    def change_vectors_scale(self, scale):
        # Redraw the vectors with a new length scale.
        scale = float(scale)
        self.scale = scale
        self.show_vectors()

    def change_vectors_mode_nr(self, mode_nr):
        # Redraw the vectors for a different normal mode.
        self.mode_nr = mode_nr
        self.show_vectors()

    def options_change(self, v1, v2, root):
        # Apply values chosen in the Tk options window, then close it.
        self.calculation_type = v1.get()
        self.contact_map = v2.get()
        # save_options()
        root.destroy()

    def block_contact(self, block, contact_map_b, contact_map_v):
        # ToDO: Replace below Tk code with something agnostic
        self.block_contact_map = block
        if block == 0:
            contact_map_b.configure(state=ACTIVE)
        elif block == 1:
            contact_map_b.configure(state=DISABLED)
            contact_map_v.set(0)
# Abstraction over the .mdp configuration file representatives: em.mdp, pr.mdp, md.mdp.
class MdpConfig:
    """In-memory representation of a GROMACS .mdp parameter file.

    Options are stored as [name, value] pairs; a leading ';' on the name
    marks the option as commented out.
    """
    external_file = 0  # 1 when the options came from a user-supplied file
    options = [[]]     # list of [name, value] pairs
    file_name = ""

    def __init__(self, file_name, init_config, external_file=0):
        self.file_name = file_name
        self.external_file = external_file
        # Each line has the form "name = value".
        self.options = [line.split(" = ") for line in init_config.split("\n")]

    def update(self, option_nr, value, check=1):
        """Set option *option_nr* to *value*; check=0 comments it out, check=1 enables it."""
        self.options[option_nr][1] = value
        if check == 0 and self.options[option_nr][0][0] != ";":
            self.options[option_nr][0] = ";" + self.options[option_nr][0]
        elif check == 1 and self.options[option_nr][0][0] == ";":
            self.options[option_nr][0] = self.options[option_nr][0][1:]
        self.clean_artefacts()

    def save_file(self, s_params):
        """Write the current options to <project_dir>/<file_name>."""
        project_name = s_params.project_name
        project_dir = get_project_dirs(project_name)
        config = ""
        for option in self.options:
            # Skip empty artefact entries produced by trailing newlines.
            if option == ['']:
                pass
            else:
                config = "{}{} = {}\n".format(config, str(option[0]), str(option[1]))
        # Context manager guarantees the handle is flushed and closed.
        with open(project_dir + self.file_name, "w") as mdp:
            mdp.write(config)

    # Clean options from artefacts.
    def clean_artefacts(self):
        """Drop one empty entry left by splitting a trailing newline, if any."""
        try:
            self.options.remove([''])
        except ValueError:
            # Nothing to clean. (Was a bare except; ValueError is what
            # list.remove raises when the element is absent.)
            pass
# This function creates the configuration files needed by the project.
def create_config_files(project_name):
    """Prepare em/pr/md .mdp configs for *project_name* and save the PyMOL object.

    Returns a tuple (em_file, pr_file, md_file) of MdpConfig instances.
    """
    dynamics_dir = get_dynamics_dir()
    project_dir = get_project_dirs(project_name)
    print("Create config files")

    def _mdp(name, default_config):
        # Resolution order: global override in the dynamics dir, then a
        # project-local file, then built-in defaults. The original returned
        # an unbound variable (NameError) when the global override existed,
        # because that branch never created an MdpConfig object.
        if os.path.isfile(dynamics_dir + name):
            shutil.copy(dynamics_dir + name, project_dir + name)
            print("Found {} file. Using it instead of local configuration.".format(name))
        if os.path.isfile(project_dir + name):
            with open(project_dir + name, "r") as cfg:
                return MdpConfig(name, cfg.read(), 1)
        return MdpConfig(name, default_config, 0)

    em_file = _mdp("em.mdp", EM_INIT_CONFIG)
    pr_file = _mdp("pr.mdp", PR_INIT_CONFIG)
    md_file = _mdp("md.mdp", MD_INIT_CONFIG)
    # save_options()
    # Save the currently loaded PyMOL object as the project's input PDB.
    try:
        if project_name in cmd.get_names("objects"):  # PyMOL API
            cmd.save(project_dir + project_name + ".pdb", project_name)  # PyMOL API
            print("cmd saved")
    except (AttributeError, TypeError):
        # PyMOL API unavailable (e.g. headless test run) — skip saving.
        pass
    return em_file, pr_file, md_file
# Status and to_do maintaining class.
class ProgressStatus:
    """Tracks which workflow steps are done (status) and which remain (to_do).

    Step indices:
    0: save configuration files; 1: topology from pdb; 2: water box;
    3: ions & neutralization; 4: energy minimization; 5: position-restrained MD;
    6: restraints; 7: MD simulation; 8: multimodel PDB; 9: ProDy vectors.
    """
    # Class-level defaults kept for backward compatibility (e.g. objects
    # unpickled from old projects that never went through __init__).
    status = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    to_do = [1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
    resume = 0
    x2top = 0
    steps = 8

    def __init__(self):
        # Bind fresh per-instance lists: the original mutated the mutable
        # class attributes in place, so every instance shared the same lists.
        self.status = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.to_do = [1, 1, 1, 1, 1, 1, 0, 1, 1, 1]

    def to_do_update(self, position, value):
        """Set the to_do flag at *position* (ignored unless position is an int)."""
        if isinstance(position, int):
            self.to_do[position] = value
        # save_options()

    def x2top_update(self, value):
        """Set the x2top flag (ignored unless value is an int)."""
        if isinstance(value, int):
            self.x2top = value

    def to_do_status(self):
        """Recompute to_do as the complement of status (done steps drop out)."""
        to_do = []
        for work in self.status:
            if work == 0:
                to_do.append(1)
            elif work == 1:
                to_do.append(0)
        self.to_do = to_do
# Detect the gmx executable along with other associated information.
def get_gromacs_exe_info():
    """Probe known GROMACS binaries and return (exe, version, build_arch, on_cygwin).

    Candidates are tried in preference order and probing stops at the first
    one whose '-version' output looks like GROMACS. (The original kept
    looping, so the last working binary silently overwrote earlier results.)
    """
    gmx_exes = ['gmx_mpi_d', 'gmx_mpi', 'gmx']
    gmx_exe = ""
    version = ""
    build_arch = ""
    build_on_cygwin = 0
    stdout_file = "test_gromacs.txt"
    if os.path.isfile(stdout_file):
        os.remove(stdout_file)
    for gmx in gmx_exes:
        cmd = gmx + " -version"
        execute_subprocess(cmd, None, stdout_file)
        with open(stdout_file, "r") as ofs:
            output = ofs.read()
        output = standardize_new_line_char(output)
        if not re.search("GROMACS version:", output, re.I):
            continue
        gmx_exe = gmx
        for line in output.split("\n"):
            if re.search("^[ ]*GROMACS version:", line, re.I):
                version = re.sub("^[ ]*GROMACS version:[ ]*", "", line, flags=re.I)
                # Older GROMACS prints e.g. "VERSION 5.1.4".
                if "VERSION " in version:
                    version = version.split("VERSION ")[1].rstrip()
            elif re.search(r"^[ ]*Build OS/arch:", line, re.I):
                build_arch = re.sub(r"^[ ]*Build OS/arch:[ ]*", "", line, flags=re.I)
                if re.search(r"CYGWIN", build_arch, re.I):
                    build_on_cygwin = 1
                # Build OS/arch comes after the version line; nothing more
                # is needed from this binary's output.
                break
        # First working binary wins.
        break
    return gmx_exe, version, build_arch, build_on_cygwin
def get_dynamics_dir():
    """Return the plugin's working directory, '~/.dynamics/' with a trailing separator."""
    home = os.path.abspath(os.path.expanduser('~'))
    return os.path.join(home, '.dynamics', '')
def get_project_dirs(project_name="nothing"):
    """Return '<dynamics_dir><project_name>/' with a trailing separator."""
    return os.path.join(get_dynamics_dir(), project_name, '')
# Execute a command using stdin/stdout redirection files as needed.
def execute_subprocess(command, stdin_file_path=None, stdout_file_path=None):
    """Run *command* through the shell and return its exit code.

    stdin_file_path: optional file whose content is fed to the process.
    stdout_file_path: optional file capturing stdout and stderr.
    """
    stdin_file = None
    stdin_msg = "None"
    if stdin_file_path:
        stdin_file = open(stdin_file_path, "r")
        stdin_msg = stdin_file_path
    stdout_file = None
    stdout_msg = "None"
    if stdout_file_path:
        stdout_file = open(stdout_file_path, "w")
        stdout_msg = stdout_file_path
    print("Running command: " + command + "; STDIN: " + stdin_msg + "; STDOUT: " + stdout_msg)
    try:
        return_code = subprocess.call(command, stdin=stdin_file, stdout=stdout_file,
                                      stderr=subprocess.STDOUT, shell=True)
    finally:
        # Close the handles even if subprocess.call raises (the original
        # leaked them on that path).
        if stdin_file:
            stdin_file.close()
        if stdout_file:
            stdout_file.close()
    return return_code
# Start a subprocess and wait for it to complete along with an option to kill it.
def execute_and_monitor_subprocess(command, stdin_file_path=None, stdout_file_path=None, log_file_path=None):
    """Run *command* via the shell, polling once per second until it exits.

    The command line is appended to *log_file_path* before the run; the
    captured stdout is appended to the same log afterwards.
    """
    if log_file_path:
        # Append to an existing log, otherwise start a new one.
        if os.path.isfile(log_file_path):
            log_file = open(log_file_path, 'a')
        else:
            log_file = open(log_file_path, 'w')
        star_mark = "\n!{0}!\n".format("*" * 25)
        log_file.write("{0}{1}{0}".format(star_mark, command))
        log_file.close()
    stdin_file = None
    stdin_msg = "None"
    if stdin_file_path:
        stdin_file = open(stdin_file_path, "r")
        stdin_msg = stdin_file_path
    stdout_file = None
    stdout_msg = "None"
    if stdout_file_path:
        stdout_file = open(stdout_file_path, "w")
        stdout_msg = stdout_file_path
    print("Running command: {}; STDIN: {}; STDOUT: {}".format(command, stdin_msg, stdout_msg))
    gmx = subprocess.Popen(command, stdin=stdin_file, stdout=stdout_file, stderr=subprocess.STDOUT, shell=True)
    # Poll instead of wait() so a future "stop" flag could kill the process
    # mid-run (the kill logic below is currently disabled).
    while gmx.poll() is None:
        # if stop == 1:
        #     gmx.kill()
        #     break
        time.sleep(1.0)
    if stdin_file_path:
        stdin_file.close()
    if stdout_file_path:
        stdout_file.close()
    # Append any stdout to log file...
    if log_file_path and stdout_file_path:
        log_file = open(log_file_path, "a")
        stdout_file = open(stdout_file_path, "r")
        log_file.write(stdout_file.read())
        log_file.close()
        stdout_file.close()
# Normalize Windows (\r\n) and old-Mac (\r) line endings to UNIX (\n).
def standardize_new_line_char(in_text):
    """Return *in_text* with every line ending converted to '\\n'."""
    return re.sub("(\r\n)|(\r)", "\n", in_text)
# Read text lines and standardize the new line character.
def read_text_lines(text_file_path):
    """Return the lines of *text_file_path*, each with UNIX line endings."""
    text_lines = []
    # The context manager guarantees the handle is closed; the original
    # relied on an explicit close that a read error would have skipped.
    with open(text_file_path, "r") as ifs:
        for line in ifs:
            text_lines.append(standardize_new_line_char(line))
    return text_lines
# Collect water-model information from pdb2gmx output lines.
def get_water_models_info(gmx_output_lines):
    """Return [[number, model_text], ...] for the water models listed in the output.

    The models appear on the lines after the first "Opening" line, up to the
    next "Opening" line or the first blank line.
    """
    first = 0
    while gmx_output_lines[first][0:7] != "Opening":
        first += 1
    first += 1
    last = first
    while gmx_output_lines[last][0:7] != "Opening" and gmx_output_lines[last][0] != "\n":
        last += 1
    # Number the models starting at 1 and strip each trailing newline.
    return [[nr, text[:-1]] for nr, text in enumerate(gmx_output_lines[first:last], start=1)]
# Render a "[done]" marker for finished workflow steps.
def steps_status_done(step_nr, s_params):
    """Return " [done]" when step *step_nr* is finished, "" when pending."""
    step_state = s_params.progress.status[step_nr]
    if step_state == 1:
        return " [done]"
    if step_state == 0:
        return ""
# Receive a status pair from the workflow classes and echo its message.
def status_update(input_status):
    """Print the human-readable part of *input_status* (a ["state", "message"] pair)."""
    print(input_status[1])
# This function starts the real workflow of the plugin, once everything is set.
def dynamics(s_params):
    """Run the whole MD pipeline step by step, honoring progress.to_do flags.

    A step runs only when its to_do flag is set and every previous step
    reported "ok". Each successful step is marked done and the options are
    pickled so the workflow can be resumed later.
    """
    print("Starting PyMOL plugin 'dynamics' ver. {}".format(plugin_ver))
    status = ["ok", ""]
    project_name = s_params.project_name
    project_dir = get_project_dirs(project_name)
    progress = s_params.progress
    gromacs2 = s_params.gmx_input
    vectors_prody = s_params.vectors_prody
    # All GROMACS commands below use paths relative to the project dir.
    os.chdir(project_dir)
    stop = 0
    # Saving configuration files
    if status[0] == "ok" and stop == 0 and progress.to_do[0] == 1:
        mdp_files(s_params)
        if status[0] == "ok":
            progress.status[0] = 1
            progress.to_do[0] = 0
            save_options(s_params)
    # Counting topology: pdb2top by default, x2top when the flag is set.
    if status[0] == "ok" and stop == 0 and progress.to_do[1] == 1 and progress.x2top == 0:
        status = gromacs2.pdb2top(s_params)
        if status[0] == "ok":
            progress.status[1] = 1
            progress.to_do[1] = 0
            save_options(s_params)
    elif status[0] == "ok" and stop == 0 and progress.to_do[1] == 1 and progress.x2top == 1:
        status = gromacs2.x2top(s_params)
        if status[0] == "ok":
            progress.status[1] = 1
            progress.to_do[1] = 0
            save_options(s_params)
    # Adding water box
    if status[0] == "ok" and stop == 0 and progress.to_do[2] == 1:
        status = gromacs2.waterbox(s_params)
        if status[0] == "ok":
            progress.status[2] = 1
            progress.to_do[2] = 0
            save_options(s_params)
    # Adding ions
    if status[0] == "ok" and stop == 0 and progress.to_do[3] == 1:
        status = gromacs2.saltadd(s_params)
        if status[0] == "ok":
            progress.status[3] = 1
            progress.to_do[3] = 0
            save_options(s_params)
    # EM (energy minimization); when skipped, forward the structure file.
    if status[0] == "ok" and stop == 0 and progress.to_do[4] == 1:
        status = gromacs2.em(s_params)
        if status[0] == "ok":
            progress.status[4] = 1
            progress.to_do[4] = 0
            save_options(s_params)
    elif status[0] == "ok" and stop == 0 and progress.to_do[4] == 0 and progress.status[4] == 0:
        shutil.copy(project_name + "_b4em.gro", project_name + "_b4pr.gro")
    # PR (position-restrained MD); when skipped, forward the structure file.
    if status[0] == "ok" and stop == 0 and progress.to_do[5] == 1:
        status = gromacs2.pr(s_params)
        if status[0] == "ok":
            progress.status[5] = 1
            progress.to_do[5] = 0
            save_options(s_params)
    elif status[0] == "ok" and stop == 0 and progress.to_do[5] == 0 and progress.status[5] == 0:
        shutil.copy(project_name + "_b4pr.gro", project_name + "_b4md.gro")
    # Restraints
    # NOTE(review): restraints() is called with project_name while every other
    # step receives s_params — confirm the method's expected argument.
    if status[0] == "ok" and stop == 0 and progress.to_do[6] == 1:
        status = gromacs2.restraints(project_name)
        if status[0] == "ok":
            progress.status[6] = 1
            progress.to_do[6] = 0
            save_options(s_params)
    # MD
    if status[0] == "ok" and stop == 0 and progress.to_do[7] == 1:
        status = gromacs2.md(s_params)
        if status[0] == "ok":
            progress.status[7] = 1
            progress.to_do[7] = 0
            save_options(s_params)
    # Trjconv
    # NOTE(review): unlike the other steps, the trjconv status is not checked
    # before marking the step done — a failed conversion is still recorded as
    # finished. Confirm whether this is intentional.
    if status[0] == "ok" and stop == 0 and progress.to_do[8] == 1:
        status = gromacs2.trjconv(s_params)
        show_multipdb(s_params)
        progress.status[8] = 1
        progress.to_do[8] = 0
        save_options(s_params)
    # Calculating vectors (only when the ProDy library is available)
    if status[0] == "ok" and stop == 0 and progress.to_do[9] == 1 and prody:
        vectors_prody.prody(project_name)
        vectors_prody.nmd_format(project_name)
        vectors_prody.show_vectors()
        progress.status[9] = 1
        progress.to_do[9] = 0
        save_options(s_params)
    elif status[0] == "fail":
        print(status[1])
        if stop == 0:
            error_message(s_params)
# Save the em/pr/md configuration files, unless global overrides exist.
def mdp_files(s_params):
    """Write each project .mdp file, skipping any overridden in the dynamics dir."""
    dynamics_dir = get_dynamics_dir()
    configs = (("em", s_params.em_file), ("pr", s_params.pr_file), ("md", s_params.md_file))
    for name, cfg in configs:
        if not os.path.isfile("{}{}.mdp".format(dynamics_dir, name)):
            cfg.save_file(s_params)
# Show the multimodel PDB file in PyMOL.
def show_multipdb(s_params):
    """Hide the original object and load <project>_multimodel.pdb into the viewer."""
    project_name = s_params.project_name
    try:
        cmd.hide("everything", project_name)  # PyMOL API
    except (parsing.QuietException, CmdException) as e:  # PyMOL API
        # The object may not exist (e.g. loaded from a saved project).
        print("Warning: {}".format(e))
    try:
        cmd.load("{}_multimodel.pdb".format(project_name))  # PyMOL API
    except AttributeError:
        # PyMOL API unavailable (e.g. headless test run).
        pass
# Detect PDB objects loaded in PyMOL; return ["nothing"] when none qualify.
def get_pdb_names():
    """Return loaded PyMOL object names, excluding plugin-generated objects."""
    loaded = cmd.get_names("objects")  # PyMOL API
    kept = []
    for name in loaded:
        parts = name.split("_")
        # Skip multimodel results, selections and "Mode_Vector_N" CGO objects.
        generated = (parts[-1] == "multimodel"
                     or parts[-1] == "(sele)"
                     or (parts[0] == "Mode" and len(parts) == 3))
        if not generated:
            kept.append(name)
    if not kept:
        kept = ["nothing"]
    return kept
def read_and_set_init_project(s_params):
    """Select the first loaded PyMOL object as the project and prepare its configs.

    Returns (all_names, project_name); project_name is "nothing" when no
    suitable object is loaded.
    """
    all_names = get_pdb_names()
    project_name = all_names[0]
    s_params.change_project_name(project_name)
    if all_names != ["nothing"]:
        s_params.create_cfg_files()
    return all_names, project_name
# List previously created projects that are not currently loaded in PyMOL.
def list_prev_projects(all_names):
    """Return project folder names under the dynamics dir, excluding *all_names*."""
    dynamics_dir = get_dynamics_dir()
    entries = os.listdir(dynamics_dir) if os.path.isdir(dynamics_dir) else []
    previous = []
    for entry in entries:
        if not os.path.isdir(dynamics_dir + entry):
            continue
        if entry in all_names or entry == "nothing":
            continue
        previous.append(entry)
    return previous
# Save the whole project as a .tar.bz2 archive.
def save_file(destination_path, s_params):
    """Archive the project directory to '<destination_path>.tar.bz2'.

    NOTE(review): the original unconditionally removed *destination_path*
    itself — presumably an empty file pre-created by the GUI save dialog —
    and raised FileNotFoundError when it did not exist. The removal is now
    guarded; confirm the dialog actually creates that file.
    """
    import tarfile
    project_name = s_params.project_name
    project_dir = get_project_dirs(project_name)
    print("Saving")
    save_options(s_params)
    # Context manager closes (and finalizes) the archive even on error.
    with tarfile.open(destination_path + ".tar.bz2", "w:bz2") as tar:
        tar.add(project_dir, recursive=True, arcname=project_name)
    if os.path.isfile(destination_path):
        os.remove(destination_path)
# Load a project from a .tar.bz2 archive.
def load_file(file_path, s_params):
    """Extract an archived project into the dynamics dir and load its options."""
    print("Loading file: " + file_path)
    # Imported locally for consistency with save_file; no module-level
    # tarfile import is visible in this part of the file, so the original
    # bare 'tarfile' reference could raise NameError here.
    import tarfile
    dynamics_dir = get_dynamics_dir()
    with tarfile.open(file_path, "r:bz2") as tar:
        names = tar.getnames()
        # Back up a same-named folder before overwriting it with the archive.
        if os.path.isdir(dynamics_dir + names[0]):
            back_folder = dynamics_dir + names[0] + "_back"
            while os.path.isdir(back_folder):
                back_folder = back_folder + "_b"
            os.rename(dynamics_dir + names[0], back_folder)
        tar.extractall(dynamics_dir)
    project_name = names[0]
    s_params.change_project_name(project_name)
    load_options(s_params)
# Save all settings to the project's options.pickle file.
def save_options(s_params):
    """Pickle plugin/GROMACS versions, inputs, mdp configs and progress state."""
    project_name = s_params.project_name
    project_dir = get_project_dirs(project_name)
    gmx_version = s_params.gmx_output.version
    gromacs2 = s_params.gmx_input
    progress = s_params.progress
    em_file = s_params.em_file
    pr_file = s_params.pr_file
    md_file = s_params.md_file
    # Store 0 when ProDy is unavailable so load_options can detect it.
    if not prody:
        vectors_prody = 0
    else:
        vectors_prody = s_params.vectors_prody
    print("updating project files")
    if not os.path.isdir(project_dir):
        os.makedirs(project_dir)
    pickle_list = [plugin_ver, gmx_version, gromacs2, em_file, pr_file, md_file, progress, vectors_prody]
    # 'with' flushes and closes the file; the original only del'd the handle,
    # leaving the flush timing to garbage collection.
    with open(project_dir + "options.pickle", "wb") as destination_option:
        pickle.dump(pickle_list, destination_option)
# Load all settings from the project's options.pickle file.
def load_options(s_params):
    """Restore gmx_input settings and mdp/progress objects from options.pickle.

    Supports project layouts written by plugin versions 2.2 and 2.1;
    anything else is rejected with a warning.

    NOTE(review): em_file/pr_file/md_file/progress/vectors_prody are bound
    to locals only and never written back to s_params here — confirm the
    caller re-reads them, otherwise the loaded values are dropped.
    """
    project_name = s_params.project_name
    project_dir = get_project_dirs(project_name)
    gmx_version = s_params.gmx_output.version
    gromacs2 = s_params.gmx_input
    pickle_file = open(project_dir + "options.pickle", "rb")
    options = pickle.load(pickle_file)
    print("Loading project {}".format(project_name))
    print("Project was created for Dynamics PyMOL Plugin {} and GROMACS {}".format(options[0], options[1]))
    if gmx_version != options[1]:
        print("GROMACS versions is different for loaded file.")
    # Plugin 2.2 layout: the "explicit" flag lives inside gmx_input.
    if options[0][1:4] == "2.2":
        gromacs2.update({"force": options[2].force, "water": options[2].water, "group": options[2].group,
                         "box_type": options[2].box_type, "hydro": options[2].hydro,
                         "box_distance": options[2].box_distance, "box_density": options[2].box_density,
                         "restraints_nr": options[2].restraints_nr, "neutrality": options[2].neutrality,
                         "salt_conc": options[2].salt_conc, "positive_ion": options[2].positive_ion,
                         "negative_ion": options[2].negative_ion, "explicit": options[2].explicit})
        em_file = options[3]
        pr_file = options[4]
        md_file = options[5]
        progress = options[6]
        if prody and options[7] != 0:
            vectors_prody = options[7]
    # Plugin 2.1 layout: "explicit" was pickled as a separate list entry.
    elif options[0][1:4] == "2.1":
        print("plugin 2.1 compatibility layer")
        gromacs2.update({"force": options[2].force, "water": options[2].water, "group": options[2].group,
                         "box_type": options[2].box_type, "hydro": options[2].hydro,
                         "box_distance": options[2].box_distance, "box_density": options[2].box_density,
                         "restraints_nr": options[2].restraints_nr, "neutrality": options[2].neutrality,
                         "salt_conc": options[2].salt_conc, "positive_ion": options[2].positive_ion,
                         "negative_ion": options[2].negative_ion})
        em_file = options[3]
        pr_file = options[4]
        md_file = options[5]
        progress = options[6]
        gromacs2.update({"explicit": options[7]})
        if prody and options[8] != 0:
            vectors_prody = options[8]
    else:
        print("Warning. Importing projects from plugin version " + options[0] + " is not supported. Aboring import.")
# Text for the "Help" window.
def help_option():
    """Return the plugin's help/about text as a single string."""
    help_message = """This is the dynamics PyMOL Plugin.
This software (including its Debian packaging) is available to you under the terms of the GPL-3,
see "/usr/share/common-licenses/GPL-3".
Software is created and maintained by Laboratory of Biomolecular Systems Simulation at University of Gdansk.
Contributors:
- Tomasz Makarewicz (makson96@gmail.com)
- Ajit B. Datta (ajit@jcbose.ac.in)
- Sara Boch Kminikowska
- Manish Sud (msud@san.rr.com; URL: www.MayaChemTools.org)
Full manual is available to you on project website: https://github.com/makson96/Dynamics/raw/master/manual.odt
or as a file: /usr/share/doc/dynamics-pymol-plugin/manual.odt
The purpose of this plugin is to perform molecular dynamics simulation by GROMACS using easy graphical tool and powerful molecular viewer.
To use this program run it as a PyMOL plugin.
Choose molecule (PDB) for which you want to perform molecular dynamics simulation (left column).
Choose force field and water model options in the middle column.
Choose any additional options in the right column.
Press OK button.
Click Start button and wait till calculation is finished.
Multimodel PDB file will be displayed in PyMOL viewer.
You can click Play button in order to see animation."""
    return help_message
# Clean function: remove all temporary plugin data.
def clean_option():
    """Delete the whole ~/.dynamics directory tree (all projects and overrides)."""
    target = get_dynamics_dir()
    shutil.rmtree(target)
    print("Temporary files are now removed.")
# If the molecular dynamics simulation fails, this function shows the error.
def error_message(s_params):
    """Print the 'Fatal error' section of the project log, then both raw logs."""
    project_name = s_params.project_name
    project_dir = get_project_dirs(project_name)
    with open(project_dir + "log.txt", "r") as log:
        log_list = log.readlines()
    # Locate the line following "Fatal error:". (The original also had
    # no-op "-= 0" adjustments here; they changed nothing and were removed.)
    error_start_line = 0
    for log_line in log_list:
        error_start_line += 1
        if "Fatal error:" in log_line:
            break
    # Advance to the separator line that closes the error block.
    error_end_line = error_start_line
    for log_line in log_list[error_start_line:]:
        error_end_line += 1
        if "-------------------------------------------------------" in log_line:
            break
    print("".join(log_list[error_start_line:error_end_line]))
    # Debug: dump both raw log files from the current directory.
    with open("log.txt", "r") as dump:
        print(dump.read())
    with open("log1.txt", "r") as dump:
        print(dump.read())
# GUI. This is the GUI section of the file. It should replace TkInter with Qt in the future.
# init function - puts the plugin into the menu and starts 'init_function' after clicking.
p_label = "Dynamics Gromacs {}".format(plugin_ver)
# PyMOL 2.x exposes the Qt menu API; older versions only have the Tk menu.
if int(cmd.get_version()[0][0]) >= 2:  # PyMOL API
    def __init_plugin__(app=None):
        plugins.addmenuitemqt(p_label, init_function)
else:
    def __init_plugin__(app):
        app.menuBar.addmenuitem('Plugin', 'command', label=p_label,
                                command=lambda: init_function(parent=app.root, gui_library="tk"))
def create_gui(gui_library, status, s_parameters, parent):
    """Open the main window with the requested toolkit ("tk" or "qt").

    On a failed initialization status, show an error dialog instead.
    """
    initialized_ok = status[0] == "ok"
    if gui_library == "tk":
        if initialized_ok:
            root_window(status, s_parameters, parent)
        else:
            tkMessageBox.showerror("Initialization error", status[1])
    elif gui_library == "qt":
        if initialized_ok:
            qt_root_window(status, s_parameters)
        else:
            qt_show_message(status[1], m_type="error", m_title="Initialization error")
# --Graphic Interface Qt--
# Display a Qt message box of the requested type.
def qt_show_message(message, m_type="error", m_title="Dynamics message"):
    """Show *message* in a QMessageBox; unknown types fall back to information.

    The original chained independent 'if' statements, so any non-error type
    showed the box twice: once in its own branch and once via the trailing
    'else' of the error check. An elif chain shows it exactly once.
    """
    m_type = m_type.lower()
    if m_type == "information":
        QtWidgets.QMessageBox.information(None, m_title, message)
    elif m_type == "question":
        QtWidgets.QMessageBox.question(None, m_title, message)
    elif m_type == "warning":
        QtWidgets.QMessageBox.warning(None, m_title, message)
    elif m_type == "error":
        QtWidgets.QMessageBox.critical(None, m_title, message)
    else:
        QtWidgets.QMessageBox.information(None, m_title, message)
def qt_root_window(status, s_params):
    # Qt interface stub: currently only initializes the project selection.
    all_names, project_name = read_and_set_init_project(s_params)
# --Graphic Interface Tk--
# Code quality below is intentionally left as-is: Tk is deprecated and will be removed in plugin version 3.1.
# Root menu window
def root_window(status, s_params, parent):
    """Build the main Tk window: molecule/group/force-field/water choices plus actions."""
    if parent:
        root = parent
    else:
        # First try to get this root fails, but the second try works fine.
        try:
            root_pymol = plugins.get_tk_root()
        except ModuleNotFoundError:
            root_pymol = plugins.get_tk_root()
        root = Toplevel(root_pymol)
    root.wm_title("Dynamics with Gromacs" + plugin_ver)
    calculationW = CalculationWindow()
    waterW = WaterWindows()
    restraintsW = RestraintsWindow()
    genionW = GenionWindow()
    gromacs = s_params.gmx_output
    gromacs2 = s_params.gmx_input
    dynamics_dir = get_dynamics_dir()
    vectors_prody = s_params.vectors_prody
    all_names, project_name = read_and_set_init_project(s_params)
    # TkInter variables
    v1_name = StringVar(root)
    v1_name.set(project_name)
    group_nr = gromacs.group_list[1][0]
    v2_group = IntVar(root)
    v2_group.set(group_nr)
    force_nr = gromacs.force_list[0][0]
    v3_force = IntVar(root)
    v3_force.set(force_nr)
    water_nr = gromacs.water_list[0][0]
    v4_water = IntVar(root)
    v4_water.set(water_nr)
    water_v = StringVar(root)
    water_v.set(gromacs.water_list[0][1])
    time_entry_value = StringVar(root)
    time_entry_value.set("10.0")
    # Start drawing interface
    frame0 = Frame(root)
    frame0.pack(side=TOP)
    w_version = Label(frame0, text="GROMACS VERSION " + gromacs.version)
    w_version.pack(side=TOP)
    frame1 = Frame(root)
    frame1.pack(side=TOP)
    frame1_1 = Frame(frame1, borderwidth=1, relief=RAISED)
    frame1_1.pack(side=LEFT)
    w1 = Label(frame1_1, text="Molecules", font="bold")
    w1.pack(side=TOP)
    frame1_1a = Frame(frame1_1)
    frame1_1a.pack(side=TOP)
    # List of PyMOL-loaded PDB files
    if all_names[0] != "nothing":
        for molecule in all_names:
            radio_button1 = Radiobutton(frame1_1a, text=molecule, value=molecule, variable=v1_name,
                                        command=lambda: set_variables(v1_name.get(), v2_group, v3_force, v4_water,
                                                                      water_v, check1_button, s_params))
            radio_button1.pack(side=TOP, anchor=W)
    # If no PDB files are loaded, add a button to choose one
    else:
        w1_1 = Label(frame1_1a, text="Choose PDB file")
        w1_1.pack(side=TOP)
        frame1_1_1 = Frame(frame1_1a)
        frame1_1_1.pack(side=TOP)
        label1 = Label(frame1_1_1, textvariable=v1_name)
        label1.pack(side=LEFT)
        button_e1 = Button(frame1_1_1, text="Browse", command=lambda: select_file(v1_name, s_params))
        button_e1.pack(side=LEFT)
    # List of previous projects
    projects = list_prev_projects(all_names)
    if projects:
        w1_2 = Label(frame1_1, text="Previous Projects")
        w1_2.pack(side=TOP)
        for molecule in projects:
            # Skip plugin-generated folders ("*_multimodel", "gromacs-*").
            molecule1 = molecule.split("_")
            if molecule1[-1] == "multimodel":
                pass
            else:
                molecule1 = molecule.split("-")
                if molecule1[0] == "gromacs":
                    pass
                else:
                    # NOTE(review): this lambda omits the s_params argument that
                    # the analogous call above passes — confirm set_variables
                    # accepts the shorter call.
                    radio_button1 = Radiobutton(frame1_1, text=molecule, value=molecule, variable=v1_name,
                                                command=lambda: set_variables(v1_name.get(), v2_group, v3_force,
                                                                              v4_water, water_v, check1_button))
                    radio_button1.pack(side=TOP, anchor=W)
    # List of groups for the final model
    w2 = Label(frame1_1, text="Group", font="bold")
    w2.pack(side=TOP)
    for group in gromacs.group_list:
        radio_button2 = Radiobutton(frame1_1, text=group[1], value=group[0], variable=v2_group,
                                    command=lambda: gromacs2.update({"group": v2_group.get()}))
        radio_button2.pack(side=TOP, anchor=W)
    frame1_2 = Frame(frame1, borderwidth=1, relief=RAISED)
    frame1_2.pack(side=LEFT)
    # List of available force fields
    w3 = Label(frame1_2, text="Force fields", anchor=E, font="bold")
    w3.pack(side=TOP)
    for force in gromacs.force_list:
        radio_button3 = Radiobutton(frame1_2, text=force[1], value=force[0], variable=v3_force,
                                    command=lambda: waterW.change(v4_water, water_v, v3_force.get()))
        radio_button3.pack(side=TOP, anchor=W)
    # Label of the chosen water model
    w4 = Label(frame1_2, text="Water Model", anchor=E, font="bold")
    w4.pack(side=TOP)
    frame1_2_1 = Frame(frame1_2)
    frame1_2_1.pack(side=TOP)
    # Buttons to choose the water model and configure the water box
    water_label = Label(frame1_2_1, textvariable=water_v)
    water_label.pack(side=LEFT)
    water_button = Button(frame1_2_1, text="Choose...",
                          command=lambda: waterW.choose(v4_water, water_v, waterbox_button, root, s_params))
    water_button.pack(side=LEFT)
    waterbox_button = Button(frame1_2_1, text="Configure", command=lambda: waterW.box(root, s_params))
    waterbox_button.pack(side=LEFT)
    waterbox_button2 = Button(frame1_2_1, text="Hydrogen Mass", command=lambda: waterW.box2(root, s_params))
    waterbox_button2.pack(side=LEFT)
    frame1_3 = Frame(frame1)
    frame1_3.pack(side=LEFT)
    frame1_3_1 = Frame(frame1_3, borderwidth=1, relief=RAISED)
    frame1_3_1.pack(side=TOP)
    w4 = Label(frame1_3_1, text="Configuration", font="bold")
    w4.pack(side=TOP)
    # Button for configuration of Simulation Steps
    steps_label = Label(frame1_3_1, text="Simulation Steps")
    steps_label.pack(side=TOP)
    steps_button = Button(frame1_3_1, text="Configure",
                          command=lambda: steps_configure(root, check1_button, s_params, restraintsW))
    steps_button.pack(side=TOP)
    # Button for Genion configuration
    ion_label = Label(frame1_3_1, text="Adding ions & Neutralize")
    ion_label.pack(side=TOP)
    ion_button2 = Button(frame1_3_1, text="Configure", command=lambda: genionW.window(root, s_params))
    ion_button2.pack(side=TOP)
    # Buttons for configuration of the MDP files; each is disabled when a
    # global override exists in the dynamics dir.
    em_label = Label(frame1_3_1, text="Energy Minimization")
    em_label.pack(side=TOP)
    em_button2 = Button(frame1_3_1, text="Configure", command=lambda: mdp_configure("em", root, s_params))
    em_button2.pack(side=TOP)
    if os.path.isfile(dynamics_dir + "em.mdp"):
        em_button2.configure(state=DISABLED)
    pr_label = Label(frame1_3_1, text="Position Restrained MD")
    pr_label.pack(side=TOP)
    pr_button2 = Button(frame1_3_1, text="Configure", command=lambda: mdp_configure("pr", root, s_params))
    pr_button2.pack(side=TOP)
    if os.path.isfile(dynamics_dir + "pr.mdp"):
        pr_button2.configure(state=DISABLED)
    md_label = Label(frame1_3_1, text="Molecular Dynamics Simulation")
    md_label.pack(side=TOP)
    md_button2 = Button(frame1_3_1, text="Configure", command=lambda: mdp_configure("md", root, s_params))
    md_button2.pack(side=TOP)
    if os.path.isfile(dynamics_dir + "md.mdp"):
        md_button2.configure(state=DISABLED)
    # Button for configuration of Restraints
    re_label = Label(frame1_3_1, text="Restraints (Select Atoms)")
    re_label.pack(side=TOP)
    check1_button = Button(frame1_3_1, text="Configure", command=lambda: restraintsW.window(root, s_params))
    check1_button.pack(side=TOP)
    # Button for ProDy options
    pro_label = Label(frame1_3_1, text="Vectors Options")
    pro_label.pack(side=TOP)
    prody_button = Button(frame1_3_1, text="Configure", command=lambda: vectors_prody.window(root))
    prody_button.pack(side=TOP)
    # Dynamics Simulation Time
    time_label = Label(frame1_3_1, text="Dynamics Simulation Time")
    time_label.pack(side=TOP)
    frame1_3_1_1 = Frame(frame1_3_1)
    frame1_3_1_1.pack(side=TOP)
    time_entry = Entry(frame1_3_1_1, textvariable=time_entry_value)
    time_entry.pack(side=LEFT)
    time_label2 = Label(frame1_3_1_1, text="[ps]")
    time_label2.pack(side=LEFT)
    # Convert the requested time [ps] into an nsteps value (time / dt).
    time_button = Button(frame1_3_1_1, text="OK", command=lambda: s_params.md_file.update(3, str(
        int(float(time_entry_value.get()) / float(s_params.md_file.options[2][1])))))
    time_button.pack(side=LEFT)
    # Disable configuration of ProDy (Vectors) if ProDy is not installed
    if not prody:
        prody_button.configure(state=DISABLED)
    frame2 = Frame(root)
    frame2.pack(side=TOP)
    # Additional Buttons
    exit_button = Button(frame2, text="Exit", command=root.destroy)
    exit_button.pack(side=LEFT)
    clean_button = Button(frame2, text="Clean", command=clean_message)
    clean_button.pack(side=LEFT)
    help_button = Button(frame2, text="Help", command=lambda: help_window(root))
    help_button.pack(side=LEFT)
    save_button = Button(frame2, text="Save", command=select_file_save)
    save_button.pack(side=LEFT)
    load_button = Button(frame2, text="Load",
                         command=lambda: select_file_load(frame1_1a, v1_name, v2_group, v3_force, v4_water, water_v,
                                                          check1_button, s_params))
    load_button.pack(side=LEFT)
    # NOTE(review): root_pymol is only bound in the no-parent branch above;
    # pressing OK with a parent window would raise NameError — confirm the
    # parent code path is actually used.
    count_button = Button(frame2, text="OK", command=lambda: calculationW.check_window(root, root_pymol, s_params,
                                                                                      status))
    count_button.pack(side=LEFT)
    # Initial configuration
    set_variables(v1_name.get(), v2_group, v3_force, v4_water, water_v, check1_button, s_params)
# Molecular Dynamics Performing window
class CalculationWindow:
    """Progress window that starts, stops and monitors the MD calculation.

    A worker thread (``bar_update``) pushes status strings and percent
    values onto two queues; the Tk main loop polls them in ``bar_display``
    so that all widget updates happen on the GUI thread.
    """

    tasks_to_do = 0     # total number of scheduled simulation steps
    bar_var = ""        # StringVar backing the status label
    bar_widget = ""     # ttk Progressbar widget
    start_button = ""
    stop_button = ""
    log_button = ""

    def __init__(self):
        # Thread-safe channels between the worker thread and the GUI.
        self.queue_status = Queue.Queue()
        self.queue_percent = Queue.Queue()

    # This will prevent Calculation Window to display if no protein has been selected
    def check_window(self, master, g_parent, s_params, status):
        """Open the Calculation Window, or warn if no molecule is loaded."""
        project_name = s_params.project_name
        if project_name != "nothing":
            master.destroy()
            root = Toplevel(g_parent)
            self.window(root, s_params, status, g_parent)
        elif project_name == "nothing":
            no_molecule_warning()

    # This function will create main Calculation Window
    def window(self, root, s_params, status, parent):
        """Build the Calculation Window widgets and start the monitor thread."""
        root.wm_title("Calculation Window")
        frame1 = Frame(root)
        frame1.pack(side=TOP)
        frame2 = Frame(root)
        frame2.pack(side=TOP)
        self.bar_var = StringVar(root)
        self.bar_var.set("Ready to start")
        w5 = Label(frame1, textvariable=self.bar_var)
        w5.pack(side=TOP)
        self.bar_widget = Progressbar(frame1)
        self.bar_widget.pack(side=TOP)
        exit_button = Button(frame2, text="EXIT", command=root.destroy)
        exit_button.pack(side=LEFT)
        save_button = Button(frame2, text="SAVE", command=lambda: select_file_save(1))
        save_button.pack(side=LEFT)
        stop_button = Button(frame2, text="STOP", command=lambda: self.start_counting(0))
        stop_button.pack(side=LEFT)
        stop = s_params.stop
        if stop:
            stop_button.configure(state=DISABLED)
        self.stop_button = stop_button
        start_button = Button(frame2, text="START", command=lambda: self.start_counting(1))
        start_button.pack(side=LEFT)
        if stop == 0:
            start_button.configure(state=DISABLED)
        self.start_button = start_button
        # BUG FIX: log_window requires s_params; binding the bare function
        # raised TypeError when the button was clicked.
        log_button = Button(frame2, text="LOG", command=lambda: log_window(s_params))
        log_button.pack(side=LEFT)
        log_button.configure(state=DISABLED)
        self.log_button = log_button
        # Updating status bar: count how many steps are scheduled.
        tasks_nr = 0.0
        for task in s_params.progress.to_do:
            tasks_nr = tasks_nr + task
        self.tasks_to_do = tasks_nr
        # BUG FIX: start_new_thread takes its arguments as one tuple;
        # the original passed them unpacked, raising TypeError.
        thread.start_new_thread(self.bar_update, (s_params, status))
        self.bar_display(root, parent, s_params)

    # This function will update status bar during molecular dynamics simulation (beware this is separate thread)
    def bar_update(self, s_params, status):
        """Worker thread: poll simulation progress and feed the queues."""
        percent = 0.0
        # Wait until the simulation is actually started (stop flag cleared).
        while s_params.stop:
            time.sleep(0.5)
        while percent != 100:  # and error == ""
            time.sleep(0.5)
            percent = steps_status_bar("only_bar", s_params)
            self.queue_percent.put(percent)
            if s_params.stop == 0:
                self.queue_status.put(status[1])
            elif s_params.stop == 1:
                self.queue_status.put("User Stoped")
        # if error != "":
        #    self.queue_status.put("Fatal Error")

    # This function will update status bar in thread safe manner
    def bar_display(self, root, parent, s_params):
        """GUI thread: drain the queues and refresh the widgets."""
        try:
            status = self.queue_status.get(block=False)
            self.bar_var.set(status)
        except Queue.Empty:
            status = "No change"
        try:
            percent = self.queue_percent.get(block=False)
            self.bar_widget.configure(value=percent)
        except Queue.Empty:
            # Narrowed from a bare except: only an empty queue is expected here.
            pass
        if status == "Fatal Error":
            self.start_counting(0)
            self.start_button.configure(state=DISABLED)
            tkMessageBox.showerror("GROMACS Error Message", "Error")  # error)
        if status == "Finished!":
            root.destroy()
            # Show interpretation window after successful completion of the calculations...
            show_interpretation_window(parent, s_params)
        else:
            # BUG FIX: re-schedule with all three arguments; the original
            # passed only `root`, so the second invocation raised TypeError.
            root.after(100, self.bar_display, root, parent, s_params)

    # This function will change global value if stop is clicked during simulation
    def start_counting(self, value):
        """Toggle the simulation: value=1 starts the worker, value=0 stops it."""
        if value == 1:
            # NOTE(review): assigns a local only; presumably this was meant
            # to update s_params.stop — confirm against the dynamics() side.
            stop = 0
            thread.start_new_thread(dynamics, ())
            self.stop_button.configure(state=ACTIVE)
            self.start_button.configure(state=DISABLED)
            self.log_button.configure(state=DISABLED)
        elif value == 0:
            stop = 1  # NOTE(review): local-only, see above
            self.stop_button.configure(state=DISABLED)
            self.start_button.configure(state=ACTIVE)
            self.log_button.configure(state=ACTIVE)
# This window will allow to manipulate final molecule to interprate MD simulation results
class InterpretationWindow:
    """Window for interpreting MD results: animation control, model/label
    styling and (optionally, via ProDy) vector/contact-map visualisation.
    """

    dt = 0.0        # integration step [ps], read from md.mdp
    nsteps = 0.0    # number of MD steps, read from md.mdp
    nstxout = 0.0   # trajectory output interval, read from md.mdp
    max_time = 0.0  # total simulated time [ps] = dt * nsteps
    tentry_value = ""
    pause = 1

    def __init__(self, g_parent, s_params):
        self.queue_time = Queue.Queue()
        self.md_time(s_params)
        root = Toplevel(g_parent)
        self.window(root, s_params)

    def md_time(self, s_params):
        """Parse dt/nsteps/nstxout out of the project's md.mdp file."""
        project_name = s_params.project_name
        project_dir = get_project_dirs(project_name)
        # Use a context manager: the original leaked the file handle.
        with open(project_dir + "md.mdp", "r") as md_file:
            for lines in md_file.readlines():
                # md.mdp lines look like "key = value"; token [2] is the value.
                splited_line = lines.split(" ")
                if splited_line[0] == "dt":
                    self.dt = float(splited_line[2])
                elif splited_line[0] == "nsteps":
                    self.nsteps = float(splited_line[2])
                elif splited_line[0] == "nstxout":
                    self.nstxout = float(splited_line[2])
        # BUG FIX: compute from the attributes (class defaults 0.0) so a
        # missing key cannot raise NameError as the original locals could.
        self.max_time = self.dt * self.nsteps

    def window(self, root, s_params):
        """Build the MD Interpretation window."""
        vectors_prody = s_params.vector_prody
        root.wm_title("MD Interpretation")
        self.tentry_value = StringVar(root)
        self.tentry_value.set("0.0")
        sentry_value = StringVar(root)
        sentry_value.set("1.0")
        contact_entry_value = StringVar(root)
        contact_entry_value.set("-1.0")
        frame1 = Frame(root)
        frame1.pack(side=TOP)
        # Animation
        alabel = Label(frame1, text="Animation", font="bold")
        alabel.pack()
        frame1_1 = Frame(frame1)
        frame1_1.pack(side=TOP)
        play_button = Button(frame1_1, text="PLAY", command=lambda: self.pause_play(0))
        play_button.pack(side=LEFT)
        pause_button = Button(frame1_1, text="PAUSE", command=lambda: self.pause_play(1))
        pause_button.pack(side=LEFT)
        frame1_2 = Frame(frame1)
        frame1_2.pack(side=TOP, anchor=W)
        tlabel = Label(frame1_2, text="Time [ps] (Max " + str(self.max_time) + " [ps])")
        tlabel.pack(side=LEFT)
        tentry = Entry(frame1_2, textvariable=self.tentry_value)
        tentry.pack(side=LEFT)
        tok_button = Button(frame1_2, text="OK",
                            command=lambda: cmd.frame(self.time2frames(self.tentry_value.get())))  # PyMOL API
        tok_button.pack(side=LEFT)
        frame1_3 = Frame(frame1)
        frame1_3.pack(side=TOP, anchor=W)
        mlabel = Label(frame1_3, text="Model Type")
        mlabel.pack(side=LEFT)
        lines_button = Button(frame1_3, text="Lines", command=lambda: self.shape("lines", s_params))
        lines_button.pack(side=LEFT)
        sticks_button = Button(frame1_3, text="Sticks", command=lambda: self.shape("sticks", s_params))
        sticks_button.pack(side=LEFT)
        ribbon_button = Button(frame1_3, text="Ribbon", command=lambda: self.shape("ribbon", s_params))
        ribbon_button.pack(side=LEFT)
        cartoon_button = Button(frame1_3, text="Cartoon", command=lambda: self.shape("cartoon", s_params))
        cartoon_button.pack(side=LEFT)
        frame1_3_1 = Frame(frame1)
        frame1_3_1.pack(side=TOP, anchor=W)
        mlabel = Label(frame1_3_1, text="Labels")
        mlabel.pack(side=LEFT)
        end_button = Button(frame1_3_1, text="Terminus", command=lambda: self.label("terminus", s_params))
        end_button.pack(side=LEFT)
        acids_button = Button(frame1_3_1, text="Amino Acids", command=lambda: self.label("acids", s_params))
        acids_button.pack(side=LEFT)
        clear_button = Button(frame1_3_1, text="Clear", command=lambda: self.label("clear", s_params))
        clear_button.pack(side=LEFT)
        # Background thread feeding the current frame time into queue_time.
        thread.start_new_thread(self.watch_frames, ())
        self.display_time(root)
        # Vectors
        vlabel = Label(frame1, text="Vectors (Require ProDy)", font="bold")
        vlabel.pack()
        frame1_4 = Frame(frame1)
        frame1_4.pack(side=TOP, anchor=W)
        modlabel = Label(frame1_4, text="Mode Nr")
        modlabel.pack(side=LEFT)
        one_button = Button(frame1_4, text="1", command=lambda: vectors_prody.change_vectors_mode_nr(0))
        one_button.pack(side=LEFT)
        two_button = Button(frame1_4, text="2", command=lambda: vectors_prody.change_vectors_mode_nr(1))
        two_button.pack(side=LEFT)
        three_button = Button(frame1_4, text="3", command=lambda: vectors_prody.change_vectors_mode_nr(2))
        three_button.pack(side=LEFT)
        frame1_5 = Frame(frame1)
        frame1_5.pack(side=TOP, anchor=W)
        slabel = Label(frame1_5, text="Scale")
        slabel.pack(side=LEFT)
        sentry = Entry(frame1_5, textvariable=sentry_value)
        sentry.pack(side=LEFT)
        sok_button = Button(frame1_5, text="OK", command=lambda: vectors_prody.change_vectors_scale(sentry_value.get()))
        sok_button.pack(side=LEFT)
        frame1_6 = Frame(frame1)
        frame1_6.pack(side=TOP, anchor=W)
        modlabel = Label(frame1_6, text="Color")
        modlabel.pack(side=LEFT)
        gray_button = Button(frame1_6, text="Gray", command=lambda: vectors_prody.change_vectors_color("gray"))
        gray_button.pack(side=LEFT)
        red_button = Button(frame1_6, text="Red", command=lambda: vectors_prody.change_vectors_color("red"))
        red_button.pack(side=LEFT)
        blue_button = Button(frame1_6, text="Blue", command=lambda: vectors_prody.change_vectors_color("blue"))
        blue_button.pack(side=LEFT)
        green_button = Button(frame1_6, text="Green", command=lambda: vectors_prody.change_vectors_color("green"))
        green_button.pack(side=LEFT)
        frame1_7 = Frame(frame1)
        frame1_7.pack(side=TOP, anchor=W)
        modlabel = Label(frame1_7, text="Plot results")
        modlabel.pack(side=LEFT)
        contact_button = Button(frame1_7, text="Show Contact Map Graph",
                                command=lambda: vectors_prody.graph_contact_map("contact"))
        contact_button.pack(side=LEFT)
        cross_button = Button(frame1_7, text="Show Cross-correlations Graph",
                              command=lambda: vectors_prody.graph_contact_map("cross"))
        cross_button.pack(side=LEFT)
        frame1_8 = Frame(frame1)
        frame1_8.pack(side=TOP, anchor=W)
        modlabel = Label(frame1_8, text="Plot results")
        modlabel.pack(side=LEFT)
        contact_pymol_button = Button(frame1_8, text="Show Contact Map In PyMOL",
                                      command=lambda: vectors_prody.show_contact_map(contact_entry_value.get(),
                                                                                     s_params.project_name))
        contact_pymol_button.pack(side=LEFT)
        contact_label = Label(frame1_8, text="Sensitivity")
        contact_label.pack(side=LEFT)
        contact_entry = Entry(frame1_8, textvariable=contact_entry_value)
        contact_entry.pack(side=LEFT)
        # NOTE: frame1_8 is deliberately rebound here (same as original code).
        frame1_8 = Frame(frame1)
        frame1_8.pack(side=TOP)
        exit_button = Button(frame1_8, text="Exit", command=root.destroy)
        exit_button.pack(side=LEFT)
        save_button = Button(frame1_8, text="Save", command=lambda: select_file_save(s_params))
        save_button.pack(side=LEFT)
        # BUG FIX: log_window requires s_params; the original bound the bare
        # function, which raised TypeError when the button was clicked.
        log_button = Button(frame1_8, text="Log", command=lambda: log_window(s_params))
        log_button.pack(side=LEFT)
        # Disable every ProDy-dependent widget when ProDy is unavailable.
        if not prody:
            print("No ProDy found")
            one_button.configure(state=DISABLED)
            two_button.configure(state=DISABLED)
            three_button.configure(state=DISABLED)
            sok_button.configure(state=DISABLED)
            gray_button.configure(state=DISABLED)
            red_button.configure(state=DISABLED)
            blue_button.configure(state=DISABLED)
            green_button.configure(state=DISABLED)
        if not prody or vectors_prody.contact_map != 1:
            contact_button.configure(state=DISABLED)
            cross_button.configure(state=DISABLED)
            contact_pymol_button.configure(state=DISABLED)

    def pause_play(self, value):
        """Pause (value=1) or resume (value=0) the PyMOL animation."""
        if value == 1:
            self.pause = 1
            cmd.mstop()  # PyMOL API
        elif value == 0:
            self.pause = 0
            cmd.mplay()  # PyMOL API

    def frames2time(self, text_var):
        """Convert a frame number into simulation time [ps]."""
        frame = float(text_var)
        time = frame * self.dt * self.nstxout
        return time

    def time2frames(self, text_var):
        """Convert a simulation time [ps] into a (whole) frame number."""
        nsecond = float(text_var)
        frame = nsecond / self.dt / self.nstxout
        frame = int(frame)
        return frame

    @staticmethod
    def shape(shape_type, s_params):
        """Switch the multimodel representation to `shape_type`."""
        project_name = s_params.project_name
        cmd.hide("everything", project_name + "_multimodel")  # PyMOL API
        cmd.show(shape_type, project_name + "_multimodel")  # PyMOL API

    @staticmethod
    def label(name, s_params):
        """Label termini or residues of the multimodel, or clear labels."""
        project_name = s_params.project_name
        if name == "terminus":
            cmd.label("n. ca and {}_multimodel and i. 1".format(project_name), '"N-terminus"')  # PyMOL API
            ca_number = cmd.count_atoms("n. ca and " + project_name + "_multimodel")  # PyMOL API
            cmd.label("n. ca and {}_multimodel and i. {}".format(project_name, str(ca_number)),
                      '"C-terminus"')  # PyMOL API
        elif name == "acids":
            cmd.label("n. ca and {}_multimodel".format(project_name), "resn")  # PyMOL API
        elif name == "clear":
            cmd.label("n. ca and {}_multimodel".format(project_name), "")  # PyMOL API

    # This function will watch time (beware this is separate thread)
    def watch_frames(self):
        """Worker thread: push the current PyMOL frame's time onto the queue."""
        while 1:
            pymol_frame = cmd.get_frame()  # PyMOL API
            pymol_time = self.frames2time(pymol_frame)
            self.queue_time.put(pymol_time)
            time.sleep(0.1)

    # This function will update display time in thread safe manner
    def display_time(self, root):
        """GUI thread: show the latest frame time unless animation is paused."""
        try:
            time = self.queue_time.get(block=False)
        except Queue.Empty:
            time = "No change"
        if self.pause != 1:
            self.tentry_value.set(time)
        root.after(100, self.display_time, root)
# Show interpretation window...
def show_interpretation_window(parent, s_params):
    """Open the MD-results interpretation window after a finished run."""
    interpretation = InterpretationWindow(parent, s_params)
def help_window(master):
    """Display the plugin help text in its own top-level window."""
    top = Toplevel(master)
    top.wm_title("Help Window")
    container = Frame(top)
    container.pack()
    help_label = Label(container, text=help_option())
    help_label.pack()
    close_button = Button(container, text="OK", command=top.destroy)
    close_button.pack()
def log_window(s_params):
    """Open the project's log.txt with the platform's default text viewer.

    Uses xdg-open on Linux, open on macOS and start on Windows.
    """
    project_name = s_params.project_name
    project_dir = get_project_dirs(project_name)
    # BUG FIX: Python 3 reports sys.platform == "linux" while Python 2 used
    # "linux2"; the original equality test silently did nothing on Python 3.
    # Local renamed from `cmd` to avoid shadowing PyMOL's global `cmd`.
    if sys.platform.startswith("linux"):
        open_cmd = "xdg-open {}log.txt".format(project_dir)
        execute_subprocess(open_cmd)
    elif sys.platform == "darwin":
        open_cmd = "open {}log.txt".format(project_dir)
        execute_subprocess(open_cmd)
    elif sys.platform.startswith('win'):
        open_cmd = "start {}log.txt".format(project_dir)
        execute_subprocess(open_cmd)
def clean_message():
    """Remove the plugin's temporary files and tell the user to restart."""
    info_title = "Clean"
    info_text = "Temporary files are now removed!\nPlease restart plugin."
    tkMessageBox.showinfo(info_title, info_text)
    clean_option()
def no_molecule_warning():
    """Warn the user that an action requires a molecule to be selected."""
    warning_title = "No Molecule Selected"
    warning_text = "Please choose any molecule before using this option."
    tkMessageBox.showinfo(warning_title, warning_text)
# This class is resposible for graphic edition of restraints
class RestraintsWindow:
    """Graphical editor for GROMACS restraint groups.

    ``window`` shows one radio button + editable atom list per restraint
    group (plus the current PyMOL selection); ``index`` writes the chosen
    group to index_dynamics.ndx.
    """

    atom_list = []   # Text widgets holding each group's atom numbers
    check_var = ""   # IntVar with the selected group index

    # This function will create main window for restraints
    def window(self, master, s_params):
        """Build the restraint-selection window."""
        gromacs2 = s_params.gmx_input
        gromacs = s_params.gmx_output
        root = Toplevel(master)
        root.wm_title("Restraints Configure")
        ok_button = Button(root, text="OK", command=lambda: self.index(s_params))
        ok_button.pack(side=BOTTOM)
        sb = Scrollbar(root, orient=VERTICAL)
        sb.pack(side=RIGHT, fill=Y)
        canvas = Canvas(root, width=600)
        canvas.pack(side=TOP, fill="both", expand=True)
        frame1 = Frame(canvas)
        frame1.pack(side=TOP)
        # attach canvas (with frame1 in it) to scrollbar
        canvas.config(yscrollcommand=sb.set)
        sb.config(command=canvas.yview)
        # bind frame1 with canvas
        canvas.create_window((1, 1), window=frame1, anchor="nw", tags="frame1")
        # BUG FIX: the original passed canvas.config(...)'s return value
        # (None) to bind(), so nothing was actually bound; configure the
        # scroll region now and bind a real callback for resizes.
        canvas.config(scrollregion=(0, 0, 0, 4500))
        frame1.bind("<Configure>",
                    lambda event: canvas.config(scrollregion=(0, 0, 0, 4500)))
        self.check_var = IntVar(frame1)
        self.check_var.set(gromacs2.restraints_nr)
        self.atom_list = []
        number = 0
        for group in gromacs.restraints:
            select = Radiobutton(frame1, text=group[0], value=number, variable=self.check_var)
            select.pack()
            text = Text(frame1)
            text.insert(END, group[1])
            text.pack()
            self.atom_list.append(text)
            number = number + 1
        select1 = Radiobutton(frame1, text="[ PyMol Selected ]", value=number, variable=self.check_var)
        select1.pack()
        text1 = Text(frame1)
        stored.list = []
        cmd.iterate("(sele)", "stored.list.append(ID)")  # PyMOL API
        # Build a whitespace-separated atom list, wrapping lines at ~72 chars.
        stored_string = ""
        for atom in stored.list:
            stored_string = stored_string + str(atom)
            lengh = stored_string.split('\n')
            if len(lengh[-1]) < 72:
                stored_string = stored_string + " "
            else:
                stored_string = stored_string + "\n"
        text1.insert(END, stored_string)
        text1.pack()
        self.atom_list.append(text1)

    # This function will modify index_dynamics.ndx file based on user chosen restraints
    def index(self, s_params, root_to_kill=False):
        """Persist the selected restraint group to index_dynamics.ndx."""
        gromacs2 = s_params.gmx_input
        gromacs = s_params.gmx_output
        index_nr = self.check_var.get()
        gromacs2.restraints_nr = index_nr
        text = self.atom_list[index_nr]
        if index_nr < len(gromacs.restraints):
            gromacs.restraints[index_nr][1] = text.get(1.0, END)
            # Self-assignment kept: it may trigger a property setter with
            # side effects elsewhere in the project — do not remove blindly.
            gromacs.restraints = gromacs.restraints
        # Context manager replaces the manual open/close pair.
        with open("index_dynamics.ndx", "w") as index_file:
            index_file.write("[ Dynamics Selected ]\n" + text.get(1.0, END))
        if root_to_kill:
            root_to_kill.destroy()

    # This function will activate or disable the restraints button in the main window
    def check(self, check, config_button, s_params):
        """Enable/disable restraints and sync the MD options + step list."""
        md_file = s_params.md_file
        gromacs = s_params.gmx_output
        progress = s_params.progress
        if check == 1:
            config_button.configure(state=ACTIVE)
            md_file.update(2, md_file.options[2][1], 1)
            gromacs.restraints_index()
            progress.to_do[6] = 1
            progress.to_do = progress.to_do
        elif check == 0:
            config_button.configure(state=DISABLED)
            md_file.update(2, md_file.options[2][1], 0)
            progress.to_do[6] = 0
            progress.to_do = progress.to_do
# This function will create window, which allow you to choose PDB file if no file is loaded to PyMOL
def select_file(v_name, s_params):
    """Ask the user for a PDB file and create a project around it.

    The project is named after the file's basename; the file is copied
    into the project directory and default config files are generated.
    """
    root = Tk()
    # Renamed from `file` (shadowed the builtin).  askopenfile returns
    # None when the dialog is cancelled — handle that explicitly instead
    # of relying on a blanket except.
    pdb_file = tkFileDialog.askopenfile(parent=root, mode='rb', title='Choose PDB file')
    if pdb_file is not None:
        try:
            name = pdb_file.name.split("/")
            name2 = name[-1].split(".")
            # Checking directories
            project_name = name2[0]
            s_params.change_project_name(project_name)
            project_dir = get_project_dirs(project_name)
            v_name.set(project_name)
            if not os.path.isdir(project_dir):
                os.makedirs(project_dir)
            shutil.copyfile(pdb_file.name, project_dir + project_name + ".pdb")
            print("pdb_copied")
            create_config_files(project_name)
        except Exception:
            # Best-effort: keep the GUI alive if project setup fails
            # (narrowed from the original bare except).
            pass
        finally:
            # BUG FIX: the original leaked the open file handle.
            pdb_file.close()
    root.destroy()
# This function will create window, which allow you to save current work
def select_file_save(s_params, rest_of_work=0):
    """Ask for a target file and save the current project state to it.

    rest_of_work=1 first refreshes the progress status so the remaining
    steps are recorded in the save file.
    """
    project_name = s_params.project_name
    progress = s_params.progress
    if project_name != "nothing":
        if rest_of_work == 1:
            progress.to_do_status()
        root = Tk()
        save_handle = tkFileDialog.asksaveasfile(parent=root, mode='w', title='Choose save file')
        # BUG FIX: the original tested `if not file:`, which skipped saving
        # when a file WAS chosen and crashed on None.name when the dialog
        # was cancelled.
        if save_handle is not None:
            # Release the handle first; save_file() works from the path.
            save_handle.close()
            save_file(save_handle.name, s_params)
        root.destroy()
    elif project_name == "nothing":
        no_molecule_warning()
# This function will create window, which allow you to load previously saved work
def select_file_load(frame1_1a, v1_name, v2_group, v3_force, v4_water, water_v, config_button_restraints, s_params):
    """Ask for a saved project archive and restore its state into the GUI."""
    # NOTE(review): project_name is read BEFORE load_file(); if load_file
    # changes s_params.project_name this label may be stale — confirm.
    project_name = s_params.project_name
    gromacs = s_params.gmx_output
    gromacs2 = s_params.gmx_input
    root = Tk()
    archive = tkFileDialog.askopenfile(parent=root, mode='rb', defaultextension=".tar.bz2",
                                       title='Choose file to load')
    # BUG FIX: the original tested `if not file:`, which skipped loading
    # when a file WAS chosen and crashed on None.name when cancelled.
    if archive is not None:
        archive.close()  # load_file() works from the path; release the handle
        load_file(archive.name, s_params)
        v1_name.set(project_name)
        v2_group.set(gromacs.group_list[gromacs2.group][0])
        v3_force.set(gromacs.force_list[gromacs2.force - 1][0])
        v4_water.set(gromacs.water_list[gromacs2.water - 1][0])
        water_v.set(gromacs.water_list[v4_water.get() - 1][1])
        # BUG FIX: set_variables requires s_params as its final argument;
        # the original lambda omitted it (TypeError when clicked).
        radio_button1 = Radiobutton(frame1_1a, text=project_name, value=project_name, variable=v1_name,
                                    command=lambda: set_variables(v1_name.get(), v2_group, v3_force, v4_water,
                                                                  water_v, config_button_restraints, s_params))
        radio_button1.pack(side=TOP, anchor=W)
    root.destroy()
# This function sets variables after choosing new molecule
def set_variables(name, v2_group, v3_force, v4_water, water_v, config_button_restraints, s_params):
    """Synchronise the GUI selectors with the configuration of molecule `name`.

    Loads saved options when an options.pickle exists for the project,
    otherwise generates fresh config files; then mirrors the restraints
    state on its button and resets the step plan for non-resumed runs.
    """
    print("Set Variables")
    gromacs = s_params.gmx_output
    gromacs2 = s_params.gmx_input
    progress = s_params.progress
    # Set project name and dir
    if name:
        s_params.change_project_name(name)
        project_dir = get_project_dirs(name)
        if os.path.isfile("{}options.pickle".format(project_dir)):
            # Saved options exist: restore them and refresh the selectors.
            load_options(s_params)
            v2_group.set(gromacs.group_list[gromacs2.group][0])
            v3_force.set(gromacs.force_list[gromacs2.force - 1][0])
            v4_water.set(gromacs.water_list[gromacs2.water - 1][0])
            water_v.set(gromacs.water_list[v4_water.get() - 1][1])
        else:
            create_config_files(name)
    # Mirror the stored restraints choice on the Configure button.
    if progress.to_do[6] == 0:
        config_button_restraints.configure(state=DISABLED)
    elif progress.to_do[6] == 1:
        config_button_restraints.configure(state=ACTIVE)
    # A fresh (non-resumed) project starts with the default step plan.
    if progress.resume == 0:
        progress.to_do = [1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
# This function will create the window with configuration files based on MDP class
def mdp_configure(config_name, master, s_params):
    """Open an editor window for one of the .mdp configuration files.

    config_name selects which MDP object is edited: "em" (energy
    minimization), "pr" (position-restrained MD) or "md" (production MD).
    Each option gets a Checkbutton (enabled = uncommented) and an Entry.
    """
    project_name = s_params.project_name
    em_file = s_params.em_file
    pr_file = s_params.pr_file
    md_file = s_params.md_file
    if project_name != "nothing":
        root2 = Toplevel(master)
        if config_name == "em":
            em_file.clean_artefacts()
            options = em_file.options
            root2.wm_title("Energy Minimization Options")
        elif config_name == "pr":
            pr_file.clean_artefacts()
            options = pr_file.options
            root2.wm_title("Position Restrained MD Options")
        elif config_name == "md":
            md_file.clean_artefacts()
            options = md_file.options
            root2.wm_title("Molecular Dynamics Simulation Options")
        values_list = []
        check_list = []
        # One OK button covers all three cases: the original's three
        # branches differed only in the literal forwarded to mdp_update,
        # which is exactly config_name.
        b = Button(root2, text="OK",
                   command=lambda: mdp_update(values_list, check_list, config_name, s_params, root2))
        b.pack(side=BOTTOM)
        sb = Scrollbar(root2, orient=VERTICAL)
        sb.pack(side=RIGHT, fill=Y)
        canvas = Canvas(root2, width=400)
        canvas.pack(side=TOP, fill="both", expand=True)
        frame1 = Frame(canvas)
        frame1.pack(side=TOP)
        # attach canvas (with frame1 in it) to scrollbar
        canvas.config(yscrollcommand=sb.set)
        sb.config(command=canvas.yview)
        # bind canvas with frame1 1/2
        canvas.create_window((1, 1), window=frame1, anchor="nw", tags="frame1")
        for option, value in options:
            frame2 = Frame(frame1)
            frame2.pack(side=TOP)
            # Section headers inserted before well-known option names.
            if option == "emtol":
                l1 = Label(frame2, text="Energy minimizing stuff")
                l1.pack(side=TOP)
            elif option == "Tcoupl":
                l1 = Label(frame2, text="Berendsen temperature and coupling")
                l1.pack(side=TOP)
            elif option == "Pcoupl":
                l1 = Label(frame2, text="Pressure coupling")
                l1.pack(side=TOP)
            elif option == "gen_vel":
                l1 = Label(frame2, text="Generate velocites temperature")
                l1.pack(side=TOP)
            elif option == "constraints":
                l1 = Label(frame2, text="Options for bonds")
                l1.pack(side=TOP)
            values_list.append(StringVar(root2))
            values_list[-1].set(value)
            check_list.append(IntVar(root2))
            # A leading ";" marks a commented-out option; the original's
            # duplicated if/else branches are collapsed here.
            check_list[-1].set(0 if option[0] == ";" else 1)
            c1 = Checkbutton(frame2, text=option, variable=check_list[-1], width=25, anchor=W)
            c1.pack(side=LEFT)
            e = Entry(frame2, textvariable=values_list[-1])
            e.pack(side=LEFT)
        # BUG FIX: the original passed canvas.config(...)'s return value
        # (None) to bind(), so the scroll region was set once and never
        # refreshed on resize.  Configure it now and bind a real callback.
        canvas.config(scrollregion=(0, 0, 0, len(values_list) * 25))
        frame1.bind("<Configure>",
                    lambda event: canvas.config(scrollregion=(0, 0, 0, len(values_list) * 25)))
    elif project_name == "nothing":
        no_molecule_warning()
# This function will update MDP class objects alfter closing "mdp_configure" window
def mdp_update(values, check_list, mdp, s_params, root_to_kill=""):
    """Write edited option values back into the selected MDP object.

    values/check_list are parallel lists of Tk variables from the editor
    window; mdp selects the target file: "em", "pr" or "md".  The editor
    window (root_to_kill) is closed if supplied, then options are saved.
    """
    em_file = s_params.em_file
    pr_file = s_params.pr_file
    md_file = s_params.md_file
    # Explicit guard instead of the original bare try/except around
    # destroy(): the "" default is simply skipped.
    if root_to_kill:
        root_to_kill.destroy()
    # Dispatch table replaces the per-iteration if/elif chain.
    target = {"em": em_file, "pr": pr_file, "md": md_file}.get(mdp)
    if target is not None:
        for index_nr, value in enumerate(values):
            target.update(index_nr, value.get(), check_list[index_nr].get())
    save_options(em_file, pr_file, md_file, s_params)
# This function will create Simulation Steps configuration window
def steps_configure(master, restraints_button, s_params, restraintsW):
    """Open the Simulation Steps Configuration window.

    One Checkbutton per pipeline step (config files, topology, water box,
    ions, EM, position-restrained MD, restraints, production MD,
    multimodel PDB, ProDy vectors) plus a Resume toggle and progress bar.
    """
    project_name = s_params.project_name
    progress = s_params.progress
    gromacs2 = s_params.gmx_input
    if project_name != "nothing":
        root = Toplevel(master)
        root.wm_title("Simulation Steps Configuration")
        check_var1 = IntVar(root)
        check_var1.set(progress.to_do[0])
        check_var2 = IntVar(root)
        check_var2.set(progress.to_do[1])
        v1 = IntVar(root)
        v1.set(progress.x2top)
        check_var3 = IntVar(root)
        check_var3.set(progress.to_do[2])
        # Created empty variable check_var4 for genion
        check_var4 = IntVar(root)
        check_var4.set(progress.to_do[3])
        check_var5 = IntVar(root)
        check_var5.set(progress.to_do[4])
        check_var6 = IntVar(root)
        check_var6.set(progress.to_do[5])
        check_var7 = IntVar(root)
        check_var7.set(progress.to_do[6])
        check_var8 = IntVar(root)
        check_var8.set(progress.to_do[7])
        check_var9 = IntVar(root)
        check_var9.set(progress.to_do[8])
        check_var10 = IntVar(root)
        check_var10.set(progress.to_do[9])
        # Variable for Resume Simulation
        check_var11 = IntVar(root)
        check_var11.set(progress.resume)
        frame1 = Frame(root)
        frame1.pack(side=TOP)
        c1 = Checkbutton(frame1, text="Save configuration files" + steps_status_done(0, s_params), variable=check_var1,
                         command=lambda: progress.to_do_update(0, check_var1.get()))
        c1.pack(side=TOP, anchor=W)
        c2 = Checkbutton(frame1, text="Generate topology file from pdb" + steps_status_done(1, s_params),
                         variable=check_var2,
                         command=lambda: progress.to_do_update(1, check_var2.get()))
        c2.pack(side=TOP, anchor=W)
        r1 = Radiobutton(frame1, text="Use pdb2gmx tool", value=0, variable=v1,
                         command=lambda: progress.x2top_update(v1.get()))
        r1.pack(side=TOP, anchor=W)
        r2 = Radiobutton(frame1, text="Use x2top tool", value=1, variable=v1,
                         command=lambda: progress.x2top_update(v1.get()))
        r2.pack(side=TOP, anchor=W)
        c3 = Checkbutton(frame1, text="Adding Water Box (only for explicit solvent)" + steps_status_done(2, s_params),
                         variable=check_var3, command=lambda: progress.to_do_update(2, check_var3.get()))
        c3.pack(side=TOP, anchor=W)
        c4 = Checkbutton(frame1,
                         text="Adding ions and neutralize (only for explicit solvent; Optional)" + steps_status_done(3,
                                                                                                                     s_params),
                         variable=check_var4, command=lambda: progress.to_do_update(3, check_var4.get()))
        c4.pack(side=TOP, anchor=W)
        c5 = Checkbutton(frame1, text="Energy Minimization (optional)" + steps_status_done(4, s_params),
                         variable=check_var5,
                         command=lambda: progress.to_do_update(4, check_var5.get()))
        c5.pack(side=TOP, anchor=W)
        c6 = Checkbutton(frame1,
                         text="Position Restrained MD (optional, only for explicit solvent)" + steps_status_done(5,
                                                                                                                 s_params),
                         variable=check_var6, command=lambda: progress.to_do_update(5, check_var6.get()))
        c6.pack(side=TOP, anchor=W)
        # BUG FIX: RestraintsWindow.check requires s_params as its third
        # argument; the original lambda omitted it (TypeError when clicked).
        c7 = Checkbutton(frame1, text="Restraints (optional)" + steps_status_done(6, s_params), variable=check_var7,
                         command=lambda: restraintsW.check(check_var7.get(), restraints_button, s_params))
        c7.pack(side=TOP, anchor=W)
        c8 = Checkbutton(frame1, text="Molecular Dynamics Simulation" + steps_status_done(7, s_params),
                         variable=check_var8,
                         command=lambda: progress.to_do_update(7, check_var8.get()))
        c8.pack(side=TOP, anchor=W)
        c9 = Checkbutton(frame1, text="Generate multimodel PDB" + steps_status_done(8, s_params), variable=check_var9,
                         command=lambda: progress.to_do_update(8, check_var9.get()))
        c9.pack(side=TOP, anchor=W)
        c10 = Checkbutton(frame1, text="Calculate vectors using ProDy (optional)" + steps_status_done(9, s_params),
                          variable=check_var10, command=lambda: progress.to_do_update(9, check_var10.get()))
        c10.pack(side=TOP, anchor=W)
        if not prody:
            # NOTE(review): this clears check_var11 (Resume) while disabling
            # the ProDy step c10 — presumably check_var10 was intended;
            # behavior kept as in the original, confirm before changing.
            check_var11.set(0)
            c10.configure(state=DISABLED)
            progress.to_do_update(9, 0)
        if gromacs2.explicit != 1:
            # Implicit solvent: water box, ions and PR-MD do not apply.
            check_var3.set(0)
            c3.configure(state=DISABLED)
            progress.to_do_update(2, 0)
            check_var4.set(0)
            c4.configure(state=DISABLED)
            progress.to_do_update(3, 0)
            check_var6.set(0)
            c6.configure(state=DISABLED)
            progress.to_do_update(5, 0)
        l1 = Label(frame1, text="Simulation Progress:")
        l1.pack(side=TOP)
        variable_list = [check_var1, check_var2, check_var3, check_var4, check_var5, check_var6, check_var7, check_var8,
                         check_var9, check_var10, check_var11]
        progress_bar = Progressbar(frame1)
        progress_bar.pack(side=TOP)
        if check_var11.get() == 1:
            percent = steps_status_bar(check_var11.get(), s_params, variable_list)
            progress_bar.configure(value=percent)
        c11 = Checkbutton(frame1, text="Resume Simulation", variable=check_var11,
                          command=lambda: steps_click_resume(check_var11.get(), progress_bar, s_params, variable_list))
        c11.pack(side=TOP, anchor=W)
        b1 = Button(root, text="OK", command=lambda: steps_click_ok(root, s_params))
        b1.pack(side=TOP)
    elif project_name == "nothing":
        no_molecule_warning()
# This function will update status bar if checkbutton is clicked
def steps_click_resume(var, bar, s_params, variable_list=None):
    """Refresh the progress bar after the Resume checkbox is toggled.

    variable_list holds the step IntVars to synchronise; the default is
    None instead of a shared mutable list (mutable-default anti-pattern
    in the original).
    """
    if variable_list is None:
        variable_list = []
    percent = steps_status_bar(var, s_params, variable_list)
    bar.configure(value=percent)
# This function will close steps window and update number of steps to do
def steps_click_ok(root, s_params):
    """Close the steps window and recount how many steps remain to run."""
    simulation_progress = s_params.progress
    simulation_progress.steps = sum(simulation_progress.to_do)
    root.destroy()
# This function will show current progress on Progress Bar and operate with Steps Simulation Window for
# "Resume Simulation" button.
def steps_status_bar(var, s_params, variable_list=None):
    """Compute the completion percentage for the Steps progress bar.

    var == 1 ("Resume"): sync progress.to_do and the checkbox variables
    with what has already been completed (finished steps switched off,
    unfinished ones — except restraints, index 6 — switched back on).
    var == 0: reset to the default step plan.
    Returns the percent of scheduled steps already completed.
    """
    progress = s_params.progress
    if variable_list is None:  # avoid the original's shared mutable default
        variable_list = []
    percent = 0.0
    if var == 1:
        to_do_nr = 0
        for step in progress.status:
            if step == 1:
                progress.to_do[to_do_nr] = 0
                # Self-assignment kept: may trigger a property setter.
                progress.to_do = progress.to_do
                variable_list[to_do_nr].set(0)
            elif step == 0 and to_do_nr != 6:
                progress.to_do[to_do_nr] = 1
                progress.to_do = progress.to_do
                variable_list[to_do_nr].set(1)
            to_do_nr = to_do_nr + 1
        progress.resume = 1
    elif var == 0:
        percent = 0.0
        progress.to_do = [1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
        to_do_nr = 0
        for variable in variable_list:
            if to_do_nr != 5:
                variable.set(1)
            elif to_do_nr != 5:
                # NOTE(review): unreachable — identical condition to the
                # branch above, so index 5 is never touched; presumably
                # `else:` was intended.  Behavior preserved as-is.
                variable.set(0)
            to_do_nr = to_do_nr + 1
        progress.resume = 0
    if progress.steps != 0:
        percent = ((progress.steps - sum(progress.to_do)) * 100) / progress.steps
    else:
        percent = 100
    return percent
# Gather all water options windows in one class
class WaterWindows:
implicit_buttons = []
explicit_buttons = []
# Water chooser window
def choose(self, v4_water, water_v, waterbox_button, master, s_params):
gromacs = s_params.gmx_output
gromacs2 = s_params.gmx_input
root = Toplevel(master)
root.wm_title("Water Model")
v1 = IntVar(root)
v1.set(gromacs2.explicit)
v2 = IntVar(root)
v2.set(0)
radio_button2 = Radiobutton(root, text="Explicit Solvent Simulation", value=1, variable=v1,
command=lambda: self.change_e(v1.get(), v4_water, water_v, v2, s_params))
radio_button2.pack(side=TOP, anchor=W)
frame1 = Frame(root, padx=10)
frame1.pack(anchor=W)
self.explicit_buttons = []
for water in gromacs.water_list:
radio_button1 = Radiobutton(frame1, text=water[1], value=water[0], variable=v4_water,
command=lambda: self.change(v4_water, water_v, s_params))
radio_button1.pack(side=TOP, anchor=W)
self.explicit_buttons.append(radio_button1)
self.explicit_buttons.append(waterbox_button)
radio_button2 = Radiobutton(root, text="Implicit Solvent Simulation", value=0, variable=v1,
command=lambda: self.change_e(v1.get(), v4_water, water_v, v2, s_params))
radio_button2.pack(side=TOP, anchor=W)
frame2 = Frame(root, padx=10)
frame2.pack(anchor=W)
radio_button3_1 = Radiobutton(frame2, text="Still", value=0, variable=v2,
command=lambda: self.change_i(v2, s_params))
radio_button3_1.pack(side=TOP, anchor=W)
radio_button3_2 = Radiobutton(frame2, text="Hawkins-Cramer-Truhlar", value=1, variable=v2,
command=lambda: self.change_i(v2, s_params))
radio_button3_2.pack(side=TOP, anchor=W)
radio_button3_3 = Radiobutton(frame2, text="Onufriev-Bashford-Case", value=2, variable=v2,
command=lambda: self.change_i(v2))
radio_button3_3.pack(side=TOP, anchor=W)
self.implicit_buttons = [radio_button3_1, radio_button3_2, radio_button3_3]
self.change_e(gromacs2.explicit, v4_water, water_v, v2)
ok_button = Button(root, text="OK", command=root.destroy)
ok_button.pack(side=TOP)
# This function will change force field and water model when choosing Force Field in Main Window and also change
# water model after choosing one in "waterChoose"
def change(self, v4_water, water_v, s_params, force=False):
gromacs = s_params.gmx_output
gromacs2 = s_params.gmx_input
if not force:
force = gromacs2.force
else:
gromacs2.force = force
gromacs.water_update(force)
if gromacs2.explicit == 1:
water_v.set(gromacs.water_list[v4_water.get() - 1][1])
elif gromacs2.explicit == 0:
water_v.set("Implicit Solvent")
gromacs2.water = v4_water.get()
# This function changes explicit to implicit and vice versa water model
def change_e(self, value, v4_water, water_v, v2, s_params):
progress = s_params.progress
gromacs2 = s_params.gmx_input
em_file = s_params.em_file
md_file = s_params.md_file
dynamics_dir = get_dynamics_dir()
gromacs2.update({"explicit": value})
if gromacs2.explicit == 1:
for button in self.implicit_buttons:
button.configure(state=DISABLED)
for button in self.explicit_buttons:
button.configure(state=ACTIVE)
progress.to_do[2] = 1
progress.to_do[3] = 1
progress.to_do[5] = 1
# em update
if not os.path.isfile(dynamics_dir + "em.mdp"):
parameter_nr = 0
for parameter in em_file.options:
if (parameter[0] == "rlist") or (parameter[0] == ";rlist"):
em_file.update(parameter_nr, "1.0")
elif (parameter[0] == "rcoulomb") or (parameter[0] == ";rcoulomb"):
em_file.update(parameter_nr, "1.0")
elif (parameter[0] == "rvdw") or (parameter[0] == ";rvdw"):
em_file.update(parameter_nr, "1.0")
elif (parameter[0] == "implicit-solvent") or (parameter[0] == ";implicit-solvent"):
em_file.update(parameter_nr, "no")
elif (parameter[0] == "pbc") or (parameter[0] == ";pbc"):
em_file.update(parameter_nr, "no", 0)
elif (parameter[0] == "rgbradii") or (parameter[0] == ";rgbradii"):
em_file.update(parameter_nr, "0", 0)
elif (parameter[0] == "cutoff-scheme") or (parameter[0] == ";cutoff-scheme"):
em_file.update(parameter_nr, "Verlet")
elif (parameter[0] == "coulombtype") or (parameter[0] == ";coulombtype"):
em_file.update(parameter_nr, "PME")
parameter_nr = parameter_nr + 1
# md update
if not os.path.isfile(dynamics_dir + "md.mdp"):
parameter_nr = 0
for parameter in md_file.options:
if (parameter[0] == "nstlist") or (parameter[0] == ";nstlist"):
md_file.update(parameter_nr, "10")
elif (parameter[0] == "rlist") or (parameter[0] == ";rlist"):
md_file.update(parameter_nr, "1.0")
elif (parameter[0] == "rcoulomb") or (parameter[0] == ";rcoulomb"):
md_file.update(parameter_nr, "1.0")
elif (parameter[0] == "rvdw") or (parameter[0] == ";rvdw"):
md_file.update(parameter_nr, "1.0")
elif (parameter[0] == "Tcoupl") or (parameter[0] == ";Tcoupl"):
md_file.update(parameter_nr, "v-rescale")
elif (parameter[0] == "tau_t") or (parameter[0] == ";tau_t"):
md_file.update(parameter_nr, "0.1 0.1")
elif (parameter[0] == "tc-grps") or (parameter[0] == ";tc-grps"):
md_file.update(parameter_nr, "protein Non-Protein")
elif (parameter[0] == "ref_t") or (parameter[0] == ";ref_t"):
md_file.update(parameter_nr, "298 298")
elif (parameter[0] == "implicit-solvent") or (parameter[0] == ";implicit-solvent"):
md_file.update(parameter_nr, "no")
elif (parameter[0] == "pbc") or (parameter[0] == ";pbc"):
md_file.update(parameter_nr, "no", 0)
elif (parameter[0] == "rgbradii") or (parameter[0] == ";rgbradii"):
md_file.update(parameter_nr, "0", 0)
elif (parameter[0] == "comm_mode") or (parameter[0] == ";comm_mode"):
md_file.update(parameter_nr, "ANGULAR", 0)
elif (parameter[0] == "cutoff-scheme") or (parameter[0] == ";cutoff-scheme"):
md_file.update(parameter_nr, "Verlet")
elif (parameter[0] == "coulombtype") or (parameter[0] == ";coulombtype"):
md_file.update(parameter_nr, "PME")
parameter_nr = parameter_nr + 1
elif gromacs2.explicit == 0:
for button in self.implicit_buttons:
button.configure(state=ACTIVE)
for button in self.explicit_buttons:
button.configure(state=DISABLED)
progress.to_do[2] = 0
progress.to_do[3] = 0
progress.to_do[5] = 0
# em update
if not os.path.isfile(dynamics_dir + "em.mdp"):
parameter_nr = 0
for parameter in em_file.options:
if (parameter[0] == "rlist") or (parameter[0] == ";rlist"):
em_file.update(parameter_nr, "0")
elif (parameter[0] == "rcoulomb") or (parameter[0] == ";rcoulomb"):
em_file.update(parameter_nr, "0")
elif (parameter[0] == "rvdw") or (parameter[0] == ";rvdw"):
em_file.update(parameter_nr, "0")
elif (parameter[0] == "implicit-solvent") or (parameter[0] == ";implicit-solvent"):
em_file.update(parameter_nr, "GBSA")
elif (parameter[0] == "pbc") or (parameter[0] == ";pbc"):
em_file.update(parameter_nr, "no")
elif (parameter[0] == "rgbradii") or (parameter[0] == ";rgbradii"):
em_file.update(parameter_nr, "0")
elif (parameter[0] == "cutoff-scheme") or (parameter[0] == ";cutoff-scheme"):
em_file.update(parameter_nr, "group")
elif (parameter[0] == "coulombtype") or (parameter[0] == ";coulombtype"):
em_file.update(parameter_nr, "Cut-off")
parameter_nr = parameter_nr + 1
# md update
if not os.path.isfile(dynamics_dir + "md.mdp"):
parameter_nr = 0
for parameter in md_file.options:
if (parameter[0] == "nstlist") or (parameter[0] == ";nstlist"):
md_file.update(parameter_nr, "0")
elif (parameter[0] == "rlist") or (parameter[0] == ";rlist"):
md_file.update(parameter_nr, "0")
elif (parameter[0] == "rcoulomb") or (parameter[0] == ";rcoulomb"):
md_file.update(parameter_nr, "0")
elif (parameter[0] == "rvdw") or (parameter[0] == ";rvdw"):
md_file.update(parameter_nr, "0")
elif (parameter[0] == "Tcoupl") or (parameter[0] == ";Tcoupl"):
md_file.update(parameter_nr, "berendsen", 0)
elif (parameter[0] == "tau_t") or (parameter[0] == ";tau_t"):
md_file.update(parameter_nr, "0.1 0.1", 0)
elif (parameter[0] == "tc-grps") or (parameter[0] == ";tc-grps"):
md_file.update(parameter_nr, "protein Non-Protein", 0)
elif (parameter[0] == "ref_t") or (parameter[0] == ";ref_t"):
md_file.update(parameter_nr, "298 298", 0)
elif (parameter[0] == "implicit-solvent") or (parameter[0] == ";implicit-solvent"):
md_file.update(parameter_nr, "GBSA")
elif (parameter[0] == "pbc") or (parameter[0] == ";pbc"):
md_file.update(parameter_nr, "no")
elif (parameter[0] == "rgbradii") or (parameter[0] == ";rgbradii"):
md_file.update(parameter_nr, "0")
elif (parameter[0] == "comm_mode") or (parameter[0] == ";comm_mode"):
md_file.update(parameter_nr, "ANGULAR")
elif (parameter[0] == "cutoff-scheme") or (parameter[0] == ";cutoff-scheme"):
md_file.update(parameter_nr, "group")
elif (parameter[0] == "coulombtype") or (parameter[0] == ";coulombtype"):
md_file.update(parameter_nr, "Cut-off")
parameter_nr = parameter_nr + 1
self.change_i(v2, s_params)
# in implicit solvent watermodel must be set to "None"
v4_water.set(len(self.explicit_buttons) - 1)
self.change(v4_water, water_v, s_params)
# This function changes implicit water model
@staticmethod
def change_i(int_variable, s_params):
em_file = s_params.em_file
md_file = s_params.md_file
dynamics_dir = get_dynamics_dir()
if int_variable.get() == 0:
if not os.path.isfile(dynamics_dir + "em.mdp"):
parameter_nr = 0
for parameter in em_file.options:
if (parameter[0] == "gb-algorithm") or (parameter[0] == ";gb-algorithm"):
em_file.update(parameter_nr, "Still")
parameter_nr = parameter_nr + 1
if not os.path.isfile(dynamics_dir + "md.mdp"):
parameter_nr = 0
for parameter in md_file.options:
if (parameter[0] == "gb-algorithm") or (parameter[0] == ";gb-algorithm"):
md_file.update(parameter_nr, "Still")
parameter_nr = parameter_nr + 1
elif int_variable.get() == 1:
if not os.path.isfile(dynamics_dir + "em.mdp"):
parameter_nr = 0
for parameter in em_file.options:
if (parameter[0] == "gb-algorithm") or (parameter[0] == ";gb-algorithm"):
em_file.update(parameter_nr, "HCT")
parameter_nr = parameter_nr + 1
if not os.path.isfile(dynamics_dir + "md.mdp"):
parameter_nr = 0
for parameter in md_file.options:
if (parameter[0] == "gb-algorithm") or (parameter[0] == ";gb-algorithm"):
md_file.update(parameter_nr, "HCT")
parameter_nr = parameter_nr + 1
elif int_variable.get() == 2:
if not os.path.isfile(dynamics_dir + "em.mdp"):
parameter_nr = 0
for parameter in em_file.options:
if (parameter[0] == "gb-algorithm") or (parameter[0] == ";gb-algorithm"):
em_file.update(parameter_nr, "OBC")
parameter_nr = parameter_nr + 1
if not os.path.isfile(dynamics_dir + "md.mdp"):
parameter_nr = 0
for parameter in md_file.options:
if (parameter[0] == "gb-algorithm") or (parameter[0] == ";gb-algorithm"):
md_file.update(parameter_nr, "OBC")
parameter_nr = parameter_nr + 1
# Water box configuration window
@staticmethod
def box(master, s_params):
gromacs2 = s_params.gmx_input
root = Toplevel(master)
root.wm_title("Water Box Options")
root.wm_geometry("300x200")
v = StringVar(root)
v.set(gromacs2.box_type)
w = Label(root, text="Box type")
w.pack()
radio_button = Radiobutton(root, text="triclinic", value="triclinic", variable=v,
command=lambda: gromacs2.update({"box_type": v.get()}))
radio_button.pack(side=TOP, anchor=W)
radio_button = Radiobutton(root, text="cubic", value="cubic", variable=v,
command=lambda: gromacs2.update({"box_type": v.get()}))
radio_button.pack(side=TOP, anchor=W)
radio_button = Radiobutton(root, text="dodecahedron", value="dodecahedron", variable=v,
command=lambda: gromacs2.update({"box_type": v.get()}))
radio_button.pack(side=TOP, anchor=W)
radio_button = Radiobutton(root, text="octahedron", value="octahedron", variable=v,
command=lambda: gromacs2.update({"box_type": v.get()}))
radio_button.pack(side=TOP, anchor=W)
w1 = Label(root, text="Distance")
w1.pack()
distance = Entry(root)
distance.pack(side=TOP)
distance.insert(0, gromacs2.box_distance)
w2 = Label(root, text="Density [g/L]")
w2.pack()
density = Entry(root)
density.pack(side=TOP)
density.insert(0, gromacs2.box_density)
ok_button = Button(root, text="OK", command=lambda: gromacs2.update(
{"box_distance": distance.get(), "box_density": density.get()}, root))
ok_button.pack(side=TOP)
# Hydrogen configuration (for bigger time steps)
@staticmethod
def box2(master, s_params):
gromacs2 = s_params.gmx_input
root = Toplevel(master)
root.wm_title("Hydrogen options (for Pdb2gmx)")
root.wm_geometry("300x200")
v1 = StringVar(root)
v1.set(gromacs2.hydro)
w = Label(root, text="Hydrogen type (for pdb2gmx only)")
w.pack()
radio_button = Radiobutton(root, text="Normal Hydrogen", value="noheavyh", variable=v1,
command=lambda: gromacs2.update({"hydro": v1.get()}))
radio_button.pack(side=TOP, anchor=W)
radio_button = Radiobutton(root, text="Deuterium", value="deuterate", variable=v1,
command=lambda: gromacs2.update({"hydro": v1.get()}))
radio_button.pack(side=TOP, anchor=W)
radio_button = Radiobutton(root, text="Heavy Hydrogen (4amu) ", value="heavyh", variable=v1,
command=lambda: gromacs2.update({"hydro": v1.get()}))
radio_button.pack(side=TOP, anchor=W)
ok_button = Button(root, text="OK", command=root.destroy)
ok_button.pack(side=TOP)
# Options for the genion class all the options
class GenionWindow:
    """Dialog window collecting options for the GROMACS genion step."""

    def window(self, master, s_params):
        """Build the GENION options dialog on top of ``master``."""
        gromacs2 = s_params.gmx_input
        root = Toplevel(master)
        root.wm_title("GENION options")
        root.wm_geometry("300x350")
        v = StringVar(root)
        v.set(gromacs2.neutrality)
        Label(root, text="Parameters for genion").pack()
        # Neutrality radio buttons.
        for label_text, flag in (("Neutralize System", "neutral"),
                                 ("Do not Neutralize", "noneutral")):
            Radiobutton(root, text=label_text, value=flag, variable=v,
                        command=lambda: gromacs2.update({"neutrality": v.get()})
                        ).pack(side=TOP, anchor=W)
        # Three labelled entry fields, pre-filled with the current config.
        entries = []
        for caption, preset in (("Salt Concentration", gromacs2.salt_conc),
                                ("Positive Ion", gromacs2.positive_ion),
                                ("Negative Ion", gromacs2.negative_ion)):
            Label(root, text=caption).pack()
            field = Entry(root)
            field.pack(side=TOP)
            field.insert(0, preset)
            entries.append(field)
        salt, posit, negat = entries
        # OK commits all three fields and closes the dialog (update gets root).
        Button(root, text="OK", command=lambda: gromacs2.update(
            {"salt_conc": salt.get(), "positive_ion": posit.get(),
             "negative_ion": negat.get()}, root)).pack(side=TOP)
# This is the window to setup ProDy options
# def vectors_window(master, s_params):
# project_name = s_params.project_name
# if project_name != "nothing":
# root = Toplevel(master)
# root.wm_title("Vectors Configuration")
#
# frame1 = Frame(root)
# frame1.pack()
#
# v1 = IntVar(root)
# v1.set(calculation_type)
# v2 = IntVar(root)
# v2.set(contact_map)
#
# radio_button0 = Radiobutton(frame1, text="Anisotropic network model", value=0, variable=v1,
# command=lambda: block_contact(0, c1, v2))
# radio_button0.pack()
# radio_button1 = Radiobutton(frame1, text="Principal component analysis", value=1, variable=v1,
# command=lambda: block_contact(1, c1, v2))
# radio_button1.pack()
# radio_button2 = Radiobutton(frame1, text="Gaussian network model (experimental)", value=2, variable=v1,
# command=lambda: block_contact(0, c1, v2))
# radio_button2.pack()
#
# c1 = Checkbutton(frame1, text="Show Contact Map", variable=v2)
# c1.pack()
# if block_contact_map == 1:
# c1.configure(state=DISABLED)
#
# ok_button = Button(frame1, text="OK", command=lambda: options_change(v1, v2, root))
# ok_button.pack(side=TOP)
#
# elif project_name == "nothing":
# no_molecule_warning()
|
tomaszmakarewicz/Dynamics
|
pymol_plugin_dynamics.py
|
Python
|
gpl-3.0
| 125,074
|
[
"Gaussian",
"Gromacs",
"PyMOL"
] |
9ad0a9408313580a5eec2687b9ef86cba2114bc47f13d04a24eaf4fd38fafefc
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
    """Return ``samples`` distinct binary rows of length ``dimensions``."""
    # sample_without_replacement draws integers below 2 ** dimensions, which
    # only fits comfortably for <= 30 bits: split wider requests in two and
    # concatenate the halves column-wise.
    if dimensions > 30:
        head = _generate_hypercube(samples, dimensions - 30, rng)
        tail = _generate_hypercube(samples, 30, rng)
        return np.hstack([head, tail])
    codes = sample_without_replacement(2 ** dimensions, samples,
                                       random_state=rng)
    codes = astype(codes, dtype='>u4', copy=False)
    # Unpack each 32-bit big-endian code into bits, keep the low `dimensions`.
    bits = np.unpackbits(codes.view('>u1')).reshape((-1, 32))
    return bits[:, -dimensions:]
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.
    This initially creates clusters of points normally distributed (std=1)
    about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
    number of clusters to each class. It introduces interdependence between
    these features and adds various types of further noise to the data.
    Prior to shuffling, `X` stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for and remaining features.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=2)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for class membership of each sample.
    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.
    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    """
    generator = check_random_state(random_state)
    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")
    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class
    if weights and len(weights) == (n_classes - 1):
        # Copy before appending the inferred weight: the original in-place
        # append mutated the caller's list.
        weights = list(weights) + [1.0 - sum(weights)]
    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])
    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))
    # Hand out the rounding remainder one sample at a time.
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the documented replacement and yields the same dtype.
    y = np.zeros(n_samples, dtype=int)
    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)
    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)
    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster
        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance
        X_k += centroid  # shift the cluster to a vertex
    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)
    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]
    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)
    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift
    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale
    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)
        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50, allow_unlabeled=True,
                                   sparse=False, return_indicator=False,
                                   random_state=None):
    """Generate a random multilabel classification problem.
    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)
    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features.
    n_classes : int, optional (default=5)
        The number of classes of the classification problem.
    n_labels : int, optional (default=2)
        The average number of labels per instance. Number of labels follows
        a Poisson distribution that never takes the value 0.
    length : int, optional (default=50)
        Sum of the features (number of words if documents).
    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.
    sparse : bool, optional (default=False)
        If ``True``, return a sparse feature matrix
    return_indicator : bool, optional (default=False),
        If ``True``, return ``Y`` in the binary indicator format, else
        return a tuple of lists of labels. The sequence-of-sequences
        format is deprecated; a DeprecationWarning is emitted when it is
        requested.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array or sparse CSR matrix of shape [n_samples, n_features]
        The generated samples.
    Y : tuple of lists or array of shape [n_samples, n_classes]
        The label sets.
    """
    generator = check_random_state(random_state)
    # Class prior P(c): random positive values normalized to sum to one;
    # the cumulative sum lets us sample classes via searchsorted below.
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    # Per-class word distributions P(w|c): each column normalized to one.
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)
    def sample_example():
        # Returns (words, y): word indices (with repetition) and the list
        # of class labels for one synthetic document.
        _, n_classes = p_w_c.shape
        # pick a nonzero number of labels per document by rejection sampling
        # (y_size starts out-of-range so the loop always runs at least once)
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)
        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c); duplicates are absorbed
            # by the set, so we keep drawing until y_size distinct classes
            c = np.searchsorted(cumulative_p_c,
                                generator.rand(y_size - len(y)))
            y.update(c)
        y = list(y)
        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)
        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y
        # sample words with replacement from selected classes
        # (mixture of the chosen classes' word distributions, renormalized)
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
        return words, y
    # Accumulate word indices in flat CSR form (indices + indptr) so the
    # sparse matrix can be built in one shot afterwards.
    X_indices = array.array('i')
    X_indptr = array.array('i', [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr),
                      shape=(n_samples, n_features))
    # Repeated words in a document become counts > 1.
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()
    if return_indicator:
        lb = MultiLabelBinarizer()
        Y = lb.fit([range(n_classes)]).transform(Y)
    else:
        warnings.warn('Support for the sequence of sequences multilabel '
                      'representation is being deprecated and replaced with '
                      'a sparse indicator matrix. '
                      'return_indicator will default to True from version '
                      '0.17.',
                      DeprecationWarning)
    return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generate the binary classification data of Hastie et al. 2009, Ex. 10.2.

    All ten features are independent standard Gaussians; the target is::

        y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.
    """
    rng = check_random_state(random_state)
    dims = (n_samples, 10)
    X = rng.normal(size=dims).reshape(dims)
    # 9.34 is roughly the median of a chi-squared with 10 degrees of freedom.
    y = np.where((X ** 2.0).sum(axis=1) > 9.34, 1.0, -1.0)
    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.

    The input matrix is either well conditioned (the default) or has a
    low-rank, fat-tail singular profile (see `make_low_rank_matrix`).
    The target is produced by a (possibly biased) random linear model with
    `n_informative` non-zero coefficients, plus optional Gaussian noise.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    n_informative : int, optional (default=10)
        The number of features actually used by the linear model that
        generates the output (capped at `n_features`).
    n_targets : int, optional (default=1)
        The number of regression targets per sample. By default the output
        is a scalar.
    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.
    effective_rank : int or None, optional (default=None)
        If not None, the approximate number of singular vectors needed to
        explain most of the input; reproduces correlations seen in
        practice. If None, the input is well conditioned, centered and
        Gaussian with unit variance.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular
        values profile when `effective_rank` is not None.
    noise : float, optional (default=0.0)
        The standard deviation of the Gaussian noise applied to the output.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    coef : boolean, optional (default=False)
        If True, also return the coefficients of the underlying model.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.
    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficients of the underlying linear model; only returned when
        ``coef`` is True.
    """
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)
    if effective_rank is None:
        # Well-conditioned, standard-normal input set.
        X = generator.randn(n_samples, n_features)
    else:
        # Low-rank input with a fat singular-value tail.
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=generator)
    # Only the first n_informative coefficients are non-zero, so the
    # remaining features carry no signal and should be dropped by a
    # sparsifying regularizer such as L1 or elastic net.
    true_coef = np.zeros((n_features, n_targets))
    true_coef[:n_informative, :] = 100 * generator.rand(n_informative,
                                                        n_targets)
    y = np.dot(X, true_coef) + bias
    # Optional additive Gaussian noise on the target.
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)
    # Randomly permute samples and features (coefficients follow the
    # feature permutation so they stay aligned with the columns of X).
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
        true_coef = true_coef[indices]
    y = np.squeeze(y)
    if coef:
        return X, y, np.squeeze(true_coef)
    return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.
    A simple toy dataset to visualize clustering and classification
    algorithms.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated. For odd values, one fewer
        point is produced so both circles get the same number of points.
    shuffle: bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling shuffling and noise.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.
    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")
    generator = check_random_state(random_state)
    # Use floor division: under Python 3, `n_samples / 2` is a float and
    # np.linspace rejects a non-integer sample count.
    n_per_circle = n_samples // 2
    # so as not to have the first point = last point, we add one and then
    # remove it.
    linspace = np.linspace(0, 2 * np.pi, n_per_circle + 1)[:-1]
    outer_circ_x = np.cos(linspace)
    outer_circ_y = np.sin(linspace)
    # The inner circle is the outer one scaled towards the origin.
    inner_circ_x = outer_circ_x * factor
    inner_circ_y = outer_circ_y * factor
    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # Label 0 = outer circle (first rows), label 1 = inner circle.
    y = np.hstack([np.zeros(n_per_circle, dtype=np.intp),
                   np.ones(n_per_circle, dtype=np.intp)])
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)
    return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles
    A simple toy dataset to visualize clustering and classification
    algorithms.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling shuffling and noise.
    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    # Use floor division: under Python 3, `n_samples / 2` is a float and
    # np.linspace rejects a non-integer sample count.
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out
    generator = check_random_state(random_state)
    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # The first n_samples_out rows are the outer moon (label 0), the rest
    # the inner moon (label 1). The original swapped the two counts, which
    # mislabelled the boundary point whenever n_samples was odd.
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)
    return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.
    n_features : int, optional (default=2)
        The number of features for each sample.
    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    """
    generator = check_random_state(random_state)
    # An integer `centers` means "draw that many random centers in the box";
    # otherwise the caller supplied explicit center coordinates.
    if isinstance(centers, numbers.Integral):
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]
    n_centers = centers.shape[0]
    # Split n_samples as evenly as possible; the first (n_samples % n_centers)
    # clusters absorb one extra point each.
    base, extra = divmod(n_samples, n_centers)
    counts = [base + 1 if k < extra else base for k in range(n_centers)]
    sample_blocks = []
    labels = []
    for cluster_id, count in enumerate(counts):
        sample_blocks.append(
            centers[cluster_id] + generator.normal(scale=cluster_std,
                                                   size=(count, n_features)))
        labels += [cluster_id] * count
    X = np.concatenate(sample_blocks)
    y = np.array(labels)
    if shuffle:
        # Apply one shared permutation so X rows and labels stay aligned.
        perm = np.arange(n_samples)
        generator.shuffle(perm)
        X = X[perm]
        y = y[perm]
    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman \#1" regression problem

    This dataset is described in Friedman [1] and Breiman [2].
    Inputs `X` are independent features uniformly distributed on the interval
    [0, 1]. The output `y` is created according to the formula::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Out of the `n_features` features, only 5 are actually used to compute
    `y`. The remaining features are independent of `y`. The number of
    features has to be >= 5.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features. Should be at least 5.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")
    rng = check_random_state(random_state)
    X = rng.rand(n_samples, n_features)
    # Only the first five columns enter the target; break the formula into
    # its three named terms for readability.
    sine_term = 10 * np.sin(np.pi * X[:, 0] * X[:, 1])
    quadratic_term = 20 * (X[:, 2] - 0.5) ** 2
    linear_term = 10 * X[:, 3] + 5 * X[:, 4]
    y = sine_term + quadratic_term + linear_term + noise * rng.randn(n_samples)
    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman \#2" regression problem

    This dataset is described in Friedman [1] and Breiman [2].
    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
                - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)
    # Draw on the unit hypercube, then rescale each column onto the
    # documented intervals (column 2 already lives on [0, 1]).
    X = rng.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1
    cross_term = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = (X[:, 0] ** 2 + cross_term ** 2) ** 0.5 + noise * rng.randn(n_samples)
    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman \#3" regression problem

    This dataset is described in Friedman [1] and Breiman [2].
    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
               / X[:, 0]) + noise * N(0, 1).

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)
    # Draw on the unit hypercube, then rescale onto the documented intervals.
    X = rng.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1
    ratio = (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]
    y = np.arctan(ratio) + noise * rng.randn(n_samples)
    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors). This kind of singular profiles is often
    seen in practice, for instance:

    - gray level pictures of faces
    - TF-IDF vectors of text documents crawled from the web

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    rng = check_random_state(random_state)
    rank_limit = min(n_samples, n_features)
    # Random orthonormal bases for the row and column spaces.
    left_basis, _ = linalg.qr(rng.randn(n_samples, rank_limit),
                              mode='economic')
    right_basis, _ = linalg.qr(rng.randn(n_features, rank_limit),
                               mode='economic')
    # Assemble the singular-value profile: bell-shaped signal plus fat tail.
    index = np.arange(rank_limit, dtype=np.float64)
    signal = ((1 - tail_strength) *
              np.exp(-1.0 * (index / effective_rank) ** 2))
    fat_tail = tail_strength * np.exp(-0.1 * index / effective_rank)
    sigma = np.identity(rank_limit) * (signal + fat_tail)
    return np.dot(np.dot(left_basis, sigma), right_basis.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such as D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Parameters
    ----------
    n_samples : int
        number of samples to generate
    n_components : int
        number of components in the dictionary
    n_features : int
        number of features of the dataset to generate
    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample
    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    generator = check_random_state(random_state)
    # generate dictionary with unit L2-norm columns
    D = generator.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))
    # generate code: each column gets n_nonzero_coefs active entries at
    # randomly chosen component indices
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.randn(n_nonzero_coefs)
    # encode signal
    Y = np.dot(D, X)
    # Return concrete arrays rather than `map(np.squeeze, ...)`: under
    # Python 3 `map` is a single-use iterator, so the old return value
    # could not be indexed or iterated twice.
    Y, D, X = (np.squeeze(arr) for arr in (Y, D, X))
    return Y, D, X
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative. The remaining features are
    useless.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    rng = check_random_state(random_state)
    X = rng.normal(loc=0, scale=1, size=(n_samples, n_features))
    # Only the first four columns carry signal; the target is a noisy
    # (unit-variance) draw centered on their linear combination.
    target_mean = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
    y = rng.normal(loc=target_mean, scale=np.ones(n_samples))
    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.
    """
    rng = check_random_state(random_state)
    A = rng.rand(n_dim, n_dim)
    # SVD of the (symmetric, PSD) Gram matrix A.T A, then rebuild with a
    # perturbed core to push the spectrum away from zero.
    U, _, V = linalg.svd(np.dot(A.T, A))
    core = 1.0 + np.diag(rng.rand(n_dim))
    return np.dot(np.dot(U, core), V)
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.

    Parameters
    ----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.
    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is non zero (see notes).
    norm_diag : boolean, optional (default=False)
        Whether to rescale the result to have a unit diagonal.
    smallest_coef : float, optional (default=0.1)
        Smallest magnitude of the off-diagonal Cholesky coefficients.
    largest_coef : float, optional (default=0.9)
        Largest magnitude of the off-diagonal Cholesky coefficients.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    prec : array of shape = [dim, dim]

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.
    """
    rng = check_random_state(random_state)
    chol = -np.eye(dim)
    # Draw candidate coefficients; keep an entry with probability ~(1-alpha)
    # and rescale the survivors into [smallest_coef, largest_coef].
    coefs = rng.rand(dim, dim)
    coefs[coefs < alpha] = 0
    keep = coefs > alpha
    coefs[keep] = (smallest_coef
                   + (largest_coef - smallest_coef) * rng.rand(np.sum(keep)))
    # Restrict to the strictly lower triangle to form a Cholesky factor.
    coefs = np.tril(coefs, k=-1)
    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    perm = rng.permutation(dim)
    coefs = coefs[perm].T[perm]
    chol += coefs
    prec = np.dot(chol.T, chol)
    if norm_diag:
        scale = 1. / np.sqrt(np.diag(prec))
        prec *= scale
        prec *= scale[:, np.newaxis]
    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perpsective",
           Chapter 10, 2009.
           http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
    """
    rng = check_random_state(random_state)
    # t parametrizes position along the spiral; (x, z) trace the roll while
    # y spreads the points along the roll's width.
    t = 1.5 * np.pi * (1 + 2 * rng.rand(1, n_samples))
    roll_x = t * np.cos(t)
    width = 21 * rng.rand(1, n_samples)
    roll_z = t * np.sin(t)
    X = np.concatenate((roll_x, width, roll_z))
    X += noise * rng.randn(3, n_samples)
    return X.T, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    rng = check_random_state(random_state)
    # t parametrizes position along the S; y spreads points across the
    # sheet's depth.
    t = 3 * np.pi * (rng.rand(1, n_samples) - 0.5)
    curve_x = np.sin(t)
    depth = 2.0 * rng.rand(1, n_samples)
    curve_z = np.sign(t) * (np.cos(t) - 1)
    X = np.concatenate((curve_x, depth, curve_z))
    X += noise * rng.randn(3, n_samples)
    return X.T, np.squeeze(t)
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    """Generate isotropic Gaussian and label samples by quantile

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).
    cov : float, optional (default=1.)
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.
    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.
    n_features : int, optional (default=2)
        The number of features for each sample.
    n_classes : int, optional (default=3)
        The number of classes
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")
    rng = check_random_state(random_state)
    mean = np.zeros(n_features) if mean is None else np.array(mean)
    # Draw from an isotropic Gaussian centered at `mean`.
    X = rng.multivariate_normal(mean, cov * np.identity(n_features),
                                (n_samples,))
    # Order samples by squared distance from the mean so consecutive runs
    # of `step` samples form the quantile shells.
    order = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[order, :]
    step = n_samples // n_classes
    # Leftover samples (n_samples not divisible by n_classes) go to the
    # outermost class.
    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])
    if shuffle:
        X, y = util_shuffle(X, y, random_state=rng)
    return X, y
def _shuffle(data, random_state=None):
    """Permute rows and columns of ``data`` independently.

    Returns the shuffled array along with the row and column permutations
    that were applied, so callers can realign any per-row/per-column labels.
    """
    rng = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_perm = rng.permutation(n_rows)
    col_perm = rng.permutation(n_cols)
    return data[row_perm][:, col_perm], row_perm, col_perm
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.
    n_clusters : integer
        The number of biclusters.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    minval : int, optional (default=10)
        Minimum value of a bicluster.
    maxval : int, optional (default=100)
        Maximum value of a bicluster.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.
    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.
    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)
    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), col_sizes)))
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]
    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)
    if shuffle:
        # NOTE(review): this passes the original `random_state` (not the
        # shared `generator`), so an int seed re-seeds here — confirm this
        # is intentional before changing.
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]
    # np.vstack requires a sequence of arrays; passing a bare generator
    # expression is an error in modern NumPy, so build lists explicitly.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
    return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.
    n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
        The number of row and column clusters.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    minval : int, optional (default=10)
        Minimum value of a bicluster.
    maxval : int, optional (default=100)
        Maximum value of a bicluster.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.
    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.
    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.
    """
    generator = check_random_state(random_state)
    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters
    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))
    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_row_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_col_clusters), col_sizes)))
    result = np.zeros(shape, dtype=np.float64)
    # Each (row cluster, column cluster) block gets its own constant value.
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)
    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)
    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]
    # np.vstack requires a sequence of arrays; passing a bare generator
    # expression is an error in modern NumPy, so build lists explicitly.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
    return result, rows, cols
|
soulmachine/scikit-learn
|
sklearn/datasets/samples_generator.py
|
Python
|
bsd-3-clause
| 53,169
|
[
"Gaussian"
] |
6ae48e4ecee6e5885559ffe28cc77fd1adc02653e79caeae61190dc14de4ab43
|
import argparse
import inspect
import logging
import json
import base64
from docstring_parser import parse
from collections import namedtuple
from flask import Flask, request
from flask_restx import Api, Resource, fields, abort
from flask_cors import CORS
from indra import get_config
from indra.sources import trips, reach, bel, biopax, eidos, hume, cwms, sofia
from indra.databases import hgnc_client
from indra.statements import stmts_from_json, get_statement_by_name
from indra.assemblers.pysb import PysbAssembler
import indra.assemblers.pysb.assembler as pysb_assembler
from indra.assemblers.cx import CxAssembler
from indra.assemblers.graph import GraphAssembler
from indra.assemblers.cyjs import CyJSAssembler
from indra.assemblers.sif import SifAssembler
from indra.assemblers.english import EnglishAssembler
from indra.tools.assemble_corpus import *
from indra.databases import cbio_client
from indra.sources.indra_db_rest import get_statements
from indra.sources.ndex_cx.api import process_ndex_network
from indra.sources.reach.api import reach_nxml_url, reach_text_url
from indra.belief.wm_scorer import get_eidos_scorer
from indra.ontology.bio import bio_ontology
from indra.ontology.world import world_ontology
from indra.pipeline import AssemblyPipeline, pipeline_functions
from indra.preassembler.custom_preassembly import *
# Module-level logger for the REST API service.
logger = logging.getLogger('rest_api')
logger.setLevel(logging.DEBUG)
# Create Flask app, api, namespaces, and models
app = Flask(__name__)
api = Api(
    app, title='INDRA REST API', description='REST API for INDRA webservice')
CORS(app)
# Namespaces group related endpoints under a common URL prefix in Swagger.
preassembly_ns = api.namespace(
    'Preassembly', 'Preassemble INDRA Statements', path='/preassembly/')
sources_ns = api.namespace(
    'Sources', 'Get INDRA Statements from various sources', path='/')
assemblers_ns = api.namespace(
    'Assemblers', 'Assemble INDRA Statements into models', path='/assemblers/')
ndex_ns = api.namespace('NDEx', 'Use NDEx service', path='/')
indra_db_rest_ns = api.namespace(
    'INDRA DB REST', 'Use INDRA DB REST API', path='/indra_db_rest/')
databases_ns = api.namespace(
    'Databases', 'Access external databases', path='/databases/')
# Models that can be inherited and reused in different namespaces
# Generic free-form JSON object model.
dict_model = api.model('dict', {})
# Model for a list of INDRA Statement JSONs; the example shows a Complex
# statement with two members and one piece of evidence.
stmts_model = api.model('Statements', {
    'statements': fields.List(fields.Nested(dict_model), example=[{
        "id": "acc6d47c-f622-41a4-8ae9-d7b0f3d24a2f",
        "type": "Complex",
        "members": [
            {"db_refs": {"TEXT": "MEK", "FPLX": "MEK"}, "name": "MEK"},
            {"db_refs": {"TEXT": "ERK", "FPLX": "ERK"}, "name": "ERK"}
        ],
        "sbo": "https://identifiers.org/SBO:0000526",
        "evidence": [{"text": "MEK binds ERK", "source_api": "trips"}]
    }])})
# Input models for the biology and world-modeling reader endpoints.
bio_text_model = api.model('BioText', {
    'text': fields.String(example='GRB2 binds SHC.')})
wm_text_model = api.model('WMText', {
    'text': fields.String(example='Rainfall causes floods.')})
jsonld_model = api.model('jsonld', {
    'jsonld': fields.String(example='{}')})
genes_model = api.model('Genes', {
    'genes': fields.List(fields.String, example=['BRAF', 'MAP2K1'])})
# Store the arguments by type
# These lists drive make_preassembly_model: each argument name is mapped to
# the matching flask_restx field type when building input models.
int_args = ['poolsize', 'size_cutoff']
float_args = ['score_threshold', 'belief_cutoff']
boolean_args = [
    'do_rename', 'use_adeft', 'do_methionine_offset', 'do_orthology_mapping',
    'do_isoform_mapping', 'use_cache', 'return_toplevel', 'flatten_evidence',
    'normalize_equivalences', 'normalize_opposites', 'invert', 'remove_bound',
    'specific_only', 'allow_families', 'match_suffix', 'update_belief']
list_args = [
    'gene_list', 'name_list', 'values', 'source_apis', 'uuids', 'curations',
    'correct_tags', 'ignores', 'deletions']
dict_args = [
    'grounding_map', 'misgrounding_map', 'whitelist', 'mutations']
def _return_stmts(stmts):
if stmts:
stmts_json = stmts_to_json(stmts)
res = {'statements': stmts_json}
else:
res = {'statements': []}
return res
def _stmts_from_proc(proc):
if proc and proc.statements:
stmts = stmts_to_json(proc.statements)
res = {'statements': stmts}
else:
res = {'statements': []}
return res
# Create Resources in Preassembly Namespace
# Manually add preassembly resources not based on assembly corpus functions
# Input model for the /preassembly/pipeline endpoint: statements plus a list
# of pipeline step dicts (each with a 'function' key and optional
# 'args'/'kwargs').
pipeline_model = api.inherit('Pipeline', stmts_model, {
    'pipeline': fields.List(fields.Nested(dict_model), example=[
        {'function': 'filter_grounded_only'},
        {'function': 'run_preassembly', 'kwargs': {'return_toplevel': False}}
    ])
})
# There's an extra blank line between parameters here and in all the following
# docstrings for better visualization in Swagger
@preassembly_ns.expect(pipeline_model)
@preassembly_ns.route('/pipeline')
class RunPipeline(Resource):
    # Resource that runs an arbitrary AssemblyPipeline over posted
    # Statements. The post() docstring below is rendered by Swagger, so it
    # is written in NumPy-doc style rather than as a one-liner.
    @api.doc(False)
    def options(self):
        # CORS preflight handler; hidden from the Swagger docs.
        return {}

    def post(self):
        """Run an assembly pipeline for a list of Statements.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to run the pipeline.

        pipeline : list[dict]
            A list of dictionaries representing steps in the pipeline. Each
            step should have a 'function' key and, if appropriate, 'args' and
            'kwargs' keys. For more documentation and examples, see
            https://indra.readthedocs.io/en/latest/modules/pipeline.html

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            The list of INDRA Statements resulting from running the pipeline
            on the list of input Statements.
        """
        # Deserialize input statements, run the configured pipeline steps,
        # and serialize the result back to JSON.
        args = request.json
        stmts = stmts_from_json(args.get('statements'))
        pipeline_steps = args.get('pipeline')
        ap = AssemblyPipeline(pipeline_steps)
        stmts_out = ap.run(stmts)
        return _return_stmts(stmts_out)
# Dynamically generate resources for assembly corpus functions
class PreassembleStatements(Resource):
    """Parent Resource for Preassembly resources."""
    # Name of the assemble_corpus function this resource dispatches to;
    # set by subclasses generated dynamically elsewhere in the module.
    func_name = None

    def process_args(self, args_json):
        # Convert JSON-encoded arguments into the Python objects that the
        # assemble_corpus functions expect.
        for arg in args_json:
            if arg == 'stmt_type':
                # Statement type name -> Statement class.
                args_json[arg] = get_statement_by_name(args_json[arg])
            elif arg in ['matches_fun', 'refinement_fun']:
                # Function name -> registered pipeline function.
                args_json[arg] = pipeline_functions[args_json[arg]]
            elif arg == 'curations':
                # Curations arrive as dicts; convert to lightweight
                # namedtuples with the expected attribute names.
                Curation = namedtuple(
                    'Curation', ['pa_hash', 'source_hash', 'tag'])
                args_json[arg] = [
                    Curation(cur['pa_hash'], cur['source_hash'], cur['tag'])
                    for cur in args_json[arg]]
            elif arg == 'belief_scorer':
                # 'wm' selects the Eidos/world-modeling scorer; anything
                # else falls back to the default (None).
                if args_json[arg] == 'wm':
                    args_json[arg] = get_eidos_scorer()
                else:
                    args_json[arg] = None
            elif arg == 'ontology':
                # 'wm' selects the world ontology; default is bio ontology.
                if args_json[arg] == 'wm':
                    args_json[arg] = world_ontology
                else:
                    args_json[arg] = bio_ontology
            elif arg == 'whitelist' or arg == 'mutations':
                # JSON lists -> tuples, as expected by the downstream code.
                args_json[arg] = {
                    gene: [tuple(mod) for mod in mods]
                    for gene, mods in args_json[arg].items()}
        return args_json

    @api.doc(False)
    def options(self):
        # CORS preflight handler; hidden from the Swagger docs.
        return {}

    def post(self):
        # Deserialize, run the selected assemble_corpus function with the
        # converted keyword arguments, and return the JSON response.
        args = self.process_args(request.json)
        stmts = stmts_from_json(args.pop('statements'))
        stmts_out = pipeline_functions[self.func_name](stmts, **args)
        return _return_stmts(stmts_out)
def make_preassembly_model(func):
    """Create a new Flask-RESTX input model from a function's signature.

    Reuses the plain ``stmts_model`` when *func* accepts only statements
    (optionally with ``**kwargs``); otherwise builds a model whose fields
    mirror the remaining parameters, typed according to the module-level
    ``boolean_args``/``int_args``/``float_args``/``list_args``/``dict_args``
    registries.
    """
    params = inspect.signature(func).parameters
    stmt_params = {'stmts_in', 'stmts'}
    takes_stmts = bool(stmt_params & set(params))
    # Only stmts (or stmts + **kwargs): nothing extra to model.
    if takes_stmts and (len(params) == 1 or
                       (len(params) == 2 and 'kwargs' in params)):
        return stmts_model
    # Otherwise inherit from the statements model and add one field per
    # remaining parameter.
    model_fields = {}
    skipped = stmt_params | {'kwargs'}
    for name, param in params.items():
        if name in skipped:
            continue
        default = (param.default
                   if param.default is not inspect.Parameter.empty else None)
        # Booleans use `default`; other types surface the default as an
        # example value instead.
        if name in boolean_args:
            field = fields.Boolean(default=default)
        elif name in int_args:
            field = fields.Integer(example=default)
        elif name in float_args:
            field = fields.Float(example=0.7)
        elif name in list_args:
            if name == 'curations':
                field = fields.List(
                    fields.Nested(dict_model),
                    example=[{'pa_hash': '1234', 'source_hash': '2345',
                              'tag': 'wrong_relation'}])
            else:
                field = fields.List(fields.String, example=default)
        elif name in dict_args:
            field = fields.Nested(dict_model)
        else:
            field = fields.String(example=default)
        model_fields[name] = field
    return api.inherit('%s_input' % func.__name__, stmts_model, model_fields)
def update_docstring(func):
    """Build a web-facing docstring for an assembly-corpus function.

    Rewrites the parameter descriptions of *func* so they describe the
    JSON values accepted by the REST API (statements travel as JSON,
    functions/scorers/ontologies are referenced by name) and drops
    parameters that are not exposed through the API.

    Parameters
    ----------
    func : function
        The assembly-corpus function whose docstring is adapted.

    Returns
    -------
    tuple of (str, str)
        The function's short description and the full rewritten docstring.
    """
    docstring = parse(func.__doc__)
    new_doc = docstring.short_description + '\n\n'
    if docstring.long_description:
        new_doc += (docstring.long_description + '\n\n')
    new_doc += ('Parameters\n----------\n')
    for param in docstring.params:
        # File-saving options are not exposed through the web API.
        if param.arg_name in ['save', 'save_unique']:
            continue
        elif param.arg_name in ['stmts', 'stmts_in']:
            param.arg_name = 'statements'
            param.type_name = 'list[indra.statements.Statement.to_json()]'
        elif param.arg_name == 'belief_scorer':
            param.type_name = 'Optional[str] or None'
            param.description = (
                'Type of BeliefScorer to use in calculating Statement '
                'probabilities. If None is provided (default), then the '
                'default scorer is used (good for biology use case). '
                'For WorldModelers use case belief scorer should be set '
                'to "wm".')
        elif param.arg_name == 'ontology':
            param.type_name = 'Optional[str] or None'
            # Fixed: a space was missing between the two sentences here
            # ("used.For" was rendered in the generated API docs).
            param.description = (
                'Type of ontology to use for preassembly ("bio" or "wm"). '
                'If None is provided (default), then the bio ontology is '
                'used. For WorldModelers use case ontology should be set '
                'to "wm".')
        elif param.arg_name in ['matches_fun', 'refinement_fun']:
            param.type_name = 'str'
        elif param.arg_name == 'curations':
            param.type_name = 'list[dict]'
            param.description = (
                'A list of dictionaries representing curations. Each '
                'dictionary must have "pa_hash" (preassembled statement hash)'
                ', "source_hash", (evidence hash) and "tag" (e.g. "correct", '
                '"wrong_relation", etc.) keys.')
        new_doc += (param.arg_name + ' : ' + param.type_name + '\n' +
                    param.description + '\n\n')
    # Underline length matches the section title (numpydoc convention,
    # consistent with the other docstrings in this module); it was
    # previously ten dashes.
    new_doc += 'Returns\n-------\n'
    new_doc += 'statements : list[indra.statements.Statement.to_json()]\n'
    new_doc += 'A list of processed INDRA Statements'
    return docstring.short_description, new_doc
# Create resources for each of assembly_corpus functions
for func_name, func in pipeline_functions.items():
    if func.__module__ == 'indra.tools.assemble_corpus':
        doc = ''
        short_doc = ''
        # Get the function description from docstring
        if func.__doc__:
            short_doc, doc = update_docstring(func)
        new_model = make_preassembly_model(func)
        # One Resource subclass per assembly-corpus function, registered
        # at /<func_name>. The class-body assignment below reads the
        # module-level loop variable func_name (class bodies do not form
        # closures, so the global binding is what is captured).
        @preassembly_ns.expect(new_model)
        @preassembly_ns.route(('/%s' % func_name),
                              doc={'summary': short_doc})
        class NewFunction(PreassembleStatements):
            func_name = func_name
            def post(self):
                return super().post()
            # Surface the rewritten docstring in the generated API docs.
            post.__doc__ = doc
# Create resources for Sources namespace
# REACH
# Input models for the REACH endpoints: text reading inherits the shared
# bio text model and adds REACH-specific options.
reach_text_model = api.inherit('ReachText', bio_text_model, {
    'offline': fields.Boolean(default=False),
    'url': fields.String(example=reach_text_url)
})
reach_json_model = api.model('ReachJSON', {'json': fields.String(example='{}')})
reach_pmc_model = api.model('ReachPMC', {
    'pmcid': fields.String(example='PMC3717945'),
    'offline': fields.Boolean(default=False),
    'url': fields.String(example=reach_nxml_url)
})
@sources_ns.expect(reach_text_model)
@sources_ns.route('/reach/process_text')
class ReachProcessText(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process text with REACH and return INDRA Statements.

        Parameters
        ----------
        text : str
            The text to be processed.
        offline : Optional[bool]
            If set to True, the REACH system is run offline via a JAR file.
            Otherwise (by default) the web service is called. Default: False
        url : Optional[str]
            URL for a REACH web service instance, which is used for reading if
            provided. If not provided but offline is set to False (its default
            value), REACH_TEXT_URL set in configuration will be used. If not
            provided in configuration, the Arizona REACH web service is called
            (http://agathon.sista.arizona.edu:8080/odinweb/api/help).
            Default: None

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        text = args.get('text')
        offline = True if args.get('offline') else False
        given_url = args.get('url')
        config_url = get_config('REACH_TEXT_URL', failure_ok=True)
        # Order: URL given as an explicit argument in the request. Then any URL
        # set in the configuration. Then, unless offline is set, use the
        # default REACH web service URL.
        if 'url' in args:  # This is to take None if explicitly given
            url = given_url
        elif config_url:
            url = config_url
        elif not offline:
            url = reach_text_url
        else:
            url = None
        # If a URL is set, prioritize it over the offline setting
        if url:
            offline = False
        rp = reach.process_text(text, offline=offline, url=url)
        return _stmts_from_proc(rp)
@sources_ns.expect(reach_json_model)
@sources_ns.route('/reach/process_json')
class ReachProcessJson(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process REACH json and return INDRA Statements.

        Parameters
        ----------
        json : str
            The json string to be processed.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        reach_json_str = request.json.get('json')
        processor = reach.process_json_str(reach_json_str)
        return _stmts_from_proc(processor)
@sources_ns.expect(reach_pmc_model)
@sources_ns.route('/reach/process_pmc')
class ReachProcessPmc(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process PubMedCentral article and return INDRA Statements.

        Parameters
        ----------
        pmc_id : str
            The ID of a PubmedCentral article. The string may start with PMC
            but passing just the ID also works.
            Examples: 3717945, PMC3717945
            https://www.ncbi.nlm.nih.gov/pmc/
        offline : Optional[bool]
            If set to True, the REACH system is run offline via a JAR file.
            Otherwise (by default) the web service is called. Default: False
        url : Optional[str]
            URL for a REACH web service instance, which is used for reading if
            provided. If not provided but offline is set to False (its default
            value), REACH_NXML_URL set in configuration will be used. If not
            provided in configuration, the Arizona REACH web service is called
            (http://agathon.sista.arizona.edu:8080/odinweb/api/help).
            Default: None

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        pmcid = args.get('pmcid')
        offline = True if args.get('offline') else False
        given_url = args.get('url')
        config_url = get_config('REACH_NXML_URL', failure_ok=True)
        # Order: URL given as an explicit argument in the request. Then any URL
        # set in the configuration. Then, unless offline is set, use the
        # default REACH web service URL.
        if 'url' in args:  # This is to take None if explicitly given
            url = given_url
        elif config_url:
            url = config_url
        elif not offline:
            url = reach_nxml_url
        else:
            url = None
        # If a URL is set, prioritize it over the offline setting
        if url:
            offline = False
        rp = reach.process_pmc(pmcid, offline=offline, url=url)
        return _stmts_from_proc(rp)
# TRIPS
# Input model for the EKB XML endpoint below.
xml_model = api.model('XML', {'xml_str': fields.String})
@sources_ns.expect(bio_text_model)
@sources_ns.route('/trips/process_text')
class TripsProcessText(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process text with TRIPS and return INDRA Statements.

        Parameters
        ----------
        text : str
            The text to be processed.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        input_text = request.json.get('text')
        processor = trips.process_text(input_text)
        return _stmts_from_proc(processor)
@sources_ns.expect(xml_model)
@sources_ns.route('/trips/process_xml')
class TripsProcessXml(Resource):
    # Renamed from TripsProcessText: the original definition reused the
    # name of the /trips/process_text resource class above, silently
    # shadowing it at module level. The registered route is unchanged.
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process TRIPS EKB XML and return INDRA Statements.

        Parameters
        ----------
        xml_string : str
            A TRIPS extraction knowledge base (EKB) string to be processed.
            http://trips.ihmc.us/parser/api.html

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        xml_str = args.get('xml_str')
        tp = trips.process_xml(xml_str)
        return _stmts_from_proc(tp)
# Sofia
# Text model plus an optional [username, password] pair for the service.
text_auth_model = api.inherit('TextAuth', wm_text_model, {
    'auth': fields.List(fields.String, example=['USER', 'PASS'])})
# Hide documentation because webservice is unresponsive
@sources_ns.expect(text_auth_model)
@sources_ns.route('/sofia/process_text', doc=False)
class SofiaProcessText(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process text with Sofia and return INDRA Statements.

        Parameters
        ----------
        text : str
            A string containing the text to be processed with Sofia.
        auth : Optional[list]
            A username/password pair for the Sofia web service. If not given,
            the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either
            the INDRA config or the environment.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        text = args.get('text')
        auth = args.get('auth')
        sp = sofia.process_text(text, auth=auth)
        return _stmts_from_proc(sp)
# Eidos
# grounding_ns restricts which grounding name spaces are propagated.
eidos_text_model = api.inherit('EidosText', wm_text_model, {
    'webservice': fields.String,
    'grounding_ns': fields.String(example='WM')
})
eidos_jsonld_model = api.inherit('EidosJsonld', jsonld_model, {
    'grounding_ns': fields.String(example='WM')
})
# Hide docs until webservice is available
@sources_ns.expect(eidos_text_model)
@sources_ns.route('/eidos/process_text', doc=False)
class EidosProcessText(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process text with EIDOS and return INDRA Statements.

        Parameters
        ----------
        text : str
            The text to be processed.
        webservice : str
            An Eidos reader web service URL to send the request to.
            Required by this endpoint: if missing or empty the request is
            rejected with HTTP 400 (JAR-based local reading is not
            available through the web API).
        grounding_ns : Optional[list]
            A list of name spaces for which INDRA should represent groundings,
            when given. If not specified or None, all grounding name spaces are
            propagated. If an empty list, no groundings are propagated.
            Example: ['UN', 'WM'], Default: None

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        text = args.get('text')
        webservice = args.get('webservice')
        grounding_ns = args.get('grounding_ns')
        if not webservice:
            abort(400, 'No web service address provided.')
        ep = eidos.process_text(text, webservice=webservice,
                                grounding_ns=grounding_ns)
        return _stmts_from_proc(ep)
@sources_ns.expect(eidos_jsonld_model)
@sources_ns.route('/eidos/process_jsonld')
class EidosProcessJsonld(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process an EIDOS JSON-LD and return INDRA Statements.

        Parameters
        ----------
        jsonld : str
            The JSON-LD string to be processed.
        grounding_ns : Optional[list]
            A list of name spaces for which INDRA should represent groundings,
            when given. If not specified or None, all grounding name spaces are
            propagated. If an empty list, no groundings are propagated.
            Example: ['UN', 'WM'], Default: None

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        eidos_json = args.get('jsonld')
        grounding_ns = args.get('grounding_ns')
        # The processor accepts the raw JSON-LD string directly.
        ep = eidos.process_json_str(eidos_json, grounding_ns=grounding_ns)
        return _stmts_from_proc(ep)
# Hume
@sources_ns.expect(jsonld_model)
@sources_ns.route('/hume/process_jsonld')
class HumeProcessJsonld(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process Hume JSON-LD and return INDRA Statements.

        Parameters
        ----------
        jsonld : str
            The JSON-LD string to be processed.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        jsonld_str = args.get('jsonld')
        # Unlike the Eidos endpoint, this processor expects a parsed
        # JSON-LD dict rather than the raw string.
        jsonld = json.loads(jsonld_str)
        hp = hume.process_jsonld(jsonld)
        return _stmts_from_proc(hp)
# CWMS
@sources_ns.expect(wm_text_model)
@sources_ns.route('/cwms/process_text')
class CwmsProcessText(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process text with CWMS and return INDRA Statements.

        Parameters
        ----------
        text : str
            Text to process

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        input_text = request.json.get('text')
        processor = cwms.process_text(input_text)
        return _stmts_from_proc(processor)
# BEL
# Input model for the BEL/RDF endpoint: a raw BEL/RDF document string.
bel_rdf_model = api.model('BelRdf', {'belrdf': fields.String})
@sources_ns.expect(genes_model)
@sources_ns.route('/bel/process_pybel_neighborhood')
class BelProcessNeighborhood(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process BEL Large Corpus neighborhood and return INDRA Statements.

        Parameters
        ----------
        genes : list[str]
            A list of entity names (e.g., gene names) which will be used as the
            basis of filtering the result. If any of the Agents of an extracted
            INDRA Statement has a name appearing in this list, the Statement is
            retained in the result.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        genes = args.get('genes')
        bp = bel.process_pybel_neighborhood(genes)
        return _stmts_from_proc(bp)
@sources_ns.expect(bel_rdf_model)
@sources_ns.route('/bel/process_belrdf')
class BelProcessBelRdf(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process BEL RDF and return INDRA Statements.

        Parameters
        ----------
        belrdf : str
            A BEL/RDF string to be processed. This will usually come from
            reading a .rdf file.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        belrdf = args.get('belrdf')
        bp = bel.process_belrdf(belrdf)
        return _stmts_from_proc(bp)
# BioPax
# Input model for from-to path queries: source and target gene lists.
source_target_model = api.model('SourceTarget', {
    'source': fields.List(fields.String, example=['BRAF', 'RAF1', 'ARAF']),
    'target': fields.List(fields.String, example=['MAP2K1', 'MAP2K2'])
})
@sources_ns.expect(genes_model)
@sources_ns.route('/biopax/process_pc_pathsbetween')
class BiopaxPathsBetween(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """
        Process PathwayCommons paths between genes, return INDRA Statements.

        Parameters
        ----------
        genes : list
            A list of HGNC gene symbols to search for paths between.
            Examples: ['BRAF', 'MAP2K1']

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        genes = args.get('genes')
        bp = biopax.process_pc_pathsbetween(genes)
        return _stmts_from_proc(bp)
@sources_ns.expect(source_target_model)
@sources_ns.route('/biopax/process_pc_pathsfromto')
class BiopaxPathsFromTo(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """
        Process PathwayCommons paths from-to genes, return INDRA Statements.

        Parameters
        ----------
        source : list
            A list of HGNC gene symbols that are the sources of paths being
            searched for.
            Examples: ['BRAF', 'RAF1', 'ARAF']
        target : list
            A list of HGNC gene symbols that are the targets of paths being
            searched for.
            Examples: ['MAP2K1', 'MAP2K2']

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        source = args.get('source')
        target = args.get('target')
        bp = biopax.process_pc_pathsfromto(source, target)
        return _stmts_from_proc(bp)
@sources_ns.expect(genes_model)
@sources_ns.route('/biopax/process_pc_neighborhood')
class BiopaxNeighborhood(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Process PathwayCommons neighborhood, return INDRA Statements.

        Parameters
        ----------
        genes : list
            A list of HGNC gene symbols to search the neighborhood of.
            Examples: ['BRAF'], ['BRAF', 'MAP2K1']

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        genes = args.get('genes')
        bp = biopax.process_pc_neighborhood(genes)
        return _stmts_from_proc(bp)
# Create resources for Assemblers namespace
# Statements model plus the PySB export format selector.
pysb_stmts_model = api.inherit('PysbStatements', stmts_model, {
    'export_format': fields.String(example='kappa')
})
@assemblers_ns.expect(pysb_stmts_model)
@assemblers_ns.route('/pysb')
class AssemblePysb(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Assemble INDRA Statements and return PySB model string.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.
        export_format : str
            The format to export into, for instance "kappa", "bngl",
            "sbml", "matlab", "mathematica", "potterswheel". See
            http://pysb.readthedocs.io/en/latest/modules/export/index.html
            for a list of supported formats. In addition to the formats
            supported by PySB itself, this method also provides "sbgn"
            output.

        Returns
        -------
        image or model
            Assembled exported model. If export_format is kappa_im or kappa_cm,
            image is returned. Otherwise model string is returned.
        """
        args = request.json
        stmts_json = args.get('statements')
        export_format = args.get('export_format')
        stmts = stmts_from_json(stmts_json)
        pa = PysbAssembler()
        pa.add_statements(stmts)
        pa.make_model()
        try:
            for m in pa.model.monomers:
                pysb_assembler.set_extended_initial_condition(pa.model, m, 0)
        except Exception as e:
            # Best-effort: keep the assembled model even if initial
            # conditions cannot be extended.
            logger.exception(e)
        if not export_format:
            model_str = pa.print_model()
        elif export_format in ('kappa_im', 'kappa_cm'):
            # Image-producing formats: export to a PNG file and return it
            # base64-encoded. (Removed the unused `root` and `graph`
            # locals that previously captured intermediate values.)
            fname = 'model_%s.png' % export_format
            pa.export_model(format=export_format, file_name=fname)
            with open(fname, 'rb') as fh:
                data = 'data:image/png;base64,%s' % \
                    base64.b64encode(fh.read()).decode()
            return {'image': data}
        else:
            try:
                model_str = pa.export_model(format=export_format)
            except Exception as e:
                # Unsupported/failed export: return an empty model string
                # rather than a 500 error.
                logger.exception(e)
                model_str = ''
        res = {'model': model_str}
        return res
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/cx')
class AssembleCx(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Assemble INDRA Statements and return CX network json.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        model
            Assembled model string.
        """
        statements = stmts_from_json(request.json.get('statements'))
        assembler = CxAssembler(statements)
        return {'model': assembler.make_model()}
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/graph')
class AssembleGraph(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Assemble INDRA Statements and return Graphviz graph dot string.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        model
            Assembled model string.
        """
        statements = stmts_from_json(request.json.get('statements'))
        assembler = GraphAssembler(statements)
        return {'model': assembler.make_model()}
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/cyjs')
class AssembleCyjs(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Assemble INDRA Statements and return Cytoscape JS network.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        json_model : dict
            Json dictionary containing graph information.
        """
        args = request.json
        stmts_json = args.get('statements')
        stmts = stmts_from_json(stmts_json)
        cja = CyJSAssembler(stmts)
        cja.make_model(grouping=True)
        # The assembler emits a JSON string; parse it so Flask returns a
        # JSON object rather than a quoted string.
        model_str = cja.print_cyjs_graph()
        return json.loads(model_str)
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/english')
class AssembleEnglish(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Assemble each statement into English sentence.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        sentences : dict
            Dictionary mapping Statement UUIDs with English sentences.
        """
        statements = stmts_from_json(request.json.get('statements'))

        def _to_sentence(stmt):
            # One assembler per statement so each sentence is generated
            # independently.
            assembler = EnglishAssembler()
            assembler.add_statements([stmt])
            return assembler.make_model()

        return {'sentences': {st.uuid: _to_sentence(st)
                              for st in statements}}
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/sif/loopy')
class AssembleLoopy(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Assemble INDRA Statements into a Loopy model using SIF Assembler.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        loopy_url : str
            Assembled Loopy model string.
        """
        args = request.json
        stmts_json = args.get('statements')
        stmts = stmts_from_json(stmts_json)
        sa = SifAssembler(stmts)
        sa.make_model(use_name_as_key=True)
        # as_url=True returns a shareable Loopy URL rather than raw SIF.
        model_str = sa.print_loopy(as_url=True)
        res = {'loopy_url': model_str}
        return res
# Create resources for NDEx namespace
# Input model identifying an existing NDEx network.
network_model = api.model('Network', {'network_id': fields.String})
@ndex_ns.expect(stmts_model)
@ndex_ns.route('/share_model_ndex')
class ShareModelNdex(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Upload the model to NDEX.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        network_id : str
            ID of uploaded NDEx network.
        """
        args = request.json
        stmts_json = args.get('statements')
        stmts = stmts_from_json(stmts_json)
        ca = CxAssembler(stmts)
        # NOTE(review): every key of the request body (including the
        # 'statements' payload itself) is stored as a string network
        # attribute on the uploaded network -- confirm this is intended.
        for n, v in args.items():
            ca.cx['networkAttributes'].append({'n': n, 'v': v, 'd': 'string'})
        ca.make_model()
        network_id = ca.upload_model(private=False)
        return {'network_id': network_id}
@ndex_ns.expect(network_model)
@ndex_ns.route('/fetch_model_ndex')
class FetchModelNdex(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Download model and associated pieces from NDEX.

        Parameters
        ----------
        network_id : str
            ID of NDEx network to fetch.

        Returns
        -------
        stored_data : dict
            Dictionary representing the network.
        """
        args = request.json
        network_id = args.get('network_id')
        cx = process_ndex_network(network_id)
        # Pull the networkAttributes aspect out of the CX document.
        network_attr = [x for x in cx.cx if x.get('networkAttributes')]
        network_attr = network_attr[0]['networkAttributes']
        # Only attributes previously stored by share_model_ndex are kept.
        keep_keys = ['txt_input', 'parser',
                     'model_elements', 'preset_pos', 'stmts',
                     'sentences', 'evidence', 'cell_line', 'mrna', 'mutations']
        stored_data = {}
        for d in network_attr:
            if d['n'] in keep_keys:
                stored_data[d['n']] = d['v']
        return stored_data
# Create resources for INDRA DB REST namespace
# Input model carrying a single statement JSON object.
stmt_model = api.model('Statement', {'statement': fields.Nested(dict_model)})
@indra_db_rest_ns.expect(stmt_model)
@indra_db_rest_ns.route('/get_evidence')
class GetEvidence(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Get all evidence for a given INDRA statement.

        Parameters
        ----------
        statement : indra.statements.Statement.to_json()
            An INDRA Statement to get evidence for.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of retrieved INDRA Statements with evidence.
        """
        args = request.json
        stmt_json = args.get('statement')
        stmt = Statement._from_json(stmt_json)

        def _get_agent_ref(agent):
            """Get the preferred ref for an agent for db web api."""
            if agent is None:
                return None
            ag_hgnc_id = hgnc_client.get_hgnc_id(agent.name)
            if ag_hgnc_id is not None:
                return ag_hgnc_id + "@HGNC"
            db_refs = agent.db_refs
            for namespace in ['HGNC', 'FPLX', 'CHEBI', 'TEXT']:
                if namespace in db_refs.keys():
                    return '%s@%s' % (db_refs[namespace], namespace)
            return '%s@%s' % (agent.name, 'TEXT')

        def _get_matching_stmts(stmt_ref):
            # Filter by statement type.
            stmt_type = stmt_ref.__class__.__name__
            agent_name_list = [
                _get_agent_ref(ag) for ag in stmt_ref.agent_list()]
            non_binary_statements = (Complex, SelfModification, ActiveForm)
            # TODO: We should look at more than just the agent name.
            # Doing so efficiently may require changes to the web api.
            if isinstance(stmt_ref, non_binary_statements):
                agent_list = [ag_name for ag_name in agent_name_list
                              if ag_name is not None]
                kwargs = {}
            else:
                agent_list = []
                kwargs = {k: v for k, v in zip(['subject', 'object'],
                                               agent_name_list)}
                # The empty-kwargs guard applies only to the binary
                # (subject/object) case; with it at function level,
                # Complex/SelfModification/ActiveForm queries would
                # always short-circuit to [].
                if not any(kwargs.values()):
                    return []
            # Replaced a leftover debug print() with a debug log entry.
            logger.debug('Querying DB with agents: %s', agent_list)
            stmts = get_statements(agents=agent_list, stmt_type=stmt_type,
                                   simple_response=True, **kwargs)
            return stmts

        stmts_out = _get_matching_stmts(stmt)
        agent_name_list = [ag.name for ag in stmt.agent_list()]
        # Removed the dead chained assignment (`stmts` was never read).
        stmts_out = filter_concept_names(stmts_out, agent_name_list, 'all')
        return _return_stmts(stmts_out)
# Create resources for Databases namespace
# Shared input model for the cBioPortal CCLE endpoints.
cbio_model = api.model('Cbio', {
    'gene_list': fields.List(fields.String, example=["FOSL1", "GRB2"]),
    'cell_lines': fields.List(fields.String, example=['COLO679_SKIN'])
})
@databases_ns.expect(cbio_model)
@databases_ns.route('/cbio/get_ccle_mrna')
class CbioMrna(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Get CCLE mRNA amounts using cBioClient

        Parameters
        ----------
        gene_list : list[str]
            A list of HGNC gene symbols to get mRNA amounts for.
        cell_lines : list[str]
            A list of CCLE cell line names to get mRNA amounts for.

        Returns
        -------
        mrna_amounts : dict[dict[float]]
            A dict keyed to cell lines containing a dict keyed to genes
            containing float
        """
        args = request.json
        gene_list = args.get('gene_list')
        cell_lines = args.get('cell_lines')
        mrna_amounts = cbio_client.get_ccle_mrna(gene_list, cell_lines)
        res = {'mrna_amounts': mrna_amounts}
        return res
@databases_ns.expect(cbio_model)
@databases_ns.route('/cbio/get_ccle_cna')
class CbioCna(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Get CCLE CNA

        -2 = homozygous deletion
        -1 = hemizygous deletion
        0 = neutral / no change
        1 = gain
        2 = high level amplification

        Parameters
        ----------
        gene_list : list[str]
            A list of HGNC gene symbols to get mutations in.
        cell_lines : list[str]
            A list of CCLE cell line names to get mutations for.

        Returns
        -------
        cna : dict[dict[int]]
            A dict keyed to cases containing a dict keyed to genes
            containing int
        """
        args = request.json
        gene_list = args.get('gene_list')
        cell_lines = args.get('cell_lines')
        cna = cbio_client.get_ccle_cna(gene_list, cell_lines)
        res = {'cna': cna}
        return res
@databases_ns.expect(cbio_model)
@databases_ns.route('/cbio/get_ccle_mutations')
class CbioMutations(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight; not part of the documented API.
        return {}

    def post(self):
        """Get CCLE mutations

        Parameters
        ----------
        gene_list : list[str]
            A list of HGNC gene symbols to get mutations in
        cell_lines : list[str]
            A list of CCLE cell line names to get mutations for.

        Returns
        -------
        mutations : dict
            The result from cBioPortal as a dict in the format
            {cell_line : {gene : [mutation1, mutation2, ...] }}
        """
        args = request.json
        gene_list = args.get('gene_list')
        cell_lines = args.get('cell_lines')
        mutations = cbio_client.get_ccle_mutations(gene_list, cell_lines)
        res = {'mutations': mutations}
        return res
if __name__ == '__main__':
    # Run the API with Flask's built-in (development) server.
    argparser = argparse.ArgumentParser('Run the INDRA REST API')
    argparser.add_argument('--host', default='0.0.0.0')
    argparser.add_argument('--port', default=8080, type=int)
    argparserargs = argparser.parse_args()
    app.run(host=argparserargs.host, port=argparserargs.port)
|
johnbachman/belpy
|
rest_api/api.py
|
Python
|
mit
| 43,684
|
[
"Cytoscape"
] |
a72b131c716da6f6a97b2a0cc9bb8fd616f80f23d466e09846fed5a236c091d3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import sys
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
# Root of the VTK test data tree, resolved from the environment.
VTK_DATA_ROOT = vtkGetDataRoot()
# Prevent .pyc files being created.
# Stops the vtk source being polluted
# by .pyc files.
sys.dont_write_bytecode = True
# Load base (spike and test)
import TestStyleBaseSpike
import TestStyleBase
class TestStyleJoystickActor(vtk.test.Testing.vtkTest):

    def testStyleJoystickActor(self):
        """Exercise the Joystick+Actor interactor style and compare renders."""
        renderer = vtk.vtkRenderer()
        render_window = vtk.vtkRenderWindow()
        interactor = vtk.vtkRenderWindowInteractor()
        interactor.SetRenderWindow(render_window)

        spike = TestStyleBaseSpike.StyleBaseSpike(
            renderer, render_window, interactor)

        # Set interactor style
        style_switch = vtk.vtkInteractorStyleSwitch()
        interactor.SetInteractorStyle(style_switch)

        # Switch to Joystick+Actor mode by sending 'j' then 'a' key events.
        for key in ('j', 'a'):
            interactor.SetKeyEventInformation(0, 0, key, 0)
            interactor.InvokeEvent("CharEvent")

        # Test style
        style_base = TestStyleBase.TestStyleBase(renderer)
        style_base.test_style(style_switch.GetCurrentStyle())

        # Render, compare against the baseline image, then interact.
        img_file = "TestStyleJoystickActor.png"
        vtk.test.Testing.compareImage(
            interactor.GetRenderWindow(),
            vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
        vtk.test.Testing.interact()
if __name__ == "__main__":
    # Run through the VTK testing harness.
    vtk.test.Testing.main([(TestStyleJoystickActor, 'test')])
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Interaction/Style/Testing/Python/TestStyleJoystickActor.py
|
Python
|
gpl-3.0
| 2,123
|
[
"VTK"
] |
39c738a1dc5854a6115d628bc942d51f39a8c1035ffd9013b382da0903b77fd0
|
# model from Alex's flipflop file
from keras.layers import TimeDistributed, Dense, Activation
from keras.models import Sequential
from keras.constraints import maxnorm
from backend.Networks import leak_recurrent, dense_output_with_mask
def SimpleRecurrentModel(params):
    """Build a single-layer leaky recurrent network with one linear readout.

    ``params`` supplies 'N_rec' (recurrent units), 'rec_noise',
    'tau' (membrane time constant) and 'dale_ratio'.
    """
    model = Sequential()
    # Leaky recurrent layer over 2-dimensional inputs; noise, time
    # constant and Dale's ratio come from the params dict.
    model.add(leak_recurrent(
        input_dim=2, output_dim=params['N_rec'], return_sequences=True,
        activation='relu', noise=params['rec_noise'], consume_less='mem',
        tau=params['tau'], dale_ratio=params['dale_ratio']))
    # Rectify recurrent activity before the readout so the output is
    # W * [x]_+ rather than [W * x]_+.
    model.add(Activation('relu'))
    # Single linear output unit with a Dale-ratio weight mask.
    model.add(TimeDistributed(dense_output_with_mask(
        output_dim=1, activation='linear',
        dale_ratio=params['dale_ratio'], input_dim=params['N_rec'])))
    # MSE trains faster here than binary_crossentropy.
    model.compile(loss='mse', optimizer='Adam', sample_weight_mode="temporal")
    return model
|
ABAtanasov/KerasCog
|
models/SimpleRecurrent.py
|
Python
|
mit
| 1,270
|
[
"NEURON"
] |
5b037cf0843b6087810006aba8fa7a7e8adf0ba4ee1c35f109601fa0fa2ff1ad
|
#!/usr/bin/env python
#########################################################################################
# Register a volume (e.g., EPI from fMRI or DTI scan) to an anatomical image.
#
# See Usage() below for more information.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Julien Cohen-Adad
# Modified: 2014-06-03
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: add flag -owarpinv
# TODO: if user specified -param, then ignore the default paramreg
# TODO: check syn with shrink=4
# TODO: output name file for warp using "src" and "dest" file name, i.e. warp_filesrc2filedest.nii.gz
# TODO: testing script for all cases
# TODO: add following feature:
# -r of isct_antsRegistration at the initial step (step 0).
# -r [' dest ',' src ',0] --> align the geometric center of the two images
# -r [' dest ',' src ',1] --> align the maximum intensities of the two images I use that quite often...
# TODO: output reg for ants2d and centermass (2016-02-25)
# Note for the developer: DO NOT use --collapse-output-transforms 1, otherwise inverse warping field is not output
# TODO: make three possibilities:
# - one-step registration, using only image registration (by sliceReg or antsRegistration)
# - two-step registration, using first segmentation-based registration (based on sliceReg or antsRegistration) and
# second the image registration (and allow the choice of algo, metric, etc.)
# - two-step registration, using only segmentation-based registration
import sys
import os
import time
from copy import deepcopy
import numpy as np
from spinalcordtoolbox.reports.qc import generate_qc
from spinalcordtoolbox.registration.register import Paramreg, ParamregMultiStep
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder, list_type, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, printv, set_loglevel
from spinalcordtoolbox.utils.fs import extract_fname
from spinalcordtoolbox.image import check_dim
from spinalcordtoolbox.scripts.sct_register_to_template import register_wrapper
# Default registration parameters
# Step 0 runs zero optimization iterations (iter='0'): it is only used to
# bring the source image into the destination space (header/label alignment).
step0 = Paramreg(step='0', type='im', algo='syn', metric='MI', iter='0', shrink='1', smooth='0', gradStep='0.5',
                 slicewise='0', dof='Tx_Ty_Tz_Rx_Ry_Rz')  # only used to put src into dest space
# Step 1 relies on the Paramreg defaults for image-based registration.
step1 = Paramreg(step='1', type='im')
# Module-wide default multi-step registration; main() deep-copies this before
# applying any user-provided -param overrides.
DEFAULT_PARAMREGMULTI = ParamregMultiStep([step0, step1])
def get_parser():
    """Build and return the command-line parser for sct_register_multimodal.

    Two arguments are mandatory (-i source image, -d destination image); the
    rest configure segmentations, labels, initial warping fields, the mask,
    output names, multi-step registration parameters and QC reporting.
    """
    # Initialize the parser
    parser = SCTArgumentParser(
        description="This program co-registers two 3D volumes. The deformation is non-rigid and is constrained along "
                    "Z direction (i.e., axial plane). Hence, this function assumes that orientation of the destination "
                    "image is axial (RPI). If you need to register two volumes with large deformations and/or "
                    "different contrasts, it is recommended to input spinal cord segmentations (binary mask) in order "
                    "to achieve maximum robustness. The program outputs a warping field that can be used to register "
                    "other images to the destination image. To apply the warping field to another image, use "
                    "'sct_apply_transfo'\n"
                    "\n"
                    "Tips:\n"
                    " - For a registration step using segmentations, use the MeanSquares metric. Also, simple "
                    "algorithm will be very efficient, for example centermass as a 'preregistration'.\n"
                    " - For a registration step using images of different contrast, use the Mutual Information (MI) "
                    "metric.\n"
                    " - Combine the steps by increasing the complexity of the transformation performed in each step, "
                    "for example: -param step=1,type=seg,algo=slicereg,metric=MeanSquares:"
                    "step=2,type=seg,algo=affine,metric=MeanSquares,gradStep=0.2:"
                    "step=3,type=im,algo=syn,metric=MI,iter=5,shrink=2\n"
                    " - When image contrast is low, a good option is to perform registration only based on the image "
                    "segmentation, i.e. using type=seg\n"
                    " - Columnwise algorithm needs to be applied after a translation and rotation such as centermassrot "
                    "algorithm. For example: -param step=1,type=seg,algo=centermassrot,metric=MeanSquares:"
                    "step=2,type=seg,algo=columnwise,metric=MeanSquares"
    )

    mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
    mandatory.add_argument(
        '-i',
        metavar=Metavar.file,
        required=True,
        help="Image source. Example: src.nii.gz"
    )
    mandatory.add_argument(
        '-d',
        metavar=Metavar.file,
        required=True,
        help="Image destination. Example: dest.nii.gz"
    )

    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="Show this help message and exit."
    )
    optional.add_argument(
        '-iseg',
        metavar=Metavar.file,
        help="Segmentation source. Example: src_seg.nii.gz"
    )
    optional.add_argument(
        '-dseg',
        metavar=Metavar.file,
        help="Segmentation destination. Example: dest_seg.nii.gz"
    )
    optional.add_argument(
        '-ilabel',
        metavar=Metavar.file,
        help="Labels source."
    )
    optional.add_argument(
        '-dlabel',
        metavar=Metavar.file,
        help="Labels destination."
    )
    optional.add_argument(
        '-initwarp',
        metavar=Metavar.file,
        help="Initial warping field to apply to the source image."
    )
    optional.add_argument(
        '-initwarpinv',
        metavar=Metavar.file,
        help="Initial inverse warping field to apply to the destination image (only use if you wish to generate the "
             "dest->src warping field)"
    )
    optional.add_argument(
        '-m',
        metavar=Metavar.file,
        help="Mask that can be created with sct_create_mask to improve accuracy over region of interest. This mask "
             "will be used on the destination image. Example: mask.nii.gz"
    )
    optional.add_argument(
        '-o',
        metavar=Metavar.file,
        help="Name of output file. Example: src_reg.nii.gz"
    )
    optional.add_argument(
        '-owarp',
        metavar=Metavar.file,
        help="Name of output forward warping field."
    )
    # The long help text below interpolates the library defaults so that the
    # CLI documentation always matches DEFAULT_PARAMREGMULTI.
    optional.add_argument(
        '-param',
        metavar=Metavar.list,
        type=list_type(':', str),
        help=(f"R|Parameters for registration. Separate arguments with \",\". Separate steps with \":\".\n"
              f"Example: step=1,type=seg,algo=slicereg,metric=MeanSquares:step=2,type=im,algo=syn,metric=MI,iter=5,"
              f"shrink=2\n"
              f" - step: <int> Step number (starts at 1, except for type=label).\n"
              f" - type: {{im, seg, imseg, label}} type of data used for registration. Use type=label only at "
              f"step=0.\n"
              f" - algo: The algorithm used to compute the transformation. Default={DEFAULT_PARAMREGMULTI.steps['1'].algo}\n"
              f" * translation: translation in X-Y plane (2dof)\n"
              f" * rigid: translation + rotation in X-Y plane (4dof)\n"
              f" * affine: translation + rotation + scaling in X-Y plane (6dof)\n"
              f" * syn: non-linear symmetric normalization\n"
              f" * bsplinesyn: syn regularized with b-splines\n"
              f" * slicereg: regularized translations (see: goo.gl/Sj3ZeU)\n"
              f" * centermass: slicewise center of mass alignment (seg only).\n"
              f" * centermassrot: slicewise center of mass and rotation alignment using method specified in "
              f"'rot_method'\n"
              f" * columnwise: R-L scaling followed by A-P columnwise alignment (seg only).\n"
              f" - slicewise: <int> Slice-by-slice 2d transformation. "
              f"Default={DEFAULT_PARAMREGMULTI.steps['1'].slicewise}.\n"
              f" - metric: {{CC, MI, MeanSquares}}. Default={DEFAULT_PARAMREGMULTI.steps['1'].metric}.\n"
              f" * CC: The cross correlation metric compares the images based on their intensities but with a small "
              f"normalization. It can be used with images with the same contrast (for ex. T2-w with T2-w). In this "
              f"case it is very efficient but the computation time can be very long.\n"
              f" * MI: the mutual information metric compares the images based on their entropy, therefore the "
              f"images need to be big enough to have enough information. It works well for images with different "
              f"contrasts (for example T2-w with T1-w) but not on segmentations.\n"
              f" * MeanSquares: The mean squares metric compares the images based on their intensities. It can be "
              f"used only with images that have exactly the same contrast (with the same intensity range) or with "
              f"segmentations.\n"
              f" - iter: <int> Number of iterations. Default={DEFAULT_PARAMREGMULTI.steps['1'].iter}.\n"
              f" - shrink: <int> Shrink factor. A shrink factor of 2 will down sample the images by a factor of 2 to "
              f"do the registration, and thus allow bigger deformations (and be faster to compute). It is usually "
              f"combined with a smoothing. (only for syn/bsplinesyn). Default={DEFAULT_PARAMREGMULTI.steps['1'].shrink}.\n"
              f" - smooth: <int> Smooth factor (in mm). Note: if algo={{centermassrot,columnwise}} the smoothing "
              f"kernel is: SxSx0. Otherwise it is SxSxS. Default={DEFAULT_PARAMREGMULTI.steps['1'].smooth}.\n"
              f" - laplacian: <int> Laplace filter using Gaussian second derivatives, applied before registration. "
              f"The input number correspond to the standard deviation of the Gaussian filter. "
              f"Default={DEFAULT_PARAMREGMULTI.steps['1'].laplacian}.\n"
              f" - gradStep: <float> The gradient step used by the function opitmizer. A small gradient step can lead "
              f"to a more accurate registration but will take longer to compute, with the risk to not reach "
              f"convergence. A bigger gradient step will make the registration faster but the result can be far from "
              f"an optimum. Default={DEFAULT_PARAMREGMULTI.steps['1'].gradStep}.\n"
              f" - deformation: ?x?x?: Restrict deformation (for ANTs algo). Replace ? by 0 (no deformation) or 1 "
              f"(deformation). Default={DEFAULT_PARAMREGMULTI.steps['1'].deformation}.\n"
              f" - init: Initial translation alignment based on:\n"
              f" * geometric: Geometric center of images\n"
              f" * centermass: Center of mass of images\n"
              f" * origin: Physical origin of images\n"
              f" - poly: <int> Polynomial degree of regularization (only for algo=slicereg). "
              f"Default={DEFAULT_PARAMREGMULTI.steps['1'].poly}.\n"
              f" - filter_size: <float> Filter size for regularization (only for algo=centermassrot). "
              f"Default={DEFAULT_PARAMREGMULTI.steps['1'].filter_size}.\n"
              f" - smoothWarpXY: <int> Smooth XY warping field (only for algo=columnwize). "
              f"Default={DEFAULT_PARAMREGMULTI.steps['1'].smoothWarpXY}.\n"
              f" - pca_eigenratio_th: <int> Min ratio between the two eigenvalues for PCA-based angular adjustment "
              f"(only for algo=centermassrot and rot_method=pca). "
              f"Default={DEFAULT_PARAMREGMULTI.steps['1'].pca_eigenratio_th}.\n"
              f" - dof: <str> Degree of freedom for type=label. Separate with '_'. T stands for translation and R "
              f"stands for rotation, x, y, and z indicating the direction. For example, Tx_Ty_Tz_Rx_Ry_Rz would allow "
              f"translation on x, y and z axes and rotation on x, y and z axes. "
              f"Default={DEFAULT_PARAMREGMULTI.steps['0'].dof}.\n"
              f" - rot_method {{pca, hog, pcahog}}: rotation method to be used with algo=centermassrot. If using hog "
              f"or pcahog, type should be set to imseg. Default={DEFAULT_PARAMREGMULTI.steps['1'].rot_method}\n"
              f" * pca: approximate cord segmentation by an ellipse and finds it orientation using PCA's "
              f"eigenvectors\n"
              f" * hog: finds the orientation using the symmetry of the image\n"
              f" * pcahog: tries method pca and if it fails, uses method hog.\n")
    )
    optional.add_argument(
        '-identity',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=0,
        help="Just put source into destination (no optimization)."
    )
    # NOTE: Param is defined later in this module; that is fine because the
    # reference is only evaluated when get_parser() is called at runtime.
    optional.add_argument(
        '-z',
        metavar=Metavar.int,
        type=int,
        default=Param().padding,
        help="Size of z-padding to enable deformation at edges when using SyN."
    )
    optional.add_argument(
        '-x',
        choices=['nn', 'linear', 'spline'],
        default='linear',
        help="Final interpolation."
    )
    optional.add_argument(
        '-ofolder',
        metavar=Metavar.folder,
        action=ActionCreateFolder,
        help="Output folder. Example: reg_results/"
    )
    optional.add_argument(
        '-qc',
        metavar=Metavar.folder,
        action=ActionCreateFolder,
        help="The path where the quality control generated content will be saved."
    )
    optional.add_argument(
        '-qc-dataset',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
    )
    optional.add_argument(
        '-qc-subject',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
    )
    optional.add_argument(
        '-r',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=1,
        help="Whether to remove temporary files. 0 = no, 1 = yes"
    )
    optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
    )

    return parser
# DEFAULT PARAMETERS
class Param:
    """Mutable container for registration defaults consumed by main().

    main() later attaches extra attributes (verbose, fname_mask) before
    passing the instance to register_wrapper().
    """

    def __init__(self):
        # Debug mode disabled by default.
        self.debug = 0
        # Suffix appended to the registered output file name.
        self.outSuffix = "_reg"
        # Z-padding (voxels) enabling deformation at volume edges with SyN.
        self.padding = 5
        # 1 = delete temporary files when done, 0 = keep them.
        self.remove_temp_files = 1
# MAIN
# ==========================================================================================
def main(argv=None):
    """Entry point: parse CLI arguments, run the registration, report QC.

    :param argv: command-line argument list (``sys.argv[1:]`` when launched
                 from the ``__main__`` guard); ``None`` lets argparse read
                 ``sys.argv`` itself.
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)

    # initialize parameters
    param = Param()

    # Initialization: empty strings act as "not provided" sentinels below.
    fname_output = ''
    path_out = ''
    fname_src_seg = ''
    fname_dest_seg = ''
    fname_src_label = ''
    fname_dest_label = ''
    start_time = time.time()

    # get arguments
    fname_src = arguments.i
    fname_dest = arguments.d
    if arguments.iseg is not None:
        fname_src_seg = arguments.iseg
    if arguments.dseg is not None:
        fname_dest_seg = arguments.dseg
    if arguments.ilabel is not None:
        fname_src_label = arguments.ilabel
    if arguments.dlabel is not None:
        fname_dest_label = arguments.dlabel
    if arguments.o is not None:
        fname_output = arguments.o
    if arguments.ofolder is not None:
        path_out = arguments.ofolder
    if arguments.owarp is not None:
        fname_output_warp = arguments.owarp
    else:
        fname_output_warp = ''
    # Initial warping fields are absolutized because the registration may run
    # from a temporary working directory.
    if arguments.initwarp is not None:
        fname_initwarp = os.path.abspath(arguments.initwarp)
    else:
        fname_initwarp = ''
    if arguments.initwarpinv is not None:
        fname_initwarpinv = os.path.abspath(arguments.initwarpinv)
    else:
        fname_initwarpinv = ''
    if arguments.m is not None:
        fname_mask = arguments.m
    else:
        fname_mask = ''
    padding = arguments.z
    # Work on a deep copy so user -param overrides never mutate the module
    # level DEFAULT_PARAMREGMULTI.
    paramregmulti = deepcopy(DEFAULT_PARAMREGMULTI)
    if arguments.param is not None:
        paramregmulti_user = arguments.param
        # update registration parameters
        for paramStep in paramregmulti_user:
            paramregmulti.addStep(paramStep)
    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject
    identity = arguments.identity
    interp = arguments.x
    remove_temp_files = arguments.r

    # printv(arguments)
    printv('\nInput parameters:')
    printv(' Source .............. ' + fname_src)
    printv(' Destination ......... ' + fname_dest)
    printv(' Init transfo ........ ' + fname_initwarp)
    printv(' Mask ................ ' + fname_mask)
    printv(' Output name ......... ' + fname_output)
    # printv(' Algorithm ........... '+paramregmulti.algo)
    # printv(' Number of iterations '+paramregmulti.iter)
    # printv(' Metric .............. '+paramregmulti.metric)
    printv(' Remove temp files ... ' + str(remove_temp_files))
    printv(' Verbose ............. ' + str(verbose))

    # update param with the values register_wrapper() expects to find on it
    param.verbose = verbose
    param.padding = padding
    param.fname_mask = fname_mask
    param.remove_temp_files = remove_temp_files

    # Get if input is 3D
    printv('\nCheck if input data are 3D...', verbose)
    check_dim(fname_src, dim_lst=[3])
    check_dim(fname_dest, dim_lst=[3])

    # Check if user selected type=seg, but did not input segmentation data
    # ('paramregmulti_user' only exists in locals() when -param was given)
    if 'paramregmulti_user' in locals():
        if True in ['type=seg' in paramregmulti_user[i] for i in range(len(paramregmulti_user))]:
            if fname_src_seg == '' or fname_dest_seg == '':
                printv('\nERROR: if you select type=seg you must specify -iseg and -dseg flags.\n', 1, 'error')

    # Put source into destination space using header (no estimation -- purely based on header)
    # TODO: Check if necessary to do that
    # TODO: use that as step=0
    # printv('\nPut source into destination space using header...', verbose)
    # run_proc('isct_antsRegistration -d 3 -t Translation[0] -m MI[dest_pad.nii,src.nii,1,16] -c 0 -f 1 -s 0 -o
    # [regAffine,src_regAffine.nii] -n BSpline[3]', verbose)
    # if segmentation, also do it for seg

    # Run the actual multi-step registration (forward and, when possible,
    # inverse). fname_dest2src is empty for unidirectional registrations.
    fname_src2dest, fname_dest2src, _, _ = \
        register_wrapper(fname_src, fname_dest, param, paramregmulti, fname_src_seg=fname_src_seg,
                         fname_dest_seg=fname_dest_seg, fname_src_label=fname_src_label,
                         fname_dest_label=fname_dest_label, fname_mask=fname_mask, fname_initwarp=fname_initwarp,
                         fname_initwarpinv=fname_initwarpinv, identity=identity, interp=interp,
                         fname_output=fname_output,
                         fname_output_warp=fname_output_warp,
                         path_out=path_out)

    # display elapsed time
    elapsed_time = time.time() - start_time
    printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', verbose)

    if path_qc is not None:
        # QC rendering needs the destination segmentation as an overlay.
        if fname_dest_seg:
            generate_qc(fname_src2dest, fname_in2=fname_dest, fname_seg=fname_dest_seg, args=argv,
                        path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject,
                        process='sct_register_multimodal')
        else:
            printv('WARNING: Cannot generate QC because it requires destination segmentation.', 1, 'warning')

    # If dest wasn't registered (e.g. unidirectional registration due to '-initwarp'), then don't output syntax
    if fname_dest2src:
        display_viewer_syntax([fname_src, fname_dest2src], verbose=verbose)
    display_viewer_syntax([fname_dest, fname_src2dest], verbose=verbose)
if __name__ == "__main__":
    # Script entry point: set up the SCT runtime environment, then run main()
    # with the command-line arguments (program name excluded).
    init_sct()
    main(sys.argv[1:])
|
neuropoly/spinalcordtoolbox
|
spinalcordtoolbox/scripts/sct_register_multimodal.py
|
Python
|
mit
| 20,460
|
[
"Gaussian"
] |
be876ac9a526fc8a4b0c826c74d92c1dbb5ed6be55241a0936fd1b5ed333c6b2
|
"""
Copyright (c) 2014 Brian Muller
Copyright (c) 2015 OpenBazaar
"""
import random
from twisted.internet import defer
from zope.interface import implements
import nacl.signing
from dht.node import Node
from dht.routing import RoutingTable
from dht.utils import digest
from log import Logger
from rpcudp import RPCProtocol
from interfaces import MessageProcessor
from protos import objects
from protos.message import PING, STUN, STORE, DELETE, FIND_NODE, FIND_VALUE, HOLE_PUNCH
class KademliaProtocol(RPCProtocol):
    """Kademlia DHT message processor (Python 2 / Twisted).

    ``rpc_*`` methods handle inbound requests; ``call*`` methods issue
    outbound RPCs and route every response through handleCallResponse() so
    the routing table stays current.
    """

    implements(MessageProcessor)

    def __init__(self, sourceNode, storage, ksize, database):
        # ksize: Kademlia bucket size "k" (max neighbors returned/stored).
        self.ksize = ksize
        self.router = RoutingTable(self, ksize, sourceNode)
        self.storage = storage
        self.sourceNode = sourceNode
        # Multiplexer is wired in later via connect_multiplexer().
        self.multiplexer = None
        self.db = database
        self.log = Logger(system=self)
        self.handled_commands = [PING, STUN, STORE, DELETE, FIND_NODE, FIND_VALUE, HOLE_PUNCH]
        RPCProtocol.__init__(self, sourceNode.getProto(), self.router)

    def connect_multiplexer(self, multiplexer):
        # Late injection of the network multiplexer (set after construction).
        self.multiplexer = multiplexer

    def getRefreshIDs(self):
        """
        Get ids to search for to keep old buckets up to date.
        """
        ids = []
        for bucket in self.router.getLonelyBuckets():
            # pick a random id inside the bucket's range so a lookup for it
            # refreshes that bucket
            ids.append(random.randint(*bucket.range))
        return ids

    def rpc_stun(self, sender):
        # Tell the sender the ip/port we observed for it (NAT discovery).
        self.addToRouter(sender)
        return [sender.ip, str(sender.port)]

    def rpc_ping(self, sender):
        # Reply with our own serialized node proto.
        self.addToRouter(sender)
        return [self.sourceNode.getProto().SerializeToString()]

    def rpc_store(self, sender, keyword, key, value):
        self.addToRouter(sender)
        self.log.debug("got a store request from %s, storing value" % str(sender))
        # Sanity limits: 20-byte keyword digest, bounded key/value sizes.
        if len(keyword) == 20 and len(key) <= 33 and len(value) <= 1800:
            self.storage[keyword] = (key, value)
            return ["True"]
        else:
            return ["False"]

    def rpc_delete(self, sender, keyword, key, signature):
        # Deletion must be authorized by a valid nacl signature; return
        # values are the strings "True"/"False" (protocol convention).
        self.addToRouter(sender)
        value = self.storage.getSpecific(keyword, key)
        if value is not None:
            # Try to delete a message from the dht
            if keyword == digest(sender.id):
                try:
                    verify_key = nacl.signing.VerifyKey(sender.signed_pubkey[64:])
                    verify_key.verify(key, signature)
                    self.storage.delete(keyword, key)
                    return ["True"]
                except Exception:
                    return ["False"]
            # Or try to delete a pointer
            else:
                try:
                    node = objects.Node()
                    node.ParseFromString(value)
                    pubkey = node.signedPublicKey[64:]
                    try:
                        verify_key = nacl.signing.VerifyKey(pubkey)
                        # combined-mode verify: signature prepended to message
                        verify_key.verify(signature + key)
                        self.storage.delete(keyword, key)
                        return ["True"]
                    except Exception:
                        return ["False"]
                except Exception:
                    # value was not a parsable Node proto; fall through
                    pass
        return ["False"]

    def rpc_find_node(self, sender, key):
        self.log.info("finding neighbors of %s in local table" % key.encode('hex'))
        self.addToRouter(sender)
        node = Node(key)
        nodeList = self.router.findNeighbors(node, exclude=sender)
        ret = []
        for n in nodeList:
            ret.append(n.getProto().SerializeToString())
        return ret

    def rpc_find_value(self, sender, key):
        self.addToRouter(sender)
        # First element tags the response as a value (vs. a node list).
        ret = ["value"]
        value = self.storage.get(key, None)
        if value is None:
            # No local value: behave like find_node instead.
            return self.rpc_find_node(sender, key)
        ret.extend(value)
        return ret

    def callFindNode(self, nodeToAsk, nodeToFind):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_node(address, nodeToFind.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callFindValue(self, nodeToAsk, nodeToFind):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_value(address, nodeToFind.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callPing(self, nodeToAsk):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.ping(address)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callStore(self, nodeToAsk, keyword, key, value):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.store(address, keyword, key, value)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callDelete(self, nodeToAsk, keyword, key, signature):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.delete(address, keyword, key, signature)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def transferKeyValues(self, node):
        """
        Given a new node, send it all the keys/values it should be storing.
        @param node: A new node that just joined (or that we just found out
        about).
        Process:
        For each key in storage, get k closest nodes. If newnode is closer
        than the furtherst in that list, and the node for this server
        is closer than the closest in that list, then store the key/value
        on the new node (per section 2.5 of the paper)
        """
        ds = []
        for keyword in self.storage.iterkeys():
            keynode = Node(keyword)
            neighbors = self.router.findNeighbors(keynode, exclude=node)
            if len(neighbors) > 0:
                newNodeClose = node.distanceTo(keynode) < neighbors[-1].distanceTo(keynode)
                thisNodeClosest = self.sourceNode.distanceTo(keynode) < neighbors[0].distanceTo(keynode)
            # NOTE(review): when neighbors is empty, newNodeClose and
            # thisNodeClosest are unbound, but short-circuiting on
            # len(neighbors) == 0 prevents them from being evaluated.
            if len(neighbors) == 0 \
                    or (newNodeClose and thisNodeClosest) \
                    or (thisNodeClosest and len(neighbors) < self.ksize):
                for k, v in self.storage.iteritems(keyword):
                    ds.append(self.callStore(node, keyword, k, v))
        return defer.gatherResults(ds)

    def handleCallResponse(self, result, node):
        """
        If we get a response, add the node to the routing table. If
        we get no response, make sure it's removed from the routing table.
        """
        if result[0]:
            if self.router.isNewNode(node):
                self.transferKeyValues(node)
            self.log.info("got response from %s, adding to router" % node)
            self.router.addContact(node)
        else:
            self.log.debug("no response from %s, removing from router" % node)
            self.router.removeContact(node)
        # Pass the result through so further callbacks in the chain see it.
        return result

    def addToRouter(self, node):
        """
        Called by rpc_ functions when a node sends them a request.
        We add the node to our router and transfer our stored values
        if they are new and within our neighborhood.
        """
        if self.router.isNewNode(node):
            self.log.debug("Found a new node, transferring key/values")
            self.transferKeyValues(node)
        self.router.addContact(node)

    def __iter__(self):
        # Iterating the protocol yields the message types it handles.
        return iter(self.handled_commands)
|
JimmyMow/OpenBazaar-Server
|
dht/protocol.py
|
Python
|
mit
| 7,311
|
[
"Brian"
] |
f604e8205b831667599a3b30d11e76b299dfc6f3cb09511f68fa5abb10f75c96
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
# Command-line interface. Note that parsing happens at import time, so this
# module is intended to be executed as a script, not imported as a library.
parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')

# Default root: two directories above this script (the repository root).
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")

default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)

parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")

args = parser.parse_args()

# Diagnostics go to stderr when --verbose, otherwise they are discarded.
# NOTE(review): the /dev/null handle is never closed — harmless for a
# short-lived script, but worth knowing.
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
    """Load the reference boilerplate headers.

    Scans ``boilerplate.*.txt`` files in ``args.boilerplate_dir`` and maps
    the embedded extension (the token after the first dot in the basename,
    e.g. ``py`` for ``boilerplate.py.txt``) to the header's lines.

    Returns:
        dict: extension -> list of header lines (no trailing newlines).
    """
    refs = {}
    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]
        # 'with' guarantees the handle is closed even if read() raises,
        # unlike the previous open()/read()/close() sequence.
        with open(path, 'r') as ref_file:
            refs[extension] = ref_file.read().splitlines()
    return refs
def is_generated_file(filename, data, regexs):
    """Return a truthy match when *data* looks auto-generated.

    Files whose path matches an entry of ``skipped_ungenerated_files`` are
    known to contain the generated-file marker without being generated, so
    they are excluded up front.
    """
    if any(fragment in filename for fragment in skipped_ungenerated_files):
        return False
    # Truthy re.Match when the "DO NOT EDIT" marker is present, else None.
    return regexs["generated"].search(data)
def file_passes(filename, refs, regexs):
    """Return True when *filename* starts with the expected license header.

    The reference header is chosen by extension (or basename for files
    without one); generated files are held to the ``generatego`` reference
    and must NOT contain the literal "YEAR" placeholder.
    """
    try:
        f = open(filename, 'r')
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False

    data = f.read()
    f.close()

    # determine if the file is automatically generated
    generated = is_generated_file(filename, data, regexs)

    basename = os.path.basename(filename)
    if generated:
        extension = "generatego"
    else:
        extension = file_extension(filename)
    if extension != "":
        ref = refs[extension]
    else:
        # extensionless files (e.g. Makefile) are keyed by basename
        ref = refs[basename]

    # remove extra content from the top of files
    if extension == "go" or extension == "generatego":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    elif extension == "sh":
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)

    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    # a literal "YEAR" placeholder must never survive into a real file
    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in generated file' % filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but missing the year of date' % filename, file=verbose_out)
            return False

    if not generated:
        # Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
        # (only the first matching line, so the header diff below lines up)
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break

    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False

    return True
def file_extension(filename):
    """Return the lowercased final extension of *filename*, without the dot.

    Extensionless paths yield the empty string; only the last suffix of a
    multi-dot name is returned (e.g. 'a.tar.gz' -> 'gz').
    """
    _, suffix = os.path.splitext(filename)
    return suffix.split(".")[-1].lower()
# Path fragments that exempt a file from the boilerplate check entirely
# (vendored code, build output, embedded third-party SDKs, ...).
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
                "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
                "pkg/generated/bindata.go",
                "cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3",
                "cluster-autoscaler/cloudprovider/bizflycloud/gobizfly",
                "cluster-autoscaler/cloudprovider/brightbox/gobrightbox",
                "cluster-autoscaler/cloudprovider/brightbox/k8ssdk",
                "cluster-autoscaler/cloudprovider/brightbox/linkheader",
                "cluster-autoscaler/cloudprovider/brightbox/go-cache",
                "cluster-autoscaler/cloudprovider/digitalocean/godo",
                "cluster-autoscaler/cloudprovider/magnum/gophercloud",
                "cluster-autoscaler/cloudprovider/ionoscloud/ionos-cloud-sdk-go",
                "cluster-autoscaler/cloudprovider/hetzner/hcloud-go",
                "cluster-autoscaler/cloudprovider/oci"]

# list all the files contain 'DO NOT EDIT', but are not generated
skipped_ungenerated_files = ['hack/build-ui.sh', 'hack/lib/swagger.sh',
                             'hack/boilerplate/boilerplate.py',
                             'cluster-autoscaler/cloudprovider/aws/ec2_instance_types/gen.go',
                             'cluster-autoscaler/cloudprovider/azure/azure_instance_types/gen.go']
def normalize_files(files):
    """Filter out skipped paths and absolutize the survivors.

    Paths containing any fragment from ``skipped_dirs`` are dropped; the
    remaining relative paths are resolved against ``args.rootdir``.
    """
    kept = [p for p in files if not any(fragment in p for fragment in skipped_dirs)]
    return [p if os.path.isabs(p) else os.path.join(args.rootdir, p)
            for p in kept]
def get_files(extensions):
    """Return the normalized paths whose extension or basename is in *extensions*.

    Uses the explicit ``args.filenames`` when given; otherwise walks
    ``args.rootdir`` recursively.
    """
    if args.filenames:
        candidates = args.filenames
    else:
        candidates = []
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # Prune skipped directories in place. This is purely a
            # performance optimization — normalize_files() would filter
            # them out anyway — but it avoids walking large vendor trees.
            dirs[:] = [d for d in dirs if d not in skipped_dirs]
            candidates.extend(os.path.join(root, name) for name in walkfiles)

    candidates = normalize_files(candidates)
    return [p for p in candidates
            if file_extension(p) in extensions or os.path.basename(p) in extensions]
def get_dates():
    """Return a regex alternation matching every year from 2014 through now.

    Example (in 2018): ``(2014|2015|2016|2017|2018)``.
    """
    current_year = datetime.datetime.now().year
    year_tokens = [str(y) for y in range(2014, current_year + 1)]
    return '(%s)' % '|'.join(year_tokens)
def get_regexs():
    """Compile and return the patterns used by the header checker."""
    return {
        # "YEAR" exists in the boilerplate templates but must never appear
        # in a real file.
        "year": re.compile('YEAR'),
        # Matches any concrete copyright year from 2014 to the present;
        # company holder names can be anything.
        "date": re.compile(get_dates()),
        # Strips leading "// +build" / "//go:build" constraint blocks.
        "go_build_constraints": re.compile(
            r"^(//(go:build| \+build).*\n)+\n", re.MULTILINE),
        # Strips a leading "#!" shebang line from shell scripts.
        "shebang": re.compile(r"^(#!.*\n)\n*", re.MULTILINE),
        # Marker identifying generated files.
        "generated": re.compile('DO NOT EDIT'),
    }
def main():
    """Check every candidate file; print failing paths to stdout.

    Always returns 0 — callers inspect the printed list, not the status.
    """
    regexs = get_regexs()
    refs = get_refs()
    for filename in get_files(refs.keys()):
        # Overridden vendor trees are exempt from the check.
        if "/cluster-autoscaler/_override/" in filename:
            continue
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)
    return 0
if __name__ == "__main__":
    # main() always returns 0; failures are reported on stdout, not via
    # the exit status.
    sys.exit(main())
|
kubernetes/autoscaler
|
hack/boilerplate/boilerplate.py
|
Python
|
apache-2.0
| 8,389
|
[
"VisIt"
] |
f0ab66fc97baf78a9e92a98cc9a24af44db0488603864ddd888f17643ee6e266
|
"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
# Public API of this module.
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
           "NNTPPermanentError","NNTPProtocolError","NNTPDataError",
           "error_reply","error_temp","error_perm","error_proto",
           "error_data",]

# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions.

    ``response`` carries the server response line when one was supplied as
    the first positional argument, otherwise a placeholder string.
    """
    def __init__(self, *args):
        Exception.__init__(self, *args)
        if args:
            self.response = args[0]
        else:
            self.response = 'No response given'
class NNTPReplyError(NNTPError):
    """Unexpected [123]xx reply"""
    pass

class NNTPTemporaryError(NNTPError):
    """4xx errors"""
    pass

class NNTPPermanentError(NNTPError):
    """5xx errors"""
    pass

class NNTPProtocolError(NNTPError):
    """Response does not begin with [1-5]"""
    pass

class NNTPDataError(NNTPError):
    """Error in response data"""
    pass

# for backwards compatibility
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError

# Standard port used by NNTP servers
NNTP_PORT = 119

# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']

# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class NNTP:
    """Minimal NNTP client (RFC 977 plus common extensions such as XOVER).

    Wraps a TCP connection to a news server.  Commands return the raw
    server response string; multi-line commands additionally return a list
    of lines (or write them to a file).  Article numbers are passed around
    as strings.  Protocol errors are raised as the NNTP*Error hierarchy
    defined above.
    """
    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=True):
        """Initialize an instance.  Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
                      connecting.
        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.create_connection((host, port))
        self.file = self.sock.makefile('rb')
        self.debugging = 0
        self.welcome = self.getresp()

        # 'mode reader' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'mode reader' and 'authinfo' need to
        # arrive differs between some NNTP servers. Try to send
        # 'mode reader', and if it fails with an authorization failed
        # error, try again after sending authinfo.
        readermode_afterauth = 0
        if readermode:
            try:
                self.welcome = self.shortcmd('mode reader')
            except NNTPPermanentError:
                # error 500, probably 'not implemented'
                pass
            except NNTPTemporaryError, e:
                if user and e.response[:3] == '480':
                    # Need authorization before 'mode reader'
                    readermode_afterauth = 1
                else:
                    raise

        # If no login/password was specified, try to get them from ~/.netrc
        # Presume that if .netc has an entry, NNRP authentication is required.
        try:
            if usenetrc and not user:
                import netrc
                credentials = netrc.netrc()
                auth = credentials.authenticators(host)
                if auth:
                    user = auth[0]
                    password = auth[2]
        except IOError:
            pass
        # Perform NNRP authentication if needed.
        if user:
            resp = self.shortcmd('authinfo user '+user)
            if resp[:3] == '381':
                if not password:
                    raise NNTPReplyError(resp)
                else:
                    resp = self.shortcmd(
                            'authinfo pass '+password)
                    if resp[:3] != '281':
                        raise NNTPPermanentError(resp)
            if readermode_afterauth:
                try:
                    self.welcome = self.shortcmd('mode reader')
                except NNTPPermanentError:
                    # error 500, probably 'not implemented'
                    pass

    # Get the welcome message from the server
    # (this is read and squirreled away by __init__()).
    # If the response code is 200, posting is allowed;
    # if it 201, posting is not allowed

    def getwelcome(self):
        """Get the welcome message from the server
        (this is read and squirreled away by __init__()).
        If the response code is 200, posting is allowed;
        if it 201, posting is not allowed."""

        if self.debugging: print '*welcome*', repr(self.welcome)
        return self.welcome

    def set_debuglevel(self, level):
        """Set the debugging level.  Argument 'level' means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF"""

        self.debugging = level
    debug = set_debuglevel

    def putline(self, line):
        """Internal: send one line to the server, appending CRLF."""
        line = line + CRLF
        if self.debugging > 1: print '*put*', repr(line)
        self.sock.sendall(line)

    def putcmd(self, line):
        """Internal: send one command to the server (through putline())."""
        if self.debugging: print '*cmd*', repr(line)
        self.putline(line)

    def getline(self):
        """Internal: return one line from the server, stripping CRLF.
        Raise EOFError if the connection is closed."""
        # Read at most _MAXLINE+1 bytes so an over-long line is detected
        # rather than consumed indefinitely.
        line = self.file.readline(_MAXLINE + 1)
        if len(line) > _MAXLINE:
            raise NNTPDataError('line too long')
        if self.debugging > 1:
            print '*get*', repr(line)
        if not line: raise EOFError
        if line[-2:] == CRLF: line = line[:-2]
        elif line[-1:] in CRLF: line = line[:-1]
        return line

    def getresp(self):
        """Internal: get a response from the server.
        Raise various errors if the response indicates an error."""
        resp = self.getline()
        if self.debugging: print '*resp*', repr(resp)
        c = resp[:1]
        if c == '4':
            raise NNTPTemporaryError(resp)
        if c == '5':
            raise NNTPPermanentError(resp)
        if c not in '123':
            raise NNTPProtocolError(resp)
        return resp

    def getlongresp(self, file=None):
        """Internal: get a response plus following text from the server.
        Raise various errors if the response indicates an error."""

        openedFile = None
        try:
            # If a string was passed then open a file with that name
            if isinstance(file, str):
                openedFile = file = open(file, "w")

            resp = self.getresp()
            if resp[:3] not in LONGRESP:
                raise NNTPReplyError(resp)
            list = []
            while 1:
                line = self.getline()
                if line == '.':
                    break
                # Undo RFC 977 dot-stuffing: a data line starting with '.'
                # arrives doubled ('..') and must be collapsed back.
                if line[:2] == '..':
                    line = line[1:]
                if file:
                    file.write(line + "\n")
                else:
                    list.append(line)
        finally:
            # If this method created the file, then it must close it
            if openedFile:
                openedFile.close()
        return resp, list

    def shortcmd(self, line):
        """Internal: send a command and get the response."""
        self.putcmd(line)
        return self.getresp()

    def longcmd(self, line, file=None):
        """Internal: send a command and get the response plus following text."""
        self.putcmd(line)
        return self.getlongresp(file)

    def newgroups(self, date, time, file=None):
        """Process a NEWGROUPS command.  Arguments:
        - date: string 'yymmdd' indicating the date
        - time: string 'hhmmss' indicating the time
        Return:
        - resp: server response if successful
        - list: list of newsgroup names"""

        return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)

    def newnews(self, group, date, time, file=None):
        """Process a NEWNEWS command.  Arguments:
        - group: group name or '*'
        - date: string 'yymmdd' indicating the date
        - time: string 'hhmmss' indicating the time
        Return:
        - resp: server response if successful
        - list: list of message ids"""

        cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
        return self.longcmd(cmd, file)

    def list(self, file=None):
        """Process a LIST command.  Return:
        - resp: server response if successful
        - list: list of (group, last, first, flag) (strings)"""

        resp, list = self.longcmd('LIST', file)
        for i in range(len(list)):
            # Parse lines into "group last first flag"
            list[i] = tuple(list[i].split())
        return resp, list

    def description(self, group):
        """Get a description for a single group.  If more than one
        group matches ('group' is a pattern), return the first.  If no
        group matches, return an empty string.

        This elides the response code from the server, since it can
        only be '215' or '285' (for xgtitle) anyway.  If the response
        code is needed, use the 'descriptions' method.

        NOTE: This neither checks for a wildcard in 'group' nor does
        it check whether the group actually exists."""
        resp, lines = self.descriptions(group)
        if len(lines) == 0:
            return ""
        else:
            return lines[0][1]

    def descriptions(self, group_pattern):
        """Get descriptions for a range of groups."""
        line_pat = re.compile("^(?P<group>[^ \t]+)[ \t]+(.*)$")
        # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
        resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern)
        if resp[:3] != "215":
            # Now the deprecated XGTITLE.  This either raises an error
            # or succeeds with the same output structure as LIST
            # NEWSGROUPS.
            resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern)
        lines = []
        for raw_line in raw_lines:
            match = line_pat.search(raw_line.strip())
            if match:
                lines.append(match.group(1, 2))
        return resp, lines

    def group(self, name):
        """Process a GROUP command.  Argument:
        - group: the group name
        Returns:
        - resp: server response if successful
        - count: number of articles (string)
        - first: first article number (string)
        - last: last article number (string)
        - name: the group name"""

        resp = self.shortcmd('GROUP ' + name)
        if resp[:3] != '211':
            raise NNTPReplyError(resp)
        words = resp.split()
        # Defaults are ints, but any field present in the response stays a
        # string, as documented above.
        count = first = last = 0
        n = len(words)
        if n > 1:
            count = words[1]
            if n > 2:
                first = words[2]
                if n > 3:
                    last = words[3]
                    if n > 4:
                        name = words[4].lower()
        return resp, count, first, last, name

    def help(self, file=None):
        """Process a HELP command.  Returns:
        - resp: server response if successful
        - list: list of strings"""

        return self.longcmd('HELP',file)

    def statparse(self, resp):
        """Internal: parse the response of a STAT, NEXT or LAST command."""
        if resp[:2] != '22':
            raise NNTPReplyError(resp)
        words = resp.split()
        nr = 0
        id = ''
        n = len(words)
        if n > 1:
            nr = words[1]
            if n > 2:
                id = words[2]
        return resp, nr, id

    def statcmd(self, line):
        """Internal: process a STAT, NEXT or LAST command."""
        resp = self.shortcmd(line)
        return self.statparse(resp)

    def stat(self, id):
        """Process a STAT command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr:   the article number
        - id:   the message id"""

        return self.statcmd('STAT ' + id)

    def next(self):
        """Process a NEXT command.  No arguments.  Return as for STAT."""
        return self.statcmd('NEXT')

    def last(self):
        """Process a LAST command.  No arguments.  Return as for STAT."""
        return self.statcmd('LAST')

    def artcmd(self, line, file=None):
        """Internal: process a HEAD, BODY or ARTICLE command."""
        resp, list = self.longcmd(line, file)
        resp, nr, id = self.statparse(resp)
        return resp, nr, id, list

    def head(self, id):
        """Process a HEAD command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article's header"""

        return self.artcmd('HEAD ' + id)

    def body(self, id, file=None):
        """Process a BODY command.  Argument:
        - id: article number or message id
        - file: Filename string or file object to store the article in
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article's body or an empty list
                if file was used"""

        return self.artcmd('BODY ' + id, file)

    def article(self, id):
        """Process an ARTICLE command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article"""

        return self.artcmd('ARTICLE ' + id)

    def slave(self):
        """Process a SLAVE command.  Returns:
        - resp: server response if successful"""

        return self.shortcmd('SLAVE')

    def xhdr(self, hdr, str, file=None):
        """Process an XHDR command (optional server extension).  Arguments:
        - hdr: the header type (e.g. 'subject')
        - str: an article nr, a message id, or a range nr1-nr2
        Returns:
        - resp: server response if successful
        - list: list of (nr, value) strings"""

        pat = re.compile('^([0-9]+) ?(.*)\n?')
        resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str, file)
        for i in range(len(lines)):
            line = lines[i]
            m = pat.match(line)
            if m:
                lines[i] = m.group(1, 2)
        return resp, lines

    def xover(self, start, end, file=None):
        """Process an XOVER command (optional server extension) Arguments:
        - start: start of range
        - end: end of range
        Returns:
        - resp: server response if successful
        - list: list of (art-nr, subject, poster, date,
                         id, references, size, lines)"""

        resp, lines = self.longcmd('XOVER ' + start + '-' + end, file)
        xover_lines = []
        for line in lines:
            # Overview lines are tab-separated; fewer than 8 fields means
            # the server sent malformed data.
            elem = line.split("\t")
            try:
                xover_lines.append((elem[0],
                                    elem[1],
                                    elem[2],
                                    elem[3],
                                    elem[4],
                                    elem[5].split(),
                                    elem[6],
                                    elem[7]))
            except IndexError:
                raise NNTPDataError(line)
        return resp,xover_lines

    def xgtitle(self, group, file=None):
        """Process an XGTITLE command (optional server extension) Arguments:
        - group: group name wildcard (i.e. news.*)
        Returns:
        - resp: server response if successful
        - list: list of (name,title) strings"""

        line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
        resp, raw_lines = self.longcmd('XGTITLE ' + group, file)
        lines = []
        for raw_line in raw_lines:
            match = line_pat.search(raw_line.strip())
            if match:
                lines.append(match.group(1, 2))
        return resp, lines

    def xpath(self,id):
        """Process an XPATH command (optional server extension) Arguments:
        - id: Message id of article
        Returns:
        resp: server response if successful
        path: directory path to article"""

        resp = self.shortcmd("XPATH " + id)
        if resp[:3] != '223':
            raise NNTPReplyError(resp)
        try:
            [resp_num, path] = resp.split()
        except ValueError:
            raise NNTPReplyError(resp)
        else:
            return resp, path

    def date (self):
        """Process the DATE command. Arguments:
        None
        Returns:
        resp: server response if successful
        date: Date suitable for newnews/newgroups commands etc.
        time: Time suitable for newnews/newgroups commands etc."""

        resp = self.shortcmd("DATE")
        if resp[:3] != '111':
            raise NNTPReplyError(resp)
        elem = resp.split()
        if len(elem) != 2:
            raise NNTPDataError(resp)
        # Response body is 'YYYYMMDDhhmmss'; keep yymmdd and hhmmss parts.
        date = elem[1][2:8]
        time = elem[1][-6:]
        if len(date) != 6 or len(time) != 6:
            raise NNTPDataError(resp)
        return resp, date, time

    def post(self, f):
        """Process a POST command.  Arguments:
        - f: file containing the article
        Returns:
        - resp: server response if successful"""

        resp = self.shortcmd('POST')
        # Raises error_??? if posting is not allowed
        if resp[0] != '3':
            raise NNTPReplyError(resp)
        while 1:
            line = f.readline()
            if not line:
                break
            if line[-1] == '\n':
                line = line[:-1]
            # Dot-stuff lines starting with '.' per RFC 977.
            if line[:1] == '.':
                line = '.' + line
            self.putline(line)
        # Lone '.' terminates the article text.
        self.putline('.')
        return self.getresp()

    def ihave(self, id, f):
        """Process an IHAVE command.  Arguments:
        - id: message-id of the article
        - f:  file containing the article
        Returns:
        - resp: server response if successful
        Note that if the server refuses the article an exception is raised."""

        resp = self.shortcmd('IHAVE ' + id)
        # Raises error_??? if the server already has it
        if resp[0] != '3':
            raise NNTPReplyError(resp)
        while 1:
            line = f.readline()
            if not line:
                break
            if line[-1] == '\n':
                line = line[:-1]
            # Dot-stuff lines starting with '.' per RFC 977.
            if line[:1] == '.':
                line = '.' + line
            self.putline(line)
        self.putline('.')
        return self.getresp()

    def quit(self):
        """Process a QUIT command and close the socket.  Returns:
        - resp: server response if successful"""

        resp = self.shortcmd('QUIT')
        self.file.close()
        self.sock.close()
        del self.file, self.sock
        return resp
# Test retrieval when run as a script.
# Assumption: if there's a local news server, it's called 'news'.
# Assumption: if user queries a remote news server, it's named
# in the environment variable NNTPSERVER (used by slrn and kin)
# and we want readermode off.
if __name__ == '__main__':
    import os
    # NOTE(review): 'news' and os.environ["NNTPSERVER"] always evaluates the
    # right-hand side (the string 'news' is truthy), so this raises KeyError
    # whenever NNTPSERVER is unset -- the 'news' fallback described in the
    # comment above is dead code.  Probably intended:
    # os.environ.get("NNTPSERVER", "news").
    newshost = 'news' and os.environ["NNTPSERVER"]
    # Heuristic: an unqualified name (no dot) is taken to be the local
    # server, which needs reader mode; any truthy value enables it.
    if newshost.find('.') == -1:
        mode = 'readermode'
    else:
        mode = None
    s = NNTP(newshost, readermode=mode)
    resp, count, first, last, name = s.group('comp.lang.python')
    print resp
    print 'Group', name, 'has', count, 'articles, range', first, 'to', last
    resp, subs = s.xhdr('subject', first + '-' + last)
    print resp
    for item in subs:
        print "%7s %s" % item
    resp = s.quit()
    print resp
|
nmercier/linux-cross-gcc
|
win32/bin/Lib/nntplib.py
|
Python
|
bsd-3-clause
| 22,106
|
[
"Brian"
] |
61ddc278f54de75ef4c4d3b7f3cae18e59dfd98836d7ed0c701d24c963d630ca
|
"""
Holds user settings and various helper objects.
@since: 0.53
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import support, _, logger
import os
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
from zeroinstall import zerostore
from zeroinstall.injector.model import network_levels, network_full
from zeroinstall.injector.namespaces import config_site, config_prog
from zeroinstall.support import basedir
# Built-in fallback servers; both can be overridden per-instance via the
# corresponding attributes on Config.
DEFAULT_MIRROR = "http://roscidus.com/0mirror"
DEFAULT_KEY_LOOKUP_SERVER = 'https://keylookup.appspot.com'
class Config(object):
	"""
	Holds user settings and various lazily-created helper objects.

	@ivar auto_approve_keys: whether to approve known keys automatically
	@type auto_approve_keys: bool
	@ivar handler: handler for main-loop integration
	@type handler: L{handler.Handler}
	@ivar key_info_server: the base URL of a key information server
	@type key_info_server: str
	@ivar mirror: the base URL of a mirror site for feeds, keys and implementations (since 1.10)
	@type mirror: str | None
	@ivar freshness: seconds since a feed was last checked before it is considered stale
	@type freshness: int
	"""
	__slots__ = ['help_with_testing', 'freshness', 'network_use', 'mirror', 'key_info_server', 'auto_approve_keys',
		     '_fetcher', '_stores', '_iface_cache', '_handler', '_trust_mgr', '_trust_db', '_app_mgr']

	def __init__(self, handler = None):
		"""@type handler: L{zeroinstall.injector.handler.Handler} | None"""
		self.help_with_testing = False
		self.freshness = 60 * 60 * 24 * 30	# one month, in seconds
		self.network_use = network_full
		self._handler = handler
		# Helper objects are created on first use; see the properties below.
		self._app_mgr = self._fetcher = self._stores = self._iface_cache = self._trust_mgr = self._trust_db = None
		self.mirror = DEFAULT_MIRROR
		self.key_info_server = DEFAULT_KEY_LOOKUP_SERVER
		self.auto_approve_keys = True

	# Backwards-compatible alias for the old attribute name.
	feed_mirror = property(lambda self: self.mirror, lambda self, value: setattr(self, 'mirror', value))

	@property
	def stores(self):
		"""The implementation stores (created lazily)."""
		if not self._stores:
			self._stores = zerostore.Stores()
		return self._stores

	@property
	def iface_cache(self):
		"""The interface cache (currently the module-level singleton)."""
		if not self._iface_cache:
			from zeroinstall.injector import iface_cache
			self._iface_cache = iface_cache.iface_cache
			#self._iface_cache = iface_cache.IfaceCache()
		return self._iface_cache

	@property
	def fetcher(self):
		"""The download scheduler (created lazily)."""
		if not self._fetcher:
			from zeroinstall.injector import fetch
			self._fetcher = fetch.Fetcher(self)
		return self._fetcher

	@property
	def trust_mgr(self):
		"""The GPG key trust manager (created lazily)."""
		if not self._trust_mgr:
			from zeroinstall.injector import trust
			self._trust_mgr = trust.TrustMgr(self)
		return self._trust_mgr

	@property
	def trust_db(self):
		"""The trust database singleton (resolved lazily).
		NOTE: the previous implementation assigned self._trust_db but never
		returned it, so reading this property always gave None; it also
		re-imported and re-assigned on every access.  Fixed to follow the
		same lazy-init pattern as the other properties."""
		if not self._trust_db:
			from zeroinstall.injector import trust
			self._trust_db = trust.trust_db
		return self._trust_db

	@property
	def handler(self):
		"""Handler for main-loop integration (created lazily).
		A ConsoleHandler is used when stdout is a tty, a plain Handler
		otherwise."""
		if not self._handler:
			from zeroinstall.injector import handler
			if os.isatty(1):
				self._handler = handler.ConsoleHandler()
			else:
				self._handler = handler.Handler()
		return self._handler

	@property
	def app_mgr(self):
		"""The application manager (created lazily)."""
		if not self._app_mgr:
			from zeroinstall import apps
			self._app_mgr = apps.AppManager(self)
		return self._app_mgr

	def save_globals(self):
		"""Write global settings to the per-user configuration directory.
		The file is written to a '.new' path and then renamed into place,
		so an interrupted save cannot leave a half-written config."""
		parser = ConfigParser.ConfigParser()
		parser.add_section('global')

		parser.set('global', 'help_with_testing', str(self.help_with_testing))
		parser.set('global', 'network_use', self.network_use)
		parser.set('global', 'freshness', str(self.freshness))
		parser.set('global', 'auto_approve_keys', str(self.auto_approve_keys))

		path = basedir.save_config_path(config_site, config_prog)
		path = os.path.join(path, 'global')
		with open(path + '.new', 'wt') as stream:
			parser.write(stream)
		support.portable_rename(path + '.new', path)
def load_config(handler = None):
	"""@type handler: L{zeroinstall.injector.handler.Handler} | None
	@rtype: L{Config}"""
	config = Config(handler)

	# Seed the parser with built-in defaults so every key resolves even
	# when no config file exists or a key is missing from it.
	parser = ConfigParser.RawConfigParser()
	parser.add_section('global')
	for option, default in (('help_with_testing', 'False'),
				('freshness', str(60 * 60 * 24 * 30)),	# One month
				('network_use', 'full'),
				('auto_approve_keys', 'True')):
		parser.set('global', option, default)

	path = basedir.load_first_config(config_site, config_prog, 'global')
	if path:
		logger.info("Loading configuration from %s", path)
		try:
			parser.read(path)
		except Exception as ex:
			logger.warning(_("Error loading config: %s"), str(ex) or repr(ex))

	config.help_with_testing = parser.getboolean('global', 'help_with_testing')
	config.network_use = parser.get('global', 'network_use')
	config.freshness = int(parser.get('global', 'freshness'))
	config.auto_approve_keys = parser.getboolean('global', 'auto_approve_keys')

	assert config.network_use in network_levels, config.network_use

	return config
|
AlexanderRyzhko/0install-TUF
|
zeroinstall/injector/config.py
|
Python
|
lgpl-2.1
| 4,768
|
[
"VisIt"
] |
f7d25b9744ce18a5e0f6452af2889f0afb62da0f6ad1f616df4744dd0c5a613b
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
from string import Template
from builtins_generator import BuiltinsGenerator, WK_lcfirst, WK_ucfirst
from builtins_templates import BuiltinsGeneratorTemplates as Templates
log = logging.getLogger('global')
class BuiltinsInternalsWrapperImplementationGenerator(BuiltinsGenerator):
    """Generates <Namespace>JSBuiltinInternals.cpp: the implementation of the
    wrapper class that owns the 'internal'-annotated builtin objects, visits
    them for GC, and installs their static globals on the global object."""
    def __init__(self, model):
        BuiltinsGenerator.__init__(self, model)
        # Materialize the selection into a list.  Under Python 3, filter()
        # returns a one-shot iterator, and self.internals is iterated by
        # several generate_* methods below -- a lazy iterator would be
        # exhausted after the first pass, silently dropping output.
        # (Under Python 2 filter() already returned a list, so this is
        # behaviorally identical there.)
        self.internals = [obj for obj in model.objects if 'internal' in obj.annotations]

    def output_filename(self):
        """Name of the generated file, e.g. WebCoreJSBuiltinInternals.cpp."""
        return "%sJSBuiltinInternals.cpp" % self.model().framework.setting('namespace')

    def generate_output(self):
        """Assemble the full .cpp file from its sections."""
        args = {
            'namespace': self.model().framework.setting('namespace'),
        }

        sections = []
        sections.append(self.generate_license())
        sections.append(Template(Templates.DoNotEditWarning).substitute(args))
        sections.append(self.generate_primary_header_includes())
        sections.append(self.generate_secondary_header_includes())
        sections.append(Template(Templates.NamespaceTop).substitute(args))
        sections.append(self.generate_section_for_object())
        sections.append(Template(Templates.NamespaceBottom).substitute(args))

        return "\n\n".join(sections)

    def generate_secondary_header_includes(self):
        """#include lines needed by the generated implementation."""
        header_includes = [
            (["WebCore"],
                ("WebCore", "JSDOMGlobalObject.h"),
            ),
            (["WebCore"],
                ("WebCore", "WebCoreJSClientData.h"),
            ),
            (["WebCore"],
                ("JavaScriptCore", "heap/HeapInlines.h"),
            ),
            (["WebCore"],
                ("JavaScriptCore", "heap/SlotVisitorInlines.h"),
            ),
            (["WebCore"],
                ("JavaScriptCore", "runtime/JSCJSValueInlines.h"),
            ),
            (["WebCore"],
                ("JavaScriptCore", "runtime/StructureInlines.h"),
            ),
        ]
        return '\n'.join(self.generate_includes_from_entries(header_includes))

    def generate_section_for_object(self):
        """The three member-function definitions, in declaration order."""
        lines = []
        lines.append(self.generate_constructor())
        lines.append(self.generate_visit_method())
        lines.append(self.generate_initialize_method())
        return '\n'.join(lines)

    def accessor_name(self, object):
        """C++ accessor name for an object, e.g. 'readableStream'."""
        return WK_lcfirst(object.object_name)

    def member_name(self, object):
        """C++ data-member name for an object, e.g. 'm_readableStream'."""
        return "m_" + self.accessor_name(object)

    def member_type(self, object):
        """C++ type of the per-object member, e.g. 'ReadableStreamBuiltinFunctions'."""
        return WK_ucfirst(object.object_name) + "BuiltinFunctions"

    def generate_constructor(self):
        """Constructor: initializes each member, guarded by its #if conditional."""
        lines = ["JSBuiltinInternalFunctions::JSBuiltinInternalFunctions(JSC::VM& vm)",
                 "    : m_vm(vm)"]
        for object in self.internals:
            initializer = "    , %s(m_vm)" % self.member_name(object)
            lines.append(BuiltinsGenerator.wrap_with_guard(object.annotations.get('conditional'), initializer))
        lines.append("{")
        lines.append("    UNUSED_PARAM(vm);")
        lines.append("}\n")
        return '\n'.join(lines)

    def property_macro(self, object):
        """Expand DECLARE_GLOBAL_STATIC over one object's builtin functions."""
        lines = []
        lines.append("#define DECLARE_GLOBAL_STATIC(name) \\")
        lines.append("    JSDOMGlobalObject::GlobalPropertyInfo( \\")
        lines.append("        clientData.builtinFunctions().%sBuiltins().name##PrivateName(), %s().m_##name##Function.get() , JSC::PropertyAttribute::DontDelete | JSC::PropertyAttribute::ReadOnly)," % (self.accessor_name(object), self.accessor_name(object)))
        lines.append("    WEBCORE_FOREACH_%s_BUILTIN_FUNCTION_NAME(DECLARE_GLOBAL_STATIC)" % object.object_name.upper())
        lines.append("#undef DECLARE_GLOBAL_STATIC")
        return '\n'.join(lines)

    def generate_visit_method(self):
        """visit(): GC-visits every member, guarded by its #if conditional."""
        lines = ["void JSBuiltinInternalFunctions::visit(JSC::SlotVisitor& visitor)",
                 "{"]
        for object in self.internals:
            visit = "    %s.visit(visitor);" % self.member_name(object)
            lines.append(BuiltinsGenerator.wrap_with_guard(object.annotations.get('conditional'), visit))
        lines.append("    UNUSED_PARAM(visitor);")
        lines.append("}\n")
        return '\n'.join(lines)

    def _generate_initialize_static_globals(self):
        """Body fragment that registers the static globals on the global object."""
        lines = ["    JSVMClientData& clientData = *static_cast<JSVMClientData*>(m_vm.clientData);",
                 "    JSDOMGlobalObject::GlobalPropertyInfo staticGlobals[] = {"]
        for object in self.internals:
            lines.append(BuiltinsGenerator.wrap_with_guard(object.annotations.get('conditional'), self.property_macro(object)))
        lines.append("    };")
        lines.append("    globalObject.addStaticGlobals(staticGlobals, WTF_ARRAY_LENGTH(staticGlobals));")
        lines.append("    UNUSED_PARAM(clientData);")
        return '\n'.join(lines)

    def generate_initialize_method(self):
        """initialize(): init each member, then install the static globals."""
        lines = ["void JSBuiltinInternalFunctions::initialize(JSDOMGlobalObject& globalObject)",
                 "{",
                 "    UNUSED_PARAM(globalObject);"]
        for object in self.internals:
            init = "    %s.init(globalObject);" % self.member_name(object)
            lines.append(BuiltinsGenerator.wrap_with_guard(object.annotations.get('conditional'), init))
        lines.append("")
        # NOTE(review): set iteration order is arbitrary, so the combined
        # " || " guard may vary between runs -- harmless but not reproducible.
        guards = set([object.annotations.get('conditional') for object in self.internals if 'conditional' in object.annotations])
        lines.append(BuiltinsGenerator.wrap_with_guard(" || ".join(guards), self._generate_initialize_static_globals()))
        lines.append("}")
        return '\n'.join(lines)
|
teamfx/openjfx-8u-dev-rt
|
modules/web/src/main/native/Source/JavaScriptCore/Scripts/builtins/builtins_generate_internals_wrapper_implementation.py
|
Python
|
gpl-2.0
| 7,074
|
[
"VisIt"
] |
a7ee97ff98b75ede14b38b9e281a77828b76bd37593c6be5473681de7e6c883e
|
#!/usr/bin/python -u
import datetime, os, sys, stat, subprocess, re, shutil, time
import select
import numpy
import netcdf
import ConfigParser
# Settings are read once at import time from a fixed path; a missing config
# file raises IOError here and aborts the program immediately.
config = ConfigParser.ConfigParser()
config.readfp(open(r'/home/pi/rpilogger/catnc.conf'))
# Each location/remote option falls back to a hard-coded default when it is
# absent from the [default] section.
if (config.has_option('default','LOCATION_IN')):
    _LOCATION_IN = config.get('default', 'LOCATION_IN')
else:
    _LOCATION_IN = "/home/pi/data/tmp"
if (config.has_option('default','LOCATION_OUT')):
    _LOCATION_OUT = config.get('default', 'LOCATION_OUT')
else:
    _LOCATION_OUT = "/home/pi/data/"
if (config.has_option('default','REMOTE')):
    _REMOTE = config.get('default', 'REMOTE')
else:
    _REMOTE = ""
_MINUTES_A_DAY = 24*60
_SAMPLES_A_MINUTE = 30000
# Pre-allocated per-channel sample buffers sized _SAMPLES_A_MINUTE*60*2
# (presumably two hours of samples -- TODO confirm against the sample rate).
# Only d_c/d_d (channels 3 and 4) are filled by concatenate() in this part
# of the file; d_a/d_b appear unused here.
d_a = numpy.zeros(_SAMPLES_A_MINUTE*60*2,dtype='float32') #sure < sys.maxint
d_b = numpy.zeros(_SAMPLES_A_MINUTE*60*2,dtype='float32')
d_c = numpy.zeros(_SAMPLES_A_MINUTE*60*2,dtype='float32')
d_d = numpy.zeros(_SAMPLES_A_MINUTE*60*2,dtype='float32')
# Module-level state shared between concatenate() and savebin().
processed=[]
differences=[]
i_mode=""
def checkUser():
    """Non-blocking poll of stdin: return True if the user pressed return.

    Uses select() with a near-zero timeout so callers can poll inside a
    processing loop.  Any pending line is consumed (and discarded) so the
    same keypress is not reported twice.  The previous version stored the
    line in a local named 'input', shadowing the builtin and never using it.
    """
    readable, _, _ = select.select([sys.stdin], [], [], 0.0001)
    for stream in readable:
        if stream == sys.stdin:
            sys.stdin.readline()  # consume the pending line
            return True
    return False
def filesize(loc):
    """Return the size of the file at *loc* as a human-readable string,
    e.g. '1.5 KB'.  Trailing zeros (and a trailing dot) are trimmed."""
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    nbytes = os.path.getsize(loc)
    if nbytes == 0:
        return '0 B'
    # Divide down by 1024 until the value fits the largest suitable unit.
    idx = 0
    while nbytes >= 1024 and idx < len(suffixes) - 1:
        nbytes /= 1024.
        idx += 1
    trimmed = ('%.2f' % nbytes).rstrip('0').rstrip('.')
    return '%s %s' % (trimmed, suffixes[idx])
def mkdir(directory):
    """Create *directory* (including parents) if it does not already exist.
    On failure, prints the error and exits the process with status -1."""
    if not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as e:
            print "Error while creating directory %s!\n %s" % (directory,e)
            exit(-1)
def concatenate(daydir,resdir):
global d_a, d_b, d_c, d_d, processed, differences, i_mode
files = [w for w in sorted(os.listdir(daydir))]
print "%d files found, will process %s" % (len(files), "all" if (lim==_MINUTES_A_DAY) else str(lim))
if (len(files) < _MINUTES_A_DAY):
print "files are missing. There should be 24*60=1440"
if (i_mode): #interactive mode
print "\t(press return (enter) to gently abort)"
processed=[]
differences=[]
nans=0
d_ix=0
for f in (f for f in files if (re.search('[0-2][0-9][0-5][0-9][0-5][0-9].[0-9][0-9][0-9][0-9].nc',f))): #(f.endswith(".nc"))):
processed.append(datetime.datetime.strptime(dd+f,"%Y/%m/%d/%H%M%S.%f.nc"))
d_lim = processed[0]+datetime.timedelta(hours=1)
d_lim = d_lim.replace(minute=0,second=0,microsecond=0)
try:
fid = netcdf.Dataset(os.path.join(daydir,f), 'r')
except IOError as e:
print "Unexpected error while reading file: %s \n %s" % (f, e)
return -1
except RuntimeError as e:
# if file is damadged simply skip it
print "Unexpected error while reading file: %s \n %s" % (f, e)
continue
else:
if (len(processed)==1):
start = getattr(fid,'start')
sps = getattr(fid,'sps') #500
import pdb; pdb.set_trace()
sampl = fid.variables['ch3'].size #30000
units = fid.variables['ch3'].units #mV
print "first file: %s has %d samples (%d seconds at %d Hz)" % (f, sampl, sampl/sps, sps)
if ((processed[0]-dy).total_seconds() < 60.0): # if right after midnight
dz = dy-datetime.timedelta(days=1) #the day before
dzt = dz.strftime('%Y/%m/%d/')
dzdir = os.path.join(_LOCATION_IN,dzt)
if not os.path.exists(dzdir): #try to get last file of previous day
print "could not find the day before (%s) to obtain the file spreading midnight" % dzdir
else:
filep = sorted(os.listdir(dzdir))[-1]
fp_st = datetime.datetime.strptime(dzt+filep,"%Y/%m/%d/%H%M%S.%f.nc")
oldd_s = datetime.timedelta.total_seconds(dy-fp_st)
if (oldd_s < 60.0): #if file covers midnight
fp_y=os.path.join(_LOCATION_IN,dzt+filep)
od_s = 60.0 - oldd_s
print "file of day before (%s) contains %.4f seconds of current day" % (dzt+filep,od_s)
try:
fp = netcdf.Dataset(fp_y, 'r')
except (IOError, RuntimeError) as e:
print "Unexpected error while reading file: %s \n %s" % (fp_y, e)
continue
else:
if (getattr(fp,'sps') == sps and fp.variables['ch3'].size == sampl):
si = int(numpy.round(oldd_s * sps))
inn=fp.variables['ch3'][si:].size
d_c[d_ix:d_ix+inn] = fp.variables['ch3'][si:]
d_d[d_ix:d_ix+inn] = fp.variables['ch4'][si:]
d_ix+=inn
print "imported %d values from %s" %(inn,dzt+filep)
processed[0] = dy
start = dy.strftime("%Y-%m-%d %H:%M:%S.%f")
else:
print "Error while processing %s. File inconsistent!" %(fp_y)
return -1
fp.close()
#import pdb; pdb.set_trace()
d_c[d_ix:d_ix+fid.variables['ch3'].size] = fid.variables['ch3'][:] # like append
d_d[d_ix:d_ix+fid.variables['ch3'].size] = fid.variables['ch4'][:] #
d_ix+=fid.variables['ch3'].size
if (len(processed)>1):
try:
fid2 = netcdf.Dataset(os.path.join(daydir,f), 'r')
except (IOError,RuntimeError) as e:
print "Unexpected error while reading file: %s \n %s" % (f, e)
continue
else:
if (sps != getattr(fid2,'sps')):
print "sampling rate inconsistent at %s! %d, now: %d" % (f, sps, getattr(fid2,'sps'))
break
if (sampl != fid2.variables['ch3'].size):
print "file length differs at %s! %d, now: %d" % (f, sampl, fid2.variables['ch3'].size)
break
if (len(processed)>2): #ignore first interval due to its possibly bigger range
time_delta = processed[-1] - processed[-2]
differences = numpy.hstack((differences, time_delta.total_seconds()))
m=int(numpy.round((differences[-1]-60.0)*sps))
if (m != 0):
if (m > 0):
print "WARN: %3d samples are missing. inserted %d*NaN" % (m,m)
print "\t %s" % (processed[-2].strftime("%H:%M:%S.%f"))
print "\t %s" % (processed[-1].strftime("%H:%M:%S.%f"))
print " diff: %10.4f seconds" % (differences[-1]-60.0)
if (processed[-1] >= d_lim): # if more files are missing so that next hour is also rare
time_delta = d_lim - processed[-2]
m=int(numpy.round((time_delta.total_seconds()-60.0)*sps))
print "WARN: even more files are missing\n will insert only %d*NaN (%10.4f secs) and skip some files" % (m,(float)(m)/sps)
d_ins = numpy.empty((m))
d_ins[:] = numpy.NAN
d_c[d_ix:d_ix+m] = d_ins
d_d[d_ix:d_ix+m] = d_ins
d_ix+=m
nans+=m
stop = d_lim.strftime("%Y-%m-%d %H:%M:%S.%f")
differences[-1]=time_delta.total_seconds()
if (savebin(resdir=resdir, d_ix=d_ix, start=start, stop=stop, nans=nans, sps=sps, units=units)):
return -1
processed=[processed[-1]]
differences=[]
nans=0
d_ix=0
d_lim = processed[0]+datetime.timedelta(hours=1)
d_lim = d_lim.replace(minute=0,second=0,microsecond=0)
inn=fid2.variables['ch3'][:].size
d_c[d_ix:d_ix+inn] = fid2.variables['ch3'][:]
d_d[d_ix:d_ix+inn] = fid2.variables['ch4'][:]
d_ix+=inn
start=stop
#import pdb; pdb.set_trace()
else:
d_ins = numpy.empty((m))
d_ins[:] = numpy.NAN
d_c[d_ix:d_ix+m] = d_ins
d_d[d_ix:d_ix+m] = d_ins
d_ix+=m
nans+=m
elif (m < 0):
print "WARN: files to dense! %d samples seems to be to much before %s" % (m, f)
print "\t %s" % (processed[-2].strftime("%H:%M:%S.%f"))
print "\t %s" % (processed[-1].strftime("%H:%M:%S.%f"))
print " diff: %10.4f seconds" % (differences[-1]-60.0)
#check if file not too long
if (processed[-1] < d_lim): # present file yet in batch
rs = (d_lim - processed[-1]).total_seconds()
if (rs < 60.0): #however if it is the last one, which needs to be truncated
si = int(numpy.round(rs * sps))
d_c[d_ix:d_ix+si] = fid2.variables['ch3'][:si]
d_d[d_ix:d_ix+si] = fid2.variables['ch4'][:si]
d_ix+=si
print "from last file %s only %f seconds (%d samples) taken" %(f, rs, si)
stop = d_lim.strftime("%Y-%m-%d %H:%M:%S.%f")
if (savebin(resdir=resdir, d_ix=d_ix, start=start, stop=stop, nans=nans, sps=sps, units=units)):
return -1
#import pdb; pdb.set_trace()
processed=[]
differences=[]
nans=0
d_ix=0
inn=fid2.variables['ch3'][si:].size
d_c[d_ix:d_ix+inn] = fid2.variables['ch3'][si:]
d_d[d_ix:d_ix+inn] = fid2.variables['ch4'][si:]
d_ix+=inn
start=stop
processed.append(d_lim)
else:
d_c[d_ix:d_ix+fid2.variables['ch3'].size] = fid2.variables['ch3'][:] # append
d_d[d_ix:d_ix+fid2.variables['ch3'].size] = fid2.variables['ch4'][:]
d_ix+=fid2.variables['ch3'].size
fid2.close()
fid.close()
if (i_mode and checkUser()):
print "user interrupt! %d files processed\n" %(len(processed))
return -1
break
return len(files)
def savebin(resdir, d_ix, start, stop, nans, sps, units):
global d_c, d_d, processed, differences
try:
p
except NameError:
p=""
else:
print "waiting for scp"
sts = os.waitpid(p.pid, 0)
print " %d files processed with in total %d records" %(len(processed), d_ix)
print " %d NaN inserted in total (%.3f seconds)" %(nans, nans/sps)
if (len(processed) >= 2):
s_intervals = "%.4f/%.4f/%.7f/%.7f" % (max(differences), min(differences), numpy.mean(differences),numpy.std(differences))
print " intervals maximum/minimum/mean/std: (%s) s" % s_intervals
jitter = (differences -60 )*1000
s_jitter = "%+.4f/%.4f/%.7f/%.7f" % (max(jitter), min(jitter), numpy.mean(jitter),numpy.std(jitter))
print " jitter maximum/minimum/mean/std: (%s) ms" % s_jitter
#print "(jitter = nominal - actual = 60.0000 - x)"
try: #create file
resfile = processed[0].strftime("%Y-%m-%dT%H.nc")
resf = os.path.join(resdir,resfile)
remf = processed[0].strftime("%Y/%m/%d/")
if (os.path.exists(resf) and os.path.getsize(resf)>1*1024L*1024L):
print " files already concatenated! Overwriting %s (%s)" %(resf,filesize(resf))
print " writing data to file %s" % resf
if (i_mode): #interactive mode
print " please have patience, this might need several minutes"
fidw = netcdf.Dataset(os.path.join(resdir,resfile), 'w', format='NETCDF4')
fidw.setncatts({'files':len(processed), 'sps':sps, 'nan':nans,\
'start':start, 'stop':stop, 'timezone':'UTC',\
'intervals': s_intervals, 'jitter': s_jitter})
fidw.createDimension('NS',d_ix)
fidw.createDimension('WE',d_ix)
fNS=fidw.createVariable('NS',numpy.float32,('NS',),zlib=True) #TODO fid.variables['ch3'].dtype
fWE=fidw.createVariable('WE',numpy.float32,('WE',),zlib=True) #TODO sign reversed
fNS.units=units
fWE.units=units
# write data back
fNS[:]=d_c[:d_ix]
fWE[:]=d_d[:d_ix]
print " writing %d records to file ..." % d_ix
fidw.close()
except:
print " Unexpected error while writing to file: %s" % (resfile)
print sys.exc_info()
return -1
else:
print " %s written! Size: %s" % (resf, filesize(resf))
if (_REMOTE!=""):
pss = subprocess.Popen(["ssh "+_REMOTE+" 'mkdir -p lemi-data/"+remf+"'"], stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
output, errors = pss.communicate()
if (errors):
print "Could not create remote directory lemi-data/"+remf+" on server: "+_REMOTE+" !"
print "Check for ssh keys and permissions!"
print "No file copy (scp) possible"
else :
p = subprocess.Popen(["scp", resf, _REMOTE+":lemi-data/"+remf+resfile])
#import pdb; pdb.set_trace()
print " "
return 0
if __name__ == "__main__":
    # Usage:
    #   ./catnc.py                 --> non-interactive mode (process yesterday)
    #   ./catnc.py 2015-03-17      --> interactive: process the given date
    #                                  (the 2015/03/17 format works as well)
    #   ./catnc.py 2015-03-17 34   --> as above, but limit to 34 files (debug)
    # NOTE: Python 2 script (print statements, raw_input, unbuffered fdopen).
    i_mode=""
    lim=_MINUTES_A_DAY
    if (len(sys.argv) > 1):
        try: # if first argument is limit
            lim=int(sys.argv[1])
        except ValueError: # if not, first is probably a date
            try:
                i_mode=datetime.datetime.strptime(sys.argv[1],"%Y/%m/%d")
            except ValueError:
                try:
                    i_mode=datetime.datetime.strptime(sys.argv[1],"%Y-%m-%d")
                except ValueError: # else ignore first
                    i_mode=""
            # NOTE(review): the second argument is only parsed as a limit when
            # the first one was not itself a limit -- confirm intended nesting.
            try: # second argument a limit?
                lim=int(sys.argv[2])
            except (ValueError, IndexError):
                lim=_MINUTES_A_DAY
    # redirect stdout and stderr through `tee` so all output also lands in the log
    _catnc_logfile = os.path.join(_LOCATION_OUT,"catnc.log")
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # Unbuffer output
    tee = subprocess.Popen(["tee", _catnc_logfile], stdin=subprocess.PIPE)
    os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
    os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
    print "starting %s " % ( ("for %s " %sys.argv[1]) if i_mode else "in non-interactive-mode")
    if (lim!=_MINUTES_A_DAY):
        print "debug mode active with limit=%d" % lim
    t1 = datetime.datetime.now()
    # determine the day to process: the given date, or yesterday by default
    dy = i_mode
    if not dy:
        d = datetime.datetime.now()
        print "Now it is " + str(d)
        dy = d-datetime.timedelta(days=1)
    dy = dy.replace(hour=0,minute=0,second=0,microsecond=0)
    dd = dy.strftime('%Y/%m/%d/')
    print "processing " + dd
    daydir = os.path.join(_LOCATION_IN,dd)
    resdir = os.path.join(_LOCATION_OUT,dy.strftime('%Y/%m/%d'))
    if not os.path.exists(daydir):
        print "input directory %s does not exists! Will exit!" %(daydir)
        sys.exit(-1)
    mkdir(resdir)
    if os.listdir(resdir):
        print "files possibly already concatenated. Directory %s exists." %(resdir)
    rv = concatenate(daydir=daydir,resdir=resdir)
    if (rv >= 0):
        print "concat ready, %d files processed of %s" % (rv,daydir)
        # clean-up 1: remove raw input files that are 4 days old
        # (prompted in interactive mode, automatic otherwise)
        dby = dy-datetime.timedelta(days=4)
        dbdir = os.path.join(_LOCATION_IN,dby.strftime('%Y/%m/%d'))
        strq = "do you want to remove the files in %s ? (y/n) " % (dbdir)
        if (os.path.exists(dbdir) and ( (not i_mode) or (i_mode and raw_input(strq) == 'y'))):
            pr = subprocess.Popen(["rm -r "+dbdir],stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
            output, errors = pr.communicate()
            if (errors or output):
                print "output: %s, errors: %s" %(output,errors)
            else:
                print "files in %s deleted!" %(dbdir)
        # clean-up 2: remove local results that are 10 days old, but only after
        # checking (via `du` over ssh) that the remote copy has the same size
        dqy = dy-datetime.timedelta(days=10)
        dqdir = os.path.join(_LOCATION_OUT,dqy.strftime('%Y/%m/%d'))
        strq = "do you want to remove the files in %s ? (y/n) " % (dqdir)
        if (os.path.exists(dqdir) and ( (not i_mode) or (i_mode and raw_input(strq) == 'y'))):
            remf=dqy.strftime('%Y/%m/%d')
            time.sleep(60) #wait upload to finish
            pss = subprocess.Popen(["ssh "+_REMOTE+" 'du -s lemi-data/"+remf+"'"], stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
            output, errors = pss.communicate()
            # NOTE(review): `du` separates columns with a tab on many systems;
            # split(" ") may need to be a plain split() -- confirm on target host
            size_remote=int(output.split(" ")[0])
            ss=subprocess.check_output(['du', '-s', dqdir])
            size_local=int(ss.split(" ")[0])
            if (size_local == size_remote):
                pr = subprocess.Popen(["rm -r "+dqdir],stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
                output, errors = pr.communicate()
                if (errors or output):
                    print "output: %s, errors: %s" %(output,errors)
                else:
                    print "files in %s deleted!" %(dqdir)
            else:
                print "Error! Will not delete local directory! Size remote: %d, size local: %d" %(size_remote,size_local)
    else:
        print "Error! concatenate function returned %d\n" %(rv)
    td = datetime.datetime.now() - t1
    print "script run for %s (hours:mins:secs)" % str(td)
    print "\n"
    # keep a copy of the log next to the results and push it to the server
    shutil.copyfile(_catnc_logfile, os.path.join(resdir,"catnc.txt"))
    if (_REMOTE!=""):
        p = subprocess.Popen(["scp", os.path.join(resdir,"catnc.txt"), _REMOTE+":lemi-data/"+dy.strftime('%Y/%m/%d/')+"catnc.txt"])
        sts = os.waitpid(p.pid, 0)
    sys.exit(rv)
|
deguss/rpilogger
|
development/catnc.py
|
Python
|
gpl-2.0
| 19,981
|
[
"NetCDF"
] |
7939f053f7a32a7301ac1e5adab6bb7122befd3b612a5bdcb169944ec71229b9
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# Eric Chang <ericchang2017@u.northwestern.edu>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
from distutils.version import LooseVersion
import numpy as np
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import _argmax, nanpercentile
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
from .label import LabelEncoder
# Numerical tolerance shared by transformers in this module (exact use is
# outside this chunk -- TODO confirm against the full file).
BOUNDS_THRESHOLD = 1e-7
# Py2/Py3 compatibility: use the iterator variants of the builtins everywhere.
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
# Public API of this module (controls `from ... import *`).
__all__ = [
    'Binarizer',
    'KernelCenterer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'QuantileTransformer',
    'PowerTransformer',
    'add_dummy_feature',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
    'minmax_scale',
    'quantile_transform',
    'power_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis

    Center to the mean and component wise scale to unit variance.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the means and standard deviations along. If 0,
        independently standardize each feature, otherwise (if 1) standardize
        each sample.
    with_mean : boolean, True by default
        If True, center the data before scaling.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSC matrix and if axis is 1).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_mean=False` (in that case, only variance scaling will be
    performed on the features of the CSC matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSC matrix.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    See also
    --------
    StandardScaler: Performs scaling to unit variance using the``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """  # noqa
    X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
                    warn_on_dtype=True, estimator='the scale function',
                    dtype=FLOAT_DTYPES)
    if sparse.issparse(X):
        # Sparse path: variance scaling only, and only along axis 0.
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead"
                " See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        if with_std:
            _, var = mean_variance_axis(X, axis=0)
            var = _handle_zeros_in_scale(var, copy=False)
            inplace_column_scale(X, 1 / np.sqrt(var))
    else:
        X = np.asarray(X)
        if with_mean:
            mean_ = np.mean(X, axis)
        if with_std:
            scale_ = np.std(X, axis)
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
            mean_1 = Xr.mean(axis=0)
            # Verify that mean_1 is 'close to zero'. If X contains very
            # large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, a pre-scaling of the
            # concerned feature is efficient, for instance by its mean or
            # maximum.
            if not np.allclose(mean_1, 0):
                warnings.warn("Numerical issues were encountered "
                              "when centering the data "
                              "and might not be solved. Dataset may "
                              "contain too large values. You may need "
                              "to prescale your features.")
                Xr -= mean_1
        if with_std:
            scale_ = _handle_zeros_in_scale(scale_, copy=False)
            Xr /= scale_
            if with_mean:
                mean_2 = Xr.mean(axis=0)
                # If mean_2 is not 'close to zero', it comes from the fact that
                # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
                # if mean_1 was close to zero. The problem is thus essentially
                # due to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
                if not np.allclose(mean_2, 0):
                    warnings.warn("Numerical issues were encountered "
                                  "when scaling the data "
                                  "and might not be solved. The standard "
                                  "deviation of the data is probably "
                                  "very close to 0. ")
                    Xr -= mean_2
    return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Transforms features by scaling each feature to a given range.

    This estimator scales and translates each feature individually such
    that it is in the given range on the training set, i.e. between
    zero and one.

    The transformation is given by::

        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min

    where min, max = feature_range.

    This transformation is often used as an alternative to zero mean,
    unit variance scaling.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    feature_range : tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    copy : boolean, optional, default True
        Set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array).

    Attributes
    ----------
    min_ : ndarray, shape (n_features,)
        Per feature adjustment for minimum.
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.

        .. versionadded:: 0.17
           *scale_* attribute.
    data_min_ : ndarray, shape (n_features,)
        Per feature minimum seen in the data

        .. versionadded:: 0.17
           *data_min_*
    data_max_ : ndarray, shape (n_features,)
        Per feature maximum seen in the data

        .. versionadded:: 0.17
           *data_max_*
    data_range_ : ndarray, shape (n_features,)
        Per feature range ``(data_max_ - data_min_)`` seen in the data

        .. versionadded:: 0.17
           *data_range_*

    Examples
    --------
    >>> from sklearn.preprocessing import MinMaxScaler
    >>>
    >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
    >>> scaler = MinMaxScaler()
    >>> print(scaler.fit(data))
    MinMaxScaler(copy=True, feature_range=(0, 1))
    >>> print(scaler.data_max_)
    [ 1. 18.]
    >>> print(scaler.transform(data))
    [[0.   0.  ]
     [0.25 0.25]
     [0.5  0.5 ]
     [1.   1.  ]]
    >>> print(scaler.transform([[2, 2]]))
    [[1.5 0. ]]

    See also
    --------
    minmax_scale: Equivalent function without the estimator API.

    Notes
    -----
    NaNs are treated as missing values: disregarded in fit, and maintained in
    transform.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """

    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.min_
            del self.n_samples_seen_
            del self.data_min_
            del self.data_max_
            del self.data_range_

    def fit(self, X, y=None):
        """Compute the minimum and maximum to be used for later scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of min and max on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.

        y
            Ignored
        """
        feature_range = self.feature_range
        if feature_range[0] >= feature_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(feature_range))

        if sparse.issparse(X):
            raise TypeError("MinMaxScaler does no support sparse input. "
                            "You may consider to use MaxAbsScaler instead.")

        X = check_array(X, copy=self.copy, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES,
                        force_all_finite="allow-nan")

        # nanmin/nanmax: NaNs are treated as missing values during fit.
        data_min = np.nanmin(X, axis=0)
        data_max = np.nanmax(X, axis=0)

        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next steps: merge the new batch's extrema with the running ones.
        else:
            data_min = np.minimum(self.data_min_, data_min)
            data_max = np.maximum(self.data_max_, data_max)
            self.n_samples_seen_ += X.shape[0]

        data_range = data_max - data_min
        # _handle_zeros_in_scale guards constant features (range == 0).
        self.scale_ = ((feature_range[1] - feature_range[0]) /
                       _handle_zeros_in_scale(data_range))
        self.min_ = feature_range[0] - data_min * self.scale_
        self.data_min_ = data_min
        self.data_max_ = data_max
        self.data_range_ = data_range
        return self

    def transform(self, X):
        """Scaling features of X according to feature_range.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')

        X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
                        force_all_finite="allow-nan")

        X *= self.scale_
        X += self.min_
        return X

    def inverse_transform(self, X):
        """Undo the scaling of X according to feature_range.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input data that will be transformed. It cannot be sparse.
        """
        check_is_fitted(self, 'scale_')

        X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
                        force_all_finite="allow-nan")

        X -= self.min_
        X /= self.scale_
        return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
    """Scale each feature (or sample) of X to the given range.

    Functional counterpart of :class:`sklearn.preprocessing.MinMaxScaler`;
    unlike the estimator this function also accepts 1d input.

    .. versionadded:: 0.17
       *minmax_scale* function interface
       to :class:`sklearn.preprocessing.MinMaxScaler`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data.
    feature_range : tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    See also
    --------
    MinMaxScaler: Performs scaling to a given range using the``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """  # noqa
    # Copying (when requested) happens inside the scaler object, so validate
    # here without a copy.
    X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
                    dtype=FLOAT_DTYPES)
    was_1d = (X.ndim == 1)
    if was_1d:
        X = X.reshape(X.shape[0], 1)

    scaler = MinMaxScaler(feature_range=feature_range, copy=copy)
    X = scaler.fit_transform(X) if axis == 0 else scaler.fit_transform(X.T).T

    return X.ravel() if was_1d else X
class StandardScaler(BaseEstimator, TransformerMixin):
    """Standardize features by removing the mean and scaling to unit variance

    Centering and scaling happen independently on each feature by computing
    the relevant statistics on the samples in the training set. Mean and
    standard deviation are then stored to be used on later data using the
    `transform` method.

    Standardization of a dataset is a common requirement for many
    machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
    distributed data (e.g. Gaussian with 0 mean and unit variance).

    For instance many elements used in the objective function of
    a learning algorithm (such as the RBF kernel of Support Vector
    Machines or the L1 and L2 regularizers of linear models) assume that
    all features are centered around 0 and have variance in the same
    order. If a feature has a variance that is orders of magnitude larger
    that others, it might dominate the objective function and make the
    estimator unable to learn from other features correctly as expected.

    This scaler can also be applied to sparse CSR or CSC matrices by passing
    `with_mean=False` to avoid breaking the sparsity structure of the data.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    copy : boolean, optional, default True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.
    with_mean : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).

    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.

        .. versionadded:: 0.17
           *scale_*
    mean_ : array of floats with shape [n_features]
        The mean value for each feature in the training set.
    var_ : array of floats with shape [n_features]
        The variance for each feature in the training set. Used to compute
        `scale_`
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.

    Examples
    --------
    >>> from sklearn.preprocessing import StandardScaler
    >>>
    >>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
    >>> scaler = StandardScaler()
    >>> print(scaler.fit(data))
    StandardScaler(copy=True, with_mean=True, with_std=True)
    >>> print(scaler.mean_)
    [0.5 0.5]
    >>> print(scaler.transform(data))
    [[-1. -1.]
     [-1. -1.]
     [ 1.  1.]
     [ 1.  1.]]
    >>> print(scaler.transform([[2, 2]]))
    [[3. 3.]]

    See also
    --------
    scale: Equivalent function without the estimator API.

    :class:`sklearn.decomposition.PCA`
        Further removes the linear correlation across features with 'whiten=True'.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """  # noqa

    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.mean_
            del self.var_

    def fit(self, X, y=None):
        """Compute the mean and std to be used for later scaling.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.

        y
            Ignored
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of mean and std on X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.

        The algorithm for incremental mean and std is given in Equation 1.5a,b
        in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
        for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247:

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.

        y
            Ignored
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)

        # Even in the case of `with_mean=False`, we update the mean anyway
        # This is needed for the incremental computation of the var
        # See incr_mean_variance_axis and _incremental_mean_variance_axis
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.with_std:
                # First pass
                if not hasattr(self, 'n_samples_seen_'):
                    self.mean_, self.var_ = mean_variance_axis(X, axis=0)
                    self.n_samples_seen_ = X.shape[0]
                # Next passes: fold the new batch into the running statistics.
                else:
                    self.mean_, self.var_, self.n_samples_seen_ = \
                        incr_mean_variance_axis(X, axis=0,
                                                last_mean=self.mean_,
                                                last_var=self.var_,
                                                last_n=self.n_samples_seen_)
            else:
                self.mean_ = None
                self.var_ = None
        else:
            # First pass: initialise the accumulators for the dense path.
            if not hasattr(self, 'n_samples_seen_'):
                self.mean_ = .0
                self.n_samples_seen_ = 0
                if self.with_std:
                    self.var_ = .0
                else:
                    self.var_ = None

            self.mean_, self.var_, self.n_samples_seen_ = \
                _incremental_mean_and_var(X, self.mean_, self.var_,
                                          self.n_samples_seen_)

        if self.with_std:
            # Zero variances (constant features) are mapped to scale 1.
            self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
        else:
            self.scale_ = None

        return self

    def transform(self, X, y='deprecated', copy=None):
        """Perform standardization by centering and scaling

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to scale along the features axis.
        y : (ignored)
            .. deprecated:: 0.19
               This parameter will be removed in 0.21.
        copy : bool, optional (default: None)
            Copy the input X or not.
        """
        if not isinstance(y, string_types) or y != 'deprecated':
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)

        check_is_fitted(self, 'scale_')

        copy = copy if copy is not None else self.copy
        X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.scale_ is not None:
                inplace_column_scale(X, 1 / self.scale_)
        else:
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.scale_
        return X

    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to scale along the features axis.
        copy : bool, optional (default: None)
            Copy the input X or not.

        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Transformed array.
        """
        check_is_fitted(self, 'scale_')

        copy = copy if copy is not None else self.copy
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            if not sparse.isspmatrix_csr(X):
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            if self.scale_ is not None:
                inplace_column_scale(X, self.scale_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            if self.with_std:
                X *= self.scale_
            if self.with_mean:
                X += self.mean_
        return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.

    This estimator scales and translates each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0. It does not shift/center the data, and
    thus does not destroy any sparsity.

    This scaler can also be applied to sparse CSR or CSC matrices.

    .. versionadded:: 0.17

    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.

        .. versionadded:: 0.17
           *scale_* attribute.
    max_abs_ : ndarray, shape (n_features,)
        Per feature maximum absolute value.
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.

    See also
    --------
    maxabs_scale: Equivalent function without the estimator API.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """

    def __init__(self, copy=True):
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """
        # Checking one attribute is enough, because they are all set together
        # in partial_fit
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.max_abs_

    def fit(self, X, y=None):
        """Compute the maximum absolute value to be used for later scaling.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Reset internal state before fitting
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of max absolute value of X for later scaling.
        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.

        y
            Ignored
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            # Sparse path: derive |max| from the per-column min and max.
            mins, maxs = min_max_axis(X, axis=0)
            max_abs = np.maximum(np.abs(mins), np.abs(maxs))
        else:
            max_abs = np.abs(X).max(axis=0)

        # First pass
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = X.shape[0]
        # Next passes: keep the running maximum over all batches.
        else:
            max_abs = np.maximum(self.max_abs_, max_abs)
            self.n_samples_seen_ += X.shape[0]

        self.max_abs_ = max_abs
        # Constant-zero features get scale 1 so transform leaves them at 0.
        self.scale_ = _handle_zeros_in_scale(max_abs)
        return self

    def transform(self, X):
        """Scale the data

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            inplace_column_scale(X, 1.0 / self.scale_)
        else:
            X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be transformed back.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES)

        if sparse.issparse(X):
            inplace_column_scale(X, self.scale_)
        else:
            X *= self.scale_
        return X
def maxabs_scale(X, axis=0, copy=True):
    """Scale each feature to the [-1, 1] range without breaking the sparsity.

    This estimator scales each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0.

    This scaler can also be applied to sparse CSR or CSC matrices.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data.

    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.

    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    See also
    --------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the
        ``Transformer`` API (e.g. as part of a preprocessing
        :class:`sklearn.pipeline.Pipeline`).

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    # Unlike the scaler object, this function allows 1d input.
    # If a copy is required, the scaler object below takes care of it.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES)
    was_1d = (X.ndim == 1)
    if was_1d:
        X = X.reshape(-1, 1)

    scaler = MaxAbsScaler(copy=copy)
    if axis == 0:
        X = scaler.fit_transform(X)
    else:
        # Scale samples instead of features by transposing around the fit.
        X = scaler.fit_transform(X.T).T

    return X.ravel() if was_1d else X
class RobustScaler(BaseEstimator, TransformerMixin):
    """Scale features using statistics that are robust to outliers.

    This Scaler removes the median and scales the data according to
    the quantile range (defaults to IQR: Interquartile Range).
    The IQR is the range between the 1st quartile (25th quantile)
    and the 3rd quartile (75th quantile).

    Centering and scaling happen independently on each feature by
    computing the relevant statistics on the samples in the training
    set. Median and interquartile range are then stored to be used on
    later data using the ``transform`` method.

    Standardization of a dataset is a common requirement for many
    machine learning estimators. Typically this is done by removing the mean
    and scaling to unit variance. However, outliers can often influence the
    sample mean / variance in a negative way. In such cases, the median and
    the interquartile range often give better results.

    .. versionadded:: 0.17

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    with_centering : boolean, True by default
        If True, center the data before scaling.
        This will cause ``transform`` to raise an exception when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.

    with_scaling : boolean, True by default
        If True, scale the data to interquartile range.

    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
        Quantile range used to calculate ``scale_``.

        .. versionadded:: 0.18

    copy : boolean, optional, default is True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.

    Attributes
    ----------
    center_ : array of floats
        The median value for each feature in the training set.

    scale_ : array of floats
        The (scaled) interquartile range for each feature in the training set.

        .. versionadded:: 0.17
           *scale_* attribute.

    See also
    --------
    robust_scale: Equivalent function without the estimator API.

    :class:`sklearn.decomposition.PCA`
        Further removes the linear correlation across features with
        'whiten=True'.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    https://en.wikipedia.org/wiki/Median
    https://en.wikipedia.org/wiki/Interquartile_range
    """

    def __init__(self, with_centering=True, with_scaling=True,
                 quantile_range=(25.0, 75.0), copy=True):
        self.with_centering = with_centering
        self.with_scaling = with_scaling
        self.quantile_range = quantile_range
        self.copy = copy

    def _check_array(self, X, copy):
        """Validate X and make sure centering is not enabled for sparse
        matrices.

        Fix: honour the ``copy`` argument instead of silently ignoring it
        in favour of ``self.copy`` (the parameter was previously dead).
        Behavior for internal callers is unchanged since they all pass
        ``self.copy``.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=copy,
                        estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X) and self.with_centering:
            raise ValueError(
                "Cannot center sparse matrices: use `with_centering=False`"
                " instead. See docstring for motivation and alternatives.")
        return X

    def fit(self, X, y=None):
        """Compute the median and quantiles to be used for scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the median and quantiles
            used for later scaling along the features axis.
        """
        if sparse.issparse(X):
            raise TypeError("RobustScaler cannot be fitted on sparse inputs")
        X = self._check_array(X, self.copy)
        if self.with_centering:
            self.center_ = np.median(X, axis=0)
        if self.with_scaling:
            q_min, q_max = self.quantile_range
            if not 0 <= q_min <= q_max <= 100:
                raise ValueError("Invalid quantile range: %s" %
                                 str(self.quantile_range))
            # scale_ is the per-feature quantile range (q_max - q_min);
            # zero ranges are replaced with 1 so transform never divides
            # by zero.
            q = np.percentile(X, self.quantile_range, axis=0)
            self.scale_ = (q[1] - q[0])
            self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
        return self

    def transform(self, X):
        """Center and scale the data.

        Can be called on sparse input, provided that ``RobustScaler`` has been
        fitted to dense input and ``with_centering=False``.

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            # Sparse input implies with_centering=False (_check_array
            # enforces this), so only scaling can apply.
            if self.with_scaling:
                inplace_column_scale(X, 1.0 / self.scale_)
        else:
            if self.with_centering:
                X -= self.center_
            if self.with_scaling:
                X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation.

        Parameters
        ----------
        X : array-like
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            if self.with_scaling:
                inplace_column_scale(X, self.scale_)
        else:
            # Invert transform: multiply back first, then re-add the center.
            if self.with_scaling:
                X *= self.scale_
            if self.with_centering:
                X += self.center_
        return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
                 quantile_range=(25.0, 75.0), copy=True):
    """Standardize a dataset along any axis

    Center to the median and component wise scale
    according to the interquartile range.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : array-like
        The data to center and scale.

    axis : int (0 by default)
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.

    with_centering : boolean, True by default
        If True, center the data before scaling.

    with_scaling : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).

    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
        Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
        Quantile range used to calculate ``scale_``.

        .. versionadded:: 0.18

    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.

    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.

    To avoid memory copy the caller should pass a CSR matrix.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    See also
    --------
    RobustScaler: Performs centering and scaling using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """
    # 1d input is allowed here (unlike the estimator); copies are handled
    # inside the scaler object.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES)
    was_1d = (X.ndim == 1)
    if was_1d:
        X = X.reshape(-1, 1)

    scaler = RobustScaler(with_centering=with_centering,
                          with_scaling=with_scaling,
                          quantile_range=quantile_range, copy=copy)
    if axis == 0:
        X = scaler.fit_transform(X)
    else:
        # Scale samples rather than features by transposing around the fit.
        X = scaler.fit_transform(X.T).T

    return X.ravel() if was_1d else X
class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.

    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).

    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[ 1.,  0.,  1.,  0.,  0.,  1.],
           [ 1.,  2.,  3.,  4.,  6.,  9.],
           [ 1.,  4.,  5., 16., 20., 25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[ 1.,  0.,  1.,  0.],
           [ 1.,  2.,  3.,  6.],
           [ 1.,  4.,  5., 20.]])

    Attributes
    ----------
    powers_ : array, shape (n_output_features, n_input_features)
        powers_[i, j] is the exponent of the jth input in the ith output.

    n_input_features_ : int
        The total number of input features.

    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
    """

    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias

    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        """Yield index tuples for all output features, in output order.

        ``interaction_only`` switches between combinations without
        repetition and combinations with replacement; ``include_bias``
        controls whether the empty tuple (the constant column) is emitted.
        """
        comb = (combinations if interaction_only else combinations_w_r)
        start = int(not include_bias)
        return chain.from_iterable(comb(range(n_features), i)
                                   for i in range(start, degree + 1))

    @property
    def powers_(self):
        check_is_fitted(self, 'n_input_features_')

        combinations = self._combinations(self.n_input_features_, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        # Fix: np.vstack on a bare generator is deprecated (NumPy >= 1.16)
        # and an error in recent releases; materialize a list first.
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])

    def get_feature_names(self, input_features=None):
        """
        Return feature names for output features

        Parameters
        ----------
        input_features : list of string, length n_features, optional
            String names for input features if available. By default,
            "x0", "x1", ... "xn_features" is used.

        Returns
        -------
        output_feature_names : list of string, length n_output_features
        """
        powers = self.powers_
        if input_features is None:
            input_features = ['x%d' % i for i in range(powers.shape[1])]
        feature_names = []
        for row in powers:
            inds = np.where(row)[0]
            if len(inds):
                # Omit the "^1" suffix for unit exponents, e.g. "x0 x1^2".
                name = " ".join("%s^%d" % (input_features[ind], exp)
                                if exp != 1 else input_features[ind]
                                for ind, exp in zip(inds, row[inds]))
            else:
                name = "1"
            feature_names.append(name)
        return feature_names

    def fit(self, X, y=None):
        """
        Compute number of output features.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data.

        Returns
        -------
        self : instance
        """
        n_samples, n_features = check_array(X, accept_sparse=True).shape
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        self.n_input_features_ = n_features
        self.n_output_features_ = sum(1 for _ in combinations)
        return self

    def transform(self, X):
        """Transform data to polynomial features

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_features]
            The data to transform, row by row.
            Sparse input should preferably be in CSC format.

        Returns
        -------
        XP : np.ndarray or CSC sparse matrix, shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.
        """
        check_is_fitted(self, ['n_input_features_', 'n_output_features_'])

        X = check_array(X, dtype=FLOAT_DTYPES, accept_sparse='csc')
        n_samples, n_features = X.shape

        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")

        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        if sparse.isspmatrix(X):
            # Build each output column as a (sparse) product of the input
            # columns in the combination; the empty combination is the
            # all-ones bias column.
            columns = []
            for comb in combinations:
                if comb:
                    out_col = 1
                    for col_idx in comb:
                        out_col = X[:, col_idx].multiply(out_col)
                    columns.append(out_col)
                else:
                    columns.append(sparse.csc_matrix(np.ones((X.shape[0], 1))))
            XP = sparse.hstack(columns, dtype=X.dtype).tocsc()
        else:
            XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
            for i, comb in enumerate(combinations):
                XP[:, i] = X[:, comb].prod(1)

        return XP
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
    """Scale input vectors individually to unit norm (vector length).

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.

    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).

    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    return_norm : boolean, default False
        whether to return the computed norms

    Returns
    -------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Normalized input X.

    norms : array, shape [n_samples] if axis=1 else [n_features]
        An array of norms along given axis for X.
        When X is sparse, a NotImplementedError will be raised
        for norm 'l1' or 'l2'.

    See also
    --------
    Normalizer: Performs normalization using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    if norm not in ('l1', 'l2', 'max'):
        raise ValueError("'%s' is not a supported norm" % norm)

    # Pick the sparse layout whose rows align with the normalization axis.
    if axis == 1:
        sparse_format = 'csr'
    elif axis == 0:
        sparse_format = 'csc'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)

    X = check_array(X, sparse_format, copy=copy,
                    estimator='the normalize function', dtype=FLOAT_DTYPES)
    # Work row-wise throughout; transpose in (and back out) for axis=0.
    if axis == 0:
        X = X.T

    if sparse.issparse(X):
        if return_norm and norm in ('l1', 'l2'):
            raise NotImplementedError("return_norm=True is not implemented "
                                      "for sparse matrices with norm 'l1' "
                                      "or norm 'l2'")
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
        else:  # norm == 'max'
            # Expand per-row maxima to one entry per stored value, then
            # divide only the rows whose maximum is nonzero.
            _, norms = min_max_axis(X, 1)
            norms_elementwise = norms.repeat(np.diff(X.indptr))
            mask = norms_elementwise != 0
            X.data[mask] /= norms_elementwise[mask]
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)
        elif norm == 'l2':
            norms = row_norms(X)
        else:  # norm == 'max'
            norms = np.max(X, axis=1)
        norms = _handle_zeros_in_scale(norms, copy=False)
        X /= norms[:, np.newaxis]

    if axis == 0:
        X = X.T

    return (X, norms) if return_norm else X
class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm.

    Each sample (i.e. each row of the data matrix) with at least one
    non zero component is rescaled independently of other samples so
    that its norm (l1 or l2) equals one.

    This transformer is able to work both with dense numpy arrays and
    scipy.sparse matrix (use CSR format if you want to avoid the burden of
    a copy / conversion).

    Scaling inputs to unit norms is a common operation for text
    classification or clustering for instance. For instance the dot
    product of two l2-normalized TF-IDF vectors is the cosine similarity
    of the vectors and is the base similarity metric for the Vector
    Space Model commonly used by the Information Retrieval community.

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample.

    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    See also
    --------
    normalize: Equivalent function without the estimator API.
    """

    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        This method is just there to implement the usual API and hence
        work in pipelines.

        Parameters
        ----------
        X : array-like
        """
        # Input validation only; this transformer holds no fitted state.
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y='deprecated', copy=None):
        """Scale each non zero row of X to unit norm.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an un-necessary copy.

        y : (ignored)
            .. deprecated:: 0.19
               This parameter will be removed in 0.21.

        copy : bool, optional (default: None)
            Copy the input X or not.
        """
        # Any explicitly supplied y (other than the sentinel) is deprecated.
        if not (isinstance(y, string_types) and y == 'deprecated'):
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)

        if copy is None:
            copy = self.copy
        X = check_array(X, accept_sparse='csr')
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix.

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.

    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix and if axis is 1).

    See also
    --------
    Binarizer: Performs binarization using the ``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
    if sparse.issparse(X):
        if threshold < 0:
            # A negative threshold would force the (implicit) zeros to
            # become ones, destroying sparsity.
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        above = X.data > threshold
        X.data[above] = 1
        X.data[~above] = 0
        X.eliminate_zeros()
    else:
        above = X > threshold
        X[above] = 1
        X[~above] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold.

    Values greater than the threshold map to 1, while values less than
    or equal to the threshold map to 0. With the default threshold of 0,
    only positive values map to 1.

    Binarization is a common operation on text count data where the
    analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences for instance.

    It can also be used as a pre-processing step for estimators that
    consider boolean random variables (e.g. modelled using the Bernoulli
    distribution in a Bayesian setting).

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.

    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy (if
        the input is already a numpy array or a scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.

    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.

    See also
    --------
    binarize: Equivalent function without the estimator API.
    """

    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        This method is just there to implement the usual API and hence
        work in pipelines.

        Parameters
        ----------
        X : array-like
        """
        # Input validation only; this transformer holds no fitted state.
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y='deprecated', copy=None):
        """Binarize each element of X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.

        y : (ignored)
            .. deprecated:: 0.19
               This parameter will be removed in 0.21.

        copy : bool
            Copy the input X or not.
        """
        # Any explicitly supplied y (other than the sentinel) is deprecated.
        if not (isinstance(y, string_types) and y == 'deprecated'):
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)

        if copy is None:
            copy = self.copy
        return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix.

    Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalize to have zero mean) the data without explicitly computing phi(x).
    It is equivalent to centering phi(x) with
    sklearn.preprocessing.StandardScaler(with_std=False).

    Read more in the :ref:`User Guide <kernel_centering>`.
    """

    def fit(self, K, y=None):
        """Fit KernelCenterer.

        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.

        Returns
        -------
        self : returns an instance of self.
        """
        K = check_array(K, dtype=FLOAT_DTYPES)
        n = K.shape[0]
        # Column means of the training kernel, and the grand mean.
        self.K_fit_rows_ = np.sum(K, axis=0) / n
        self.K_fit_all_ = self.K_fit_rows_.sum() / n
        return self

    def transform(self, K, y='deprecated', copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.

        y : (ignored)
            .. deprecated:: 0.19
               This parameter will be removed in 0.21.

        copy : boolean, optional, default True
            Set to False to perform inplace computation.

        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        # Any explicitly supplied y (other than the sentinel) is deprecated.
        if not (isinstance(y, string_types) and y == 'deprecated'):
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)

        check_is_fitted(self, 'K_fit_all_')

        K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)

        # Row means of K against the training samples, as a column vector.
        K_pred_cols = (np.sum(K, axis=1) /
                       self.K_fit_rows_.shape[0])[:, np.newaxis]

        # Standard double-centering of the kernel, in place on K.
        K -= self.K_fit_rows_
        K -= K_pred_cols
        K += self.K_fit_all_

        return K

    @property
    def _pairwise(self):
        # Inputs to this estimator are pairwise (kernel) matrices.
        return True
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.

    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Data.

    value : float
        Value to use for the dummy feature.

    Returns
    -------
    X : {array, sparse matrix}, shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.

    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[1., 0., 1.],
           [1., 1., 0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
    n_samples, n_features = X.shape
    out_shape = (n_samples, n_features + 1)

    if not sparse.issparse(X):
        # Dense case: prepend a constant column.
        return np.hstack((np.full((n_samples, 1), value), X))

    if sparse.isspmatrix_coo(X):
        # Existing columns shift right by one; the dummy column (index 0)
        # contributes one entry per row.
        col = np.concatenate((np.zeros(n_samples), X.col + 1))
        row = np.concatenate((np.arange(n_samples), X.row))
        data = np.concatenate((np.full(n_samples, value), X.data))
        return sparse.coo_matrix((data, (row, col)), out_shape)

    if sparse.isspmatrix_csc(X):
        # The dummy column holds n_samples values, so every index pointer
        # moves up by n_samples; indptr[0] must stay 0.
        indptr = np.concatenate((np.array([0]), X.indptr + n_samples))
        indices = np.concatenate((np.arange(n_samples), X.indices))
        data = np.concatenate((np.full(n_samples, value), X.data))
        return sparse.csc_matrix((data, indices, indptr), out_shape)

    # Any other sparse format: round-trip through COO, preserving the class.
    return X.__class__(add_dummy_feature(X.tocoo(), value))
def _transform_selected(X, transform, selected="all", copy=True):
    """Apply a transform function to portion of selected features.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Dense array or sparse matrix.

    transform : callable
        A callable transform(X) -> X_transformed

    copy : boolean, optional
        Copy X even if it could be avoided.

    selected: "all" or array of indices or mask
        Specify which features to apply the transform to.

    Returns
    -------
    X : array or sparse matrix, shape=(n_samples, n_features_new)
    """
    X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)

    if isinstance(selected, six.string_types) and selected == "all":
        return transform(X)
    if len(selected) == 0:
        return X

    n_features = X.shape[1]
    # Normalize `selected` (indices or mask) into a boolean mask.
    sel_mask = np.zeros(n_features, dtype=bool)
    sel_mask[np.asarray(selected)] = True
    n_selected = np.sum(sel_mask)

    if n_selected == 0:
        # No features selected: nothing to transform.
        return X
    if n_selected == n_features:
        # Everything selected: transform the whole matrix at once.
        return transform(X)

    # Transform the selected columns and re-stack the untouched ones on
    # the right, matching the input's sparsity when possible.
    ind = np.arange(n_features)
    X_sel = transform(X[:, ind[sel_mask]])
    X_not_sel = X[:, ind[~sel_mask]]
    if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
        return sparse.hstack((X_sel, X_not_sel))
    return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values). For an encoder based on the unique values of the input
features of any type, see the
:class:`~sklearn.preprocessing.CategoricalEncoder`.
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and four samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.preprocessing.CategoricalEncoder : performs a one-hot or ordinal
encoding of all features (also handles string-valued features). This
encoder derives the categories based on the unique values in each
feature.
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
             dtype=np.float64, sparse=True, handle_unknown='error'):
    # Per scikit-learn convention the constructor only stores the
    # parameters verbatim; all validation is deferred to fit.
    self.handle_unknown = handle_unknown
    self.sparse = sparse
    self.dtype = dtype
    self.categorical_features = categorical_features
    self.n_values = n_values
def fit(self, X, y=None):
    """Fit OneHotEncoder to X.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_feature]
        Input array of type int.

    y : Ignored. Present for scikit-learn API compatibility.

    Returns
    -------
    self
    """
    # Fitting is implemented via fit_transform; the encoded output is
    # discarded here and only the learned state (n_values_,
    # feature_indices_, active_features_) is kept on self.
    self.fit_transform(X)
    return self
def _fit_transform(self, X):
    """Assumes X contains only categorical features.

    Learns ``n_values_`` and ``feature_indices_`` (and, for
    ``n_values='auto'``, ``active_features_``) from the training data and
    returns the one-hot encoded matrix.
    """
    X = check_array(X, dtype=np.int)
    if np.any(X < 0):
        raise ValueError("X needs to contain only non-negative integers.")
    n_samples, n_features = X.shape
    if (isinstance(self.n_values, six.string_types) and
            self.n_values == 'auto'):
        # One output column per possible value: max observed value + 1.
        n_values = np.max(X, axis=0) + 1
    elif isinstance(self.n_values, numbers.Integral):
        if (np.max(X, axis=0) >= self.n_values).any():
            raise ValueError("Feature out of bounds for n_values=%d"
                             % self.n_values)
        n_values = np.empty(n_features, dtype=np.int)
        n_values.fill(self.n_values)
    else:
        try:
            n_values = np.asarray(self.n_values, dtype=int)
        except (ValueError, TypeError):
            # Bug fix: report the type of the offending `n_values`
            # parameter; the previous code printed type(X), which is
            # always an ndarray here and misled the user.
            raise TypeError("Wrong type for parameter `n_values`. Expected"
                            " 'auto', int or array of ints, got %r"
                            % type(self.n_values))
        if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
            raise ValueError("Shape mismatch: if n_values is an array,"
                             " it has to be of shape (n_features,).")

    self.n_values_ = n_values
    # feature_indices_[i]:feature_indices_[i+1] is the column span of
    # feature i in the encoded output.
    n_values = np.hstack([[0], n_values])
    indices = np.cumsum(n_values)
    self.feature_indices_ = indices

    # Build the one-hot matrix in COO form: one entry per (sample, feature).
    column_indices = (X + indices[:-1]).ravel()
    row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                            n_features)
    data = np.ones(n_samples * n_features)
    out = sparse.coo_matrix((data, (row_indices, column_indices)),
                            shape=(n_samples, indices[-1]),
                            dtype=self.dtype).tocsr()

    if (isinstance(self.n_values, six.string_types) and
            self.n_values == 'auto'):
        # Keep only columns for values actually seen in the training data.
        mask = np.array(out.sum(axis=0)).ravel() != 0
        active_features = np.where(mask)[0]
        out = out[:, active_features]
        self.active_features_ = active_features

    return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
    """Fit OneHotEncoder to X, then transform X.

    Equivalent to self.fit(X).transform(X), but more convenient and more
    efficient. See fit for the parameters, transform for the return value.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_feature]
        Input array of type int.
    """
    # Only the columns selected by `categorical_features` are encoded by
    # _fit_transform; the remaining columns pass through unchanged.
    encoded = _transform_selected(X, self._fit_transform,
                                  self.categorical_features, copy=True)
    return encoded
def _transform(self, X):
    """Assumes X contains only categorical features.

    One-hot encodes X using the feature ranges learned during fit.
    """
    X = check_array(X, dtype=np.int)
    if np.any(X < 0):
        raise ValueError("X needs to contain only non-negative integers.")
    n_samples, n_features = X.shape

    indices = self.feature_indices_
    if n_features != indices.shape[0] - 1:
        raise ValueError("X has different shape than during fitting."
                         " Expected %d, got %d."
                         % (indices.shape[0] - 1, n_features))

    # We use only those categorical features of X that are known using fit.
    # i.e lesser than n_values_ using mask.
    # This means, if self.handle_unknown is "ignore", the row_indices and
    # col_indices corresponding to the unknown categorical feature are
    # ignored.
    mask = (X < self.n_values_).ravel()
    if np.any(~mask):
        if self.handle_unknown not in ['error', 'ignore']:
            # Bug fix: the message previously said "either error or
            # unknown", but the accepted values are 'error' and 'ignore'.
            raise ValueError("handle_unknown should be either 'error' or "
                             "'ignore', got %s" % self.handle_unknown)
        if self.handle_unknown == 'error':
            raise ValueError("unknown categorical feature present %s "
                             "during transform." % X.ravel()[~mask])

    # Entries for unknown categories are dropped via `mask`, which yields
    # all-zero rows for them when handle_unknown == 'ignore'.
    column_indices = (X + indices[:-1]).ravel()[mask]
    row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                            n_features)[mask]
    data = np.ones(np.sum(mask))
    out = sparse.coo_matrix((data, (row_indices, column_indices)),
                            shape=(n_samples, indices[-1]),
                            dtype=self.dtype).tocsr()
    if (isinstance(self.n_values, six.string_types) and
            self.n_values == 'auto'):
        out = out[:, self.active_features_]

    return out if self.sparse else out.toarray()
def transform(self, X):
    """Transform X using one-hot encoding.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        Input array of type int.

    Returns
    -------
    X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
        Transformed input.
    """
    # Only the columns selected by `categorical_features` are encoded by
    # _transform; the remaining columns pass through unchanged.
    encoded = _transform_selected(X, self._transform,
                                  self.categorical_features, copy=True)
    return encoded
class QuantileTransformer(BaseEstimator, TransformerMixin):
    """Transform features using quantiles information.

    This method transforms the features to follow a uniform or a normal
    distribution. Therefore, for a given feature, this transformation tends
    to spread out the most frequent values. It also reduces the impact of
    (marginal) outliers: this is therefore a robust preprocessing scheme.

    The transformation is applied on each feature independently.
    The cumulative density function of a feature is used to project the
    original values. Features values of new/unseen data that fall below
    or above the fitted range will be mapped to the bounds of the output
    distribution. Note that this transform is non-linear. It may distort linear
    correlations between variables measured at the same scale but renders
    variables measured at different scales more directly comparable.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    n_quantiles : int, optional (default=1000)
        Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative density function.

    output_distribution : str, optional (default='uniform')
        Marginal distribution for the transformed data. The choices are
        'uniform' (default) or 'normal'.

    ignore_implicit_zeros : bool, optional (default=False)
        Only applies to sparse matrices. If True, the sparse entries of the
        matrix are discarded to compute the quantile statistics. If False,
        these entries are treated as zeros.

    subsample : int, optional (default=1e5)
        Maximum number of samples used to estimate the quantiles for
        computational efficiency. Note that the subsampling procedure may
        differ for value-identical sparse and dense matrices.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random. Note that this is used by subsampling and smoothing
        noise.

    copy : boolean, optional, (default=True)
        Set to False to perform inplace transformation and avoid a copy (if the
        input is already a numpy array).

    Attributes
    ----------
    quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding the quantiles of reference.

    references_ : ndarray, shape(n_quantiles, )
        Quantiles of references.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import QuantileTransformer
    >>> rng = np.random.RandomState(0)
    >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
    >>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
    >>> qt.fit_transform(X) # doctest: +ELLIPSIS
    array([...])

    See also
    --------
    quantile_transform : Equivalent function without the estimator API.
    PowerTransformer : Perform mapping to a normal distribution using a power
        transform.
    StandardScaler : Perform standardization that is faster, but less robust
        to outliers.
    RobustScaler : Perform robust standardization that removes the influence
        of outliers but does not put outliers and inliers on the same scale.

    Notes
    -----
    NaNs are treated as missing values: disregarded in fit, and maintained in
    transform.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """

    def __init__(self, n_quantiles=1000, output_distribution='uniform',
                 ignore_implicit_zeros=False, subsample=int(1e5),
                 random_state=None, copy=True):
        # Parameters are stored verbatim; validation happens in fit.
        self.n_quantiles = n_quantiles
        self.output_distribution = output_distribution
        self.ignore_implicit_zeros = ignore_implicit_zeros
        self.subsample = subsample
        self.random_state = random_state
        self.copy = copy

    def _dense_fit(self, X, random_state):
        """Compute percentiles for dense matrices.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The data used to scale along the features axis.

        random_state : RandomState instance
            Used to subsample rows when n_samples exceeds ``subsample``.
        """
        if self.ignore_implicit_zeros:
            warnings.warn("'ignore_implicit_zeros' takes effect only with"
                          " sparse matrix. This parameter has no effect.")

        n_samples, n_features = X.shape
        # references_ lies in [0, 1]; np.percentile expects percentages.
        references = self.references_ * 100
        # numpy < 1.9 bug: np.percentile 2nd argument needs to be a list
        if LooseVersion(np.__version__) < '1.9':
            references = references.tolist()

        self.quantiles_ = []
        for col in X.T:
            if self.subsample < n_samples:
                # Bound the cost of the percentile computation by drawing
                # a random subset of rows without replacement.
                subsample_idx = random_state.choice(n_samples,
                                                    size=self.subsample,
                                                    replace=False)
                col = col.take(subsample_idx, mode='clip')
            self.quantiles_.append(nanpercentile(col, references))
        # Stack per-column results into shape (n_quantiles, n_features).
        self.quantiles_ = np.transpose(self.quantiles_)

    def _sparse_fit(self, X, random_state):
        """Compute percentiles for sparse matrices.

        Parameters
        ----------
        X : sparse matrix CSC, shape (n_samples, n_features)
            The data used to scale along the features axis. The sparse matrix
            needs to be nonnegative.

        random_state : RandomState instance
            Used to subsample the non-zero entries of large columns.
        """
        n_samples, n_features = X.shape
        # references_ lies in [0, 1]; np.percentile expects percentages.
        references = self.references_ * 100
        # numpy < 1.9 bug: np.percentile 2nd argument needs to be a list
        if LooseVersion(np.__version__) < '1.9':
            references = references.tolist()

        self.quantiles_ = []
        for feature_idx in range(n_features):
            # Non-zero entries of the current CSC column.
            column_nnz_data = X.data[X.indptr[feature_idx]:
                                     X.indptr[feature_idx + 1]]
            if len(column_nnz_data) > self.subsample:
                # Subsample the non-zeros proportionally to their share of
                # the column so the implicit zeros keep their relative
                # weight in the percentile computation.
                column_subsample = (self.subsample * len(column_nnz_data) //
                                    n_samples)
                if self.ignore_implicit_zeros:
                    column_data = np.zeros(shape=column_subsample,
                                           dtype=X.dtype)
                else:
                    column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
                column_data[:column_subsample] = random_state.choice(
                    column_nnz_data, size=column_subsample, replace=False)
            else:
                if self.ignore_implicit_zeros:
                    column_data = np.zeros(shape=len(column_nnz_data),
                                           dtype=X.dtype)
                else:
                    # Zero-padded to full column length so implicit zeros
                    # take part in the percentile computation.
                    column_data = np.zeros(shape=n_samples, dtype=X.dtype)
                column_data[:len(column_nnz_data)] = column_nnz_data

            if not column_data.size:
                # if no nnz, an error will be raised for computing the
                # quantiles. Force the quantiles to be zeros.
                self.quantiles_.append([0] * len(references))
            else:
                self.quantiles_.append(nanpercentile(column_data, references))
        self.quantiles_ = np.transpose(self.quantiles_)

    def fit(self, X, y=None):
        """Compute the quantiles used for transforming.

        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.

        Returns
        -------
        self : object
        """
        if self.n_quantiles <= 0:
            raise ValueError("Invalid value for 'n_quantiles': %d. "
                             "The number of quantiles must be at least one."
                             % self.n_quantiles)

        if self.subsample <= 0:
            raise ValueError("Invalid value for 'subsample': %d. "
                             "The number of subsamples must be at least one."
                             % self.subsample)

        if self.n_quantiles > self.subsample:
            raise ValueError("The number of quantiles cannot be greater than"
                             " the number of samples used. Got {} quantiles"
                             " and {} samples.".format(self.n_quantiles,
                                                       self.subsample))

        X = self._check_inputs(X)
        rng = check_random_state(self.random_state)

        # Create the quantiles of reference
        self.references_ = np.linspace(0, 1, self.n_quantiles,
                                       endpoint=True)
        if sparse.issparse(X):
            self._sparse_fit(X, rng)
        else:
            self._dense_fit(X, rng)

        return self

    def _transform_col(self, X_col, quantiles, inverse):
        """Private function to transform a single feature.

        Maps between the data domain (bounded by ``quantiles``) and the
        output distribution domain, in either direction.
        """
        if self.output_distribution == 'normal':
            # scipy.stats names the Gaussian distribution 'norm'.
            output_distribution = 'norm'
        else:
            output_distribution = self.output_distribution
        output_distribution = getattr(stats, output_distribution)

        if not inverse:
            lower_bound_x = quantiles[0]
            upper_bound_x = quantiles[-1]
            lower_bound_y = 0
            upper_bound_y = 1
        else:
            lower_bound_x = 0
            upper_bound_x = 1
            lower_bound_y = quantiles[0]
            upper_bound_y = quantiles[-1]
            # for inverse transform, match a uniform PDF
            X_col = output_distribution.cdf(X_col)
        # find index for lower and higher bounds
        with np.errstate(invalid='ignore'):  # hide NaN comparison warnings
            lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
                                lower_bound_x)
            upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
                                upper_bound_x)

        # NaNs are left untouched: only finite entries are interpolated.
        isfinite_mask = ~np.isnan(X_col)
        X_col_finite = X_col[isfinite_mask]
        if not inverse:
            # Interpolate in one direction and in the other and take the
            # mean. This is in case of repeated values in the features
            # and hence repeated quantiles
            #
            # If we don't do this, only one extreme of the duplicated is
            # used (the upper when we do ascending, and the
            # lower for descending). We take the mean of these two
            X_col[isfinite_mask] = .5 * (
                np.interp(X_col_finite, quantiles, self.references_)
                - np.interp(-X_col_finite, -quantiles[::-1],
                            -self.references_[::-1]))
        else:
            X_col[isfinite_mask] = np.interp(X_col_finite,
                                             self.references_, quantiles)

        # Values outside the fitted range are clipped to the bounds of the
        # output domain (applied after interpolation, overwriting it).
        X_col[upper_bounds_idx] = upper_bound_y
        X_col[lower_bounds_idx] = lower_bound_y
        # for forward transform, match the output PDF
        if not inverse:
            with np.errstate(invalid='ignore'):  # hide NaN comparison warnings
                X_col = output_distribution.ppf(X_col)
            # find the value to clip the data to avoid mapping to
            # infinity. Clip such that the inverse transform will be
            # consistent
            clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
                                               np.spacing(1))
            clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
                                                    np.spacing(1)))
            X_col = np.clip(X_col, clip_min, clip_max)

        return X_col

    def _check_inputs(self, X, accept_sparse_negative=False):
        """Check inputs before fit and transform"""
        X = check_array(X, accept_sparse='csc', copy=self.copy,
                        dtype=FLOAT_DTYPES,
                        force_all_finite='allow-nan')
        # we only accept positive sparse matrix when ignore_implicit_zeros is
        # false and that we call fit or transform.
        with np.errstate(invalid='ignore'):  # hide NaN comparison warnings
            if (not accept_sparse_negative and not self.ignore_implicit_zeros
                    and (sparse.issparse(X) and np.any(X.data < 0))):
                raise ValueError('QuantileTransformer only accepts'
                                 ' non-negative sparse matrices.')

        # check the output PDF
        if self.output_distribution not in ('normal', 'uniform'):
            raise ValueError("'output_distribution' has to be either 'normal'"
                             " or 'uniform'. Got '{}' instead.".format(
                                 self.output_distribution))

        return X

    def _check_is_fitted(self, X):
        """Check the inputs before transforming"""
        check_is_fitted(self, 'quantiles_')
        # check that the dimension of X are adequate with the fitted data
        if X.shape[1] != self.quantiles_.shape[1]:
            raise ValueError('X does not have the same number of features as'
                             ' the previously fitted data. Got {} instead of'
                             ' {}.'.format(X.shape[1],
                                           self.quantiles_.shape[1]))

    def _transform(self, X, inverse=False):
        """Forward and inverse transform.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The data used to scale along the features axis.

        inverse : bool, optional (default=False)
            If False, apply forward transform. If True, apply
            inverse transform.

        Returns
        -------
        X : ndarray, shape (n_samples, n_features)
            Projected data
        """
        if sparse.issparse(X):
            # Transform the non-zero entries of each CSC column in place.
            for feature_idx in range(X.shape[1]):
                column_slice = slice(X.indptr[feature_idx],
                                     X.indptr[feature_idx + 1])
                X.data[column_slice] = self._transform_col(
                    X.data[column_slice], self.quantiles_[:, feature_idx],
                    inverse)
        else:
            for feature_idx in range(X.shape[1]):
                X[:, feature_idx] = self._transform_col(
                    X[:, feature_idx], self.quantiles_[:, feature_idx],
                    inverse)

        return X

    def transform(self, X):
        """Feature-wise transformation of the data.

        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.

        Returns
        -------
        Xt : ndarray or sparse matrix, shape (n_samples, n_features)
            The projected data.
        """
        X = self._check_inputs(X)
        self._check_is_fitted(X)

        return self._transform(X, inverse=False)

    def inverse_transform(self, X):
        """Back-projection to the original space.

        Parameters
        ----------
        X : ndarray or sparse matrix, shape (n_samples, n_features)
            The data used to scale along the features axis. If a sparse
            matrix is provided, it will be converted into a sparse
            ``csc_matrix``. Additionally, the sparse matrix needs to be
            nonnegative if `ignore_implicit_zeros` is False.

        Returns
        -------
        Xt : ndarray or sparse matrix, shape (n_samples, n_features)
            The projected data.
        """
        # Negative sparse entries are accepted here: a 'normal' output
        # distribution legitimately produces negative values.
        X = self._check_inputs(X, accept_sparse_negative=True)
        self._check_is_fitted(X)

        return self._transform(X, inverse=True)
def quantile_transform(X, axis=0, n_quantiles=1000,
                       output_distribution='uniform',
                       ignore_implicit_zeros=False,
                       subsample=int(1e5),
                       random_state=None,
                       copy=False):
    """Transform features using quantiles information.

    This method transforms the features to follow a uniform or a normal
    distribution. Therefore, for a given feature, this transformation tends
    to spread out the most frequent values. It also reduces the impact of
    (marginal) outliers: this is therefore a robust preprocessing scheme.

    The transformation is applied on each feature independently.
    The cumulative density function of a feature is used to project the
    original values. Features values of new/unseen data that fall below
    or above the fitted range will be mapped to the bounds of the output
    distribution. Note that this transform is non-linear. It may distort linear
    correlations between variables measured at the same scale but renders
    variables measured at different scales more directly comparable.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    X : array-like, sparse matrix
        The data to transform.

    axis : int, (default=0)
        Axis used to compute the means and standard deviations along. If 0,
        transform each feature, otherwise (if 1) transform each sample.

    n_quantiles : int, optional (default=1000)
        Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative density function.

    output_distribution : str, optional (default='uniform')
        Marginal distribution for the transformed data. The choices are
        'uniform' (default) or 'normal'.

    ignore_implicit_zeros : bool, optional (default=False)
        Only applies to sparse matrices. If True, the sparse entries of the
        matrix are discarded to compute the quantile statistics. If False,
        these entries are treated as zeros.

    subsample : int, optional (default=1e5)
        Maximum number of samples used to estimate the quantiles for
        computational efficiency. Note that the subsampling procedure may
        differ for value-identical sparse and dense matrices.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random. Note that this is used by subsampling and smoothing
        noise.

    copy : boolean, optional, (default=True)
        Set to False to perform inplace transformation and avoid a copy (if the
        input is already a numpy array).

    Attributes
    ----------
    quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding the quantiles of reference.

    references_ : ndarray, shape(n_quantiles, )
        Quantiles of references.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import quantile_transform
    >>> rng = np.random.RandomState(0)
    >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
    >>> quantile_transform(X, n_quantiles=10, random_state=0)
    ... # doctest: +ELLIPSIS
    array([...])

    See also
    --------
    QuantileTransformer : Performs quantile-based scaling using the
        ``Transformer`` API (e.g. as part of a preprocessing
        :class:`sklearn.pipeline.Pipeline`).
    power_transform : Maps data to a normal distribution using a
        power transformation.
    scale : Performs standardization that is faster, but less robust
        to outliers.
    robust_scale : Performs robust standardization that removes the influence
        of outliers but does not put outliers and inliers on the same scale.

    Notes
    -----
    NaNs are treated as missing values: disregarded in fit, and maintained in
    transform.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """
    # Constructing the transformer has no side effects (parameters are only
    # stored), so validating `axis` up front preserves behavior.
    if axis not in (0, 1):
        raise ValueError("axis should be either equal to 0 or 1. Got"
                         " axis={}".format(axis))

    transformer = QuantileTransformer(
        n_quantiles=n_quantiles,
        output_distribution=output_distribution,
        subsample=subsample,
        ignore_implicit_zeros=ignore_implicit_zeros,
        random_state=random_state,
        copy=copy)

    # axis == 1 means "transform each sample": work on the transpose.
    if axis == 1:
        return transformer.fit_transform(X.T).T
    return transformer.fit_transform(X)
class PowerTransformer(BaseEstimator, TransformerMixin):
    """Apply a power transform featurewise to make data more Gaussian-like.

    Power transforms are a family of parametric, monotonic transformations
    that are applied to make data more Gaussian-like. This is useful for
    modeling issues related to heteroscedasticity (non-constant variance),
    or other situations where normality is desired.

    Currently, PowerTransformer supports the Box-Cox transform. Box-Cox
    requires input data to be strictly positive. The optimal parameter
    for stabilizing variance and minimizing skewness is estimated through
    maximum likelihood.

    By default, zero-mean, unit-variance normalization is applied to the
    transformed data.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    method : str, (default='box-cox')
        The power transform method. Currently, 'box-cox' (Box-Cox transform)
        is the only option available.

    standardize : boolean, default=True
        Set to True to apply zero-mean, unit-variance normalization to the
        transformed output.

    copy : boolean, optional, default=True
        Set to False to perform inplace computation during transformation.

    Attributes
    ----------
    lambdas_ : array of float, shape (n_features,)
        The parameters of the power transformation for the selected features.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import PowerTransformer
    >>> pt = PowerTransformer()
    >>> data = [[1, 2], [3, 2], [4, 5]]
    >>> print(pt.fit(data))
    PowerTransformer(copy=True, method='box-cox', standardize=True)
    >>> print(pt.lambdas_) # doctest: +ELLIPSIS
    [ 1.051... -2.345...]
    >>> print(pt.transform(data)) # doctest: +ELLIPSIS
    [[-1.332... -0.707...]
     [ 0.256... -0.707...]
     [ 1.076... 1.414...]]

    See also
    --------
    power_transform : Equivalent function without the estimator API.

    QuantileTransformer : Maps data to a standard normal distribution with
        the parameter `output_distribution='normal'`.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    References
    ----------
    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
    Royal Statistical Society B, 26, 211-252 (1964).
    """

    def __init__(self, method='box-cox', standardize=True, copy=True):
        # Parameters are stored verbatim; validation happens in fit
        # via _check_input.
        self.method = method
        self.standardize = standardize
        self.copy = copy

    def fit(self, X, y=None):
        """Estimate the optimal parameter for each feature.

        The optimal parameter for minimizing skewness is estimated
        on each feature independently. If the method is Box-Cox,
        the lambdas are estimated using maximum likelihood.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data used to estimate the optimal transformation parameters.

        y : Ignored

        Returns
        -------
        self : object
        """
        X = self._check_input(X, check_positive=True, check_method=True)

        self.lambdas_ = []
        transformed = []
        for col in X.T:
            # lmbda=None makes scipy estimate lambda by maximum likelihood.
            col_trans, lmbda = stats.boxcox(col, lmbda=None)
            self.lambdas_.append(lmbda)
            transformed.append(col_trans)
        self.lambdas_ = np.array(self.lambdas_)
        transformed = np.array(transformed)

        if self.standardize:
            self._scaler = StandardScaler()
            # `transformed` is (n_features, n_samples); transpose back so
            # the scaler sees samples as rows.
            self._scaler.fit(X=transformed.T)

        return self

    def transform(self, X):
        """Apply the power transform to each feature using the fitted lambdas.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data to be transformed using a power transformation.
        """
        check_is_fitted(self, 'lambdas_')
        X = self._check_input(X, check_positive=True, check_shape=True)

        for i, lmbda in enumerate(self.lambdas_):
            # With an explicit lmbda, boxcox returns only the transformed
            # column (no lambda estimation).
            X[:, i] = stats.boxcox(X[:, i], lmbda=lmbda)

        if self.standardize:
            X = self._scaler.transform(X)

        return X

    def inverse_transform(self, X):
        """Apply the inverse power transformation using the fitted lambdas.

        The inverse of the Box-Cox transformation is given by::

            if lambda == 0:
                X = exp(X_trans)
            else:
                X = (X_trans * lambda + 1) ** (1 / lambda)

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The transformed data.
        """
        check_is_fitted(self, 'lambdas_')
        X = self._check_input(X, check_shape=True)

        if self.standardize:
            # Undo standardization first: it was applied last in transform.
            X = self._scaler.inverse_transform(X)

        for i, lmbda in enumerate(self.lambdas_):
            x = X[:, i]
            if lmbda == 0:
                x_inv = np.exp(x)
            else:
                x_inv = (x * lmbda + 1) ** (1 / lmbda)
            X[:, i] = x_inv

        return X

    def _check_input(self, X, check_positive=False, check_shape=False,
                     check_method=False):
        """Validate the input before fit and transform.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        check_positive : bool
            If True, check that all data is positive and non-zero.

        check_shape : bool
            If True, check that n_features matches the length of self.lambdas_

        check_method : bool
            If True, check that the transformation method is valid.
        """
        X = check_array(X, ensure_2d=True, dtype=FLOAT_DTYPES, copy=self.copy)

        if check_positive and self.method == 'box-cox' and np.any(X <= 0):
            raise ValueError("The Box-Cox transformation can only be applied "
                             "to strictly positive data")

        if check_shape and not X.shape[1] == len(self.lambdas_):
            raise ValueError("Input data has a different number of features "
                             "than fitting data. Should have {n}, data has {m}"
                             .format(n=len(self.lambdas_), m=X.shape[1]))

        valid_methods = ('box-cox',)
        if check_method and self.method not in valid_methods:
            raise ValueError("'method' must be one of {}, "
                             "got {} instead."
                             .format(valid_methods, self.method))

        return X
def power_transform(X, method='box-cox', standardize=True, copy=True):
    """Apply a power transform featurewise to make data more Gaussian-like.

    Power transforms are a family of parametric, monotonic transformations
    that are applied to make data more Gaussian-like. This is useful for
    modeling issues related to heteroscedasticity (non-constant variance),
    or other situations where normality is desired.

    Currently, power_transform() supports the Box-Cox transform. Box-Cox
    requires input data to be strictly positive. The optimal parameter
    for stabilizing variance and minimizing skewness is estimated
    through maximum likelihood.

    By default, zero-mean, unit-variance normalization is applied to the
    transformed data.

    Read more in the :ref:`User Guide <preprocessing_transformer>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data to be transformed using a power transformation.

    method : str, (default='box-cox')
        The power transform method. Currently, 'box-cox' (Box-Cox transform)
        is the only option available.

    standardize : boolean, default=True
        Set to True to apply zero-mean, unit-variance normalization to the
        transformed output.

    copy : boolean, optional, default=True
        Set to False to perform inplace computation.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import power_transform
    >>> data = [[1, 2], [3, 2], [4, 5]]
    >>> print(power_transform(data)) # doctest: +ELLIPSIS
    [[-1.332... -0.707...]
     [ 0.256... -0.707...]
     [ 1.076... 1.414...]]

    See also
    --------
    PowerTransformer: Performs power transformation using the ``Transformer``
        API (as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).

    quantile_transform : Maps data to a standard normal distribution with
        the parameter `output_distribution='normal'`.

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

    References
    ----------
    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
    Royal Statistical Society B, 26, 211-252 (1964).
    """
    # Thin functional wrapper around the estimator API: build the
    # transformer and fit/transform in one step.
    transformer = PowerTransformer(method=method, standardize=standardize,
                                   copy=copy)
    return transformer.fit_transform(X)
class CategoricalEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical features as a numeric array.
The input to this transformer should be an array-like of integers or
strings, denoting the values taken on by categorical (discrete) features.
The features can be encoded using a one-hot (aka one-of-K or dummy)
encoding scheme (``encoding='onehot'``, the default) or converted
to ordinal integers (``encoding='ordinal'``).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
encoding : str, 'onehot', 'onehot-dense' or 'ordinal'
The type of encoding to use (default is 'onehot'):
- 'onehot': encode the features using a one-hot aka one-of-K scheme
(or also called 'dummy' encoding). This creates a binary column for
each category and returns a sparse matrix.
- 'onehot-dense': the same as 'onehot' but returns a dense array
instead of a sparse matrix.
- 'ordinal': encode the features as ordinal integers. This results in
a single column of integers (0 to n_categories - 1) per feature.
categories : 'auto' or a list of lists/arrays of values.
Categories (unique values) per feature:
- 'auto' : Determine categories automatically from the training data.
- list : ``categories[i]`` holds the categories expected in the ith
column. The passed categories must be sorted and should not mix
strings and numeric values.
The used categories can be found in the ``categories_`` attribute.
dtype : number type, default np.float64
Desired dtype of output.
handle_unknown : 'error' (default) or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform (default is to raise). When this parameter
is set to 'ignore' and an unknown category is encountered during
transform, the resulting one-hot encoded columns for this feature
will be all zeros. In the inverse transform, an unknown category
will be denoted as None.
Ignoring unknown categories is not supported for
``encoding='ordinal'``.
Attributes
----------
categories_ : list of arrays
The categories of each feature determined during fitting
(in order corresponding with output of ``transform``).
Examples
--------
Given a dataset with two features, we let the encoder find the unique
values per feature and transform the data to a binary one-hot encoding.
>>> from sklearn.preprocessing import CategoricalEncoder
>>> enc = CategoricalEncoder(handle_unknown='ignore')
>>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
>>> enc.fit(X)
... # doctest: +ELLIPSIS
CategoricalEncoder(categories='auto', dtype=<... 'numpy.float64'>,
encoding='onehot', handle_unknown='ignore')
>>> enc.categories_
[array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
>>> enc.transform([['Female', 1], ['Male', 4]]).toarray()
array([[1., 0., 1., 0., 0.],
[0., 1., 0., 0., 0.]])
>>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]])
array([['Male', 1],
[None, 2]], dtype=object)
See also
--------
sklearn.preprocessing.OneHotEncoder : performs a one-hot encoding of
integer ordinal features. The ``OneHotEncoder assumes`` that input
features take on values in the range ``[0, max(feature)]`` instead of
using the unique values.
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, encoding='onehot', categories='auto', dtype=np.float64,
             handle_unknown='error'):
    """Store the encoder configuration.

    No validation happens here; parameters are checked in ``fit`` so
    that ``set_params``/clone round-trips keep the raw values.
    """
    self.handle_unknown = handle_unknown
    self.dtype = dtype
    self.categories = categories
    self.encoding = encoding
def fit(self, X, y=None):
    """Fit the CategoricalEncoder to X.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        The data to determine the categories of each feature.
    y : ignored
        Present for API consistency with other estimators.

    Returns
    -------
    self

    Raises
    ------
    ValueError
        For invalid ``encoding``/``handle_unknown`` values, unsorted
        user-supplied categories, or (when ``handle_unknown='error'``)
        values in X outside the supplied categories.
    """
    if self.encoding not in ['onehot', 'onehot-dense', 'ordinal']:
        template = ("encoding should be either 'onehot', 'onehot-dense' "
                    "or 'ordinal', got %s")
        # BUG FIX: the message previously interpolated
        # ``self.handle_unknown`` instead of the offending encoding value.
        raise ValueError(template % self.encoding)
    if self.handle_unknown not in ['error', 'ignore']:
        template = ("handle_unknown should be either 'error' or "
                    "'ignore', got %s")
        raise ValueError(template % self.handle_unknown)
    if self.encoding == 'ordinal' and self.handle_unknown == 'ignore':
        raise ValueError("handle_unknown='ignore' is not supported for"
                         " encoding='ordinal'")
    if self.categories != 'auto':
        for cats in self.categories:
            if not np.all(np.sort(cats) == np.array(cats)):
                raise ValueError("Unsorted categories are not yet "
                                 "supported")
    # Coerce string data to object dtype so longer unseen strings are not
    # truncated later; numeric data keeps its native dtype.
    X_temp = check_array(X, dtype=None)
    if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_):
        X = check_array(X, dtype=np.object)
    else:
        X = X_temp
    n_samples, n_features = X.shape
    # One LabelEncoder per column holds that column's category -> code map.
    self._label_encoders_ = [LabelEncoder() for _ in range(n_features)]
    for i in range(n_features):
        le = self._label_encoders_[i]
        Xi = X[:, i]
        if self.categories == 'auto':
            le.fit(Xi)
        else:
            if self.handle_unknown == 'error':
                valid_mask = np.in1d(Xi, self.categories[i])
                if not np.all(valid_mask):
                    diff = np.unique(Xi[~valid_mask])
                    msg = ("Found unknown categories {0} in column {1}"
                           " during fit".format(diff, i))
                    raise ValueError(msg)
            le.classes_ = np.array(self.categories[i])
    self.categories_ = [le.classes_ for le in self._label_encoders_]
    return self
def transform(self, X):
    """Transform X using the specified encoding scheme.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        The data to encode.

    Returns
    -------
    X_out : sparse matrix or a 2-d array
        Transformed input.
    """
    # Same dtype coercion as in ``fit``: string input becomes an object
    # array so unseen longer strings are not silently truncated.
    X_temp = check_array(X, dtype=None)
    if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_):
        X = check_array(X, dtype=np.object)
    else:
        X = X_temp
    n_samples, n_features = X.shape
    # X_int holds per-column ordinal codes; X_mask is False where a value
    # was not seen during fit (those entries get an all-zero one-hot row).
    X_int = np.zeros_like(X, dtype=np.int)
    X_mask = np.ones_like(X, dtype=np.bool)
    for i in range(n_features):
        Xi = X[:, i]
        valid_mask = np.in1d(Xi, self.categories_[i])
        if not np.all(valid_mask):
            if self.handle_unknown == 'error':
                diff = np.unique(X[~valid_mask, i])
                msg = ("Found unknown categories {0} in column {1}"
                       " during transform".format(diff, i))
                raise ValueError(msg)
            else:
                # Set the problematic rows to an acceptable placeholder
                # value and continue.  The rows are marked in ``X_mask``
                # and are removed from the encoded output below.
                X_mask[:, i] = valid_mask
                Xi = Xi.copy()
                Xi[~valid_mask] = self.categories_[i][0]
        X_int[:, i] = self._label_encoders_[i].transform(Xi)
    if self.encoding == 'ordinal':
        return X_int.astype(self.dtype, copy=False)
    # Build the one-hot matrix directly in CSR form.  Each input feature
    # occupies a contiguous band of output columns whose offsets are the
    # cumulative category counts in ``feature_indices``.
    mask = X_mask.ravel()
    n_values = [cats.shape[0] for cats in self.categories_]
    n_values = np.array([0] + n_values)
    feature_indices = np.cumsum(n_values)
    # Column index of every "1" entry; masked (unknown) entries dropped.
    indices = (X_int + feature_indices[:-1]).ravel()[mask]
    indptr = X_mask.sum(axis=1).cumsum()
    indptr = np.insert(indptr, 0, 0)
    data = np.ones(n_samples * n_features)[mask]
    out = sparse.csr_matrix((data, indices, indptr),
                            shape=(n_samples, feature_indices[-1]),
                            dtype=self.dtype)
    if self.encoding == 'onehot-dense':
        return out.toarray()
    else:
        return out
def inverse_transform(self, X):
    """Convert back the data to the original representation.

    In case unknown categories are encountered (all zero's in the
    one-hot encoding), ``None`` is used to represent this category.

    Parameters
    ----------
    X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
        The transformed data.

    Returns
    -------
    X_tr : array-like, shape [n_samples, n_features]
        Inverse transformed array.
    """
    check_is_fitted(self, 'categories_')
    X = check_array(X, accept_sparse='csr')
    n_samples, _ = X.shape
    n_features = len(self.categories_)
    n_transformed_features = sum([len(cats) for cats in self.categories_])
    # validate shape of passed X: ordinal expects one column per feature,
    # one-hot expects one column per category across all features.
    msg = ("Shape of the passed X data is not correct. Expected {0} "
           "columns, got {1}.")
    if self.encoding == 'ordinal' and X.shape[1] != n_features:
        raise ValueError(msg.format(n_features, X.shape[1]))
    elif (self.encoding.startswith('onehot')
            and X.shape[1] != n_transformed_features):
        raise ValueError(msg.format(n_transformed_features, X.shape[1]))
    # create resulting array with a dtype wide enough for every feature's
    # categories (e.g. object when strings and numbers are mixed).
    dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
    X_tr = np.empty((n_samples, n_features), dtype=dt)
    if self.encoding == 'ordinal':
        for i in range(n_features):
            labels = X[:, i].astype('int64')
            X_tr[:, i] = self.categories_[i][labels]
    else:  # encoding == 'onehot' / 'onehot-dense'
        j = 0
        found_unknown = {}
        for i in range(n_features):
            # Each feature's one-hot band spans columns [j, j+n_categories).
            n_categories = len(self.categories_[i])
            sub = X[:, j:j + n_categories]
            # for sparse X argmax returns 2D matrix, ensure 1D array
            labels = np.asarray(_argmax(sub, axis=1)).flatten()
            X_tr[:, i] = self.categories_[i][labels]
            if self.handle_unknown == 'ignore':
                # ignored unknown categories: we have a row of all zero's
                unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
                if unknown.any():
                    found_unknown[i] = unknown
            j += n_categories
        # if ignored are found: potentially need to upcast result to
        # object dtype so None can be inserted for unknown entries.
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)
            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None
    return X_tr
|
BiaDarkia/scikit-learn
|
sklearn/preprocessing/data.py
|
Python
|
bsd-3-clause
| 117,634
|
[
"Gaussian"
] |
da78accd317a650daa8bde486b7d6e5b78bb302724004ce6f39d0ac701244c8a
|
'''
Utility class for finding, selecting, weighting, and loading observation data
for neighboring stations around a point location.
Copyright 2014, Jared Oyler.
This file is part of TopoWx.
TopoWx is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TopoWx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TopoWx. If not, see <http://www.gnu.org/licenses/>.
'''
__all__ = ['StationSelect']
from twx.db import LON, LAT, STN_ID
import numpy as np
from twx.utils import grt_circle_dist
class StationSelect(object):
    '''
    Class for finding, selecting, weighting, and loading observation data
    for neighboring stations around a point location.

    Results for the most recent point are cached on the instance (the
    ``pt_*`` attributes) so repeated queries for the same point with
    different neighbor counts avoid recomputing distances.
    '''

    def __init__(self, stn_da, stn_mask=None, rm_zero_dist_stns=False):
        '''
        Parameters
        ----------
        stn_da : twx.db.StationSerialDataDb
            A StationSerialDataDb object pointing to the
            database from which neighboring stations should
            be loaded.
        stn_mask : boolean ndarray, optional
            A boolean mask specifying which stations in stn_da
            should be considered as possible neighbors. True =
            station should be considered; False = station should
            be removed and not considered.
        rm_zero_dist_stns : boolean, optional
            If true, any stations that have the exact same lon,lat
            as the point location, will not be considered neighboring stations.
        '''
        if stn_mask is None:
            self.stns = stn_da.stns
        else:
            self.stns = stn_da.stns[stn_mask]
        self.stn_da = stn_da
        self.rm_zero_dist_stns = rm_zero_dist_stns
        # All-True mask reused when no stations need to be excluded.
        self.mask_all = np.ones(self.stns.size, dtype=np.bool)
        # Cached data for a specific point; populated by __set_pt.
        self.pt_lat = None
        self.pt_lon = None
        self.pt_stns_rm = None
        self.pt_mask_stns_rm = None
        self.pt_stn_dists = None
        self.pt_dist_sort = None
        self.pt_sort_stn_dists = None
        self.pt_sort_stns = None

    def __set_pt(self, lat, lon, stns_rm=None):
        # Compute and cache distance-sorted neighbor arrays for the point
        # (lat, lon), excluding any stations listed in stns_rm.  A no-op
        # when the same point/exclusions were set by the previous call.
        if isinstance(stns_rm, str) or isinstance(stns_rm, unicode):
            stns_rm = np.array([stns_rm])
        elif not isinstance(stns_rm, np.ndarray) and not stns_rm is None:
            raise Exception("stns_rm must be str, unicode, or numpy array of str/unicode")
        do_set_pt = True
        if self.pt_lat == lat and self.pt_lon == lon:
            # Same point as last call: only recompute if the exclusion
            # list changed.  The broad except guards against comparisons
            # between None and arrays of mismatched shape.
            try:
                if self.pt_stns_rm is None and stns_rm is None:
                    do_set_pt = False
                elif np.alltrue(self.pt_stns_rm == stns_rm):
                    do_set_pt = False
            except:
                pass
        if do_set_pt:
            stn_dists = grt_circle_dist(lon, lat, self.stns[LON], self.stns[LAT])
            fnl_stns_rm = stns_rm if stns_rm is not None else np.array([])
            if self.rm_zero_dist_stns:
                # Remove any stations that are at the same location (dist == 0)
                fnl_stns_rm = np.unique(np.concatenate((fnl_stns_rm, self.stns[STN_ID][stn_dists == 0])))
            if fnl_stns_rm.size > 0:
                mask_rm = np.logical_not(np.in1d(self.stns[STN_ID], fnl_stns_rm, assume_unique=True))
            else:
                mask_rm = self.mask_all
            self.pt_lat = lat
            self.pt_lon = lon
            self.pt_stns_rm = stns_rm
            self.pt_mask_stns_rm = mask_rm
            self.pt_stn_dists = stn_dists
            # Sort stations by distance, then drop the excluded ones.
            self.pt_dist_sort = np.argsort(self.pt_stn_dists)
            self.pt_sort_stn_dists = np.take(self.pt_stn_dists, self.pt_dist_sort)
            self.pt_sort_stns = np.take(self.stns, self.pt_dist_sort)
            mask_rm = np.take(self.pt_mask_stns_rm, self.pt_dist_sort)
            mask_rm = np.nonzero(mask_rm)[0]
            self.pt_sort_stn_dists = np.take(self.pt_sort_stn_dists, mask_rm)
            self.pt_sort_stns = np.take(self.pt_sort_stns, mask_rm)

    def set_ngh_stns(self, lat, lon, nnghs, load_obs=True, obs_mth=None, stns_rm=None):
        '''
        Find and set neighboring stations for a specific point.

        Parameters
        ----------
        lat : float
            Latitude of the point.
        lon : float
            Longitude of the point.
        nnghs : int
            The number of neighboring stations to find.
        load_obs : boolean, optional
            If true, the observations of the neighboring stations
            will be loaded.
        obs_mth : int between 1-12, optional
            If not None, will only load observations for a specific month
        stns_rm : ndarray or str, optional
            An array of station ids or a single station id for stations that
            should not be considered neighbors for the specific point.

        Class Attributes Set
        ----------
        ngh_stns : structured array
            A structure array of metadata for the neighboring stations
            (sorted by station id, not distance).
        ngh_obs : ndarray
            A 2-D array of daily observations for the neighboring stations.
            Each column is a single station time series. Will be none if
            load_obs = False
        ngh_dists : ndarray
            A 1-D array of neighboring station distances from the point (km)
        ngh_wgt : ndarray
            A 1-D array of distance-based weights (bisquare weighting) for
            each neighboring station.
        '''
        self.__set_pt(lat, lon, stns_rm)
        stn_dists = self.pt_sort_stn_dists
        stns = self.pt_sort_stns
        # Distance bandwidth = distance of the (nnghs + 1)-th station, so
        # every selected neighbor gets a strictly positive weight.
        dbw = stn_dists[nnghs]
        ngh_stns = stns[0:nnghs]
        dists = stn_dists[0:nnghs]
        # bisquare weighting
        wgt = np.square(1.0 - np.square(dists / dbw))
        # Alternative weighting schemes kept for reference:
        # Gaussian
        # wgt = np.exp(-.5*((dists/dbw)**2))
        # wgt = ((1.0+np.cos(np.pi*(dists/dbw)))/2.0)
        # wgt = 1.0/(dists**2)
        # wgt = wgt/np.sum(wgt)
        # Sort by stn id
        stnid_sort = np.argsort(ngh_stns[STN_ID])
        interp_stns = np.take(ngh_stns, stnid_sort)
        wgt = np.take(wgt, stnid_sort)
        dists = np.take(dists, stnid_sort)
        if load_obs:
            # load_obs returns columns ordered by the ids it was given;
            # ngh_stns[STN_ID] here is distance-ordered -- NOTE(review):
            # confirm load_obs reorders by id to match interp_stns.
            ngh_obs = self.stn_da.load_obs(ngh_stns[STN_ID], mth=obs_mth)
        else:
            ngh_obs = None
        self.ngh_stns = interp_stns
        self.ngh_obs = ngh_obs
        self.ngh_dists = dists
        self.ngh_wgt = wgt
|
jaredwo/topowx
|
twx/interp/station_select.py
|
Python
|
gpl-3.0
| 6,881
|
[
"Gaussian"
] |
c88ded832fea13dd2a6d638b64356bd34f8d414da1ccd132748fb6ca3806f249
|
#!/usr/bin/env python
# encoding: utf-8
"""
Functions related to sky subtraction.
high level iraf wrappers: combine_sky_spectra, setairmass_galaxy, skies
sky_subtract_galaxy
low level FITS functions: find_line_peak, find_lines, get_continuum,
get_peak_cont, get_wavelength_location
functions for solving: get_std_sky, guess_scaling, try_sky
high level functions: generate_sky, modify_sky, sky_subtract
"""
import os
import subprocess
import pyfits
import scipy.optimize
from .data import get, get_object_spectra, get_sky_spectra, write
from .iraf_low import sarith, scombine, setairmass
from .misc import avg, base, list_convert, rms, std, zerocount
# define some atmospheric spectral lines
LINES = [5893, 5578, 6301, 6365]
## High level IRAF wrappers ##
def combine_sky_spectra(name):
    """Convert all sky spectra to the same scaling, then combine them.

    The combined spectrum is written to ``<name>/sky.1d``.
    """
    sky_list = get_sky_spectra(name)
    sizes = get(name, 'sizes')
    scaled = []
    for spectra in sky_list:
        scale = sizes[spectra]  # scale by the number of pixels across
        num = zerocount(spectra)
        sarith('%s/disp/%s.1d' % (name, num), '/', scale,
               '%s/sky/%s.scaled' % (name, num))
        scaled.append('%s/sky/%s.scaled' % (name, num))
    # Remove any previous combined sky so scombine writes fresh output.
    if os.path.isfile('%s/sky.1d' % name):
        os.remove('%s/sky.1d' % name)
    scombine(list_convert(scaled), '%s/sky.1d' % name)
def setairmass_galaxy(name):
    """Set the effective air mass on each sky-subtracted object spectrum
    belonging to galaxy ``name``."""
    for spec_number in get_object_spectra(name):
        setairmass('%s/sub/%s.1d' % (name, zerocount(spec_number)))
def skies(name):
    """Create a combined sky spectrum, perform sky subtraction, and set
    airmass metadata """
    # Output directories are created on demand before each stage.
    if not os.path.isdir('%s/sky' % name):
        os.mkdir('%s/sky' % name)
    combine_sky_spectra(name)
    if not os.path.isdir('%s/sub' % name):
        os.mkdir('%s/sub' % name)
    sky_subtract_galaxy(name)
    setairmass_galaxy(name)
def sky_subtract_galaxy(name):
    """Remove sky lines from each spectrum in a galaxy, making a guess
    at an appropriate scaling level if none is stored already.

    Parameters
    ----------
    name : str
        Galaxy directory name.
    """
    spectra = get_object_spectra(name)
    sky_levels = get(name, 'sky')
    for spectrum in spectra:
        sky_level = sky_levels[spectrum]
        if not sky_level:
            sky_level = sky_subtract(name, spectrum)
            # BUG FIX: store the newly determined level so the write()
            # below actually persists it.  Previously the computed value
            # was discarded and the unchanged mapping written back.
            sky_levels[spectrum] = sky_level
        generate_sky(name, spectrum, sky_level)
    write(name, 'sky', sky_levels)
## Functions for manipulating the fits data at a low level ##
def find_line_peak(data, location, search):
    """Return the index of the local maximum of ``data`` within
    ``search`` points on either side of ``location``."""
    window = range(int(location - search), int(location + search))
    return max(window, key=lambda idx: data[idx])
def find_lines(name, num):
    """Find the pixel locations of the known atmospheric lines (LINES)
    in a dispersion-corrected FITS spectrum."""
    fn = '%s/disp/%s.1d.fits' % (name, num)
    # NOTE(review): hdulist is never closed; the file stays open until
    # garbage collection.
    hdulist = pyfits.open(fn)
    data = hdulist[0].data
    header = hdulist[0].header
    locations = []
    for line in LINES:
        line_loc = get_wavelength_location(header, line)
        # Search within 5 pixels of each line's expected location.
        locations.append(find_line_peak(data, line_loc, 5))
    return locations
def get_continuum(location, data):
    """Return the root mean square of continuum values sampled just
    above and just below ``location``."""
    above = base(location, data, 1)
    below = base(location, data, -1)
    samples = data.tolist()
    window = samples[above:(above + 3)] + samples[(below - 3):below]
    return rms(*window)
def get_peak_cont(hdulist, wavelength, search):
    """Return the maximum data value near a given wavelength along with
    the local continuum level around that peak."""
    data = hdulist[0].data
    header = hdulist[0].header
    wavelength_location = get_wavelength_location(header, wavelength)
    peak_location = find_line_peak(data, wavelength_location, search)
    peak = data[peak_location]
    cont = get_continuum(peak_location, data)
    return peak, cont
def get_wavelength_location(headers, wavelength):
    """Return the pixel index of ``wavelength`` within a FITS file,
    using the linear dispersion keywords CRVAL1 (start) and CDELT1
    (step) from the header."""
    offset = wavelength - headers['CRVAL1']
    return round(offset / headers['CDELT1'])
## Functions for solving for the proper level of sky subtraction ##
def get_std_sky(scale, name, num):
    """Attempt a sky subtraction at a given scaling, and return a metric of
    how good that scaling is.

    A proper sky subtraction should leave a basically smooth continuum,
    so this function looks at the standard deviation of the spectrum
    around spectral lines known to be atmospheric.  These values are
    averaged and returned; lower numbers are better.
    """
    # scipy.optimize passes arrays; coerce to a scalar for path building.
    scale = float(scale)
    try_sky(scale, name, num)
    locations = find_lines(name, num)
    fn = '%s/tmp/%s/%s.1d.fits' % (name, num, scale)
    hdulist_out = pyfits.open(fn)
    deviations = []
    for item in locations:
        # 50-pixel window on either side of each sky line.
        values = hdulist_out[0].data[(item - 50):(item + 50)]
        deviations.append(std(*values))
    return avg(*deviations)
def guess_scaling(name, spectrum):
    """Make a guess at an appropriate scaling factor for sky subtraction.

    For each atmospheric spectral line given, find the difference
    between the peak and the continuum levels in both the sky spectrum
    and the object spectrum. The ratio of these is the scaling factor.
    Average the ratios from each line and return this value."""
    spectra = '%s/disp/%s.1d.fits' % (name, zerocount(spectrum))
    skyname = '%s/sky.1d.fits' % name
    spectrafits = pyfits.open(spectra)
    skyfits = pyfits.open(skyname)
    scalings = []
    for line in LINES:
        # Peak height above local continuum in object and sky spectra.
        spec_peak, spec_cont = get_peak_cont(spectrafits, line, 5)
        sky_peak, sky_cont = get_peak_cont(skyfits, line, 5)
        scale = ((spec_peak - spec_cont) / (sky_peak - sky_cont))
        scalings.append(scale)
    return avg(*scalings)
def try_sky(scale, name, num):
    """Perform a sky subtraction at a given scaling, saving the result
    to a temporary location.  Results already on disk are reused."""
    sky = '%s/sky.1d' % name
    scaled_sky = '%s/tmp/%s/%s.sky.1d' % (name, num, scale)
    in_fn = '%s/disp/%s.1d' % (name, num)
    out_fn = '%s/tmp/%s/%s.1d' % (name, num, scale)
    # Skip work if either intermediate already exists for this scale.
    if not (os.path.isfile('%s.fits' % scaled_sky) or
            os.path.isfile('%s.fits' % out_fn)):
        sarith(sky, '*', scale, scaled_sky)
        sarith(in_fn, '-', scaled_sky, out_fn)
## Functions wrapping the solvers and providing output ##
def generate_sky(name, spectrum, sky_level):
    """Use sarith to perform sky subtraction at a given scaling level.

    Writes the scaled sky to ``<name>/sky/<num>.sky.1d`` and the
    subtracted spectrum to ``<name>/sub/<num>.1d``, replacing any
    previous outputs.
    """
    num = zerocount(spectrum)
    in_fn = '%s/disp/%s.1d' % (name, num)
    in_sky = '%s/sky.1d' % name
    out_fn = '%s/sub/%s.1d' % (name, num)
    out_sky = '%s/sky/%s.sky.1d' % (name, num)
    # Remove stale outputs with os.remove instead of shelling out to
    # ``rm -f``: no subprocess overhead and portable error semantics.
    for stale in ('%s.fits' % out_fn, '%s.fits' % out_sky):
        if os.path.isfile(stale):
            os.remove(stale)
    sarith(in_sky, '*', sky_level, out_sky)
    sarith(in_fn, '-', out_sky, out_fn)
def modify_sky(path, name, number, op, value):
    """Change the level of sky subtraction for a region by an increment.

    Parameters
    ----------
    path : str
        Working directory to chdir into before operating.
    name : str
        Galaxy directory name.
    number : hashable
        Spectrum identifier within the stored sky-level mapping.
    op : str
        Either '+' to raise or '-' to lower the stored level.
    value : number
        Increment applied to the stored level.

    Raises
    ------
    ValueError
        If ``op`` is not '+' or '-'.
    """
    os.chdir(path)
    sky_levels = get(name, 'sky')
    sky_level = sky_levels[number]
    if op == '+':
        new_sky_level = sky_level + value
    elif op == '-':
        new_sky_level = sky_level - value
    else:
        # BUG FIX: an unknown operator previously fell through and
        # produced a confusing NameError; fail fast with a clear message.
        raise ValueError("op must be '+' or '-', got %r" % (op,))
    sky_levels[number] = new_sky_level
    write(name, 'sky', sky_levels)
    generate_sky(name, number, new_sky_level)
def sky_subtract(name, spectrum):
    """Optimize the get_std_sky function to determine the best level of sky
    subtraction. Return the value found."""
    num = zerocount(spectrum)
    guess = guess_scaling(name, spectrum)
    # Scratch directories for try_sky output; removed after optimizing.
    # NOTE(review): os.mkdir raises if '<name>/tmp' already exists --
    # confirm callers always clean up between runs.
    os.mkdir('%s/tmp' % name)
    os.mkdir('%s/tmp/%s' % (name, num))
    xopt = scipy.optimize.fmin(get_std_sky, guess,
                               args=(name, num), xtol=0.001)
    subprocess.call(['rm', '-rf', '%s/tmp' % name])
    return float(xopt)
|
tungol/mslit
|
mslit/sky.py
|
Python
|
unlicense
| 8,187
|
[
"Galaxy"
] |
f4cef3f38826120f54e50e9182a659aa4a6bf68e655377bcbc52ec33f8981c43
|
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.utils.encoding import smart_str
from django.core.servers.basehttp import FileWrapper
from tethys_gizmos.gizmo_options import TextInput, JobsTable
from app import SswDownloader as app
import os
import time
import urllib
import urllib2
@login_required
def home(request):
    """
    Controller for the app home page.

    GET renders the URL-entry form; POST with 'urls_url' submits a
    Condor download job and redirects to the jobs page.
    """
    text_input_options = TextInput(name='urls_url',
                                   icon_append='glyphicon glyphicon-link',
                                   )
    if request.POST and 'urls_url' in request.POST:
        urls_url = request.POST['urls_url']
        # configure and submit condor job
        jm = app.get_job_manager()
        job_name = 'SSW Download-%s' % time.time()
        job_description = _get_description(urls_url)
        job = jm.create_job(job_name, request.user, 'ssw_download', description=job_description)
        # '$(job_name)' is expanded by Condor, not Python.
        job.set_attribute('arguments', '"%s $(job_name).nc"' % (urls_url, ))
        # job.set_attribute('arguments', [urls_url, '%s.nc' % job.name])
        job.execute()
        # redirect to jobs page
        return redirect('jobs/')
    context = {'text_input_options': text_input_options}
    return render(request, 'ssw_downloader/home.html', context)
def _get_description(urls_url):
    """Build a one-line job description (file count, date range, bounding
    box) from a remote text file listing download URLs."""
    def get_url_variables(url):
        # Parse the query string of ``url`` into a dict.
        raw_pairs = url.split('?')[1].split('&')
        url_vars = dict()
        for pair in raw_pairs:
            k,v = pair.split('=')
            url_vars[k] = v
        return url_vars
    def get_date(url_vars):
        # The second dot-separated field of LABEL appears to embed a
        # date; chars 1-8 are read as YYYYMMDD (first char skipped) --
        # TODO confirm the LABEL format against actual URLs.
        date = url_vars['LABEL'].split('.')[1]
        date_str = "%s-%s-%s" % (date[1:5], date[5:7], date[7:9])
        return date_str
    # One URL per whitespace-separated token in the fetched file.
    urls = urllib2.urlopen(urls_url).read().strip().split()
    first_url = urllib.unquote(urls[0])
    last_url = urllib.unquote(urls[-1])
    url_vars = get_url_variables(first_url)
    bbox = url_vars['BBOX']
    from_date = get_date(url_vars)
    to_date = get_date(get_url_variables(last_url))
    num_files = len(urls)
    description = "FILES: %d; DATES: %s to %s; BBOX: %s" % (num_files, from_date, to_date, bbox)
    return description
@login_required
def jobs(request):
    """
    Controller for the jobs page: tabulates the current user's jobs.
    """
    jm = app.get_job_manager()
    jobs = jm.list_jobs(request.user)
    jobs_table_options = JobsTable(jobs=jobs,
                                   column_fields=('id', 'description', 'run_time'),
                                   hover=True,
                                   striped=False,
                                   bordered=False,
                                   condensed=False,
                                   results_url='ssw_downloader:results',
                                   )
    context = {'jobs_table_options': jobs_table_options}
    return render(request, 'ssw_downloader/jobs.html', context)
@login_required
def results(request, job_id):
    """
    Controller for the results page of a single job.
    """
    job, file_name, file_path = _get_job(job_id)
    # Hard-coded handoff URL to the netcdf-to-gssha app; the commented
    # code below was an attempt at a dynamic handoff.
    convert_url = '/handoff/netcdf-to-gssha/old-convert-netcdf?path_to_netcdf_file=%s' % file_path
    # if _can_convert():
    #     convert_url = reverse('ssw_downloader:convert', kwargs={'job_id': job_id})
    context = {'job_id': job.id,
               'convert_url': convert_url
               }
    return render(request, 'ssw_downloader/results.html', context)
@login_required
def download(request, job_id):
    """Stream a job's result file back to the user as an attachment."""
    job, file_name, file_path = _get_job(job_id)
    # ``open(..., 'rb')`` replaces the Python-2-only ``file()`` builtin
    # and guarantees binary mode for the NetCDF payload.
    wrapper = FileWrapper(open(file_path, 'rb'))
    response = HttpResponse(wrapper, content_type='application/force-download')
    response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(file_name)
    response['Content-Length'] = os.path.getsize(file_path)
    return response
# def convert(request, job_id):
# job, file_name, file_path = _get_job(job_id)
#
# hm = app.get_handoff_manager()
# app_name = 'netcdf_to_gssha'
# handler_name = 'old-convert-netcdf'
# # return hm.handoff(request, handler_name, app_name, path_to_netcdf_file=file_path)
#
# handler = hm.get_handler(handler_name, app_name)
# if handler:
# # try:
# return redirect(handler(request, path_to_netcdf_file=file_path))
# # except Exception, e:
# # print e
#
# return redirect(reverse('ssw_downloader:results', kwargs={'job_id': job_id}))
def _get_job(job_id):
    """Return (job, result file name, absolute result file path) for the
    job with id ``job_id``."""
    job = app.get_job_manager().get_job(job_id)
    file_name = '%s.nc' % job.condorpy_job.job_name
    return job, file_name, os.path.join(job.initial_dir, file_name)
# def _can_convert():
# hm = app.get_handoff_manager()
# app_name = 'netcdf_to_gssha'
# handler_name = 'convert-netcdf'
# capabilities = hm.get_capabilities(app_name)
# for handler in capabilities:
# if handler.name == handler_name:
# return True
|
CI-WATER/tethysapp-ssw_downloader
|
tethysapp/ssw_downloader/controllers.py
|
Python
|
bsd-2-clause
| 5,053
|
[
"NetCDF"
] |
71d33ebb622b644fd8d19b938829f39d1ade946699c6516b42dd08f117bd99a3
|
"""
Generates protein-ligand docked poses using Autodock Vina.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "GPL"
import numpy as np
import os
import pybel
import tempfile
from subprocess import call
from deepchem.feat import hydrogenate_and_compute_partial_charges
from deepchem.dock.binding_pocket import RFConvexHullPocketFinder
class PoseGenerator(object):
    """Abstract superclass for all pose-generation routines."""

    def generate_poses(self, protein_file, ligand_file, out_dir=None):
        """Generate the docked complex and write out its files.

        Concrete subclasses must override this method.
        """
        raise NotImplementedError
def write_conf(receptor_filename, ligand_filename, centroid, box_dims,
               conf_filename, exhaustiveness=None):
    """Write an Autodock Vina configuration file to disk.

    The file names the receptor/ligand pdbqt inputs and describes the
    search box by its center (``centroid``) and edge lengths
    (``box_dims``); ``exhaustiveness`` is emitted only when given.
    """
    lines = [
        "receptor = %s\n" % receptor_filename,
        "ligand = %s\n\n" % ligand_filename,
        "center_x = %f\n" % centroid[0],
        "center_y = %f\n" % centroid[1],
        "center_z = %f\n\n" % centroid[2],
        "size_x = %f\n" % box_dims[0],
        "size_y = %f\n" % box_dims[1],
        "size_z = %f\n\n" % box_dims[2],
    ]
    if exhaustiveness is not None:
        lines.append("exhaustiveness = %d\n" % exhaustiveness)
    with open(conf_filename, "w") as conf_file:
        conf_file.writelines(lines)
def get_molecule_data(pybel_molecule):
    """Compute the centroid and coordinate extent of a molecule (Angstroms).

    Parameters
    ----------
    pybel_molecule : iterable
        Any iterable of atoms exposing a ``.coords`` 3-tuple
        (e.g. a ``pybel.Molecule``).

    Returns
    -------
    centroid : np.ndarray, shape (3,)
        Mean of the atomic coordinates.
    coord_range : np.ndarray, shape (3,)
        Per-axis extent (max - min) of the atomic coordinates.
    """
    # Comprehension replaces the manual append loop; the unused
    # ``num_atoms`` local from the original implementation is dropped.
    coords = np.asarray([atom.coords for atom in pybel_molecule])
    centroid = np.mean(coords, axis=0)
    coord_range = np.max(coords, axis=0) - np.min(coords, axis=0)
    return centroid, coord_range
class VinaPoseGenerator(PoseGenerator):
    """Uses Autodock Vina to generate binding poses."""

    def __init__(self, exhaustiveness=10, detect_pockets=True):
        """Initializes Vina pose generation.

        Parameters
        ----------
        exhaustiveness : int
            Vina search exhaustiveness, written into the conf file.
        detect_pockets : bool
            If True, restrict the search box to a putative binding
            pocket found by RFConvexHullPocketFinder; otherwise the box
            covers the whole protein.

        Downloads and unpacks the Vina binary next to this module when
        it is not already present (requires network, wget, and tar).
        """
        current_dir = os.path.dirname(os.path.realpath(__file__))
        self.vina_dir = os.path.join(current_dir, "autodock_vina_1_1_2_linux_x86")
        self.exhaustiveness = exhaustiveness
        self.detect_pockets = detect_pockets
        if self.detect_pockets:
            self.pocket_finder = RFConvexHullPocketFinder()
        if not os.path.exists(self.vina_dir):
            print("Vina not available. Downloading")
            # TODO(rbharath): May want to move this file to S3 so we can ensure it's
            # always available.
            wget_cmd = "wget -c http://vina.scripps.edu/download/autodock_vina_1_1_2_linux_x86.tgz"
            call(wget_cmd.split())
            print("Downloaded Vina. Extracting")
            download_cmd = "tar xzvf autodock_vina_1_1_2_linux_x86.tgz"
            call(download_cmd.split())
            print("Moving to final location")
            mv_cmd = "mv autodock_vina_1_1_2_linux_x86 %s" % current_dir
            call(mv_cmd.split())
            print("Cleanup: removing downloaded vina tar.gz")
            rm_cmd = "rm autodock_vina_1_1_2_linux_x86.tgz"
            call(rm_cmd.split())
        self.vina_cmd = os.path.join(self.vina_dir, "bin/vina")

    def generate_poses(self, protein_file, ligand_file, out_dir=None):
        """Generates the docked complex and outputs files for docked complex.

        Parameters
        ----------
        protein_file : str
            Path to the receptor PDB file.
        ligand_file : str
            Path to the ligand SDF file.
        out_dir : str, optional
            Directory for intermediate and output files; a temporary
            directory is created when None.

        Returns
        -------
        tuple of (hydrogenated receptor pdb path, docked-poses pdbqt path).
        """
        if out_dir is None:
            out_dir = tempfile.mkdtemp()
        # Prepare receptor: add hydrogens/charges, emit pdb and pdbqt.
        receptor_name = os.path.basename(protein_file).split(".")[0]
        protein_hyd = os.path.join(out_dir, "%s.pdb" % receptor_name)
        protein_pdbqt = os.path.join(out_dir, "%s.pdbqt" % receptor_name)
        hydrogenate_and_compute_partial_charges(protein_file, "pdb",
                                                hyd_output=protein_hyd,
                                                pdbqt_output=protein_pdbqt,
                                                protein=True)
        # Get protein centroid and range
        receptor_pybel = next(pybel.readfile(str("pdb"), str(protein_hyd)))
        # TODO(rbharath): Need to add some way to identify binding pocket, or this is
        # going to be extremely slow!
        if not self.detect_pockets:
            # Search box covers the whole protein plus a 5 Angstrom margin.
            protein_centroid, protein_range = get_molecule_data(receptor_pybel)
            box_dims = protein_range + 5.0
        else:
            print("About to find putative binding pockets")
            pockets, pocket_atoms_maps, pocket_coords = self.pocket_finder.find_pockets(
                protein_file, ligand_file)
            # TODO(rbharath): Handle multiple pockets instead of arbitrarily selecting
            # first pocket.
            print("Computing centroid and size of proposed pocket.")
            pocket_coord = pocket_coords[0]
            # NOTE(review): axis=1 implies pocket_coord is laid out as
            # (3, n_atoms) -- confirm against RFConvexHullPocketFinder.
            protein_centroid = np.mean(pocket_coord, axis=1)
            pocket = pockets[0]
            (x_min, x_max), (y_min, y_max), (z_min, z_max) = pocket
            x_box = (x_max - x_min)/2.
            y_box = (y_max - y_min)/2.
            z_box = (z_max - z_min)/2.
            box_dims = (x_box, y_box, z_box)
        # Prepare ligand (same hydrogenation/charge step as the receptor).
        ligand_name = os.path.basename(ligand_file).split(".")[0]
        ligand_hyd = os.path.join(out_dir, "%s.pdb" % ligand_name)
        ligand_pdbqt = os.path.join(out_dir, "%s.pdbqt" % ligand_name)
        # TODO(rbharath): Generalize this so can support mol2 files as well.
        hydrogenate_and_compute_partial_charges(ligand_file, "sdf",
                                                hyd_output=ligand_hyd,
                                                pdbqt_output=ligand_pdbqt,
                                                protein=False)
        # Write Vina conf file
        conf_file = os.path.join(out_dir, "conf.txt")
        write_conf(protein_pdbqt, ligand_pdbqt, protein_centroid,
                   box_dims, conf_file, exhaustiveness=self.exhaustiveness)
        # Define locations of log and output files
        log_file = os.path.join(out_dir, "%s_log.txt" % ligand_name)
        out_pdbqt = os.path.join(out_dir, "%s_docked.pdbqt" % ligand_name)
        # TODO(rbharath): Let user specify the number of poses required.
        print("About to call Vina")
        call("%s --config %s --log %s --out %s"
             % (self.vina_cmd, conf_file, log_file, out_pdbqt), shell=True)
        # TODO(rbharath): Convert the output pdbqt to a pdb file.
        # Return docked files
        return protein_hyd, out_pdbqt
|
bowenliu16/deepchem
|
deepchem/dock/pose_generation.py
|
Python
|
gpl-3.0
| 6,276
|
[
"Pybel"
] |
304307cae65106a8c84dc5bba96ccd55302f8d495c780d60989180db9ff4f5ec
|
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import mooseutils
from chigger import utils
from .ChiggerResultBase import ChiggerResultBase
from .ChiggerSourceBase import ChiggerSourceBase
class ChiggerResult(ChiggerResultBase):
    """
    A ChiggerResult object capable of attaching an arbitrary number of ChiggerFilterSourceBase
    objects to the vtkRenderer.

    Any options supplied to this object are automatically passed down to the ChiggerFilterSourceBase
    objects contained by this class, if applicable. To have the settings of the contained source
    objects appear in this objects option dump then simply add the settings to the static getOptions
    method of the derived class. This is not done here because this class is designed to accept
    arbitrary ChiggerFilterSourceBase object which may have varying settings, see ExodusResult for
    an example of a single type implementation based on this class.

    Inputs:
        *sources: A tuple of ChiggerFilterSourceBase object to render.
        **kwargs: see ChiggerResultBase
    """

    # The Base class type that this object to which its ownership is restricted.
    SOURCE_TYPE = ChiggerSourceBase

    @staticmethod
    def getOptions():
        opt = ChiggerResultBase.getOptions()
        return opt

    def __init__(self, *sources, **kwargs):
        super(ChiggerResult, self).__init__(**kwargs)
        self._sources = sources
        # Give each source a back-reference so it can reach this result.
        for src in self._sources:
            src._parent = self #pylint: disable=protected-access

    def needsUpdate(self):
        """
        Checks if this object or any of the contained ChiggerFilterSourceBase object require update.
        (override)
        """
        return super(ChiggerResult, self).needsUpdate() or \
               any([src.needsUpdate() for src in self._sources])

    def updateOptions(self, *args):
        """
        Apply the supplied option objects to this object and the contained ChiggerFilterSourceBase
        objects. (override)

        Inputs:
            see ChiggerResultBase
        """
        # Any change on self or on any source marks the whole result stale.
        changed = [self.needsUpdate()]
        changed.append(super(ChiggerResult, self).updateOptions(*args))
        for src in self._sources:
            changed.append(src.updateOptions(*args))
        changed = any(changed)
        self.setNeedsUpdate(changed)
        return changed

    def setOptions(self, *args, **kwargs):
        """
        Apply the supplied options to this object and the contained ChiggerFilterSourceBase objects.
        (override)

        Inputs:
            see ChiggerResultBase
        """
        changed = [self.needsUpdate()]
        changed.append(super(ChiggerResult, self).setOptions(*args, **kwargs))
        for src in self._sources:
            changed.append(src.setOptions(*args, **kwargs))
        changed = any(changed)
        self.setNeedsUpdate(changed)
        return changed

    def update(self, **kwargs):
        """
        Update this object and the contained ChiggerFilterSourceBase objects. (override)

        Inputs:
            see ChiggerResultBase
        """
        super(ChiggerResult, self).update(**kwargs)
        # Only stale sources are updated.
        for src in self._sources:
            if src.needsUpdate():
                src.update()

    def getSources(self):
        """
        Return the list of ChiggerSource objects.
        """
        return self._sources

    def getBounds(self, check=True):
        """
        Return the bounding box of the results.

        Inputs:
            check[bool]: (Default: True) When True, perform an update check and raise an exception
                         if object is not up-to-date. This should not be used.

        TODO: For Peacock, on linux check=False must be set, but I am not sure why.
        """
        if check:
            self.checkUpdateState()
        elif self.needsUpdate():
            self.update()
        return utils.get_bounds(*self._sources)

    def getRange(self, local=False):
        """
        Return the min/max range for the selected variables and blocks/boundary/nodeset.

        NOTE: For the range to be restricted by block/boundary/nodest the reader must have
        "squeeze=True", which can be much slower.
        """
        rngs = [src.getRange(local=local) for src in self._sources]
        return utils.get_min_max(*rngs)

    def reset(self):
        """
        Remove actors from renderer.
        """
        super(ChiggerResult, self).reset()
        for src in self._sources:
            self._vtkrenderer.RemoveViewProp(src.getVTKActor())

    def initialize(self):
        """
        Initialize by adding actors to renderer.

        Raises a MooseException if any source is not a SOURCE_TYPE.
        """
        super(ChiggerResult, self).initialize()
        for src in self._sources:
            if not isinstance(src, self.SOURCE_TYPE):
                n = src.__class__.__name__
                t = self.SOURCE_TYPE.__name__
                msg = 'The supplied source type of {} must be of type {}.'.format(n, t)
                raise mooseutils.MooseException(msg)
            src.setVTKRenderer(self._vtkrenderer)
            self._vtkrenderer.AddViewProp(src.getVTKActor())

    def __iter__(self):
        """
        Provides iteration access to the underlying source objects.
        """
        for src in self._sources:
            yield src

    def __getitem__(self, index):
        """
        Provide [] access to the source objects.
        """
        return self._sources[index]

    def __len__(self):
        """
        The number of source objects.
        """
        return len(self._sources)
|
harterj/moose
|
python/chigger/base/ChiggerResult.py
|
Python
|
lgpl-2.1
| 5,871
|
[
"MOOSE"
] |
f003cf845e9b0dbaa895f232c4ba56a1da68a5b36cc8556c4e30a33818fcd051
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides classes for analyzing phase diagrams.
"""
from six.moves import zip
import numpy as np
import itertools
import collections
from monty.functools import lru_cache
from monty.dev import deprecated
from pymatgen.core.composition import Composition
from pymatgen.phasediagram.maker import PhaseDiagram, get_facets
from pymatgen.analysis.reaction_calculator import Reaction
from pymatgen.util.coord_utils import Simplex
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2012"
class PDAnalyzer(object):
    """
    A class for performing analyses on Phase Diagrams.

    The algorithm is based on the work in the following papers:

    1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
       First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
       doi:10.1021/cm702327g

    2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
       of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
       principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
       doi:10.1016/j.elecom.2010.01.010
    """

    # Absolute tolerance used throughout for float comparisons (simplex
    # membership, decomposition amounts, duplicate chemical potentials, ...).
    numerical_tol = 1e-8

    def __init__(self, pd):
        """
        Initializes analyzer with a PhaseDiagram.

        Args:
            pd: Phase Diagram to analyze.
        """
        self._pd = pd

    @lru_cache(1)
    def _get_facet_and_simplex(self, comp):
        """
        Get any facet that a composition falls into. Cached so successive
        calls at same composition are fast.

        Raises:
            RuntimeError: if no facet contains the composition.
        """
        c = self._pd.pd_coords(comp)
        for f, s in zip(self._pd.facets, self._pd.simplexes):
            if s.in_simplex(c, PDAnalyzer.numerical_tol / 10):
                return f, s
        raise RuntimeError("No facet found for comp = {}".format(comp))

    def _get_facet_chempots(self, facet):
        """
        Calculates the chemical potentials for each element within a facet.

        Args:
            facet: Facet of the phase diagram.

        Returns:
            { element: chempot } for all elements in the phase diagram.
        """
        complist = [self._pd.qhull_entries[i].composition for i in facet]
        energylist = [self._pd.qhull_entries[i].energy_per_atom for i in facet]
        # Solve the linear system (atomic fractions) x (chempots) = (energies).
        m = [[c.get_atomic_fraction(e) for e in self._pd.elements]
             for c in complist]
        chempots = np.linalg.solve(m, energylist)
        return dict(zip(self._pd.elements, chempots))

    def get_decomposition(self, comp):
        """
        Provides the decomposition at a particular composition.

        Args:
            comp: A composition

        Returns:
            Decomposition as a dict of {Entry: amount}
        """
        facet, simplex = self._get_facet_and_simplex(comp)
        decomp_amts = simplex.bary_coords(self._pd.pd_coords(comp))
        return {self._pd.qhull_entries[f]: amt
                for f, amt in zip(facet, decomp_amts)
                if abs(amt) > PDAnalyzer.numerical_tol}

    def get_hull_energy(self, comp):
        """
        Args:
            comp (Composition): Input composition

        Returns:
            Energy of lowest energy equilibrium at desired composition. Not
            normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
        """
        e = 0
        for k, v in self.get_decomposition(comp).items():
            e += k.energy_per_atom * v
        return e * comp.num_atoms

    def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
        """
        Provides the decomposition and energy above convex hull for an entry.
        Due to caching, can be much faster if entries with the same composition
        are processed together.

        Args:
            entry: A PDEntry like object
            allow_negative: Whether to allow negative e_above_hulls. Used to
                calculate equilibrium reaction energies. Defaults to False.

        Returns:
            (decomp, energy above convex hull) Stable entries should have
            energy above hull of 0. The decomposition is provided as a dict of
            {Entry: amount}.

        Raises:
            ValueError: if the hull energy is negative and allow_negative is
                False.
        """
        if entry in self._pd.stable_entries:
            return {entry: 1}, 0
        comp = entry.composition
        facet, simplex = self._get_facet_and_simplex(comp)
        decomp_amts = simplex.bary_coords(self._pd.pd_coords(comp))
        decomp = {self._pd.qhull_entries[f]: amt
                  for f, amt in zip(facet, decomp_amts)
                  if abs(amt) > PDAnalyzer.numerical_tol}
        energies = [self._pd.qhull_entries[i].energy_per_atom for i in facet]
        ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
        if allow_negative or ehull >= -PDAnalyzer.numerical_tol:
            return decomp, ehull
        raise ValueError("No valid decomp found!")

    def get_e_above_hull(self, entry):
        """
        Provides the energy above convex hull for an entry

        Args:
            entry: A PDEntry like object

        Returns:
            Energy above convex hull of entry. Stable entries should have
            energy above hull of 0.
        """
        return self.get_decomp_and_e_above_hull(entry)[1]

    def get_equilibrium_reaction_energy(self, entry):
        """
        Provides the reaction energy of a stable entry from the neighboring
        equilibrium stable entries (also known as the inverse distance to
        hull).

        Args:
            entry: A PDEntry like object

        Returns:
            Equilibrium reaction energy of entry. Stable entries should have
            equilibrium reaction energy <= 0.
        """
        if entry not in self._pd.stable_entries:
            raise ValueError("Equilibrium reaction energy is available only "
                             "for stable entries.")
        if entry.is_element:
            return 0
        # Rebuild the hull without this entry; its distance to the new hull is
        # the (negative) equilibrium reaction energy.
        entries = [e for e in self._pd.stable_entries if e != entry]
        modpd = PhaseDiagram(entries, self._pd.elements)
        analyzer = PDAnalyzer(modpd)
        return analyzer.get_decomp_and_e_above_hull(entry,
                                                    allow_negative=True)[1]

    def get_composition_chempots(self, comp):
        """
        Return the chemical potentials of all elements for the facet
        containing comp.

        Args:
            comp: A Composition

        Returns:
            { element: chempot } for all elements in the phase diagram.
        """
        facet = self._get_facet_and_simplex(comp)[0]
        return self._get_facet_chempots(facet)

    @deprecated(get_composition_chempots)
    def get_facet_chempots(self, facet):
        """Deprecated: use get_composition_chempots instead."""
        return self._get_facet_chempots(facet)

    def get_transition_chempots(self, element):
        """
        Get the critical chemical potentials for an element in the Phase
        Diagram.

        Args:
            element: An element. Has to be in the PD in the first place.

        Returns:
            A sorted sequence of critical chemical potentials, from less
            negative to more negative.
        """
        if element not in self._pd.elements:
            raise ValueError("get_transition_chempots can only be called with "
                             "elements in the phase diagram.")
        critical_chempots = []
        for facet in self._pd.facets:
            chempots = self._get_facet_chempots(facet)
            critical_chempots.append(chempots[element])
        # Deduplicate potentials that agree to within numerical_tol.
        clean_pots = []
        for c in sorted(critical_chempots):
            if not clean_pots:
                clean_pots.append(c)
            elif abs(c - clean_pots[-1]) > PDAnalyzer.numerical_tol:
                clean_pots.append(c)
        clean_pots.reverse()
        return tuple(clean_pots)

    def get_critical_compositions(self, comp1, comp2):
        """
        Get the critical compositions along the tieline between two
        compositions. I.e. where the decomposition products change.
        The endpoints are also returned.

        Args:
            comp1, comp2 (Composition): compositions that define the tieline

        Returns:
            [(Composition)]: list of critical compositions. All are of
                the form x * comp1 + (1-x) * comp2
        """
        n1 = comp1.num_atoms
        n2 = comp2.num_atoms
        pd_els = self._pd.elements
        # the reduced dimensionality Simplexes don't use the
        # first element in the PD
        c1 = self._pd.pd_coords(comp1)
        c2 = self._pd.pd_coords(comp2)
        # none of the projections work if c1 == c2, so just return *copies*
        # of the inputs
        if np.all(c1 == c2):
            return [comp1.copy(), comp2.copy()]
        intersections = [c1, c2]
        for sc in self._pd.simplexes:
            intersections.extend(sc.line_intersection(c1, c2))
        intersections = np.array(intersections)
        # find position along line (unit direction vector of the tieline)
        line_vec = (c2 - c1)
        line_vec /= np.sum(line_vec ** 2) ** 0.5
        proj = np.dot(intersections - c1, line_vec)
        # only take compositions between endpoints
        proj = proj[np.logical_and(proj > -self.numerical_tol,
                                   proj < proj[1] + self.numerical_tol)]
        proj.sort()
        # only unique compositions.
        # BUGFIX: np.bool was a deprecated alias of the builtin bool and has
        # been removed from recent numpy releases; use bool directly.
        valid = np.ones(len(proj), dtype=bool)
        valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
        proj = proj[valid]
        ints = c1 + line_vec * proj[:, None]
        # reconstruct full-dimensional composition array
        cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,
                             ints], axis=-1)
        # mixing fraction when compositions are normalized
        x = proj / np.dot(c2 - c1, line_vec)
        # mixing fraction when compositions are not normalized
        x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
        num_atoms = n1 + (n2 - n1) * x_unnormalized
        cs *= num_atoms[:, None]
        return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]

    def get_element_profile(self, element, comp, comp_tol=1e-5):
        """
        Provides the element evolution data for a composition.
        For example, can be used to analyze Li conversion voltages by varying
        uLi and looking at the phases formed. Also can be used to analyze O2
        evolution by varying uO2.

        Args:
            element: An element. Must be in the phase diagram.
            comp: A Composition
            comp_tol: The tolerance to use when calculating decompositions.
                Phases with amounts less than this tolerance are excluded.
                Defaults to 1e-5.
                NOTE(review): currently unused in the body — confirm intent.

        Returns:
            Evolution data as a list of dictionaries of the following format:
            [ {'chempot': -10.487582010000001, 'evolution': -2.0,
            'reaction': Reaction Object], ...]
        """
        if element not in self._pd.elements:
            # BUGFIX: the original message referred to get_transition_chempots
            # (copy-paste error); report the correct method name.
            raise ValueError("get_element_profile can only be called with"
                             " elements in the phase diagram.")
        gccomp = Composition({el: amt for el, amt in comp.items()
                              if el != element})
        elref = self._pd.el_refs[element]
        elcomp = Composition(element.symbol)
        evolution = []
        for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:
            decomp_entries = self.get_decomposition(cc).keys()
            decomp = [k.composition for k in decomp_entries]
            rxn = Reaction([comp], decomp + [elcomp])
            rxn.normalize_to(comp)
            # Evaluate the chemical potential slightly off the tieline so the
            # facet on the element-poor side is selected.
            c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]
            amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
            evolution.append({'chempot': c,
                              'evolution': amt,
                              'element_reference': elref,
                              'reaction': rxn, 'entries': decomp_entries})
        return evolution

    def get_chempot_range_map(self, elements, referenced=True, joggle=True):
        """
        Returns a chemical potential range map for each stable entry.

        Args:
            elements: Sequence of elements to be considered as independent
                variables. E.g., if you want to show the stability ranges
                of all Li-Co-O phases wrt to uLi and uO, you will supply
                [Element("Li"), Element("O")]
            referenced: If True, gives the results with a reference being the
                energy of the elemental phase. If False, gives absolute values.
            joggle (boolean): Whether to joggle the input to avoid precision
                errors.

        Returns:
            Returns a dict of the form {entry: [simplices]}. The list of
            simplices are the sides of the N-1 dim polytope bounding the
            allowable chemical potential range of each entry.
        """
        all_chempots = []
        pd = self._pd
        facets = pd.facets
        for facet in facets:
            chempots = self._get_facet_chempots(facet)
            all_chempots.append([chempots[el] for el in pd.elements])
        inds = [pd.elements.index(el) for el in elements]
        el_energies = {el: 0.0 for el in elements}
        if referenced:
            el_energies = {el: pd.el_refs[el].energy_per_atom
                           for el in elements}
        chempot_ranges = collections.defaultdict(list)
        vertices = [list(range(len(self._pd.elements)))]
        if len(all_chempots) > len(self._pd.elements):
            vertices = get_facets(all_chempots, joggle=joggle)
        for ufacet in vertices:
            for combi in itertools.combinations(ufacet, 2):
                data1 = facets[combi[0]]
                data2 = facets[combi[1]]
                common_ent_ind = set(data1).intersection(set(data2))
                if len(common_ent_ind) == len(elements):
                    common_entries = [pd.qhull_entries[i]
                                      for i in common_ent_ind]
                    data = np.array([[all_chempots[i][j]
                                      - el_energies[pd.elements[j]]
                                      for j in inds] for i in combi])
                    sim = Simplex(data)
                    for entry in common_entries:
                        chempot_ranges[entry].append(sim)
        return chempot_ranges

    def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
        """
        returns a set of chemical potentials corresponding to the vertices of
        the simplex in the chemical potential phase diagram.
        The simplex is built using all elements in the target_composition
        except dep_elt.
        The chemical potential of dep_elt is computed from the target
        composition energy.
        This method is useful to get the limiting conditions for
        defects computations for instance.

        Args:
            target_comp: A Composition object
            dep_elt: the element for which the chemical potential is computed
                from the energy of the stable phase at the target composition
            tol_en: a tolerance on the energy to set

        Returns:
            [{Element: mu}]: An array of conditions on simplex vertices for
            which each element has a chemical potential set to a given
            value. "absolute" values (i.e., not referenced to element energies)
        """
        muref = np.array([self._pd.el_refs[e].energy_per_atom
                          for e in self._pd.elements if e != dep_elt])
        chempot_ranges = self.get_chempot_range_map(
            [e for e in self._pd.elements if e != dep_elt])

        # Pad the target composition with zero amounts so indexing below
        # never raises.
        for e in self._pd.elements:
            if e not in target_comp.elements:
                target_comp = target_comp + Composition({e: 0.0})
        coeff = [-target_comp[e] for e in self._pd.elements if e != dep_elt]
        # BUGFIX: previously, if no entry matched the target composition, the
        # final return raised NameError; return an empty list instead.
        all_coords = []
        for e in chempot_ranges.keys():
            if e.composition.reduced_composition == \
                    target_comp.reduced_composition:
                multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
                ef = e.energy / multiplicator
                all_coords = []
                for s in chempot_ranges[e]:
                    for v in s._coords:
                        elts = [el for el in self._pd.elements if el != dep_elt]
                        res = {}
                        for i in range(len(elts)):
                            res[elts[i]] = v[i] + muref[i]
                        # The chempot of the dependent element follows from
                        # the formation-energy balance.
                        res[dep_elt] = (np.dot(v + muref, coeff) + ef) / \
                            target_comp[dep_elt]
                        # Keep only vertices not already present (within
                        # tol_en on every chemical potential).
                        already_in = False
                        for di in all_coords:
                            dict_equals = True
                            for k in di:
                                if abs(di[k] - res[k]) > tol_en:
                                    dict_equals = False
                                    break
                            if dict_equals:
                                already_in = True
                                break
                        if not already_in:
                            all_coords.append(res)
        return all_coords

    def get_chempot_range_stability_phase(self, target_comp, open_elt):
        """
        returns a set of chemical potentials corresponding to the max and min
        chemical potential of the open element for a given composition. It is
        quite common to have for instance a ternary oxide (e.g., ABO3) for
        which you want to know what are the A and B chemical potential leading
        to the highest and lowest oxygen chemical potential (reducing and
        oxidizing conditions). This is useful for defect computations.

        Args:
            target_comp: A Composition object
            open_elt: Element that you want to constrain to be max or min

        Returns:
            {Element: (mu_min, mu_max)}: Chemical potentials are given in
            "absolute" values (i.e., not referenced to 0)
        """
        muref = np.array([self._pd.el_refs[e].energy_per_atom
                          for e in self._pd.elements if e != open_elt])
        chempot_ranges = self.get_chempot_range_map(
            [e for e in self._pd.elements if e != open_elt])
        for e in self._pd.elements:
            if e not in target_comp.elements:
                target_comp = target_comp + Composition({e: 0.0})
        coeff = [-target_comp[e] for e in self._pd.elements if e != open_elt]
        max_open = -float('inf')
        min_open = float('inf')
        max_mus = None
        min_mus = None
        for e in chempot_ranges.keys():
            if e.composition.reduced_composition == \
                    target_comp.reduced_composition:
                multiplicator = e.composition[open_elt] / target_comp[open_elt]
                ef = e.energy / multiplicator
                all_coords = []
                for s in chempot_ranges[e]:
                    for v in s._coords:
                        all_coords.append(v)
                        # Hoisted: compute the open-element chempot once per
                        # vertex (was computed up to four times).
                        mu_open = (np.dot(v + muref, coeff) + ef) / \
                            target_comp[open_elt]
                        if mu_open > max_open:
                            max_open = mu_open
                            max_mus = v
                        if mu_open < min_open:
                            min_open = mu_open
                            min_mus = v
        elts = [e for e in self._pd.elements if e != open_elt]
        res = {}
        for i in range(len(elts)):
            res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
        res[open_elt] = (min_open, max_open)
        return res
|
xhqu1981/pymatgen
|
pymatgen/phasediagram/analyzer.py
|
Python
|
mit
| 19,907
|
[
"pymatgen"
] |
78642eee1dd5cbfa66548493806ea594088cc972d6451b69056299abdd9d2247
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import warnings
from ..utils.exceptions import AstropyUserWarning
from ..extern.six.moves import range
__all__ = ['sigma_clip', 'sigma_clipped_stats']
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, iters=5,
               cenfunc=np.ma.median, stdfunc=np.std, axis=None, copy=True):
    """
    Perform sigma-clipping on the provided data.

    Points that deviate from the center (``cenfunc``) by more than
    ``sigma_lower``/``sigma_upper`` times the width estimator (``stdfunc``)
    are masked.  Clipping repeats ``iters`` times, or until convergence when
    ``iters`` is `None`.  Invalid values (NaNs or infs) in ``data`` are
    automatically masked first.

    Parameters
    ----------
    data : array-like
        The data to be sigma clipped.
    sigma : float, optional
        Number of standard deviations used for both the lower and upper
        clipping limit unless overridden by ``sigma_lower``/``sigma_upper``.
        Defaults to 3.
    sigma_lower, sigma_upper : float or `None`, optional
        Lower/upper clipping bound in standard deviations.  `None` means
        use ``sigma``.  Defaults to `None`.
    iters : int or `None`, optional
        Number of clipping iterations, or `None` to iterate until the last
        iteration clips nothing.  Defaults to 5.
    cenfunc : callable, optional
        Computes the clipping center from a masked array.  Defaults to the
        median (`numpy.ma.median`).
    stdfunc : callable, optional
        Computes the width estimator from a masked array.  Defaults to the
        standard deviation (`numpy.std`).
    axis : int or `None`, optional
        If not `None`, clip along the given axis; ``axis`` is passed on to
        ``cenfunc`` and ``stdfunc``.  `None` clips over all axes.  Defaults
        to `None`.
    copy : bool, optional
        If `True` (default) the input array is copied; otherwise the returned
        masked array shares ``data``'s buffer.

    Returns
    -------
    filtered_data : `numpy.ma.MaskedArray`
        A masked array with the same shape as ``data`` input, where the
        points rejected by the algorithm have been masked.
    """
    def _clip_once(marr, reduce_kwargs):
        # One clipping pass: mask everything outside
        # [center - sigma_lower * width, center + sigma_upper * width].
        if marr.size == 0:
            return marr
        center = cenfunc(marr, **reduce_kwargs)
        width = stdfunc(marr, **reduce_kwargs)
        lower = center - width * sigma_lower
        upper = center + width * sigma_upper
        if axis is not None and axis != 0:
            # restore the reduced dimension so the bounds broadcast correctly
            lower = np.expand_dims(lower, axis=axis)
            upper = np.expand_dims(upper, axis=axis)
        if upper is np.ma.masked:
            # fully-masked input: force the comparisons to mask everything
            upper = np.ma.MaskedArray(np.nan, mask=True)
            lower = np.ma.MaskedArray(np.nan, mask=True)
        marr.mask |= marr > upper
        marr.mask |= marr < lower

    sigma_lower = sigma if sigma_lower is None else sigma_lower
    sigma_upper = sigma if sigma_upper is None else sigma_upper

    reduce_kwargs = {} if axis is None else {'axis': axis}

    if np.any(~np.isfinite(data)):
        data = np.ma.masked_invalid(data)
        warnings.warn("Input data contains invalid values (NaNs or infs), "
                      "which were automatically masked.", AstropyUserWarning)

    filtered_data = np.ma.array(data, copy=copy)

    if iters is None:
        # iterate until a pass rejects nothing
        previous_count = filtered_data.count() + 1
        while filtered_data.count() != previous_count:
            previous_count = filtered_data.count()
            _clip_once(filtered_data, reduce_kwargs)
    else:
        for _ in range(iters):
            _clip_once(filtered_data, reduce_kwargs)

    # prevent filtered_data.mask = False (scalar) if no values are clipped
    if filtered_data.mask.shape == ():
        filtered_data.mask = False  # .mask shape will now match .data shape

    return filtered_data
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0,
                        sigma_lower=None, sigma_upper=None, iters=5,
                        cenfunc=np.ma.median, stdfunc=np.std, axis=None):
    """
    Calculate sigma-clipped statistics on the provided data.

    Parameters
    ----------
    data : array-like
        Data array or object that can be converted to an array.
    mask : `numpy.ndarray` (bool), optional
        Boolean mask with the same shape as ``data``; `True` entries are
        excluded when computing the statistics.
    mask_value : float, optional
        A data value (e.g., ``0.0``) that is masked in addition to any
        input ``mask``.
    sigma, sigma_lower, sigma_upper, iters, cenfunc, stdfunc, axis :
        Clipping parameters forwarded unchanged to `sigma_clip`; see that
        function for details.

    Returns
    -------
    mean, median, stddev : float
        The mean, median, and standard deviation of the sigma-clipped
        data.
    """
    if mask is not None:
        data = np.ma.MaskedArray(data, mask)
    if mask_value is not None:
        data = np.ma.masked_values(data, mask_value)

    clipped = sigma_clip(data, sigma=sigma, sigma_lower=sigma_lower,
                         sigma_upper=sigma_upper, iters=iters,
                         cenfunc=cenfunc, stdfunc=stdfunc, axis=axis)

    mean_val = np.ma.mean(clipped, axis=axis)
    median_val = np.ma.median(clipped, axis=axis)
    std_val = np.ma.std(clipped, axis=axis)

    if axis is None and np.ma.isMaskedArray(median_val):
        # With Numpy 1.10 np.ma.median always returns a MaskedArray, even
        # with one element.  For compatibility with previous versions,
        # reduce it to a scalar in that case.
        median_val = median_val.item()

    return mean_val, median_val, std_val
|
joergdietrich/astropy
|
astropy/stats/sigma_clipping.py
|
Python
|
bsd-3-clause
| 11,671
|
[
"Gaussian"
] |
60a2f800b4b40ae6caeb9ddfca63e76bc244efdf72a483ce86d85dbea31691d3
|
""" ConstantExpressions gathers constant expression. """
from pythran.analyses.aliases import Aliases
from pythran.analyses.globals_analysis import Globals
from pythran.analyses.locals_analysis import Locals
from pythran.analyses.pure_expressions import PureExpressions
from pythran.intrinsic import FunctionIntr
from pythran.passmanager import NodeAnalysis
from pythran.tables import MODULES
import ast
class ConstantExpressions(NodeAnalysis):
    """Identify constant expressions.

    The analysis walks the AST bottom-up: a node is recorded in ``result``
    when all of its relevant children are themselves constant.
    """

    def __init__(self):
        self.result = set()
        super(ConstantExpressions, self).__init__(Globals, Locals,
                                                  PureExpressions, Aliases)

    def add(self, node):
        # Record *node* as constant; always returns True so visitors can
        # chain it with `and`.
        self.result.add(node)
        return True

    def visit_BoolOp(self, node):
        return all(self.visit(value) for value in node.values) \
            and self.add(node)

    def visit_BinOp(self, node):
        operands_const = all(self.visit(child)
                             for child in (node.left, node.right))
        return operands_const and self.add(node)

    def visit_UnaryOp(self, node):
        return self.visit(node.operand) and self.add(node)

    def visit_IfExp(self, node):
        parts = (node.test, node.body, node.orelse)
        return all(self.visit(part) for part in parts) and self.add(node)

    def visit_Compare(self, node):
        children = [node.left] + node.comparators
        return all(self.visit(child) for child in children) \
            and self.add(node)

    def visit_Call(self, node):
        children = node.args + [node.func]
        return all(self.visit(child) for child in children) \
            and self.add(node)

    # literals are constant by definition
    visit_Num = add
    visit_Str = add

    def visit_Subscript(self, node):
        both_const = all(self.visit(child)
                         for child in (node.value, node.slice))
        return both_const and self.add(node)

    def visit_Name(self, node):
        if node not in self.aliases:
            return False
        # params and store are not constants
        if not isinstance(node.ctx, ast.Load):
            return False
        # if we can alias on multiple value, it is not constant
        if len(self.aliases[node].aliases) > 1:
            return False
        # if it is not a globals, it depends on variable so it is not
        # constant
        if node.id not in self.globals:
            return False
        # if it is defined in the current function, it is not constant
        if node.id in self.locals[node]:
            return False

        def is_function(alias):
            return isinstance(alias, (FunctionIntr,
                                      ast.FunctionDef,
                                      ast.alias))
        return all(alias in self.pure_expressions and is_function(alias)
                   for alias in self.aliases[node].aliases)

    def visit_Attribute(self, node):
        def lookup(table, n):
            # walk the attribute chain down into the MODULES table
            if isinstance(n, ast.Name):
                return table[n.id]
            elif isinstance(n, ast.Attribute):
                return lookup(table, n.value)[n.attr]
        return lookup(MODULES, node).isconst() and self.add(node)

    def visit_Dict(self, node):
        children = node.keys + node.values
        return all(self.visit(child) for child in children) \
            and self.add(node)

    def visit_List(self, node):
        return all(self.visit(elt) for elt in node.elts) and self.add(node)

    visit_Tuple = visit_List
    visit_Set = visit_List

    def visit_Slice(self, _):
        # ultra-conservative, indeed
        return False

    def visit_Index(self, node):
        return self.visit(node.value) and self.add(node)
|
hainm/pythran
|
pythran/analyses/constant_expressions.py
|
Python
|
bsd-3-clause
| 3,547
|
[
"VisIt"
] |
aca06fb83041a331374c958678d2967af177cfb73ea8da1df7f4f04a315e99e0
|
""" Module invoked for finding and loading DIRAC (and extensions) modules
"""
import os
import imp
from DIRAC.Core.Utilities import List
from DIRAC import gConfig, S_ERROR, S_OK, gLogger
from DIRAC.ConfigurationSystem.Client.Helpers import getInstalledExtensions
from DIRAC.ConfigurationSystem.Client import PathFinder
class ModuleLoader( object ):
def __init__( self, importLocation, sectionFinder, superClass, csSuffix = False, moduleSuffix = False ):
self.__modules = {}
self.__loadedModules = {}
self.__superClass = superClass
#Function to find the
self.__sectionFinder = sectionFinder
#Import from where? <Ext>.<System>System.<importLocation>.<module>
self.__importLocation = importLocation
#Where to look in the CS for the module? /Systems/<System>/<Instance>/<csSuffix>
if not csSuffix:
csSuffix = "%ss" % importLocation
self.__csSuffix = csSuffix
#Module suffix (for Handlers)
self.__modSuffix = moduleSuffix
def getModules( self ):
data = dict( self.__modules )
for k in data:
data[ k ][ 'standalone' ] = len( data ) == 1
return data
  def loadModules( self, modulesList, hideExceptions = False ):
    """
    Load all modules required in modulesList.

    Each entry may be a full module name ("System/Module"), which is loaded
    directly, or a system name, in which case modules are discovered both
    from the CS (<system section>/Executors) and from the .py files installed
    under <ext>.<System>System.<csSuffix>.

    :param modulesList: list of module names or system names
    :param hideExceptions: forwarded to loadModule to silence import errors
    :return: S_OK(), or the first S_ERROR returned by a failing load
    """
    for modName in modulesList:
      gLogger.verbose( "Checking %s" % modName )
      #if it's a executor modName name just load it and be done with it
      if modName.find( "/" ) > -1:
        gLogger.verbose( "Module %s seems to be a valid name. Try to load it!" % modName )
        result = self.loadModule( modName, hideExceptions = hideExceptions )
        if not result[ 'OK' ]:
          return result
        continue
      #Check if it's a system name
      #Look in the CS
      system = modName
      #Can this be generated with sectionFinder?
      csPath = "%s/Executors" % PathFinder.getSystemSection ( system, ( system, ) )
      gLogger.verbose( "Exploring %s to discover modules" % csPath )
      result = gConfig.getSections( csPath )
      if result[ 'OK' ]:
        #Add all modules in the CS :P
        # NOTE: this inner loop rebinds the outer loop variable 'modName'
        for modName in result[ 'Value' ]:
          result = self.loadModule( "%s/%s" % ( system, modName ), hideExceptions = hideExceptions )
          if not result[ 'OK' ]:
            return result
      #Look what is installed
      parentModule = None
      for rootModule in getInstalledExtensions():
        # Append "System" to the system name unless it already ends with it
        if system.find( "System" ) != len( system ) - 6:
          parentImport = "%s.%sSystem.%s" % ( rootModule, system, self.__csSuffix )
        else:
          parentImport = "%s.%s.%s" % ( rootModule, system, self.__csSuffix )
        # try to import the parent package; first extension that has it wins
        result = self.__recurseImport( parentImport )
        if not result[ 'OK' ]:
          return result
        parentModule = result[ 'Value' ]
        if parentModule:
          break
      if not parentModule:
        continue
      parentPath = parentModule.__path__[0]
      gLogger.notice( "Found modules path at %s" % parentImport )
      for entry in os.listdir( parentPath ):
        if entry[-3:] != ".py" or entry == "__init__.py":
          continue
        if not os.path.isfile( os.path.join( parentPath, entry ) ):
          continue
        modName = "%s/%s" % ( system, entry[:-3] )
        gLogger.verbose( "Trying to import %s" % modName )
        result = self.loadModule( modName,
                                  hideExceptions = hideExceptions,
                                  parentModule = parentModule )
        # NOTE(review): this result is never checked -- a failed load of a
        # discovered file is silently ignored; confirm that is intended.
    return S_OK()
def loadModule( self, modName, hideExceptions = False, parentModule = False ):
"""
Load module name.
name must take the form [DIRAC System Name]/[DIRAC module]
"""
while modName and modName[0] == "/":
modName = modName[1:]
if modName in self.__modules:
return S_OK()
modList = modName.split( "/" )
if len( modList ) != 2:
return S_ERROR( "Can't load %s: Invalid module name" % ( modName ) )
csSection = self.__sectionFinder( modName )
loadGroup = gConfig.getValue( "%s/Load" % csSection, [] )
#Check if it's a load group
if loadGroup:
gLogger.info( "Found load group %s. Will load %s" % ( modName, ", ".join( loadGroup ) ) )
for loadModName in loadGroup:
if loadModName.find( "/" ) == -1:
loadModName = "%s/%s" % ( modList[0], loadModName )
result = self.loadModule( loadModName, hideExceptions = hideExceptions, parentModule = False )
if not result[ 'OK' ]:
return result
return S_OK()
#Normal load
loadName = gConfig.getValue( "%s/Module" % csSection, "" )
if not loadName:
loadName = modName
gLogger.info( "Loading %s" % ( modName ) )
else:
if loadName.find( "/" ) == -1:
loadName = "%s/%s" % ( modList[0], loadName )
gLogger.info( "Loading %s (%s)" % ( modName, loadName ) )
#If already loaded, skip
loadList = loadName.split( "/" )
if len( loadList ) != 2:
return S_ERROR( "Can't load %s: Invalid module name" % ( loadName ) )
system, module = loadList
#Load
className = module
if self.__modSuffix:
className = "%s%s" % ( className, self.__modSuffix )
if loadName not in self.__loadedModules:
#Check if handler is defined
loadCSSection = self.__sectionFinder( loadName )
handlerPath = gConfig.getValue( "%s/HandlerPath" % loadCSSection, "" )
if handlerPath:
gLogger.info( "Trying to %s from CS defined path %s" % ( loadName, handlerPath ) )
gLogger.verbose( "Found handler for %s: %s" % ( loadName, handlerPath ) )
handlerPath = handlerPath.replace( "/", "." )
if handlerPath.find( ".py", len( handlerPath ) -3 ) > -1:
handlerPath = handlerPath[ :-3 ]
className = List.fromChar( handlerPath, "." )[-1]
result = self.__recurseImport( handlerPath )
if not result[ 'OK' ]:
return S_ERROR( "Cannot load user defined handler %s: %s" % ( handlerPath, result[ 'Message' ] ) )
gLogger.verbose( "Loaded %s" % handlerPath )
elif parentModule:
gLogger.info( "Trying to autodiscover %s from parent" % loadName )
#If we've got a parent module, load from there.
modImport = module
if self.__modSuffix:
modImport = "%s%s" % ( modImport, self.__modSuffix )
result = self.__recurseImport( modImport, parentModule, hideExceptions = hideExceptions )
else:
#Check to see if the module exists in any of the root modules
gLogger.info( "Trying to autodiscover %s" % loadName )
rootModulesToLook = getInstalledExtensions()
for rootModule in rootModulesToLook:
importString = '%s.%sSystem.%s.%s' % ( rootModule, system, self.__importLocation, module )
if self.__modSuffix:
importString = "%s%s" % ( importString, self.__modSuffix )
gLogger.verbose( "Trying to load %s" % importString )
result = self.__recurseImport( importString, hideExceptions = hideExceptions )
#Error while loading
if not result[ 'OK' ]:
return result
#Something has been found! break :)
if result[ 'Value' ]:
gLogger.verbose( "Found %s" % importString )
break
#Nothing found
if not result[ 'Value' ]:
return S_ERROR( "Could not find %s" % loadName )
modObj = result[ 'Value' ]
try:
#Try to get the class from the module
modClass = getattr( modObj, className )
except AttributeError:
location = ""
if '__file__' in dir( modObj ):
location = modObj.__file__
else:
location = modObj.__path__
gLogger.exception( "%s module does not have a %s class!" % ( location, module ) )
return S_ERROR( "Cannot load %s" % module )
#Check if it's subclass
if not issubclass( modClass, self.__superClass ):
return S_ERROR( "%s has to inherit from %s" % ( loadName, self.__superClass.__name__ ) )
self.__loadedModules[ loadName ] = { 'classObj' : modClass, 'moduleObj' : modObj }
#End of loading of 'loadName' module
#A-OK :)
self.__modules[ modName ] = self.__loadedModules[ loadName ].copy()
#keep the name of the real code module
self.__modules[ modName ][ 'modName' ] = modName
self.__modules[ modName ][ 'loadName' ] = loadName
gLogger.notice( "Loaded module %s" % modName )
return S_OK()
def __recurseImport( self, modName, parentModule = None, hideExceptions = False ):
if isinstance( modName, basestring):
modName = List.fromChar( modName, "." )
try:
if parentModule:
impData = imp.find_module( modName[0], parentModule.__path__ )
else:
impData = imp.find_module( modName[0] )
impModule = imp.load_module( modName[0], *impData )
if impData[0]:
impData[0].close()
except ImportError as excp:
strExcp = str( excp )
if strExcp.find( "No module named" ) == 0 and strExcp.find( modName[0] ) == len( strExcp ) - len( modName[0] ):
return S_OK()
errMsg = "Can't load %s" % ".".join( modName )
if not hideExceptions:
gLogger.exception( errMsg )
return S_ERROR( errMsg )
if len( modName ) == 1:
return S_OK( impModule )
return self.__recurseImport( modName[1:], impModule )
|
arrabito/DIRAC
|
Core/Base/private/ModuleLoader.py
|
Python
|
gpl-3.0
| 9,349
|
[
"DIRAC"
] |
70b25cf0a388c6fb633ce0ce6fe0c330890eb2ab77ebbd83b247f8af7afe9a87
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements DefectCompatibility analysis for consideration of
defects
"""
import logging
from monty.json import MSONable
from pymatgen.analysis.defects.core import Vacancy
from pymatgen.analysis.defects.corrections import (
BandEdgeShiftingCorrection,
BandFillingCorrection,
FreysoldtCorrection,
KumagaiCorrection,
)
from pymatgen.core import Structure
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class DefectCompatibility(MSONable):
    """
    The DefectCompatibility class evaluates corrections and delocalization
    metrics on a DefectEntry. It can only parse based on the available
    parameters that already exist in the parameters dict of the DefectEntry.
    required settings in defect_entry.parameters for various types of analysis/correction:
        freysoldt: [ "dielectric", "axis_grid", "bulk_planar_averages", "defect_planar_averages",
                    "initial_defect_structure", "defect_frac_sc_coords"]
        kumagai: [ "dielectric", "bulk_atomic_site_averages", "defect_atomic_site_averages",
                   "site_matching_indices", "initial_defect_structure", "defect_frac_sc_coords"]
        bandfilling: ["eigenvalues", "kpoint_weights", "potalign", "vbm", "cbm", "run_metadata"]
        bandshifting: ["hybrid_cbm", "hybrid_vbm", "vbm", "cbm"]
        defect relaxation/structure analysis: ["final_defect_structure", "initial_defect_structure",
                                               "sampling_radius", "defect_frac_sc_coords"]
    """

    def __init__(
        self,
        plnr_avg_var_tol=0.0001,
        plnr_avg_minmax_tol=0.1,
        atomic_site_var_tol=0.005,
        atomic_site_minmax_tol=0.1,
        tot_relax_tol=1.0,
        perc_relax_tol=50.0,
        defect_tot_relax_tol=2.0,
        preferred_cc="freysoldt",
        free_chg_cutoff=2.1,
        use_bandfilling=True,
        use_bandedgeshift=True,
    ):
        """
        Initializes the DefectCompatibility class
        Each argument helps decide whether a DefectEntry is flagged as compatible or not
        Args:
            plnr_avg_var_tol (float): compatibility tolerance for variance of the sampling region
                in the planar averaged electrostatic potential (FreysoldtCorrection)
            plnr_avg_minmax_tol (float): compatibility tolerance for max/min difference of the
                sampling region in the planar averaged electrostatic potential (FreysoldtCorrection)
            atomic_site_var_tol (float): compatibility tolerance for variance of the sampling
                region in the atomic site averaged electrostatic potential (KumagaiCorrection)
            atomic_site_minmax_tol (float): compatibility tolerance for max/min difference
                of the sampling region in the atomic site averaged electrostatic
                potential (KumagaiCorrection)
            tot_relax_tol (float): compatibility tolerance for total integrated relaxation
                amount outside of a given radius from the defect (in Angstrom).
                Radius is supplied as 'sampling_radius' within parameters of DefectEntry.
            perc_relax_tol (float): compatibility tolerance for percentage of total relaxation
                outside of a given radius from the defect (percentage amount),
                assuming a total integration relaxation greater than 1 Angstrom.
                Radius is supplied as 'sampling_radius' within parameters of DefectEntry.
            defect_tot_relax_tol (float): compatibility tolerance for displacement of defect site
                itself (in Angstrom).
            preferred_cc (str): Charge correction that is preferred to be used.
                If only one is available based on metadata, then that charge correction will be used.
                Options are: 'freysoldt' and 'kumagai'
            free_chg_cutoff (float): compatibility tolerance for total amount of host band occupation
                outside of band edges, given by eigenvalue data. Extra occupation in the CB would be
                free electrons, while lost occupation in VB would be free holes.
            use_bandfilling (bool): Whether to include BandFillingCorrection or not (assuming
                sufficient metadata is supplied to perform BandFillingCorrection).
            use_bandedgeshift (bool): Whether to perform a BandEdgeShiftingCorrection or not (assuming
                sufficient metadata is supplied to perform BandEdgeShiftingCorrection).
        """
        self.plnr_avg_var_tol = plnr_avg_var_tol
        self.plnr_avg_minmax_tol = plnr_avg_minmax_tol
        self.atomic_site_var_tol = atomic_site_var_tol
        self.atomic_site_minmax_tol = atomic_site_minmax_tol
        self.tot_relax_tol = tot_relax_tol
        self.perc_relax_tol = perc_relax_tol
        self.defect_tot_relax_tol = defect_tot_relax_tol
        self.preferred_cc = preferred_cc
        self.free_chg_cutoff = free_chg_cutoff
        self.use_bandfilling = use_bandfilling
        self.use_bandedgeshift = use_bandedgeshift

    def process_entry(self, defect_entry, perform_corrections=True):
        """
        Process a given DefectEntry with qualifiers given from initialization of class.
        Order of processing is:
            1) perform all possible defect corrections with information given
            2) consider delocalization analyses based on qualifier metrics
            given initialization of class. If delocalized, flag entry as delocalized
            3) update corrections to defect entry and flag as delocalized
        Corrections are applied based on:
            i) if free charges are more than free_chg_cutoff then will not apply charge correction,
            because it no longer is applicable
            ii) use charge correction set by preferred_cc
            iii) only use BandFilling correction if use_bandfilling is set to True
            iv) only use BandEdgeShift correction if use_bandedgeshift is set to True
        """
        # Structures may arrive serialized as dicts (e.g. from JSON); rehydrate first.
        for struct_key in [
            "bulk_sc_structure",
            "initial_defect_structure",
            "final_defect_structure",
        ]:
            if struct_key in defect_entry.parameters.keys() and isinstance(defect_entry.parameters[struct_key], dict):
                defect_entry.parameters[struct_key] = Structure.from_dict(defect_entry.parameters[struct_key])
        if perform_corrections:
            self.perform_all_corrections(defect_entry)
        self.delocalization_analysis(defect_entry)
        # apply corrections based on delocalization analysis
        corrections = {}
        skip_charge_corrections = False
        if "num_hole_vbm" in defect_entry.parameters.keys():
            # Too many free carriers -> charge correction no longer applicable
            if (self.free_chg_cutoff < defect_entry.parameters["num_hole_vbm"]) or (
                self.free_chg_cutoff < defect_entry.parameters["num_elec_cbm"]
            ):
                logger.info("Will not use charge correction because too many free charges")
                skip_charge_corrections = True
        if skip_charge_corrections:
            corrections.update({"charge_correction": 0.0})
        else:
            # Prefer Freysoldt when requested and available; fall back to Kumagai.
            if ("freysoldt" in self.preferred_cc.lower()) and ("freysoldt_meta" in defect_entry.parameters.keys()):
                frey_meta = defect_entry.parameters["freysoldt_meta"]
                frey_corr = frey_meta["freysoldt_electrostatic"] + frey_meta["freysoldt_potential_alignment_correction"]
                corrections.update({"charge_correction": frey_corr})
            elif "kumagai_meta" in defect_entry.parameters.keys():
                kumagai_meta = defect_entry.parameters["kumagai_meta"]
                kumagai_corr = (
                    kumagai_meta["kumagai_electrostatic"] + kumagai_meta["kumagai_potential_alignment_correction"]
                )
                corrections.update({"charge_correction": kumagai_corr})
            else:
                logger.info("Could not use any charge correction because insufficient metadata was supplied.")
        if self.use_bandfilling:
            if "bandfilling_meta" in defect_entry.parameters.keys():
                bfc_corr = defect_entry.parameters["bandfilling_meta"]["bandfilling_correction"]
                corrections.update({"bandfilling_correction": bfc_corr})
            else:
                logger.info("Could not use band filling correction because insufficient metadata was supplied.")
        else:
            corrections.update({"bandfilling_correction": 0.0})
        if self.use_bandedgeshift and ("bandshift_meta" in defect_entry.parameters.keys()):
            corrections.update(
                {
                    "bandedgeshifting_correction": defect_entry.parameters["bandshift_meta"][
                        "bandedgeshifting_correction"
                    ]
                }
            )
            # also want to update relevant data for phase diagram
            defect_entry.parameters.update(
                {
                    "phasediagram_meta": {
                        "vbm": defect_entry.parameters["hybrid_vbm"],
                        "gap": defect_entry.parameters["hybrid_cbm"] - defect_entry.parameters["hybrid_vbm"],
                    }
                }
            )
        else:
            corrections.update({"bandedgeshifting_correction": 0.0})
            # NOTE(review): this branch indexes parameters["vbm"]/["cbm"] without a
            # membership check, unlike the guarded accesses above -- entries lacking
            # those keys would raise KeyError here; confirm callers always set them.
            if isinstance(defect_entry.parameters["vbm"], float) and isinstance(defect_entry.parameters["cbm"], float):
                # still want to have vbm and gap ready for phase diagram
                defect_entry.parameters.update(
                    {
                        "phasediagram_meta": {
                            "vbm": defect_entry.parameters["vbm"],
                            "gap": defect_entry.parameters["cbm"] - defect_entry.parameters["vbm"],
                        }
                    }
                )
        defect_entry.corrections.update(corrections)
        return defect_entry

    def perform_all_corrections(self, defect_entry):
        """
        Perform all corrections for a defect.
        Args:
            defect_entry (DefectEntry): Defect to correct.
        Returns:
            Corrected DefectEntry
        """
        # consider running freysoldt correction
        required_frey_params = [
            "dielectric",
            "axis_grid",
            "bulk_planar_averages",
            "defect_planar_averages",
            "initial_defect_structure",
            "defect_frac_sc_coords",
        ]
        # run only when every required parameter key is present
        run_freysoldt = len(set(defect_entry.parameters.keys()).intersection(required_frey_params)) == len(
            required_frey_params
        )
        if not run_freysoldt:
            logger.info("Insufficient DefectEntry parameters exist for Freysoldt Correction.")
        else:
            defect_entry = self.perform_freysoldt(defect_entry)
        # consider running kumagai correction
        required_kumagai_params = [
            "dielectric",
            "bulk_atomic_site_averages",
            "defect_atomic_site_averages",
            "site_matching_indices",
            "initial_defect_structure",
            "defect_frac_sc_coords",
        ]
        run_kumagai = len(set(defect_entry.parameters.keys()).intersection(required_kumagai_params)) == len(
            required_kumagai_params
        )
        if not run_kumagai:
            logger.info("Insufficient DefectEntry parameters exist for Kumagai Correction.")
        else:
            try:
                defect_entry = self.perform_kumagai(defect_entry)
            except Exception:
                # Kumagai correction can fail internally; treat as "not performed"
                logger.info("Kumagai correction error occurred! Won't perform correction.")
        # add potalign based on preferred correction setting if it does not already exist in defect entry
        if self.preferred_cc == "freysoldt":
            if "freysoldt_meta" in defect_entry.parameters.keys():
                potalign = defect_entry.parameters["freysoldt_meta"]["freysoldt_potalign"]
                defect_entry.parameters["potalign"] = potalign
            elif "kumagai_meta" in defect_entry.parameters.keys():
                logger.info(
                    "WARNING: was not able to use potalign from Freysoldt correction, "
                    "using Kumagai value for purposes of band filling correction."
                )
                potalign = defect_entry.parameters["kumagai_meta"]["kumagai_potalign"]
                defect_entry.parameters["potalign"] = potalign
        else:
            if "kumagai_meta" in defect_entry.parameters.keys():
                potalign = defect_entry.parameters["kumagai_meta"]["kumagai_potalign"]
                defect_entry.parameters["potalign"] = potalign
            elif "freysoldt_meta" in defect_entry.parameters.keys():
                logger.info(
                    "WARNING: was not able to use potalign from Kumagai correction, "
                    "using Freysoldt value for purposes of band filling correction."
                )
                potalign = defect_entry.parameters["freysoldt_meta"]["freysoldt_potalign"]
                defect_entry.parameters["potalign"] = potalign
        # consider running band filling correction
        required_bandfilling_params = [
            "eigenvalues",
            "kpoint_weights",
            "potalign",
            "vbm",
            "cbm",
            "run_metadata",
        ]
        run_bandfilling = len(set(defect_entry.parameters.keys()).intersection(required_bandfilling_params)) == len(
            required_bandfilling_params
        )
        if run_bandfilling:
            # keys may exist but hold None -- that also disqualifies the correction
            if (
                (defect_entry.parameters["vbm"] is None)
                or (defect_entry.parameters["cbm"] is None)
                or (defect_entry.parameters["potalign"] is None)
            ):
                run_bandfilling = False
        if not run_bandfilling:
            logger.info("Insufficient DefectEntry parameters exist for BandFilling Correction.")
        else:
            defect_entry = self.perform_bandfilling(defect_entry)
        # consider running band edge shifting correction
        required_bandedge_shifting_params = ["hybrid_cbm", "hybrid_vbm", "vbm", "cbm"]
        run_bandedge_shifting = len(
            set(defect_entry.parameters.keys()).intersection(required_bandedge_shifting_params)
        ) == len(required_bandedge_shifting_params)
        if not run_bandedge_shifting:
            logger.info("Insufficient DefectEntry parameters exist for BandShifting Correction.")
        else:
            defect_entry = self.perform_band_edge_shifting(defect_entry)
        return defect_entry

    @staticmethod
    def perform_freysoldt(defect_entry):
        """
        Perform Freysoldt correction.
        Args:
            defect_entry (DefectEntry): Defect to correct.
        Returns:
            Corrected DefectEntry (with "freysoldt_meta" added to parameters)
        """
        FC = FreysoldtCorrection(defect_entry.parameters["dielectric"])
        freycorr = FC.get_correction(defect_entry)
        freysoldt_meta = FC.metadata.copy()
        freysoldt_meta["freysoldt_potalign"] = defect_entry.parameters["potalign"]
        freysoldt_meta["freysoldt_electrostatic"] = freycorr["freysoldt_electrostatic"]
        freysoldt_meta["freysoldt_potential_alignment_correction"] = freycorr["freysoldt_potential_alignment"]
        defect_entry.parameters.update({"freysoldt_meta": freysoldt_meta})
        return defect_entry

    @staticmethod
    def perform_kumagai(defect_entry):
        """
        Perform Kumagai correction.
        Args:
            defect_entry (DefectEntry): Defect to correct.
        Returns:
            Corrected DefectEntry (with "kumagai_meta" added to parameters)
        """
        # gamma / sampling_radius are optional tuning parameters
        gamma = defect_entry.parameters["gamma"] if "gamma" in defect_entry.parameters.keys() else None
        sampling_radius = (
            defect_entry.parameters["sampling_radius"] if "sampling_radius" in defect_entry.parameters.keys() else None
        )
        KC = KumagaiCorrection(
            defect_entry.parameters["dielectric"],
            sampling_radius=sampling_radius,
            gamma=gamma,
        )
        kumagaicorr = KC.get_correction(defect_entry)
        kumagai_meta = dict(KC.metadata.items())
        kumagai_meta["kumagai_potalign"] = defect_entry.parameters["potalign"]
        kumagai_meta["kumagai_electrostatic"] = kumagaicorr["kumagai_electrostatic"]
        kumagai_meta["kumagai_potential_alignment_correction"] = kumagaicorr["kumagai_potential_alignment"]
        defect_entry.parameters.update({"kumagai_meta": kumagai_meta})
        return defect_entry

    @staticmethod
    def perform_bandfilling(defect_entry):
        """
        Perform bandfilling correction.
        Args:
            defect_entry (DefectEntry): Defect to correct.
        Returns:
            Corrected DefectEntry
        """
        BFC = BandFillingCorrection()
        bfc_dict = BFC.get_correction(defect_entry)
        bandfilling_meta = defect_entry.parameters["bandfilling_meta"].copy()
        bandfilling_meta.update({"bandfilling_correction": bfc_dict["bandfilling_correction"]})
        defect_entry.parameters.update(
            {
                "bandfilling_meta": bandfilling_meta,
                # also update free holes and electrons for shallow level shifting correction...
                "num_hole_vbm": bandfilling_meta["num_hole_vbm"],
                "num_elec_cbm": bandfilling_meta["num_elec_cbm"],
            }
        )
        return defect_entry

    @staticmethod
    def perform_band_edge_shifting(defect_entry):
        """
        Perform band edge shifting correction.
        Args:
            defect_entry (DefectEntry): Defect to correct.
        Returns:
            Corrected DefectEntry
        """
        BEC = BandEdgeShiftingCorrection()
        bec_dict = BEC.get_correction(defect_entry)
        bandshift_meta = defect_entry.parameters["bandshift_meta"].copy()
        bandshift_meta.update(bec_dict)
        defect_entry.parameters.update({"bandshift_meta": bandshift_meta})
        return defect_entry

    def delocalization_analysis(self, defect_entry):
        """
        Do delocalization analysis. To do this, one considers:
            i) sampling region of planar averaged electrostatic potential (freysoldt approach)
            ii) sampling region of atomic site averaged potentials (kumagai approach)
            iii) structural relaxation amount outside of radius considered in kumagai approach (default is wigner seitz
            radius)
            iv) if defect is not a vacancy type -> track to see how much the defect has moved
        calculations that fail delocalization get "is_compatibile" set to False in parameters
        also parameters receives a "delocalization_meta" with following dict:
            plnr_avg = {'is_compatible': True/False, 'metadata': metadata used for determining this}
            atomic_site = {'is_compatible': True/False, 'metadata': metadata used for determining this}
            structure_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this}
            defectsite_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this}
        """
        defect_entry.parameters.update(
            {"is_compatible": True}
        )  # this will be switched to False if delocalization is detected
        if "freysoldt_meta" in defect_entry.parameters.keys():
            defect_entry = self.check_freysoldt_delocalized(defect_entry)
        else:
            logger.info(
                "Insufficient information provided for performing Freysoldt "
                "correction delocalization analysis.\n"
                "Cannot perform planar averaged electrostatic potential "
                "compatibility analysis."
            )
        if "kumagai_meta" in defect_entry.parameters.keys():
            defect_entry = self.check_kumagai_delocalized(defect_entry)
        else:
            logger.info(
                "Insufficient information provided for performing Kumagai "
                "correction delocalization analysis.\n"
                "Cannot perform atomic site averaged electrostatic "
                "potential compatibility analysis."
            )
        req_struct_delocal_params = [
            "final_defect_structure",
            "initial_defect_structure",
            "sampling_radius",
            "defect_frac_sc_coords",
        ]
        run_struct_delocal = len(set(defect_entry.parameters.keys()).intersection(req_struct_delocal_params)) == len(
            req_struct_delocal_params
        )
        if run_struct_delocal:
            defect_entry = self.check_final_relaxed_structure_delocalized(defect_entry)
        else:
            logger.info(
                "Insufficient information provided in defect_entry.parameters. "
                "Cannot perform full structure site relaxation compatibility analysis."
            )
        return defect_entry

    def check_freysoldt_delocalized(self, defect_entry):
        """
        Check for Freysoldt delocalization.
        Args:
            defect_entry (DefectEntry): Defect to correct.
        Returns:
            Corrected DefectEntry
        """
        plnr_avg_analyze_meta = {}
        plnr_avg_allows_compatible = True
        # check variance and min/max window of the sampling region along each axis
        for ax in range(3):
            freystats = defect_entry.parameters["freysoldt_meta"]["pot_corr_uncertainty_md"][ax]["stats"]
            frey_variance_compatible = freystats["variance"] <= self.plnr_avg_var_tol
            frey_window = abs(freystats["minmax"][1] - freystats["minmax"][0])
            frey_minmax_compatible = frey_window <= self.plnr_avg_minmax_tol
            plnr_avg_analyze_meta.update(
                {
                    ax: {
                        "frey_variance_compatible": frey_variance_compatible,
                        "frey_variance": freystats["variance"],
                        "plnr_avg_var_tol": self.plnr_avg_var_tol,
                        "frey_minmax_compatible": frey_minmax_compatible,
                        "frey_minmax_window": frey_window,
                        "plnr_avg_minmax_tol": self.plnr_avg_minmax_tol,
                    }
                }
            )
            # a single failing axis makes the whole entry incompatible
            if (not frey_variance_compatible) or (not frey_minmax_compatible):
                plnr_avg_allows_compatible = False
        if "delocalization_meta" not in defect_entry.parameters.keys():
            defect_entry.parameters["delocalization_meta"] = {}
        defect_entry.parameters["delocalization_meta"].update(
            {
                "plnr_avg": {
                    "is_compatible": plnr_avg_allows_compatible,
                    "metadata": plnr_avg_analyze_meta,
                }
            }
        )
        if not plnr_avg_allows_compatible:
            defect_entry.parameters.update({"is_compatible": False})
        return defect_entry

    def check_kumagai_delocalized(self, defect_entry):
        """
        Check for Kumagai delocalization.
        Args:
            defect_entry (DefectEntry): Defect to correct.
        Returns:
            Corrected DefectEntry
        """
        atomic_site_analyze_meta = {}
        kumagaistats = defect_entry.parameters["kumagai_meta"]["pot_corr_uncertainty_md"]["stats"]
        kumagai_variance_compatible = kumagaistats["variance"] <= self.atomic_site_var_tol
        kumagai_window = abs(kumagaistats["minmax"][1] - kumagaistats["minmax"][0])
        kumagai_minmax_compatible = kumagai_window <= self.atomic_site_minmax_tol
        atomic_site_analyze_meta.update(
            {
                "kumagai_variance_compatible": kumagai_variance_compatible,
                "kumagai_variance": kumagaistats["variance"],
                "atomic_site_var_tol": self.atomic_site_var_tol,
                "kumagai_minmax_compatible": kumagai_minmax_compatible,
                "kumagai_minmax_window": kumagai_window,
                # NOTE(review): key name looks like a copy/paste from the Freysoldt
                # check -- the stored value is atomic_site_minmax_tol, so this key
                # should probably be "atomic_site_minmax_tol"; confirm before renaming
                # (downstream consumers may rely on the current key).
                "plnr_avg_minmax_tol": self.atomic_site_minmax_tol,
            }
        )
        atomic_site_allows_compatible = kumagai_variance_compatible and kumagai_minmax_compatible
        if "delocalization_meta" not in defect_entry.parameters.keys():
            defect_entry.parameters["delocalization_meta"] = {}
        defect_entry.parameters["delocalization_meta"].update(
            {
                "atomic_site": {
                    "is_compatible": atomic_site_allows_compatible,
                    "metadata": atomic_site_analyze_meta,
                }
            }
        )
        if not atomic_site_allows_compatible:
            defect_entry.parameters.update({"is_compatible": False})
        return defect_entry

    def check_final_relaxed_structure_delocalized(self, defect_entry):
        """
        NOTE this assumes initial and final structures have sites indexed in same way
        :param defect_entry:
        :return:
        """
        structure_relax_analyze_meta = {}
        initial_defect_structure = defect_entry.parameters["initial_defect_structure"]
        final_defect_structure = defect_entry.parameters["final_defect_structure"]
        radius_to_sample = defect_entry.parameters["sampling_radius"]
        def_frac_coords = defect_entry.parameters["defect_frac_sc_coords"]
        initsites = [site.frac_coords for site in initial_defect_structure]
        finalsites = [site.frac_coords for site in final_defect_structure]
        distmatrix = initial_defect_structure.lattice.get_all_distances(finalsites, initsites)
        # calculate distance moved as a function of the distance from the defect
        distdata = []
        totpert = 0.0
        defindex = None
        for ind, site in enumerate(initial_defect_structure.sites):
            # a site within 0.01 (fractional-coordinate distance) of the defect
            # position is taken to be the defect itself and excluded from the sum
            if site.distance_and_image_from_frac_coords(def_frac_coords)[0] < 0.01:
                defindex = ind
                continue
            totpert += distmatrix[ind, ind]
            # append [distance to defect, distance traveled, index in structure]
            distance_to_defect = initial_defect_structure.lattice.get_distance_and_image(
                def_frac_coords, initsites[ind]
            )[0]
            distdata.append([distance_to_defect, distmatrix[ind, ind], int(ind)])
        if defindex is None and not isinstance(defect_entry.defect, Vacancy):
            raise ValueError("fractional coordinate for defect could not be identified in initial_defect_structure")
        distdata.sort()
        tot_relax_outside_rad = 0.0
        perc_relax_outside_rad = 0.0
        for newind, d in enumerate(distdata):
            perc_relax = 100 * d[1] / totpert if totpert else 0.0
            d.append(perc_relax)  # percentage contribution to total relaxation
            if d[0] > radius_to_sample:
                tot_relax_outside_rad += d[1]
                perc_relax_outside_rad += d[3]
        structure_tot_relax_compatible = tot_relax_outside_rad <= self.tot_relax_tol
        # percentage criterion only applies when total relaxation is >= 1 Angstrom
        structure_perc_relax_compatible = not (perc_relax_outside_rad > self.perc_relax_tol and totpert >= 1.0)
        structure_relax_analyze_meta.update(
            {
                "structure_tot_relax_compatible": structure_tot_relax_compatible,
                "tot_relax_outside_rad": tot_relax_outside_rad,
                "tot_relax_tol": self.tot_relax_tol,
                "structure_perc_relax_compatible": structure_perc_relax_compatible,
                "perc_relax_outside_rad": perc_relax_outside_rad,
                "perc_relax_tol": self.perc_relax_tol,
                "full_structure_relax_data": distdata,
                "defect_index": defindex,
            }
        )
        structure_relax_allows_compatible = structure_tot_relax_compatible and structure_perc_relax_compatible
        # NEXT: do single defect delocalization analysis (requires similar data, so might as well run in tandem
        # with structural delocalization)
        defectsite_relax_analyze_meta = {}
        if isinstance(defect_entry.defect, Vacancy):
            # vacancies have no site of their own to track
            defectsite_relax_allows_compatible = True
            defectsite_relax_analyze_meta.update(
                {
                    "relax_amount": None,
                    "defect_tot_relax_tol": self.defect_tot_relax_tol,
                }
            )
        else:
            defect_relax_amount = distmatrix[defindex, defindex]
            defectsite_relax_allows_compatible = defect_relax_amount <= self.defect_tot_relax_tol
            defectsite_relax_analyze_meta.update(
                {
                    "relax_amount": defect_relax_amount,
                    "defect_tot_relax_tol": self.defect_tot_relax_tol,
                }
            )
        if "delocalization_meta" not in defect_entry.parameters.keys():
            defect_entry.parameters["delocalization_meta"] = {}
        defect_entry.parameters["delocalization_meta"].update(
            {
                "defectsite_relax": {
                    "is_compatible": defectsite_relax_allows_compatible,
                    "metadata": defectsite_relax_analyze_meta,
                }
            }
        )
        defect_entry.parameters["delocalization_meta"].update(
            {
                "structure_relax": {
                    "is_compatible": structure_relax_allows_compatible,
                    "metadata": structure_relax_analyze_meta,
                }
            }
        )
        if (not structure_relax_allows_compatible) or (not defectsite_relax_allows_compatible):
            defect_entry.parameters.update({"is_compatible": False})
        return defect_entry
|
materialsproject/pymatgen
|
pymatgen/analysis/defects/defect_compatibility.py
|
Python
|
mit
| 29,940
|
[
"pymatgen"
] |
4ccc82cd886865e1a2ba7eeb0bc90f94d9db0601bdb63fa9863a68646548f2a0
|
# -*- coding: utf-8 -*-
#
# Copyright © 2013-2015 Michael Rabbitt, Roberto Alsina and others.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Inspired by "[Python] reStructuredText GitHub Podcast directive"
# (https://gist.github.com/brianhsu/1407759), public domain by Brian Hsu
"""
Extension to Python Markdown for Embedded Audio.
Basic Example:
>>> import markdown
>>> text = "[podcast]https://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3[/podcast]"
>>> html = markdown.markdown(text, [PodcastExtension()])
>>> print(html)
<p><audio controls=""><source src="https://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3" type="audio/mpeg"></source></audio></p>
"""
from __future__ import print_function, unicode_literals
from nikola.plugin_categories import MarkdownExtension
try:
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree
except ImportError:
# No need to catch this, if you try to use this without Markdown,
# the markdown compiler will fail first
Pattern = Extension = object
PODCAST_RE = r'\[podcast\](?P<url>.+)\[/podcast\]'
class PodcastPattern(Pattern):
    """Inline pattern that rewrites [podcast]URL[/podcast] markers as HTML5 audio."""

    def __init__(self, pattern, configs):
        """Store the regex; *configs* is accepted for API symmetry but unused."""
        Pattern.__init__(self, pattern)

    def handleMatch(self, m):
        """Return an <audio controls><source src=URL type=audio/mpeg> element."""
        audio_url = m.group('url').strip()
        audio = etree.Element('audio')
        audio.set('controls', '')
        source = etree.SubElement(audio, 'source')
        source.set('src', audio_url)
        source.set('type', 'audio/mpeg')
        return audio
class PodcastExtension(MarkdownExtension, Extension):
    """Podcast extension for Markdown."""

    def __init__(self, configs=None):
        """Initialize extension.

        configs -- optional iterable of (key, value) pairs that override the
        extension defaults.  May be None or empty.
        """
        # set extension defaults
        self.config = {}
        # Override defaults with user settings.
        # BUG FIX: the old signature used a mutable default ({}) and crashed
        # with TypeError when makeExtension() passed configs=None.
        for key, value in (configs or {}):
            self.setConfig(key, value)

    def extendMarkdown(self, md, md_globals):
        """Extend Markdown: register the [podcast] inline pattern."""
        podcast_md_pattern = PodcastPattern(PODCAST_RE, self.getConfigs())
        podcast_md_pattern.md = md
        # Insert before the 'not_strong' pattern so the URL is not mangled.
        md.inlinePatterns.add('podcast', podcast_md_pattern, "<not_strong")
        md.registerExtension(self)
def makeExtension(configs=None):  # pragma: no cover
    """Build and return the podcast Markdown extension instance."""
    extension = PodcastExtension(configs)
    return extension
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE +
doctest.REPORT_NDIFF))
|
techdragon/nikola
|
nikola/plugins/compile/markdown/mdx_podcast.py
|
Python
|
mit
| 3,699
|
[
"Brian"
] |
34d868fcb11f8acb4197fb8a5451516219ded2bba140a18aacafbeb13351cdee
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2012, 2013 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
import shutil
import tempfile
import unittest
from PyQt4 import Qt
from PyQt4 import QtGui
from PyQt4 import QtTest
from freeseer.framework.config.profile import ProfileManager
from freeseer.frontend.reporteditor.reporteditor import ReportEditorApp
from freeseer import settings
class TestReportEditorApp(unittest.TestCase):
    '''
    Test cases for ReportEditorApp.
    '''
    def setUp(self):
        '''
        Standard init method: runs before each test_* method
        Initializes a QtGui.QApplication and ReportEditorApp object.
        ReportEditorApp() causes the UI to be rendered.
        '''
        # Fresh temp-dir profile keeps tests isolated from any real
        # freeseer configuration on the machine running the suite.
        self.profile_manager = ProfileManager(tempfile.mkdtemp())
        profile = self.profile_manager.get('testing')
        config = profile.get_config('freeseer.conf', settings.FreeseerConfig, storage_args=['Global'], read_only=False)
        db = profile.get_database()
        self.app = QtGui.QApplication([])
        self.report_editor = ReportEditorApp(config, db)
        self.report_editor.show()
    def tearDown(self):
        # Remove the temporary profile directory created in setUp.
        shutil.rmtree(self.profile_manager._base_folder)
        # NOTE(review): the widget's app reference is dropped before the
        # QApplication is scheduled for deletion -- confirm this ordering
        # is required to avoid a dangling Qt object.
        del self.report_editor.app
        self.app.deleteLater()
    def test_close_report_editor(self):
        '''
        Tests closing the ReportEditorApp
        '''
        QtTest.QTest.mouseClick(self.report_editor.editorWidget.closeButton, Qt.Qt.LeftButton)
        self.assertFalse(self.report_editor.editorWidget.isVisible())
    def test_file_menu_quit(self):
        '''
        Tests ReportEditorApp's File->Quit
        '''
        self.assertTrue(self.report_editor.isVisible())
        # File->Menu
        self.report_editor.actionExit.trigger()
        self.assertFalse(self.report_editor.isVisible())
    def test_help_menu_about(self):
        '''
        Tests ReportEditorApp's Help->About
        '''
        self.assertTrue(self.report_editor.isVisible())
        # Help->About
        self.report_editor.actionAbout.trigger()
        self.assertFalse(self.report_editor.hasFocus())
        self.assertTrue(self.report_editor.aboutDialog.isVisible())
        # Click "Close"
        QtTest.QTest.mouseClick(self.report_editor.aboutDialog.closeButton, Qt.Qt.LeftButton)
        self.assertFalse(self.report_editor.aboutDialog.isVisible())
|
Freeseer/freeseer
|
src/freeseer/tests/frontend/reporteditor/test_reporteditor.py
|
Python
|
gpl-3.0
| 3,212
|
[
"VisIt"
] |
8922249dac9937c6833ebcefb03e5d02a3c447e9a9006231c9fa539b3663977d
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2010 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" A dialog for sellable categories selection, offering buttons for
creation and edition.
"""
from kiwi.ui.objectlist import Column
from stoqlib.domain.taxes import ProductTaxTemplate
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.gui.search.searchcolumns import SearchColumn
from stoqlib.gui.search.searcheditor import SearchEditor
from stoqlib.gui.slaves.taxslave import ICMSTemplateSlave, IPITemplateSlave
from stoqlib.gui.editors.taxclasseditor import ProductTaxTemplateEditor
_ = stoqlib_gettext
# Maps a tax template type to the slave (embedded editor widget) used for
# editing templates of that type.
TYPE_SLAVES = {
    ProductTaxTemplate.TYPE_ICMS: ICMSTemplateSlave,
    ProductTaxTemplate.TYPE_IPI: IPITemplateSlave,
}
class TaxTemplatesSearch(SearchEditor):
    """Search dialog listing product tax class templates."""

    size = (500, 350)
    title = _('Tax Classes Search')
    search_label = _('Class Matching:')
    search_spec = ProductTaxTemplate
    editor_class = ProductTaxTemplateEditor
    text_field_columns = [ProductTaxTemplate.name]

    def get_columns(self):
        """Columns shown in the result list: class name and tax type."""
        name_column = SearchColumn("name", _("Class name"), data_type=str,
                                   sorted=True, expand=True)
        type_column = Column("tax_type_str", _("Type"), data_type=str,
                             width=80)
        return [name_column, type_column]
|
tiagocardosos/stoq
|
stoqlib/gui/search/taxclasssearch.py
|
Python
|
gpl-2.0
| 2,078
|
[
"VisIt"
] |
ce9517d35b5df61de0e8a6e674d0357a4fe55678caa5b2e72e6602603e239800
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print __doc__
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# License: BSD style
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf  # density
PHI = stats.distributions.norm().cdf  # cumulative distribution function
PHIinv = stats.distributions.norm().ppf  # inverse CDF (quantile function)
# A few constants
lim = 8  # plotting/evaluation domain is [-lim, lim] in both dimensions
def g(x):
    """Target function; classification then asks whether g(x) <= 0.

    *x* is an (n, 2) array of points; returns an (n,) array of values.
    """
    quadratic_term = 0.5 * x[:, 0] ** 2.0
    return 5.0 - x[:, 1] - quadratic_term
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
              [4.10469096, 5.32782448],
              [0.00000000, -0.50000000],
              [-6.17289014, -4.6984743],
              [1.3109306, -6.93271427],
              [-5.03823144, 3.10584743],
              [-2.87600388, 6.74310541],
              [5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
                     np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)  # predictive standard deviation
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)  # NOTE(review): 97.5% quantile -- appears unused below
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
# P[G(x) <= 0] = PHI(-mu/sigma) thanks to the Gaussian predictive distribution
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
                extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
# Training points colored by class (red: g <= 0, blue: g > 0)
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
# True zero level set (dash-dot), then the predicted median (dashed) and the
# 95% confidence bounds (solid blue/red) on the zero level set.
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
                linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
                linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
                linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
|
mrshu/scikit-learn
|
examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py
|
Python
|
bsd-3-clause
| 3,486
|
[
"Gaussian"
] |
de49ae949ad7b6e0c9cb3cf253265e246864214dfb99c3b2af621e1b0d99b83a
|
# =========================================================================
#
# Imports
#
# =========================================================================
import unittest
from .context import constants
from .context import euler_physics
import numpy as np
import numpy.testing as npt
# =========================================================================
#
# Class definitions
#
# =========================================================================
class EulerPhysicsTestCase(unittest.TestCase):
    """Tests for `euler_physics.py`."""
    # =========================================================================
    def setUp(self):
        """Build left/right conservative-variable states for six classic 1D
        Riemann problems; the flux-solver tests below consume self.ul/self.ur.
        """
        # initialize gamma (needed in flux calculations)
        constants.init()
        constants.gamma = 1.4
        # Left/right toy data taken from shock tube problems found here:
        # http://num3sis.inria.fr/blog/eulerian-flows-approximate-riemann-solvers-validation-on-1d-test-cases/
        # or: C. Kong MS thesis at U. of Reading
        # http://www.readingconnect.net/web/FILES/maths/CKong-riemann.pdf
        # For each test: rhoL, uL, pL, rhoR, uR, pR
        t1 = np.array([1, 0, 1, 0.125,
                       0.0, 0.1])  # Sod shock tube
        # Modified Sod shock tube
        t2 = np.array([1, 0.75, 1.0, 0.125, 0.0, 0.1])
        t3 = np.array([1, -2.0, 0.4, 1.0,
                       2.0, 0.4])  # 123 problem
        # Left Woodward and Colella (blast wave)
        t4 = np.array([1, 0.0, 1000.0, 1.0, 0.0, 0.01])
        # collision of two strong shocks
        t5 = np.array([5.99924, 19.5975, 460.894,
                       5.99242, -6.19633, 46.0950])
        # stationary contact discontinuity
        t6 = np.array([1.4, 0.0, 1.0, 1.0, 0.0, 1.0])
        # Transform to conserved variables
        def ptoc(tp):
            """Take a test case containing rhoL, uL, pL, rhoR, uR, pR and turn
            them into conservative variables
            """
            # [rho, rho*u, E] on each side; E = p/(gamma-1) + rho*u^2/2
            tc = np.zeros(tp.shape)
            tc[0] = tp[0]
            tc[1] = tp[0] * tp[1]
            tc[2] = tp[2] / (constants.gamma - 1) + 0.5 * tp[0] * tp[1] * tp[1]
            tc[3] = tp[3]
            tc[4] = tp[3] * tp[4]
            tc[5] = tp[5] / (constants.gamma - 1) + 0.5 * tp[3] * tp[4] * tp[4]
            return tc
        t1 = ptoc(t1)
        t2 = ptoc(t2)
        t3 = ptoc(t3)
        t4 = ptoc(t4)
        t5 = ptoc(t5)
        t6 = ptoc(t6)
        # Put them into a long vector
        # (six states x three conserved variables => length-18 arrays)
        self.ul = np.array([t1[0:3],
                            t2[0:3],
                            t3[0:3],
                            t4[0:3],
                            t5[0:3],
                            t6[0:3]]).flatten()
        self.ur = np.array([t1[3::],
                            t2[3::],
                            t3[3::],
                            t4[3::],
                            t5[3::],
                            t6[3::]]).flatten()
    # =========================================================================
    def test_max_wave_speed(self):
        """Is the max wave speed solver correct?"""
        # toy data
        u = np.arange(1, 12 * 3 + 1).reshape((3, 12))
        # Get the maximum wave speed
        m = euler_physics.max_wave_speed(u)
        # test
        npt.assert_array_almost_equal(m, 2.74833147735, decimal=7)
    # =========================================================================
    def test_riemann_rusanov(self):
        """Is the Rusanov Riemann solver correct?"""
        # Get the flux
        F = euler_physics.riemann_rusanov(self.ul, self.ur)
        # test (reference values generated by the Rusanov solver itself;
        # this is a regression guard, not an analytic check)
        npt.assert_array_almost_equal(F,
                                      np.array([5.1765698102e-01, 5.5000000000e-01, 1.3311179512e+00,
                                                1.2207819810e+00, 1.5562059837e+00, 3.8646951951e+00,
                                                0.0000000000e+00, -1.0966629547e+00, 0.0000000000e+00,
                                                0.0000000000e+00, 5.0000500000e+02, 4.6770249628e+04,
                                                4.0321739283e+01, 3.8386450281e+03, 5.7316182989e+04,
                                                2.3664319132e-01, 1.0000000000e+00, 0.0000000000e+00]),
                                      decimal=6)
    # =========================================================================
    def test_riemann_godunov(self):
        """Is the Godunov Riemann solver correct?"""
        # Get the flux
        F = euler_physics.riemann_godunov(self.ul, self.ur)
        # test (exact solution generated by my exact Riemann solver)
        npt.assert_array_almost_equal(F,
                                      np.array([0.3953910704650308, 0.6698366621333465, 1.1540375166808616,
                                                0.8109525650238815, 1.5445355710738495, 3.0029992255123030,
                                                0.0000000000000000, 0.0018938734200542, 0.0000000000000000,
                                                11.2697554398918438, 681.7522718876612089, 33777.3342909460770898,
                                                117.5701059000000015, 2764.9741503752502467, 54190.4009509894967778,
                                                0.0000000000000000, 1.0000000000000000, 0.0000000000000000]),
                                      decimal=6)
    # =========================================================================
    def test_riemann_roe(self):
        """Is the Roe Riemann solver correct?"""
        # Get the flux
        F = euler_physics.riemann_roe(self.ul, self.ur)
        # test (regression reference values)
        npt.assert_array_almost_equal(F,
                                      np.array([3.9066048579e-01, 5.5000000000e-01, 1.2958822774e+00,
                                                8.8328703998e-01, 1.4815703003e+00, 3.2200016348e+00,
                                                0.0000000000e+00, -3.7700000000e+00, 0.0000000000e+00,
                                                2.4445173035e+01, 3.5323828015e+02, 4.2779480602e+04,
                                                1.0069219686e+02, 2.8140961713e+03, 5.0998456608e+04,
                                                0.0000000000e+00, 1.0000000000e+00, 0.0000000000e+00]),
                                      decimal=6)
    # =========================================================================
    def test_interior_flux(self):
        """Is the interior flux correct?"""
        # toy data
        u = np.arange(1, 12 * 3 + 1).reshape((3, 12))
        # Get the maximum wave speed
        F = euler_physics.interior_flux(u)
        # test
        npt.assert_array_almost_equal(F,
                                      np.array([[2., 4.4, 6.8, 5., 7.4, 8.9375, 8., 10.91428571, 12.31020408, 11., 14.48, 15.818],
                                                [14., 18.06153846, 19.36804734, 17., 21.65, 22.93671875, 20.,
                                                 25.24210526, 26.51523546, 23., 28.83636364, 30.09958678],
                                                [26., 32.432, 33.68768, 29., 36.02857143, 37.27831633, 32., 39.62580645, 40.87075963, 35., 43.22352941, 44.46453287]]),
                                      decimal=7)
if __name__ == '__main__':
unittest.main()
|
marchdf/dg1d
|
tests/test_euler_physics.py
|
Python
|
apache-2.0
| 7,497
|
[
"BLAST"
] |
6b546f7c022aca704f63894503cfffe845f2b25f6bd7e6508dfd411e46eaa410
|
#!/usr/bin/env python2.7
from __future__ import with_statement
import hashlib
import os
import random
import re
import sys
from pprint import pprint
from fabric import api
from fabric import network
from fabric.colors import blue, green, red, white, yellow, magenta
from fabric.api import abort, cd, local, env, settings, sudo, get, put, hide
from fabric.contrib import files
from fabric.contrib.console import confirm
import logging
logging.basicConfig()
# Silence paramiko's noisy transport logger during fabric operations.
paramiko_logger = logging.getLogger("paramiko.transport")
paramiko_logger.disabled = True
# Options in project.ini whose values are space-separated lists; they are
# split into real Python lists by _modify_config().
SPACE_SEPERATED_CONFIG_VALUES = [
    'services',
    'loaddata_apps',
]
def _modify_config(config):
    """Split space-separated option values (SPACE_SEPERATED_CONFIG_VALUES) into lists, in place."""
    for option in SPACE_SEPERATED_CONFIG_VALUES:
        if option in config:
            config[option] = config[option].split()
    return config
def _load_project_config(environment=None):
    """Parse project.ini and return its [project] section as a dict.

    If *environment* is given, values from [project:<environment>] override
    the base section, '_selected_environment' is recorded in the dict, and
    the configured host is appended to fabric's env.hosts.  The
    '_environments' key always lists the available [project:*] postfixes.
    """
    # NOTE(review): ConfigParser spelling is Python-2 only; on Python 3 this
    # would need `from configparser import ConfigParser`.
    from ConfigParser import SafeConfigParser
    config_file = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'project.ini')
    project_config = SafeConfigParser()
    project_config.read(config_file)
    def get_section(project_config, section):
        # Flatten one ini section into a plain dict.
        options = {}
        for key in project_config.options(section):
            options[key] = project_config.get(section, key)
        return options
    def get_available_postfixes(project_config, section):
        # Collect the '<section>:<postfix>' suffixes, e.g. 'live', 'staging'.
        postfixes = []
        for key in project_config.sections():
            if key.startswith('%s:' % section):
                postfixes.append(key[len('%s:' % section):])
        return postfixes
    if environment:
        env.hosts.extend([
            project_config.get('project', 'host'),
        ])
    config_dict = get_section(project_config, 'project')
    if environment:
        config_dict.update(get_section(project_config, 'project:%s' % environment))
        config_dict['_selected_environment'] = environment
    config_dict['_environments'] = get_available_postfixes(
        project_config,
        'project')
    return config_dict
def _load_environment(environment):
    """Activate *environment*: load + normalize its config and point fabric's env at it."""
    config = _load_project_config(environment)
    config = _modify_config(config)
    env.config = config
    env.hosts = env.config['host']
def live():
    """Select the 'live' environment (usage: fab live <command>)."""
    _load_environment('live')
def staging():
    """Select the 'staging' environment (usage: fab staging <command>)."""
    _load_environment('staging')
def _require_environment(func):
    """Decorator: abort with usage help unless live()/staging() selected an environment."""
    from functools import wraps
    @wraps(func)
    def decorated(*args, **kwargs):
        # env.config['_selected_environment'] is set by _load_project_config().
        if not hasattr(env, 'config') or '_selected_environment' not in env.config:
            print(red('ERROR: You need to select an environment.'))
            print(yellow('The following environments are available:'))
            environments = _load_project_config()['_environments']
            for environment in environments:
                print(yellow(' ' + environment))
            print('You can use them with ' + blue('fab <environment> <command>'))
            print('Example: ' + blue('fab ' + environments[0] + ' deploy'))
            sys.exit(1)
        return func(*args, **kwargs)
    return decorated
# NOTE(review): SERVER_SETTINGS_FILE appears unused within this file.
SERVER_SETTINGS_FILE = 'server_settings.py'
@_require_environment
def run(command):
    '''
    Overwriting run command to execute tasks as project user.
    '''
    # NOTE(review): 'string-escape' is a Python-2-only codec.
    command = command.encode('string-escape')
    sudo('su {user} -c "{command}"'.format(
        command=command,
        **env.config))
@_require_environment
def update(rev=None):
    '''
    * Update the checkout.
    '''
    # Default to the branch configured in project.ini.
    if rev is None:
        rev = env.config['branch']
    with cd(env.config['path']):
        sudo('git fetch origin', user=env.config['repo_manager'])
        # Hard reset discards any local modifications on the server.
        sudo('git reset --hard {rev}'.format(rev=rev), user=env.config['repo_manager'])
    run('mkdir -p {root}/logs'.format(**env.config))
    setup_fs_permissions()
@_require_environment
def migratedb():
    '''
    * run migrate
    '''
    with cd(env.config['path']):
        run('.env/bin/python3 manage.py migrate --noinput')
@_require_environment
def reload_webserver():
    '''
    * reload nginx
    '''
    sudo('/etc/init.d/nginx reload')
@_require_environment
def restart_webserver():
    '''
    * restart nginx
    '''
    sudo('/etc/init.d/nginx restart')
@_require_environment
def showenv():
    """Dump the merged environment configuration for debugging."""
    pprint(env.config)
@_require_environment
def test():
    """Sanity-check the configuration and the server-side installation.

    Verifies that project.ini contains all required options, that the
    project directory exists on the server, and that a local_settings
    file has been created there.  Exits with status 1 on failure.
    """
    # test if the project.ini file is filled correctly
    # (these were previously written as `('name'),` etc., which are just
    # parenthesized strings -- spelled plainly here for clarity)
    required_config = (
        'name',
        'repository',
        'host',
        'domain',
        'path',
        'django_port',
    )
    missing_values = []
    for config_name in required_config:
        value = env.config[config_name]
        if not value:
            missing_values.append(config_name)
    if missing_values:
        print(
            red(u'Error: ') +
            u'Please modify ' + yellow('project.ini') +
            u' to contain all the necessary information. ' +
            u'The following options are missing:\n'
        )
        # BUG FIX: missing_values holds plain option-name strings; the old
        # loop unpacked each string as (section, key) and crashed for any
        # name not exactly two characters long.
        for config_name in missing_values:
            print(yellow(u'\t%s' % config_name))
        sys.exit(1)
    # check if project is already set up on the server
    if not files.exists(env.config['path']):
        print(
            red(u'Error: ') +
            u'The project is not yet installed on the server. ' +
            u'Please run ' + blue(u'fab install')
        )
        sys.exit(1)
    # check if project has a local_settings file
    with cd(env.config['path']):
        if not files.exists(env.config['local_settings']):
            print(
                red(u'Error: ') +
                u'The project has no ' + yellow(u'local_settings.py') +
                u' configuration file on the server yet. ' +
                u'Please run ' + blue(u'fab install') + u'.'
            )
            sys.exit(1)
    print(
        green(u'Congratulations. Everything seems fine so far!\n') +
        u'You can run ' + yellow(u'fab deploy') + ' to update the server.'
    )
@_require_environment
def collectstatic():
    '''
    * run .env/bin/python manage.py collectstatic
    '''
    # Build failures are tolerated (warn_only) so collectstatic still runs.
    with settings(warn_only=True):
        build()
    with cd(env.config['path']):
        run('.env/bin/python3 manage.py collectstatic -v0 --noinput')
def setup_virtualenv():
    '''
    * setup virtualenv
    '''
    run('python3 -m venv .env --without-pip')
    with cd('.env'):
        run('wget https://bootstrap.pypa.io/get-pip.py')
        run('bin/python3 get-pip.py')
    # NOTE(review): local() runs on the invoking machine while the venv above
    # was created via run() -- confirm this asymmetry is intended.
    local('cp config/activate_this.py .env/bin/activate_this.py')
@_require_environment
def pip_install():
    '''
    * install dependencies
    '''
    with cd(env.config['path']):
        run('.env/bin/pip3 install -r requirements.txt')
@_require_environment
def npm_install():
    '''
    * install JS dependencies
    '''
    with cd(env.config['frontend']):
        run('npm install')
        run('npm run build --aot --prod')
        run('npm run precache')
@_require_environment
def bower_install():
    '''
    * install JS dependencies
    '''
    with cd(env.config['path']):
        run('bower install --config.interactive=false')
@_require_environment
def build():
    '''
    * Running build on the server.
    '''
    with cd(env.config['path']):
        run('gulp build')
@_require_environment
def deploy(rev=None):
    '''
    * upload source
    * build static files
    * restart services
    '''
    update(rev=rev)
    #npm_install()
    pip_install()
    migratedb()
    collectstatic()
    restart()
@_require_environment
def create_user():
    """Create the project's system user and wire up its group memberships."""
    # warn_only: user/groups may already exist from a previous install.
    with settings(warn_only=True):
        sudo('useradd --home %(path)s %(user)s' % env.config)
        sudo('gpasswd -a %(user)s projects' % env.config)
        sudo('gpasswd -a www-data %(user)s' % env.config)
        sudo('gpasswd -a sam %(user)s' % env.config)
        sudo('gpasswd -a %(user)s sam' % env.config)
@_require_environment
def setup(mysql_root_password=None):
    '''
    * symlink services to /etc/service/<project_name>-<service>
    * symlink and nginx config to /etc/nginx/sites-available
    * symlink and nginx config from /etc/nginx/sites-available to
      /etc/nginx/sites-enabled
    * reload nginx
    '''
    # NOTE(review): mysql_root_password is accepted but never used here.
    port = _determine_port()
    # Values substituted into the local_settings / nginx / service templates.
    template_config = {
        u'USER': env.config['user'],
        u'PATH': env.config['path'],
        u'PROJECT_NAME': env.config['name'],
        u'DOMAIN': env.config['domain'],
        u'PORT': port,
        u'DBNAME': env.config['dbname'],
        u'DBUSER': env.config['dbname'],
    }
    with cd(env.config['path']):
        # Create local_settings only once, with a fresh SECRET_KEY.
        if not files.exists(env.config['local_settings']):
            context = template_config.copy()
            context.update({
                u'SECRET_KEY': _generate_secret_key(),
            })
            files.upload_template(
                u'src/website/local_settings.example.py',
                context=context,
                destination=env.config['local_settings'])
        context = template_config.copy()
        files.upload_template(
            u'config/nginx.conf.template',
            context=context,
            destination=u'config/nginx.conf')
        for service in env.config['services']:
            files.upload_template(
                u'services/%s.template' % service,
                context=context,
                destination=u'services/%s' % service)
    # Register each service with the /service/<name>/run supervisor layout.
    for service_config in _services():
        local_config = env.config.copy()
        local_config.update(service_config)
        if not files.exists('/service/%(service_name)s/run' % local_config):
            sudo('mkdir -p /service/%(service_name)s' % local_config)
            sudo('ln -s %(path)s/services/%(service)s /service/%(service_name)s/run' % local_config)
    # Symlink the nginx site config into sites-available/sites-enabled.
    if not files.exists('/etc/nginx/sites-available/%(name)s.conf' % env.config):
        sudo('ln -s %(path)s/config/nginx.conf /etc/nginx/sites-available/%(name)s.conf' % env.config)
    if not files.exists('/etc/nginx/sites-enabled/%(name)s.conf' % env.config):
        sudo('ln -s /etc/nginx/sites-available/%(name)s.conf /etc/nginx/sites-enabled' % env.config)
    setup_fs_permissions()
    reload_webserver()
    restart()
@_require_environment
def install(root_password=None):
    """First-time server install: create user, clone repo, build venv,
    configure services/nginx, migrate the database and start everything."""
    create_user()
    stop()
    # create project's parent directory
    if not files.exists(env.config['root']):
        sudo('mkdir -p %s' % env.config['root'])
        sudo('chown {user}:{user} -R {root}'.format(**env.config))
        sudo('chmod g+w -R {root}'.format(**env.config))
    # git clone
    if not files.exists(env.config['path']):
        sudo('git clone {repository} {path}'.format(**env.config), user=env.config['repo_manager'])
    else:
        update()
    setup_fs_permissions()
    # NOTE(review): presumably drops cached SSH sessions so the new group
    # memberships from create_user() take effect -- confirm.
    network.disconnect_all()
    setup_virtualenv()
    pip_install()
    setup(root_password)
    migratedb()
    npm_install()
    collectstatic()
    start()
    reload_webserver()
    setup_fs_permissions()
    print(green(u'Success!\n\n\n\n'),yellow(u'The project should be running now'))
def _services():
    """Yield one merged config dict per configured service.

    Each dict carries 'service' and 'service_name' plus everything from
    env.config (env.config entries win on key collision, as before).
    """
    project_name = env.config['name']
    for service in env.config['services']:
        merged = {
            'service': service,
            'service_name': '%s-%s' % (project_name, service),
        }
        merged.update(env.config)
        yield merged
@_require_environment
def start():
    '''
    * start all services
    '''
    # svc -u: bring each supervised service up.
    for service_config in _services():
        sudo('svc -u /service/%(service_name)s' % service_config)
@_require_environment
def stop():
    '''
    * stop all services
    '''
    # svc -d: take each supervised service down.
    for service_config in _services():
        sudo('svc -d /service/%(service_name)s' % service_config)
@_require_environment
def restart():
    '''
    * restart all services
    '''
    stop()
    start()
    collectstatic()
@_require_environment
def status():
    '''
    * show if services are running
    '''
    with settings(warn_only=True):
        for service_config in _services():
            sudo('svstat /service/%(service_name)s' % service_config)
@_require_environment
def setup_fs_permissions():
    """Normalize ownership and permissions of the checkout for the project user."""
    with cd(env.config['path']):
        sudo('chown %(user)s:%(user)s -R .' % env.config)
        sudo('chmod u+rw,g+rw -R .')
        sudo('chmod g+s -R .')
        sudo('chmod +x restart')
        for service in env.config['services']:
            with settings(warn_only=True):
                sudo('chmod +x services/%s' % service)
def _determine_port():
    """Return the configured django_port, or probe for a free port in 10000-11000."""
    port = env.config['django_port']
    if port:
        return port
    # A port counts as free when telnet reports "Connection refused".
    port_available = re.compile(u'Connection refused\s*$', re.IGNORECASE)
    while True:
        port = random.randint(10000, 11000)
        with settings(hide('warnings', 'stdout', 'running'), warn_only=True):
            result = sudo('echo | telnet localhost %d' % port)
        if port_available.search(result):
            return port
#######################
# Development helpers #
#######################
def _generate_secret_key():
import random
return u''.join([
random.choice(u'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
for i in range(50)
])
def _pwdgen():
import random
random.seed()
allowedConsonants = "bcdfghjklmnprstvwxz"
allowedVowels = "aeiou"
allowedDigits = "0123456789"
pwd = random.choice(allowedConsonants) + random.choice(allowedVowels) \
+ random.choice(allowedConsonants) + random.choice(allowedVowels) \
+ random.choice(allowedConsonants) + random.choice(allowedVowels) \
+ random.choice(allowedDigits) + random.choice(allowedDigits)
return pwd
def devsetup():
    """Create the local virtualenv, bootstrap pip and seed local_settings."""
    os.chdir(os.path.dirname(__file__))
    # NOTE(review): these use the remote run() wrapper for what look like
    # local steps -- confirm whether local() was intended.
    run('python3 -m venv .env --without-pip')
    with cd('.env'):
        run('wget https://bootstrap.pypa.io/get-pip.py')
        run('bin/python3 get-pip.py')
    local('cp config/activate_this.py .env/bin/activate_this.py')
    if not os.path.exists('src/website/local_settings.py'):
        local(
            'cp -p src/website/local_settings.development.py src/website/local_settings.py',
            capture=False)
def devupdate():
    """Install/refresh local Python and JS dependencies and run gulp."""
    os.chdir(os.path.dirname(__file__))
    local('.env/bin/pip3 install --upgrade -r requirements.txt')
    local('npm install')
    local('bower install')
    local('gulp')
def devinit():
    """One-shot local bootstrap: setup, update, migrate and load fixtures."""
    os.chdir(os.path.dirname(__file__))
    devsetup()
    devupdate()
    local('.env/bin/python3 manage.py migrate', capture=False)
    local('.env/bin/python3 manage.py loaddata config/adminuser.json', capture=False)
    local('.env/bin/python3 manage.py loaddata config/localsite.json', capture=False)
    # NOTE(review): _ascii_art is not defined anywhere in this file -- this
    # call raises NameError unless it is provided elsewhere.
    _ascii_art('killer')
def devenv():
    """Set up the local virtualenv and install Python requirements only."""
    os.chdir(os.path.dirname(__file__))
    devsetup()
    local('.env/bin/pip3 install --upgrade -r requirements.txt')
|
samsath/cpcc_backend
|
fabfile.py
|
Python
|
gpl-3.0
| 14,658
|
[
"GULP"
] |
bb409607d9da0a2acf86f326e485af92f3b7684f470d3faa09dbcb1f7660151e
|
from rdkit import Chem
# match_atom_index can be of type int or a list - otherwise trouble.
#
# Note that atom_type properties can also have been set in hydrogen_transformations():
#
def set_atom_type(match, match_atom_index, mol, atom_type):
    """Tag atom(s) of *mol* selected by a SMARTS *match* with an "atom_type" property.

    match -- sequence of atom indices (a SMARTS match tuple).
    match_atom_index -- int index into *match*, or a list of such ints
                        (lists are handled via the TypeError fallback).
    mol -- RDKit molecule; atoms are expected to carry a "name" property.
    atom_type -- type string to assign; atoms that already have an
                 "atom_type" property are left untouched.
    """
    try:
        this_atom = match[match_atom_index]
        try:
            # Already typed (possibly by hydrogen_transformations()): keep it.
            current_type = mol.GetAtomWithIdx(this_atom).GetProp("atom_type")
        except KeyError:
            mol.GetAtomWithIdx(this_atom).SetProp("atom_type", atom_type)
            name = mol.GetAtomWithIdx(this_atom).GetProp("name")
            if False:  # debug trace, disabled
                print ' set atom number %s having name %s with type %s ' % (str(this_atom).rjust(2),
                                                                            repr(name), repr(atom_type))
    except TypeError:
        # match_atom_index was a list: recurse over each contained index.
        for match_atom in match_atom_index:
            set_atom_type(match, match_atom, mol, atom_type)
def ele_to_smart(v):
    """Map an element symbol to a (TYPE_NAME, SMARTS, match-index) triple."""
    smarts = '[%s]' % v
    return (v.upper(), smarts, 0)
# those not handled by hand-coding
def smarts_by_element():
    """Return (TYPE, SMARTS, index) triples for elements without hand-coded rules."""
    element_symbols = [
        "He", "Li", "Be", "B", "Ne", "Na", "Mg", "Al", "Ar", "K", "Ca",
        "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ga",
        "Ge", "As", "Se", "Kr", "Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc",
        "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb", "Te", "I", "Xe",
        "Cs", "Ba", "La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb",
        "Dy", "Ho", "Er", "Tm", "Yb", "Lu", "Hf", "Ta", "W", "Re", "Os",
        "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi", "Po", "At", "Rn", "Fr",
        "Ra", "Ac", "Th", "Pa", "U"]
    # Keep map() so the return type (py2 list / py3 iterator) is unchanged.
    return map(ele_to_smart, element_symbols)
def set_atom_types(mol):
    """Assign an "atom_type" property to every atom of rdkit molecule *mol*.

    Patterns are tried in list order and set_atom_type() only types
    still-untyped atoms, so earlier (more specific) patterns win and the
    bare-element fallbacks at the end of each section catch the rest.

    Returns True when every atom received a type, False otherwise.
    """
    # (type-name, SMARTS, index-or-indices-of-atoms-to-type) triples,
    # ordered most-specific first within each element section.
    smarts_list = [
        # Full coverage for C, H, O.
        # Oxygen
        ('O2', "[OX2;H0]", 0), # ester, Os between P and C are O2, not OP
        ('OP', 'O~P', 0),
        ('OS', 'O~S', 0),
        ('OB', 'O~B', 0),
        ('OC', '*C(=O)[OH]', (2,3)), # carboxylic acid
        ('OC', '*C(=O)O', (2,3)), # carboxylate, doesn't match deloc bonds
        ('OH1', '[OH1]', 0), # alcohol
        ('O2', "[oX2;H0]", 0), # ring oxygen
        ('O', 'O=*', 0), # carbonyl oxygen
        # OH2 no examples
        # OHA no examples
        # OHB no examples
        # OHC no examples
        # OC2 no exmampes
        # Fallback oxygen
        ('O', 'O', 0),
        # Carbon SP
        ("CSP1", '[H][C]#*', 1), # e.g. in 2GT
        ("CSP", '[C]#[C]', (0,1)),
        ("CSP", '[C]#*', 0),
        # Carbon SP2
        ('CR56', 'c12aaaac1aaa2', (0,5)), # works on indole
        ('CR56', 'c12aaaan1aaa2', 0), # same pattern as (below) N56, but catching first 56 atom
        ('CR66', 'c12aaaac1aaaa2', (0,5)),
        ('CR6', 'c12ccccc1OCO2', (0,5)), # mouse, fused atoms in 6-ring not non-arom 5-ring
        ('CR66', 'c12aaaac1AAAA2', (0,5)), # one 6-ring aromatic, other not. Needed for XXX?
        # but makes a fail on 113.
        ('CR6', 'c12caccc1***2', (0,5)), # aromatic 6, (non-)aromatic 5, maybe this should be CR56?
        # note CR1 missing - can't find example
        # CR1H missing - can't find example
        ('CR16', '[cr6;H1]', 0),
        ('CR6', '[cr6;H0]', 0),
        ('CR15', '[cr5;H1]', 0),
        # ('CR5', 'C1(=O)[C,c][C,c]C=N1', 0), # carbonyl C in a (non-percieved?) 5 ring, 0CE (a)
        ('CR5', '[cr5;H0]', 0),
        ('CR5', '[CR5;H0]', 0),
        ('C1', '[CX3;H1]', 0), # double bond, single bond and one H
        ('C2', '[CX3;H2]=*', 0), # double bond, and 2 H
        ('C', '[CX3;H0;^2]', 0),
        ('C', '[CX3]=[OX1]', 0), # carbonyl carbon
        ('C', '[$([CX2](=C)=C)]', 0), # bonded to 3 things not hydrogen
        # Carbon SP3
        ('CT', '[CX4H0]', 0), # single bonded to 4 things not hydrogen
        ('CH3', '[C;H3;^3]', 0), # bonded to something via single bond and 3 Hs
        ('CH2', '[C;^3;H2]', 0), # standard aliphatic C.
        ('CH1', '*[C;H1](*)*', 1), # bonded to H and 3 things
        # sp??? needs sorting
        ('CH2', '[CH2]', 0), # bonded to 2 hydrogens
        # Carbon fallback
        ('C', '[C,c]', 0),
        # Hydrogen
        ('HCH1', '[H][CH1]', 0),
        ('HCH2', '[H][C;H2^3]', 0),
        ('HCH3', '[H][CH3]', 0),
        ('HNC1', '[H][N;H1;^2]~C(~N)~N', 0), # H of N of N=C ?
        ('HNC2', '[H][NX3;H2;^2]', 0), # H on a NC2 (NH1 and NH2 of ARG)
        ('HNC3', '[H][NX3;H3;^2]', 0), # guess - no examples
        ('HNT1', '[H][NX4;H1;^3]', 0),
        ('HNT1', '[H][NX3;H1;^3]', 0),
        ('HNT2', '[H][NX3;H2;^3]', 0), # H connected to type NT2
        ('HNT3', '[N^3;H3][H]', 1), # NH3+
        ('HNH2', '[H][NH2;^2]', 0), # NH2 is sp2
        ('HNH1', '[H][NX3;H1;^2]', 0),
        ('HCR6', '[H][cr6;H1]', 0),
        ('HCR5', '[H][cr5;H1]', 0), # connected to aromatic ring C with 1 H
        ('HNR5', '[H][nr5;H1]', 0), # connected to aromatic ring C with 1 H
        ('HNR5', '[H][Nr5;H1]', 0), # guess based on above, connected to aromatic N in a 5-ring
        ('HNR6', '[H][nr6;H1]', 0), # connected to aromatic 6-ring N with 1 H
        ('HNR6', '[H][NR6;H1]', 0), # guess based on above
        # HCR missing - no examples (and how is it different to HCR1?)
        ('HCR1', '[H]c', 0),
        ('HNH1', '[H][NH1]', 0),
        ('HOH1', '[H][OH1]', 0),
        ('HOH2', '[H][OH2][H]', (0,2)), # H of HOH - water
        ('H', '[H]', 0),
        # Nitrogen, SP3
        ('NT1', '[NX4;H1;^3]', 0),
        ('NT1', '[NX3;H1;^3]', 0),
        ('NT2', '[NX3;H2;^3]', 0), # different to mon-lib!
        ('NT3', '[NX4;H3;^3]', 0),
        ('NT', '[NX3;H0;^3]', 0),
        # NE-CZ in ARG should be deloc (guandino) - also NC1-C
        # single (as is in ARG.cif) is not found in ener_lib!
        # Nitrogen, SP2
        ('NR66', 'c12aaaan1aaaa2', 5), # (second) 66 atom is an N.
        ('NR56', 'c12aaaan1aaa2', 5), # (second) 56 atom is an N.
        ('NR55', 'c12aaan1aaa2', 4), # (second) 55 atom is an N.
        ('NC2', '[NX3;H2^2]', 0), # N of sp2 NH2 (as in ARG).
        ('NH2', '[NX3^2][CX3^2]=[N^2;X3+]', (0,2)), # amidinium (charged)...
        ('NR15', '[nr5;X3;H1]', 0),
        ('NR5', '[nr5;X3;H0]', 0),
        ('NR5', '[NR;X3;H0;^2]', 0), # [NR5;X3;H0;^2] fails on 14C (also at daylight)
        ('NRD5', '[nr5;X2;H0]', 0), # guess from 071
        ('NRD5', 'C1(=O)[C,c][C,c]C=N1', 5), # N bonded to carbonyl C in a (non-percieved?) 5 ring, 0CE (a)
        ('NR16', '[nr6;H1]', 0),
        ('NRD6', 'a:[nr6;X2;H0]:a', 1), # aromatic N with no H, i.e. one double one single
        ('NR6', '[nr6]', 0),
        ('NC1', '[H][N;H1;^2]~C(~N)~N', 1),
        ('NC1', '[NX3;H1;^2]C(~N)~N', 0), # N, as in NE in ARG
        ('NC1', '[NX2;H1;^2]', 0), # N of N=C ?
        ('NH1', '[NX3;H1;^2]', 0),
        ('NH2', '[NX3;H2;^2]', 0), # sp2, e.g. ND2 of an ASP
        ('NT', '*n1~[o]~[o]1', 1), # guess from 16X dioxaziridine (bleugh)
        # (NT needs checking?)
        # NC2 no examples
        # NC3 no examples
        # NPA no examples
        # NPB no examples
        # Nitrogen SP1
        ('NS', '[N^1]', 0),
        # NS1 no examples
        # fall-back nitrogen
        ('N', '[N,n]', 0),
        # Phosphorus
        ('P', 'P', 0),
        # Cl
        ('CL', '[Cl]', 0),
        # F
        ('F', '[F]', 0),
        # Br
        ('BR', '[Br]', 0),
        # Sulfur
        ('SH1', '[SX2H1]', 0), # SG of CYS
        ('ST', '[SX4]', 0), # tetrahedral (2 single bonds, 2 double)
        ('S1', '[S]=*', 0),
        ('S2', '[SX2,sX2]', 0),
        ('S3', '[SX3,sX3]', 0),
        ('S', '[S,s]', 0),
        # Silicon
        ('SI1', '[Si;X4]', 0), # tetragonal Si
        ('SI', '[Si]', 0) # Si any other
        ]
    # NOTE: full_list aliases smarts_list (no copy), then gets the generic
    # per-element fallbacks appended after all hand-written patterns.
    full_list = smarts_list
    for item in smarts_by_element():
        full_list.append(item)
    for smarts_info in full_list:
        atom_type, smarts, match_atom_index = smarts_info
        pattern = Chem.MolFromSmarts(smarts)
        if mol.HasSubstructMatch(pattern):
            matches = mol.GetSubstructMatches(pattern)
            if True:  # debug trace of every matching pattern
                print "SMARTS ", smarts
                print "   ", atom_type, ": ", matches
            for match in matches:
                set_atom_type(match, match_atom_index, mol, atom_type)
        else:
            # print "SMARTS ", smarts, " --- No hits "
            pass
    # do we return success (everything has a type) or not?
    #
    for atom in mol.GetAtoms():
        try:
            atom_type = atom.GetProp('atom_type')
        except KeyError:
            # Report the first untyped atom and fail.
            is_aromatic = atom.GetIsAromatic()
            hybrid = atom.GetHybridization()
            print "Error:: Missing type for atom \"", atom.GetProp('name'), "\" is_aromatic: ", is_aromatic, " hybridization: ", hybrid
            return False
    # we got to the end, good
    return True
|
jlec/coot
|
pyrogen/atom_types.py
|
Python
|
gpl-3.0
| 8,934
|
[
"RDKit"
] |
5729294a6fee8630b5077238a0a374680d52c667571040204d0a5840853cb7ba
|
"""
I/O for Tecplot ASCII data format, cf.
<https://github.com/su2code/SU2/raw/master/externals/tecio/360_data_format_guide.pdf>,
<http://paulbourke.net/dataformats/tp/>.
"""
import numpy as np
from ..__about__ import __version__ as version
from .._common import warn
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register_format
from .._mesh import Mesh
# Coercion applied to each recognized ZONE-record key when parsing.
zone_key_to_type = {
    "T": str,
    "I": int,
    "J": int,
    "K": int,
    "N": int,
    "NODES": int,
    "E": int,
    "ELEMENTS": int,
    "F": str,
    "ET": str,
    "DATAPACKING": str,
    "ZONETYPE": str,
    "NV": int,
    "VARLOCATION": str,
}
# Tecplot zone-type codes, for reference:
# 0=ORDERED
# 1=FELINESEG
# 2=FETRIANGLE
# 3=FEQUADRILATERAL
# 4=FETETRAHEDRON
# 5=FEBRICK
# 6=FEPOLYGON
# 7=FEPOLYHEDRON
# Tecplot element type (with or without the FE prefix) -> meshio cell type.
tecplot_to_meshio_type = {
    "LINESEG": "line",
    "FELINESEG": "line",
    "TRIANGLE": "triangle",
    "FETRIANGLE": "triangle",
    "QUADRILATERAL": "quad",
    "FEQUADRILATERAL": "quad",
    "TETRAHEDRON": "tetra",
    "FETETRAHEDRON": "tetra",
    "BRICK": "hexahedron",
    "FEBRICK": "hexahedron",
}
# meshio cell type -> Tecplot element type used on write; pyramids and
# wedges are written as (degenerate) bricks.
meshio_to_tecplot_type = {
    "line": "FELINESEG",
    "triangle": "FETRIANGLE",
    "quad": "FEQUADRILATERAL",
    "tetra": "FETETRAHEDRON",
    "pyramid": "FEBRICK",
    "wedge": "FEBRICK",
    "hexahedron": "FEBRICK",
}
# The set of meshio cell types this writer can export at all.
meshio_only = set(meshio_to_tecplot_type.keys())
# Column permutation applied per cell type when the mesh holds a single
# cell type; repeated indices pad pyramids/wedges to 8-node bricks.
meshio_to_tecplot_order = {
    "line": [0, 1],
    "triangle": [0, 1, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 3],
    "pyramid": [0, 1, 2, 3, 4, 4, 4, 4],
    "wedge": [0, 1, 4, 3, 2, 2, 5, 5],
    "hexahedron": [0, 1, 2, 3, 4, 5, 6, 7],
}
# Orderings for mixed-type meshes: everything is padded to quads (2D) or
# bricks (3D), including degenerate triangles and tetrahedra.
meshio_to_tecplot_order_2 = {
    "triangle": [0, 1, 2, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 2, 3, 3, 3, 3],
    "pyramid": [0, 1, 2, 3, 4, 4, 4, 4],
    "wedge": [0, 1, 4, 3, 2, 2, 5, 5],
    "hexahedron": [0, 1, 2, 3, 4, 5, 6, 7],
}
# Topological dimension per meshio cell type (used to separate 2D and 3D).
meshio_type_to_ndim = {
    "line": 1,
    "triangle": 2,
    "quad": 2,
    "tetra": 3,
    "pyramid": 3,
    "wedge": 3,
    "hexahedron": 3,
}
def read(filename):
    """Read a Tecplot ASCII file and return the resulting mesh."""
    with open_file(filename, "r") as handle:
        return read_buffer(handle)
def readline(f):
    """Return the next non-comment line of *f*, stripped of whitespace.

    Lines starting with '#' are skipped; EOF yields the empty string.
    """
    while True:
        stripped = f.readline().strip()
        if not stripped.startswith("#"):
            return stripped
def read_buffer(f):
    """Read the first zone of an open Tecplot ASCII file into a Mesh.

    Only one zone is supported; parsing stops after the first ZONE's data.
    Raises AssertionError when a required record is missing.
    """
    variables = None
    num_data = None
    zone_format = None
    zone_type = None
    is_cell_centered = None
    data = None
    cells = None
    while True:
        line = readline(f)
        if line.upper().startswith("VARIABLES"):
            # Multilines for VARIABLES appears to work only if
            # variable name is double quoted
            lines = [line]
            # Remember the stream position so we can rewind once we read
            # one line too far.
            i = f.tell()
            line = readline(f).upper()
            while True:
                if line.startswith('"'):
                    lines += [line]
                    i = f.tell()
                    line = readline(f).upper()
                else:
                    f.seek(i)
                    break
            line = " ".join(lines)
            variables = _read_variables(line)
        elif line.upper().startswith("ZONE"):
            # ZONE can be defined on several lines e.g.
            # ```
            # ZONE NODES = 62533, ELEMENTS = 57982
            # , DATAPACKING = BLOCK, ZONETYPE = FEQUADRILATERAL
            # , VARLOCATION = ([1-2] = NODAL, [3-7] = CELLCENTERED)
            # ```
            # is valid (and understood by ParaView and VisIt).
            info_lines = [line]
            i = f.tell()
            line = readline(f).upper()
            while True:
                # check if the first entry can be converted to a float
                # NOTE(review): an empty line here (e.g. EOF right after the
                # header) raises IndexError, which is not caught — confirm
                # whether truncated files should be handled.
                try:
                    float(line.split()[0])
                except ValueError:
                    info_lines += [line]
                    i = f.tell()
                    line = readline(f).upper()
                else:
                    f.seek(i)
                    break
            line = " ".join(info_lines)
            assert variables is not None
            zone = _read_zone(line)
            (
                num_nodes,
                num_cells,
                zone_format,
                zone_type,
                is_cell_centered,
            ) = _parse_fezone(zone, variables)
            # In FEBLOCK packing each variable is stored contiguously, and
            # cell-centered variables hold num_cells values, not num_nodes.
            num_data = [num_cells if i else num_nodes for i in is_cell_centered]
            data, cells = _read_zone_data(
                f,
                sum(num_data) if zone_format == "FEBLOCK" else num_nodes,
                num_cells,
                zone_format,
            )
            break  # Only support one zone, no need to read the rest
        elif not line:
            break
    assert num_data is not None
    assert zone_format is not None
    assert zone_type is not None
    assert variables is not None
    assert is_cell_centered is not None
    assert data is not None
    assert cells is not None
    # FEBLOCK: flatten and re-split per variable. FEPOINT: rows are points,
    # so transpose to get one array per variable.
    data = (
        np.split(np.concatenate(data), np.cumsum(num_data[:-1]))
        if zone_format == "FEBLOCK"
        else np.transpose(data)
    )
    data = {k: v for k, v in zip(variables, data)}
    point_data, cell_data = {}, {}
    for i, variable in zip(is_cell_centered, variables):
        if i:
            cell_data[variable] = [data[variable]]
        else:
            point_data[variable] = data[variable]
    # Coordinates may be upper- or lower-case; Z is optional (2D meshes).
    x = "X" if "X" in point_data.keys() else "x"
    y = "Y" if "Y" in point_data.keys() else "y"
    z = "Z" if "Z" in point_data.keys() else "z" if "z" in point_data.keys() else ""
    points = np.column_stack((point_data.pop(x), point_data.pop(y)))
    if z:
        points = np.column_stack((points, point_data.pop(z)))
    # Tecplot connectivity is 1-based; meshio is 0-based.
    cells = [(tecplot_to_meshio_type[zone_type], cells - 1)]
    return Mesh(points, cells, point_data, cell_data)
def _read_variables(line):
# Gather variables in a list
line = line.split("=")[1]
line = [x for x in line.replace(",", " ").split()]
variables = []
i = 0
while i < len(line):
if '"' in line[i] and not (line[i].startswith('"') and line[i].endswith('"')):
var = f"{line[i]}_{line[i + 1]}"
i += 1
else:
var = line[i]
variables.append(var.replace('"', ""))
i += 1
# Check that at least X and Y are defined
if "X" not in variables and "x" not in variables:
raise ReadError("Variable 'X' not found")
if "Y" not in variables and "y" not in variables:
raise ReadError("Variable 'Y' not found")
return variables
def _read_zone(line):
    """Parse a (joined) ZONE record into a dict of typed key/value pairs.

    The optional quoted title is stored under "T"; VARLOCATION is stored
    verbatim (spaces removed) for later parsing in _parse_fezone.
    """
    # Gather zone entries in a dict
    line = line[5:]
    zone = {}
    # Look for zone title
    ivar = line.find('"')
    # If zone contains a title, process it and save the title
    if ivar >= 0:
        i1, i2 = ivar, ivar + line[ivar + 1 :].find('"') + 2
        zone_title = line[i1 + 1 : i2 - 1]
        # Substitute the quoted span so the comma/equals splitting below
        # cannot break on characters inside the title.
        line = line.replace(line[i1:i2], "PLACEHOLDER")
    else:
        zone_title = None
    # Look for VARLOCATION (problematic since it contains both ',' and '=')
    ivar = line.find("VARLOCATION")
    # If zone contains VARLOCATION, process it and remove the key/value pair
    if ivar >= 0:
        i1, i2 = line.find("("), line.find(")")
        zone["VARLOCATION"] = line[i1 : i2 + 1].replace(" ", "")
        line = line[:ivar] + line[i2 + 1 :]
    # Split remaining key/value pairs separated by '='
    line = [x for x in line.replace(",", " ").split() if x != "="]
    i = 0
    while i < len(line):
        # Handles 'K=V', 'K= V', 'K =V' and 'K = V' token shapes.
        if "=" in line[i]:
            if not (line[i].startswith("=") or line[i].endswith("=")):
                key, value = line[i].split("=")
            else:
                key = line[i].replace("=", "")
                value = line[i + 1]
                i += 1
        else:
            key = line[i]
            value = line[i + 1].replace("=", "")
            i += 1
        # Coerce to the declared type (int/str) for this key.
        zone[key] = zone_key_to_type[key](value)
        i += 1
    # Add zone title to zone dict
    if zone_title:
        zone["T"] = zone_title
    return zone
def _parse_fezone(zone, variables):
# Check that the grid is unstructured
if "F" in zone.keys():
if zone["F"] not in {"FEPOINT", "FEBLOCK"}:
raise ReadError("Tecplot reader can only read finite-element type grids")
if "ET" not in zone.keys():
raise ReadError("Element type 'ET' not found")
zone_format = zone.pop("F")
zone_type = zone.pop("ET")
elif "DATAPACKING" in zone.keys():
if "ZONETYPE" not in zone.keys():
raise ReadError("Zone type 'ZONETYPE' not found")
zone_format = "FE" + zone.pop("DATAPACKING")
zone_type = zone.pop("ZONETYPE")
else:
raise ReadError("Data format 'F' or 'DATAPACKING' not found")
# Number of nodes
if "N" in zone.keys():
num_nodes = zone.pop("N")
elif "NODES" in zone.keys():
num_nodes = zone.pop("NODES")
else:
raise ReadError("Number of nodes not found")
# Number of elements
if "E" in zone.keys():
num_cells = zone.pop("E")
elif "ELEMENTS" in zone.keys():
num_cells = zone.pop("ELEMENTS")
else:
raise ReadError("Number of elements not found")
# Variable locations
is_cell_centered = np.zeros(len(variables), dtype=int)
if zone_format == "FEBLOCK":
if "NV" in zone.keys():
node_value = zone.pop("NV")
is_cell_centered[node_value:] = 1
elif "VARLOCATION" in zone.keys():
varlocation = zone.pop("VARLOCATION")[1:-1].split(",")
for location in varlocation:
varrange, varloc = location.split("=")
varloc = varloc.strip()
if varloc == "CELLCENTERED":
varrange = varrange[1:-1].split("-")
if len(varrange) == 1:
i = int(varrange[0]) - 1
is_cell_centered[i] = 1
else:
imin = int(varrange[0]) - 1
imax = int(varrange[1]) - 1
for i in range(imin, imax + 1):
is_cell_centered[i] = 1
return num_nodes, num_cells, zone_format, zone_type, is_cell_centered
def _read_zone_data(f, num_data, num_cells, zone_format):
    """Read a zone's numeric values and its connectivity table.

    Returns (data, cells): *data* is a list of float lists, one per input
    line; *cells* is an int array of the 1-based connectivity rows.
    """
    data = []
    n_read = 0
    while n_read < num_data:
        tokens = readline(f).split()
        if not tokens:
            continue
        data.append([float(t) for t in tokens])
        # BLOCK packing counts individual values; POINT packing counts rows.
        n_read += len(tokens) if zone_format == "FEBLOCK" else 1
    cells = []
    n_read = 0
    while n_read < num_cells:
        tokens = readline(f).split()
        if not tokens:
            continue
        cells.append([[int(t) for t in tokens]])
        n_read += 1
    return data, np.concatenate(cells)
def write(filename, mesh):
    """Write *mesh* to *filename* as a single-zone Tecplot ASCII file.

    Cell types Tecplot cannot represent are skipped with a warning; mixed
    cell types are merged by padding to quads (2D) or bricks (3D).
    Raises WriteError when no supported cell type remains.
    """
    # Check cell types
    cell_types = []
    cell_blocks = []
    for ic, c in enumerate(mesh.cells):
        if c.type in meshio_only:
            cell_types.append(c.type)
            cell_blocks.append(ic)
        else:
            warn(
                (
                    "Tecplot does not support cell type '{}'. "
                    "Skipping cell block {}."
                ).format(c.type, ic)
            )
    # Define cells and zone type
    cell_types = np.unique(cell_types)
    if len(cell_types) == 0:
        raise WriteError("No cell type supported by Tecplot in mesh")
    elif len(cell_types) == 1:
        # Nothing much to do except converting pyramids and wedges to hexahedra
        zone_type = meshio_to_tecplot_type[cell_types[0]]
        cells = np.concatenate(
            [
                mesh.cells[ic].data[:, meshio_to_tecplot_order[mesh.cells[ic].type]]
                for ic in cell_blocks
            ]
        )
    else:
        # Check if the mesh contains 2D and 3D cells
        num_dims = [meshio_type_to_ndim[mesh.cells[ic].type] for ic in cell_blocks]
        # Skip 2D cells if it does
        if len(np.unique(num_dims)) == 2:
            warn("Mesh contains 2D and 3D cells. Skipping 2D cells.")
            cell_blocks = [ic for ic, ndim in zip(cell_blocks, num_dims) if ndim == 3]
        # Convert 2D cells to quads / 3D cells to hexahedra
        zone_type = "FEQUADRILATERAL" if num_dims[0] == 2 else "FEBRICK"
        cells = np.concatenate(
            [
                mesh.cells[ic].data[:, meshio_to_tecplot_order_2[mesh.cells[ic].type]]
                for ic in cell_blocks
            ]
        )
    # Define variables
    variables = ["X", "Y"]
    data = [mesh.points[:, 0], mesh.points[:, 1]]
    # varrange tracks the 1-based [first, last] variable indices that are
    # cell-centered, for the VARLOCATION record; empty while last < first.
    varrange = [3, 0]
    if mesh.points.shape[1] == 3:
        variables += ["Z"]
        data += [mesh.points[:, 2]]
        varrange[0] += 1
    for k, v in mesh.point_data.items():
        if k not in {"X", "Y", "Z", "x", "y", "z"}:
            if v.ndim == 1:
                variables += [k]
                data += [v]
                varrange[0] += 1
            elif v.ndim == 2:
                # Vector/tensor data: one scalar variable per column.
                for i, vv in enumerate(v.T):
                    variables += [f"{k}_{i}"]
                    data += [vv]
                    varrange[0] += 1
            else:
                warn(f"Skipping point data '{k}'.")
    if mesh.cell_data:
        varrange[1] = varrange[0] - 1
        for k, v in mesh.cell_data.items():
            if k not in {"X", "Y", "Z", "x", "y", "z"}:
                # Concatenate per-block arrays in the same order as `cells`.
                v = np.concatenate([v[ic] for ic in cell_blocks])
                if v.ndim == 1:
                    variables += [k]
                    data += [v]
                    varrange[1] += 1
                elif v.ndim == 2:
                    for i, vv in enumerate(v.T):
                        variables += [f"{k}_{i}"]
                        data += [vv]
                        varrange[1] += 1
                else:
                    warn(f"Skipping cell data '{k}'.")
    with open_file(filename, "w") as f:
        # Title
        f.write(f'TITLE = "Written by meshio v{version}"\n')
        # Variables
        variables_str = ", ".join(f'"{var}"' for var in variables)
        f.write(f"VARIABLES = {variables_str}\n")
        # Zone record
        num_nodes = len(mesh.points)
        num_cells = sum(len(mesh.cells[ic].data) for ic in cell_blocks)
        f.write(f"ZONE NODES = {num_nodes}, ELEMENTS = {num_cells},\n")
        f.write(f"DATAPACKING = BLOCK, ZONETYPE = {zone_type}")
        if varrange[0] <= varrange[1]:
            f.write(",\n")
            varlocation_str = (
                f"{varrange[0]}"
                if varrange[0] == varrange[1]
                else f"{varrange[0]}-{varrange[1]}"
            )
            f.write(f"VARLOCATION = ([{varlocation_str}] = CELLCENTERED)\n")
        else:
            f.write("\n")
        # Zone data
        for arr in data:
            _write_table(f, arr)
        # CellBlock (Tecplot connectivity is 1-based)
        for cell in cells:
            f.write(" ".join(str(c) for c in cell + 1) + "\n")
def _write_table(f, data, ncol=20):
nrow = len(data) // ncol
lines = np.split(data, np.full(nrow, ncol).cumsum())
for line in lines:
if len(line):
f.write(" ".join(str(l) for l in line) + "\n")
# Register this module's reader/writer under the .dat/.tec extensions.
register_format("tecplot", [".dat", ".tec"], read, {"tecplot": write})
|
nschloe/meshio
|
src/meshio/tecplot/_tecplot.py
|
Python
|
mit
| 15,144
|
[
"ParaView",
"VisIt"
] |
971e3b7dec73c45a223e699d295f171802b58e5c4b00dea43c6cf1d769e84132
|
#!/usr/bin/python
# -*- coding: iso8859-2 -*-
#
# qclib - Quantum Computing library for Python
# Copyright (C) 2006 Robert Nowotniak <rnowotniak@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import sys
from qclib import *
class QclibTestCase(unittest.TestCase):
    """Smoke test printing results of the qclib public API.

    Relies on visual inspection of the output rather than assertions.
    """

    def runTest(self):
        # standard basis kets
        print(ket0)
        print(ket1)
        # arbitrary qubit states
        print(0.3 * ket0)
        print(0.4 * ket0 + 0.5 * ket1)
        print((0.4 * ket0 + 0.5 * ket1).normalize())
        print(repr(0.4 * ket0 + 0.5 * ket1))
        print(repr(ket0))
        # tensor product of qubits and quantum registers
        print(ket0 ** ket0)
        print(ket0 ** ket1)
        print(ket1 ** ket1)
        print(repr(ket1 ** ket1))
        print(ket0 ** ket1 ** ket0)
        print(repr(ket0 ** ket1 ** ket0))
        # elementary gates
        h = Hadamard()
        I = Identity()
        cnot = CNot()
        print(h)
        print(I)
        print(cnot)
        print(repr(cnot))
        # gate multiplication
        print(h * I)
        # tensor product of gates
        print(h ** cnot)
        print(h ** cnot ** cnot)
        # applying a gate to a register or a qubit
        print(h * I)
        print(h * ket0)
        print(h * ket1)
        # calling gates like functions
        print(h(ket0))
        print()
        cnot2 = CNot(0, 1)
        circ = (I ** h ** I) * (I ** cnot) * (cnot2 ** I)
        print(circ(ket0 ** ket0 ** ket0))
        # same circuit expressed via QCircuit/Stage, for comparison
        circ = QCircuit(
            Stage(I, h, I),
            Stage(I, cnot),
            Stage(cnot2, I)
        )
        print(circ(ket0 ** ket0 ** ket0))
        print()
        input = ket0 ** ket0 ** ket0
        circ = (I ** h ** I) * (I ** cnot) * (cnot2 ** I)
        print(circ(input))
        # NOTE(review): bare 'print' below is a Python 2 print statement
        # (prints a blank line); under Python 3 it is a no-op expression.
        # Confirm which interpreter this suite targets.
        print
        print('swap test, niesasiadujace kubity, test z cnot2')
        circ = (I ** Swap()) * (cnot2 ** I) * (I ** Swap())
        print(circ)
        input = ket1 ** ket0 ** ket1
        print(input.dirac())
        print(circ(input).dirac())
class QuantumCircuitTestCase(unittest.TestCase):
    """Placeholder for QCircuit tests; no cases implemented yet."""
    pass
class QubitTestCase(unittest.TestCase):
    """Tests for the Qubit class."""

    def setUp(self):
        # Normalized single-qubit state shared by the tests below.
        self.q1 = (0.3 * ket0 + 0.4 * ket1).normalize()

    def testFlip(self):
        pass

    def testQubit(self):
        # Visual check only: print the prepared state.
        print(self.q1)
class QRegisterTestCase(unittest.TestCase):
    """A test case for QRegister class.

    Fix: ``xrange`` (Python-2-only, a NameError under Python 3) replaced
    with ``range``, which iterates identically on both interpreters.
    """

    def setUp(self):
        # Three normalized single-qubit states, including complex amplitudes.
        self.q1 = (0.3 * ket0 + 0.4 * ket1).normalize()
        self.q2 = (0.5 * ket0 + 0.333 * ket1).normalize()
        self.q3 = ((0.3j + 0.7) * ket0 + (0.4 + 0.1j) * ket1).normalize()

    def testNormalize(self):
        q1 = (0.3 * ket0 + 0.4 * ket1).normalize()
        q2 = (0.5 * ket0 + 0.333 * ket1).normalize()
        q3 = ((0.3j + 0.7) * ket0 + (0.4 + 0.1j) * ket1).normalize()
        for q in (q1, q2, q3):
            # Squared amplitude moduli of a normalized state must sum to 1.
            assert abs(sum(array(abs(q.matrix)) ** 2) - 1) < epsilon, \
                'Not normalized state'

    def testKets(self):
        pass

    def testTensor(self):
        pass

    def testGates(self):
        pass

    def testDirac(self):
        assert ket0.dirac() == '|0>'
        assert ket1.dirac() == '|1>'
        assert (ket0**ket1).dirac() == '|01>'

    def testEpr(self):
        inp = ket0 ** ket0
        pair = epr(inp)
        # Expect the Bell state (|00> + |11>)/sqrt(2).
        assert sum(abs(pair.matrix - transpose(matrix([sqrt(2)/2, 0, 0, sqrt(2)/2])))) < epsilon, \
            'Not an EPR pair'

    def testKet(self):
        print((Ket(5) + Ket(6)).normalize().dirac())

    def testMeasureAll(self):
        # Measuring a basis state is deterministic.
        assert ket0.measure() == ket0
        assert ket1.measure() == ket1
        res = [0, 0]
        for i in range(100):
            q = (ket0 + ket1).normalize()
            q.measure()
            if q == ket0:
                res[0] += 1
            elif q == ket1:
                res[1] += 1
            else:
                self.fail('Not possible measurement result')
        assert res[0] + res[1] == 100, 'Not possible measurements result'
        # Crude fairness check for the 50/50 superposition (statistical).
        assert abs(res[0] - 50) < 15, 'Not fair distribution of results'
        for i in range(10):
            q = (Ket(5) + Ket(6)).normalize()
            q.measure()
            if q != Ket(5) and q != Ket(6):
                self.fail('Not possible measurement result')
        q0 = QRegister([ones(8) / sqrt(8)])
        q0.measure()
        assert q0 in [Ket(n, 3) for n in range(8)]
        q = (0.9 + 0.6j) * Ket(1, 2) + (0.7 - .1j) * Ket(2,2)
        q.normalize()
        q.measure()

    def testMeasureSome(self):
        q0 = QRegister([ones(8) / sqrt(8)])
        print(q0)
        q0 = QRegister([ones(8) / sqrt(8)])
        print(q0.measure(0))
        q0 = QRegister([ones(8) / sqrt(8)])
        print(q0.measure(2, 1))
        print(q0)
        print(q0.dirac())
        q = ket0 ** (s2 * ket0 + s2 * ket1).normalize() ** ket1
        # Measuring the middle qubit collapses it to |0> or |1>.
        assert q.measure(1) in (Ket(0), Ket(1))
        assert q in (Ket(1, 3), Ket(3, 3))
if __name__ == '__main__':
    # Assemble and run all test cases with verbose output.
    # NOTE(review): unittest.makeSuite is deprecated (removed in Python
    # 3.13); defaultTestLoader.loadTestsFromTestCase is the modern form.
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(QRegisterTestCase))
    suite.addTest(unittest.makeSuite(QubitTestCase))
    suite.addTest(unittest.makeSuite(QclibTestCase))
    unittest.TextTestRunner(verbosity = 2).run(suite)
|
rnowotniak/qclib
|
qctest.py
|
Python
|
gpl-3.0
| 5,888
|
[
"DIRAC"
] |
4c4780398f00577132b94451422b5e8f410655407c1c384808861972e42e5137
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Defines parameters for simulation, used by example_parallel_network.py script
Copyright (C) 2018 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import matplotlib
import os
if 'DISPLAY' not in os.environ.keys():
matplotlib.use('agg')
import os
import numpy as np
from scipy import stats
from glob import glob
import json
from parameters import ParameterSet
from mpi4py import MPI
import neuron
import sys
from urllib.request import urlopen
from example_parallel_network_methods import get_templatename, get_params, \
get_syn_params
import LFPy
stringType = 'U'
# set up MPI environment
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
# load some neuron-interface files needed for the EPFL cell types
neuron.h.load_file("stdrun.hoc")
neuron.h.load_file("import3d.hoc")
#######################
# Functions
#######################
def get_pre_m_type(post):
    '''Return the most populous excitatory m_type within the layer of
    m_type *post*, taken as representative for excitatory external
    connections onto the postsynaptic cells. Returns None when *post*
    does not match any known layer prefix.'''
    # layer prefix -> dominant excitatory m_type
    layer_to_exc = (
        ('L23', 'L23_PC'),
        ('L4', 'L4_PC'),
        ('L5', 'L5_TTPC1'),
        ('L6', 'L6_IPC'),
    )
    for prefix, m_type in layer_to_exc:
        if post.startswith(prefix):
            return m_type
    return None
#######################
# Parameters
#######################
# test mode (1 cell per pop, all-to-all connectivity)
TESTING = False
# Creating a NeuroTools.parameters.ParameterSet object for the main parameters
PSET = ParameterSet({})
# output file destination
if TESTING:
PSET.OUTPUTPATH = 'example_parallel_network_output_testing'
else:
PSET.OUTPUTPATH = 'example_parallel_network_output'
# input file paths
# PATHs to current cell-specific files and NMODL files
PSET.CWD = os.getcwd()
PSET.CELLPATH = 'hoc_combos_syn.1_0_10.allzips'
PSET.NMODL = 'hoc_combos_syn.1_0_10.allmods'
########################################################
# Simulation control
########################################################
PSET.dt = 2**-4 # simulation time step size (ms)
PSET.tstop = 1500. # simulation duration (ms)
PSET.v_init = -77. # membrane voltage(s) at t = 0 for all cells
PSET.celsius = 34. # global temperature setting affecting active channels
PSET.TRANSIENT = 500. # duration of startup transient
# population size scaling (multiplied with values in
# populationParams['POP_SIZE']):
PSET.POPSCALING = 1.
# global scaling of connection probabilities (to counteract POPSCALING)
PSET.CONNPROBSCALING = 1. / PSET.POPSCALING
# switch for fully connected network (do not use with large population sizes)
PSET.fully_connected = True if TESTING else False
# bool flag switching LFP calculations on or off (faster)
PSET.COMPUTE_LFP = True
# bool flag switching ECoG calculation on or off
PSET.COMPUTE_ECOG = PSET.COMPUTE_LFP
# bool flag switching on calculations of electric current dipole moments
# per population
PSET.COMPUTE_P = PSET.COMPUTE_LFP
# bool flag switching on calculations of contributions to the extracellular
# potential per population
PSET.rec_pop_contributions = PSET.COMPUTE_LFP
# downsample factor for timeseries plots
PSET.decimate_q = 10
# settings for filtered signals shown in plots (fc=100 Hz, lowpass)
PSET.filterargs = dict(
N=2,
Wn=100. *
2. *
PSET.dt /
1000 *
PSET.decimate_q,
btype='lowpass')
# Base NetworkCell arguments, morphology and template specific args is
# defined below.
cellParams = {
'passive': False,
'nsegs_method': None,
'v_init': PSET.v_init,
'tstart': 0,
'tstop': PSET.tstop,
'dt': PSET.dt,
'verbose': False,
'extracellular': False,
'delete_sections': False,
}
# some stimuli to activate the network
PSET.PointProcParams = {
'idx': 0,
'record_current': False,
'pptype': 'IClamp',
# 'amp' : 0.793, # amplitude parameter set later on
'dur': 1E6,
'delay': 0.,
}
# parameters for predicting extracellular potentials, specifying
# coordinates of electrodes and extracellular conductivity. So far only
# point contacts
PSET.electrodeParams = {
'x': np.zeros(16),
'y': np.zeros(16),
'z': np.linspace(-1500, 0, 16)[::-1],
'sigma': 0.3,
'n': 50,
'N': np.array([[0., 1., 0]] * 16),
'r': 5.,
'method': 'root_as_point',
}
# parameters for 4-sphere volume conductor model
# compute electrode positions on the outer radius for different angular offsets
_theta = np.linspace(-np.pi / 4, np.pi / 4, 9)
_x = 90000. * np.sin(_theta)
_y = np.zeros(_theta.size)
_z = 90000. * np.cos(_theta)
PSET.foursphereParams = {
'radii': [79000., 80000., 85000., 90000.], # shell radii
'sigmas': [0.3, 1.5, 0.015, 0.3], # shell conductivity
'r_electrodes': np.c_[_x, _y, _z], # contact coordinates
}
# Optional arguments to Network.simulate() for computing extracellular
# contribution from passive leak, membrane capactitance and synaptic currents
PSET.NetworkSimulateArgs = {
'use_ipas': False,
'use_icap': False,
'use_isyn': False,
'to_memory': True,
}
# layer thickness top to bottom L1-L6, Markram et al. 2015 Fig 3A.
PSET.layer_data = np.array([('L1', 165., -82.5),
('L2', 149., -239.5),
('L3', 353., -490.5),
('L4', 190., -762.),
('L5', 525, -1119.5),
('L6', 700, -1732.)],
dtype=[('layer', '|{}2'.format(stringType)),
('thickness', float), ('center', float)])
# Define electrode geometry corresponding to an ECoG electrode, where contact
# points have a radius r, surface normal vectors N, and ECoG is calculated as
# the average LFP in n random points on each contact:
PSET.ecogParameters = {
'sigma_S': 0., # CSF conductivity
'sigma_T': 0.3, # GM conductivity
'sigma_G': 0.3, # WM conductivity
'h': PSET.layer_data['thickness'].sum(),
'x': np.array([0.]), # x,y,z-coordinates of electrode contacts
'y': np.array([0.]),
'z': np.array([0.]),
# +PSET.layer_data[4]['thickness']/8,
'z_shift': -PSET.layer_data['thickness'].sum(),
'n': 500,
'r': 250, # ECoG radii are often 500-1000 um
'N': np.array([[0., 0., 1.]]),
'method': "pointsource",
}
# Main population parameters:
PSET.populationParameters = np.array([
# Layer 4
# Excitatory
('L4_PC', 'cAD', 'L4_PC_cADpyr230_1', 2674,
dict(
radius=210,
loc=PSET.layer_data[3]['center'],
scale=100.,
cap=[
1078.,
97.]),
dict(x=np.pi / 2, y=0.),
['dend', 'apic'],
['dend', 'apic'],
0.125, 5.),
# Inhibitory
('L4_LBC', 'dNAC', 'L4_LBC_dNAC222_1', 122,
dict(
radius=210,
loc=PSET.layer_data[3]['center'],
scale=100.,
cap=[
938.,
670]),
dict(x=np.pi / 2, y=0.),
['soma', 'dend', 'apic'],
['dend', 'apic'],
0.125, 5.),
# Layer 5
# Excitatory
('L5_TTPC1', 'cAD', 'L5_TTPC1_cADpyr232_1', 2403,
dict(
radius=210,
loc=PSET.layer_data[4]['center'],
scale=125.,
cap=[
719,
73.]),
dict(x=np.pi / 2, y=0.),
['dend', 'apic'],
['dend', 'apic'],
0.1, 5.),
# Inhibitory
('L5_MC', 'bAC', 'L5_MC_bAC217_1', 395,
dict(
radius=210,
loc=PSET.layer_data[4]['center'],
scale=125.,
cap=[
378.,
890]),
dict(x=np.pi / 2, y=0.),
['soma', 'dend', 'apic'],
['dend', 'apic'],
0.125, 5.),
],
dtype=[('m_type', '|{}32'.format(stringType)),
('e_type', '|{}32'.format(stringType)),
('me_type', '|{}32'.format(stringType)
), ('POP_SIZE', 'i8'), ('pop_args', dict),
('rotation_args', dict), ('syn_section', list),
('extrinsic_input_section', list),
('extrinsic_input_density', 'f8'),
('extrinsic_input_frequency', 'f8')])
# column data:
# shortnames as used in pathway_*.json files
# names as used to denote individual cell types
# POP_SIZE : number of neurons for each morphological type as given on
# https://bbp.epfl.ch/nmc-portal/microcircuit
# pop_args : dict,
# radius, mean position (loc) and standard deviation (scale) of the soma
# positions
# rotation_args : dict, default rotations around x and y axis applied to
# each cell in the population using LFPy.NetworkCell.set_rotation()
# method.
# syn_section : list
# list of section names where outgoing connections from this population
# are made onto postsynaptic neurons (i.e., no excitatory synapses on
# somatic sections anywhere)
# extrinsic_input_density : density of extrinisc incoming connections in
# units of [µm^-2]
# extrinsic_input_frequency : frequency of synapse activation in units of [Hz]
# TODO: Define only short names, pick random cell types or similar when
# creating populations. Column could be redone as
# [('m_type', '|U8'), ('e-type', '|U8')] and
# single cell objects picked from the glob('m+e type') on random
# # Override population sizes (for testing)
if TESTING:
PSET.populationParameters['POP_SIZE'] = np.ones(
PSET.populationParameters.size)
# Define a layer-specificity of connections L_YXL
# (see Hagen, Dahmen et al. (2016), Cereb Cortex) based on the anatomy of
# dendrites and axons. We here define this depth-dependence of synapse
# positioning as the product of total [soma + dendrite] length and
# total axon length in spatial bins corresponding to the thickness and
# boundaries of each layer. The products are normalized such that the sum of
# each column is 1, i.e., the sum of layer specificities of a connection
# between X and Y is 1.
PSET.L_YXL_m_types = {}
# NOTE(review): 'bins' is assigned here but not used below ('layerbounds'
# is used instead) — presumably consumed later in the file; confirm.
bins = np.r_[-PSET.layer_data['thickness'].cumsum()[::-1], 0]
# Outer loop: postsynaptic (target) population Y; one L_YXL matrix per Y.
for i, (y, Y, pop_args_Y, rotation_args_Y) in enumerate(zip(
        PSET.populationParameters['m_type'],
        PSET.populationParameters['me_type'],
        PSET.populationParameters['pop_args'],
        PSET.populationParameters['rotation_args'])):
    # create a container for the layer specificities of connections
    data = np.zeros((PSET.layer_data.size,
                     PSET.populationParameters.size))
    # find and load the corresponding morphology files into LFPy
    m_Y = glob(os.path.join(PSET.CELLPATH, Y, 'morphology', '*.asc'))[0]
    cell_Y = LFPy.Cell(morphology=m_Y)
    cell_Y.set_rotation(**rotation_args_Y)
    cell_Y.set_pos(z=pop_args_Y['loc'])
    # sum the total length of [soma + dendrite] of Y in each layer bin
    # (layer boundaries run from 0 at the surface downwards, hence negative)
    layerbounds = np.r_[0, -PSET.layer_data['thickness'].cumsum()]
    len_Y_sum = np.zeros(PSET.layer_data.size)
    for k in range(PSET.layer_data.size):
        len_Y_sum[k] = cell_Y.length[cell_Y.get_idx(
            ['soma', 'dend', 'apic'],
            z_min=layerbounds[k + 1],
            z_max=layerbounds[k])].sum()
    # Inner loop: presynaptic population X; sum axon length per layer bin.
    for j, (X, pop_args_X, rotation_args_X) in enumerate(zip(
            PSET.populationParameters['me_type'],
            PSET.populationParameters['pop_args'],
            PSET.populationParameters['rotation_args'])):
        m_X = glob(os.path.join(PSET.CELLPATH, X, 'morphology', '*.asc'))[0]
        cell_X = LFPy.Cell(morphology=m_X)
        cell_X.set_rotation(**rotation_args_X)
        cell_X.set_pos(z=pop_args_X['loc'])
        len_X_sum = np.zeros(PSET.layer_data.size)
        for k in range(PSET.layer_data.size):
            len_X_sum[k] = cell_X.length[cell_X.get_idx(
                'axon', z_min=layerbounds[k + 1], z_max=layerbounds[k])].sum()
        # geometric mean of pre-axon and post-dendrite length per layer,
        # normalized so each column sums to 1
        # NOTE(review): if the product is all-zero for a pair this divides
        # by zero and yields NaNs — presumably never the case for these
        # morphologies; verify.
        data[:, j] = np.sqrt(len_Y_sum * len_X_sum) / \
            np.sqrt(len_Y_sum * len_X_sum).sum()
    # fill in
    PSET.L_YXL_m_types[y] = data
# clean up namespace
del cell_X, cell_Y, len_X_sum, len_Y_sum, data
# Container for LFPy.NetworkCell class parameters (path to morphology file
# etc.)
PSET.cellParameters = dict()
##########################################################################
# Set up various files and folders such that single-cell models from BBP can
# be used, and extract some numbers from pathway .json files
##########################################################################
# TODO: Add automated download of cell models from EPFL microcircuit portal
# autodownload some json files with anatomical and pathway specific data
pathway_files = ['pathways_anatomy_factsheets_simplified.json',
                 'pathways_physiology_factsheets_simplified.json']
# Only MPI rank 0 downloads the factsheet files (if not already present);
# all other ranks wait at the barrier below until the files exist on disk.
if RANK == 0:
    for fname in pathway_files:
        if not os.path.isfile(fname):
            u = urlopen(
                'https://bbp.epfl.ch/nmc-portal/documents/10184/7288948/' +
                fname)
            localFile = open(fname, 'w')
            localFile.write(u.read().decode('utf-8'))
            localFile.close()
            u.close()
COMM.Barrier()
# flag for cell template file to switch on (inactive) synapses
add_synapses = False
# load synapse file info for each cell type as structured arrays in dictionary
# Column layout of the per-cell synapses.tsv files shipped with the BBP
# single-cell models; used below when parsing with np.loadtxt.
synapses_tsv_dtype = [
    ('synapse_id', int),
    ('pre_cell_id', int),
    ('pre_mtype', int),
    ('sectionlist_id', int),
    ('sectionlist_index', int),
    ('seg_x', float),
    ('synapse_type', int),
    ('dep', float),
    ('fac', float),
    ('use', float),
    ('tau_d', float),
    ('delay', float),
    ('weight', float)
]
# filled per m_type further below
synapses_tsv = {}
# attempt to set up a folder with all unique EPFL mechanism mod files,
# compile, and load them all in order to be able to load cells as
# LFPy.NetworkCell objects
# Rank 0 performs the copy/patch/compile; other ranks wait at the barrier
# and then only load the compiled mechanisms.
if RANK == 0:
    if not os.path.isdir(PSET.NMODL):
        os.mkdir(PSET.NMODL)
    for NRN in PSET.populationParameters['me_type']:
        for nmodl in glob(os.path.join(
                PSET.CELLPATH, NRN, 'mechanisms', '*.mod')):
            # NOTE(review): the while-loop retries 'cp' until the target
            # file exists — presumably a workaround for flaky shared
            # filesystems; a plain shutil.copy would normally suffice.
            while not os.path.isfile(
                    os.path.join(PSET.NMODL, os.path.split(nmodl)[-1])):
                os.system('cp {} {}'.format(nmodl,
                                            os.path.join(PSET.NMODL,
                                                         '.')))
    os.chdir(PSET.NMODL)
    # patch faulty ProbGABAAB_EMS.mod file (otherwise stochastic inhibitory
    # synapses will stay closed except at first activation)
    diff = '''319c319
< urand = scop_random(1)
---
> value = scop_random(1)
'''
    f = open('ProbGABAAB_EMS.patch', 'w')
    f.writelines(diff)
    f.close()
    os.system('patch ProbGABAAB_EMS.mod ProbGABAAB_EMS.patch')
    # compile all NMODL mechanisms in the folder
    os.system('nrnivmodl')
    os.chdir(PSET.CWD)
COMM.Barrier()
# every rank loads the compiled mechanism library into NEURON
neuron.load_mechanisms(PSET.NMODL)
os.chdir(PSET.CWD)
# Fill in dictionary of population-specific cell parameters
# For each me_type: read the hoc template names, load the (not yet loaded)
# hoc files into NEURON, and assemble the LFPy.NetworkCell parameter dict.
for NRN in PSET.populationParameters['me_type']:
    os.chdir(os.path.join(PSET.CWD, PSET.CELLPATH, NRN))
    # get the template name
    f = open("template.hoc", 'r')
    templatename = get_templatename(f)
    f.close()
    # get biophys template name
    f = open("biophysics.hoc", 'r')
    biophysics = get_templatename(f)
    f.close()
    # get morphology template name
    f = open("morphology.hoc", 'r')
    morphology = get_templatename(f)
    f.close()
    # get synapses template name
    f = open(os.path.join("synapses", "synapses.hoc"), 'r')
    synapses = get_templatename(f)
    f.close()
    # load each hoc file at most once into the NEURON interpreter
    if not hasattr(neuron.h, morphology):
        """Create the cell model"""
        # Load morphology
        neuron.h.load_file(1, "morphology.hoc")
    if not hasattr(neuron.h, biophysics):
        # Load biophysics
        neuron.h.load_file(1, "biophysics.hoc")
    if not hasattr(neuron.h, synapses):
        # load synapses
        neuron.h.load_file(1, os.path.join('synapses', 'synapses.hoc'))
    if not hasattr(neuron.h, templatename):
        # Load main cell template
        neuron.h.load_file(1, "template.hoc")
    # create parameter dictionaries specific for each cell type (population)
    # (merge the per-type entries with the shared cellParams defaults)
    PSET.cellParameters[NRN] = dict(list(dict(
        morphology=glob(os.path.join('morphology', '*'))[0],
        templatefile=os.path.join(NRN, 'template.hoc'),
        templatename=templatename,
        templateargs=1 if add_synapses else 0,
    ).items()) + list(cellParams.items()))
# load synapse and connectivity data. mtype_map is the same for all cell types
    # NOTE(review): this still runs inside the me_type loop (relative paths
    # depend on the chdir above); mtype_map is simply overwritten each pass.
    if sys.version < '3':
        with open(os.path.join('synapses', 'mtype_map.tsv')) as f:
            mtype_map = np.loadtxt(f,
                                   dtype={'names': ('pre_mtype_id', 'pre_mtype'),
                                          'formats': ('i4', '{}9'.format(
                                              stringType))},
                                   converters={1: lambda s: s.decode()})
    else:
        with open(os.path.join('synapses', 'mtype_map.tsv'),
                  encoding='us-ascii') as f:
            mtype_map = np.loadtxt(f,
                                   dtype={'names': ('pre_mtype_id', 'pre_mtype'),
                                          'formats': ('i4', '{}9'.format(
                                              stringType))},
                                   converters={1: lambda s: s.decode()})
    os.chdir(PSET.CWD)
# Concatenate the synapses.tsv tables of every me_type matching each m_type
# into one structured array per m_type.
for name in PSET.populationParameters['m_type']:
    files = glob(
        os.path.join(
            PSET.CELLPATH,
            name + '*',
            'synapses',
            'synapses.tsv'))
    synapses_tsv[name] = np.array([], dtype=synapses_tsv_dtype)
    for f in files:
        # skiprows=1 drops the header line of each .tsv file
        synapses_tsv[name] = np.r_[
            synapses_tsv[name], np.loadtxt(
                f, dtype=synapses_tsv_dtype, skiprows=1)]
# Open pathway anatomy and physiology factsheet files and read out info
# For every (pre, post) m_type pair, copy the factsheet entry; pairs missing
# from the files get all-zero (anatomy) / nominal (physiology) dummy values,
# which results in no synapses being created for that pathway.
pathways_anatomy = dict()
pathways_physiology = dict()
f = open(pathway_files[0], 'r')
j = json.load(f)
for pre in PSET.populationParameters['m_type']:
    for post in PSET.populationParameters['m_type']:
        key = '{}:{}'.format(pre, post)
        try:
            pathways_anatomy[key] = j[key]
        except KeyError:
            # fill in dummy data, no synapses will be created
            print('no pathway anatomy data for connection {}'.format(key))
            # NOTE(review): the py2/py3 branches write identical dicts
            # (u'' and '' compare equal); the split looks redundant.
            if sys.version < '3':
                pathways_anatomy[key] = {
                    'common_neighbor_bias': 0,
                    'connection_probability': 0,
                    'mean_number_of_synapse_per_connection': 0,
                    'number_of_convergent_neuron_mean': 0,
                    'number_of_convergent_neuron_std': 0,
                    'number_of_divergent_neuron_mean': 0,
                    'number_of_divergent_neuron_std': 0,
                    'number_of_synapse_per_connection_std': 0,
                    'total_synapse_count': 0,
                }
            else:
                pathways_anatomy[key] = {
                    u'common_neighbor_bias': 0,
                    u'connection_probability': 0,
                    u'mean_number_of_synapse_per_connection': 0,
                    u'number_of_convergent_neuron_mean': 0,
                    u'number_of_convergent_neuron_std': 0,
                    u'number_of_divergent_neuron_mean': 0,
                    u'number_of_divergent_neuron_std': 0,
                    u'number_of_synapse_per_connection_std': 0,
                    u'total_synapse_count': 0,
                }
f.close()
j.clear()
# Same procedure for the physiology factsheets; the dummy values here are
# plausible excitatory-synapse defaults rather than zeros.
f = open(pathway_files[1], 'r')
j = json.load(f)
for pre in PSET.populationParameters['m_type']:
    for post in PSET.populationParameters['m_type']:
        key = '{}:{}'.format(pre, post)
        try:
            pathways_physiology[key] = j[key]
        except KeyError:
            # fill in dummy data, no synapses will be created
            print('no pathway physiology data for connection {}'.format(key))
            if sys.version < '3':
                pathways_physiology[key] = {
                    'cv_psp_amplitude_mean': 3,
                    'cv_psp_amplitude_std': 0.95,
                    'd_mean': 360,
                    'd_std': 230,
                    'decay_mean': 9.8,
                    'decay_std': 6.7,
                    'epsp_mean': 1.6,
                    'epsp_std': 0.78,
                    'f_mean': 330,
                    'f_std': 240,
                    'failures_mean': 86,
                    'failures_std': 6.5,
                    'gsyn_mean': 0.3,
                    'gsyn_std': 0.11,
                    'latency_mean': 0.33,
                    'latency_std': 0.18,
                    'risetime_mean': 0.43,
                    'risetime_std': 0.47,
                    'space_clamp_correction_factor': 3.6,
                    'synapse_type': u'Excitatory, depressing',
                    'u_mean': 0.19,
                    'u_std': 0.23
                }
            else:
                pathways_physiology[key] = {
                    u'cv_psp_amplitude_mean': 3,
                    u'cv_psp_amplitude_std': 0.95,
                    u'd_mean': 360,
                    u'd_std': 230,
                    u'decay_mean': 9.8,
                    u'decay_std': 6.7,
                    u'epsp_mean': 1.6,
                    u'epsp_std': 0.78,
                    u'f_mean': 330,
                    u'f_std': 240,
                    u'failures_mean': 86,
                    u'failures_std': 6.5,
                    u'gsyn_mean': 0.3,
                    u'gsyn_std': 0.11,
                    u'latency_mean': 0.33,
                    u'latency_std': 0.18,
                    u'risetime_mean': 0.43,
                    u'risetime_std': 0.47,
                    u'space_clamp_correction_factor': 3.6,
                    u'synapse_type': u'Excitatory, depressing',
                    u'u_mean': 0.19,
                    u'u_std': 0.23
                }
f.close()
j.clear()
# get out stats for synapses and connections, temporary
syn_param_stats = get_syn_params(PSET.populationParameters['m_type'],
                                 PSET.populationParameters['me_type'],
                                 pathways_physiology, mtype_map, synapses_tsv)
# free the (potentially large) raw synapse tables
del synapses_tsv  # not needed anymore.
###########################################################################
# Set up main connection parameters used by Network class instance methods
############################################################################
# Main connection parameters between pre and post-synaptic populations
# organized as dictionary of parameter lists between pre and postsynaptic
# populations:
if PSET.fully_connected:
    # fully connected network (no selfconnections)
    # NOTE(review): the matrix itself is all ones incl. the diagonal —
    # self-connections are presumably excluded by the Network class; verify.
    connprob = [[1] * PSET.populationParameters.size] * \
        PSET.populationParameters.size
else:
    connprob = get_params(PSET.populationParameters['m_type'],
                          pathways_anatomy,
                          'connection_probability',
                          # unit conversion % -> fraction
                          0.01 * PSET.CONNPROBSCALING)
# Recurrent-connection parameters consumed by LFPy Network connect methods.
# All per-pathway entries are (n_pop x n_pop) nested lists indexed
# [pre][post]; synapse_type >= 100 marks excitatory (AMPA/NMDA) pathways,
# smaller values inhibitory (GABA_A/B) ones.
PSET.connParams = dict(
    # connection probabilities between populations
    connprob=connprob,
    # synapse mechanisms
    syntypes=[[neuron.h.ProbAMPANMDA_EMS
               if syn_param_stats['{}:{}'.format(pre, post)
                                  ]['synapse_type'] >= 100 else
               neuron.h.ProbGABAAB_EMS
               for post in PSET.populationParameters['m_type']]
              for pre in PSET.populationParameters['m_type']],
    # synapse time constants and reversal potentials.
    # Use the mean/global EPFL synapse model parameters
    # (for now) as some connections appear to be missing in pathway files.
    synparams=[[dict(
        Use=syn_param_stats['{}:{}'.format(pre, post)]['Use_mean'],
        Dep=syn_param_stats['{}:{}'.format(pre, post)]['Dep_mean'],
        Fac=syn_param_stats['{}:{}'.format(pre, post)]['Fac_mean'],
        tau_r_AMPA=0.2,
        tau_d_AMPA=syn_param_stats['{}:{}'.format(pre, post)]['tau_d_mean'],
        tau_r_NMDA=0.29,
        tau_d_NMDA=43,
        e=0,
        mg=1,
        u0=0,
        synapseID=0,
        verboseLevel=0,
        NMDA_ratio=0.4  # this may take on several values in synconf.txt files,
        # not accounted for here
    )
        if syn_param_stats['{}:{}'.format(pre, post)
                           ]['synapse_type'] >= 100 else
        dict(
        Use=syn_param_stats['{}:{}'.format(pre, post)]['Use_mean'],
        Dep=syn_param_stats['{}:{}'.format(pre, post)]['Dep_mean'],
        Fac=syn_param_stats['{}:{}'.format(pre, post)]['Fac_mean'],
        tau_r_GABAA=0.2,
        # from synapses.hoc: rng.lognormal(0.2, 0.1) (mean, variance)
        tau_d_GABAA=syn_param_stats['{}:{}'.format(pre, post)]['tau_d_mean'],
        tau_r_GABAB=3.5,
        tau_d_GABAB=260.9,
        e_GABAA=-80,
        e_GABAB=-75.8354,
        u0=0,
        synapseID=0,
        verboseLevel=0,
        GABAB_ratio=0.0,
        # this may take on several values, in synconf.txt files, not accounted
        # for here
    )
        for post in PSET.populationParameters['m_type']]
        for pre in PSET.populationParameters['m_type']],
    # maximum conductances
    weightfuns=[[np.random.normal] * PSET.populationParameters.size] * \
        PSET.populationParameters.size,
    weightargs=get_params(PSET.populationParameters['m_type'],
                          pathways_physiology,
                          ['gsyn_mean', 'gsyn_std'], 1.),
    # Correct??? (very small PSPs otherwise).
    # Also, weights in unknown units loaded from synapses_tsv is different
    # than the reported averaged gsyn.
    # connection delays
    delayfuns=[[np.random.normal] * PSET.populationParameters.size] * \
        PSET.populationParameters.size,
    delayargs=[[dict(
        loc=syn_param_stats['{}:{}'.format(pre, post)]['delay_mean'],
        scale=syn_param_stats['{}:{}'.format(pre, post)]['delay_std']
    ) for post in PSET.populationParameters['m_type']]
        for pre in PSET.populationParameters['m_type']],
    # delays less than this value will be redrawn
    # (2**-3 ms == one default NEURON dt-compatible minimum delay)
    mindelay=2**-3,
    # numbers of synapses per connection
    multapsefuns=[[np.random.normal] \
                  * PSET.populationParameters.size] \
        * PSET.populationParameters.size,
    multapseargs=get_params(PSET.populationParameters['m_type'],
                            pathways_anatomy,
                            ['mean_number_of_synapse_per_connection',
                             'number_of_synapse_per_connection_std']),
    # parameters for finding random synapse locations using the method
    # LFPy.Cell.get_rand_idx_area_and_distribution_norm. The argument nidx is
    # default to 1
    # (funweights picks the column of L_YXL for the presynaptic index i)
    syn_pos_args=[[dict(section=syn_section,
                        z_min=-1E6,
                        z_max=1E6,
                        fun=[stats.norm] * PSET.layer_data.size,
                        funargs=[dict(loc=loc, scale=scale / 2.)
                                 for loc, scale in PSET.layer_data[
                                     ['center', 'thickness']]],
                        funweights=PSET.L_YXL_m_types[post_m_type][:, i]
                        ) for i, pre_m_type in enumerate(
        PSET.populationParameters['m_type'])]
        for post_m_type, syn_section in PSET.populationParameters[
            ['m_type', 'syn_section']]],
)
# save connection data
PSET.save_connections = True
# connection parameters for synapses activated by putative external
# population(s)
# Extrinsic inputs are always excitatory (ProbAMPANMDA_EMS); the synapse
# parameters are copied from the excitatory pathway targeting each m_type
# (get_pre_m_type resolves the layer's excitatory presynaptic m_type).
PSET.connParamsExtrinsic = dict(
    # synapse type
    syntype='ProbAMPANMDA_EMS',
    # synapse parameters (assumes parameters of excitatory population in the
    # layer)
    synparams=[dict(
        Use=syn_param_stats['{}:{}'.format(
            get_pre_m_type(post), post)]['Use_mean'],
        Dep=syn_param_stats['{}:{}'.format(
            get_pre_m_type(post), post)]['Dep_mean'],
        Fac=syn_param_stats['{}:{}'.format(
            get_pre_m_type(post), post)]['Fac_mean'],
        tau_r_AMPA=0.2,
        tau_d_AMPA=syn_param_stats['{}:{}'.format(
            get_pre_m_type(post), post)]['tau_d_mean'],
        tau_r_NMDA=0.29,
        tau_d_NMDA=43,
        e=0,
        mg=1,
        u0=0,
        synapseID=0,
        verboseLevel=0,
        NMDA_ratio=0.4  # this may take on several values in synconf.txt files,
        # not accounted for here
    ) for post in PSET.populationParameters['m_type']],
    # maximum conductances
    weightfuns=[np.random.normal] * PSET.populationParameters.size,
    weightargs=[get_params(np.array([m_type]), pathways_physiology,
                           ['gsyn_mean', 'gsyn_std'], 1.)[0][0]
                for m_type in PSET.populationParameters['m_type']],
)
|
LFPy/LFPy
|
examples/bioRxiv281717/example_parallel_network_parameters.py
|
Python
|
gpl-3.0
| 29,269
|
[
"NEURON"
] |
8000c39b23e09856503522c1090b4385120578a43a2eee0680cd64094c451f3d
|
#!/usr/bin/env python
"""
==================
ModEM
==================
# Generate data file for ModEM
# by Paul Soeffky 2013
# revised by LK 2014
# revised by JP 2014
# edited by AK 2016
"""
import os
import mtpy.core.z as mtz
import mtpy.core.mt as mt
import numpy as np
import mtpy.utils.latlongutmconversion as utm2ll
import mtpy.modeling.ws3dinv as ws
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.patches import Ellipse
from matplotlib.colors import Normalize
import matplotlib.colorbar as mcb
import matplotlib.gridspec as gridspec
import mtpy.imaging.mtplottools as mtplottools
import matplotlib.widgets as widgets
import matplotlib.colors as colors
import matplotlib.cm as cm
import mtpy.utils.exceptions as mtex
import mtpy.analysis.pt as mtpt
import mtpy.imaging.mtcolors as mtcl
import scipy.interpolate as spi
try:
from evtk.hl import gridToVTK, pointsToVTK
except ImportError:
print ('If you want to write a vtk file for 3d viewing, you need download '
'and install evtk from https://bitbucket.org/pauloh/pyevtk')
print ('Note: if you are using Windows you should build evtk first with'
'either MinGW or cygwin using the command: \n'
' python setup.py build -compiler=mingw32 or \n'
' python setup.py build -compiler=cygwin')
# Lookup of supported EPSG codes -> [proj4 projection string, UTM zone].
# Zone 0 marks non-UTM projections (Lambert, geographic lat/lon).
epsg_dict = {28350:['+proj=utm +zone=50 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',50],
             28351:['+proj=utm +zone=51 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',51],
             28352:['+proj=utm +zone=52 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',52],
             28353:['+proj=utm +zone=53 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',53],
             28354:['+proj=utm +zone=54 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',54],
             28355:['+proj=utm +zone=55 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',55],
             28356:['+proj=utm +zone=56 +south +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',56],
             3112:['+proj=lcc +lat_1=-18 +lat_2=-36 +lat_0=0 +lon_0=134 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs',0],
             4326:['+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs',0]}
#==============================================================================
class Data(object):
"""
Data will read and write .dat files for ModEM and convert a WS data file
to ModEM format.
..note: :: the data is interpolated onto the given periods such that all
stations invert for the same periods. The interpolation is
a linear interpolation of each of the real and imaginary parts
of the impedance tensor and induction tensor.
See mtpy.core.mt.MT.interpolate for more details
Arguments
------------
**edi_list** : list
list of full paths to .edi files you want to invert for
====================== ====================================================
Attributes/Key Words Description
====================== ====================================================
_dtype internal variable defining the data type of
data_array
_t_shape internal variable defining shape of tipper array in
_dtype
_z_shape internal variable defining shape of Z array in
_dtype
center_position (east, north, evel) for center point of station
array. All stations are relative to this location
for plotting purposes.
comp_index_dict dictionary for index values of component of Z and T
station_locations numpy.ndarray structured to store station
location values. Keys are:
* station --> station name
* east --> UTM east (m)
* north --> UTM north (m)
* lat --> latitude in decimal degrees
* lon --> longitude in decimal degrees
* elev --> elevation (m)
* zone --> UTM zone
* rel_east -- > relative east location to
center_position (m)
* rel_north --> relative north location to
center_position (m)
data_array numpy.ndarray (num_stations) structured to store
data. keys are:
* station --> station name
* lat --> latitude in decimal degrees
* lon --> longitude in decimal degrees
* elev --> elevation (m)
* rel_east -- > relative east location to
center_position (m)
* rel_north --> relative north location to
center_position (m)
* east --> UTM east (m)
* north --> UTM north (m)
* zone --> UTM zone
* z --> impedance tensor array with shape
(num_freq, 2, 2)
* z_err --> impedance tensor error array with
shape (num_freq, 2, 2)
* tip --> Tipper array with shape
(num_freq, 1, 2)
* tipperr --> Tipper array with shape
(num_freq, 1, 2)
data_fn full path to data file
data_period_list period list from all the data
edi_list list of full paths to edi files
error_egbert percentage to multiply sqrt(Z_xy*Zyx) by.
*default* is 3 as prescribed by Egbert & Kelbert
error_floor percentage to set the error floor at, anything below
this number will be set to error_floor.
*default* is 10
error_tipper absolute tipper error, all tipper error will be
set to this value unless you specify error_type as
'floor' or 'floor_egbert'.
*default* is .05 for 5%
error_type [ 'floor' | 'value' | 'egbert' ]
*default* is 'egbert'
* 'floor' sets the error floor to error_floor
* 'value' sets error to error_value
* 'egbert' sets error to
error_egbert * sqrt(abs(zxy*zyx))
* 'floor_egbert' sets error floor to
error_egbert * sqrt(abs(zxy*zyx))
error_value percentage to multiply Z by to set error
*default* is 5 for 5% of Z as error
fn_basename basename of data file. *default* is 'ModEM_Data.dat'
header_strings strings for header of data file following the format
outlined in the ModEM documentation
inv_comp_dict dictionary of inversion components
inv_mode inversion mode, options are: *default* is '1'
* '1' --> for 'Full_Impedance' and
'Full_Vertical_Components'
* '2' --> 'Full_Impedance'
* '3' --> 'Off_Diagonal_Impedance' and
'Full_Vertical_Components'
* '4' --> 'Off_Diagonal_Impedance'
* '5' --> 'Full_Vertical_Components'
* '6' --> 'Full_Interstation_TF'
* '7' --> 'Off_Diagonal_Rho_Phase'
inv_mode_dict dictionary for inversion modes
max_num_periods maximum number of periods
mt_dict dictionary of mtpy.core.mt.MT objects with keys
being station names
period_dict dictionary of period index for period_list
period_list list of periods to invert for
period_max maximum value of period to invert for
period_min minimum value of period to invert for
rotate_angle Angle to rotate data to assuming 0 is N and E is 90
save_path path to save data file to
units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z
*default* is [mV/km]/[nT]
wave_sign [ + | - ] sign of time dependent wave.
*default* is '+' as positive downwards.
====================== ====================================================
========================== ================================================
Methods Description
========================== ================================================
convert_ws3dinv_data_file convert a ws3dinv file to ModEM format,
**Note** this doesn't include tipper data and
you need a station location file like the one
output by mtpy.modeling.ws3dinv
get_data_from_edi get data from given .edi files and fill
attributes accordingly
get_mt_dict get a dictionary of mtpy.core.mt.MT objects
with keys being station names
get_period_list get a list of periods to invert for
get_station_locations get station locations and relative locations
filling in station_locations
read_data_file read in a ModEM data file and fill attributes
data_array, station_locations, period_list, mt_dict
write_data_file write a ModEM data file
========================== ================================================
:Example 1 --> create inversion period list: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
>>> md = modem.Data(edi_list, period_min=.1, period_max=300,\
max_num_periods=12)
>>> md.write_data_file(save_path=r"/home/modem/inv1")
:Example 2 --> set inversion period list from data: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
>>> md = modem.Data(edi_list)
>>> #get period list from an .edi file
>>> mt_obj1 = modem.mt.MT(edi_list[0])
>>> inv_period_list = 1./mt_obj1.Z.freq
>>> #invert for every third period in inv_period_list
>>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list, 3))]
>>> md.period_list = inv_period_list
>>> md.write_data_file(save_path=r"/home/modem/inv1")
:Example 3 --> change error values: ::
>>> import mtpy.modeling.modem as modem
>>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat")
>>> mdr.error_type = 'floor'
>>> mdr.error_floor = 10
>>> mdr.error_tipper = .03
>>> mdr.write_data_file(save_path=r"/home/modem/inv2")
:Example 4 --> change inversion type: ::
>>> import mtpy.modeling.modem as modem
>>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat")
>>> mdr.inv_mode = '3'
>>> mdr.write_data_file(save_path=r"/home/modem/inv2")
:Example 5 --> create mesh first then data file: ::
>>> import mtpy.modeling.modem as modem
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi)
for edi in os.listdir(edi_path)
>>> ... if edi.find('.edi') > 0]
>>> #2) make a grid from the stations themselves with 200m cell spacing
>>> mmesh = modem.Model(edi_list=edi_list, cell_size_east=200,
>>> ... cell_size_north=200)
>>> mmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> mmesh.plot_mesh()
>>> # all is good write the mesh file
>>> mmesh.write_model_file(save_path=r"/home/modem/Inv1")
>>> # create data file
>>> md = modem.Data(edi_list, station_locations=mmesh.station_locations)
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
:Example 6 --> rotate data: ::
>>> md.rotation_angle = 60
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
>>> # or
>>> md.write_data_file(save_path=r"/home/modem/Inv1", \
rotation_angle=60)
"""
def __init__(self, edi_list=None, **kwargs):
self.edi_list = edi_list
self.error_type = kwargs.pop('error_type', 'egbert')
self.error_floor = kwargs.pop('error_floor', 5.0)
self.error_value = kwargs.pop('error_value', 5.0)
self.error_egbert = kwargs.pop('error_egbert', 3.0)
self.error_tipper = kwargs.pop('error_tipper', .05)
self.wave_sign_impedance = kwargs.pop('wave_sign_impedance', '+')
self.wave_sign_tipper = kwargs.pop('wave_sign_tipper', '+')
self.units = kwargs.pop('units', '[mV/km]/[nT]')
self.inv_mode = kwargs.pop('inv_mode', '1')
self.period_list = kwargs.pop('period_list', None)
self.period_step = kwargs.pop('period_step', 1)
self.period_min = kwargs.pop('period_min', None)
self.period_max = kwargs.pop('period_max', None)
self.period_buffer = kwargs.pop('period_buffer', None)
self.max_num_periods = kwargs.pop('max_num_periods', None)
self.data_period_list = None
self.fn_basename = kwargs.pop('fn_basename', 'ModEM_Data.dat')
self.save_path = kwargs.pop('save_path', os.getcwd())
self.formatting = kwargs.pop('format', '1')
self._rotation_angle = kwargs.pop('rotation_angle', 0.0)
self._set_rotation_angle(self._rotation_angle)
self._station_locations = None
self.center_position = np.array([0.0, 0.0])
self.epsg = kwargs.pop('epsg',None)
self.data_array = None
self.mt_dict = None
self.data_fn = kwargs.pop('data_fn','ModEM_Data.dat')
self._z_shape = (1, 2, 2)
self._t_shape = (1, 1, 2)
self._dtype = [('station', '|S10'),
('lat', np.float),
('lon', np.float),
('elev', np.float),
('rel_east', np.float),
('rel_north', np.float),
('east', np.float),
('north', np.float),
('zone', '|S4'),
('z', (np.complex, self._z_shape)),
('z_err', (np.complex, self._z_shape)),
('tip', (np.complex, self._t_shape)),
('tip_err', (np.complex, self._t_shape))]
self.inv_mode_dict = {'1':['Full_Impedance', 'Full_Vertical_Components'],
'2':['Full_Impedance'],
'3':['Off_Diagonal_Impedance',
'Full_Vertical_Components'],
'4':['Off_Diagonal_Impedance'],
'5':['Full_Vertical_Components'],
'6':['Full_Interstation_TF'],
'7':['Off_Diagonal_Rho_Phase']}
self.inv_comp_dict = {'Full_Impedance':['zxx', 'zxy', 'zyx', 'zyy'],
'Off_Diagonal_Impedance':['zxy', 'zyx'],
'Full_Vertical_Components':['tx', 'ty']}
self.comp_index_dict = {'zxx': (0, 0), 'zxy':(0, 1), 'zyx':(1, 0),
'zyy':(1, 1), 'tx':(0, 0), 'ty':(0, 1)}
self.header_strings = \
['# Created using MTpy error {0} of {1:.0f}%, data rotated {2:.1f} deg clockwise from N\n'.format(
self.error_type, self.error_floor, self._rotation_angle),
'# Period(s) Code GG_Lat GG_Lon X(m) Y(m) Z(m) Component Real Imag Error\n']
#size of a utm grid
self._utm_grid_size_north = 888960.0
self._utm_grid_size_east = 640000.0
self._utm_cross = False
self._utm_ellipsoid = 23
def _set_dtype(self, z_shape, t_shape):
"""
reset dtype
"""
self._z_shape = z_shape
self._t_shape = t_shape
self._dtype = [('station', '|S10'),
('lat', np.float),
('lon', np.float),
('elev', np.float),
('rel_east', np.float),
('rel_north', np.float),
('east', np.float),
('north', np.float),
('zone', '|S4'),
('z', (np.complex, self._z_shape)),
('z_err', (np.complex, self._z_shape)),
('tip', (np.complex, self._t_shape)),
('tip_err', (np.complex, self._t_shape))]
def _set_header_string(self):
"""
reset the header sring for file
"""
h_str = '# Created using MTpy error {0} of {1:.0f}%, data rotated {2:.1f}_deg clockwise from N\n'
if self.error_type == 'egbert':
self.header_strings[0] = h_str.format(self.error_type,
self.error_egbert,
self._rotation_angle)
elif self.error_type == 'floor':
self.header_strings[0] = h_str.format(self.error_type,
self.error_floor,
self._rotation_angle)
elif self.error_type == 'value':
self.header_strings[0] = h_str.format(self.error_type,
self.error_value,
self._rotation_angle)
def get_mt_dict(self):
"""
get mt_dict from edi file list
"""
if self.edi_list is None:
raise ModEMError('edi_list is None, please input a list of '
'.edi files containing the full path')
if len(self.edi_list) == 0:
raise ModEMError('edi_list is empty, please input a list of '
'.edi files containing the full path' )
self.mt_dict = {}
for edi in self.edi_list:
mt_obj = mt.MT(edi)
self.mt_dict[mt_obj.station] = mt_obj
    def project_sites(self):
        """
        function to project sites from lat/long to eastings/northing.
        no dependency on external projection modules (e.g. pyproj) but
        limited flexibility for projection.

        Fills in the 'zone', 'east' and 'north' fields of data_array, then
        shifts stations that fall outside the dominant UTM zone so all
        coordinates are relative to one grid.
        """
        # latitude-band letter -> ordinal index, used to measure how many
        # bands apart two zones are
        utm_zones_dict = {'M':9, 'L':8, 'K':7, 'J':6, 'H':5, 'G':4, 'F':3,
                          'E':2, 'D':1, 'C':0, 'N':10, 'P':11, 'Q':12, 'R':13,
                          'S':14, 'T':15, 'U':16, 'V':17, 'W':18, 'X':19}
        #--> need to convert lat and lon to east and north
        # (stations with lat == 0 or lon == 0 are treated as missing)
        for c_arr in self.data_array:
            if c_arr['lat'] != 0.0 and c_arr['lon'] != 0.0:
                c_arr['zone'], c_arr['east'], c_arr['north'] = \
                    utm2ll.LLtoUTM(self._utm_ellipsoid,
                                   c_arr['lat'],
                                   c_arr['lon'])
        #--> need to check to see if all stations are in the same zone
        utm_zone_list = list(set(self.data_array['zone']))
        #if there are more than one zone, figure out which zone is the odd ball
        utm_zone_dict = dict([(utmzone, 0) for utmzone in utm_zone_list])
        if len(utm_zone_list) != 1:
            self._utm_cross = True
            # count stations per zone
            for c_arr in self.data_array:
                utm_zone_dict[c_arr['zone']] += 1
            #flip keys and values so the key is the number of zones and
            # the value is the utm zone
            # NOTE(review): two zones with the same station count collide
            # here and one silently wins -- confirm acceptable
            utm_zone_dict = dict([(utm_zone_dict[key], key)
                                  for key in utm_zone_dict.keys()])
            #get the main utm zone as the one with the most stations in it
            main_utm_zone = utm_zone_dict[max(utm_zone_dict.keys())]
            #Get a list of index values where utm zones are not the
            #same as the main zone
            diff_zones = np.where(self.data_array['zone'] != main_utm_zone)[0]
            for c_index in diff_zones:
                c_arr = self.data_array[c_index]
                c_utm_zone = c_arr['zone']
                print '{0} utm_zone is {1} and does not match {2}'.format(
                       c_arr['station'], c_arr['zone'], main_utm_zone)
                # NOTE(review): zone_shift = 1 - abs(...) can never exceed 1,
                # so the 'zone_shift > 1' branch below is unreachable; and in
                # the '< -1' branch north_shift is negative, so '-=' actually
                # ADDS the magnitude while the message says "subtracting".
                # This legacy logic is preserved as-is -- verify intent.
                zone_shift = 1-abs(utm_zones_dict[c_utm_zone[-1]]-\
                                   utm_zones_dict[main_utm_zone[-1]])
                #--> check to see if the zone is in the same latitude
                #if odd ball zone is north of main zone, add 888960 m
                if zone_shift > 1:
                    north_shift = self._utm_grid_size_north*zone_shift
                    print ('--> adding {0:.2f}'.format(north_shift)+\
                           ' meters N to place station in ' +\
                           'proper coordinates relative to all other ' +\
                           'staions.')
                    c_arr['north'] += north_shift
                #if odd ball zone is south of main zone, subtract 88960 m
                elif zone_shift < -1:
                    north_shift = self._utm_grid_size_north*zone_shift
                    print ('--> subtracting {0:.2f}'.format(north_shift)+\
                           ' meters N to place station in ' +\
                           'proper coordinates relative to all other ' +\
                           'staions.')
                    c_arr['north'] -= north_shift
                #--> if zone is shifted east or west
                if int(c_utm_zone[0:-1]) > int(main_utm_zone[0:-1]):
                    east_shift = self._utm_grid_size_east*\
                          abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
                    print ('--> adding {0:.2f}'.format(east_shift)+\
                           ' meters E to place station in ' +\
                           'proper coordinates relative to all other ' +\
                           'staions.')
                    c_arr['east'] += east_shift
                elif int(c_utm_zone[0:-1]) < int(main_utm_zone[0:-1]):
                    east_shift = self._utm_grid_size_east*\
                          abs(int(c_utm_zone[0:-1])-int(main_utm_zone[0:-1]))
                    print ('--> subtracting {0:.2f}'.format(east_shift)+\
                           ' meters E to place station in ' +\
                           'proper coordinates relative to all other ' +\
                           'staions.')
                    c_arr['east'] -= east_shift
def project_sites_pyproj(self):
import pyproj
if self.epsg not in epsg_dict.keys():
self.epsg = None
if self.epsg is None:
return
p1 = pyproj.Proj(epsg_dict[4326][0])
p2 = pyproj.Proj(epsg_dict[self.epsg][0])
for c_arr in self.data_array:
if c_arr['lat'] != 0.0 and c_arr['lon'] != 0.0:
c_arr['zone'] = epsg_dict[self.epsg][1]
c_arr['east'], c_arr['north'] = \
pyproj.transform(p1,p2,
c_arr['lon'],c_arr['lat'])
def get_relative_station_locations(self):
    """
    get station locations from edi files and project to local coordinates

    ..note:: There are two options for projection method. If pyproj is
             installed, you can use the method that uses pyproj. In this
             case, specify the epsg number as an attribute to the model
             object or when setting it up. The epsg can generally be found
             through a google search. If epsg is specified then **all**
             sites are projected to that epsg. It is up to the user to
             make sure all sites are in the bounds of projection.
             **note** epsg 3112 (Geoscience Australia Lambert) covers all
             of Australia but may cause significant rotation at some
             locations.

             ***If pyproj is not used:***
             If the survey steps across multiple UTM zones, then a
             distance will be added to the stations to place them in
             the correct location. This distance is
             _utm_grid_size_north and _utm_grid_size_east. You should
             change these parameters to place the locations in the
             proper spot as grid distances and overlaps change over
             the globe.
    """
    # get center position of the stations in lat and lon
    self.center_position[0] = self.data_array['lat'].mean()
    self.center_position[1] = self.data_array['lon'].mean()

    # try to use pyproj if desired, if not then have to use inbuilt
    # projection module but may give bad results if crossing more than one zone
    if self.epsg is not None:
        use_pyproj = True
    else:
        use_pyproj = False

    if use_pyproj:
        try:
            self.project_sites_pyproj()
        except ImportError:
            use_pyproj = False
            errormessage = "Error loading pyproj"
        # project_sites_pyproj() resets self.epsg to None when the code
        # is not in epsg_dict, so this also catches an invalid epsg
        if self.epsg is None:
            use_pyproj = False
            errormessage = "Couldn't find epsg, please define manually"
        # warning message
        if not use_pyproj:
            print errormessage

    # fall back on the inbuilt UTM projection
    if not use_pyproj:
        self.project_sites()

    #remove the average distance to get coordinates in a relative space
    self.data_array['rel_east'] = self.data_array['east']-\
                                  self.data_array['east'].mean()
    self.data_array['rel_north'] = self.data_array['north']-\
                                   self.data_array['north'].mean()

    #--> rotate grid if necessary
    #to do this rotate the station locations because ModEM assumes the
    #input mesh is a lateral grid.
    #needs to be 90 - because North is assumed to be 0 but the rotation
    #matrix assumes that E is 0.
    if self.rotation_angle != 0:
        cos_ang = np.cos(np.deg2rad(self.rotation_angle))
        sin_ang = np.sin(np.deg2rad(self.rotation_angle))
        rot_matrix = np.matrix(np.array([[cos_ang, sin_ang],
                                         [-sin_ang, cos_ang]]))
        coords = np.array([self.data_array['rel_east'],
                           self.data_array['rel_north']])

        #rotate the relative station locations
        new_coords = np.array(np.dot(rot_matrix, coords))
        self.data_array['rel_east'][:] = new_coords[0, :]
        self.data_array['rel_north'][:] = new_coords[1, :]

        print 'Rotated stations by {0:.1f} deg clockwise from N'.format(
            self.rotation_angle)

    #translate the stations so they are relative to 0,0
    east_center = (self.data_array['rel_east'].max()-
                   np.abs(self.data_array['rel_east'].min()))/2
    north_center = (self.data_array['rel_north'].max()-
                    np.abs(self.data_array['rel_north'].min()))/2

    #remove the average distance to get coordinates in a relative space
    self.data_array['rel_east'] -= east_center
    self.data_array['rel_north'] -= north_center
def get_period_list(self):
    """
    make a period list to invert for

    If ``period_list`` is already set, just report it and return.
    Otherwise collect all data periods (1/frequency) from ``mt_dict``
    into ``data_period_list`` and, when ``period_min``, ``period_max``
    and ``max_num_periods`` are all given, build ``period_list`` as
    log-spaced periods between the data periods bracketing that range.

    Raises
    -------
    ModEMError
        If only one of period_min/period_max is given, if
        max_num_periods is missing, or if no period_list could be built.
    """

    def _show_periods(periods):
        # report the inversion periods to the user
        print('-' * 50)
        print('Inverting for periods:')
        for per in periods:
            print('    {0:<12.6f}'.format(per))
        print('-' * 50)

    if self.mt_dict is None:
        self.get_mt_dict()

    # a user-supplied period list wins; nothing to compute
    if self.period_list is not None:
        _show_periods(self.period_list)
        return

    # gather the unique set of data periods over all stations
    data_period_list = []
    for s_key in sorted(self.mt_dict.keys()):
        mt_obj = self.mt_dict[s_key]
        data_period_list.extend(list(1./mt_obj.Z.freq))

    self.data_period_list = np.array(sorted(list(set(data_period_list)),
                                            reverse=False))

    # period_min and period_max must be given together
    if self.period_min is not None:
        if self.period_max is None:
            raise ModEMError('Need to input period_max')
    if self.period_max is not None:
        if self.period_min is None:
            raise ModEMError('Need to input period_min')

    if self.period_min is not None and self.period_max is not None:
        if self.max_num_periods is None:
            raise ModEMError('Need to input number of periods to use')

        # bracket [period_min, period_max] with actual data periods and
        # distribute max_num_periods log-uniformly in between
        min_index = np.where(self.data_period_list >= self.period_min)[0][0]
        max_index = np.where(self.data_period_list <= self.period_max)[0][-1]

        pmin = np.log10(self.data_period_list[min_index])
        pmax = np.log10(self.data_period_list[max_index])
        self.period_list = np.logspace(pmin, pmax, num=self.max_num_periods)

        _show_periods(self.period_list)

    if self.period_list is None:
        raise ModEMError('Need to input period_min, period_max, '
                         'max_num_periods or a period_list')
def _set_rotation_angle(self, rotation_angle):
    """
    on set rotation angle rotate mt_dict and data_array,

    The incremental rotation applied is the difference between the new
    angle and the currently stored one.  No-op if the angle is unchanged
    or if data_array / mt_dict have not been filled yet.
    """
    if self._rotation_angle == rotation_angle:
        return

    # rotate by the difference only, since data already carry the
    # current rotation
    new_rotation_angle = -self._rotation_angle + rotation_angle
    if new_rotation_angle == 0:
        return

    print('Changing rotation angle from {0:.1f} to {1:.1f}'.format(
        self._rotation_angle, rotation_angle))
    self._rotation_angle = rotation_angle

    if self.data_array is None:
        return
    if self.mt_dict is None:
        return

    # rotate impedance and tipper of every station
    for mt_key in sorted(self.mt_dict.keys()):
        mt_obj = self.mt_dict[mt_key]
        mt_obj.Z.rotate(new_rotation_angle)
        mt_obj.Tipper.rotate(new_rotation_angle)

    print('Data rotated to align with {0:.1f} deg clockwise from N'.format(
        self._rotation_angle))

    print('*' * 70)
    print(' If you want to rotate station locations as well use the')
    print(' command Data.get_relative_station_locations() ')
    print(' if stations have not already been rotated in Model')
    print('*' * 70)

    # refresh data_array with the rotated values
    self._fill_data_array()
def _get_rotation_angle(self):
    # getter for the rotation_angle property
    return self._rotation_angle

# expose rotation angle as a property; setting it rotates Z/Tipper of
# every station via _set_rotation_angle
rotation_angle = property(fget=_get_rotation_angle,
                          fset=_set_rotation_angle,
                          doc="""Rotate data assuming N=0, E=90""")
def _initialise_empty_data_array(self, stationlocations, period_list,
                                 location_type='LL', stationnames=None):
    """
    create an empty data array to create input files for forward modelling

    station locations is an array containing x,y coordinates of each
    station (shape = (number_of_stations, 2))
    period_list = list of periods to model
    location_type = 'LL' or 'EN' - longitude/latitude or easting/northing
    stationnames = optional list of station names; ignored (and
                   auto-generated as st000, st001, ...) when its length
                   does not match the number of station locations.
    """
    self.period_list = period_list
    nf = len(self.period_list)
    self._set_dtype((nf, 2, 2), (nf, 1, 2))
    self.data_array = np.zeros(len(stationlocations), dtype=self._dtype)

    if location_type == 'LL':
        self.data_array['lon'] = stationlocations[:, 0]
        self.data_array['lat'] = stationlocations[:, 1]
    else:
        self.data_array['east'] = stationlocations[:, 0]
        self.data_array['north'] = stationlocations[:, 1]

    # set non-zero values to array (as zeros will be deleted)
    # inv_mode '1' and '2' include impedance; only '1' includes tipper
    if self.inv_mode in '12':
        self.data_array['z'][:] = 100. + 100j
        self.data_array['z_err'][:] = 1e15
    if self.inv_mode == '1':
        self.data_array['tip'][:] = 0.1 + 0.1j
        self.data_array['tip_err'][:] = 1e15

    # set station names
    # BUG FIX: original compared len(stationnames) against itself (always
    # equal), so a wrong-length name list was never rejected
    if stationnames is not None:
        if len(stationnames) != len(stationlocations):
            stationnames = None
    if stationnames is None:
        stationnames = ['st%03i' % ss for ss in range(len(stationlocations))]
    self.data_array['station'] = stationnames

    self.get_relative_station_locations()
def _fill_data_array(self):
    """
    fill the data array from mt_dict

    Rebuilds the structured ``data_array`` (one record per station) and
    populates it with station metadata plus impedance/tipper values
    interpolated onto ``period_list``.  If a ``data_array`` already
    exists, previously stored locations are copied over by station name.
    """
    if self.period_list is None:
        self.get_period_list()

    ns = len(self.mt_dict.keys())
    nf = len(self.period_list)

    # keep a copy of any existing array so locations can be re-used
    d_array = False
    if self.data_array is not None:
        d_arr_copy = self.data_array.copy()
        d_array = True

    self._set_dtype((nf, 2, 2), (nf, 1, 2))
    self.data_array = np.zeros(ns, dtype=self._dtype)

    rel_distance = True
    for ii, s_key in enumerate(sorted(self.mt_dict.keys())):
        mt_obj = self.mt_dict[s_key]
        if d_array is True:
            # copy locations from the previous data_array, matched on
            # station name
            try:
                d_index = np.where(d_arr_copy['station'] == s_key)[0][0]
                self.data_array[ii]['station'] = s_key
                self.data_array[ii]['lat'] = d_arr_copy[d_index]['lat']
                self.data_array[ii]['lon'] = d_arr_copy[d_index]['lon']
                self.data_array[ii]['east'] = d_arr_copy[d_index]['east']
                self.data_array[ii]['north'] = d_arr_copy[d_index]['north']
                self.data_array[ii]['elev'] = d_arr_copy[d_index]['elev']
                self.data_array[ii]['rel_east'] = d_arr_copy[d_index]['rel_east']
                self.data_array[ii]['rel_north'] = d_arr_copy[d_index]['rel_north']
            except IndexError:
                print 'Could not find {0} in data_array'.format(s_key)
        else:
            # no previous array --> take everything from the MT object
            self.data_array[ii]['station'] = mt_obj.station
            self.data_array[ii]['lat'] = mt_obj.lat
            self.data_array[ii]['lon'] = mt_obj.lon
            self.data_array[ii]['east'] = mt_obj.east
            self.data_array[ii]['north'] = mt_obj.north
            self.data_array[ii]['elev'] = mt_obj.elev
            # grid_east/grid_north only exist if the MT object has been
            # placed on a model grid already
            try:
                self.data_array[ii]['rel_east'] = mt_obj.grid_east
                self.data_array[ii]['rel_north'] = mt_obj.grid_north
                rel_distance = False
            except AttributeError:
                pass

        # interpolate each station onto the period list
        # check bounds of period list
        interp_periods = self.period_list[np.where(
            (self.period_list >= 1./mt_obj.Z.freq.max()) &
            (self.period_list <= 1./mt_obj.Z.freq.min()))]

        # if specified, apply a buffer so that interpolation doesn't stretch too far over periods
        if type(self.period_buffer) in [float, int]:
            interp_periods_new = []
            dperiods = 1./mt_obj.Z.freq
            for iperiod in interp_periods:
                # find nearest data period
                difference = np.abs(iperiod-dperiods)
                nearestdperiod = dperiods[difference == np.amin(difference)][0]
                if max(nearestdperiod/iperiod, iperiod/nearestdperiod) < self.period_buffer:
                    interp_periods_new.append(iperiod)
            interp_periods = np.array(interp_periods_new)

        interp_z, interp_t = mt_obj.interpolate(1./interp_periods)
        for kk, ff in enumerate(interp_periods):
            jj = np.where(self.period_list == ff)[0][0]
            self.data_array[ii]['z'][jj] = interp_z.z[kk, :, :]
            self.data_array[ii]['z_err'][jj] = interp_z.zerr[kk, :, :]

            if mt_obj.Tipper.tipper is not None:
                self.data_array[ii]['tip'][jj] = interp_t.tipper[kk, :, :]
                self.data_array[ii]['tip_err'][jj] = \
                    interp_t.tippererr[kk, :, :]

    # NOTE(review): as written, relative locations are recomputed when
    # grid coordinates WERE found (rel_distance is False); confirm this
    # is the intended condition rather than the inverse.
    if rel_distance is False:
        self.get_relative_station_locations()
def _set_station_locations(self, station_locations):
    """
    take a station_locations array and populate data_array

    Matches records by station name; stations in ``station_locations``
    that are not present in ``data_array`` are reported and skipped.
    If ``data_array`` does not exist yet, it is built first.
    """
    if self.data_array is None:
        self.get_mt_dict()
        self.get_period_list()
        self._fill_data_array()

    for s_arr in station_locations:
        try:
            d_index = np.where(self.data_array['station'] ==
                               s_arr['station'])[0][0]
        except IndexError:
            print('Could not find {0} in data_array'.format(s_arr['station']))
            d_index = None

        if d_index is not None:
            # copy all location fields across for the matched station
            self.data_array[d_index]['lat'] = s_arr['lat']
            self.data_array[d_index]['lon'] = s_arr['lon']
            self.data_array[d_index]['east'] = s_arr['east']
            self.data_array[d_index]['north'] = s_arr['north']
            self.data_array[d_index]['elev'] = s_arr['elev']
            self.data_array[d_index]['rel_east'] = s_arr['rel_east']
            self.data_array[d_index]['rel_north'] = s_arr['rel_north']
def _get_station_locations(self):
    """
    Return the location-related columns of ``data_array``.

    Gives back the ('station', 'lat', 'lon', 'north', 'east', 'elev',
    'zone', 'rel_north', 'rel_east') fields only, or None when no
    data_array has been built yet.
    """
    if self.data_array is None:
        return None
    # multi-field selection keeps one record per station
    return self.data_array[['station', 'lat', 'lon',
                            'north', 'east', 'elev', 'zone',
                            'rel_north', 'rel_east']]
# expose station metadata as a read/write property backed by data_array;
# setting it updates matching records via _set_station_locations
station_locations = property(_get_station_locations,
                             _set_station_locations,
                             doc="""location of stations""")
def write_data_file(self, save_path=None, fn_basename=None,
                    rotation_angle=None, compute_error=True,
                    fill=True):
    """
    write data file for ModEM

    will save file as save_path/fn_basename

    Arguments:
    ------------
        **save_path** : string
                        directory path to save data file to.
                        *default* is cwd
        **fn_basename** : string
                          basename to save data file as
                          *default* is 'ModEM_Data.dat'
        **rotation_angle** : float
                             angle to rotate the data by assuming N = 0,
                             E = 90. *default* is 0.0
        **compute_error** : bool
                            if True compute errors from the configured
                            error scheme, otherwise write the stored
                            *_err values directly.
        **fill** : bool
                   if True (default) rebuild period list, data array and
                   relative station locations before writing.

    Outputs:
    ----------
        **data_fn** : string
                      full path to created data file

    :Example: ::

        >>> import os
        >>> import mtpy.modeling.modem as modem
        >>> edi_path = r"/home/mt/edi_files"
        >>> edi_list = [os.path.join(edi_path, edi) \
                        for edi in os.listdir(edi_path)\
                        if edi.find('.edi') > 0]
        >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\
                            max_num_periods=12)
        >>> md.write_data_file(save_path=r"/home/modem/inv1")
    """
    if save_path is not None:
        self.save_path = save_path
    if fn_basename is not None:
        self.fn_basename = fn_basename

    self.data_fn = os.path.join(self.save_path, self.fn_basename)

    if fill:
        self.get_period_list()

    # rotate data if desired
    if rotation_angle is not None:
        self.rotation_angle = rotation_angle

    # be sure to fill in data array
    if fill:
        self._fill_data_array()
        # get relative station locations in grid coordinates
        self.get_relative_station_locations()

    # reset the header string to be informational
    self._set_header_string()

    # number of periods - subtract periods with all zero components
    nper = len(np.where(np.mean(np.mean(np.mean(np.abs(self.data_array['z']), axis=0), axis=1), axis=1) > 0)[0])

    dlines = []
    for inv_mode in self.inv_mode_dict[self.inv_mode]:
        dlines.append(self.header_strings[0])
        dlines.append(self.header_strings[1])
        dlines.append('> {0}\n'.format(inv_mode))

        if inv_mode.find('Impedance') > 0:
            dlines.append('> exp({0}i\omega t)\n'.format(self.wave_sign_impedance))
            dlines.append('> {0}\n'.format(self.units))
        elif inv_mode.find('Vertical') >= 0:
            dlines.append('> exp({0}i\omega t)\n'.format(self.wave_sign_tipper))
            dlines.append('> []\n')
        dlines.append('> 0\n')  # orientation, need to add at some point
        dlines.append('> {0: >10.6f} {1:>10.6f}\n'.format(
            self.center_position[0], self.center_position[1]))
        dlines.append('> {0} {1}\n'.format(nper,
                                           self.data_array['z'].shape[0]))

        for ss in range(self.data_array['z'].shape[0]):
            for ff in range(self.data_array['z'].shape[1]):
                for comp in self.inv_comp_dict[inv_mode]:
                    # index values for component within the matrix
                    z_ii, z_jj = self.comp_index_dict[comp]

                    # get the correct key for data array according to comp
                    if comp.find('z') == 0:
                        c_key = 'z'
                    elif comp.find('t') == 0:
                        c_key = 'tip'

                    # get the value for that component at that frequency
                    zz = self.data_array[ss][c_key][ff, z_ii, z_jj]
                    # skip empty (0) and masked (1e32) values
                    if zz.real != 0.0 and zz.imag != 0.0 and \
                            zz.real != 1e32 and zz.imag != 1e32:
                        if self.formatting == '1':
                            per = '{0:<12.5e}'.format(self.period_list[ff])
                            sta = '{0:>7}'.format(self.data_array[ss]['station'])
                            lat = '{0:> 9.3f}'.format(self.data_array[ss]['lat'])
                            lon = '{0:> 9.3f}'.format(self.data_array[ss]['lon'])
                            eas = '{0:> 12.3f}'.format(self.data_array[ss]['rel_east'])
                            nor = '{0:> 12.3f}'.format(self.data_array[ss]['rel_north'])
                            ele = '{0:> 12.3f}'.format(self.data_array[ss]['elev'])
                            com = '{0:>4}'.format(comp.upper())
                            if self.units == 'ohm':
                                # 796 converts [mV/km]/[nT] <--> ohm
                                rea = '{0:> 14.6e}'.format(zz.real/796.)
                                ima = '{0:> 14.6e}'.format(zz.imag/796.)
                            else:
                                rea = '{0:> 14.6e}'.format(zz.real)
                                ima = '{0:> 14.6e}'.format(zz.imag)

                        elif self.formatting == '2':
                            per = '{0:<14.6e}'.format(self.period_list[ff])
                            sta = '{0:<10}'.format(self.data_array[ss]['station'])
                            lat = '{0:> 14.6f}'.format(self.data_array[ss]['lat'])
                            lon = '{0:> 14.6f}'.format(self.data_array[ss]['lon'])
                            eas = '{0:> 12.3f}'.format(self.data_array[ss]['rel_east'])
                            nor = '{0:> 15.3f}'.format(self.data_array[ss]['rel_north'])
                            ele = '{0:> 10.3f}'.format(self.data_array[ss]['elev'])
                            com = '{0:>12}'.format(comp.upper())
                            if self.units == 'ohm':
                                rea = '{0:> 17.6e}'.format(zz.real/796.)
                                ima = '{0:> 17.6e}'.format(zz.imag/796.)
                            else:
                                rea = '{0:> 17.6e}'.format(zz.real)
                                ima = '{0:> 17.6e}'.format(zz.imag)

                        if compute_error:
                            # compute relative error
                            if comp.find('t') == 0:
                                if 'floor' in self.error_type:
                                    abs_err = max(self.error_tipper,
                                                  self.data_array[ss]['tip_err'][ff, 0, z_ii])
                                else:
                                    abs_err = self.error_tipper
                            elif comp.find('z') == 0:
                                if self.error_type == 'floor':
                                    rel_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj]/\
                                              abs(zz)
                                    if rel_err < self.error_floor/100.:
                                        rel_err = self.error_floor/100.
                                    abs_err = rel_err*abs(zz)
                                elif self.error_type == 'value':
                                    abs_err = abs(zz)*self.error_value/100.
                                elif self.error_type == 'egbert':
                                    # error scaled by sqrt(|Zxy*Zyx|)
                                    d_zxy = self.data_array[ss]['z'][ff, 0, 1]
                                    d_zyx = self.data_array[ss]['z'][ff, 1, 0]
                                    abs_err = np.sqrt(abs(d_zxy*d_zyx))*\
                                              self.error_egbert/100.
                                elif self.error_type == 'floor_egbert':
                                    abs_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj]
                                    d_zxy = self.data_array[ss]['z'][ff, 0, 1]
                                    d_zyx = self.data_array[ss]['z'][ff, 1, 0]
                                    if abs_err < np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.:
                                        abs_err = np.sqrt(abs(d_zxy*d_zyx))*self.error_egbert/100.

                                # guard against a zero error which ModEM
                                # cannot handle
                                if abs_err == 0.0:
                                    abs_err = 1e3
                                    print('error at {0} is 0 for period {1}'.format(
                                        sta, per)+'set to 1e3')
                                if self.units == 'ohm':
                                    abs_err /= 796.
                        else:
                            abs_err = self.data_array[ss][c_key+'_err'][ff, z_ii, z_jj].real
                            if ((c_key.find('z') >= 0) and (self.units == 'ohm')):
                                abs_err /= 796.

                        abs_err = '{0:> 14.6e}'.format(abs(abs_err))
                        # make sure that x==north, y==east, z==+down
                        dline = ''.join([per, sta, lat, lon, nor, eas, ele,
                                         com, rea, ima, abs_err, '\n'])
                        dlines.append(dline)

    # use open() + context manager; the py2-only file() builtin does not
    # exist in python 3 and leaked the handle on write errors
    with open(self.data_fn, 'w') as dfid:
        dfid.writelines(dlines)

    print('Wrote ModEM data file to {0}'.format(self.data_fn))
def convert_ws3dinv_data_file(self, ws_data_fn, station_fn=None,
                              save_path=None, fn_basename=None):
    """
    convert a ws3dinv data file into ModEM format

    Arguments:
    ------------
        **ws_data_fn** : string
                         full path to WS data file

        **station_fn** : string
                         full path to station info file output by
                         mtpy.modeling.ws3dinv. Or you can create one using
                         mtpy.modeling.ws3dinv.WSStation

        **save_path** : string
                        directory path to save data file to.
                        *default* is cwd

        **fn_basename** : string
                          basename to save data file as
                          *default* is 'ModEM_Data.dat'

    Outputs:
    -----------
        **data_fn** : string
                      full path to created data file

    :Example: ::

        >>> import mtpy.modeling.modem as modem
        >>> mdr = modem.Data()
        >>> mdr.convert_ws3dinv_data_file(r"/home/ws3dinv/inv1/WSData.dat",
                station_fn=r"/home/ws3dinv/inv1/WS_Station_Locations.txt")
    """
    if not os.path.isfile(ws_data_fn):
        raise ws.WSInputError('Did not find {0}, check path'.format(ws_data_fn))

    if save_path is not None:
        self.save_path = save_path
    else:
        self.save_path = os.path.dirname(ws_data_fn)

    if fn_basename is not None:
        self.fn_basename = fn_basename

    # --> get data from data file
    wsd = ws.WSData()
    wsd.read_data_file(ws_data_fn, station_fn=station_fn)

    ns = wsd.data['station'].shape[0]
    nf = wsd.period_list.shape[0]

    self.period_list = wsd.period_list.copy()
    self._set_dtype((nf, 2, 2), (nf, 1, 2))
    self.data_array = np.zeros(ns, dtype=self._dtype)

    # --> fill data array; WS files carry no lat/lon/elev, only relative
    # grid coordinates (duplicated assignments from the original removed)
    for ii, d_arr in enumerate(wsd.data):
        self.data_array[ii]['station'] = d_arr['station']
        self.data_array[ii]['rel_east'] = d_arr['east']
        self.data_array[ii]['rel_north'] = d_arr['north']
        self.data_array[ii]['z'][:] = d_arr['z_data']
        self.data_array[ii]['z_err'][:] = d_arr['z_data_err'].real*\
                                          d_arr['z_err_map'].real
        self.data_array[ii]['lat'] = 0.0
        self.data_array[ii]['lon'] = 0.0
        self.data_array[ii]['elev'] = 0.0

    # need to change the inversion mode to be the same as the ws_data file
    if self.data_array['z'].all() == 0.0:
        if self.data_array['tip'].all() == 0.0:
            self.inv_mode = '4'
        else:
            self.inv_mode = '3'
    else:
        if self.data_array['tip'].all() == 0.0:
            self.inv_mode = '2'
        else:
            self.inv_mode = '1'

    # --> write file
    self.write_data_file()
def read_data_file(self, data_fn=None):
    """
    read ModEM data file

    Fills attributes:
        * data_array
        * period_list
        * mt_dict
    """
    if data_fn is not None:
        self.data_fn = data_fn
        self.save_path = os.path.dirname(self.data_fn)
        self.fn_basename = os.path.basename(self.data_fn)

    if self.data_fn is None:
        raise ModEMError('data_fn is None, enter a data file to read.')
    elif os.path.isfile(self.data_fn) is False:
        raise ModEMError('Could not find {0}, check path'.format(self.data_fn))

    # NOTE(review): file() is python-2 only; consider open() when porting
    dfid = file(self.data_fn, 'r')
    dlines = dfid.readlines()
    dfid.close()

    header_list = []
    metadata_list = []
    data_list = []
    period_list = []
    station_list = []
    read_impedance = False
    read_tipper = False
    for dline in dlines:
        # '#' lines are header comments
        if dline.find('#') == 0:
            header_list.append(dline.strip())
        # '>' lines are metadata (units, sign convention, mode, ...)
        elif dline.find('>') == 0:
            metadata_list.append(dline[1:].strip())
            if dline.lower().find('ohm') > 0:
                self.units = 'ohm'
            if dline.lower().find('mv') > 0:
                self.units =' [mV/km]/[nT]'
            # track which data section we are in so the sign convention
            # is attributed to the right quantity
            if dline.lower().find('vertical') > 0:
                read_tipper = True
                read_impedance = False
            elif dline.lower().find('impedance') > 0:
                read_impedance = True
                read_tipper = False
            if dline.find('exp') > 0:
                # sign convention is the character after '(' in exp(+i..)
                if read_impedance is True:
                    self.wave_sign_impedance = dline[dline.find('(')+1]
                elif read_tipper is True:
                    self.wave_sign_tipper = dline[dline.find('(')+1]
        else:
            # data line: 11 whitespace-separated columns
            dline_list = dline.strip().split()
            if len(dline_list) == 11:
                for ii, d_str in enumerate(dline_list):
                    if ii != 1:
                        try:
                            dline_list[ii] = float(d_str.strip())
                        except ValueError:
                            pass
                    # be sure the station name is a string
                    else:
                        dline_list[ii] = d_str.strip()
                period_list.append(dline_list[0])
                station_list.append(dline_list[1])
                data_list.append(dline_list)

    # try to find rotation angle
    h_list = header_list[0].split()
    for hh, h_str in enumerate(h_list):
        if h_str.find('_deg') > 0:
            try:
                self._rotation_angle = float(h_str[0:h_str.find('_deg')])
                print ('Set rotation angle to {0:.1f} '.format(
                       self._rotation_angle)+'deg clockwise from N')
            except ValueError:
                pass

    self.period_list = np.array(sorted(set(period_list)))
    station_list = sorted(set(station_list))

    # make a period dictionary to with key as period and value as index
    period_dict = dict([(per, ii) for ii, per in enumerate(self.period_list)])

    # --> need to sort the data into a useful fashion such that each station
    #     is an mt object
    data_dict = {}
    z_dummy = np.zeros((len(self.period_list), 2, 2), dtype='complex')
    t_dummy = np.zeros((len(self.period_list), 1, 2), dtype='complex')

    index_dict = {'zxx': (0, 0), 'zxy':(0, 1), 'zyx':(1, 0), 'zyy':(1, 1),
                  'tx':(0, 0), 'ty':(0, 1)}

    # dictionary for true false if station data (lat, lon, elev, etc)
    # has been filled already so we don't rewrite it each time
    tf_dict = {}
    for station in station_list:
        data_dict[station] = mt.MT()
        data_dict[station].Z = mtz.Z(z_array=z_dummy.copy(),
                                     zerr_array=z_dummy.copy().real,
                                     freq=1./self.period_list)
        data_dict[station].Tipper = mtz.Tipper(tipper_array=t_dummy.copy(),
                                               tippererr_array=t_dummy.copy().real,
                                               freq=1./self.period_list)
        # make sure that the station data starts out with false to fill
        # the data later
        tf_dict[station] = False

    # fill in the data for each station
    for dd in data_list:
        # get the period index from the data line
        p_index = period_dict[dd[0]]
        # get the component index from the data line
        ii, jj = index_dict[dd[7].lower()]

        # if the station data has not been filled yet, fill it
        if tf_dict[dd[1]] == False:
            data_dict[dd[1]].lat = dd[2]
            data_dict[dd[1]].lon = dd[3]
            data_dict[dd[1]].grid_north = dd[4]
            data_dict[dd[1]].grid_east = dd[5]
            data_dict[dd[1]].grid_elev = dd[6]
            data_dict[dd[1]].station = dd[1]
            tf_dict[dd[1]] = True

        # fill in the impedance tensor with appropriate values
        if dd[7].find('Z') == 0:
            z_err = dd[10]
            if self.wave_sign_impedance == '+':
                z_value = dd[8]+1j*dd[9]
            elif self.wave_sign_impedance == '-':
                z_value = dd[8]-1j*dd[9]
            if self.units == 'ohm':
                # 796 converts ohm back to [mV/km]/[nT]
                z_value *= 796.
                z_err *= 796.

            data_dict[dd[1]].Z.z[p_index, ii, jj] = z_value
            data_dict[dd[1]].Z.zerr[p_index, ii, jj] = z_err
        # fill in tipper with appropriate values
        elif dd[7].find('T') == 0:
            if self.wave_sign_tipper == '+':
                data_dict[dd[1]].Tipper.tipper[p_index, ii, jj] = dd[8]+1j*dd[9]
            elif self.wave_sign_tipper == '-':
                data_dict[dd[1]].Tipper.tipper[p_index, ii, jj] = dd[8]-1j*dd[9]
            data_dict[dd[1]].Tipper.tippererr[p_index, ii, jj] = dd[10]

    # make mt_dict an attribute for easier manipulation later
    self.mt_dict = data_dict

    ns = len(self.mt_dict.keys())
    nf = len(self.period_list)
    self._set_dtype((nf, 2, 2), (nf, 1, 2))
    self.data_array = np.zeros(ns, dtype=self._dtype)

    # Be sure to caclulate invariants and phase tensor for each station
    for ii, s_key in enumerate(sorted(self.mt_dict.keys())):
        mt_obj = self.mt_dict[s_key]

        self.mt_dict[s_key].zinv.compute_invariants()
        self.mt_dict[s_key].pt.set_z_object(mt_obj.Z)
        self.mt_dict[s_key].Tipper._compute_amp_phase()
        self.mt_dict[s_key].Tipper._compute_mag_direction()

        self.data_array[ii]['station'] = mt_obj.station
        self.data_array[ii]['lat'] = mt_obj.lat
        self.data_array[ii]['lon'] = mt_obj.lon
        self.data_array[ii]['east'] = mt_obj.east
        self.data_array[ii]['north'] = mt_obj.north
        self.data_array[ii]['elev'] = mt_obj.grid_elev
        self.data_array[ii]['rel_east'] = mt_obj.grid_east
        self.data_array[ii]['rel_north'] = mt_obj.grid_north
        self.data_array[ii]['z'][:] = mt_obj.Z.z
        self.data_array[ii]['z_err'][:] = mt_obj.Z.zerr
        self.data_array[ii]['tip'][:] = mt_obj.Tipper.tipper
        self.data_array[ii]['tip_err'][:] = mt_obj.Tipper.tippererr
def write_vtk_station_file(self, vtk_save_path=None,
                           vtk_fn_basename='ModEM_stations'):
    """
    write a vtk file for station locations.  For now this in relative
    coordinates.

    Arguments:
    -------------
        **vtk_save_path** : string
                            directory to save vtk file to.
                            *default* is Model.save_path
        **vtk_fn_basename** : string
                              filename basename of vtk file
                              *default* is ModEM_stations, evtk will add
                              on the extension .vtu
    """
    # BUG FIX: the original branches were swapped -- it used
    # self.save_path when vtk_save_path WAS given, and joined onto None
    # (TypeError) when it was not.
    if vtk_save_path is None:
        vtk_fn = os.path.join(self.save_path, vtk_fn_basename)
    else:
        vtk_fn = os.path.join(vtk_save_path, vtk_fn_basename)

    pointsToVTK(vtk_fn,
                self.station_locations['rel_north'],
                self.station_locations['rel_east'],
                -self.station_locations['elev'],
                pointData={'elevation': self.station_locations['elev']})

    print('Wrote file to {0}'.format(vtk_fn))
#==============================================================================
# mesh class
#==============================================================================
class Model(object):
"""
make and read a FE mesh grid
The mesh assumes the coordinate system where:
x == North
y == East
z == + down
All dimensions are in meters.
:Example 1 --> create data file first then model file: ::
>>> import mtpy.modeling.modem as modem
>>> import os
>>> #1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi)
for edi in os.listdir(edi_path)
>>> ... if edi.find('.edi') > 0]
>>> #2) create data file
>>> md = modem.Data(edi_list)
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
>>> #3) make a grid from the stations themselves with 200m cell spacing
>>> mmesh = modem.Model(Data=md, cell_size_east=200,
cell_size_north=200)
>>> mmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> msmesh.plot_mesh()
>>> # all is good write the mesh file
>>> msmesh.write_model_file(save_path=r"/home/modem/Inv1")
:Example 2 --> Rotate Mesh: ::
>>> mmesh.mesh_rotation_angle = 60
>>> mmesh.make_mesh()
..note:: ModEM assumes all coordinates are relative to North and East, and
does not accommodate mesh rotations, therefore, here the rotation
is of the stations, which essentially does the same thing. You
will need to rotate you data to align with the 'new' coordinate
system.
==================== ======================================================
Attributes Description
==================== ======================================================
cell_size_east mesh block width in east direction
*default* is 500
cell_size_north mesh block width in north direction
*default* is 500
edi_list list of .edi files to invert for
grid_east overall distance of grid nodes in east direction
grid_north overall distance of grid nodes in north direction
grid_z overall distance of grid nodes in z direction
model_fn full path to initial file name
n_layers total number of vertical layers in model
nodes_east relative distance between nodes in east direction
nodes_north relative distance between nodes in north direction
nodes_z relative distance between nodes in east direction
pad_east number of cells for padding on E and W sides
*default* is 7
pad_north number of cells for padding on S and N sides
*default* is 7
pad_root_east padding cells E & W will be pad_root_east**(x)
pad_root_north padding cells N & S will be pad_root_north**(x)
pad_z number of cells for padding at bottom
*default* is 4
res_list list of resistivity values for starting model
res_model starting resistivity model
mesh_rotation_angle Angle to rotate the grid to. Angle is measured
positve clockwise assuming North is 0 and east is 90.
*default* is None
save_path path to save file to
station_fn full path to station file
station_locations location of stations
title title in initial file
z1_layer first layer thickness
z_bottom absolute bottom of the model *default* is 300,000
z_target_depth Depth of deepest target, *default* is 50,000
_utm_grid_size_east size of a UTM grid in east direction.
*default* is 640000 meters
_utm_grid_size_north size of a UTM grid in north direction.
*default* is 888960 meters
==================== ======================================================
..note:: If the survey steps across multiple UTM zones, then a
distance will be added to the stations to place them in
the correct location. This distance is
_utm_grid_size_north and _utm_grid_size_east. You should
change these parameters to place the locations in the proper
spot as grid distances and overlaps change over the globe.
==================== ======================================================
Methods Description
==================== ======================================================
make_mesh makes a mesh from the given specifications
plot_mesh plots mesh to make sure everything is good
write_initial_file writes an initial model file that includes the mesh
==================== ======================================================
"""
def __init__(self, **kwargs):
    """Initialise the model/mesh with defaults, overridable by keyword."""
    self.Data = kwargs.get('Data', None)

    # size of cells within station area in meters
    self.cell_size_east = kwargs.get('cell_size_east', 500)
    self.cell_size_north = kwargs.get('cell_size_north', 500)

    # number of padding cells on each side / below
    self.pad_east = kwargs.get('pad_east', 7)
    self.pad_north = kwargs.get('pad_north', 7)
    self.pad_z = kwargs.get('pad_z', 4)

    # geometric stretch factors for the padding cells
    self.pad_stretch_h = kwargs.get('pad_stretch_h', 1.2)
    self.pad_stretch_v = kwargs.get('pad_stretch_v', 1.2)

    # vertical discretisation
    self.z1_layer = kwargs.get('z1_layer', 10)
    self.z_target_depth = kwargs.get('z_target_depth', 50000)
    self.z_bottom = kwargs.get('z_bottom', 300000)
    self.n_layers = kwargs.get('n_layers', 30)
    self.n_airlayers = kwargs.get('n_airlayers', 0)

    # sea level in grid_z coordinates. Auto adjusts when topography read in
    self.sea_level = 0.

    # strike angle to rotate grid to
    self.mesh_rotation_angle = kwargs.get('mesh_rotation_angle', 0)

    # --> attributes to be calculated
    # station information comes from the Data object when given
    if self.Data is not None:
        self.station_locations = self.Data.station_locations
    else:
        self.station_locations = None

    # grid nodes (relative spacings)
    self.nodes_east = None
    self.nodes_north = None
    self.nodes_z = None

    # grid locations (cumulative distances)
    self.grid_east = None
    self.grid_north = None
    self.grid_z = None

    # dictionary to contain any surfaces (e.g. topography)
    self.surfaces = {}

    # size of a utm grid
    self._utm_grid_size_north = 888960.0
    self._utm_grid_size_east = 640000.0
    self._utm_cross = False
    self._utm_ellipsoid = 23

    # resistivity model
    self.res_model = kwargs.get('res_model', None)
    self.grid_center = None

    # initial file bookkeeping; an explicit model_fn overrides
    # save_path / basename
    self.model_fn = kwargs.get('model_fn', None)
    self.save_path = kwargs.get('save_path', None)
    self.model_fn_basename = kwargs.get('model_fn_basename',
                                        'ModEM_Model.ws')
    if self.model_fn is not None:
        self.save_path = os.path.dirname(self.model_fn)
        self.model_fn_basename = os.path.basename(self.model_fn)

    self.title = 'Model File written by MTpy.modeling.modem'
    self.res_scale = kwargs.get('res_scale', 'loge')
# def get_station_locations(self):
# """
# get the station locations from lats and lons
# """
#
# #if station locations are not input read from the edi files
# if self.station_locations is None:
# if self.edi_list is None:
# raise AttributeError('edi_list is None, need to input a list of '
# 'edi files to read in.')
#
# n_stations = len(self.edi_list)
#
# if n_stations == 0:
# raise ModEMError('No .edi files in edi_list, please check '
# 'file locations.')
#
# #make a structured array to put station location information into
# self.station_locations = np.zeros(n_stations,
# dtype=[('station','|S10'),
# ('lat', np.float),
# ('lon', np.float),
# ('east', np.float),
# ('north', np.float),
# ('zone', '|S4'),
# ('rel_east', np.float),
# ('rel_north', np.float),
# ('elev', np.float)])
# #get station locations in meters
# for ii, edi in enumerate(self.edi_list):
# mt_obj = mt.MT(edi)
# self.station_locations[ii]['lat'] = mt_obj.lat
# self.station_locations[ii]['lon'] = mt_obj.lon
# self.station_locations[ii]['station'] = mt_obj.station
# self.station_locations[ii]['east'] = mt_obj.east
# self.station_locations[ii]['north'] = mt_obj.north
# self.station_locations[ii]['elev'] = mt_obj.elev
# self.station_locations[ii]['zone'] = mt_obj.utm_zone
#
#
# # try to use pyproj if desired, if not then have to use inbuilt
# # projection module but may give bad results if crossing more than one zone
# if self.epsg is not None:
# use_pyproj=True
# else:
# use_pyproj=False
#
# if use_pyproj:
# try:
# project_sites2(self,self.station_locations)
# except ImportError:
# use_pyproj=False
# errormessage = "Error loading pyproj"
# if self.epsg is None:
# use_pyproj=False
# errormessage = "Couldn't find epsg, please define manually"
# # warning message
# if not use_pyproj:
# print errormessage
#
#
#
# if not use_pyproj:
# project_sites(self,self.station_locations)
#
#
#
# #remove the average distance to get coordinates in a relative space
# self.station_locations['rel_east'] = self.station_locations['east']-\
# self.station_locations['east'].mean()
# self.station_locations['rel_north'] = self.station_locations['north']-\
# self.station_locations['north'].mean()
#
# #--> rotate grid if necessary
# #to do this rotate the station locations because ModEM assumes the
# #input mesh is a lateral grid.
# #needs to be 90 - because North is assumed to be 0 but the rotation
# #matrix assumes that E is 0.
# if self.mesh_rotation_angle != 0:
# cos_ang = np.cos(np.deg2rad(self.mesh_rotation_angle))
# sin_ang = np.sin(np.deg2rad(self.mesh_rotation_angle))
# rot_matrix = np.matrix(np.array([[cos_ang, sin_ang],
# [-sin_ang, cos_ang]]))
#
# coords = np.array([self.station_locations['rel_east'],
# self.station_locations['rel_north']])
#
# #rotate the relative station locations
# new_coords = np.array(np.dot(rot_matrix, coords))
#
# self.station_locations['rel_east'][:] = new_coords[0, :]
# self.station_locations['rel_north'][:] = new_coords[1, :]
#
# print 'Rotated stations by {0:.1f} deg clockwise from N'.format(
# self.mesh_rotation_angle)
#
# #translate the stations so they are relative to 0,0
# east_center = (self.station_locations['rel_east'].max()-
# np.abs(self.station_locations['rel_east'].min()))/2
# north_center = (self.station_locations['rel_north'].max()-
# np.abs(self.station_locations['rel_north'].min()))/2
#
# #remove the average distance to get coordinates in a relative space
# self.station_locations['rel_east'] -= east_center
# self.station_locations['rel_north'] -= north_center
def make_mesh(self):
"""
create finite element mesh according to parameters set.
The mesh is built by first finding the center of the station area.
Then cells are added in the north and east direction with width
cell_size_east and cell_size_north to the extremeties of the station
area. Padding cells are then added to extend the model to reduce
edge effects. The number of cells are pad_east and pad_north and the
increase in size is by pad_root_east and pad_root_north. The station
locations are then computed as the center of the nearest cell as
required by the code.
The vertical cells are built to increase in size exponentially with
depth. The first cell depth is first_layer_thickness and should be
about 1/10th the shortest skin depth. The layers then increase
on a log scale to z_target_depth. Then the model is
padded with pad_z number of cells to extend the depth of the model.
padding = np.round(cell_size_east*pad_root_east**np.arange(start=.5,
stop=3, step=3./pad_east))+west
"""
# self.get_station_locations()
#find the edges of the grid
west = self.station_locations['rel_east'].min()-self.cell_size_east*3/2.
east = self.station_locations['rel_east'].max()+self.cell_size_east*3/2.
south = self.station_locations['rel_north'].min()-self.cell_size_north*3/2.
north = self.station_locations['rel_north'].max()+self.cell_size_north*3/2.
west = np.round(west, -2)
east= np.round(east, -2)
south= np.round(south, -2)
north = np.round(north, -2)
#-------make a grid around the stations from the parameters above------
#--> make grid in east-west direction
#cells within station area
east_gridr = np.arange(start=west, stop=east+self.cell_size_east,
step=self.cell_size_east)
east_gridr -= np.mean(east_gridr)
#padding cells in the east-west direction
for ii in range(1,self.pad_east+1):
east_0 = float(east_gridr[-1])
west_0 = float(east_gridr[0])
add_size = np.round(self.cell_size_east*self.pad_stretch_h*ii, -2)
pad_w = west_0-add_size
pad_e = east_0+add_size
east_gridr = np.insert(east_gridr, 0, pad_w)
east_gridr = np.append(east_gridr, pad_e)
#--> need to make sure none of the stations lie on the nodes
for s_east in sorted(self.station_locations['rel_east']):
try:
node_index = np.where(abs(s_east-east_gridr) <
.02*self.cell_size_east)[0][0]
if s_east-east_gridr[node_index] > 0:
east_gridr[node_index] -= .02*self.cell_size_east
elif s_east-east_gridr[node_index] < 0:
east_gridr[node_index] += .02*self.cell_size_east
except IndexError:
continue
#--> make grid in north-south direction
#N-S cells with in station area
north_gridr = np.arange(start=south, stop=north+self.cell_size_north,
step=self.cell_size_north)
north_gridr -= np.mean(north_gridr)
#padding cells in the east-west direction
for ii in range(1, self.pad_north+1):
south_0 = float(north_gridr[0])
north_0 = float(north_gridr[-1])
add_size = np.round(self.cell_size_north*self.pad_stretch_h*ii, -2)
pad_s = south_0-add_size
pad_n = north_0+add_size
north_gridr = np.insert(north_gridr, 0, pad_s)
north_gridr = np.append(north_gridr, pad_n)
#--> need to make sure none of the stations lie on the nodes
for s_north in sorted(self.station_locations['rel_north']):
try:
node_index = np.where(abs(s_north-north_gridr) <
.02*self.cell_size_north)[0][0]
if s_north-north_gridr[node_index] > 0:
north_gridr[node_index] -= .02*self.cell_size_north
elif s_north-north_gridr[node_index] < 0:
north_gridr[node_index] += .02*self.cell_size_north
except IndexError:
continue
#--> make depth grid
log_z = np.logspace(np.log10(self.z1_layer),
np.log10(self.z_target_depth),
num=self.n_layers-self.pad_z-self.n_airlayers)
z_nodes = np.array([zz-zz%10**np.floor(np.log10(zz)) for zz in
log_z])
# index of top of padding
itp = len(z_nodes) - 1
#padding cells in the vertical direction
for ii in range(1, self.pad_z+1):
z_0 = np.float(z_nodes[itp])
pad_d = np.round(z_0*self.pad_stretch_v*ii, -2)
z_nodes = np.append(z_nodes, pad_d)
# add air layers and define ground surface level.
# initial layer thickness is same as z1_layer
z_nodes = np.hstack([[self.z1_layer]*self.n_airlayers,z_nodes])
#make an array of absolute values
z_grid = np.array([z_nodes[:ii].sum() for ii in range(z_nodes.shape[0]+1)])
# z_grid point at zero level
self.sea_level = z_grid[self.n_airlayers]
#---Need to make an array of the individual cell dimensions for
# modem
east_nodes = east_gridr[1:]-east_gridr[:-1]
north_nodes = north_gridr[1:]-north_gridr[:-1]
#compute grid center
center_east = -east_nodes.__abs__().sum()/2
center_north = -north_nodes.__abs__().sum()/2
center_z = 0
self.grid_center = np.array([center_north, center_east, center_z])
#make nodes attributes
self.nodes_east = east_nodes
self.nodes_north = north_nodes
self.nodes_z = z_nodes
self.grid_east = east_gridr
self.grid_north = north_gridr
self.grid_z = z_grid
#--> print out useful information
print '-'*15
print ' Number of stations = {0}'.format(len(self.station_locations))
print ' Dimensions: '
print ' e-w = {0}'.format(east_gridr.shape[0])
print ' n-s = {0}'.format(north_gridr.shape[0])
print ' z = {0} (including 7 air layers)'.format(z_grid.shape[0])
print ' Extensions: '
print ' e-w = {0:.1f} (m)'.format(east_nodes.__abs__().sum())
print ' n-s = {0:.1f} (m)'.format(north_nodes.__abs__().sum())
print ' 0-z = {0:.1f} (m)'.format(self.nodes_z.__abs__().sum())
print ' Stations rotated by: {0:.1f} deg clockwise positive from N'.format(self.mesh_rotation_angle)
print ''
print ' ** Note ModEM does not accommodate mesh rotations, it assumes'
print ' all coordinates are aligned to geographic N, E'
print ' therefore rotating the stations will have a similar effect'
print ' as rotating the mesh.'
print '-'*15
if self._utm_cross is True:
print '{0} {1} {2}'.format('-'*25, 'NOTE', '-'*25)
print ' Survey crosses UTM zones, be sure that stations'
print ' are properly located, if they are not, adjust parameters'
print ' _utm_grid_size_east and _utm_grid_size_north.'
print ' these are in meters and represent the utm grid size'
print ' Example: '
print ' >>> modem_model._utm_grid_size_east = 644000'
print ' >>> modem_model.make_mesh()'
print ''
print '-'*56
def add_topography(self,topographyfile=None,topographyarray=None,interp_method='nearest',
air_resistivity=1e17,sea_resistivity=0.3):
"""
"""
# first, get surface data
if topographyfile is not None:
self.project_surface(surfacefile=topographyfile,
surfacename='topography',
method=interp_method)
if topographyarray is not None:
self.surface_dict['topography'] = topographyarray
if self.n_airlayers > 0:
# cell size is topomax/n_airlayers, rounded to nearest 1 s.f.
cs = np.amax(self.surface_dict['topography'])/float(self.n_airlayers)
# cs = np.ceil(0.1*cs/10.**int(np.log10(cs)))*10.**(int(np.log10(cs))+1)
cs = np.ceil(cs)
# add air layers
new_airlayers = np.linspace(0,self.n_airlayers,self.n_airlayers+1)*cs
add_z = new_airlayers[-1] - self.grid_z[self.n_airlayers]
self.grid_z[self.n_airlayers+1:] += add_z
self.grid_z[:self.n_airlayers+1] = new_airlayers
# adjust the nodes
self.nodes_z = self.grid_z[1:] - self.grid_z[:-1]
# adjust sea level
self.sea_level = self.grid_z[self.n_airlayers]
# assign topography
self.assign_resistivity_from_surfacedata('topography',air_resistivity,where='above')
else:
print "Cannot add topography, no air layers provided. Proceeding to add bathymetry"
# assign sea water
# first make a mask array, this array can be passed through to covariance
self.covariance_mask = np.ones_like(self.res_model)
# assign model areas below sea level but above topography, as seawater
# get grid centres
gcz = np.mean([self.grid_z[:-1],self.grid_z[1:]],axis=0)
# convert topography to local grid coordinates
topo = self.sea_level - self.surface_dict['topography']
# assign values
for j in range(len(self.res_model)):
for i in range(len(self.res_model[j])):
# assign all sites above the topography to air
ii1 = np.where(gcz <= topo[j,i])
if len(ii1) > 0:
self.covariance_mask[j,i,ii1[0]] = 0.
# assign sea water to covariance and model res arrays
ii = np.where(np.all([gcz > self.sea_level,gcz <= topo[j,i]],axis=0))
if len(ii) > 0:
self.covariance_mask[j,i,ii[0]] = 9.
self.res_model[j,i,ii[0]] = sea_resistivity
self.covariance_mask = self.covariance_mask[::-1]
self.project_stations_on_topography()
def project_surface(self,surfacefile=None,surface=None,surfacename=None,
surface_epsg=4326,method='nearest'):
"""
project a surface to the model grid and add resulting elevation data
to a dictionary called surface_dict.
**returns**
nothing returned, but surface data are added to surface_dict under
the key given by surfacename.
**inputs**
choose to provide either surface_file (path to file) or surface (tuple).
If both are provided then surface tuple takes priority.
surface elevations are positive up, and relative to sea level.
surface file format is:
ncols 3601
nrows 3601
xllcorner -119.00013888889 (longitude of lower left)
yllcorner 36.999861111111 (latitude of lower left)
cellsize 0.00027777777777778
NODATA_value -9999
elevation data W --> E
N
|
V
S
Alternatively, provide a tuple with:
(lon,lat,elevation)
where elevation is a 2D array (shape (ny,nx)) containing elevation
points (order S -> N, W -> E)
and lon, lat are either 1D arrays containing list of longitudes and
latitudes (in the case of a regular grid) or 2D arrays with same shape
as elevation array containing longitude and latitude of each point.
other inputs:
surfacename = name of surface for putting into dictionary
surface_epsg = epsg number of input surface, default is 4326 for lat/lon(wgs84)
method = interpolation method. Default is 'nearest', if model grid is
dense compared to surface points then choose 'linear' or 'cubic'
"""
# initialise a dictionary to contain the surfaces
if not hasattr(self,'surface_dict'):
self.surface_dict = {}
# read the surface data in from ascii if surface not provided
if surface is None:
surface = read_surface_ascii(surfacefile)
lon,lat,elev = surface
# if lat/lon provided as a 1D list, convert to a 2d grid of points
if len(lon.shape) == 1:
lon,lat = np.meshgrid(lon,lat)
try:
import pyproj
p1,p2 = [pyproj.Proj(text) for text in [epsg_dict[surface_epsg][0],epsg_dict[self.Data.epsg][0]]]
xs,ys = pyproj.transform(p1,p2,lon,lat)
except ImportError:
print "pyproj not installed and other methods for projecting points not implemented yet. Please install pyproj"
except KeyError:
print "epsg not in dictionary, please add epsg and Proj4 text to epsg_dict at beginning of modem_new module"
return
# get centre position of model grid in real world coordinates
x0,y0 = [np.median(self.station_locations[dd]-self.station_locations['rel_'+dd]) for dd in ['east','north']]
# centre points of model grid in real world coordinates
xg,yg = [np.mean([arr[1:],arr[:-1]],axis=0) for arr in [self.grid_east+x0,self.grid_north+y0]]
# elevation in model grid
# first, get lat,lon points of surface grid
points = np.vstack([arr.flatten() for arr in [xs,ys]]).T
# corresponding surface elevation points
values = elev.flatten()
# xi, the model grid points to interpolate to
xi = np.vstack([arr.flatten() for arr in np.meshgrid(xg,yg)]).T
# elevation on the centre of the grid nodes
elev_mg = spi.griddata(points,values,xi,method=method).reshape(len(yg),len(xg))
# get a name for surface
if surfacename is None:
if surfacefile is not None:
surfacename = os.path.basename(surfacefile)
else:
ii = 1
surfacename = 'surface%01i'%ii
while surfacename in self.surface_dict.keys():
ii += 1
surfacename = 'surface%01i'%ii
# add surface to a dictionary of surface elevation data
self.surface_dict[surfacename] = elev_mg
def assign_resistivity_from_surfacedata(self,surfacename,resistivity_value,where='above'):
"""
assign resistivity value to all points above or below a surface
requires the surface_dict attribute to exist and contain data for
surface key (can get this information from ascii file using
project_surface)
**inputs**
surfacename = name of surface (must correspond to key in surface_dict)
resistivity_value = value to assign
where = 'above' or 'below' - assign resistivity above or below the
surface
"""
gcz = np.mean([self.grid_z[:-1],self.grid_z[1:]],axis=0)
# convert to positive down, relative to the top of the grid
surfacedata = self.sea_level - self.surface_dict[surfacename]
# define topography, so that we don't overwrite cells above topography
# first check if topography exists
if 'topography' in self.surface_dict.keys():
# second, check topography isn't the surface we're trying to assign resistivity for
if surfacename == 'topography':
topo = np.zeros_like(surfacedata)
else:
topo = self.sea_level - self.surface_dict['topography']
# if no topography, assign zeros
else:
topo = self.sea_level + np.zeros_like(surfacedata)
# assign resistivity value
for j in range(len(self.res_model)):
for i in range(len(self.res_model[j])):
if where == 'above':
ii = np.where((gcz <= surfacedata[j,i])&(gcz > topo[j,i]))[0]
else:
ii = np.where(gcz > surfacedata[j,i])[0]
self.res_model[j,i,ii] = resistivity_value
def project_stations_on_topography(self,air_resistivity=1e17):
sx = self.station_locations['rel_east']
sy = self.station_locations['rel_north']
# find index of station on grid
for sname in self.station_locations['station']:
ss = np.where(self.station_locations['station'] == sname)[0][0]
# relative locations of stations
sx,sy = self.station_locations['rel_east'][ss],self.station_locations['rel_north'][ss]
# indices of stations on model grid
sxi = np.where((sx <= self.grid_east[1:])&(sx > self.grid_east[:-1]))[0][0]
syi = np.where((sy <= self.grid_north[1:])&(sy > self.grid_north[:-1]))[0][0]
# first check if the site is in the sea
if np.any(self.covariance_mask[::-1][syi,sxi]==9):
szi = np.amax(np.where(self.covariance_mask[::-1][syi,sxi]==9)[0])
# second, check if there are any air cells
elif np.any(self.res_model[syi,sxi] > 0.95*air_resistivity):
szi = np.amax(np.where((self.res_model[syi,sxi] > 0.95*air_resistivity))[0])
# otherwise place station at the top of the model
else:
szi = 0
# assign topography value
topoval = self.grid_z[szi]
self.station_locations['elev'][ss] = topoval
self.Data.data_array['elev'][ss] = topoval
self.Data.station_locations = self.station_locations
self.Data.write_data_file(fill=False)
def plot_mesh(self, east_limits=None, north_limits=None, z_limits=None,
**kwargs):
"""
Arguments:
----------
**east_limits** : tuple (xmin,xmax)
plot min and max distances in meters for the
E-W direction. If None, the east_limits
will be set to furthest stations east and west.
*default* is None
**north_limits** : tuple (ymin,ymax)
plot min and max distances in meters for the
N-S direction. If None, the north_limits
will be set to furthest stations north and south.
*default* is None
**z_limits** : tuple (zmin,zmax)
plot min and max distances in meters for the
vertical direction. If None, the z_limits is
set to the number of layers. Z is positive down
*default* is None
"""
fig_size = kwargs.pop('fig_size', [6, 6])
fig_dpi = kwargs.pop('fig_dpi', 300)
fig_num = kwargs.pop('fig_num', 1)
station_marker = kwargs.pop('station_marker', 'v')
marker_color = kwargs.pop('station_color', 'b')
marker_size = kwargs.pop('marker_size', 2)
line_color = kwargs.pop('line_color', 'k')
line_width = kwargs.pop('line_width', .5)
plt.rcParams['figure.subplot.hspace'] = .3
plt.rcParams['figure.subplot.wspace'] = .3
plt.rcParams['figure.subplot.left'] = .12
plt.rcParams['font.size'] = 7
fig = plt.figure(fig_num, figsize=fig_size, dpi=fig_dpi)
plt.clf()
#make a rotation matrix to rotate data
#cos_ang = np.cos(np.deg2rad(self.mesh_rotation_angle))
#sin_ang = np.sin(np.deg2rad(self.mesh_rotation_angle))
#turns out ModEM has not accomodated rotation of the grid, so for
#now we will not rotate anything.
cos_ang = 1
sin_ang = 0
#--->plot map view
ax1 = fig.add_subplot(1, 2, 1, aspect='equal')
#plot station locations
plot_east = self.station_locations['rel_east']
plot_north = self.station_locations['rel_north']
ax1.scatter(plot_east,
plot_north,
marker=station_marker,
c=marker_color,
s=marker_size)
east_line_xlist = []
east_line_ylist = []
north_min = self.grid_north.min()
north_max = self.grid_north.max()
for xx in self.grid_east:
east_line_xlist.extend([xx*cos_ang+north_min*sin_ang,
xx*cos_ang+north_max*sin_ang])
east_line_xlist.append(None)
east_line_ylist.extend([-xx*sin_ang+north_min*cos_ang,
-xx*sin_ang+north_max*cos_ang])
east_line_ylist.append(None)
ax1.plot(east_line_xlist,
east_line_ylist,
lw=line_width,
color=line_color)
north_line_xlist = []
north_line_ylist = []
east_max = self.grid_east.max()
east_min = self.grid_east.min()
for yy in self.grid_north:
north_line_xlist.extend([east_min*cos_ang+yy*sin_ang,
east_max*cos_ang+yy*sin_ang])
north_line_xlist.append(None)
north_line_ylist.extend([-east_min*sin_ang+yy*cos_ang,
-east_max*sin_ang+yy*cos_ang])
north_line_ylist.append(None)
ax1.plot(north_line_xlist,
north_line_ylist,
lw=line_width,
color=line_color)
if east_limits == None:
ax1.set_xlim(plot_east.min()-10*self.cell_size_east,
plot_east.max()+10*self.cell_size_east)
else:
ax1.set_xlim(east_limits)
if north_limits == None:
ax1.set_ylim(plot_north.min()-10*self.cell_size_north,
plot_north.max()+ 10*self.cell_size_east)
else:
ax1.set_ylim(north_limits)
ax1.set_ylabel('Northing (m)', fontdict={'size':9,'weight':'bold'})
ax1.set_xlabel('Easting (m)', fontdict={'size':9,'weight':'bold'})
##----plot depth view
ax2 = fig.add_subplot(1, 2, 2, aspect='auto', sharex=ax1)
#plot the grid
east_line_xlist = []
east_line_ylist = []
for xx in self.grid_east:
east_line_xlist.extend([xx, xx])
east_line_xlist.append(None)
east_line_ylist.extend([0,
self.grid_z.max()])
east_line_ylist.append(None)
ax2.plot(east_line_xlist,
east_line_ylist,
lw=line_width,
color=line_color)
z_line_xlist = []
z_line_ylist = []
for zz in self.grid_z:
z_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
z_line_xlist.append(None)
z_line_ylist.extend([zz, zz])
z_line_ylist.append(None)
ax2.plot(z_line_xlist,
z_line_ylist,
lw=line_width,
color=line_color)
#--> plot stations
ax2.scatter(plot_east,
[0]*self.station_locations.shape[0],
marker=station_marker,
c=marker_color,
s=marker_size)
if z_limits == None:
ax2.set_ylim(self.z_target_depth, -200)
else:
ax2.set_ylim(z_limits)
if east_limits == None:
ax1.set_xlim(plot_east.min()-10*self.cell_size_east,
plot_east.max()+10*self.cell_size_east)
else:
ax1.set_xlim(east_limits)
ax2.set_ylabel('Depth (m)', fontdict={'size':9, 'weight':'bold'})
ax2.set_xlabel('Easting (m)', fontdict={'size':9, 'weight':'bold'})
plt.show()
def write_model_file(self, **kwargs):
"""
will write an initial file for ModEM.
Note that x is assumed to be S --> N, y is assumed to be W --> E and
z is positive downwards. This means that index [0, 0, 0] is the
southwest corner of the first layer. Therefore if you build a model
by hand the layer block will look as it should in map view.
Also, the xgrid, ygrid and zgrid are assumed to be the relative
distance between neighboring nodes. This is needed because wsinv3d
builds the model from the bottom SW corner assuming the cell width
from the init file.
Key Word Arguments:
----------------------
**nodes_north** : np.array(nx)
block dimensions (m) in the N-S direction.
**Note** that the code reads the grid assuming that
index=0 is the southern most point.
**nodes_east** : np.array(ny)
block dimensions (m) in the E-W direction.
**Note** that the code reads in the grid assuming that
index=0 is the western most point.
**nodes_z** : np.array(nz)
block dimensions (m) in the vertical direction.
This is positive downwards.
**save_path** : string
Path to where the initial file will be saved
to savepath/model_fn_basename
**model_fn_basename** : string
basename to save file to
*default* is ModEM_Model.ws
file is saved at savepath/model_fn_basename
**title** : string
Title that goes into the first line
*default* is Model File written by MTpy.modeling.modem
**res_model** : np.array((nx,ny,nz))
Prior resistivity model.
.. note:: again that the modeling code
assumes that the first row it reads in is the southern
most row and the first column it reads in is the
western most column. Similarly, the first plane it
reads in is the Earth's surface.
**res_scale** : [ 'loge' | 'log' | 'log10' | 'linear' ]
scale of resistivity. In the ModEM code it
converts everything to Loge,
*default* is 'loge'
"""
keys = ['nodes_east', 'nodes_north', 'nodes_z', 'title',
'res_model', 'save_path', 'model_fn', 'model_fn_basename']
for key in keys:
try:
setattr(self, key, kwargs[key])
except KeyError:
if self.__dict__[key] is None:
pass
if self.save_path is not None:
self.model_fn = os.path.join(self.save_path,
self.model_fn_basename)
if self.model_fn is None:
if self.save_path is None:
self.save_path = os.getcwd()
self.model_fn = os.path.join(self.save_path,
self.model_fn_basename)
elif os.path.isdir(self.save_path) == True:
self.model_fn = os.path.join(self.save_path,
self.model_fn_basename)
else:
self.save_path = os.path.dirname(self.save_path)
self.model_fn= self.save_path
if self.res_model is None or type(self.res_model) is float or\
type(self.res_model) is int:
res_model = np.zeros((self.nodes_north.shape[0],
self.nodes_east.shape[0],
self.nodes_z.shape[0]))
if self.res_model is None:
res_model[:, :, :] = 100.0
else:
res_model[:, :, :] = self.res_model
self.res_model = res_model
if not hasattr(self,'covariance_mask'):
self.covariance_mask = np.ones_like(self.res_model)
#--> write file
ifid = file(self.model_fn, 'w')
ifid.write('# {0}\n'.format(self.title.upper()))
ifid.write('{0:>5}{1:>5}{2:>5}{3:>5} {4}\n'.format(self.nodes_north.shape[0],
self.nodes_east.shape[0],
self.nodes_z.shape[0],
0,
self.res_scale.upper()))
#write S --> N node block
for ii, nnode in enumerate(self.nodes_north):
ifid.write('{0:>12.3f}'.format(abs(nnode)))
ifid.write('\n')
#write W --> E node block
for jj, enode in enumerate(self.nodes_east):
ifid.write('{0:>12.3f}'.format(abs(enode)))
ifid.write('\n')
#write top --> bottom node block
for kk, zz in enumerate(self.nodes_z):
ifid.write('{0:>12.3f}'.format(abs(zz)))
ifid.write('\n')
#write the resistivity in log e format
if self.res_scale.lower() == 'loge':
write_res_model = np.log(self.res_model[::-1, :, :])
elif self.res_scale.lower() == 'log' or \
self.res_scale.lower() == 'log10':
write_res_model = np.log10(self.res_model[::-1, :, :])
elif self.res_scale.lower() == 'linear':
write_res_model = self.res_model[::-1, :, :]
#write out the layers from resmodel
for zz in range(self.nodes_z.shape[0]):
ifid.write('\n')
for ee in range(self.nodes_east.shape[0]):
for nn in range(self.nodes_north.shape[0]):
ifid.write('{0:>13.5E}'.format(write_res_model[nn, ee, zz]))
ifid.write('\n')
if self.grid_center is None:
#compute grid center
center_east = -self.nodes_east.__abs__().sum()/2
center_north = -self.nodes_north.__abs__().sum()/2
center_z = 0
self.grid_center = np.array([center_north, center_east, center_z])
ifid.write('\n{0:>16.3f}{1:>16.3f}{2:>16.3f}\n'.format(self.grid_center[0],
self.grid_center[1], self.grid_center[2]))
if self.mesh_rotation_angle is None:
ifid.write('{0:>9.3f}\n'.format(0))
else:
ifid.write('{0:>9.3f}\n'.format(self.mesh_rotation_angle))
ifid.close()
print 'Wrote file to: {0}'.format(self.model_fn)
def read_model_file(self, model_fn=None):
"""
read an initial file and return the pertinent information including
grid positions in coordinates relative to the center point (0,0) and
starting model.
Note that the way the model file is output, it seems is that the
blocks are setup as
ModEM: WS:
---------- -----
0-----> N_north 0-------->N_east
| |
| |
V V
N_east N_north
Arguments:
----------
**model_fn** : full path to initializing file.
Outputs:
--------
**nodes_north** : np.array(nx)
array of nodes in S --> N direction
**nodes_east** : np.array(ny)
array of nodes in the W --> E direction
**nodes_z** : np.array(nz)
array of nodes in vertical direction positive downwards
**res_model** : dictionary
dictionary of the starting model with keys as layers
**res_list** : list
list of resistivity values in the model
**title** : string
title string
"""
if model_fn is not None:
self.model_fn = model_fn
if self.model_fn is None:
raise ModEMError('model_fn is None, input a model file name')
if os.path.isfile(self.model_fn) is None:
raise ModEMError('Cannot find {0}, check path'.format(self.model_fn))
self.save_path = os.path.dirname(self.model_fn)
ifid = file(self.model_fn, 'r')
ilines = ifid.readlines()
ifid.close()
self.title = ilines[0].strip()
#get size of dimensions, remembering that x is N-S, y is E-W, z is + down
nsize = ilines[1].strip().split()
n_north = int(nsize[0])
n_east = int(nsize[1])
n_z = int(nsize[2])
log_yn = nsize[4]
#get nodes
self.nodes_north = np.array([np.float(nn)
for nn in ilines[2].strip().split()])
self.nodes_east = np.array([np.float(nn)
for nn in ilines[3].strip().split()])
self.nodes_z = np.array([np.float(nn)
for nn in ilines[4].strip().split()])
self.res_model = np.zeros((n_north, n_east, n_z))
#get model
count_z = 0
line_index= 6
count_e = 0
while count_z < n_z:
iline = ilines[line_index].strip().split()
#blank lines spit the depth blocks, use those as a marker to
#set the layer number and start a new block
if len(iline) == 0:
count_z += 1
count_e = 0
line_index += 1
# 3D grid model files don't have a space at the end
# additional condition to account for this.
elif (len(iline) == 3)&(count_z == n_z - 1):
count_z += 1
count_e = 0
line_index += 1
#each line in the block is a line of N-->S values for an east value
else:
north_line = np.array([float(nres) for nres in
ilines[line_index].strip().split()])
# Need to be sure that the resistivity array matches
# with the grids, such that the first index is the
# furthest south
self.res_model[:, count_e, count_z] = north_line[::-1]
count_e += 1
line_index += 1
#--> get grid center and rotation angle
if len(ilines) > line_index:
for iline in ilines[line_index:]:
ilist = iline.strip().split()
#grid center
if len(ilist) == 3:
self.grid_center = np.array(ilist, dtype=np.float)
#rotation angle
elif len(ilist) == 1:
self.rotation_angle = np.float(ilist[0])
else:
pass
#--> make sure the resistivity units are in linear Ohm-m
if log_yn.lower() == 'loge':
self.res_model = np.e**self.res_model
elif log_yn.lower() == 'log' or log_yn.lower() == 'log10':
self.res_model = 10**self.res_model
#put the grids into coordinates relative to the center of the grid
self.grid_north = np.array([self.nodes_north[0:ii].sum()
for ii in range(n_north+1)])
self.grid_east = np.array([self.nodes_east[0:ii].sum()
for ii in range(n_east+1)])
self.grid_z = np.array([self.nodes_z[:ii].sum()
for ii in range(n_z+1)])
# center the grids
if self.grid_center is not None:
self.grid_north += self.grid_center[0]
self.grid_east += self.grid_center[1]
self.grid_z += self.grid_center[2]
def read_ws_model_file(self, ws_model_fn):
"""
reads in a WS3INV3D model file
"""
ws_model_obj = ws.WSModel(ws_model_fn)
ws_model_obj.read_model_file()
#set similar attributes
for ws_key in ws_model_obj.__dict__.keys():
for md_key in self.__dict__.keys():
if ws_key == md_key:
setattr(self, ws_key, ws_model_obj.__dict__[ws_key])
#compute grid center
center_east = -self.nodes_east.__abs__().sum()/2
center_north = -self.nodes_norths.__abs__().sum()/2
center_z = 0
self.grid_center = np.array([center_north, center_east, center_z])
def write_vtk_file(self, vtk_save_path=None,
vtk_fn_basename='ModEM_model_res'):
"""
write a vtk file to view in Paraview or other
Arguments:
-------------
**vtk_save_path** : string
directory to save vtk file to.
*default* is Model.save_path
**vtk_fn_basename** : string
filename basename of vtk file
*default* is ModEM_model_res, evtk will add
on the extension .vtr
"""
if vtk_save_path is not None:
vtk_fn = os.path.join(self.save_path, vtk_fn_basename)
else:
vtk_fn = os.path.join(vtk_save_path, vtk_fn_basename)
# grids need to be n+1
vtk_east = np.append(self.grid_east, 1.5*self.grid_east[-1])
vtk_north = np.append(self.grid_north, 1.5*self.grid_north[-1])
vtk_z = np.append(self.grid_z, 1.5*self.grid_z[-1])
gridToVTK(vtk_fn,
vtk_north,
vtk_east,
vtk_z,
pointData={'resistivity':self.res_model})
print 'Wrote file to {0}'.format(vtk_fn)
#==============================================================================
# Control File for inversion
#==============================================================================
class Control_Inv(object):
    """
    Read and write the ModEM inversion control file (control.inv), which
    sets how the inversion starts and how it is run.

    =================== ====================================================
    Attributes          Control-file line (default)
    =================== ====================================================
    output_fn           Model and data output file name    ('MODULAR_NLCG')
    lambda_initial      Initial damping factor lambda      (10)
    lambda_step         To update lambda divide by         (10)
    model_search_step   Initial search step in model units (1)
    rms_reset_search    Restart when rms diff is less than (2.0e-3)
    rms_target          Exit search when rms is less than  (1.05)
    lambda_exit         Exit when lambda is less than      (1.0e-4)
    max_iterations      Maximum number of iterations       (100)
    save_path           directory to save control file to  (cwd)
    fn_basename         control file basename              ('control.inv')
    control_fn          full path to control file
    =================== ====================================================
    """

    def __init__(self, **kwargs):
        self.output_fn = kwargs.pop('output_fn', 'MODULAR_NLCG')
        self.lambda_initial = kwargs.pop('lambda_initial', 10)
        self.lambda_step = kwargs.pop('lambda_step', 10)
        self.model_search_step = kwargs.pop('model_search_step', 1)
        self.rms_reset_search = kwargs.pop('rms_reset_search', 2.0e-3)
        self.rms_target = kwargs.pop('rms_target', 1.05)
        self.lambda_exit = kwargs.pop('lambda_exit', 1.0e-4)
        self.max_iterations = kwargs.pop('max_iterations', 100)
        self.save_path = kwargs.pop('save_path', os.getcwd())
        self.fn_basename = kwargs.pop('fn_basename', 'control.inv')
        self.control_fn = kwargs.pop('control_fn',
                                     os.path.join(self.save_path,
                                                  self.fn_basename))

        # control-file line headers, in file order
        self._control_keys = ['Model and data output file name',
                              'Initial damping factor lambda',
                              'To update lambda divide by',
                              'Initial search step in model units',
                              'Restart when rms diff is less than',
                              'Exit search when rms is less than',
                              'Exit when lambda is less than',
                              'Maximum number of iterations']

        self._control_dict = dict(zip(self._control_keys,
                                      [self.output_fn, self.lambda_initial,
                                       self.lambda_step,
                                       self.model_search_step,
                                       self.rms_reset_search,
                                       self.rms_target,
                                       self.lambda_exit,
                                       self.max_iterations]))

        # format specifier used for each value when writing the file
        self._string_fmt_dict = dict(zip(self._control_keys,
                                         ['<', '<.1f', '<.1f', '<.1f',
                                          '<.1e', '<.2f', '<.1e', '<.0f']))

    def write_control_file(self, control_fn=None, save_path=None,
                           fn_basename=None):
        """
        Write the control file.

        Arguments:
        ------------
            **control_fn** : string
                             full path to save control file to
                             *default* is save_path/fn_basename

            **save_path** : string
                            directory path to save control file to
                            *default* is cwd

            **fn_basename** : string
                              basename of control file
                              *default* is control.inv
        """
        if control_fn is not None:
            self.save_path = os.path.dirname(control_fn)
            self.fn_basename = os.path.basename(control_fn)

        if save_path is not None:
            self.save_path = save_path

        if fn_basename is not None:
            self.fn_basename = fn_basename

        self.control_fn = os.path.join(self.save_path, self.fn_basename)

        # refresh the dictionary in case attributes changed after __init__
        self._control_dict = dict(zip(self._control_keys,
                                      [self.output_fn, self.lambda_initial,
                                       self.lambda_step,
                                       self.model_search_step,
                                       self.rms_reset_search,
                                       self.rms_target,
                                       self.lambda_exit,
                                       self.max_iterations]))

        clines = []
        for key in self._control_keys:
            value = self._control_dict[key]
            str_fmt = self._string_fmt_dict[key]
            clines.append('{0:<35}: {1:{2}}\n'.format(key, value, str_fmt))

        # use open() + context manager instead of the Py2-only file()
        with open(self.control_fn, 'w') as cfid:
            cfid.writelines(clines)

        print('Wrote ModEM control file to {0}'.format(self.control_fn))

    def read_control_file(self, control_fn=None):
        """
        Read in a control file and set the corresponding attributes.

        Raises mtex.MTpyError_file_handling if control_fn is None or the
        file cannot be found.
        """
        if control_fn is not None:
            self.control_fn = control_fn

        if self.control_fn is None:
            raise mtex.MTpyError_file_handling('control_fn is None, input '
                                               'control file')

        if os.path.isfile(self.control_fn) is False:
            raise mtex.MTpyError_file_handling('Could not find {0}'.format(
                self.control_fn))

        self.save_path = os.path.dirname(self.control_fn)
        self.fn_basename = os.path.basename(self.control_fn)

        with open(self.control_fn, 'r') as cfid:
            clines = cfid.readlines()

        for cline in clines:
            clist = cline.strip().split(':')
            if len(clist) == 2:
                key = clist[0].strip()
                # numeric entries are stored as floats, the rest as strings
                try:
                    self._control_dict[key] = float(clist[1])
                except ValueError:
                    # BUG FIX: strip whitespace so string values (e.g. the
                    # output file name) do not keep the leading space that
                    # follows the ':' separator
                    self._control_dict[key] = clist[1].strip()

        # set attributes from the parsed dictionary
        attr_list = ['output_fn', 'lambda_initial', 'lambda_step',
                     'model_search_step', 'rms_reset_search', 'rms_target',
                     'lambda_exit', 'max_iterations']
        for key, kattr in zip(self._control_keys, attr_list):
            setattr(self, kattr, self._control_dict[key])
#==============================================================================
# Control File for inversion
#==============================================================================
class Control_Fwd(object):
    """
    Read and write the ModEM forward-solver control file (control.fwd),
    which sets the tolerances and iteration limits of the forward solver.

    =================== ====================================================
    Attributes          Control-file line (default)
    =================== ====================================================
    num_qmr_iter        Number of QMR iters per divergence correction (40)
    max_num_div_calls   Maximum number of divergence correction calls (20)
    max_num_div_iters   Maximum number of divergence correction iters (100)
    misfit_tol_fwd      Misfit tolerance for EM forward solver   (1.0e-7)
    misfit_tol_adj      Misfit tolerance for EM adjoint solver   (1.0e-7)
    misfit_tol_div      Misfit tolerance for divergence correction (1.0e-5)
    save_path           directory to save control file to  (cwd)
    fn_basename         control file basename              ('control.fwd')
    control_fn          full path to control file
    =================== ====================================================
    """

    def __init__(self, **kwargs):
        self.num_qmr_iter = kwargs.pop('num_qmr_iter', 40)
        self.max_num_div_calls = kwargs.pop('max_num_div_calls', 20)
        self.max_num_div_iters = kwargs.pop('max_num_div_iters', 100)
        self.misfit_tol_fwd = kwargs.pop('misfit_tol_fwd', 1.0e-7)
        self.misfit_tol_adj = kwargs.pop('misfit_tol_adj', 1.0e-7)
        self.misfit_tol_div = kwargs.pop('misfit_tol_div', 1.0e-5)
        self.save_path = kwargs.pop('save_path', os.getcwd())
        self.fn_basename = kwargs.pop('fn_basename', 'control.fwd')
        self.control_fn = kwargs.pop('control_fn',
                                     os.path.join(self.save_path,
                                                  self.fn_basename))

        # control-file line headers, in file order
        self._control_keys = ['Number of QMR iters per divergence correction',
                              'Maximum number of divergence correction calls',
                              'Maximum number of divergence correction iters',
                              'Misfit tolerance for EM forward solver',
                              'Misfit tolerance for EM adjoint solver',
                              'Misfit tolerance for divergence correction']

        self._control_dict = dict(zip(self._control_keys,
                                      [self.num_qmr_iter,
                                       self.max_num_div_calls,
                                       self.max_num_div_iters,
                                       self.misfit_tol_fwd,
                                       self.misfit_tol_adj,
                                       self.misfit_tol_div]))

        # format specifier used for each value when writing the file
        self._string_fmt_dict = dict(zip(self._control_keys,
                                         ['<.0f', '<.0f', '<.0f', '<.1e',
                                          '<.1e', '<.1e']))

    def write_control_file(self, control_fn=None, save_path=None,
                           fn_basename=None):
        """
        Write the control file.

        Arguments:
        ------------
            **control_fn** : string
                             full path to save control file to
                             *default* is save_path/fn_basename

            **save_path** : string
                            directory path to save control file to
                            *default* is cwd

            **fn_basename** : string
                              basename of control file
                              *default* is control.fwd
        """
        if control_fn is not None:
            self.save_path = os.path.dirname(control_fn)
            self.fn_basename = os.path.basename(control_fn)

        if save_path is not None:
            self.save_path = save_path

        if fn_basename is not None:
            self.fn_basename = fn_basename

        self.control_fn = os.path.join(self.save_path, self.fn_basename)

        # refresh the dictionary in case attributes changed after __init__
        self._control_dict = dict(zip(self._control_keys,
                                      [self.num_qmr_iter,
                                       self.max_num_div_calls,
                                       self.max_num_div_iters,
                                       self.misfit_tol_fwd,
                                       self.misfit_tol_adj,
                                       self.misfit_tol_div]))

        clines = []
        for key in self._control_keys:
            value = self._control_dict[key]
            str_fmt = self._string_fmt_dict[key]
            clines.append('{0:<47}: {1:{2}}\n'.format(key, value, str_fmt))

        # use open() + context manager instead of the Py2-only file()
        with open(self.control_fn, 'w') as cfid:
            cfid.writelines(clines)

        print('Wrote ModEM control file to {0}'.format(self.control_fn))

    def read_control_file(self, control_fn=None):
        """
        Read in a control file and set the corresponding attributes.

        Raises mtex.MTpyError_file_handling if control_fn is None or the
        file cannot be found.
        """
        if control_fn is not None:
            self.control_fn = control_fn

        if self.control_fn is None:
            raise mtex.MTpyError_file_handling('control_fn is None, input '
                                               'control file')

        if os.path.isfile(self.control_fn) is False:
            raise mtex.MTpyError_file_handling('Could not find {0}'.format(
                self.control_fn))

        self.save_path = os.path.dirname(self.control_fn)
        self.fn_basename = os.path.basename(self.control_fn)

        with open(self.control_fn, 'r') as cfid:
            clines = cfid.readlines()

        for cline in clines:
            clist = cline.strip().split(':')
            if len(clist) == 2:
                key = clist[0].strip()
                # numeric entries are stored as floats, the rest as strings
                try:
                    self._control_dict[key] = float(clist[1])
                except ValueError:
                    # BUG FIX: strip the leading space after the separator
                    self._control_dict[key] = clist[1].strip()

        # set attributes from the parsed dictionary
        attr_list = ['num_qmr_iter', 'max_num_div_calls',
                     'max_num_div_iters', 'misfit_tol_fwd',
                     'misfit_tol_adj', 'misfit_tol_div']
        for key, kattr in zip(self._control_keys, attr_list):
            setattr(self, kattr, self._control_dict[key])
#==============================================================================
# covariance
#==============================================================================
class Covariance(object):
    """
    Read and write ModEM model covariance (smoothing) files.

    =================== ====================================================
    Attributes          Description
    =================== ====================================================
    grid_dimensions     (Nx, Ny, NzEarth) of the model, excluding air layers
    smoothing_east      smoothing in the east (Y) direction (0.3)
    smoothing_north     smoothing in the north (X) direction (0.3)
    smoothing_z         vertical smoothing (0.3)
    smoothing_num       times smoothing is applied (1)
    exception_list      list of (area_1, area_2, value) smoothing overrides
    mask_arr            integer mask array (0=air, 9=ocean, 1=default earth)
    save_path           directory to write the covariance file to (cwd)
    cov_fn_basename     covariance file basename ('covariance.cov')
    cov_fn              full path to covariance file
    =================== ====================================================
    """

    def __init__(self, grid_dimensions=None, **kwargs):
        self.grid_dimensions = grid_dimensions
        self.smoothing_east = kwargs.pop('smoothing_east', 0.3)
        self.smoothing_north = kwargs.pop('smoothing_north', 0.3)
        self.smoothing_z = kwargs.pop('smoothing_z', 0.3)
        self.smoothing_num = kwargs.pop('smoothing_num', 1)

        self.exception_list = kwargs.pop('exception_list', [])
        self.mask_arr = kwargs.pop('mask_arr', None)

        self.save_path = kwargs.pop('save_path', os.getcwd())
        self.cov_fn_basename = kwargs.pop('cov_fn_basename', 'covariance.cov')
        self.cov_fn = kwargs.pop('cov_fn', None)

        # 16-line header required by ModEM; keep the line count unchanged
        self._header_str = '\n'.join(['+{0}+'.format('-'*77),
    '| This file defines model covariance for a recursive autoregression scheme.   |',
    '| The model space may be divided into distinct areas using integer masks.     |',
    '| Mask 0 is reserved for air; mask 9 is reserved for ocean. Smoothing between |',
    '| air, ocean and the rest of the model is turned off automatically. You can   |',
    '| also define exceptions to override smoothing between any two model areas.   |',
    '| To turn off smoothing set it to zero.  This header is 16 lines long.        |',
    '| 1. Grid dimensions excluding air layers (Nx, Ny, NzEarth)                   |',
    '| 2. Smoothing in the X direction (NzEarth real values)                       |',
    '| 3. Smoothing in the Y direction (NzEarth real values)                       |',
    '| 4. Vertical smoothing (1 real value)                                        |',
    '| 5. Number of times the smoothing should be applied (1 integer >= 0)         |',
    '| 6. Number of exceptions (1 integer >= 0)                                    |',
    '| 7. Exceptions in the for e.g. 2 3 0. (to turn off smoothing between 3 & 4)  |',
    '| 8. Two integer layer indices and Nx x Ny block of masks, repeated as needed.|',
    '+{0}+'.format('-'*77)])

    def write_covariance_file(self, cov_fn=None, save_path=None,
                              cov_fn_basename=None, model_fn=None,
                              sea_water=0.3, air=1e12):
        """
        Write a covariance file.

        Arguments:
        ------------
            **cov_fn** : string
                         full path to write covariance file to
                         *default* is save_path/cov_fn_basename

            **save_path** : string
                            directory to save covariance file to

            **cov_fn_basename** : string
                                  basename of covariance file

            **model_fn** : string
                           full path to a ModEM model file; if given, the
                           grid dimensions and the air/ocean mask are taken
                           from the model resistivities

            **sea_water** : float
                            resistivity of sea water used to detect ocean
                            cells in the model (default 0.3 Ohm-m)

            **air** : float
                      resistivity of air used to detect air cells in the
                      model (default 1e12 Ohm-m)
        """
        if model_fn is not None:
            mod_obj = Model()
            mod_obj.read_model_file(model_fn)
            print('Reading {0}'.format(model_fn))
            self.grid_dimensions = mod_obj.res_model.shape
            if self.mask_arr is None:
                # flag air (within 10% of the air resistivity) as 0 and
                # sea water (within 10% of sea_water) as 9
                self.mask_arr = np.ones_like(mod_obj.res_model)
                self.mask_arr[np.where(mod_obj.res_model > air*.9)] = 0
                self.mask_arr[np.where((mod_obj.res_model < sea_water*1.1) &
                                       (mod_obj.res_model > sea_water*.9))] = 9
        else:
            if self.mask_arr is None:
                self.mask_arr = np.ones((self.grid_dimensions[0],
                                         self.grid_dimensions[1],
                                         self.grid_dimensions[2]))

        if self.grid_dimensions is None:
            raise ModEMError('Grid dimensions are None, input as (Nx, Ny, Nz)')

        if cov_fn is not None:
            self.cov_fn = cov_fn
        else:
            if save_path is not None:
                self.save_path = save_path
            if cov_fn_basename is not None:
                self.cov_fn_basename = cov_fn_basename
            self.cov_fn = os.path.join(self.save_path, self.cov_fn_basename)

        clines = [self._header_str]
        clines.append('\n\n')

        #--> grid dimensions
        clines.append(' {0:<10}{1:<10}{2:<10}\n'.format(self.grid_dimensions[0],
                                                        self.grid_dimensions[1],
                                                        self.grid_dimensions[2]))
        clines.append('\n')

        #--> smoothing in north direction, one value per z layer
        n_smooth_line = ''
        for zz in range(self.grid_dimensions[2]):
            n_smooth_line += ' {0:<5.1f}'.format(self.smoothing_north)
        clines.append(n_smooth_line+'\n')

        #--> smoothing in east direction, one value per z layer
        e_smooth_line = ''
        for zz in range(self.grid_dimensions[2]):
            e_smooth_line += ' {0:<5.1f}'.format(self.smoothing_east)
        clines.append(e_smooth_line+'\n')

        #--> smoothing in vertical direction
        clines.append(' {0:<5.1f}\n'.format(self.smoothing_z))
        clines.append('\n')

        #--> number of times to apply smoothing
        clines.append(' {0:<2.0f}\n'.format(self.smoothing_num))
        clines.append('\n')

        #--> exceptions
        clines.append(' {0:<.0f}\n'.format(len(self.exception_list)))
        for exc in self.exception_list:
            clines.append('{0:<5.0f}{1:<5.0f}{2:<5.0f}\n'.format(exc[0],
                                                                 exc[1],
                                                                 exc[2]))
        clines.append('\n')
        clines.append('\n')

        #--> mask array, one Nx x Ny block per z layer, preceded by the
        #    (start, end) 1-based layer indices (identical for one layer)
        for zz in range(self.mask_arr.shape[2]):
            clines.append(' {0:<8.0f}{0:<8.0f}\n'.format(zz+1))
            for nn in range(self.mask_arr.shape[0]):
                cline = ''
                for ee in range(self.mask_arr.shape[1]):
                    cline += '{0:^3.0f}'.format(self.mask_arr[nn, ee, zz])
                clines.append(cline+'\n')

        # use open() + context manager instead of the Py2-only file()
        with open(self.cov_fn, 'w') as cfid:
            cfid.writelines(clines)

        print('Wrote covariance file to {0}'.format(self.cov_fn))
#==============================================================================
# Add in elevation to the model
#==============================================================================
def read_surface_ascii(ascii_fn):
    """
    Read a surface in ESRI ASCII-grid format and return geographic
    coordinates (no projection applied).

    The ascii format is assumed to be:
        ncols         3601
        nrows         3601
        xllcorner     -119.00013888889    (longitude of lower left)
        yllcorner     36.999861111111     (latitude of lower left)
        cellsize      0.00027777777777778
        NODATA_value  -9999
        elevation data W --> E
        N
        |
        V
        S

    Arguments:
    -------------
        **ascii_fn** : string
                       full path to the ascii surface file

    Returns:
    -------------
        (lon, lat, elevation) where elevation[0] is the southernmost row.
    """
    d_dict = {}
    skiprows = 0
    with open(ascii_fn, 'r') as dfid:
        for ii in range(6):
            dline = dfid.readline().strip().split()
            key = dline[0].strip().lower()
            d_dict[key] = float(dline[1].strip())
            # header lines have alphabetic keys; a purely numeric key means
            # the data block started early (fewer than 6 header lines)
            try:
                int(key)
            except ValueError:
                skiprows += 1

    x0 = d_dict['xllcorner']
    y0 = d_dict['yllcorner']
    nx = int(d_dict['ncols'])
    ny = int(d_dict['nrows'])
    cs = d_dict['cellsize']

    # flip so that row 0 is the southernmost (the file lists N --> S)
    elevation = np.loadtxt(ascii_fn, skiprows=skiprows)[::-1]

    # create lat and lon arrays from the dem file; linspace avoids the
    # floating-point end-point problems of arange (the original computed
    # arange arrays here and immediately overwrote them)
    lon = np.linspace(x0, x0+cs*(nx-1), nx)
    lat = np.linspace(y0, y0+cs*(ny-1), ny)

    return lon, lat, elevation
#--> read in ascii dem file
def read_dem_ascii(ascii_fn, cell_size=500, model_center=(0, 0), rot_90=0, epsg=None):
    """
    Read a DEM in ESRI ASCII-grid format, project it to UTM meters,
    resample it to roughly cell_size, and center it on the model.

    The ascii format is assumed to be:
        ncols         3601
        nrows         3601
        xllcorner     -119.00013888889
        yllcorner     36.999861111111
        cellsize      0.00027777777777778
        NODATA_value  -9999
        elevation data W --> E
        N
        |
        V
        S

    Arguments:
    -------------
        **ascii_fn** : string
                       full path to the ascii dem file
        **cell_size** : float
                        target horizontal cell size in meters
        **model_center** : (east, north)
                           model center in meters used to shift the DEM
        **rot_90** : [ 0 | 1 | 2 | 3 ]
                     number of 90-degree rotations to apply to the grid
        **epsg** : currently unused -- kept for interface compatibility

    Returns:
    -------------
        (east, north, elevation) -- or (north, east, elevation) for odd
        rotations, matching the rotated array orientation.
    """
    d_dict = {}
    with open(ascii_fn, 'r') as dfid:
        for ii in range(6):
            dline = dfid.readline().strip().split()
            key = dline[0].strip().lower()
            d_dict[key] = float(dline[1].strip())

        x0 = d_dict['xllcorner']
        y0 = d_dict['yllcorner']
        nx = int(d_dict['ncols'])
        ny = int(d_dict['nrows'])
        cs = d_dict['cellsize']

        # read in the elevation data; the first file row is the furthest
        # north, so fill the array backwards
        elevation = np.zeros((nx, ny))
        for ii in range(1, int(ny)+2):
            dline = dfid.readline()
            if len(str(dline)) > 1:
                # ROBUSTNESS: split() handles any run of whitespace;
                # split(' ') broke on multiple consecutive spaces
                elevation[:, -ii] = np.array(dline.strip().split(),
                                             dtype='float')
            else:
                break

    # create lat and lon arrays from the dem file
    lon = np.arange(x0, x0+cs*(nx), cs)
    lat = np.arange(y0, y0+cs*(ny), cs)

    # calculate the lower left and upper right corners of the grid in meters
    ll_en = utm2ll.LLtoUTM(23, lat[0], lon[0])
    ur_en = utm2ll.LLtoUTM(23, lat[-1], lon[-1])

    # estimate cell sizes for each dem measurement
    d_east = abs(ll_en[1]-ur_en[1])/nx
    d_north = abs(ll_en[2]-ur_en[2])/ny

    # calculate the number of new cells according to the given cell size;
    # if the given cell size and cs are similar int() could return 0,
    # hence the max with 1
    num_cells = max([1, int(cell_size/np.mean([d_east, d_north]))])

    # make easting and northing arrays in meters corresponding to lat/lon
    east = np.arange(ll_en[1], ur_en[1], d_east)
    north = np.arange(ll_en[2], ur_en[2], d_north)

    # resample the data accordingly
    new_east = east[np.arange(0, east.shape[0], num_cells)]
    new_north = north[np.arange(0, north.shape[0], num_cells)]
    try:
        new_x, new_y = np.meshgrid(np.arange(0, east.shape[0], num_cells),
                                   np.arange(0, north.shape[0], num_cells),
                                   indexing='ij')
    except TypeError:
        # old numpy without the indexing keyword -- transpose instead
        new_x, new_y = [arr.T for arr in
                        np.meshgrid(np.arange(0, east.shape[0], num_cells),
                                    np.arange(0, north.shape[0], num_cells))]
    elevation = elevation[new_x, new_y]

    # estimate the shift of the DEM to relative model coordinates
    shift_east = new_east.mean()-model_center[0]
    shift_north = new_north.mean()-model_center[1]

    # shift the easting and northing arrays accordingly so the DEM and
    # model are collocated
    new_east = (new_east-new_east.mean())+shift_east
    new_north = (new_north-new_north.mean())+shift_north

    # rotate the grid; odd rotations swap the axis order of the return
    if rot_90 == 1 or rot_90 == 3:
        elevation = np.rot90(elevation, rot_90)
        return new_north, new_east, elevation
    else:
        elevation = np.rot90(elevation, rot_90)
        return new_east, new_north, elevation
def interpolate_elevation(elev_east, elev_north, elevation, model_east,
model_north, pad=3):
"""
interpolate the elevation onto the model grid.
Arguments:
---------------
*elev_east* : np.ndarray(num_east_nodes)
easting grid for elevation model
*elev_north* : np.ndarray(num_north_nodes)
northing grid for elevation model
*elevation* : np.ndarray(num_east_nodes, num_north_nodes)
elevation model assumes x is east, y is north
Units are meters
*model_east* : np.ndarray(num_east_nodes_model)
relative easting grid of resistivity model
*model_north* : np.ndarray(num_north_nodes_model)
relative northin grid of resistivity model
*pad* : int
number of cells to repeat elevation model by. So for pad=3,
then the interpolated elevation model onto the resistivity
model grid will have the outer 3 cells will be repeats of
the adjacent cell. This is to extend the elevation model
to the resistivity model cause most elevation models will
not cover the entire area.
Returns:
--------------
*interp_elev* : np.ndarray(num_north_nodes_model, num_east_nodes_model)
the elevation model interpolated onto the resistivity
model grid.
"""
# need to line up the elevation with the model
grid_east, grid_north = np.broadcast_arrays(elev_east[:, None],
elev_north[None, :])
# interpolate onto the model grid
interp_elev = spi.griddata((grid_east.ravel(), grid_north.ravel()),
elevation.ravel(),
(model_east[:, None],
model_north[None, :]),
method='linear',
fill_value=elevation.mean())
interp_elev[0:pad, pad:-pad] = interp_elev[pad, pad:-pad]
interp_elev[-pad:, pad:-pad] = interp_elev[-pad-1, pad:-pad]
interp_elev[:, 0:pad] = interp_elev[:, pad].repeat(pad).reshape(
interp_elev[:, 0:pad].shape)
interp_elev[:, -pad:] = interp_elev[:, -pad-1].repeat(pad).reshape(
interp_elev[:, -pad:].shape)
# transpose the modeled elevation to align with x=N, y=E
interp_elev = interp_elev.T
return interp_elev
def make_elevation_model(interp_elev, model_nodes_z, elevation_cell=30,
                         pad=3, res_air=1e12, fill_res=100, res_sea=0.3):
    """
    Take interpolated elevation data and map it onto the resistivity model
    by adding elevation cells on top of the existing vertical nodes.

    ..Note: if there are large elevation gains, the elevation cell size
            might need to be increased.

    ..Note: interp_elev is clipped in place to the survey-area maximum.

    Arguments:
    -------------
        *interp_elev* : np.ndarray(num_nodes_north, num_nodes_east)
                        elevation interpolated onto the resistivity model
                        grid, in meters

        *model_nodes_z* : np.ndarray(num_z_nodes_of_model)
                          vertical node thicknesses (not cumulative depth)
                          of the resistivity model without topography, in
                          meters

        *elevation_cell* : float
                           height of each added elevation cell in meters

        *pad* : int
                number of padding cells excluded when finding the maximum
                and minimum elevation of the survey area

        *res_air* : float
                    resistivity of air, default 1e12 Ohm-m

        *fill_res* : float
                     resistivity of the subsurface in Ohm-m

        *res_sea* : float
                    resistivity of sea water in Ohm-m

    Returns:
    -------------
        *elevation_model* : np.ndarray(num_north_nodes, num_east_nodes,
                            num_elev_nodes+num_z_nodes)
                            model grid with elevation mapped onto it;
                            cells above the surface are res_air, the rest
                            fill_res (or res_sea below sea level)

        *new_nodes_z* : np.ndarray(num_z_nodes+num_elev_nodes)
                        new vertical node array; nodes thinner than
                        elevation_cell are thickened to elevation_cell
    """
    # calculate the max elevation within the survey area (pad excluded)
    elev_max = interp_elev[pad:-pad, pad:-pad].max()

    # need to set sea level to 0 elevation
    elev_min = max([0, interp_elev[pad:-pad, pad:-pad].min()])

    # clip the interpolated elevations in place so they fit below elev_max
    interp_elev[np.where(interp_elev > elev_max)] = elev_max

    # calculate the number of elevation cells needed
    num_elev_cells = int((elev_max-elev_min)/elevation_cell)
    print('Number of elevation cells: {0}'.format(num_elev_cells))

    # find sea level if it is there
    # NOTE(review): elev_min is clamped to >= 0 above, so the first branch
    # is currently unreachable -- kept in case the clamp is removed
    if elev_min < 0:
        sea_level_index = num_elev_cells-abs(int((elev_min)/elevation_cell))-1
    else:
        sea_level_index = num_elev_cells-1
    print('Sea level index is {0}'.format(sea_level_index))

    # make an array of just the elevation for the model
    # north is first index, east is second, vertical is third
    elevation_model = np.ones((interp_elev.shape[0],
                               interp_elev.shape[1],
                               num_elev_cells+model_nodes_z.shape[0]))
    elevation_model[:, :, :] = fill_res

    # fill in elevation model with air values.  Remember Z is positive
    # down, so the top of the model is the highest point and index 0 is
    # the highest elevation
    for nn in range(interp_elev.shape[0]):
        for ee in range(interp_elev.shape[1]):
            # need to test for ocean
            if interp_elev[nn, ee] < 0:
                # fill in from bottom to sea level, then rest with air
                elevation_model[nn, ee, 0:sea_level_index] = res_air
                dz = sea_level_index+abs(int((interp_elev[nn, ee])/elevation_cell))+1
                elevation_model[nn, ee, sea_level_index:dz] = res_sea
            else:
                dz = int((elev_max-interp_elev[nn, ee])/elevation_cell)
                elevation_model[nn, ee, 0:dz] = res_air

    # make new z nodes array; each elevation layer is elevation_cell thick
    new_nodes_z = np.append(np.repeat(elevation_cell, num_elev_cells),
                            model_nodes_z)
    new_nodes_z[np.where(new_nodes_z < elevation_cell)] = elevation_cell

    return elevation_model, new_nodes_z
def add_topography_to_model(dem_ascii_fn, model_fn, model_center=(0,0),
                            rot_90=0, cell_size=500, elev_cell=30):
    """
    Add topography to an existing model from a dem in ascii format.

    The ascii format is assumed to be:
        ncols         3601
        nrows         3601
        xllcorner     -119.00013888889
        yllcorner     36.999861111111
        cellsize      0.00027777777777778
        NODATA_value  -9999
        elevation data W --> E
        N
        |
        V
        S

    Arguments:
    -------------
        *dem_ascii_fn* : string
                         full path to ascii dem file

        *model_fn* : string
                     full path to existing ModEM model file

        *model_center* : (east, north) in meters
                         Sometimes the center of the DEM and the center of
                         the model don't line up.  Use this parameter to
                         line everything up properly.

        *rot_90* : [ 0 | 1 | 2 | 3 ]
                   rotate the elevation model by rot_90*90 degrees.
                   Sometimes the elevation model is flipped depending on
                   your coordinate system.

        *cell_size* : float (meters)
                      horizontal cell size of grid to interpolate elevation
                      onto.  This should be smaller or equal to the input
                      model cell size to be sure there is no spatial
                      aliasing.

        *elev_cell* : float (meters)
                      vertical size of each elevation cell.  This value
                      should be about 1/10th the smallest skin depth.

    Writes a new model file named <model_fn_basename>_topo.rho next to the
    original model.
    """
    ### 1.) read in the dem and center it onto the resistivity model
    # BUG FIX: rot_90 was hard-coded to 3 here, silently ignoring the
    # rot_90 argument documented above
    e_east, e_north, elevation = read_dem_ascii(dem_ascii_fn,
                                                cell_size=cell_size,
                                                model_center=model_center,
                                                rot_90=rot_90)
    # quick-look plot of the resampled DEM (debug/visual check)
    plt.figure()
    plt.pcolormesh(e_east, e_north, elevation)

    m_obj = Model()
    m_obj.read_model_file(model_fn)

    ### 2.) interpolate the elevation model onto the model grid
    m_elev = interpolate_elevation(e_east, e_north, elevation,
                                   m_obj.grid_east, m_obj.grid_north, pad=3)

    ### 3.) make a resistivity model that incorporates topography
    mod_elev, elev_nodes_z = make_elevation_model(m_elev, m_obj.nodes_z,
                                                  elevation_cell=elev_cell)
    plt.figure()
    # plt.pcolormesh(m_obj.grid_east, m_obj.grid_north, m_elev)

    ### 4.) write new model file
    # NOTE(review): the docstring of the original promised a return of the
    # new model file name, but write_model_file's naming is handled
    # internally -- confirm before adding a return value
    m_obj.nodes_z = elev_nodes_z
    m_obj.res_model = mod_elev
    m_obj.write_model_file(model_fn_basename='{0}_topo.rho'.format(
                           os.path.basename(m_obj.model_fn)[0:-4]))
def change_data_elevation(data_fn, model_fn, new_data_fn=None, res_air=1e12):
    """
    Rewrite the station elevations in a ModEM data file so each station
    sits on the model surface instead of floating in air.

    Arguments:
    ------------------
        *data_fn* : string
                    full path to a ModEM data file

        *model_fn* : string
                     full path to ModEM model file that has elevation
                     incorporated

        *new_data_fn* : string
                        full path of the new data file.  If None, '_elev.dat'
                        is appended to the input file name (extension
                        replaced).

        *res_air* : float
                    resistivity of air, default 1e12 Ohm-m

    Returns:
    -------------
        *new_data_fn* : string
                        full path to the new data file
    """
    d_obj = Data()
    d_obj.read_data_file(data_fn)

    m_obj = Model()
    m_obj.read_model_file(model_fn)

    for key, mt_obj in d_obj.mt_dict.items():
        # first model node east/north of the station position
        e_index = np.where(m_obj.grid_east > mt_obj.grid_east)[0][0]
        n_index = np.where(m_obj.grid_north > mt_obj.grid_north)[0][0]
        # first non-air cell down the column marks the surface
        z_index = np.where(m_obj.res_model[n_index, e_index, :] < res_air*.9)[0][0]
        s_index = np.where(d_obj.data_array['station'] == key)[0][0]

        surface_z = m_obj.grid_z[z_index]
        d_obj.data_array[s_index]['elev'] = surface_z
        mt_obj.grid_elev = surface_z

    if new_data_fn is None:
        new_dfn = '{0}{1}'.format(data_fn[:-4], '_elev.dat')
    else:
        new_dfn = new_data_fn

    d_obj.write_data_file(save_path=os.path.dirname(new_dfn),
                          fn_basename=os.path.basename(new_dfn),
                          compute_error=False,
                          fill=False)

    return new_dfn
#==============================================================================
# Manipulate the model to test structures or create a starting model
#==============================================================================
class ModelManipulator(Model):
"""
will plot a model from wsinv3d or init file so the user can manipulate the
resistivity values relatively easily. At the moment only plotted
in map view.
:Example: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> initial_fn = r"/home/MT/ws3dinv/Inv1/WSInitialFile"
>>> mm = ws.WSModelManipulator(initial_fn=initial_fn)
=================== =======================================================
Buttons Description
=================== =======================================================
'=' increase depth to next vertical node (deeper)
'-' decrease depth to next vertical node (shallower)
'q' quit the plot, rewrites initial file when pressed
'a' copies the above horizontal layer to the present layer
'b' copies the below horizonal layer to present layer
'u' undo previous change
=================== =======================================================
=================== =======================================================
Attributes Description
=================== =======================================================
ax1 matplotlib.axes instance for mesh plot of the model
ax2 matplotlib.axes instance of colorbar
cb matplotlib.colorbar instance for colorbar
cid_depth matplotlib.canvas.connect for depth
cmap matplotlib.colormap instance
cmax maximum value of resistivity for colorbar. (linear)
cmin minimum value of resistivity for colorbar (linear)
data_fn full path fo data file
depth_index integer value of depth slice for plotting
dpi resolution of figure in dots-per-inch
dscale depth scaling, computed internally
east_line_xlist list of east mesh lines for faster plotting
east_line_ylist list of east mesh lines for faster plotting
fdict dictionary of font properties
fig matplotlib.figure instance
fig_num number of figure instance
fig_size size of figure in inches
font_size size of font in points
grid_east location of east nodes in relative coordinates
grid_north location of north nodes in relative coordinates
grid_z location of vertical nodes in relative coordinates
initial_fn full path to initial file
m_height mean height of horizontal cells
m_width mean width of horizontal cells
map_scale [ 'm' | 'km' ] scale of map
mesh_east np.meshgrid of east, north
mesh_north np.meshgrid of east, north
mesh_plot matplotlib.axes.pcolormesh instance
model_fn full path to model file
new_initial_fn full path to new initial file
nodes_east spacing between east nodes
nodes_north spacing between north nodes
nodes_z spacing between vertical nodes
north_line_xlist list of coordinates of north nodes for faster plotting
north_line_ylist list of coordinates of north nodes for faster plotting
plot_yn [ 'y' | 'n' ] plot on instantiation
radio_res matplotlib.widget.radio instance for change resistivity
rect_selector matplotlib.widget.rect_selector
res np.ndarray(nx, ny, nz) for model in linear resistivity
res_copy copy of res for undo
res_dict dictionary of segmented resistivity values
res_list list of resistivity values for model linear scale
res_model np.ndarray(nx, ny, nz) of resistivity values from
res_list (linear scale)
res_model_int np.ndarray(nx, ny, nz) of integer values corresponding
to res_list for initial model
res_value current resistivty value of radio_res
save_path path to save initial file to
station_east station locations in east direction
station_north station locations in north direction
xlimits limits of plot in e-w direction
ylimits limits of plot in n-s direction
=================== =======================================================
"""
def __init__(self, model_fn=None, data_fn=None, **kwargs):
#be sure to initialize Model
Model.__init__(self, model_fn=model_fn, **kwargs)
self.data_fn = data_fn
self.model_fn_basename = kwargs.pop('model_fn_basename',
'ModEM_Model_rw.ws')
if self.model_fn is not None:
self.save_path = os.path.dirname(self.model_fn)
elif self.data_fn is not None:
self.save_path = os.path.dirname(self.data_fn)
else:
self.save_path = os.getcwd()
#station locations in relative coordinates read from data file
self.station_east = None
self.station_north = None
#--> set map scale
self.map_scale = kwargs.pop('map_scale', 'km')
self.m_width = 100
self.m_height = 100
#--> scale the map coordinates
if self.map_scale=='km':
self.dscale = 1000.
if self.map_scale=='m':
self.dscale = 1.
#figure attributes
self.fig = None
self.ax1 = None
self.ax2 = None
self.cb = None
self.east_line_xlist = None
self.east_line_ylist = None
self.north_line_xlist = None
self.north_line_ylist = None
#make a default resistivity list to change values
self._res_sea = 0.3
self._res_air = 1E12
self.res_dict = None
self.res_list = kwargs.pop('res_list', None)
if self.res_list is None:
self.set_res_list(np.array([self._res_sea, 1, 10, 50, 100, 500,
1000, 5000],
dtype=np.float))
#set initial resistivity value
self.res_value = self.res_list[0]
self.cov_arr = None
#--> set map limits
self.xlimits = kwargs.pop('xlimits', None)
self.ylimits = kwargs.pop('ylimits', None)
self.font_size = kwargs.pop('font_size', 7)
self.fig_dpi = kwargs.pop('fig_dpi', 300)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.cmap = kwargs.pop('cmap', cm.jet_r)
self.depth_index = kwargs.pop('depth_index', 0)
self.fdict = {'size':self.font_size+2, 'weight':'bold'}
self.subplot_wspace = kwargs.pop('subplot_wspace', .3)
self.subplot_hspace = kwargs.pop('subplot_hspace', .0)
self.subplot_right = kwargs.pop('subplot_right', .8)
self.subplot_left = kwargs.pop('subplot_left', .01)
self.subplot_top = kwargs.pop('subplot_top', .93)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
#plot on initialization
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn=='y':
self.get_model()
self.plot()
def set_res_list(self, res_list):
"""
on setting res_list also set the res_dict to correspond
"""
self.res_list = res_list
#make a dictionary of values to write to file.
self.res_dict = dict([(res, ii)
for ii, res in enumerate(self.res_list,1)])
if self.fig is not None:
plt.close()
self.plot()
#---read files-------------------------------------------------------------
    def get_model(self):
        """
        Read in the model file (and the data file if given) and set the
        attributes needed for plotting:

            - res_model      : resistivity array read from the model file
            - cov_arr        : covariance mask, one entry per model cell
            - station_east/station_north : relative station locations
            - m_height/m_width : median interior cell size in map units
            - res_copy       : backup copy of res_model for undo
        """
        #--> read in model file (sets grid/node arrays and res_model)
        self.read_model_file()
        # covariance mask initialized to ones, same shape as the model
        self.cov_arr = np.ones_like(self.res_model)
        #--> read in data file if given
        if self.data_fn is not None:
            md_data = Data()
            md_data.read_data_file(self.data_fn)
            #get station locations in relative model coordinates
            self.station_east = md_data.station_locations['rel_east']
            self.station_north = md_data.station_locations['rel_north']
        #get cell block sizes; the outer 5 cells on each side are skipped
        #because they are presumably padding cells -- TODO confirm
        self.m_height = np.median(self.nodes_north[5:-5])/self.dscale
        self.m_width = np.median(self.nodes_east[5:-5])/self.dscale
        #make a copy of original in case there are unwanted changes
        self.res_copy = self.res_model.copy()
#---plot model-------------------------------------------------------------
    def plot(self):
        """
        Draw the interactive model editor for the current depth slice.

        Renders log10 resistivity as a pcolormesh with station markers,
        the model grid lines, a horizontal colorbar, a resistivity slider
        (in log10 ohm-m) and a rectangle selector for editing cells.
        Key presses are dispatched to _on_key_callback ('='/'-' change
        depth, 'q' quits and rewrites the model file, 'a'/'b' copy the
        adjacent layer, 'u' undoes the last selection edit).
        """
        # set plot properties
        plt.rcParams['font.size'] = self.font_size
        plt.rcParams['figure.subplot.left'] = self.subplot_left
        plt.rcParams['figure.subplot.right'] = self.subplot_right
        plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
        plt.rcParams['figure.subplot.top'] = self.subplot_top
        font_dict = {'size':self.font_size+2, 'weight':'bold'}
        #make sure there is a model to plot
        if self.res_model is None:
            self.get_model()
        # color limits: whole log10 decades spanning the resistivity list
        self.cmin = np.floor(np.log10(min(self.res_list)))
        self.cmax = np.ceil(np.log10(max(self.res_list)))
        #-->Plot properties
        plt.rcParams['font.size'] = self.font_size
        #need to add an extra row and column to east and north to make sure
        #all is plotted see pcolor for details.
        plot_east = np.append(self.grid_east, self.grid_east[-1]*1.25)/self.dscale
        plot_north = np.append(self.grid_north, self.grid_north[-1]*1.25)/self.dscale
        #make a mesh grid for plotting
        #the 'ij' makes sure the resulting grid is in east, north
        self.mesh_east, self.mesh_north = np.meshgrid(plot_east,
                                                      plot_north,
                                                      indexing='ij')
        self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
        plt.clf()
        self.ax1 = self.fig.add_subplot(1, 1, 1, aspect='equal')
        #transpose to make x--east and y--north
        plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
        self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
                                             self.mesh_north,
                                             plot_res,
                                             cmap=self.cmap,
                                             vmin=self.cmin,
                                             vmax=self.cmax)
        #on plus or minus change depth slice
        # keep the connection id so the 'q' handler can disconnect it
        self.cid_depth = \
                    self.mesh_plot.figure.canvas.mpl_connect('key_press_event',
                                                        self._on_key_callback)
        #plot the stations
        if self.station_east is not None:
            for ee, nn in zip(self.station_east, self.station_north):
                self.ax1.text(ee/self.dscale, nn/self.dscale,
                              '*',
                              verticalalignment='center',
                              horizontalalignment='center',
                              fontdict={'size':self.font_size-2,
                                        'weight':'bold'})
        #set axis properties
        if self.xlimits is not None:
            self.ax1.set_xlim(self.xlimits)
        else:
            self.ax1.set_xlim(xmin=self.grid_east.min()/self.dscale,
                              xmax=self.grid_east.max()/self.dscale)
        if self.ylimits is not None:
            self.ax1.set_ylim(self.ylimits)
        else:
            self.ax1.set_ylim(ymin=self.grid_north.min()/self.dscale,
                              ymax=self.grid_north.max()/self.dscale)
        #self.ax1.xaxis.set_minor_locator(MultipleLocator(100*1./dscale))
        #self.ax1.yaxis.set_minor_locator(MultipleLocator(100*1./dscale))
        self.ax1.set_ylabel('Northing ('+self.map_scale+')',
                            fontdict=self.fdict)
        self.ax1.set_xlabel('Easting ('+self.map_scale+')',
                            fontdict=self.fdict)
        depth_title = self.grid_z[self.depth_index]/self.dscale
        self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
                           '('+self.map_scale+')',
                           fontdict=self.fdict)
        #plot the grid if desired
        # grid lines are built as one long polyline broken by None entries
        # so that a single plot() call draws the entire mesh; the lists are
        # cached on the instance for reuse by redraw_plot()
        self.east_line_xlist = []
        self.east_line_ylist = []
        for xx in self.grid_east:
            self.east_line_xlist.extend([xx/self.dscale, xx/self.dscale])
            self.east_line_xlist.append(None)
            self.east_line_ylist.extend([self.grid_north.min()/self.dscale,
                                         self.grid_north.max()/self.dscale])
            self.east_line_ylist.append(None)
        self.ax1.plot(self.east_line_xlist,
                      self.east_line_ylist,
                      lw=.25,
                      color='k')
        self.north_line_xlist = []
        self.north_line_ylist = []
        for yy in self.grid_north:
            self.north_line_xlist.extend([self.grid_east.min()/self.dscale,
                                          self.grid_east.max()/self.dscale])
            self.north_line_xlist.append(None)
            self.north_line_ylist.extend([yy/self.dscale, yy/self.dscale])
            self.north_line_ylist.append(None)
        self.ax1.plot(self.north_line_xlist,
                      self.north_line_ylist,
                      lw=.25,
                      color='k')
        #plot the colorbar
#        self.ax2 = mcb.make_axes(self.ax1, orientation='vertical', shrink=.35)
        self.ax2 = self.fig.add_axes([.81, .45, .16, .03])
        self.ax2.xaxis.set_ticks_position('top')
        #seg_cmap = ws.cmap_discretize(self.cmap, len(self.res_list))
        self.cb = mcb.ColorbarBase(self.ax2,cmap=self.cmap,
                                   norm=colors.Normalize(vmin=self.cmin,
                                                         vmax=self.cmax),
                                   orientation='horizontal')
        self.cb.set_label('Resistivity ($\Omega \cdot$m)',
                          fontdict={'size':self.font_size})
        self.cb.set_ticks(np.arange(self.cmin, self.cmax+1))
        self.cb.set_ticklabels([mtplottools.labeldict[cc]
                                for cc in np.arange(self.cmin, self.cmax+1)])
        #make a resistivity radio button
        #resrb = self.fig.add_axes([.85,.1,.1,.2])
        #reslabels = ['{0:.4g}'.format(res) for res in self.res_list]
        #self.radio_res = widgets.RadioButtons(resrb, reslabels,
        #                        active=self.res_dict[self.res_value])
#        slider_ax_bounds = list(self.cb.ax.get_position().bounds)
#        slider_ax_bounds[0] += .1
        # slider works in log10 resistivity, so valinit=2 --> 100 ohm-m
        slider_ax = self.fig.add_axes([.81, .5, .16, .03])
        self.slider_res = widgets.Slider(slider_ax, 'Resistivity',
                                         self.cmin, self.cmax,
                                         valinit=2)
        #make a rectangular selector
        self.rect_selector = widgets.RectangleSelector(self.ax1,
                                                       self.rect_onselect,
                                                       drawtype='box',
                                                       useblit=True)
        plt.show()
        #needs to go after show()
        self.slider_res.on_changed(self.set_res_value)
        #self.radio_res.on_clicked(self.set_res_value)
    def redraw_plot(self):
        """
        Redraw the depth slice after an edit or depth change.

        Clears the main axes and re-plots the resistivity mesh, station
        markers, grid lines, labels and title, preserving the current
        zoom unless explicit x/y limits were set on the instance.
        """
        # remember the zoom state before clearing the axes
        current_xlimits = self.ax1.get_xlim()
        current_ylimits = self.ax1.get_ylim()
        self.ax1.cla()
        # transpose so x is east and y is north
        plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
        self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
                                             self.mesh_north,
                                             plot_res,
                                             cmap=self.cmap,
                                             vmin=self.cmin,
                                             vmax=self.cmax)
        #plot the stations
        if self.station_east is not None:
            for ee,nn in zip(self.station_east, self.station_north):
                self.ax1.text(ee/self.dscale, nn/self.dscale,
                              '*',
                              verticalalignment='center',
                              horizontalalignment='center',
                              fontdict={'size':self.font_size-2,
                                        'weight':'bold'})
        #set axis properties
        if self.xlimits is not None:
            self.ax1.set_xlim(self.xlimits)
        else:
            self.ax1.set_xlim(current_xlimits)
        if self.ylimits is not None:
            self.ax1.set_ylim(self.ylimits)
        else:
            self.ax1.set_ylim(current_ylimits)
        self.ax1.set_ylabel('Northing ('+self.map_scale+')',
                            fontdict=self.fdict)
        self.ax1.set_xlabel('Easting ('+self.map_scale+')',
                            fontdict=self.fdict)
        depth_title = self.grid_z[self.depth_index]/self.dscale
        self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
                           '('+self.map_scale+')',
                           fontdict=self.fdict)
        #plot finite element mesh
        # reuse the grid-line vertex lists built once in plot()
        self.ax1.plot(self.east_line_xlist,
                      self.east_line_ylist,
                      lw=.25,
                      color='k')
        self.ax1.plot(self.north_line_xlist,
                      self.north_line_ylist,
                      lw=.25,
                      color='k')
        #be sure to redraw the canvas
        self.fig.canvas.draw()
# def set_res_value(self, label):
# self.res_value = float(label)
# print 'set resistivity to ', label
# print self.res_value
    def set_res_value(self, val):
        """Slider callback: store the active resistivity in ohm-m."""
        # the slider runs in log10 space (cmin..cmax), convert to linear
        self.res_value = 10**val
        print 'set resistivity to ', self.res_value
    def _on_key_callback(self,event):
        """
        Keyboard handler for the editor figure.

        Keys:
            '='  step one layer deeper (clamped at the deepest layer)
            '-'  step one layer shallower (clamped at the surface)
            'q'  disconnect the handler, close the figure and rewrite
                 the model file
            'a'  copy the layer above into the current layer
            'b'  copy the layer below into the current layer
            'u'  undo the last rectangle edit using res_copy
        """
        self.event_change_depth = event
        #go down a layer on push of +/= keys
        if self.event_change_depth.key == '=':
            self.depth_index += 1
            # clamp to the deepest available slice
            if self.depth_index>len(self.grid_z)-1:
                self.depth_index = len(self.grid_z)-1
                print 'already at deepest depth'
            print 'Plotting Depth {0:.3f}'.format(self.grid_z[self.depth_index]/\
                    self.dscale)+'('+self.map_scale+')'
            self.redraw_plot()
        #go up a layer on push of - key
        elif self.event_change_depth.key == '-':
            self.depth_index -= 1
            # clamp at the surface layer
            if self.depth_index < 0:
                self.depth_index = 0
            print 'Plotting Depth {0:.3f} '.format(self.grid_z[self.depth_index]/\
                    self.dscale)+'('+self.map_scale+')'
            self.redraw_plot()
        #exit plot on press of q
        elif self.event_change_depth.key == 'q':
            self.event_change_depth.canvas.mpl_disconnect(self.cid_depth)
            plt.close(self.event_change_depth.canvas.figure)
            self.rewrite_model_file()
        #copy the layer above
        elif self.event_change_depth.key == 'a':
            try:
                if self.depth_index == 0:
                    print 'No layers above'
                else:
                    self.res_model[:, :, self.depth_index] = \
                                       self.res_model[:, :, self.depth_index-1]
            except IndexError:
                print 'No layers above'
            self.redraw_plot()
        #copy the layer below
        elif self.event_change_depth.key == 'b':
            try:
                self.res_model[:, :, self.depth_index] = \
                                    self.res_model[:, :, self.depth_index+1]
            except IndexError:
                print 'No more layers below'
            self.redraw_plot()
        #undo
        # NOTE(review): assumes xchange/ychange were set by a previous
        # rectangle selection -- pressing 'u' first raises AttributeError
        elif self.event_change_depth.key == 'u':
            if type(self.xchange) is int and type(self.ychange) is int:
                self.res_model[self.ychange, self.xchange, self.depth_index] =\
                self.res_copy[self.ychange, self.xchange, self.depth_index]
            else:
                for xx in self.xchange:
                    for yy in self.ychange:
                        self.res_model[yy, xx, self.depth_index] = \
                        self.res_copy[yy, xx, self.depth_index]
            self.redraw_plot()
def change_model_res(self, xchange, ychange):
"""
change resistivity values of resistivity model
"""
if type(xchange) is int and type(ychange) is int:
self.res_model[ychange, xchange, self.depth_index] = self.res_value
else:
for xx in xchange:
for yy in ychange:
self.res_model[yy, xx, self.depth_index] = self.res_value
self.redraw_plot()
def rect_onselect(self, eclick, erelease):
"""
on selecting a rectangle change the colors to the resistivity values
"""
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
self.xchange = self._get_east_index(x1, x2)
self.ychange = self._get_north_index(y1, y2)
#reset values of resistivity
self.change_model_res(self.xchange, self.ychange)
def _get_east_index(self, x1, x2):
"""
get the index value of the points to be changed
"""
if x1 < x2:
xchange = np.where((self.grid_east/self.dscale >= x1) & \
(self.grid_east/self.dscale <= x2))[0]
if len(xchange) == 0:
xchange = np.where(self.grid_east/self.dscale >= x1)[0][0]-1
return [xchange]
if x1 > x2:
xchange = np.where((self.grid_east/self.dscale <= x1) & \
(self.grid_east/self.dscale >= x2))[0]
if len(xchange) == 0:
xchange = np.where(self.grid_east/self.dscale >= x2)[0][0]-1
return [xchange]
#check the edges to see if the selection should include the square
xchange = np.append(xchange, xchange[0]-1)
xchange.sort()
return xchange
def _get_north_index(self, y1, y2):
"""
get the index value of the points to be changed in north direction
need to flip the index because the plot is flipped
"""
if y1 < y2:
ychange = np.where((self.grid_north/self.dscale > y1) & \
(self.grid_north/self.dscale < y2))[0]
if len(ychange) == 0:
ychange = np.where(self.grid_north/self.dscale >= y1)[0][0]-1
return [ychange]
elif y1 > y2:
ychange = np.where((self.grid_north/self.dscale < y1) & \
(self.grid_north/self.dscale > y2))[0]
if len(ychange) == 0:
ychange = np.where(self.grid_north/self.dscale >= y2)[0][0]-1
return [ychange]
ychange -= 1
ychange = np.append(ychange, ychange[-1]+1)
return ychange
    def rewrite_model_file(self, model_fn=None, save_path=None,
                           model_fn_basename=None):
        """
        Write the (possibly edited) model out as a wsinv3d initial file.

        model_fn : full path of the model file to write.
            NOTE(review): assigned unconditionally, so calling without it
            resets self.model_fn to None -- presumably to make
            write_model_file rebuild the name from save_path /
            model_fn_basename; confirm against write_model_file.
        save_path : directory to save into (kept unchanged if not given).
        model_fn_basename : file name to use within save_path
            (kept unchanged if not given).
        """
        if save_path is not None:
            self.save_path = save_path
        self.model_fn = model_fn
        if model_fn_basename is not None:
            self.model_fn_basename = model_fn_basename
        self.write_model_file()
#==============================================================================
# plot response
#==============================================================================
class PlotResponse(object):
"""
plot data and response
Plots the real and imaginary impedance and induction vector if present.
:Example: ::
>>> import mtpy.modeling.new_modem as modem
>>> dfn = r"/home/MT/ModEM/Inv1/DataFile.dat"
>>> rfn = r"/home/MT/ModEM/Inv1/Test_resp_000.dat"
>>> mrp = modem.PlotResponse(data_fn=dfn, resp_fn=rfn)
>>> # plot only the TE and TM modes
>>> mrp.plot_component = 2
>>> mrp.redraw_plot()
======================== ==================================================
Attributes Description
======================== ==================================================
color_mode [ 'color' | 'bw' ] color or black and white plots
    cted                     color for data xy/TE mode
    ctem                     color for model response xy/TE mode
    ctmd                     color for data yx/TM mode
    ctmm                     color for model response yx/TM mode
data_fn full path to data file
data_object WSResponse instance
e_capsize cap size of error bars in points (*default* is .5)
e_capthick cap thickness of error bars in points (*default*
is 1)
fig_dpi resolution of figure in dots-per-inch (300)
fig_list list of matplotlib.figure instances for plots
fig_size size of figure in inches (*default* is [6, 6])
font_size size of font for tick labels, axes labels are
font_size+2 (*default* is 7)
legend_border_axes_pad padding between legend box and axes
legend_border_pad padding between border of legend and symbols
legend_handle_text_pad padding between text labels and symbols of legend
legend_label_spacing padding between labels
legend_loc location of legend
legend_marker_scale scale of symbols in legend
lw line width response curves (*default* is .5)
ms size of markers (*default* is 1.5)
mted marker for data TE mode
mtem marker for data TM mode
mtmd marker for model TE mode
mtmm marker for model TM mode
phase_limits limits of phase
plot_component [ 2 | 4 ] 2 for TE and TM or 4 for all components
    plot_style               [ 1 | 2 ] 1 to plot each mode in a separate
subplot and 2 to plot xx, xy and yx, yy in same
plots
plot_type [ '1' | list of station name ] '1' to plot all
stations in data file or input a list of station
names to plot if station_fn is input, otherwise
input a list of integers associated with the
index with in the data file, ie 2 for 2nd station
plot_z [ True | False ] *default* is True to plot
impedance, False for plotting resistivity and
phase
plot_yn [ 'n' | 'y' ] to plot on instantiation
res_limits limits of resistivity in linear scale
resp_fn full path to response file
resp_object WSResponse object for resp_fn, or list of
WSResponse objects if resp_fn is a list of
response files
station_fn full path to station file written by WSStation
subplot_bottom space between axes and bottom of figure
subplot_hspace space between subplots in vertical direction
subplot_left space between axes and left of figure
subplot_right space between axes and right of figure
subplot_top space between axes and top of figure
subplot_wspace space between subplots in horizontal direction
======================== ==================================================
"""
def __init__(self, data_fn=None, resp_fn=None, **kwargs):
self.data_fn = data_fn
self.resp_fn = resp_fn
self.data_object = None
self.resp_object = []
self.color_mode = kwargs.pop('color_mode', 'color')
self.ms = kwargs.pop('ms', 1.5)
self.lw = kwargs.pop('lw', .5)
self.e_capthick = kwargs.pop('e_capthick', .5)
self.e_capsize = kwargs.pop('e_capsize', 2)
#color mode
if self.color_mode == 'color':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 1))
self.ctmd = kwargs.pop('ctmd', (1, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
#color for occam2d model
self.ctem = kwargs.pop('ctem', (0, .6, .3))
self.ctmm = kwargs.pop('ctmm', (.9, 0, .8))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', '+')
#black and white mode
elif self.color_mode == 'bw':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 0))
self.ctmd = kwargs.pop('ctmd', (0, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
#color for occam2d model
self.ctem = kwargs.pop('ctem', (0.6, 0.6, 0.6))
self.ctmm = kwargs.pop('ctmm', (0.6, 0.6, 0.6))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', 'x')
self.phase_limits = kwargs.pop('phase_limits', None)
self.res_limits = kwargs.pop('res_limits', None)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.subplot_wspace = kwargs.pop('subplot_wspace', .3)
self.subplot_hspace = kwargs.pop('subplot_hspace', .0)
self.subplot_right = kwargs.pop('subplot_right', .98)
self.subplot_left = kwargs.pop('subplot_left', .08)
self.subplot_top = kwargs.pop('subplot_top', .85)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
self.legend_loc = 'upper center'
self.legend_pos = (.5, 1.21)
self.legend_marker_scale = 1
self.legend_border_axes_pad = .01
self.legend_label_spacing = 0.07
self.legend_handle_text_pad = .2
self.legend_border_pad = .15
self.font_size = kwargs.pop('font_size', 6)
self.plot_type = kwargs.pop('plot_type', '1')
self.plot_style = kwargs.pop('plot_style', 1)
self.plot_component = kwargs.pop('plot_component', 4)
self.plot_yn = kwargs.pop('plot_yn', 'y')
self.plot_z = kwargs.pop('plot_z', True)
self.ylabel_pad = kwargs.pop('ylabel_pad', 1.25)
self.fig_list = []
if self.plot_yn == 'y':
self.plot()
def plot(self):
"""
plot
"""
self.data_object = Data()
self.data_object.read_data_file(self.data_fn)
#get shape of impedance tensors
ns = len(self.data_object.mt_dict.keys())
#read in response files
if self.resp_fn != None:
self.resp_object = []
if type(self.resp_fn) is not list:
resp_obj = Data()
resp_obj.read_data_file(self.resp_fn)
self.resp_object = [resp_obj]
else:
for rfile in self.resp_fn:
resp_obj = Data()
resp_obj.read_data_file(rfile)
self.resp_object.append(resp_obj)
#get number of response files
nr = len(self.resp_object)
if type(self.plot_type) is list:
ns = len(self.plot_type)
#--> set default font size
plt.rcParams['font.size'] = self.font_size
fontdict = {'size':self.font_size+2, 'weight':'bold'}
if self.plot_z == True:
h_ratio = [1,1]
elif self.plot_z == False:
h_ratio = [2, 1.5]
ax_list = []
line_list = []
label_list = []
#--> make key word dictionaries for plotting
kw_xx = {'color':self.cted,
'marker':self.mted,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
kw_yy = {'color':self.ctmd,
'marker':self.mtmd,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
if self.plot_type != '1':
pstation_list = []
if type(self.plot_type) is not list:
self.plot_type = [self.plot_type]
for ii, station in enumerate(self.data_object.mt_dict.keys()):
if type(station) is not int:
for pstation in self.plot_type:
if station.find(str(pstation)) >= 0:
pstation_list.append(station)
else:
for pstation in self.plot_type:
if station == int(pstation):
pstation_list.append(ii)
else:
pstation_list = self.data_object.mt_dict.keys()
for jj, station in enumerate(pstation_list):
z_obj = self.data_object.mt_dict[station].Z
t_obj = self.data_object.mt_dict[station].Tipper
period = self.data_object.period_list
print 'Plotting: {0}'.format(station)
#convert to apparent resistivity and phase
rp = mtplottools.ResPhase(z_object=z_obj)
#find locations where points have been masked
nzxx = np.nonzero(z_obj.z[:, 0, 0])[0]
nzxy = np.nonzero(z_obj.z[:, 0, 1])[0]
nzyx = np.nonzero(z_obj.z[:, 1, 0])[0]
nzyy = np.nonzero(z_obj.z[:, 1, 1])[0]
ntx = np.nonzero(t_obj.tipper[:, 0, 0])[0]
nty = np.nonzero(t_obj.tipper[:, 0, 1])[0]
if self.resp_fn != None:
plotr = True
else:
plotr = False
#make figure
fig = plt.figure(station, self.fig_size, dpi=self.fig_dpi)
plt.clf()
fig.suptitle(str(station), fontdict=fontdict)
#set the grid of subplots
tipper_zero = (np.round(abs(t_obj.tipper.mean()), 4) == 0.0)
if tipper_zero == False:
#makes more sense if plot_tipper is True to plot tipper
plot_tipper = True
else:
plot_tipper = False
if plot_tipper == True:
gs = gridspec.GridSpec(2, 6,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace,
height_ratios=h_ratio)
else:
gs = gridspec.GridSpec(2, 4,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace,
height_ratios=h_ratio)
#---------plot the apparent resistivity-----------------------------------
#plot each component in its own subplot
if self.plot_style == 1:
#plot xy and yx
if self.plot_component == 2:
if plot_tipper == False:
axrxy = fig.add_subplot(gs[0, 0:2])
axryx = fig.add_subplot(gs[0, 2:], sharex=axrxy)
axpxy = fig.add_subplot(gs[1, 0:2], sharex=axrxy)
axpyx = fig.add_subplot(gs[1, 2:], sharex=axrxy)
else:
axrxy = fig.add_subplot(gs[0, 0:2])
axryx = fig.add_subplot(gs[0, 2:4], sharex=axrxy)
axpxy = fig.add_subplot(gs[1, 0:2], sharex=axrxy)
axpyx = fig.add_subplot(gs[1, 2:4], sharex=axrxy)
axtr = fig.add_subplot(gs[0, 4:], sharex=axrxy)
axti = fig.add_subplot(gs[1, 4:], sharex=axrxy)
axtr.set_ylim(-1.2, 1.2)
axti.set_ylim(-1.2, 1.2)
if self.plot_z == False:
#plot resistivity
erxy = mtplottools.plot_errorbar(axrxy,
period,
rp.resxy[nzxy],
rp.resxy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
**kw_yy)
#plot phase
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
**kw_yy)
elif self.plot_z == True:
#plot real
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[nzxy,0,1].real),
abs(z_obj.zerr[nzxy,0,1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
abs(z_obj.z[nzyx,1,0].real),
abs(z_obj.zerr[nzyx,1,0].real),
**kw_yy)
#plot phase
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(z_obj.z[nzxy,0,1].imag),
abs(z_obj.zerr[nzxy,0,1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
abs(z_obj.z[nzyx,1,0].imag),
abs(z_obj.zerr[nzyx,1,0].real),
**kw_yy)
#plot tipper
if plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtr,
period[ntx],
t_obj.tipper[ntx, 0, 0].real,
t_obj.tippererr[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtr,
period[nty],
t_obj.tipper[nty, 0, 1].real,
t_obj.tippererr[nty, 0, 1],
**kw_yy)
ertx = mtplottools.plot_errorbar(axti,
period[ntx],
t_obj.tipper[ntx, 0, 0].imag,
t_obj.tippererr[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axti,
period[nty],
t_obj.tipper[nty, 0, 1].imag,
t_obj.tippererr[nty, 0, 1],
**kw_yy)
if plot_tipper == False:
ax_list = [axrxy, axryx, axpxy, axpyx]
line_list = [[erxy[0]], [eryx[0]]]
label_list = [['$Z_{xy}$'], ['$Z_{yx}$']]
else:
ax_list = [axrxy, axryx, axpxy, axpyx, axtr, axti]
line_list = [[erxy[0]], [eryx[0]],
[ertx[0], erty[0]]]
label_list = [['$Z_{xy}$'], ['$Z_{yx}$'],
['$T_{x}$', '$T_{y}$']]
elif self.plot_component == 4:
if plot_tipper == False:
axrxx = fig.add_subplot(gs[0, 0])
axrxy = fig.add_subplot(gs[0, 1], sharex=axrxx)
axryx = fig.add_subplot(gs[0, 2], sharex=axrxx)
axryy = fig.add_subplot(gs[0, 3], sharex=axrxx)
axpxx = fig.add_subplot(gs[1, 0])
axpxy = fig.add_subplot(gs[1, 1], sharex=axrxx)
axpyx = fig.add_subplot(gs[1, 2], sharex=axrxx)
axpyy = fig.add_subplot(gs[1, 3], sharex=axrxx)
else:
axrxx = fig.add_subplot(gs[0, 0])
axrxy = fig.add_subplot(gs[0, 1], sharex=axrxx)
axryx = fig.add_subplot(gs[0, 2], sharex=axrxx)
axryy = fig.add_subplot(gs[0, 3], sharex=axrxx)
axpxx = fig.add_subplot(gs[1, 0])
axpxy = fig.add_subplot(gs[1, 1], sharex=axrxx)
axpyx = fig.add_subplot(gs[1, 2], sharex=axrxx)
axpyy = fig.add_subplot(gs[1, 3], sharex=axrxx)
axtxr = fig.add_subplot(gs[0, 4], sharex=axrxx)
axtxi = fig.add_subplot(gs[1, 4], sharex=axrxx)
axtyr = fig.add_subplot(gs[0, 5], sharex=axrxx)
axtyi = fig.add_subplot(gs[1, 5], sharex=axrxx)
axtxr.set_ylim(-1.2, 1.2)
axtxi.set_ylim(-1.2, 1.2)
axtyr.set_ylim(-1.2, 1.2)
axtyi.set_ylim(-1.2, 1.2)
if self.plot_z == False:
#plot resistivity
erxx= mtplottools.plot_errorbar(axrxx,
period[nzxx],
rp.resxx[nzxx],
rp.resxx_err[nzxx],
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
**kw_yy)
eryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
rp.resyy[nzyy],
rp.resyy_err[nzyy],
**kw_yy)
#plot phase
erxx= mtplottools.plot_errorbar(axpxx,
period[nzxx],
rp.phasexx[nzxx],
rp.phasexx_err[nzxx],
**kw_xx)
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
**kw_yy)
eryy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
rp.phaseyy[nzyy],
rp.phaseyy_err[nzyy],
**kw_yy)
elif self.plot_z == True:
#plot real
erxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
abs(z_obj.z[nzxx,0,0].real),
abs(z_obj.zerr[nzxx,0,0].real),
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[nzxy,0,1].real),
abs(z_obj.zerr[nzxy,0,1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
abs(z_obj.z[nzyx,1,0].real),
abs(z_obj.zerr[nzyx,1,0].real),
**kw_yy)
eryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
abs(z_obj.z[nzyy,1,1].real),
abs(z_obj.zerr[nzyy,1,1].real),
**kw_yy)
#plot phase
erxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
abs(z_obj.z[nzxx,0,0].imag),
abs(z_obj.zerr[nzxx,0,0].real),
**kw_xx)
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(z_obj.z[nzxy,0,1].imag),
abs(z_obj.zerr[nzxy,0,1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
abs(z_obj.z[nzyx,1,0].imag),
abs(z_obj.zerr[nzyx,1,0].real),
**kw_yy)
eryy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
abs(z_obj.z[nzyy,1,1].imag),
abs(z_obj.zerr[nzyy,1,1].real),
**kw_yy)
#plot tipper
if plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
t_obj.tipper[ntx, 0, 0].real,
t_obj.tippererr[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtyr,
period[nty],
t_obj.tipper[nty, 0, 1].real,
t_obj.tippererr[nty, 0, 0],
**kw_yy)
ertx = mtplottools.plot_errorbar(axtxi,
period[ntx],
t_obj.tipper[ntx, 0, 0].imag,
t_obj.tippererr[ntx, 0, 1],
**kw_xx)
erty = mtplottools.plot_errorbar(axtyi,
period[nty],
t_obj.tipper[nty, 0, 1].imag,
t_obj.tippererr[nty, 0, 1],
**kw_yy)
if plot_tipper == False:
ax_list = [axrxx, axrxy, axryx, axryy,
axpxx, axpxy, axpyx, axpyy]
line_list = [[erxx[0]], [erxy[0]], [eryx[0]], [eryy[0]]]
label_list = [['$Z_{xx}$'], ['$Z_{xy}$'],
['$Z_{yx}$'], ['$Z_{yy}$']]
else:
ax_list = [axrxx, axrxy, axryx, axryy,
axpxx, axpxy, axpyx, axpyy,
axtxr, axtxi, axtyr, axtyi]
line_list = [[erxx[0]], [erxy[0]],
[eryx[0]], [eryy[0]],
[ertx[0]], [erty[0]]]
label_list = [['$Z_{xx}$'], ['$Z_{xy}$'],
['$Z_{yx}$'], ['$Z_{yy}$'],
['$T_{x}$'], ['$T_{y}$']]
#set axis properties
for aa, ax in enumerate(ax_list):
ax.tick_params(axis='y', pad=self.ylabel_pad)
# ylabels = ax.get_yticks().tolist()
# ylabels[-1] = ''
# ylabels[0] = ''
# ax.set_yticklabels(ylabels)
# print ylabels
# dy = abs(ax.yaxis.get_ticklocs()[1]-
# ax.yaxis.get_ticklocs()[0])
# ylim = ax.get_ylim()
# ax.set_ylim(ylim[0]-.25*dy, ylim[1]+1.25*dy)
# ax.yaxis.set_major_locator(MultipleLocator(dy))
if len(ax_list) == 4:
# ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10**np.floor(np.log10(ylim[0])),
10**np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' ']+\
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)]+\
[' ']
ax.set_yticklabels(ylabels)
if len(ax_list) == 6:
if aa < 4:
# ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10**np.floor(np.log10(ylim[0])),
10**np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' ']+\
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)]+\
[' ']
ax.set_yticklabels(ylabels)
if len(ax_list) == 8:
# ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10**np.floor(np.log10(ylim[0])),
10**np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' ']+\
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)]+\
[' ']
ax.set_yticklabels(ylabels)
if len(ax_list) == 12:
if aa < 4:
ylabels = ax.get_yticks().tolist()
ylabels[0] = ''
ax.set_yticklabels(ylabels)
if aa < 8:
# ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
if self.plot_z == True:
ax.set_yscale('log', nonposy='clip')
ylim = ax.get_ylim()
ylimits = (10**np.floor(np.log10(ylim[0])),
10**np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' ']+\
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)]+\
[' ']
ax.set_yticklabels(ylabels)
if len(ax_list) == 4 or len(ax_list) == 6:
if aa < 2:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log', nonposy='clip')
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
#set axes labels
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Re[Z]| (mV/km nT)',
fontdict=fontdict)
elif aa == 2:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Im[Z]| (mV/km nT)',
fontdict=fontdict)
elif len(ax_list) == 8 or len(ax_list) == 12:
if aa < 4:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log')
ylim = ax.get_ylim()
ylimits = (10**np.floor(np.log10(ylim[0])),
10**np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' ', ' ']+\
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0])+1,
np.log10(ylimits[1])+1, 1)]
ax.set_yticklabels(ylabels)
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
if aa == 8 or aa == 10:
plt.setp(ax.get_xticklabels(), visible=False)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
#set axes labels
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Re[Z]| (mV/km nT)',
fontdict=fontdict)
elif aa == 4:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Im[Z]| (mV/km nT)',
fontdict=fontdict)
ax.set_xscale('log')
ax.set_xlim(xmin=10**(np.floor(np.log10(period[0])))*1.01,
xmax=10**(np.ceil(np.log10(period[-1])))*.99)
ax.grid(True, alpha=.25)
# plot xy and yx together and xx, yy together
elif self.plot_style == 2:
if self.plot_component == 2:
if plot_tipper == False:
axrxy = fig.add_subplot(gs[0, 0:])
axpxy = fig.add_subplot(gs[1, 0:], sharex=axrxy)
else:
axrxy = fig.add_subplot(gs[0, 0:4])
axpxy = fig.add_subplot(gs[1, 0:4], sharex=axrxy)
axtr = fig.add_subplot(gs[0, 4:], sharex=axrxy)
axti = fig.add_subplot(gs[1, 4:], sharex=axrxy)
if self.plot_z == False:
#plot resistivity
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
**kw_yy)
#plot phase
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
**kw_yy)
elif self.plot_z == True:
#plot real
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[nzxy,0,1].real),
abs(z_obj.zerr[nzxy,0,1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[nzxy,1,0].real),
abs(z_obj.zerr[nzxy,1,0].real),
**kw_yy)
#plot phase
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(z_obj.z[nzxy,0,1].imag),
abs(z_obj.zerr[nzxy,0,1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
abs(z_obj.z[nzyx,1,0].imag),
abs(z_obj.zerr[nzyx,1,0].real),
**kw_yy)
#plot tipper
if plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtr,
period,
t_obj.tipper[ntx, 0, 0].real,
t_obj.tippererr[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtr,
period,
t_obj.tipper[nty, 0, 1].real,
t_obj.tippererr[nty, 0, 1],
**kw_yy)
ertx = mtplottools.plot_errorbar(axti,
period,
t_obj.tipper[ntx, 0, 0].imag,
t_obj.tippererr[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axti,
period,
t_obj.tipper[nty, 0, 1].imag,
t_obj.tippererr[nty, 0, 1],
**kw_yy)
if plot_tipper == False:
ax_list = [axrxy, axpxy]
line_list = [erxy[0], eryx[0]]
label_list = ['$Z_{xy}$', '$Z_{yx}$']
else:
ax_list = [axrxy, axpxy, axtr, axti]
line_list = [[erxy[0], eryx[0]],
[ertx[0], erty[0]]]
label_list = [['$Z_{xy}$', '$Z_{yx}$'],
['$T_{x}$', '$T_{y}$']]
elif self.plot_component == 4:
if plot_tipper == False:
axrxy = fig.add_subplot(gs[0, 0:2])
axpxy = fig.add_subplot(gs[1, 0:2], sharex=axrxy)
axrxx = fig.add_subplot(gs[0, 2:], sharex=axrxy)
axpxx = fig.add_subplot(gs[1, 2:], sharex=axrxy)
else:
axrxy = fig.add_subplot(gs[0, 0:2])
axpxy = fig.add_subplot(gs[1, 0:2], sharex=axrxy)
axrxx = fig.add_subplot(gs[0, 2:4], sharex=axrxy)
axpxx = fig.add_subplot(gs[1, 2:4], sharex=axrxy)
axtr = fig.add_subplot(gs[0, 4:], sharex=axrxy)
axti = fig.add_subplot(gs[1, 4:], sharex=axrxy)
if self.plot_z == False:
#plot resistivity
erxx= mtplottools.plot_errorbar(axrxx,
period[nzxx],
rp.resxx[nzxx],
rp.resxx_err[nzxx],
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rp.resxy[nzxy],
rp.resxy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
rp.resyx[nzyx],
rp.resyx_err[nzyx],
**kw_yy)
eryy = mtplottools.plot_errorbar(axrxx,
period[nzyy],
rp.resyy[nzyy],
rp.resyy_err[nzyy],
**kw_yy)
#plot phase
erxx= mtplottools.plot_errorbar(axpxx,
period[nzxx],
rp.phasexx[nzxx],
rp.phasexx_err[nzxx],
**kw_xx)
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rp.phasexy[nzxy],
rp.phasexy_err[nzxy],
**kw_xx)
eryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
rp.phaseyx[nzyx],
rp.phaseyx_err[nzyx],
**kw_yy)
eryy = mtplottools.plot_errorbar(axpxx,
period[nzyy],
rp.phaseyy[nzyy],
rp.phaseyy_err[nzyy],
**kw_yy)
elif self.plot_z == True:
#plot real
erxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
abs(z_obj.z[nzxx,0,0].real),
abs(z_obj.zerr[nzxx,0,0].real),
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(z_obj.z[nzxy,0,1].real),
abs(z_obj.zerr[nzxy,0,1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
abs(z_obj.z[nzyx,1,0].real),
abs(z_obj.zerr[nzyx,1,0].real),
**kw_yy)
eryy = mtplottools.plot_errorbar(axrxx,
period[nzyy],
abs(z_obj.z[nzyy,1,1].real),
abs(z_obj.zerr[nzyy,1,1].real),
**kw_yy)
#plot phase
erxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
abs(z_obj.z[nzxx,0,0].imag),
abs(z_obj.zerr[nzxx,0,0].real),
**kw_xx)
erxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(z_obj.z[nzxy,0,1].imag),
abs(z_obj.zerr[nzxy,0,1].real),
**kw_xx)
eryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
abs(z_obj.z[nzyx,1,0].imag),
abs(z_obj.zerr[nzyx,1,0].real),
**kw_yy)
eryy = mtplottools.plot_errorbar(axpxx,
period[nzyy],
abs(z_obj.z[nzyy,1,1].imag),
abs(z_obj.zerr[nzyy,1,1].real),
**kw_yy)
#plot tipper
if plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtr,
period[ntx],
t_obj.tipper[ntx, 0, 0].real,
t_obj.tippererr[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtr,
period[nty],
t_obj.tipper[nty, 0, 1].real,
t_obj.tippererr[nty, 0, 1],
**kw_yy)
ertx = mtplottools.plot_errorbar(axti,
period[ntx],
t_obj.tipper[ntx, 0, 0].imag,
t_obj.tippererr[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axti,
period[nty],
t_obj.tipper[nty, 0, 1].imag,
t_obj.tippererr[nty, 0, 1],
**kw_yy)
if plot_tipper == False:
ax_list = [axrxy, axrxx, axpxy, axpxx]
line_list = [[erxy[0], eryx[0]], [erxx[0], eryy[0]]]
label_list = [['$Z_{xy}$', '$Z_{yx}$'],
['$Z_{xx}$', '$Z_{yy}$']]
else:
ax_list = [axrxy, axrxx, axpxy, axpxx, axtr, axti]
line_list = [[erxy[0], eryx[0]], [erxx[0], eryy[0]],
[ertx[0]], erty[0]]
label_list = [['$Z_{xy}$', '$Z_{yx}$'],
['$Z_{xx}$', '$Z_{yy}$'],
['$T_x$', '$T_y$']]
#set axis properties
for aa, ax in enumerate(ax_list):
ax.tick_params(axis='y', pad=self.ylabel_pad)
# ylabels = ax.get_yticks().tolist()
# ylabels[-1] = ''
# ylabels[0] = ''
# ax.set_yticklabels(ylabels)
if len(ax_list) == 2:
ax.set_xlabel('Period (s)', fontdict=fontdict)
if self.plot_z == True:
ax.set_yscale('log')
ylim = ax.get_ylim()
ylimits = (10**np.floor(np.log10(ylim[0])),
10**np.ceil(np.log10(ylim[1])))
ax.set_ylim(ylimits)
ylabels = [' ']+\
[mtplottools.labeldict[ii] for ii
in np.arange(np.log10(ylimits[0]),
np.log10(ylimits[1]), 1)]+\
[' ']
ax.set_yticklabels(ylabels)
if aa == 0:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log')
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Re[Z (mV/km nT)]|',
fontdict=fontdict)
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('|Im[Z (mV/km nT)]|',
fontdict=fontdict)
elif len(ax_list) == 4 and plot_tipper == False:
if self.plot_z == True:
ax.set_yscale('log')
if aa < 2:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log')
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
if self.plot_z == False:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 2:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (mV/km nT)]',
fontdict=fontdict)
elif len(ax_list) == 4 and plot_tipper == True:
if aa == 0 or aa == 2:
plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == False:
ax.set_yscale('log')
if self.res_limits is not None:
ax.set_ylim(self.res_limits)
else:
ax.set_ylim(self.phase_limits)
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 1:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (mV/km nT)]',
fontdict=fontdict)
if aa <= 2:
ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
if self.plot_z == True:
ax.set_yscale('log')
# else:
# plt.setp(ax.yaxis.get_ticklabels(), visible=False)
ax.set_xscale('log')
ax.set_xlim(xmin=10**(np.floor(np.log10(period[0])))*1.01,
xmax=10**(np.ceil(np.log10(period[-1])))*.99)
ax.grid(True,alpha=.25)
if plotr == True:
for rr in range(nr):
if self.color_mode == 'color':
cxy = (0,.4+float(rr)/(3*nr),0)
cyx = (.7+float(rr)/(4*nr),.13,.63-float(rr)/(4*nr))
elif self.color_mode == 'bw':
cxy = tuple(3*[1-.5/(rr+1)])
cyx = tuple(3*[1-.5/(rr+1)])
resp_z_obj = self.resp_object[rr].mt_dict[station].Z
resp_z_err = np.nan_to_num((z_obj.z-resp_z_obj.z)/z_obj.zerr)
resp_t_obj = self.resp_object[rr].mt_dict[station].Tipper
resp_t_err = np.nan_to_num((t_obj.tipper-resp_t_obj.tipper)/
t_obj.tippererr)
rrp = mtplottools.ResPhase(resp_z_obj)
rms = resp_z_err.std()
rms_xx = resp_z_err[:, 0, 0].std()
rms_xy = resp_z_err[:, 0, 1].std()
rms_yx = resp_z_err[:, 1, 0].std()
rms_yy = resp_z_err[:, 1, 1].std()
rms_tx = resp_t_err[:, 0, 0].std()
rms_ty = resp_t_err[:, 0, 1].std()
print ' --- response {0} ---'.format(rr)
print ' RMS = {:.2f}'.format(rms)
print ' RMS_xx = {:.2f}'.format(rms_xx)
print ' RMS_xy = {:.2f}'.format(rms_xy)
print ' RMS_yx = {:.2f}'.format(rms_yx)
print ' RMS_yy = {:.2f}'.format(rms_yy)
print ' RMS_Tx = {:.2f}'.format(rms_tx)
print ' RMS_Ty = {:.2f}'.format(rms_ty)
#--> make key word dictionaries for plotting
kw_xx = {'color':cxy,
'marker':self.mtem,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
kw_yy = {'color':cyx,
'marker':self.mtmm,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
if self.plot_style == 1:
if self.plot_component == 2:
if self.plot_z == False:
#plot resistivity
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
rrp.resyx[nzyx],
**kw_yy)
#plot phase
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
rrp.phaseyx[nzyx],
**kw_yy)
elif self.plot_z == True:
#plot real
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(resp_z_obj.z[nzxy,0,1].real),
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
abs(resp_z_obj.z[nzyx,1,0].real),
**kw_yy)
#plot phase
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(resp_z_obj.z[nzxy,0,1].imag),
**kw_xx)
reryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
abs(resp_z_obj.z[nzyx,1,0].imag),
**kw_yy)
if plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtr,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].real,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtr,
period[nty],
resp_t_obj.tipper[nty, 0, 1].real,
**kw_yy)
rertx = mtplottools.plot_errorbar(axti,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].imag,
**kw_xx)
rerty = mtplottools.plot_errorbar(axti,
period[nty],
resp_t_obj.tipper[nty, 0, 1].imag,
**kw_yy)
if plot_tipper == False:
line_list[0] += [rerxy[0]]
line_list[1] += [reryx[0]]
label_list[0] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[1] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
else:
line_list[0] += [rerxy[0]]
line_list[1] += [reryx[0]]
line_list[2] += [rertx[0], rerty[0]]
label_list[0] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[1] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[2] += ['$T^m_{x}$'+
'rms={0:.2f}'.format(rms_tx),
'$T^m_{y}$'+
'rms={0:.2f}'.format(rms_ty)]
elif self.plot_component == 4:
if self.plot_z == False:
#plot resistivity
rerxx= mtplottools.plot_errorbar(axrxx,
period[nzxx],
rrp.resxx[nzxx],
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
rrp.resyx[nzyx],
**kw_yy)
reryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
rrp.resyy[nzyy],
**kw_yy)
#plot phase
rerxx= mtplottools.plot_errorbar(axpxx,
period[nzxx],
rrp.phasexx[nzxx],
**kw_xx)
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
rrp.phaseyx[nzyx],
**kw_yy)
reryy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
rrp.phaseyy[nzyy],
**kw_yy)
elif self.plot_z == True:
#plot real
rerxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
abs(resp_z_obj.z[nzxx,0,0].real),
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(resp_z_obj.z[nzxy,0,1].real),
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
abs(resp_z_obj.z[nzyx,1,0].real),
**kw_yy)
reryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
abs(resp_z_obj.z[nzyy,1,1].real),
**kw_yy)
#plot phase
rerxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
abs(resp_z_obj.z[nzxx,0,0].imag),
**kw_xx)
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(resp_z_obj.z[nzxy,0,1].imag),
**kw_xx)
reryx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
abs(resp_z_obj.z[nzyx,1,0].imag),
**kw_yy)
reryy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
abs(resp_z_obj.z[nzyy,1,1].imag),
**kw_yy)
if plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].real,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtyr,
period[nty],
resp_t_obj.tipper[nty, 0, 1].real,
**kw_yy)
rertx = mtplottools.plot_errorbar(axtxi,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].imag,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtyi,
period[nty],
resp_t_obj.tipper[nty, 0, 1].imag,
**kw_yy)
if plot_tipper == False:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
label_list[0] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
else:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
line_list[4] += [rertx[0]]
line_list[5] += [rerty[0]]
label_list[0] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
label_list[4] += ['$T^m_{x}$'+
'rms={0:.2f}'.format(rms_tx)]
label_list[5] += ['$T^m_{y}$'+
'rms={0:.2f}'.format(rms_ty)]
elif self.plot_style == 2:
if self.plot_component == 2:
if self.plot_z == False:
#plot resistivity
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
rrp.resyx[nzyx],
**kw_yy)
#plot phase
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
rrp.phaseyx[nzyx],
**kw_yy)
elif self.plot_z == True:
#plot real
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(resp_z_obj.z[nzxy,0,1].real),
**kw_xx)
reryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
abs(resp_z_obj.z[nzyx,1,0].real),
**kw_yy)
#plot phase
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(resp_z_obj.z[nzxy,0,1].imag),
**kw_xx)
reryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
abs(resp_z_obj.z[nzyx,1,0].imag),
**kw_xx)
if plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtr,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].real,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtr,
period[nty],
resp_t_obj.tipper[nty, 0, 1].real,
**kw_yy)
rertx = mtplottools.plot_errorbar(axti,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].imag,
**kw_xx)
rerty = mtplottools.plot_errorbar(axti,
period[nty],
resp_t_obj.tipper[nty, 0, 1].imag,
**kw_yy)
if plot_tipper == False:
line_list += [rerxy[0], reryx[0]]
label_list += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
else:
line_list[0] += [rerxy[0], reryx[0]]
line_list[1] += [rertx[0], rerty[0]]
label_list[0] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[1] += ['$T^m_{x}$'+
'rms={0:.2f}'.format(rms_tx),
'$T^m_{y}$'+
'rms={0:.2f}'.format(rms_ty)]
elif self.plot_component == 4:
if self.plot_z == False:
#plot resistivity
rerxx= mtplottools.plot_errorbar(axrxx,
period[nzxx],
rrp.resxx[nzxx],
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
rrp.resxy[nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
rrp.resyx[nzyx],
**kw_yy)
reryy = mtplottools.plot_errorbar(axrxx,
period[nzyy],
rrp.resyy[nzyy],
**kw_yy)
#plot phase
rerxx= mtplottools.plot_errorbar(axpxx,
period[nzxx],
rrp.phasexx[nzxx],
**kw_xx)
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
rrp.phasexy[nzxy],
**kw_xx)
reryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
rrp.phaseyx[nzyx],
**kw_yy)
reryy = mtplottools.plot_errorbar(axpxx,
period[nzyy],
rrp.phaseyy[nzyy],
**kw_yy)
elif self.plot_z == True:
#plot real
rerxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
abs(resp_z_obj.z[nzxx,0,0].real),
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
abs(resp_z_obj.z[nzxy,0,1].real),
**kw_xx)
reryx = mtplottools.plot_errorbar(axrxy,
period[nzyx],
abs(resp_z_obj.z[nzyx,1,0].real),
**kw_yy)
reryy = mtplottools.plot_errorbar(axrxx,
period[nzyy],
abs(resp_z_obj.z[nzyy,1,1].real),
**kw_yy)
#plot phase
rerxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
abs(resp_z_obj.z[nzxx,0,0].imag),
**kw_xx)
rerxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
abs(resp_z_obj.z[nzxy,0,1].imag),
**kw_xx)
reryx = mtplottools.plot_errorbar(axpxy,
period[nzyx],
abs(resp_z_obj.z[nzyx,1,0].imag),
**kw_yy)
reryy = mtplottools.plot_errorbar(axpxx,
period[nzyy],
abs(resp_z_obj.z[nzyy,1,1].imag),
**kw_yy)
if plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtr,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].real,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtr,
period[nty],
resp_t_obj.tipper[nty, 0, 1].real,
**kw_yy)
rertx = mtplottools.plot_errorbar(axti,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].imag,
**kw_xx)
rerty = mtplottools.plot_errorbar(axti,
period[nty],
resp_t_obj.tipper[nty, 0, 1].imag,
**kw_yy)
if plot_tipper == False:
line_list[0] += [rerxy[0], reryx[0]]
line_list[1] += [rerxx[0], reryy[0]]
label_list[0] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[1] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx),
'$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
else:
line_list[0] += [rerxy[0], reryx[0]]
line_list[1] += [rerxx[0], reryy[0]]
line_list[2] += [rertx[0], rerty[0]]
label_list[0] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy),
'$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[1] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx),
'$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
label_list[2] += ['$T^m_{x}$'+
'rms={0:.2f}'.format(rms_tx),
'$T^m_{y}$'+
'rms={0:.2f}'.format(rms_ty)]
#make legends
if self.plot_style == 1:
legend_ax_list = ax_list[0:self.plot_component]
if plot_tipper == True:
if self.plot_component == 2:
legend_ax_list.append(ax_list[4])
elif self.plot_component == 4:
legend_ax_list.append(ax_list[8])
legend_ax_list.append(ax_list[10])
for aa, ax in enumerate(legend_ax_list):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
bbox_to_anchor=self.legend_pos,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size':max([self.font_size/(nr+1), 5])})
if self.plot_style == 2:
if self.plot_component == 2:
legend_ax_list = [ax_list[0]]
if plot_tipper == True:
legend_ax_list.append(ax_list[2])
for aa, ax in enumerate(legend_ax_list):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
bbox_to_anchor=self.legend_pos,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size':max([self.font_size/(nr+1), 5])})
else:
legend_ax_list = ax_list[0:self.plot_component/2]
if plot_tipper == True:
if self.plot_component == 2:
legend_ax_list.append(ax_list[2])
elif self.plot_component == 4:
legend_ax_list.append(ax_list[4])
for aa, ax in enumerate(legend_ax_list):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
bbox_to_anchor=self.legend_pos,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size':max([self.font_size/(nr+1), 5])})
##--> BE SURE TO SHOW THE PLOT
plt.show()
def redraw_plot(self):
    """
    Re-draw the figure(s) after attributes have been changed.

    Closes every figure currently tracked in ``fig_list`` and calls
    :meth:`plot` again so the new settings take effect.

    :Example: ::
        >>> # change the line width and re-plot
        >>> import mtpy.modeling.occam2d as occam2d
        >>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
        >>> p1 = ocd.plotAllResponses()
        >>> p1.lw = 2
        >>> p1.redraw_plot()
    """
    # dispose of the stale figures first so plot() starts clean
    for open_fig in self.fig_list:
        plt.close(open_fig)
    self.plot()
def save_figure(self, save_fn, file_format='pdf', orientation='portrait',
                fig_dpi=None, close_fig='y'):
    """
    Save the current figure to ``save_fn``.

    Arguments:
    -----------
        **save_fn** : string
                      full path to save figure to, can be input as
                      * directory path -> the figure is saved inside
                        that directory as '_L2.<file_format>'
                      * full path -> file is saved to the given path and
                        the format is taken from the path's extension
                        (last three characters)

        **file_format** : [ pdf | eps | jpg | png | svg ]
                          file type of saved figure

        **orientation** : [ landscape | portrait ]
                          orientation in which the file will be saved
                          *default* is portrait

        **fig_dpi** : int
                      resolution in dots-per-inch. If None, falls back
                      to ``self.fig_dpi`` (the dpi the figure was made
                      at).

        **close_fig** : [ y | n ]
                        * 'y' will close the plot after saving.
                        * 'n' will leave plot open

    :Example: ::
        >>> # to save plot as jpg
        >>> import mtpy.modeling.occam2d as occam2d
        >>> dfn = r"/home/occam2d/Inv1/data.dat"
        >>> ocd = occam2d.Occam2DData(dfn)
        >>> ps1 = ocd.plotPseudoSection()
        >>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
    """
    fig = plt.gcf()
    if fig_dpi is None:
        fig_dpi = self.fig_dpi

    if not os.path.isdir(save_fn):
        # full file path given: infer the format from the last three
        # characters of the path (original behavior)
        file_format = save_fn[-3:]
    else:
        # directory given: build the file name inside it
        # NOTE(review): the file is named '_L2.<format>' with no station
        # prefix -- looks like a dropped prefix upstream; kept as-is.
        save_fn = os.path.join(save_fn, '_L2.' + file_format)

    fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
                orientation=orientation, bbox_inches='tight')

    if close_fig == 'y':
        plt.clf()
        plt.close(fig)

    self.fig_fn = save_fn
    # parenthesized single expression: identical output on Python 2 and 3
    print('Saved figure to: ' + self.fig_fn)
def update_plot(self):
    """
    Push any changes made directly to ``self.fig`` or its axes onto the
    screen via the canvas' built-in draw.

    :Example: ::
        >>> # to change the grid lines to only be on the major ticks
        >>> import mtpy.modeling.occam2d as occam2d
        >>> dfn = r"/home/occam2d/Inv1/data.dat"
        >>> ocd = occam2d.Occam2DData(dfn)
        >>> ps1 = ocd.plotAllResponses()
        >>> [ax.grid(True, which='major') for ax in [ps1.axrte, ps1.axtep]]
        >>> ps1.update_plot()
    """
    # a full redraw_plot() is not needed; the canvas redraw is enough
    # when only figure/axes properties changed
    self.fig.canvas.draw()
def __str__(self):
    """Return a short, human-readable description of this plotter."""
    return "Plots data vs model response computed by WS3DINV"
#==============================================================================
# plot phase tensors
#==============================================================================
class PlotPTMaps(mtplottools.MTEllipse):
"""
Plot phase tensor maps including residual pt if response file is input.
:Plot only data for one period: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, plot_period_list=[0])
:Plot data and model response: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> rfn = r"/home/MT/ws3dinv/Inv1/Test_resp.00"
>>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, resp_fn=rfn, model_fn=mfn,
>>> ... plot_period_list=[0])
>>> # adjust colorbar
>>> ptm.cb_res_pad = 1.25
>>> ptm.redraw_plot()
========================== ================================================
Attributes Description
========================== ================================================
cb_pt_pad percentage from top of axes to place pt
color bar. *default* is .90
cb_res_pad percentage from bottom of axes to place
resistivity color bar. *default* is 1.2
cb_residual_tick_step tick step for residual pt. *default* is 3
cb_tick_step tick step for phase tensor color bar,
*default* is 45
data np.ndarray(n_station, n_periods, 2, 2)
impedance tensors for station data
data_fn full path to data file
dscale scaling parameter depending on map_scale
ellipse_cmap color map for pt ellipses. *default* is
mt_bl2gr2rd
ellipse_colorby [ 'skew' | 'skew_seg' | 'phimin' | 'phimax'|
'phidet' | 'ellipticity' ] parameter to color
ellipses by. *default* is 'phimin'
ellipse_range (min, max, step) min and max of colormap, need
to input step if plotting skew_seg
ellipse_size relative size of ellipses in map_scale
ew_limits limits of plot in e-w direction in map_scale
units. *default* is None, scales to station
area
fig_aspect aspect of figure. *default* is 1
fig_dpi resolution in dots-per-inch. *default* is 300
fig_list list of matplotlib.figure instances for each
figure plotted.
fig_size [width, height] in inches of figure window
*default* is [6, 6]
font_size font size of ticklabels, axes labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
model_fn full path to initial file
map_scale [ 'km' | 'm' ] distance units of map.
*default* is km
mesh_east np.meshgrid(grid_east, grid_north, indexing='ij')
mesh_north np.meshgrid(grid_east, grid_north, indexing='ij')
model_fn full path to model file
nodes_east relative distance between nodes in e-w direction
in map_scale units
nodes_north relative distance between nodes in n-s direction
in map_scale units
nodes_z relative distance between nodes in z direction
in map_scale units
ns_limits (min, max) limits of plot in n-s direction
*default* is None, viewing area is station area
pad_east padding from extreme stations in east direction
pad_north padding from extreme stations in north direction
period_list list of periods from data
plot_grid [ 'y' | 'n' ] 'y' to plot grid lines
*default* is 'n'
plot_period_list list of period index values to plot
*default* is None
plot_yn ['y' | 'n' ] 'y' to plot on instantiation
*default* is 'y'
res_cmap colormap for resistivity values.
*default* is 'jet_r'
res_limits (min, max) resistivity limits in log scale
*default* is (0, 4)
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
residual_cmap color map for pt residuals.
*default* is 'mt_wh2or'
resp np.ndarray(n_stations, n_periods, 2, 2)
impedance tensors for model response
resp_fn full path to response file
save_path directory to save figures to
save_plots [ 'y' | 'n' ] 'y' to save plots to save_path
station_east location of stations in east direction in
map_scale units
station_fn full path to station locations file
station_names station names
station_north location of station in north direction in
map_scale units
subplot_bottom distance between axes and bottom of figure window
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
title title of plot *default* is depth of slice
xminorticks location of xminorticks
yminorticks location of yminorticks
========================== ================================================
"""
def __init__(self, data_fn=None, resp_fn=None, model_fn=None, **kwargs):
    """
    Initialize the phase tensor map plotter.

    Arguments:
    -----------
        **data_fn** : string, full path to the data file
        **resp_fn** : string, full path to the model response file
        **model_fn** : string, full path to the model file

    All other attributes listed in the class docstring can be passed
    as keyword arguments.  If ``plot_yn`` is 'y' (default) the maps
    are plotted on instantiation.
    """
    self.model_fn = model_fn
    self.data_fn = data_fn
    self.resp_fn = resp_fn

    # save_path: explicit kwarg first, then the model file's directory,
    # then the data file's directory.
    # BUG FIX: the original second branch repeated the model_fn test
    # verbatim and was unreachable; it was clearly meant to fall back
    # on data_fn.
    self.save_path = kwargs.pop('save_path', None)
    if self.model_fn is not None and self.save_path is None:
        self.save_path = os.path.dirname(self.model_fn)
    elif self.data_fn is not None and self.save_path is None:
        self.save_path = os.path.dirname(self.data_fn)

    if self.save_path is not None:
        if not os.path.exists(self.save_path):
            os.mkdir(self.save_path)

    self.save_plots = kwargs.pop('save_plots', 'y')

    self.plot_period_list = kwargs.pop('plot_period_list', None)
    self.period_dict = None

    self.map_scale = kwargs.pop('map_scale', 'km')
    # dscale converts meters to map_scale units
    if self.map_scale == 'km':
        self.dscale = 1000.
    elif self.map_scale == 'm':
        self.dscale = 1.
    self.ew_limits = kwargs.pop('ew_limits', None)
    self.ns_limits = kwargs.pop('ns_limits', None)

    self.pad_east = kwargs.pop('pad_east', 2000)
    self.pad_north = kwargs.pop('pad_north', 2000)

    self.plot_grid = kwargs.pop('plot_grid', 'n')

    self.fig_num = kwargs.pop('fig_num', 1)
    self.fig_size = kwargs.pop('fig_size', [6, 6])
    # NOTE: the keyword is 'dpi', not 'fig_dpi' (kept for compatibility)
    self.fig_dpi = kwargs.pop('dpi', 300)
    self.fig_aspect = kwargs.pop('fig_aspect', 1)
    self.title = kwargs.pop('title', 'on')
    self.fig_list = []

    self.xminorticks = kwargs.pop('xminorticks', 1000)
    self.yminorticks = kwargs.pop('yminorticks', 1000)

    self.residual_cmap = kwargs.pop('residual_cmap', 'mt_wh2or')
    self.font_size = kwargs.pop('font_size', 7)

    self.cb_tick_step = kwargs.pop('cb_tick_step', 45)
    self.cb_residual_tick_step = kwargs.pop('cb_residual_tick_step', 3)
    self.cb_pt_pad = kwargs.pop('cb_pt_pad', 1.2)
    self.cb_res_pad = kwargs.pop('cb_res_pad', .5)

    self.res_limits = kwargs.pop('res_limits', (0, 4))
    self.res_cmap = kwargs.pop('res_cmap', 'jet_r')

    #--> set the ellipse properties -------------------
    self._ellipse_dict = kwargs.pop('ellipse_dict', {'size': 2})
    self._read_ellipse_dict()
    self.ellipse_size = kwargs.pop('ellipse_size',
                                   self._ellipse_dict['size'])

    self.subplot_right = .99
    self.subplot_left = .085
    self.subplot_top = .92
    self.subplot_bottom = .1
    self.subplot_hspace = .2
    self.subplot_wspace = .05

    # filled in later by _read_files() / _get_pt()
    self.data_obj = None
    self.resp_obj = None
    self.model_obj = None
    self.period_list = None

    self.pt_data_arr = None
    self.pt_resp_arr = None
    self.pt_resid_arr = None

    self.plot_yn = kwargs.pop('plot_yn', 'y')
    if self.plot_yn == 'y':
        self.plot()
def _read_files(self):
    """
    Read the data, response, and model files into their respective
    objects, then resolve the plot periods and build the phase tensor
    arrays.
    """
    # data file is always required
    self.data_obj = Data()
    self.data_obj.read_data_file(self.data_fn)

    # response file is optional; it reuses the Data reader
    if self.resp_fn is not None:
        self.resp_obj = Data()
        self.resp_obj.read_data_file(self.resp_fn)

    # model file is optional
    if self.model_fn is not None:
        self.model_obj = Model()
        self.model_obj.read_model_file(self.model_fn)

    self._get_plot_period_list()
    self._get_pt()
def _get_plot_period_list(self):
    """
    Resolve ``plot_period_list`` into a list of actual period values.

    Accepts: None (plot all periods in the data file), a list of
    periods, a list of integer indices into the data period list, a
    single integer index, or a single float period.  Also builds
    ``period_dict`` mapping each period value to its index in the data
    file's period list.
    """
    if self.plot_period_list is None:
        self.plot_period_list = self.data_obj.period_list
    else:
        if isinstance(self.plot_period_list, list):
            # check if entries are index values or actual periods
            if isinstance(self.plot_period_list[0], int):
                # BUG FIX: self.period_list is still None here (it is
                # only initialized in __init__ and never filled before
                # this point), so index the period list read from the
                # data file instead.
                self.plot_period_list = [self.data_obj.period_list[ii]
                                         for ii in self.plot_period_list]
        elif isinstance(self.plot_period_list, int):
            # BUG FIX: wrap the single looked-up period in a list so the
            # downstream iteration over plot_period_list still works
            # (same fix for the None period_list as above)
            self.plot_period_list = \
                [self.data_obj.period_list[self.plot_period_list]]
        elif isinstance(self.plot_period_list, float):
            self.plot_period_list = [self.plot_period_list]

    self.period_dict = dict([(key, value) for value, key in
                             enumerate(self.data_obj.period_list)])
def _get_pt(self):
    """
    Collect phase tensor parameters into structured arrays shaped
    (n_periods, n_stations) for plotting.

    Sets ``pt_data_arr`` from the data, and -- when a response file was
    given -- ``pt_resp_arr`` from the model response plus
    ``pt_resid_arr`` from the residual phase tensor (data vs model),
    including the geometric mean sqrt(|phimin * phimax|).
    """
    ns = len(self.data_obj.mt_dict.keys())
    nf = len(self.data_obj.period_list)

    # BUG FIX: np.float was only ever an alias of the builtin float and
    # was removed in NumPy >= 1.24; use float directly (same dtype).
    pt_fields = [('phimin', float),
                 ('phimax', float),
                 ('skew', float),
                 ('azimuth', float),
                 ('east', float),
                 ('north', float)]
    data_pt_arr = np.zeros((nf, ns), dtype=pt_fields)
    if self.resp_fn is not None:
        model_pt_arr = np.zeros((nf, ns), dtype=pt_fields)
        # the residual array carries one extra field for the
        # geometric-mean misfit measure
        res_pt_arr = np.zeros((nf, ns),
                              dtype=pt_fields + [('geometric_mean', float)])

    for ii, key in enumerate(self.data_obj.mt_dict.keys()):
        # station location in map_scale units
        east = self.data_obj.mt_dict[key].grid_east/self.dscale
        north = self.data_obj.mt_dict[key].grid_north/self.dscale
        dpt = self.data_obj.mt_dict[key].pt
        data_pt_arr[:, ii]['east'] = east
        data_pt_arr[:, ii]['north'] = north
        data_pt_arr[:, ii]['phimin'] = dpt.phimin[0]
        data_pt_arr[:, ii]['phimax'] = dpt.phimax[0]
        data_pt_arr[:, ii]['azimuth'] = dpt.azimuth[0]
        data_pt_arr[:, ii]['skew'] = dpt.beta[0]
        if self.resp_fn is not None:
            mpt = self.resp_obj.mt_dict[key].pt
            try:
                rpt = mtpt.ResidualPhaseTensor(pt_object1=dpt,
                                               pt_object2=mpt)
                rpt = rpt.residual_pt
                res_pt_arr[:, ii]['east'] = east
                res_pt_arr[:, ii]['north'] = north
                res_pt_arr[:, ii]['phimin'] = rpt.phimin[0]
                res_pt_arr[:, ii]['phimax'] = rpt.phimax[0]
                res_pt_arr[:, ii]['azimuth'] = rpt.azimuth[0]
                res_pt_arr[:, ii]['skew'] = rpt.beta[0]
                res_pt_arr[:, ii]['geometric_mean'] = \
                    np.sqrt(abs(rpt.phimin[0]*rpt.phimax[0]))
            except mtex.MTpyError_PT:
                # pt shapes do not match: report and leave zeros for
                # this station in the residual array.
                # (output-identical to the old Py2 print statement on
                # both Python 2 and 3)
                print('{0} {1} {2}'.format(key, dpt.pt.shape, mpt.pt.shape))

            model_pt_arr[:, ii]['east'] = east
            model_pt_arr[:, ii]['north'] = north
            model_pt_arr[:, ii]['phimin'] = mpt.phimin[0]
            model_pt_arr[:, ii]['phimax'] = mpt.phimax[0]
            model_pt_arr[:, ii]['azimuth'] = mpt.azimuth[0]
            model_pt_arr[:, ii]['skew'] = mpt.beta[0]

    #make these attributes
    self.pt_data_arr = data_pt_arr
    if self.resp_fn is not None:
        self.pt_resp_arr = model_pt_arr
        self.pt_resid_arr = res_pt_arr
    def plot(self):
        """
        plot phase tensor maps for data and or response, each figure is of a
        different period. If response is input a third column is added which is
        the residual phase tensor showing where the model is not fitting the data
        well. The data is plotted in km.

        Reads the data (and optional response/model) files on the first call
        and appends one matplotlib figure per entry of plot_period_list to
        self.fig_list.
        """
        #--> read in data first
        if self.data_obj is None:
            self._read_files()

        # set plot properties
        plt.rcParams['font.size'] = self.font_size
        plt.rcParams['figure.subplot.left'] = self.subplot_left
        plt.rcParams['figure.subplot.right'] = self.subplot_right
        plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
        plt.rcParams['figure.subplot.top'] = self.subplot_top
        font_dict = {'size':self.font_size+2, 'weight':'bold'}

        # make a grid of subplots (data | model | residual columns)
        gs = gridspec.GridSpec(1, 3, hspace=self.subplot_hspace,
                               wspace=self.subplot_wspace)

        #set some parameters for the colorbar
        ckmin = float(self.ellipse_range[0])
        ckmax = float(self.ellipse_range[1])
        try:
            ckstep = float(self.ellipse_range[2])
        except IndexError:
            # a step is mandatory for the segmented colormap; otherwise
            # fall back to a step of 3
            if self.ellipse_cmap == 'mt_seg_bl2wh2rd':
                raise ValueError('Need to input range as (min, max, step)')
            else:
                ckstep = 3
        bounds = np.arange(ckmin, ckmax+ckstep, ckstep)

        # set plot limits to be the station area (converted to map units)
        if self.ew_limits == None:
            east_min = self.data_obj.data_array['rel_east'].min()-\
                        self.pad_east
            east_max = self.data_obj.data_array['rel_east'].max()+\
                        self.pad_east
            self.ew_limits = (east_min/self.dscale, east_max/self.dscale)

        if self.ns_limits == None:
            north_min = self.data_obj.data_array['rel_north'].min()-\
                        self.pad_north
            north_max = self.data_obj.data_array['rel_north'].max()+\
                        self.pad_north
            self.ns_limits = (north_min/self.dscale, north_max/self.dscale)

        #-------------plot phase tensors------------------------------------
        # one figure per plot period
        for ff, per in enumerate(self.plot_period_list):
            data_ii = self.period_dict[per]

            print 'Plotting Period: {0:.5g}'.format(per)
            fig = plt.figure('{0:.5g}'.format(per), figsize=self.fig_size,
                             dpi=self.fig_dpi)
            fig.clf()

            if self.resp_fn is not None:
                # three columns: data, model response, residual
                axd = fig.add_subplot(gs[0, 0], aspect='equal')
                axm = fig.add_subplot(gs[0, 1], aspect='equal')
                axr = fig.add_subplot(gs[0, 2], aspect='equal')
                ax_list = [axd, axm, axr]
            else:
                # data only: use the whole width
                axd = fig.add_subplot(gs[0, :], aspect='equal')
                ax_list = [axd]

            #plot model below the phase tensors
            if self.model_fn is not None:
                gridzcentre = np.mean([self.model_obj.grid_z[1:],self.model_obj.grid_z[:-1]],axis=0)
                # pick the model layer whose depth matches the skin depth
                # for this period
                approx_depth, d_index = ws.estimate_skin_depth(self.model_obj.res_model.copy(),
                                                               gridzcentre/self.dscale,
                                                               per,
                                                               dscale=self.dscale)
                #need to add an extra row and column to east and north to make sure
                #all is plotted see pcolor for details.
                plot_east = np.append(self.model_obj.grid_east,
                                      self.model_obj.grid_east[-1]*1.25)/\
                            self.dscale
                plot_north = np.append(self.model_obj.grid_north,
                                       self.model_obj.grid_north[-1]*1.25)/\
                             self.dscale

                #make a mesh grid for plotting
                #the 'ij' makes sure the resulting grid is in east, north
                try:
                    self.mesh_east, self.mesh_north = np.meshgrid(plot_east,
                                                                  plot_north,
                                                                  indexing='ij')
                except TypeError:
                    # older numpy has no 'indexing' keyword; transpose the
                    # default 'xy' output to get the same orientation
                    self.mesh_east, self.mesh_north = [arr.T for arr in np.meshgrid(plot_east,
                                                                                    plot_north)]

                for ax in ax_list:
                    plot_res = np.log10(self.model_obj.res_model[:, :, d_index].T)
                    ax.pcolormesh(self.mesh_east,
                                  self.mesh_north,
                                  plot_res,
                                  cmap=self.res_cmap,
                                  vmin=self.res_limits[0],
                                  vmax=self.res_limits[1])

            #--> plot data phase tensors
            for pt in self.pt_data_arr[data_ii]:
                # ellipse axes are normalized by the largest phimax of this
                # period so the biggest ellipse has size ellipse_size
                eheight = pt['phimin']/\
                          self.pt_data_arr[data_ii]['phimax'].max()*\
                          self.ellipse_size
                ewidth = pt['phimax']/\
                         self.pt_data_arr[data_ii]['phimax'].max()*\
                         self.ellipse_size

                ellipse = Ellipse((pt['east'],
                                   pt['north']),
                                  width=ewidth,
                                  height=eheight,
                                  angle=90-pt['azimuth'])

                #get ellipse color
                if self.ellipse_cmap.find('seg')>0:
                    ellipse.set_facecolor(mtcl.get_plot_color(pt[self.ellipse_colorby],
                                                              self.ellipse_colorby,
                                                              self.ellipse_cmap,
                                                              ckmin,
                                                              ckmax,
                                                              bounds=bounds))
                else:
                    ellipse.set_facecolor(mtcl.get_plot_color(pt[self.ellipse_colorby],
                                                              self.ellipse_colorby,
                                                              self.ellipse_cmap,
                                                              ckmin,
                                                              ckmax))

                axd.add_artist(ellipse)

            #-----------plot response phase tensors---------------
            if self.resp_fn is not None:
                rcmin = np.floor(self.pt_resid_arr['geometric_mean'].min())
                rcmax = np.floor(self.pt_resid_arr['geometric_mean'].max())
                for mpt, rpt in zip(self.pt_resp_arr[data_ii],
                                    self.pt_resid_arr[data_ii]):
                    eheight = mpt['phimin']/\
                              self.pt_resp_arr[data_ii]['phimax'].max()*\
                              self.ellipse_size
                    ewidth = mpt['phimax']/\
                             self.pt_resp_arr[data_ii]['phimax'].max()*\
                             self.ellipse_size

                    ellipsem = Ellipse((mpt['east'],
                                        mpt['north']),
                                       width=ewidth,
                                       height=eheight,
                                       angle=90-mpt['azimuth'])

                    #get ellipse color
                    if self.ellipse_cmap.find('seg')>0:
                        ellipsem.set_facecolor(mtcl.get_plot_color(mpt[self.ellipse_colorby],
                                                                   self.ellipse_colorby,
                                                                   self.ellipse_cmap,
                                                                   ckmin,
                                                                   ckmax,
                                                                   bounds=bounds))
                    else:
                        ellipsem.set_facecolor(mtcl.get_plot_color(mpt[self.ellipse_colorby],
                                                                   self.ellipse_colorby,
                                                                   self.ellipse_cmap,
                                                                   ckmin,
                                                                   ckmax))

                    axm.add_artist(ellipsem)

                    #-----------plot residual phase tensors---------------
                    eheight = rpt['phimin']/\
                              self.pt_resid_arr[data_ii]['phimax'].max()*\
                              self.ellipse_size
                    ewidth = rpt['phimax']/\
                             self.pt_resid_arr[data_ii]['phimax'].max()*\
                             self.ellipse_size

                    # NOTE(review): residual ellipses use angle=azimuth while
                    # data/model ellipses use 90-azimuth -- confirm intended
                    ellipser = Ellipse((rpt['east'],
                                        rpt['north']),
                                       width=ewidth,
                                       height=eheight,
                                       angle=rpt['azimuth'])

                    #get ellipse color
                    # residual is colored by the geometric mean of phimin/max
                    rpt_color = np.sqrt(abs(rpt['phimin']*rpt['phimax']))
                    if self.ellipse_cmap.find('seg')>0:
                        ellipser.set_facecolor(mtcl.get_plot_color(rpt_color,
                                                                   'geometric_mean',
                                                                   self.residual_cmap,
                                                                   ckmin,
                                                                   ckmax,
                                                                   bounds=bounds))
                    else:
                        ellipser.set_facecolor(mtcl.get_plot_color(rpt_color,
                                                                   'geometric_mean',
                                                                   self.residual_cmap,
                                                                   ckmin,
                                                                   ckmax))

                    axr.add_artist(ellipser)

            #--> set axes properties
            # data
            axd.set_xlim(self.ew_limits)
            axd.set_ylim(self.ns_limits)
            axd.set_xlabel('Easting ({0})'.format(self.map_scale),
                           fontdict=font_dict)
            axd.set_ylabel('Northing ({0})'.format(self.map_scale),
                           fontdict=font_dict)
            #make a colorbar for phase tensors
            #bb = axd.axes.get_position().bounds
            bb = axd.get_position().bounds
            y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
                     (self.ew_limits[1]-self.ew_limits[0]))
            cb_location = (3.35*bb[2]/5+bb[0],
                           y1*self.cb_pt_pad, .295*bb[2], .02)
            cbaxd = fig.add_axes(cb_location)
            cbd = mcb.ColorbarBase(cbaxd,
                                   cmap=mtcl.cmapdict[self.ellipse_cmap],
                                   norm=Normalize(vmin=ckmin,
                                                  vmax=ckmax),
                                   orientation='horizontal')
            cbd.ax.xaxis.set_label_position('top')
            cbd.ax.xaxis.set_label_coords(.5, 1.75)
            cbd.set_label(mtplottools.ckdict[self.ellipse_colorby])
            cbd.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
                                    self.cb_tick_step))

            axd.text(self.ew_limits[0]*.95,
                     self.ns_limits[1]*.95,
                     'Data',
                     horizontalalignment='left',
                     verticalalignment='top',
                     bbox={'facecolor':'white'},
                     fontdict={'size':self.font_size+1})

            #Model and residual
            if self.resp_fn is not None:
                for aa, ax in enumerate([axm, axr]):
                    ax.set_xlim(self.ew_limits)
                    ax.set_ylim(self.ns_limits)
                    ax.set_xlabel('Easting ({0})'.format(self.map_scale),
                                  fontdict=font_dict)
                    plt.setp(ax.yaxis.get_ticklabels(), visible=False)
                    #make a colorbar ontop of axis
                    bb = ax.axes.get_position().bounds
                    y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
                             (self.ew_limits[1]-self.ew_limits[0]))
                    cb_location = (3.35*bb[2]/5+bb[0],
                                   y1*self.cb_pt_pad, .295*bb[2], .02)
                    cbax = fig.add_axes(cb_location)
                    if aa == 0:
                        # model column reuses the phase-tensor colormap
                        cb = mcb.ColorbarBase(cbax,
                                              cmap=mtcl.cmapdict[self.ellipse_cmap],
                                              norm=Normalize(vmin=ckmin,
                                                             vmax=ckmax),
                                              orientation='horizontal')
                        cb.ax.xaxis.set_label_position('top')
                        cb.ax.xaxis.set_label_coords(.5, 1.75)
                        cb.set_label(mtplottools.ckdict[self.ellipse_colorby])
                        cb.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
                                               self.cb_tick_step))
                        ax.text(self.ew_limits[0]*.95,
                                self.ns_limits[1]*.95,
                                'Model',
                                horizontalalignment='left',
                                verticalalignment='top',
                                bbox={'facecolor':'white'},
                                fontdict={'size':self.font_size+1})
                    else:
                        # residual column is scaled by the floored
                        # geometric-mean range computed above
                        cb = mcb.ColorbarBase(cbax,
                                              cmap=mtcl.cmapdict[self.residual_cmap],
                                              norm=Normalize(vmin=rcmin,
                                                             vmax=rcmax),
                                              orientation='horizontal')
                        cb.ax.xaxis.set_label_position('top')
                        cb.ax.xaxis.set_label_coords(.5, 1.75)
                        cb.set_label(r"$\sqrt{\Phi_{min} \Phi_{max}}$")
                        cb_ticks = [rcmin, (rcmax-rcmin)/2, rcmax]
                        cb.set_ticks(cb_ticks)
                        ax.text(self.ew_limits[0]*.95,
                                self.ns_limits[1]*.95,
                                'Residual',
                                horizontalalignment='left',
                                verticalalignment='top',
                                bbox={'facecolor':'white'},
                                fontdict={'size':self.font_size+1})

            # resistivity colorbar for the background model, one per axes
            if self.model_fn is not None:
                for ax in ax_list:
                    ax.tick_params(direction='out')
                    bb = ax.axes.get_position().bounds
                    y1 = .25*(2-(self.ns_limits[1]-self.ns_limits[0])/
                             (self.ew_limits[1]-self.ew_limits[0]))
                    cb_position = (3.0*bb[2]/5+bb[0],
                                   y1*self.cb_res_pad, .35*bb[2], .02)
                    cbax = fig.add_axes(cb_position)
                    cb = mcb.ColorbarBase(cbax,
                                          cmap=self.res_cmap,
                                          norm=Normalize(vmin=self.res_limits[0],
                                                         vmax=self.res_limits[1]),
                                          orientation='horizontal')
                    cb.ax.xaxis.set_label_position('top')
                    cb.ax.xaxis.set_label_coords(.5, 1.5)
                    cb.set_label('Resistivity ($\Omega \cdot$m)')
                    cb_ticks = np.arange(np.floor(self.res_limits[0]),
                                         np.ceil(self.res_limits[1]+1), 1)
                    cb.set_ticks(cb_ticks)
                    cb.set_ticklabels([mtplottools.labeldict[ctk] for ctk in cb_ticks])

            plt.show()
            self.fig_list.append(fig)
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def save_figure(self, save_path=None, fig_dpi=None, file_format='pdf',
orientation='landscape', close_fig='y'):
"""
save_figure will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be save to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
**close_plot** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_path) == False:
try:
os.mkdir(save_path)
except:
raise IOError('Need to input a correct directory path')
for fig in self.fig_list:
per = fig.canvas.get_window_title()
save_fn = os.path.join(save_path, 'PT_DepthSlice_{0}s.{1}'.format(
per, file_format))
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.close(fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
#==============================================================================
# plot depth slices
#==============================================================================
class PlotDepthSlice(object):
    """
    Plots depth slices of resistivity model

    :Example: ::

        >>> import mtpy.modeling.ws3dinv as ws
        >>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
        >>> sfn = r"/home/MT/ws3dinv/Inv1/WSStationLocations.txt"
        >>> # plot just first layer to check the formatting
        >>> pds = ws.PlotDepthSlice(model_fn=mfn, station_fn=sfn,
        >>> ...                     depth_index=0, save_plots='n')
        >>> #move color bar up
        >>> pds.cb_location
        >>> (0.64500000000000002, 0.14999999999999997, 0.3, 0.025)
        >>> pds.cb_location = (.645, .175, .3, .025)
        >>> pds.redraw_plot()
        >>> #looks good now plot all depth slices and save them to a folder
        >>> pds.save_path = r"/home/MT/ws3dinv/Inv1/DepthSlices"
        >>> pds.depth_index = None
        >>> pds.save_plots = 'y'
        >>> pds.redraw_plot()

    ======================= ===================================================
    Attributes              Description
    ======================= ===================================================
    cb_location             location of color bar (x, y, width, height)
                            *default* is None, automatically locates
    cb_orientation          [ 'vertical' | 'horizontal' ]
                            *default* is horizontal
    cb_pad                  padding between axes and colorbar
                            *default* is None
    cb_shrink               percentage to shrink colorbar by
                            *default* is None
    climits                 (min, max) of resistivity color on log scale
                            *default* is (0, 4)
    cmap                    name of color map *default* is 'jet_r'
    data_fn                 full path to data file
    depth_index             integer value of depth slice index, shallowest
                            layer is 0
    dscale                  scaling parameter depending on map_scale
    ew_limits               (min, max) plot limits in e-w direction in
                            map_scale units. *default* is None, sets viewing
                            area to the station area
    fig_aspect              aspect ratio of plot. *default* is 1
    fig_dpi                 resolution of figure in dots-per-inch. *default* is
                            300
    fig_list                list of matplotlib.figure instances for each
                            depth slice
    fig_size                [width, height] in inches of figure size
                            *default* is [6, 6]
    font_size               size of ticklabel font in points, labels are
                            font_size+2. *default* is 7
    grid_east               relative location of grid nodes in e-w direction
                            in map_scale units
    grid_north              relative location of grid nodes in n-s direction
                            in map_scale units
    grid_z                  relative location of grid nodes in z direction
                            in map_scale units
    initial_fn              full path to initial file
    map_scale               [ 'km' | 'm' ] distance units of map. *default* is
                            km
    mesh_east               np.meshgrid(grid_east, grid_north, indexing='ij')
    mesh_north              np.meshgrid(grid_east, grid_north, indexing='ij')
    model_fn                full path to model file
    nodes_east              relative distance between nodes in e-w direction
                            in map_scale units
    nodes_north             relative distance between nodes in n-s direction
                            in map_scale units
    nodes_z                 relative distance between nodes in z direction
                            in map_scale units
    ns_limits               (min, max) plot limits in n-s direction in
                            map_scale units. *default* is None, sets viewing
                            area to the station area
    plot_grid               [ 'y' | 'n' ] 'y' to plot mesh grid lines.
                            *default* is 'n'
    plot_yn                 [ 'y' | 'n' ] 'y' to plot on instantiation
    res_model               np.ndarray(n_north, n_east, n_vertical) of
                            model resistivity values in linear scale
    save_path               path to save figures to
    save_plots              [ 'y' | 'n' ] 'y' to save depth slices to save_path
    station_east            location of stations in east direction in
                            map_scale units
    station_fn              full path to station locations file
    station_names           station names
    station_north           location of station in north direction in
                            map_scale units
    subplot_bottom          distance between axes and bottom of figure window
    subplot_left            distance between axes and left of figure window
    subplot_right           distance between axes and right of figure window
    subplot_top             distance between axes and top of figure window
    title                   title of plot *default* is depth of slice
    xminorticks             location of xminorticks
    yminorticks             location of yminorticks
    ======================= ===================================================
    """

    def __init__(self, model_fn=None, data_fn=None, **kwargs):
        self.model_fn = model_fn
        self.data_fn = data_fn
        # fix: initial_fn was referenced below but never assigned, raising
        # AttributeError whenever model_fn was None; accept it as a kwarg
        self.initial_fn = kwargs.pop('initial_fn', None)

        self.save_path = kwargs.pop('save_path', None)
        if self.model_fn is not None and self.save_path is None:
            self.save_path = os.path.dirname(self.model_fn)
        elif self.initial_fn is not None and self.save_path is None:
            self.save_path = os.path.dirname(self.initial_fn)

        if self.save_path is not None:
            if not os.path.exists(self.save_path):
                os.mkdir(self.save_path)

        self.save_plots = kwargs.pop('save_plots', 'y')

        self.depth_index = kwargs.pop('depth_index', None)
        self.map_scale = kwargs.pop('map_scale', 'km')
        #make map scale
        if self.map_scale == 'km':
            self.dscale = 1000.
        elif self.map_scale == 'm':
            self.dscale = 1.
        else:
            # fail fast instead of leaving dscale undefined and getting an
            # AttributeError much later during plotting
            raise ValueError("map_scale must be 'km' or 'm', not {0!r}".format(
                             self.map_scale))
        self.ew_limits = kwargs.pop('ew_limits', None)
        self.ns_limits = kwargs.pop('ns_limits', None)

        self.plot_grid = kwargs.pop('plot_grid', 'n')

        self.fig_size = kwargs.pop('fig_size', [6, 6])
        # NOTE: the keyword is 'dpi', not 'fig_dpi'
        self.fig_dpi = kwargs.pop('dpi', 300)
        self.fig_aspect = kwargs.pop('fig_aspect', 1)
        self.title = kwargs.pop('title', 'on')
        self.fig_list = []

        self.xminorticks = kwargs.pop('xminorticks', 1000)
        self.yminorticks = kwargs.pop('yminorticks', 1000)

        self.climits = kwargs.pop('climits', (0, 4))
        self.cmap = kwargs.pop('cmap', 'jet_r')
        self.font_size = kwargs.pop('font_size', 8)

        self.cb_shrink = kwargs.pop('cb_shrink', .8)
        self.cb_pad = kwargs.pop('cb_pad', .01)
        self.cb_orientation = kwargs.pop('cb_orientation', 'horizontal')
        self.cb_location = kwargs.pop('cb_location', None)

        self.subplot_right = .99
        self.subplot_left = .085
        self.subplot_top = .92
        self.subplot_bottom = .1

        # filled in by read_files()
        self.res_model = None
        self.grid_east = None
        self.grid_north = None
        self.grid_z = None

        self.nodes_east = None
        self.nodes_north = None
        self.nodes_z = None

        self.mesh_east = None
        self.mesh_north = None

        self.station_east = None
        self.station_north = None
        self.station_names = None

        self.plot_yn = kwargs.pop('plot_yn', 'y')
        if self.plot_yn == 'y':
            self.plot()

    def read_files(self):
        """
        Read the model and data files and fill in the corresponding
        attributes (grids/nodes in map_scale units, resistivity model and
        station locations).

        Raises mtex.MTpyError_file_handling if model_fn is set but the file
        does not exist; a missing data file only prints a warning.
        """
        #--> read in model file
        if self.model_fn is not None:
            if os.path.isfile(self.model_fn):
                md_model = Model()
                md_model.read_model_file(self.model_fn)

                self.res_model = md_model.res_model
                # convert distances from meters into map_scale units
                self.grid_east = md_model.grid_east/self.dscale
                self.grid_north = md_model.grid_north/self.dscale
                self.grid_z = md_model.grid_z/self.dscale
                self.nodes_east = md_model.nodes_east/self.dscale
                self.nodes_north = md_model.nodes_north/self.dscale
                self.nodes_z = md_model.nodes_z/self.dscale
            else:
                raise mtex.MTpyError_file_handling(
                        '{0} does not exist, check path'.format(self.model_fn))

        #--> read in data file to get station locations
        if self.data_fn is not None:
            if os.path.isfile(self.data_fn):
                md_data = Data()
                md_data.read_data_file(self.data_fn)

                self.station_east = md_data.station_locations['rel_east']/self.dscale
                self.station_north = md_data.station_locations['rel_north']/self.dscale
                self.station_names = md_data.station_locations['station']
            else:
                # a missing data file is not fatal; just skip the stations
                print('Could not find data file {0}'.format(self.data_fn))

    def plot(self):
        """
        Plot the requested depth slices, one figure each, and append them to
        self.fig_list.  Optionally saves each figure to save_path.
        """
        #--> get information from files
        self.read_files()

        fdict = {'size':self.font_size+2, 'weight':'bold'}

        # tick labels for log10 resistivity
        cblabeldict = {-2:'$10^{-3}$', -1:'$10^{-1}$', 0:'$10^{0}$',
                       1:'$10^{1}$', 2:'$10^{2}$', 3:'$10^{3}$',
                       4:'$10^{4}$', 5:'$10^{5}$', 6:'$10^{6}$',
                       7:'$10^{7}$', 8:'$10^{8}$'}

        #create a list of depth slices to plot
        if self.depth_index is None:
            zrange = range(self.grid_z.shape[0])
        elif isinstance(self.depth_index, int):
            zrange = [self.depth_index]
        elif isinstance(self.depth_index, (list, np.ndarray)):
            zrange = self.depth_index
        else:
            # previously zrange was silently left unbound for other types
            raise ValueError('depth_index must be None, an int or a '
                             'list/array of ints')

        #set the limits of the plot
        if self.ew_limits is None:
            if self.station_east is not None:
                xlimits = (np.floor(self.station_east.min()),
                           np.ceil(self.station_east.max()))
            else:
                xlimits = (self.grid_east[5], self.grid_east[-5])
        else:
            xlimits = self.ew_limits

        if self.ns_limits is None:
            if self.station_north is not None:
                ylimits = (np.floor(self.station_north.min()),
                           np.ceil(self.station_north.max()))
            else:
                ylimits = (self.grid_north[5], self.grid_north[-5])
        else:
            ylimits = self.ns_limits

        #make a mesh grid of north and east
        try:
            self.mesh_east, self.mesh_north = np.meshgrid(self.grid_east,
                                                          self.grid_north,
                                                          indexing='ij')
        except TypeError:
            # older numpy has no 'indexing' keyword; transpose instead
            self.mesh_east, self.mesh_north = [arr.T for arr in
                                               np.meshgrid(self.grid_east,
                                                           self.grid_north)]

        plt.rcParams['font.size'] = self.font_size

        #--> plot depths into individual figures
        for ii in zrange:
            depth = '{0:.3f} ({1})'.format(self.grid_z[ii],
                                           self.map_scale)
            fig = plt.figure(depth, figsize=self.fig_size, dpi=self.fig_dpi)
            plt.clf()
            ax1 = fig.add_subplot(1, 1, 1, aspect=self.fig_aspect)
            # res_model is (north, east, z); transpose the slice to line up
            # with the (east, north) mesh
            plot_res = np.log10(self.res_model[:, :, ii].T)
            ax1.pcolormesh(self.mesh_east,
                           self.mesh_north,
                           plot_res,
                           cmap=self.cmap,
                           vmin=self.climits[0],
                           vmax=self.climits[1])

            #plot the stations
            if self.station_east is not None:
                for ee, nn in zip(self.station_east, self.station_north):
                    ax1.text(ee, nn, '*',
                             verticalalignment='center',
                             horizontalalignment='center',
                             fontdict={'size':5, 'weight':'bold'})

            #set axis properties
            ax1.set_xlim(xlimits)
            ax1.set_ylim(ylimits)
            ax1.xaxis.set_minor_locator(MultipleLocator(self.xminorticks/self.dscale))
            ax1.yaxis.set_minor_locator(MultipleLocator(self.yminorticks/self.dscale))
            ax1.set_ylabel('Northing ('+self.map_scale+')', fontdict=fdict)
            ax1.set_xlabel('Easting ('+self.map_scale+')', fontdict=fdict)
            ax1.set_title('Depth = {0}'.format(depth), fontdict=fdict)

            #plot the grid if desired
            if self.plot_grid == 'y':
                # use None separators so each grid direction is a single
                # Line2D artist instead of one per grid line
                east_line_xlist = []
                east_line_ylist = []
                for xx in self.grid_east:
                    east_line_xlist.extend([xx, xx])
                    east_line_xlist.append(None)
                    east_line_ylist.extend([self.grid_north.min(),
                                            self.grid_north.max()])
                    east_line_ylist.append(None)
                ax1.plot(east_line_xlist,
                         east_line_ylist,
                         lw=.25,
                         color='k')

                north_line_xlist = []
                north_line_ylist = []
                for yy in self.grid_north:
                    north_line_xlist.extend([self.grid_east.min(),
                                             self.grid_east.max()])
                    north_line_xlist.append(None)
                    north_line_ylist.extend([yy, yy])
                    north_line_ylist.append(None)
                ax1.plot(north_line_xlist,
                         north_line_ylist,
                         lw=.25,
                         color='k')

            #plot the colorbar
            if self.cb_location is None:
                # NOTE(review): figbox is deprecated in recent matplotlib
                # and bounds[3] (height) is used as an x offset -- confirm
                if self.cb_orientation == 'horizontal':
                    self.cb_location = (ax1.axes.figbox.bounds[3]-.225,
                                        ax1.axes.figbox.bounds[1]+.05, .3, .025)
                elif self.cb_orientation == 'vertical':
                    self.cb_location = ((ax1.axes.figbox.bounds[2]-.15,
                                         ax1.axes.figbox.bounds[3]-.21, .025, .3))

            ax2 = fig.add_axes(self.cb_location)

            cb = mcb.ColorbarBase(ax2,
                                  cmap=self.cmap,
                                  norm=Normalize(vmin=self.climits[0],
                                                 vmax=self.climits[1]),
                                  orientation=self.cb_orientation)

            if self.cb_orientation == 'horizontal':
                cb.ax.xaxis.set_label_position('top')
                cb.ax.xaxis.set_label_coords(.5, 1.3)
            elif self.cb_orientation == 'vertical':
                cb.ax.yaxis.set_label_position('right')
                cb.ax.yaxis.set_label_coords(1.25, .5)
                cb.ax.yaxis.tick_left()
                cb.ax.tick_params(axis='y', direction='in')

            cb.set_label(r'Resistivity ($\Omega \cdot$m)',
                         fontdict={'size':self.font_size+1})
            cb.set_ticks(np.arange(self.climits[0], self.climits[1]+1))
            cb.set_ticklabels([cblabeldict[cc]
                               for cc in np.arange(self.climits[0],
                                                   self.climits[1]+1)])

            self.fig_list.append(fig)

            #--> save plots to a common folder
            if self.save_plots == 'y':
                fig.savefig(os.path.join(self.save_path,
                            "Depth_{}_{:.4f}.png".format(ii, self.grid_z[ii])),
                            dpi=self.fig_dpi, bbox_inches='tight')
                fig.clear()
                plt.close()

    def redraw_plot(self):
        """
        Close every open figure and regenerate the plots.

        Use this after changing plot attributes so the changes take effect.

        :Example: ::

            >>> # change the color and marker of the xy components
            >>> import mtpy.modeling.occam2d as occam2d
            >>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
            >>> p1 = ocd.plotAllResponses()
            >>> #change line width
            >>> p1.lw = 2
            >>> p1.redraw_plot()
        """
        for fig in self.fig_list:
            plt.close(fig)
        self.plot()

    def update_plot(self, fig):
        """
        Redraw the canvas of *fig* after in-place changes to figure or axes
        properties.

        :Example: ::

            >>> # to change the grid lines to only be on the major ticks
            >>> import mtpy.modeling.occam2d as occam2d
            >>> dfn = r"/home/occam2d/Inv1/data.dat"
            >>> ocd = occam2d.Occam2DData(dfn)
            >>> ps1 = ocd.plotAllResponses()
            >>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
            >>> ps1.update_plot()
        """
        fig.canvas.draw()

    def __str__(self):
        """
        rewrite the string builtin to give a useful message
        """
        return ("Plots depth slices of model from WS3DINV")
#==============================================================================
# plot slices
#==============================================================================
class PlotSlices(object):
"""
plot all slices and be able to scroll through the model
:Example: ::
>>> import mtpy.modeling.modem_new as modem
>>> mfn = r"/home/modem/Inv1/Modular_NLCG_100.rho"
>>> dfn = r"/home/modem/Inv1/ModEM_data.dat"
>>> pds = ws.PlotSlices(model_fn=mfn, data_fn=dfn)
======================= ===================================================
Buttons Description
======================= ===================================================
'e' moves n-s slice east by one model block
'w' moves n-s slice west by one model block
'n' moves e-w slice north by one model block
'm' moves e-w slice south by one model block
'd' moves depth slice down by one model block
'u' moves depth slice up by one model block
======================= ===================================================
======================= ===================================================
Attributes Description
======================= ===================================================
ax_en matplotlib.axes instance for depth slice map view
ax_ez matplotlib.axes instance for e-w slice
ax_map matplotlib.axes instance for location map
ax_nz matplotlib.axes instance for n-s slice
climits (min , max) color limits on resistivity in log
scale. *default* is (0, 4)
cmap name of color map for resisitiviy.
*default* is 'jet_r'
data_fn full path to data file name
dscale scaling parameter depending on map_scale
east_line_xlist list of line nodes of east grid for faster plotting
east_line_ylist list of line nodes of east grid for faster plotting
ew_limits (min, max) limits of e-w in map_scale units
*default* is None and scales to station area
fig matplotlib.figure instance for figure
fig_aspect aspect ratio of plots. *default* is 1
fig_dpi resolution of figure in dots-per-inch
*default* is 300
fig_num figure instance number
fig_size [width, height] of figure window.
*default* is [6,6]
font_dict dictionary of font keywords, internally created
font_size size of ticklables in points, axes labes are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
index_east index value of grid_east being plotted
index_north index value of grid_north being plotted
index_vertical index value of grid_z being plotted
initial_fn full path to initial file
key_press matplotlib.canvas.connect instance
map_scale [ 'm' | 'km' ] scale of map. *default* is km
mesh_east np.meshgrid(grid_east, grid_north)[0]
mesh_en_east np.meshgrid(grid_east, grid_north)[0]
mesh_en_north np.meshgrid(grid_east, grid_north)[1]
mesh_ez_east np.meshgrid(grid_east, grid_z)[0]
mesh_ez_vertical np.meshgrid(grid_east, grid_z)[1]
mesh_north np.meshgrid(grid_east, grid_north)[1]
mesh_nz_north np.meshgrid(grid_north, grid_z)[0]
mesh_nz_vertical np.meshgrid(grid_north, grid_z)[1]
model_fn full path to model file
ms size of station markers in points. *default* is 2
nodes_east relative distance betwen nodes in e-w direction
in map_scale units
nodes_north relative distance betwen nodes in n-s direction
in map_scale units
nodes_z relative distance betwen nodes in z direction
in map_scale units
north_line_xlist list of line nodes north grid for faster plotting
north_line_ylist list of line nodes north grid for faster plotting
ns_limits (min, max) limits of plots in n-s direction
*default* is None, set veiwing area to station area
plot_yn [ 'y' | 'n' ] 'y' to plot on instantiation
*default* is 'y'
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
station_color color of station marker. *default* is black
station_dict_east location of stations for each east grid row
station_dict_north location of stations for each north grid row
station_east location of stations in east direction
station_fn full path to station file
station_font_color color of station label
station_font_pad padding between station marker and label
station_font_rotation angle of station label
station_font_size font size of station label
station_font_weight weight of font for station label
station_id [min, max] index values for station labels
station_marker station marker
station_names name of stations
station_north location of stations in north direction
subplot_bottom distance between axes and bottom of figure window
subplot_hspace distance between subplots in vertical direction
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
subplot_wspace distance between subplots in horizontal direction
title title of plot
z_limits (min, max) limits in vertical direction,
======================= ===================================================
"""
def __init__(self, model_fn, data_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.font_size = kwargs.pop('font_size', 7)
self.subplot_wspace = .20
self.subplot_hspace = .30
self.subplot_right = .98
self.subplot_left = .08
self.subplot_top = .97
self.subplot_bottom = .1
self.index_vertical = kwargs.pop('index_vertical', 0)
self.index_east = kwargs.pop('index_east', 0)
self.index_north = kwargs.pop('index_north', 0)
self.cmap = kwargs.pop('cmap', 'jet_r')
self.climits = kwargs.pop('climits', (0, 4))
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale=='km':
self.dscale=1000.
elif self.map_scale=='m':
self.dscale=1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.z_limits = kwargs.pop('z_limits', None)
self.res_model = None
self.grid_east = None
self.grid_north = None
self.grid_z = None
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
self.mesh_east = None
self.mesh_north = None
self.station_east = None
self.station_north = None
self.station_names = None
self.station_id = kwargs.pop('station_id', None)
self.station_font_size = kwargs.pop('station_font_size', 8)
self.station_font_pad = kwargs.pop('station_font_pad', 1.0)
self.station_font_weight = kwargs.pop('station_font_weight', 'bold')
self.station_font_rotation = kwargs.pop('station_font_rotation', 60)
self.station_font_color = kwargs.pop('station_font_color', 'k')
self.station_marker = kwargs.pop('station_marker',
r"$\blacktriangledown$")
self.station_color = kwargs.pop('station_color', 'k')
self.ms = kwargs.pop('ms', 10)
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def read_files(self):
"""
read in the files to get appropriate information
"""
#--> read in model file
if self.model_fn is not None:
if os.path.isfile(self.model_fn) == True:
md_model = Model()
md_model.read_model_file(self.model_fn)
self.res_model = md_model.res_model
self.grid_east = md_model.grid_east/self.dscale
self.grid_north = md_model.grid_north/self.dscale
self.grid_z = md_model.grid_z/self.dscale
self.nodes_east = md_model.nodes_east/self.dscale
self.nodes_north = md_model.nodes_north/self.dscale
self.nodes_z = md_model.nodes_z/self.dscale
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.model_fn))
#--> read in data file to get station locations
if self.data_fn is not None:
if os.path.isfile(self.data_fn) == True:
md_data = Data()
md_data.read_data_file(self.data_fn)
self.station_east = md_data.station_locations['rel_east']/self.dscale
self.station_north = md_data.station_locations['rel_north']/self.dscale
self.station_names = md_data.station_locations['station']
else:
print 'Could not find data file {0}'.format(self.data_fn)
def plot(self):
"""
plot:
east vs. vertical,
north vs. vertical,
east vs. north
"""
self.read_files()
self.get_station_grid_locations()
print "=============== ==============================================="
print " Buttons Description "
print "=============== ==============================================="
print " 'e' moves n-s slice east by one model block"
print " 'w' moves n-s slice west by one model block"
print " 'n' moves e-w slice north by one model block"
print " 'm' moves e-w slice south by one model block"
print " 'd' moves depth slice down by one model block"
print " 'u' moves depth slice up by one model block"
print "=============== ==============================================="
self.font_dict = {'size':self.font_size+2, 'weight':'bold'}
#--> set default font size
plt.rcParams['font.size'] = self.font_size
#set the limits of the plot
if self.ew_limits == None:
if self.station_east is not None:
self.ew_limits = (np.floor(self.station_east.min()),
np.ceil(self.station_east.max()))
else:
self.ew_limits = (self.grid_east[5], self.grid_east[-5])
if self.ns_limits == None:
if self.station_north is not None:
self.ns_limits = (np.floor(self.station_north.min()),
np.ceil(self.station_north.max()))
else:
self.ns_limits = (self.grid_north[5], self.grid_north[-5])
if self.z_limits == None:
depth_limit = max([(abs(self.ew_limits[0])+abs(self.ew_limits[1])),
(abs(self.ns_limits[0])+abs(self.ns_limits[1]))])
self.z_limits = (-5000/self.dscale, depth_limit)
self.fig = plt.figure(self.fig_num, figsize=self.fig_size,
dpi=self.fig_dpi)
plt.clf()
gs = gridspec.GridSpec(2, 2,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace)
#make subplots
self.ax_ez = self.fig.add_subplot(gs[0, 0], aspect=self.fig_aspect)
self.ax_nz = self.fig.add_subplot(gs[1, 1], aspect=self.fig_aspect)
self.ax_en = self.fig.add_subplot(gs[1, 0], aspect=self.fig_aspect)
self.ax_map = self.fig.add_subplot(gs[0, 1])
#make grid meshes being sure the indexing is correct
self.mesh_ez_east, self.mesh_ez_vertical = np.meshgrid(self.grid_east,
self.grid_z,
indexing='ij')
self.mesh_nz_north, self.mesh_nz_vertical = np.meshgrid(self.grid_north,
self.grid_z,
indexing='ij')
self.mesh_en_east, self.mesh_en_north = np.meshgrid(self.grid_east,
self.grid_north,
indexing='ij')
#--> plot east vs vertical
self._update_ax_ez()
#--> plot north vs vertical
self._update_ax_nz()
#--> plot east vs north
self._update_ax_en()
#--> plot the grid as a map view
self._update_map()
#plot color bar
cbx = mcb.make_axes(self.ax_map, fraction=.15, shrink=.75, pad = .15)
cb = mcb.ColorbarBase(cbx[0],
cmap=self.cmap,
norm=Normalize(vmin=self.climits[0],
vmax=self.climits[1]))
cb.ax.yaxis.set_label_position('right')
cb.ax.yaxis.set_label_coords(1.25,.5)
cb.ax.yaxis.tick_left()
cb.ax.tick_params(axis='y',direction='in')
cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size+1})
cb.set_ticks(np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1)))
cblabeldict={-2:'$10^{-3}$',-1:'$10^{-1}$',0:'$10^{0}$',1:'$10^{1}$',
2:'$10^{2}$',3:'$10^{3}$',4:'$10^{4}$',5:'$10^{5}$',
6:'$10^{6}$',7:'$10^{7}$',8:'$10^{8}$'}
cb.set_ticklabels([cblabeldict[cc]
for cc in np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1))])
plt.show()
self.key_press = self.fig.canvas.mpl_connect('key_press_event',
self.on_key_press)
def on_key_press(self, event):
"""
on a key press change the slices
"""
key_press = event.key
if key_press == 'n':
if self.index_north == self.grid_north.shape[0]:
print 'Already at northern most grid cell'
else:
self.index_north += 1
if self.index_north > self.grid_north.shape[0]:
self.index_north = self.grid_north.shape[0]
self._update_ax_ez()
self._update_map()
if key_press == 'm':
if self.index_north == 0:
print 'Already at southern most grid cell'
else:
self.index_north -= 1
if self.index_north < 0:
self.index_north = 0
self._update_ax_ez()
self._update_map()
if key_press == 'e':
if self.index_east == self.grid_east.shape[0]:
print 'Already at eastern most grid cell'
else:
self.index_east += 1
if self.index_east > self.grid_east.shape[0]:
self.index_east = self.grid_east.shape[0]
self._update_ax_nz()
self._update_map()
if key_press == 'w':
if self.index_east == 0:
print 'Already at western most grid cell'
else:
self.index_east -= 1
if self.index_east < 0:
self.index_east = 0
self._update_ax_nz()
self._update_map()
if key_press == 'd':
if self.index_vertical == self.grid_z.shape[0]:
print 'Already at deepest grid cell'
else:
self.index_vertical += 1
if self.index_vertical > self.grid_z.shape[0]:
self.index_vertical = self.grid_z.shape[0]
self._update_ax_en()
print 'Depth = {0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale)
if key_press == 'u':
if self.index_vertical == 0:
print 'Already at surface grid cell'
else:
self.index_vertical -= 1
if self.index_vertical < 0:
self.index_vertical = 0
self._update_ax_en()
print 'Depth = {0:.5gf} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale)
def _update_ax_ez(self):
"""
update east vs vertical plot
"""
self.ax_ez.cla()
plot_ez = np.log10(self.res_model[self.index_north, :, :])
self.ax_ez.pcolormesh(self.mesh_ez_east,
self.mesh_ez_vertical,
plot_ez,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sx in self.station_dict_north[self.grid_north[self.index_north]]:
self.ax_ez.text(sx,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_ez.set_xlim(self.ew_limits)
self.ax_ez.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_ez.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_ez.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_nz(self):
"""
update east vs vertical plot
"""
self.ax_nz.cla()
plot_nz = np.log10(self.res_model[:, self.index_east, :])
self.ax_nz.pcolormesh(self.mesh_nz_north,
self.mesh_nz_vertical,
plot_nz,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sy in self.station_dict_east[self.grid_east[self.index_east]]:
self.ax_nz.text(sy,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_nz.set_xlim(self.ns_limits)
self.ax_nz.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_nz.set_xlabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_nz.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_en(self):
"""
update east vs vertical plot
"""
self.ax_en.cla()
plot_en = np.log10(self.res_model[:, :, self.index_vertical].T)
self.ax_en.pcolormesh(self.mesh_en_east,
self.mesh_en_north,
plot_en,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
self.ax_en.set_xlim(self.ew_limits)
self.ax_en.set_ylim(self.ns_limits)
self.ax_en.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_en.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
#--> plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax_en.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
self.fig.canvas.draw()
self._update_map()
    def _update_map(self):
        """
        Redraw the plan-view location map: the model grid mesh, colored
        lines marking the current E-W (green) and N-S (blue) slices,
        station markers and a label giving the current slice depth.
        """
        self.ax_map.cla()
        # build all east grid lines as one Line2D; the None entries break
        # the line between segments so a single plot call draws the mesh
        self.east_line_xlist = []
        self.east_line_ylist = []
        for xx in self.grid_east:
            self.east_line_xlist.extend([xx, xx])
            self.east_line_xlist.append(None)
            self.east_line_ylist.extend([self.grid_north.min(),
                                         self.grid_north.max()])
            self.east_line_ylist.append(None)
        self.ax_map.plot(self.east_line_xlist,
                         self.east_line_ylist,
                         lw=.25,
                         color='k')

        # same None-separator trick for the north grid lines
        self.north_line_xlist = []
        self.north_line_ylist = []
        for yy in self.grid_north:
            self.north_line_xlist.extend([self.grid_east.min(),
                                          self.grid_east.max()])
            self.north_line_xlist.append(None)
            self.north_line_ylist.extend([yy, yy])
            self.north_line_ylist.append(None)
        self.ax_map.plot(self.north_line_xlist,
                         self.north_line_ylist,
                         lw=.25,
                         color='k')
        #--> e-w indication line (green: the current north slice)
        # NOTE(review): index_north+1 / index_east+1 below can run one past
        # the end of the grid arrays when the slice sits on the boundary --
        # confirm against the index bookkeeping in on_key_press
        self.ax_map.plot([self.grid_east.min(),
                          self.grid_east.max()],
                         [self.grid_north[self.index_north+1],
                          self.grid_north[self.index_north+1]],
                         lw=1,
                         color='g')

        #--> n-s indication line (blue: the current east slice)
        self.ax_map.plot([self.grid_east[self.index_east+1],
                          self.grid_east[self.index_east+1]],
                         [self.grid_north.min(),
                          self.grid_north.max()],
                         lw=1,
                         color='b')
        #--> plot the stations
        if self.station_east is not None:
            for ee, nn in zip(self.station_east, self.station_north):
                self.ax_map.text(ee, nn, '*',
                                 verticalalignment='center',
                                 horizontalalignment='center',
                                 fontdict={'size':5, 'weight':'bold'})
        self.ax_map.set_xlim(self.ew_limits)
        self.ax_map.set_ylim(self.ns_limits)
        self.ax_map.set_ylabel('Northing ({0})'.format(self.map_scale),
                               fontdict=self.font_dict)
        self.ax_map.set_xlabel('Easting ({0})'.format(self.map_scale),
                               fontdict=self.font_dict)

        # depth label for the current depth slice (upper-left corner)
        self.ax_map.text(self.ew_limits[0]*.95, self.ns_limits[1]*.95,
                         '{0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
                                                self.map_scale),
                         horizontalalignment='left',
                         verticalalignment='top',
                         bbox={'facecolor': 'white'},
                         fontdict=self.font_dict)

        self.fig.canvas.draw()
def get_station_grid_locations(self):
"""
get the grid line on which a station resides for plotting
"""
self.station_dict_east = dict([(gx, []) for gx in self.grid_east])
self.station_dict_north = dict([(gy, []) for gy in self.grid_north])
if self.station_east is not None:
for ss, sx in enumerate(self.station_east):
gx = np.where(self.grid_east <= sx)[0][-1]
self.station_dict_east[self.grid_east[gx]].append(self.station_north[ss])
for ss, sy in enumerate(self.station_north):
gy = np.where(self.grid_north <= sy)[0][-1]
self.station_dict_north[self.grid_north[gy]].append(self.station_east[ss])
else:
return
    def redraw_plot(self):
        """
        redraw plot if parameters were changed

        use this function if you updated some attributes and want to re-plot.

        :Example: ::

            >>> # change the color and marker of the xy components
            >>> import mtpy.modeling.occam2d as occam2d
            >>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
            >>> p1 = ocd.plotAllResponses()
            >>> #change line width
            >>> p1.lw = 2
            >>> p1.redraw_plot()
        """
        # close the existing figure first so plot() starts from a clean one
        plt.close(self.fig)
        self.plot()
def save_figure(self, save_fn=None, fig_dpi=None, file_format='pdf',
orientation='landscape', close_fig='y'):
"""
save_figure will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be save to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
**close_plot** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_fn) == False:
file_format = save_fn[-3:]
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
else:
save_fn = os.path.join(save_fn, '_E{0}_N{1}_Z{2}.{3}'.format(
self.index_east, self.index_north,
self.index_vertical, file_format))
self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.clf()
plt.close(self.fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
#==============================================================================
# plot rms maps
#==============================================================================
class Plot_RMS_Maps(object):
    """
    plots the RMS as (data-model)/(error) in map view for all components
    of the data file.  Gets this information from the .res file output
    by ModEM.

    Arguments:
    ------------------

        **residual_fn** : string
                          full path to .res file

    =================== =======================================================
    Attributes          Description
    =================== =======================================================
    fig                 matplotlib.figure instance for a single plot
    fig_dpi             dots-per-inch resolution of figure *default* is 200
    fig_num             number of fig instance *default* is 1
    fig_size            size of figure in inches [width, height]
                        *default* is [7,6]
    font_size           font size of tick labels, axis labels are +2
                        *default* is 8
    marker              marker style for station rms,
                        see matplotlib.line for options,
                        *default* is 's' --> square
    marker_size         size of marker in points. *default* is 10
    pad_x               padding in map units from edge of the axis to stations
                        at the extremeties in longitude.
                        *default* is 1/2 tick_locator
    pad_y               padding in map units from edge of the axis to stations
                        at the extremeties in latitude.
                        *default* is 1/2 tick_locator
    period_index        index of the period you want to plot according to
                        self.residual.period_list. *default* is 1
    plot_yn             [ 'y' | 'n' ] default is 'y' to plot on instantiation
    plot_z_list         internal variable for plotting
    residual            modem.Data instance that holds all the information
                        from the residual_fn given
    residual_fn         full path to .res file
    rms_cmap            matplotlib.cm object for coloring the markers
    rms_cmap_dict       dictionary of color values for rms_cmap
    rms_max             maximum rms to plot. *default* is 5.0
    rms_min             minimum rms to plot. *default* is 1.0
    save_path           path to save figures to. *default* is directory of
                        residual_fn
    subplot_bottom      spacing from axis to bottom of figure canvas.
                        *default* is .1
    subplot_hspace      horizontal spacing between subplots.
                        *default* is .1
    subplot_left        spacing from axis to left of figure canvas.
                        *default* is .1
    subplot_right       spacing from axis to right of figure canvas.
                        *default* is .9
    subplot_top         spacing from axis to top of figure canvas.
                        *default* is .95
    subplot_vspace      vertical spacing between subplots.
                        *default* is .01
    tick_locator        increment for x and y major ticks. *default* is
                        limits/5
    =================== =======================================================

    =================== =======================================================
    Methods             Description
    =================== =======================================================
    plot                plot rms maps for a single period
    plot_loop           loop over all frequencies and save figures to save_path
    read_residual_fn    read in residual_fn
    redraw_plot         after updating attributes call redraw_plot to
                        redraw the plot
    save_figure         save the figure to a file
    =================== =======================================================

    :Example: ::

        >>> import mtpy.modeling.modem_new as modem
        >>> rms_plot = Plot_RMS_Maps(r"/home/ModEM/Inv1/mb_NLCG_030.res")
        >>> # change some attributes
        >>> rms_plot.fig_size = [6, 4]
        >>> rms_plot.rms_max = 3
        >>> rms_plot.redraw_plot()
        >>> # happy with the look now loop over all periods
        >>> rms_plot.plot_loop()
    """

    def __init__(self, residual_fn, **kwargs):
        self.residual_fn = residual_fn
        self.residual = None
        self.save_path = kwargs.pop('save_path',
                                    os.path.dirname(self.residual_fn))

        self.period_index = kwargs.pop('period_index', 0)

        self.subplot_left = kwargs.pop('subplot_left', .1)
        self.subplot_right = kwargs.pop('subplot_right', .9)
        self.subplot_top = kwargs.pop('subplot_top', .95)
        self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
        self.subplot_hspace = kwargs.pop('subplot_hspace', .1)
        self.subplot_vspace = kwargs.pop('subplot_vspace', .01)

        self.font_size = kwargs.pop('font_size', 8)

        self.fig_size = kwargs.pop('fig_size', [7.75, 6.75])
        self.fig_dpi = kwargs.pop('fig_dpi', 200)
        self.fig_num = kwargs.pop('fig_num', 1)
        self.fig = None

        self.marker = kwargs.pop('marker', 's')
        self.marker_size = kwargs.pop('marker_size', 10)

        self.rms_max = kwargs.pop('rms_max', 5)
        self.rms_min = kwargs.pop('rms_min', 0)

        self.tick_locator = kwargs.pop('tick_locator', None)
        self.pad_x = kwargs.pop('pad_x', None)
        self.pad_y = kwargs.pop('pad_y', None)

        self.plot_yn = kwargs.pop('plot_yn', 'y')

        # colormap for rms, goes white to black from 0 to rms max and
        # red below 1 to show where the data is being over fit
        self.rms_cmap_dict = {'red': ((0.0, 1.0, 1.0),
                                      (0.2, 1.0, 1.0),
                                      (1.0, 0.0, 0.0)),
                              'green': ((0.0, 0.0, 0.0),
                                        (0.2, 1.0, 1.0),
                                        (1.0, 0.0, 0.0)),
                              'blue': ((0.0, 0.0, 0.0),
                                       (0.2, 1.0, 1.0),
                                       (1.0, 0.0, 0.0))}

        self.rms_cmap = colors.LinearSegmentedColormap('rms_cmap',
                                                       self.rms_cmap_dict,
                                                       256)

        # (label, (row, col) index into z/tip arrays, subplot number)
        self.plot_z_list = [{'label': r'$Z_{xx}$', 'index': (0, 0), 'plot_num': 1},
                            {'label': r'$Z_{xy}$', 'index': (0, 1), 'plot_num': 2},
                            {'label': r'$Z_{yx}$', 'index': (1, 0), 'plot_num': 3},
                            {'label': r'$Z_{yy}$', 'index': (1, 1), 'plot_num': 4},
                            {'label': r'$T_{x}$', 'index': (0, 0), 'plot_num': 5},
                            {'label': r'$T_{y}$', 'index': (0, 1), 'plot_num': 6}]

        if self.plot_yn == 'y':
            self.plot()

    def read_residual_fn(self):
        """Lazily read residual_fn into self.residual (a modem Data)."""
        if self.residual is None:
            self.residual = Data()
            self.residual.read_data_file(self.residual_fn)

    def plot(self):
        """
        Plot the RMS misfit of all six impedance/tipper components in
        map view for the period given by ``self.period_index``.
        """
        self.read_residual_fn()

        font_dict = {'size': self.font_size + 2, 'weight': 'bold'}
        rms_1 = 1. / self.rms_max

        if self.tick_locator is None:
            x_locator = np.round((self.residual.data_array['lon'].max() -
                                  self.residual.data_array['lon'].min()) / 5, 2)
            y_locator = np.round((self.residual.data_array['lat'].max() -
                                  self.residual.data_array['lat'].min()) / 5, 2)
            # bug fix: the original if/elif chain left tick_locator as
            # None when the two spacings were equal, crashing the pad
            # computation below; max() covers every case
            self.tick_locator = max(x_locator, y_locator)

        if self.pad_x is None:
            self.pad_x = self.tick_locator / 2
        if self.pad_y is None:
            self.pad_y = self.tick_locator / 2

        plt.rcParams['font.size'] = self.font_size
        plt.rcParams['figure.subplot.left'] = self.subplot_left
        plt.rcParams['figure.subplot.right'] = self.subplot_right
        plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
        plt.rcParams['figure.subplot.top'] = self.subplot_top
        plt.rcParams['figure.subplot.wspace'] = self.subplot_hspace
        plt.rcParams['figure.subplot.hspace'] = self.subplot_vspace
        self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)

        for p_dict in self.plot_z_list:
            ax = self.fig.add_subplot(3, 2, p_dict['plot_num'], aspect='equal')

            ii = p_dict['index'][0]
            # bug fix: was p_dict['index'][0], which made every component
            # plot the (ii, ii) element instead of (ii, jj)
            jj = p_dict['index'][1]

            for r_arr in self.residual.data_array:
                # calculate the rms = |residual| / error
                if p_dict['plot_num'] < 5:
                    rms = r_arr['z'][self.period_index, ii, jj].__abs__() / \
                          (r_arr['z_err'][self.period_index, ii, jj].real)
                else:
                    rms = r_arr['tip'][self.period_index, ii, jj].__abs__() / \
                          (r_arr['tip_err'][self.period_index, ii, jj].real)

                # color appropriately.  bug fix: these branches are one
                # chain now -- in the original the zero/nan-rms case fell
                # through into the rms < 1 branch and was overwritten
                if np.nan_to_num(rms) == 0.0:
                    # no data here -> near-invisible white dot
                    marker_color = (1, 1, 1)
                    marker = '.'
                    marker_size = .1
                    marker_edge_color = (1, 1, 1)
                elif rms > self.rms_max:
                    marker_color = (0, 0, 0)
                    marker = self.marker
                    marker_size = self.marker_size
                    marker_edge_color = (0, 0, 0)
                elif rms >= 1:
                    # grey scale from white (rms=1) towards black (rms=max)
                    r_color = 1 - rms / self.rms_max + rms_1
                    marker_color = (r_color, r_color, r_color)
                    marker = self.marker
                    marker_size = self.marker_size
                    marker_edge_color = (0, 0, 0)
                else:
                    # rms < 1 -> data over fit, shade towards red
                    r_color = 1 - rms / self.rms_max
                    marker_color = (1, r_color, r_color)
                    marker = self.marker
                    marker_size = self.marker_size
                    marker_edge_color = (0, 0, 0)

                ax.plot(r_arr['lon'], r_arr['lat'],
                        marker=marker,
                        ms=marker_size,
                        mec=marker_edge_color,
                        mfc=marker_color,
                        zorder=3)

            # tick-label visibility depends on the subplot's grid position
            if p_dict['plot_num'] == 1 or p_dict['plot_num'] == 3:
                ax.set_ylabel('Latitude (deg)', fontdict=font_dict)
                plt.setp(ax.get_xticklabels(), visible=False)
            elif p_dict['plot_num'] == 2 or p_dict['plot_num'] == 4:
                plt.setp(ax.get_xticklabels(), visible=False)
                plt.setp(ax.get_yticklabels(), visible=False)
            elif p_dict['plot_num'] == 6:
                plt.setp(ax.get_yticklabels(), visible=False)
                ax.set_xlabel('Longitude (deg)', fontdict=font_dict)
            else:
                ax.set_xlabel('Longitude (deg)', fontdict=font_dict)
                ax.set_ylabel('Latitude (deg)', fontdict=font_dict)

            # component label in the upper-left corner of the panel
            ax.text(self.residual.data_array['lon'].min() + .005 - self.pad_x,
                    self.residual.data_array['lat'].max() - .005 + self.pad_y,
                    p_dict['label'],
                    verticalalignment='top',
                    horizontalalignment='left',
                    bbox={'facecolor': 'white'},
                    zorder=3)

            ax.tick_params(direction='out')
            ax.grid(zorder=0, color=(.75, .75, .75))

            ax.set_xlim(self.residual.data_array['lon'].min() - self.pad_x,
                        self.residual.data_array['lon'].max() + self.pad_x)

            ax.set_ylim(self.residual.data_array['lat'].min() - self.pad_y,
                        self.residual.data_array['lat'].max() + self.pad_y)

            ax.xaxis.set_major_locator(MultipleLocator(self.tick_locator))
            ax.yaxis.set_major_locator(MultipleLocator(self.tick_locator))
            ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2f'))
            ax.yaxis.set_major_formatter(FormatStrFormatter('%2.2f'))

        # one vertical colorbar shared by all six panels
        cb_ax = self.fig.add_axes([self.subplot_right + .02, .225, .02, .45])
        color_bar = mcb.ColorbarBase(cb_ax,
                                     cmap=self.rms_cmap,
                                     norm=colors.Normalize(vmin=self.rms_min,
                                                           vmax=self.rms_max),
                                     orientation='vertical')

        color_bar.set_label('RMS', fontdict=font_dict)

        self.fig.suptitle('period = {0:.5g} (s)'.format(
                          self.residual.period_list[self.period_index]),
                          fontdict={'size': self.font_size + 3, 'weight': 'bold'})
        plt.show()

    def redraw_plot(self):
        """Close all open figures and re-plot with current attributes."""
        plt.close('all')
        self.plot()

    def save_figure(self, save_path=None, save_fn_basename=None,
                    save_fig_dpi=None, fig_format='png', fig_close=True):
        """
        Save the current figure.

        Arguments:
        -----------

            **save_path** : directory to save into, defaults to
                            self.save_path
            **save_fn_basename** : file name, defaults to
                            '<period_index>_RMS_<period>_s.<fig_format>'
            **save_fig_dpi** : dpi of the saved file, defaults to
                            self.fig_dpi
            **fig_format** : extension without a leading dot, e.g. 'png'
                            (the original default '.png' produced file
                            names ending in '..png')
            **fig_close** : close all figures after saving if True
        """
        if save_path is not None:
            self.save_path = save_path

        if save_fn_basename is None:
            save_fn_basename = '{0:02}_RMS_{1:.5g}_s.{2}'.format(
                self.period_index,
                self.residual.period_list[self.period_index],
                fig_format)
        save_fn = os.path.join(self.save_path, save_fn_basename)

        if save_fig_dpi is not None:
            self.fig_dpi = save_fig_dpi

        self.fig.savefig(save_fn, dpi=self.fig_dpi)
        print('saved file to {0}'.format(save_fn))

        if fig_close:
            plt.close('all')

    def plot_loop(self, fig_format='png'):
        """
        Plot and save a figure for every period in the residual file.
        """
        self.read_residual_fn()

        for f_index in range(self.residual.period_list.shape[0]):
            self.period_index = f_index
            self.plot()
            self.save_figure(fig_format=fig_format)
#==============================================================================
# Exceptions
#==============================================================================
class ModEMError(Exception):
pass
|
geophysics/mtpy
|
mtpy/modeling/modem_new.py
|
Python
|
gpl-3.0
| 377,608
|
[
"ParaView",
"VTK"
] |
dd674a64c6b77e6e3d69b737ebf49ae1fcb5f839b8f9b77e0ce890673e082226
|
from nose2.compat import unittest
from nose2.tools import params
import re
class TestBasicReFunction(unittest.TestCase):
    """Executable notes demonstrating the basics of the stdlib re module."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_001_match_something_at_the_beginning(self):
        # re.match only works at the beginning of string
        match = re.match(r"^abc", "abcdefabc")
        assert match.group(0) == "abc"
        assert match.start(0) == 0
        # a pattern that starts mid-string does not match:
        # re.match(r'abc', 'xabc') -> None
        match = re.match(r'abc', 'xabc')
        assert match == None

    def test_002_search_for_one_match(self):
        # re.search scans the whole string for the first occurrence
        match = re.search(r'(?:abc)adf', 'abcadfasdfadfabcasdfasdfabc')
        # Remember, group(0) is the entire match
        assert match.group(0) == "abcadf"
        assert match.start(0) == 0
        assert match.end(0) == 6

    def test_003_search_for_multiple_match(self):
        # Search for multiple matches using findall
        match = re.findall(r'abc', 'abcadfasdfadfabcasdfasdfabc')
        assert len(match) == 3
        # finditer is more useful for finding more information about the match
        match = re.finditer(r'abc', 'abcadfasdfadfabcasdfasdfabc')
        assert sum(1 for _ in match) == 3

    def test_004_print_debug_expression(self):
        # re.DEBUG dumps the compiled pattern structure to stdout
        match = re.findall(r'abc', 'abcadfasdfadfabcasdfasdfabc', re.DEBUG)

    def test_005_match_ignorecase(self):
        # re.I makes matching case-insensitive
        match = re.findall(r'abc', 'ABC', re.I)
        assert len(match) == 1

    def test_006_match_multiline(self):
        multiline_text = """
        some Varying TEXT
        DSJFKDAFJKDAFJDSAKFJADSFLKDLAFKDSAF
        [more of the above, ending with a newline]
        [yep, there is a variable number of lines here]
        some Varying TEXT
        DSJFKDAFJKDAFJDSAKFJADSFLKDLAFKDSAF
        [more of the above, ending with a newline]
        [yep, there is a variable number of lines here]
        """
        # re.MULTILINE lets ^ match at the start of every line
        regex = re.compile(r'^(.+)(?:\n|\r\n?)((?:(?:\n|\r\n?).+)+)', re.MULTILINE)
        match = regex.search(multiline_text)
        assert match.groups() == (' some Varying TEXT', '\n DSJFKDAFJKDAFJDSAKFJADSFLKDLAFKDSAF\n [more of the above, ending with a newline]\n [yep, there is a variable number of lines here]')

    def test_007_match_dotall(self):
        # re.DOTALL makes '.' match newlines too
        fancy_text = """
        <div>I'm some
        fancy text that needs
        to be found</div>
        """
        regex = re.compile('some\s*fancy', re.DOTALL)
        match = regex.search(fancy_text)
        assert match.group() == "some\n fancy"

    def test_010_print_verbose_expression(self):
        # re.VERBOSE allows whitespace and comments inside the pattern
        re.findall(r'abc', 'abcadfasdfadfabcasdfasdfabc', re.VERBOSE)

    def test_011_split_string(self):
        # re.split splits on every match of the pattern
        splitted_strings = re.split(r',', 'hello,the,world')
        assert splitted_strings == ['hello', 'the', 'world']

    def test_012_substitute(self):
        # re.sub replaces every match with the replacement string
        new_string = re.sub(r'hello', 'hi', 'hello,the,world')
        assert new_string == 'hi,the,world'

    def test_013_escape(self):
        # re.escape backslash-escapes regex metacharacters
        escaped_string = re.escape('A$^a|string-*.withmetacharacters')
        assert escaped_string == "A\$\^a\|string\-\*\.withmetacharacters"
|
minhhh/wiki
|
code/regex/test_regex.py
|
Python
|
mit
| 3,997
|
[
"ADF"
] |
cc4c89df727e91c5b6d0fa9841054dd66b1690577138d7e4f82fae70647093d4
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkVoxelContoursToSurfaceFilter(SimpleVTKClassModuleBase):
    """DeVIDE wrapper module around vtk.vtkVoxelContoursToSurfaceFilter.

    Auto-generated by DeVIDE's createDeVIDEModuleFromVTKObject; exposes
    one vtkPolyData input and one vtkPolyData output, with the module
    documentation replaced by the VTK class documentation.
    """
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkVoxelContoursToSurfaceFilter(), 'Processing.',
            ('vtkPolyData',), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkVoxelContoursToSurfaceFilter.py
|
Python
|
bsd-3-clause
| 517
|
[
"VTK"
] |
9f76cc8e88e1260fe188866d80aef0fb1f3f8df57c427e2a427dc3b5fd769877
|
import vtk
import numpy as np
from glue.external.qt import QtGui
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from palettable.colorbrewer import get_map
__all__ = ['QtVTKWidget']
class QtVTKWidget(QtGui.QWidget):
    """
    Qt widget embedding a VTK render window that displays iso-surface
    contours of a 3-d data cube (one marching-cubes actor per level).
    """

    def __init__(self, parent=None):
        super(QtVTKWidget, self).__init__(parent=parent)
        self.ren = vtk.vtkRenderer()
        self.ren.SetBackground(0, 0, 0)
        self.render_window = vtk.vtkRenderWindow()
        # the interactor parents the VTK window into this Qt widget
        self.window_interactor = QVTKRenderWindowInteractor(self, rw=self.render_window)
        self.render_window.Render()
        self.render_window.PolygonSmoothingOn()
        self.window_interactor.Initialize()
        self.window_interactor.Start()
        self.data = None
        # the next four assignments run the property setters below; each
        # setter is a no-op while no data/levels have been loaded
        self.levels = []
        self.cmap = 'RdYlBu'
        self.alpha = 0.5
        self.spectral_stretch = 1.

    def resizeEvent(self, event):
        # keep the embedded VTK interactor sized to the Qt widget
        super(QtVTKWidget, self).resizeEvent(event)
        self.window_interactor.resize(self.width(), self.height())

    def set_data(self, data):
        """
        Load a new data cube and rebuild the scaled VTK image.

        NOTE(review): the shape unpack assumes ``data`` is a 3-d array
        ordered (z, y, x) -- confirm against callers.
        """
        self.data = data
        self.nz, self.ny, self.nx = data.shape
        self._update_scaled_data()

    @property
    def spectral_stretch(self):
        # scale factor applied to the z (spectral) axis spacing
        return self._spectral_stretch

    @spectral_stretch.setter
    def spectral_stretch(self, value):
        # changing the stretch requires re-importing the scaled cube
        self._spectral_stretch = value
        self._update_scaled_data()

    def _update_scaled_data(self, vmin=None, vmax=None):
        """
        Rescale the cube to uint8 0..255 between vmin/vmax (defaulting
        to the data extremes, ignoring NaNs) and hand the raw bytes to a
        vtkImageImport.  No-op while no data has been set.
        """
        if self.data is None:
            return
        if vmin is None:
            self.vmin = np.nanmin(self.data)
        else:
            self.vmin = vmin
        if vmax is None:
            self.vmax = np.nanmax(self.data)
        else:
            self.vmax = vmax
        data = np.clip((self.data - self.vmin) / (self.vmax - self.vmin) * 255., 0., 255.)
        data = data.astype(np.uint8)
        data_string = data.tostring()
        self.reader_volume = vtk.vtkImageImport()
        # CopyImportVoidPointer copies the buffer, so the temporary
        # byte string may be garbage collected afterwards
        self.reader_volume.CopyImportVoidPointer(data_string, len(data_string))
        self.reader_volume.SetDataScalarTypeToUnsignedChar()
        self.reader_volume.SetNumberOfScalarComponents(1)
        self.reader_volume.SetDataExtent(0, self.nx - 1, 0, self.ny - 1, 0, self.nz - 1)
        self.reader_volume.SetWholeExtent(0, self.nx - 1, 0, self.ny - 1, 0, self.nz - 1)
        self.reader_volume.SetDataSpacing(1, 1, self._spectral_stretch)
        self.reader_volume.SetDataOrigin(self.nx / 2., self.ny / 2., self.nz / 2.)
        self.render_window.AddRenderer(self.ren)
        self.ren.ResetCameraClippingRange()

    @property
    def levels(self):
        # list of (level index, vtkLODActor) pairs, one per contour
        return self._levels

    @levels.setter
    def levels(self, values):
        # NOTE(review): uses self.vmin/self.vmax, which only exist after
        # set_data() has run -- setting non-empty levels first will raise
        self._reset_levels()
        if len(values) == 0:
            return
        # map the requested levels onto the 0..255 uint8 scale used by
        # _update_scaled_data before contouring
        values = np.asarray(values)
        values = np.clip((values - self.vmin) / (self.vmax - self.vmin) * 255., 0., 255.)
        for ilevel, level in enumerate(values):
            self.add_contour(level, ilevel)
        self._update_level_colors()

    def _reset_levels(self):
        # drop all actors from the scene along with the level bookkeeping
        self.ren.RemoveAllViewProps()
        self._levels = []

    @property
    def cmap(self):
        # matplotlib colormap used to color the contour levels
        return self._cmap

    @cmap.setter
    def cmap(self, name):
        # resolve the name via colorbrewer's diverging maps (5 classes)
        self._cmap = get_map(name, 'diverging', 5).mpl_colormap
        self._update_level_colors()

    @property
    def alpha(self):
        # opacity applied to every contour actor
        return self._alpha

    @alpha.setter
    def alpha(self, value):
        self._alpha = value
        self._update_level_colors()

    def _update_level_colors(self):
        """Recolor every contour actor from the colormap and alpha."""
        if len(self._levels) == 0:
            return
        # color by level *index*: _levels stores (ilevel, actor) pairs,
        # so the colormap is spread evenly across the level count
        vmin = 0
        vmax = len(self._levels) - 1
        for level, actor in self._levels:
            if vmin == vmax:
                x = 0.5
            else:
                x = (level - vmin) / float(vmax - vmin)
            color = self._cmap(x)
            prop = actor.GetProperty()
            prop.SetColor(*color[:3])
            prop.SetOpacity(self.alpha)

    def add_contour(self, level, ilevel, color=(1., 1., 1.), alpha=1.):
        """
        Add a marching-cubes iso-surface actor at *level* (0..255 scale).

        NOTE(review): the ``color`` and ``alpha`` parameters are accepted
        but never used -- colors come from _update_level_colors().
        """
        contour = vtk.vtkMarchingCubes()
        contour.SetInput(self.reader_volume.GetOutput())
        contour.SetValue(0, level)
        contour.ComputeNormalsOn()
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInput(contour.GetOutput())
        mapper.ScalarVisibilityOff()
        actor = vtk.vtkLODActor()
        actor.SetMapper(mapper)
        actor.SetNumberOfCloudPoints(100000)
        actor.SetMapper(mapper)
        self._levels.append((ilevel, actor))
        self.ren.AddActor(actor)

    def render(self):
        # force a redraw of the VTK render window
        self.render_window.Render()
|
astrofrog/cube-viewer
|
cube_viewer/vtk_widget.py
|
Python
|
bsd-2-clause
| 4,614
|
[
"VTK"
] |
50cce984e4947b3b02e57daa8d749f4a60b227c22040e168b4f467818068c2c6
|
#------------------------------------------------------------------------------
# pycparser: c_generator.py
#
# C code generator from pycparser AST nodes.
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
from . import c_ast
class CGenerator(object):
""" Uses the same visitor pattern as c_ast.NodeVisitor, but modified to
return a value from each visit method, using string accumulation in
generic_visit.
"""
    def __init__(self):
        """Create a generator starting at indentation level 0."""
        # Statements start with indentation of self.indent_level spaces, using
        # the _make_indent method
        #
        self.indent_level = 0
def _make_indent(self):
return ' ' * self.indent_level
def visit(self, node):
method = 'visit_' + node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
#~ print('generic:', type(node))
if node is None:
return ''
else:
return ''.join(self.visit(c) for c_name, c in node.children())
    def visit_Constant(self, n):
        # Constants carry their original source text in .value.
        return n.value
    def visit_ID(self, n):
        # Identifiers are emitted by name.
        return n.name
def visit_Pragma(self, n):
ret = '#pragma'
if n.string:
ret += ' ' + n.string
return ret
def visit_ArrayRef(self, n):
arrref = self._parenthesize_unless_simple(n.name)
return arrref + '[' + self.visit(n.subscript) + ']'
def visit_StructRef(self, n):
sref = self._parenthesize_unless_simple(n.name)
return sref + n.type + self.visit(n.field)
def visit_FuncCall(self, n):
fref = self._parenthesize_unless_simple(n.name)
return fref + '(' + self.visit(n.args) + ')'
def visit_UnaryOp(self, n):
operand = self._parenthesize_unless_simple(n.expr)
if n.op == 'p++':
return '%s++' % operand
elif n.op == 'p--':
return '%s--' % operand
elif n.op == 'sizeof':
# Always parenthesize the argument of sizeof since it can be
# a name.
return 'sizeof(%s)' % self.visit(n.expr)
else:
return '%s%s' % (n.op, operand)
def visit_BinaryOp(self, n):
lval_str = self._parenthesize_if(n.left,
lambda d: not self._is_simple_node(d))
rval_str = self._parenthesize_if(n.right,
lambda d: not self._is_simple_node(d))
return '%s %s %s' % (lval_str, n.op, rval_str)
def visit_Assignment(self, n):
rval_str = self._parenthesize_if(
n.rvalue,
lambda n: isinstance(n, c_ast.Assignment))
return '%s %s %s' % (self.visit(n.lvalue), n.op, rval_str)
def visit_IdentifierType(self, n):
return ' '.join(n.names)
def _visit_expr(self, n):
if isinstance(n, c_ast.InitList):
return '{' + self.visit(n) + '}'
elif isinstance(n, c_ast.ExprList):
return '(' + self.visit(n) + ')'
else:
return self.visit(n)
def visit_Decl(self, n, no_type=False):
# no_type is used when a Decl is part of a DeclList, where the type is
# explicitly only for the first declaration in a list.
#
s = n.name if no_type else self._generate_decl(n)
if n.bitsize: s += ' : ' + self.visit(n.bitsize)
if n.init:
s += ' = ' + self._visit_expr(n.init)
return s
def visit_DeclList(self, n):
s = self.visit(n.decls[0])
if len(n.decls) > 1:
s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True)
for decl in n.decls[1:])
return s
def visit_Typedef(self, n):
s = ''
if n.storage: s += ' '.join(n.storage) + ' '
s += self._generate_type(n.type)
return s
def visit_Cast(self, n):
s = '(' + self._generate_type(n.to_type) + ')'
return s + ' ' + self._parenthesize_unless_simple(n.expr)
def visit_ExprList(self, n):
visited_subexprs = []
for expr in n.exprs:
visited_subexprs.append(self._visit_expr(expr))
return ', '.join(visited_subexprs)
def visit_InitList(self, n):
visited_subexprs = []
for expr in n.exprs:
visited_subexprs.append(self._visit_expr(expr))
return ', '.join(visited_subexprs)
def visit_Enum(self, n):
s = 'enum'
if n.name: s += ' ' + n.name
if n.values:
s += ' {'
for i, enumerator in enumerate(n.values.enumerators):
s += enumerator.name
if enumerator.value:
s += ' = ' + self.visit(enumerator.value)
if i != len(n.values.enumerators) - 1:
s += ', '
s += '}'
return s
def visit_FuncDef(self, n):
decl = self.visit(n.decl)
self.indent_level = 0
body = self.visit(n.body)
if n.param_decls:
knrdecls = ';\n'.join(self.visit(p) for p in n.param_decls)
return decl + '\n' + knrdecls + ';\n' + body + '\n'
else:
return decl + '\n' + body + '\n'
def visit_FileAST(self, n):
s = ''
for ext in n.ext:
if isinstance(ext, c_ast.FuncDef):
s += self.visit(ext)
elif isinstance(ext, c_ast.Pragma):
s += self.visit(ext) + '\n'
else:
s += self.visit(ext) + ';\n'
return s
def visit_Compound(self, n):
s = self._make_indent() + '{\n'
self.indent_level += 2
if n.block_items:
s += ''.join(self._generate_stmt(stmt) for stmt in n.block_items)
self.indent_level -= 2
s += self._make_indent() + '}\n'
return s
def visit_EmptyStatement(self, n):
return ';'
def visit_ParamList(self, n):
return ', '.join(self.visit(param) for param in n.params)
def visit_Return(self, n):
s = 'return'
if n.expr: s += ' ' + self.visit(n.expr)
return s + ';'
def visit_Break(self, n):
return 'break;'
def visit_Continue(self, n):
return 'continue;'
def visit_TernaryOp(self, n):
s = '(' + self._visit_expr(n.cond) + ') ? '
s += '(' + self._visit_expr(n.iftrue) + ') : '
s += '(' + self._visit_expr(n.iffalse) + ')'
return s
def visit_If(self, n):
s = 'if ('
if n.cond: s += self.visit(n.cond)
s += ')\n'
s += self._generate_stmt(n.iftrue, add_indent=True)
if n.iffalse:
s += self._make_indent() + 'else\n'
s += self._generate_stmt(n.iffalse, add_indent=True)
return s
def visit_For(self, n):
s = 'for ('
if n.init: s += self.visit(n.init)
s += ';'
if n.cond: s += ' ' + self.visit(n.cond)
s += ';'
if n.next: s += ' ' + self.visit(n.next)
s += ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_While(self, n):
s = 'while ('
if n.cond: s += self.visit(n.cond)
s += ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_DoWhile(self, n):
s = 'do\n'
s += self._generate_stmt(n.stmt, add_indent=True)
s += self._make_indent() + 'while ('
if n.cond: s += self.visit(n.cond)
s += ');'
return s
def visit_Switch(self, n):
s = 'switch (' + self.visit(n.cond) + ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_Case(self, n):
s = 'case ' + self.visit(n.expr) + ':\n'
for stmt in n.stmts:
s += self._generate_stmt(stmt, add_indent=True)
return s
def visit_Default(self, n):
s = 'default:\n'
for stmt in n.stmts:
s += self._generate_stmt(stmt, add_indent=True)
return s
def visit_Label(self, n):
return n.name + ':\n' + self._generate_stmt(n.stmt)
def visit_Goto(self, n):
return 'goto ' + n.name + ';'
def visit_EllipsisParam(self, n):
return '...'
def visit_Struct(self, n):
return self._generate_struct_union(n, 'struct')
def visit_Typename(self, n):
return self._generate_type(n.type)
def visit_Union(self, n):
return self._generate_struct_union(n, 'union')
def visit_NamedInitializer(self, n):
s = ''
for name in n.name:
if isinstance(name, c_ast.ID):
s += '.' + name.name
elif isinstance(name, c_ast.Constant):
s += '[' + name.value + ']'
s += ' = ' + self._visit_expr(n.expr)
return s
def visit_FuncDecl(self, n):
return self._generate_type(n)
def _generate_struct_union(self, n, name):
""" Generates code for structs and unions. name should be either
'struct' or union.
"""
s = name + ' ' + (n.name or '')
if n.decls:
s += '\n'
s += self._make_indent()
self.indent_level += 2
s += '{\n'
for decl in n.decls:
s += self._generate_stmt(decl)
self.indent_level -= 2
s += self._make_indent() + '}'
return s
def _generate_stmt(self, n, add_indent=False):
""" Generation from a statement node. This method exists as a wrapper
for individual visit_* methods to handle different treatment of
some statements in this context.
"""
typ = type(n)
if add_indent: self.indent_level += 2
indent = self._make_indent()
if add_indent: self.indent_level -= 2
if typ in (
c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp,
c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef,
c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef,
c_ast.ExprList):
# These can also appear in an expression context so no semicolon
# is added to them automatically
#
return indent + self.visit(n) + ';\n'
elif typ in (c_ast.Compound,):
# No extra indentation required before the opening brace of a
# compound - because it consists of multiple lines it has to
# compute its own indentation.
#
return self.visit(n)
else:
return indent + self.visit(n) + '\n'
def _generate_decl(self, n):
""" Generation from a Decl node.
"""
s = ''
if n.funcspec: s = ' '.join(n.funcspec) + ' '
if n.storage: s += ' '.join(n.storage) + ' '
s += self._generate_type(n.type)
return s
def _generate_type(self, n, modifiers=[]):
""" Recursive generation from a type node. n is the type node.
modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers
encountered on the way down to a TypeDecl, to allow proper
generation from it.
"""
typ = type(n)
#~ print(n, modifiers)
if typ == c_ast.TypeDecl:
s = ''
if n.quals: s += ' '.join(n.quals) + ' '
s += self.visit(n.type)
nstr = n.declname if n.declname else ''
# Resolve modifiers.
# Wrap in parens to distinguish pointer to array and pointer to
# function syntax.
#
for i, modifier in enumerate(modifiers):
if isinstance(modifier, c_ast.ArrayDecl):
if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '[' + self.visit(modifier.dim) + ']'
elif isinstance(modifier, c_ast.FuncDecl):
if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '(' + self.visit(modifier.args) + ')'
elif isinstance(modifier, c_ast.PtrDecl):
if modifier.quals:
nstr = '* %s %s' % (' '.join(modifier.quals), nstr)
else:
nstr = '*' + nstr
if nstr: s += ' ' + nstr
return s
elif typ == c_ast.Decl:
return self._generate_decl(n.type)
elif typ == c_ast.Typename:
return self._generate_type(n.type)
elif typ == c_ast.IdentifierType:
return ' '.join(n.names) + ' '
elif typ in (c_ast.ArrayDecl, c_ast.PtrDecl, c_ast.FuncDecl):
return self._generate_type(n.type, modifiers + [n])
else:
return self.visit(n)
def _parenthesize_if(self, n, condition):
""" Visits 'n' and returns its string representation, parenthesized
if the condition function applied to the node returns True.
"""
s = self._visit_expr(n)
if condition(n):
return '(' + s + ')'
else:
return s
def _parenthesize_unless_simple(self, n):
""" Common use case for _parenthesize_if
"""
return self._parenthesize_if(n, lambda d: not self._is_simple_node(d))
def _is_simple_node(self, n):
""" Returns True for nodes that are "simple" - i.e. nodes that always
have higher precedence than operators.
"""
return isinstance(n,( c_ast.Constant, c_ast.ID, c_ast.ArrayRef,
c_ast.StructRef, c_ast.FuncCall))
|
hipnusleo/laserjet
|
resource/pypi/pycparser-2.17/pycparser/c_generator.py
|
Python
|
apache-2.0
| 14,236
|
[
"VisIt"
] |
45918c3df9458ad0dff0b1d592b20a8918f9a55c95d06a0ea2e17c331de8796f
|
# geotecha - A software suite for geotechncial engineering
# Copyright (C) 2018 Rohan T. Walker (rtrwalker@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""Some test routines for the inputoutput module
"""
from __future__ import division, print_function
import ast
from nose import with_setup
from nose.tools.trivial import assert_almost_equal
from nose.tools.trivial import assert_raises
from nose.tools.trivial import ok_
from nose.tools.trivial import assert_equal
from nose.tools.trivial import assert_equals
import unittest
from testfixtures import TempDirectory
import textwrap
from math import pi
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
import os
import matplotlib
from geotecha.piecewise.piecewise_linear_1d import PolyLine
from geotecha.inputoutput.inputoutput import make_module_from_text
from geotecha.inputoutput.inputoutput import copy_attributes_between_objects
from geotecha.inputoutput.inputoutput import copy_attributes_from_text_to_object
from geotecha.inputoutput.inputoutput import check_attribute_is_list
from geotecha.inputoutput.inputoutput import check_attribute_PolyLines_have_same_x_limits
from geotecha.inputoutput.inputoutput import check_attribute_pairs_have_equal_length
from geotecha.inputoutput.inputoutput import check_attribute_combinations
from geotecha.inputoutput.inputoutput import initialize_objects_attributes
from geotecha.inputoutput.inputoutput import code_for_explicit_attribute_initialization
from geotecha.inputoutput.inputoutput import object_members
from geotecha.inputoutput.inputoutput import SyntaxChecker
from geotecha.inputoutput.inputoutput import force_attribute_same_len_if_none
from geotecha.inputoutput.inputoutput import string_of_object_attributes
from geotecha.inputoutput.inputoutput import next_output_stem
from geotecha.inputoutput.inputoutput import make_array_into_dataframe
from geotecha.inputoutput.inputoutput import save_grid_data_to_file
from geotecha.inputoutput.inputoutput import GenericInputFileArgParser
from geotecha.inputoutput.inputoutput import working_directory
from geotecha.inputoutput.inputoutput import hms_string
from geotecha.inputoutput.inputoutput import fcode_one_large_expr
from geotecha.inputoutput.inputoutput import InputFileLoaderCheckerSaver
class EmptyClass(object):
    """Bare container object the tests use for attaching ad-hoc attributes."""

    def __init__(self):
        # No state of its own; attributes are assigned by each test.
        pass
def test_make_module_from_text():
    """test for make_module_from_text function"""
    source = textwrap.dedent("""\
        a = 2
        """)
    # The result should be a module object exposing the assigned names.
    mod = make_module_from_text(source)
    ok_(isinstance(mod, type(textwrap)))
    assert_equal(mod.a, 2)
    # A restrictive SyntaxChecker rejects even this trivial source.
    assert_raises(SyntaxError, make_module_from_text, source,
                  syntax_checker=SyntaxChecker())
def test_copy_attributes_between_objects():
    """test for copy_attributes_between_objects function"""
    target = EmptyClass()
    source = EmptyClass()
    source.a = 2
    source.b = 3
    # Missing attributes fall back to the defaults dict, then to None.
    copy_attributes_between_objects(source, target,
                                    ['a', 'b', 'aa', 'bb'], {'bb': 27})
    assert_equal([target.a, target.b, target.aa, target.bb], [2, 3, None, 27])
    # not_found_value overrides the None fallback.
    copy_attributes_between_objects(source, target, ['c'],
                                    not_found_value=1000)
    assert_equal([target.c], [1000])
def test_copy_attributes_from_text_to_object():
    """test for copy_attributes_from_text_to_object function"""
    text = textwrap.dedent("""\
        a = 2
        b = 3
        """)
    obj = EmptyClass()
    # Names absent from the text fall back to defaults, then None.
    copy_attributes_from_text_to_object(text, obj,
                                        ['a', 'b', 'aa', 'bb'], {'bb': 27})
    assert_equal([obj.a, obj.b, obj.aa, obj.bb], [2, 3, None, 27])
def test_check_attribute_is_list():
    """test for check_attribute_is_list function"""
    obj = EmptyClass()
    obj.a = 2
    obj.b = 4
    obj.c = [8]
    obj.d = [6, 7]
    # Scalars fail the check unless force_list converts them.
    assert_raises(ValueError, check_attribute_is_list, obj,
                  attributes=['a', 'b', 'c'], force_list=False)
    check_attribute_is_list(obj, attributes=['a', 'b', 'c'], force_list=True)
    # a and b are wrapped; existing lists (and unchecked d) are untouched.
    assert_equal([obj.a, obj.b, obj.c, obj.d], [[2], [4], [8], [6, 7]])
def test_check_attribute_PolyLines_have_same_x_limits():
    """test for check_attribute_PolyLines_have_same_x_limits function"""
    obj = EmptyClass()
    obj.a = None
    obj.b = PolyLine([0, 4], [4, 5])
    obj.c = [PolyLine([0, 4], [6, 3]), PolyLine([0, 5], [6, 3])]
    obj.d = PolyLine([0, 2, 4], [3, 2, 4])
    # c mixes x-limits (ends at 4 vs 5), so any group containing c fails.
    assert_raises(ValueError, check_attribute_PolyLines_have_same_x_limits,
                  obj, attributes=[['a', 'b', 'c', 'd']])
    assert_raises(ValueError, check_attribute_PolyLines_have_same_x_limits,
                  obj, attributes=[['c']])
    # Consistent limits (None members ignored) pass and return None.
    assert_equal(check_attribute_PolyLines_have_same_x_limits(
        obj, attributes=[['a', 'b', 'd']]), None)
def test_check_attribute_pairs_have_equal_length():
    """test for check_attribute_pairs_have_equal_length function"""
    obj = EmptyClass()
    obj.a = None
    obj.b = [7, 8]
    obj.c = [8]
    obj.d = [6, 7]
    obj.e = 8
    # Mismatched lengths raise ValueError.
    assert_raises(ValueError, check_attribute_pairs_have_equal_length,
                  obj, attributes=[['b', 'c']])
    # A member with no len() raises TypeError.
    assert_raises(TypeError, check_attribute_pairs_have_equal_length,
                  obj, attributes=[['b', 'e']])
    # Equal lengths pass and return None.
    assert_equal(check_attribute_pairs_have_equal_length(
        obj, attributes=[['b', 'd']]), None)
def test_check_attribute_combinations():
    """test for check_attribute_combinations function"""
    obj = EmptyClass()
    obj.a = None
    obj.b = None
    obj.c = 1
    obj.d = 2
    obj.e = None
    obj.f = 5
    # zero_or_all: each group must be entirely None or entirely set.
    for groups in ([['a', 'b']], [['c', 'd']], [['a', 'b'], ['c', 'd']]):
        assert_equal(check_attribute_combinations(obj, zero_or_all=groups),
                     None)
    for groups in ([['a', 'c']], [['a', 'b'], ['a', 'c']]):
        assert_raises(ValueError, check_attribute_combinations, obj,
                      zero_or_all=groups)
    # at_least_one: each group needs at least one non-None member.
    for groups in ([['a', 'c', 'e']], [['c', 'd']], [['a', 'c'], ['c']]):
        assert_equal(check_attribute_combinations(obj, at_least_one=groups),
                     None)
    for groups in ([['a', 'b', 'e']], [['a', 'c'], ['a', 'b', 'e']]):
        assert_raises(ValueError, check_attribute_combinations, obj,
                      at_least_one=groups)
    # one_implies_others: if the first member is set, the rest must be too.
    for groups in ([['c', 'd']], [['a', 'b']], [['a', 'b'], ['c', 'd', 'f']]):
        assert_equal(check_attribute_combinations(obj,
                                                  one_implies_others=groups),
                     None)
    for groups in ([['c', 'a']], [['c', 'd', 'e']],
                   [['c', 'd'], ['c', 'd', 'e']]):
        assert_raises(ValueError, check_attribute_combinations, obj,
                      one_implies_others=groups)
def test_initialize_objects_attributes():
    """test for initialize_objects_attributes function"""
    obj = EmptyClass()
    initialize_objects_attributes(obj, attributes=['a', 'b'],
                                  defaults={'a': 6})
    # 'a' picks up its default; 'b' falls back to None.
    assert_equal([obj.a, obj.b], [6, None])
def test_code_for_explicit_attribute_initialization():
    """test for code_for_explicit_attribute_initialization function"""
    # code_for_explicit_attribute_initialization(attributes=[], defaults={},
    #     defaults_name='_attribute_defaults', object_name='self',
    #     not_found_value=None)
    # CLEANUP: removed dead locals from the original - `a = 'a b c'.split`
    # bound the method without calling it, and b, c, e were never used.
    # With defaults_name=None, defaults are inlined literally.
    assert_equal(
        code_for_explicit_attribute_initialization(
            'a b c'.split(), {'a': 3, 'b': 6}, None),
        'self.a = 3\nself.b = 6\nself.c = None\n')
    # not_found_value supplies the fallback for names with no default.
    assert_equal(
        code_for_explicit_attribute_initialization(
            'a b c'.split(), {'a': 3, 'b': 6}, None, not_found_value='sally'),
        "self.a = 3\nself.b = 6\nself.c = 'sally'\n")
    # Default defaults_name generates lookups into _attribute_defaults.
    assert_equal(
        code_for_explicit_attribute_initialization(
            'a b c'.split(), {'a': 3, 'b': 6}),
        "self.a = self._attribute_defaults.get('a', None)\n"
        "self.b = self._attribute_defaults.get('b', None)\n"
        "self.c = None\n")
def test_force_attribute_same_len_if_none():
    """test for force_attribute_same_len_if_none"""
    # force_attribute_same_len_if_none(obj, same_len_attributes=[], value=None)
    # BUG FIX: the original bound `a = EmptyClass` (missing parentheses),
    # so every attribute below was set on the *class* itself, leaking state
    # into every other test that instantiates EmptyClass.
    a = EmptyClass()
    a.a = [3, 4]
    a.b = None
    a.c = [7, 2, 3]
    a.d = None
    # b is None, so it is replaced by [value] * len(a.a).
    force_attribute_same_len_if_none(a, same_len_attributes=[['a', 'b']],
                                     value=None)
    assert_equal(a.b, [None, None])
    # The leading attribute d is None, so trailing c is left untouched.
    force_attribute_same_len_if_none(a, same_len_attributes=[['d', 'c']],
                                     value=None)
    assert_equal(a.c, [7, 2, 3])
def test_object_members():
    """test for object_members function"""
    import math
    # A representative subset of math's routines must be reported.
    expected = set(['acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2',
                    'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees',
                    'erf', 'erfc', 'exp', 'expm1', 'fabs', 'factorial',
                    'floor', 'fmod', 'frexp', 'fsum', 'gamma', 'hypot',
                    'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10',
                    'log1p', 'modf', 'pow', 'radians', 'sin', 'sinh',
                    'sqrt', 'tan', 'tanh', 'trunc'])
    found = set(object_members(math, 'routine', join=False))
    ok_(expected.issubset(found))
def test_SyntaxChecker():
    """test for SyntaxChecker class"""
    # Only the listed name groups are whitelisted; imports, eval/exec and
    # subclass-walking sandbox escapes must all raise SyntaxError.
    syntax_checker = SyntaxChecker(['ast', 'builtin', 'numpy', 'PolyLine'])
    assert_raises(SyntaxError, syntax_checker.visit,
                  ast.parse('import math', mode='exec'))
    assert_raises(SyntaxError, syntax_checker.visit,
                  ast.parse('from math import cos', mode='exec'))
    assert_raises(SyntaxError, syntax_checker.visit,
                  ast.parse('eval(44*2)', mode='exec'))
    assert_raises(SyntaxError, syntax_checker.visit,
                  ast.parse('exec("a=34")', mode='exec'))
    # Classic sandbox escape through object.__subclasses__ -> subprocess.Popen.
    assert_raises(SyntaxError, syntax_checker.visit,
                  ast.parse("""[x for x in ().__class__.__bases__[0].__subclasses__()
                   if x.__name__ == 'Popen'][0](['ls', '-la']).wait()""", mode='exec'))
class test_string_of_object_attributes(unittest.TestCase):
    """ tests for string_of_object_attributes"""
    # string_of_object_attributes(obj, attributes=[], none_at_bottom=True,
    #                             numpy_array_prefix = "np."):

    def test_defaults(self):
        # None-valued attributes are listed last; arrays get the 'np.' prefix.
        a = EmptyClass()
        a.a = None
        a.b = 4
        a.c = np.array([1, 2, 3])
        a.d = 'happy'
        assert_equal(string_of_object_attributes(a, 'a b c d'.split()),
                     textwrap.dedent("""\
                        b = 4
                        c = np.array([1, 2, 3])
                        d = 'happy'
                        a = None
                        """))

    def test_numpy_array_prefix_none(self):
        # numpy_array_prefix=None renders arrays with a bare 'array(...)'.
        a = EmptyClass()
        a.a = None
        a.b = 4
        a.c = np.array([1, 2, 3])
        a.d = 'happy'
        assert_equal(string_of_object_attributes(a, 'a b c d'.split(),
                                                 numpy_array_prefix=None),
                     textwrap.dedent("""\
                        b = 4
                        c = array([1, 2, 3])
                        d = 'happy'
                        a = None
                        """))

    def test_none_at_bottom(self):
        # none_at_bottom=False keeps the requested attribute order; a missing
        # attribute ('c') is still reported as None.
        a = EmptyClass()
        a.a = None
        a.b = 4
        a.d = 'happy'
        assert_equal(string_of_object_attributes(a, 'a b c d'.split(),
                                                 none_at_bottom=False),
                     textwrap.dedent("""\
                        a = None
                        b = 4
                        c = None
                        d = 'happy'
                        """))
class test_next_output_stem(unittest.TestCase):
    """tests for next_output_stem"""
    # next_output_stem(prefix, path=None, start=1, inc=1, zfill=3,
    #                  overwrite=False)

    def setUp(self):
        # Pre-existing files/directories the stem search must step over.
        self.tempdir = TempDirectory()
        self.tempdir.write('a_004', b'some text a4')
        self.tempdir.write('a_005', b'some text a5')
        self.tempdir.write('b_002.txt', b'some text b2')
        self.tempdir.write('b_008.out', b'some text b8')
        self.tempdir.write(('c_010', 'por'), b'some text c5por')

    def tearDown(self):
        self.tempdir.cleanup()

    def test_file(self):
        assert_equal(next_output_stem(prefix='a_', path=self.tempdir.path),
                     'a_006')

    def test_file2(self):
        # Extensions are ignored when finding the highest existing number.
        assert_equal(next_output_stem(prefix='b_', path=self.tempdir.path),
                     'b_009')

    def test_directory(self):
        # Directories count as well as plain files.
        assert_equal(next_output_stem(prefix='c_', path=self.tempdir.path),
                     'c_011')

    def test_file_overwrite(self):
        # overwrite=True returns the highest existing stem, not the next one.
        assert_equal(next_output_stem(prefix='a_', path=self.tempdir.path,
                                      overwrite=True),
                     'a_005')

    def test_inc(self):
        assert_equal(next_output_stem(prefix='a_', path=self.tempdir.path,
                                      inc=3),
                     'a_008')

    def test_zfill(self):
        assert_equal(next_output_stem(prefix='a_', path=self.tempdir.path,
                                      zfill=5),
                     'a_00006')

    def test_does_not_exist(self):
        assert_equal(next_output_stem(prefix='g_', path=self.tempdir.path),
                     'g_001')

    def test_does_not_exist_with_start(self):
        # BUG FIX: this method originally reused the name test_does_not_exist,
        # silently shadowing the previous test so it never ran.  Renamed so
        # unittest discovers and executes both cases.
        assert_equal(next_output_stem(prefix='g_', path=self.tempdir.path,
                                      start=4),
                     'g_004')
class test_make_array_into_dataframe(unittest.TestCase):
    """tests for make_array_into_dataframe"""
    # make_array_into_dataframe(data, column_labels=None, row_labels=None,
    #                           row_labels_label='item')

    def test_defaults(self):
        # With no labels the result matches a plain DataFrame of the array.
        assert_frame_equal(make_array_into_dataframe(
                               data=np.arange(10).reshape((5, 2))),
                           pd.DataFrame(data=np.arange(10).reshape((5, 2))))

    def test_column_labels(self):
        assert_frame_equal(make_array_into_dataframe(
                               data=np.arange(10).reshape((5, 2)),
                               column_labels=['a', 'b']),
                           pd.DataFrame(data=np.arange(10).reshape((5, 2)),
                                        columns=['a', 'b']))

    def test_row_labels(self):
        # Row labels become a leading column named 'item' by default.
        df = pd.DataFrame(data=np.arange(10).reshape((5, 2)))
        s = pd.Series(['a', 'b', 'c', 'd', 'e'])
        df.insert(loc=0, column='item', value=s)
        assert_frame_equal(make_array_into_dataframe(
                               data=np.arange(10).reshape((5, 2)),
                               row_labels=['a', 'b', 'c', 'd', 'e']),
                           df)

    def test_row_labels_label(self):
        # row_labels_label renames the leading row-label column.
        df = pd.DataFrame(data=np.arange(10).reshape((5, 2)))
        s = pd.Series(['a', 'b', 'c', 'd', 'e'])
        df.insert(loc=0, column='hey', value=s)
        assert_frame_equal(make_array_into_dataframe(
                               data=np.arange(10).reshape((5, 2)),
                               row_labels=['a', 'b', 'c', 'd', 'e'],
                               row_labels_label='hey'),
                           df)
class test_save_grid_data_to_file(unittest.TestCase):
    """tests for save_grid_data_to_file"""
    # save_grid_data_to_file(directory=None, file_stem='out_000',
    #                        create_directory=True, ext='.csv', *data_dicts)

    def setUp(self):
        self.tempdir = TempDirectory()

    def tearDown(self):
        self.tempdir.cleanup()

    def test_defaults(self):
        # Default: creates <directory>/out_000/out_000.csv with an 'idex'
        # index column.
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data},
                               directory=self.tempdir.path)
        assert_equal(self.tempdir.read(('out_000', 'out_000.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,0,1
                        0,0,1
                        1,2,3
                        2,4,5""").splitlines())

    def test_directory(self):
        # A non-existent directory is created on demand.
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data},
                               directory=os.path.join(self.tempdir.path, 'g'))
        assert_equal(self.tempdir.read(('g', 'out_000', 'out_000.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,0,1
                        0,0,1
                        1,2,3
                        2,4,5""").splitlines())

    def test_file_stem(self):
        # file_stem names both the subdirectory and the file.
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data},
                               directory=self.tempdir.path,
                               file_stem="ppp")
        assert_equal(self.tempdir.read(('ppp', 'ppp.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,0,1
                        0,0,1
                        1,2,3
                        2,4,5""").splitlines())

    def test_ext(self):
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data},
                               directory=self.tempdir.path,
                               ext=".out")
        assert_equal(self.tempdir.read(('out_000', 'out_000.out'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,0,1
                        0,0,1
                        1,2,3
                        2,4,5""").splitlines())

    def test_create_directory(self):
        # create_directory=False writes the file directly in `directory`.
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data},
                               directory=self.tempdir.path,
                               create_directory=False)
        assert_equal(self.tempdir.read('out_000.csv', 'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,0,1
                        0,0,1
                        1,2,3
                        2,4,5""").splitlines())

    def test_data_dict_header(self):
        # 'header' is prepended verbatim before the CSV data.
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data, 'header': 'hello header'},
                               directory=self.tempdir.path)
        assert_equal(self.tempdir.read(('out_000', 'out_000.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        hello header
                        idex,0,1
                        0,0,1
                        1,2,3
                        2,4,5""").splitlines())

    def test_data_dict_name(self):
        # 'name' is appended to the file stem for this data set.
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data, 'name': 'xx'},
                               directory=self.tempdir.path)
        assert_equal(self.tempdir.read(('out_000', 'out_000xx.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,0,1
                        0,0,1
                        1,2,3
                        2,4,5""").splitlines())

    def test_data_dict_row_labels(self):
        # 'row_labels' adds a labelled column (default heading 'item').
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data, 'row_labels': [8, 12, 6]},
                               directory=self.tempdir.path)
        assert_equal(self.tempdir.read(('out_000', 'out_000.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,item,0,1
                        0,8,0,1
                        1,12,2,3
                        2,6,4,5""").splitlines())

    def test_data_dict_row_labels_label(self):
        # 'row_labels_label' overrides the row-label column heading.
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data, 'row_labels': [8, 12, 6],
                                'row_labels_label': 'yyy'},
                               directory=self.tempdir.path)
        assert_equal(self.tempdir.read(('out_000', 'out_000.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,yyy,0,1
                        0,8,0,1
                        1,12,2,3
                        2,6,4,5""").splitlines())

    def test_data_dict_column_labels(self):
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file({'data': data, 'column_labels': ['a', 'b']},
                               directory=self.tempdir.path)
        assert_equal(self.tempdir.read(('out_000', 'out_000.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,a,b
                        0,0,1
                        1,2,3
                        2,4,5""").splitlines())

    def test_two_files(self):
        # A list of data dicts produces one file per dict, suffixed by 'name'.
        data = np.arange(6).reshape(3, 2)
        save_grid_data_to_file([{'data': data, 'name': 1},
                                {'data': 2 * data, 'name': 2}],
                               directory=self.tempdir.path,
                               file_stem="qqq")
        assert_equal(self.tempdir.read(('qqq', 'qqq1.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,0,1
                        0,0,1
                        1,2,3
                        2,4,5""").splitlines())
        assert_equal(self.tempdir.read(('qqq', 'qqq2.csv'),
                                       'utf-8').splitlines(),
                     textwrap.dedent("""\
                        idex,0,1
                        0,0,2
                        1,4,6
                        2,8,10""").splitlines())
class HelperForGenericInputFileArgParser(object):
    """Records how it was driven by appending lines to 'out.zebra' in the
    directory containing *path*: the file's basename on construction, and
    'dog' each time dog() is called."""

    def __init__(self, path):
        self.path = path
        self.oname = os.path.join(os.path.dirname(path), 'out.zebra')
        self._append(os.path.basename(path))

    def dog(self):
        self._append('dog')

    def _append(self, line):
        # Append one record line to the shared log file.
        with open(self.oname, 'a') as f:
            f.write(line + '\n')
class test_GenericInputFileArgParser(unittest.TestCase):
    """tests GenericInputFileArgParser"""
    # Each callback appends a record to out.zebra so the test can check which
    # files the parser dispatched and in what form (path vs file object).

    def setUp(self):
        self.tempdir = TempDirectory()
        self.tempdir.write('a1.py', "a1", 'utf-8')
        self.tempdir.write('a2.py', "a2", 'utf-8')
        self.tempdir.write('b1.txt', "b1", 'utf-8')
        self.tempdir.write('b2.txt', "b2", 'utf-8')

    def tearDown(self):
        self.tempdir.cleanup()

    def _abc_fobj(self, fobj):
        """print file contents to out.zebra"""
        with open(os.path.join(self.tempdir.path, 'out.zebra'), 'a') as f:
            f.write(fobj.read() + '\n')
        return

    def _abc_path(self, path):
        """print file basename out.zebra"""
        with open(os.path.join(self.tempdir.path, 'out.zebra'), 'a') as f:
            f.write(os.path.basename(path) + '\n')
        return

    def test_directory_with_path(self):
        # CLEANUP: removed a stray debug print(args) left in the original.
        a = GenericInputFileArgParser(self._abc_path, False)
        args = '-d {0} -p'.format(self.tempdir.path).split()
        a.main(argv=args)
        assert_equal(self.tempdir.read(('out.zebra'), 'utf-8').splitlines(),
                     textwrap.dedent("""\
                        a1.py
                        a2.py""").splitlines())

    def test_directory_with_fobj(self):
        a = GenericInputFileArgParser(self._abc_fobj, True)
        args = '-d {0} -p'.format(self.tempdir.path).split()
        a.main(argv=args)
        assert_equal(self.tempdir.read(('out.zebra'), 'utf-8').splitlines(),
                     textwrap.dedent("""\
                        a1
                        a2""").splitlines())

    def test_pattern_with_path(self):
        a = GenericInputFileArgParser(self._abc_path, False)
        args = '-d {0} -p *.txt'.format(self.tempdir.path).split()
        a.main(argv=args)
        assert_equal(self.tempdir.read(('out.zebra'), 'utf-8').splitlines(),
                     textwrap.dedent("""\
                        b1.txt
                        b2.txt""").splitlines())

    def test_pattern_with_fobj(self):
        a = GenericInputFileArgParser(self._abc_fobj, True)
        args = '-d {0} -p *.txt'.format(self.tempdir.path).split()
        a.main(argv=args)
        assert_equal(self.tempdir.read(('out.zebra'), 'utf-8').splitlines(),
                     textwrap.dedent("""\
                        b1
                        b2""").splitlines())

    def test_filename_with_fobj(self):
        a = GenericInputFileArgParser(self._abc_fobj, True)
        args = '-f {0} {1}'.format(
            os.path.join(self.tempdir.path, 'a1.py'),
            os.path.join(self.tempdir.path, 'b1.txt')
            ).split()
        a.main(argv=args)
        assert_equal(self.tempdir.read(('out.zebra'), 'utf-8').splitlines(),
                     textwrap.dedent("""\
                        a1
                        b1""").splitlines())

    def test_filename_with_path(self):
        a = GenericInputFileArgParser(self._abc_path, False)
        args = '-f {0} {1}'.format(
            os.path.join(self.tempdir.path, 'a1.py'),
            os.path.join(self.tempdir.path, 'b1.txt')
            ).split()
        a.main(argv=args)
        assert_equal(self.tempdir.read(('out.zebra'), 'utf-8').splitlines(),
                     textwrap.dedent("""\
                        a1.py
                        b1.txt""").splitlines())

    def test_default_directory(self):
        # FIX: the original built args with '-d -p'.format(self.tempdir.path),
        # a no-op .format with no placeholder - the path is deliberately
        # omitted so the parser falls back to the current working directory.
        a = GenericInputFileArgParser(self._abc_path, False)
        args = '-d -p'.split()
        with working_directory(self.tempdir.path):
            a.main(argv=args)
        assert_equal(self.tempdir.read(('out.zebra'), 'utf-8').splitlines(),
                     textwrap.dedent("""\
                        a1.py
                        a2.py""").splitlines())

    def test_methods_with_path(self):
        # After the callback object is constructed, the listed methods
        # (here 'dog' with no args) are invoked on it for each file.
        a = GenericInputFileArgParser(HelperForGenericInputFileArgParser,
                                      False, [('dog', [], {})])
        args = '-f {0} {1}'.format(
            os.path.join(self.tempdir.path, 'a1.py'),
            os.path.join(self.tempdir.path, 'b1.txt')
            ).split()
        a.main(argv=args)
        assert_equal(self.tempdir.read(('out.zebra'), 'utf-8').splitlines(),
                     textwrap.dedent("""\
                        a1.py
                        dog
                        b1.txt
                        dog
                        """).splitlines())
class test_hms_string(unittest.TestCase):
    """tests for hms_string (seconds elapsed -> 'H:MM:SS.ss' text)"""
    # hms_string(sec_elapsed)

    def test_20s(self):
        # Below a minute: zero hours/minutes, two-decimal seconds.
        assert_equal(hms_string(20), "0:00:20.00")

    def test_130p5s(self):
        # Fractional seconds survive: 130.5 s is 2 min 10.5 s.
        assert_equal(hms_string(130.5), "0:02:10.50")
class test_fcode_one_large_expr(unittest.TestCase):
    """tests for fcode_one_large_expr"""
    # fcode_one_large_expr(expr, prepend=None, **settings)
    import sympy
    from sympy import symbols, sin

    # Class-level fixtures, built once at class creation:
    # e1: sum of sin(a0)..sin(a7), long enough to force fortran line wrapping.
    n = 8
    sss = symbols(','.join(['a{:d}'.format(v) for v in range(n)]))
    e1 = 0
    for i in sss:
        e1 += sin(i)
    # e2: indexed expression, to check Indexed -> parenthesized output.
    m = sympy.tensor.IndexedBase('m')
    j = sympy.tensor.Idx('j')
    e2 = m[j] + m[j + 1]

    # NOTE(review): the expected strings below reproduce sympy's fixed-form
    # fortran output; exact leading-space counts and '&' continuation
    # positions depend on the sympy version - confirm against the pinned
    # sympy before relying on them.
    def test_line_wrap(self):
        assert_equal(fcode_one_large_expr(self.e1).splitlines(),
                     '      (sin(a0) + sin(a1) + sin(a2) + sin(a3) '
                     '+ sin(a4) + sin(a5) + sin(&\n      a6) + '
                     'sin(a7))'.splitlines())

    def test_prepend(self):
        # prepend is emitted immediately before the expression text.
        assert_equal(fcode_one_large_expr(self.e1, prepend='k=').splitlines(),
                     '      k=(sin(a0) + sin(a1) + sin(a2) + sin(a3) + '
                     'sin(a4) + sin(a5) + sin&\n      (a6) + '
                     'sin(a7))'.splitlines())

    def test_settings(self):
        # note this only tests to see if settings is passed correctly
        assert_equal(fcode_one_large_expr(self.e1,
                                          source_format="free").splitlines(),
                     '(sin(a0) + sin(a1) + sin(a2) + sin(a3) + sin(a4) + '
                     'sin(a5) + sin(a6) + sin(a7))'.splitlines())

    def test_parentheses(self):
        # Indexed bases render as fortran array references m(j).
        assert_equal(fcode_one_large_expr(self.e2),
                     '      (m(j + 1) + m(j))')
class test_working_directory(unittest.TestCase):
    """tests for the working_directory context manager"""

    def setUp(self):
        self.tempdir = TempDirectory()
        self.original_dir = os.getcwd()

    def tearDown(self):
        # Restore the cwd first so the temp dir can be removed cleanly.
        os.chdir(self.original_dir)
        self.tempdir.cleanup()

    def test_directory_change(self):
        # cwd switches inside the with-block and is restored on exit.
        assert_equal(os.getcwd(), self.original_dir)
        with working_directory(self.tempdir.path):
            assert_equal(os.getcwd(), self.tempdir.path)
        assert_equal(os.getcwd(), self.original_dir)
class test_InputFileLoaderCheckerSaver(unittest.TestCase):
    """tests for InputFileLoaderCheckerSaver

    Runs each test from inside a scratch directory so that the generated
    output directories/files (out0001, out0002, ...) never pollute the
    real working directory.

    Note: the expected output stems are built with ``os.path.join`` so the
    tests pass on any platform (they were previously hard-coded with
    Windows ``'\\'`` separators).
    """

    def setUp(self):
        # Pre-create an input file plus existing 'out0001' outputs so the
        # auto-numbering logic has something to step over.
        self.dir = os.path.abspath(os.curdir)
        self.tempdir = TempDirectory()
        self.tempdir.write('inp1.py', 'a=4\nb=6', 'utf-8')
        self.tempdir.write('out0001.py', 'a=4\nb=6', 'utf-8')
        self.tempdir.write(('what', 'out0001.py'), 'a=4\nb=6', 'utf-8')
        os.chdir(self.tempdir.path)

    def tearDown(self):
        os.chdir(self.dir)
        self.tempdir.cleanup()

    def test_init_from_str(self):
        # Initialising from a string should parse 'a' and 'b' and keep the
        # raw text in _input_text.
        a = InputFileLoaderCheckerSaver()
        a._attributes = "a b".split()
        a._initialize_attributes()
        a.__init__('a=4\nb=6')
        assert_equal(a.a, 4)
        assert_equal(a.b, 6)
        assert_equal(a._input_text, 'a=4\nb=6')

    def test_init_from_fileobj(self):
        # Initialising from an open file object behaves like the string case.
        a = InputFileLoaderCheckerSaver()
        a._attributes = "a b".split()
        a._initialize_attributes()
        with open(os.path.join(self.tempdir.path, 'inp1.py'), 'r') as f:
            a.__init__(f)
        assert_equal(a.a, 4)
        assert_equal(a.b, 6)
        assert_equal(a._input_text, 'a=4\nb=6')

    def test_attribute_defaults(self):
        # Attributes missing from the input text fall back to the defaults.
        a = InputFileLoaderCheckerSaver()
        a._input_text = 'b=6'
        a._attributes = "a b".split()
        a._attribute_defaults = {'a': 24}
        a._initialize_attributes()
        a._transfer_attributes_from_inputfile()
        assert_equal(a.a, 24)
        assert_equal(a.b, 6)
        assert_equal(a._input_text, 'b=6')

    def test_check_attributes_that_should_be_lists(self):
        # Scalars named in _attributes_that_should_be_lists are wrapped.
        a = InputFileLoaderCheckerSaver()
        a.a = 4
        a.b = 6
        a._attributes_that_should_be_lists = ['b']
        a.check_input_attributes()
        assert_equal(a.a, 4)
        assert_equal(a.b, [6])

    def test_check_zero_or_all(self):
        # 'a b c' must be all-set or all-None; c=None alone is an error.
        a = InputFileLoaderCheckerSaver()
        a.a = 4
        a.b = 6
        a.c = None
        a._zero_or_all = ['a b c'.split()]
        assert_raises(ValueError, a.check_input_attributes)

    def test_check_at_least_one(self):
        # At least one of the listed attributes must be non-None.
        a = InputFileLoaderCheckerSaver()
        a.c = None
        a._at_least_one = ['c'.split()]
        assert_raises(ValueError, a.check_input_attributes)

    def test_check_one_implies_others(self):
        # Setting 'a' requires 'c' to be set as well.
        a = InputFileLoaderCheckerSaver()
        a.a = 4
        a.c = None
        a._one_implies_others = ['a c'.split()]
        assert_raises(ValueError, a.check_input_attributes)

    def test_check_attributes_to_force_same_len(self):
        # A lone None is expanded to match the length of its partner list.
        a = InputFileLoaderCheckerSaver()
        a.a = [4, 5]
        a.c = None
        a._attributes_to_force_same_len = ['a c'.split()]
        a.check_input_attributes()
        assert_equal(a.c, [None, None])

    def test_check_attributes_that_should_have_same_x_limits(self):
        # PolyLines with different x ranges should be rejected.
        a = InputFileLoaderCheckerSaver()
        a.a = PolyLine([0, 1], [2, 5])
        a.c = PolyLine([0, 7], [5, 6])
        a._attributes_that_should_have_same_x_limits = ['a c'.split()]
        assert_raises(ValueError, a.check_input_attributes)

    def test_check_attributes_that_should_have_same_len_pairs(self):
        # Paired attributes of different lengths should be rejected.
        a = InputFileLoaderCheckerSaver()
        a.a = [2, 3]
        a.c = [3]
        a._attributes_that_should_have_same_len_pairs = ['a c'.split()]
        assert_raises(ValueError, a.check_input_attributes)

    def test_determine_output_stem_defaults(self):
        # out0001 already exists, so the next free stem is out0002 (inside
        # a same-named sub-directory by default).
        a = InputFileLoaderCheckerSaver()
        a._determine_output_stem()
        assert_equal(a._file_stem,
                     os.path.join(os.curdir, 'out0002', 'out0002'))

    def test_determine_output_stem_overwrite(self):
        # overwrite=True reuses the existing out0001 stem.
        a = InputFileLoaderCheckerSaver()
        a.overwrite = True
        a._determine_output_stem()
        assert_equal(a._file_stem,
                     os.path.join(os.curdir, 'out0001', 'out0001'))

    def test_determine_output_stem_create_directory(self):
        # create_directory=False drops the containing sub-directory.
        a = InputFileLoaderCheckerSaver()
        a.create_directory = False
        a._determine_output_stem()
        assert_equal(a._file_stem, os.path.join(os.curdir, 'out0002'))

    def test_determine_output_stem_prefix(self):
        # A custom prefix starts its own numbering at 0001.
        a = InputFileLoaderCheckerSaver()
        a.prefix = 'hello_'
        a._determine_output_stem()
        assert_equal(a._file_stem,
                     os.path.join(os.curdir, 'hello_0001', 'hello_0001'))

    def test_determine_output_stem_directory(self):
        # An explicit directory is honoured; 'what/out0001.py' exists so
        # numbering advances to out0002 there too.
        a = InputFileLoaderCheckerSaver()
        a.directory = os.path.join(self.tempdir.path, 'what')
        a._determine_output_stem()
        assert_equal(a._file_stem,
                     os.path.join(self.tempdir.path, 'what',
                                  'out0002', 'out0002'))

    def test_save_data_parsed(self):
        # Parsed attribute values are written to *_input_parsed.py.
        a = InputFileLoaderCheckerSaver()
        a._attributes = "a b".split()
        a.save_data_to_file = True
        a.a = 4
        a.b = 6
        a._save_data()
        assert_equal(self.tempdir.read(
            ('out0002', 'out0002_input_parsed.py'),
            'utf-8').strip().splitlines(),
            'a = 4\nb = 6'.splitlines())

    def test_save_data_input_text(self):
        # The original input text is written to *_input_original.py.
        a = InputFileLoaderCheckerSaver()
        a._input_text = "hello"
        a.save_data_to_file = True
        a._save_data()
        assert_equal(self.tempdir.read(
            ('out0002', 'out0002_input_original.py'),
            'utf-8').strip().splitlines(),
            'hello'.splitlines())

    def test_save_data_input_ext(self):
        # input_ext overrides the extension of the saved original input.
        a = InputFileLoaderCheckerSaver()
        a._input_text = "hello"
        a.input_ext = '.txt'
        a.save_data_to_file = True
        a._save_data()
        ok_(os.path.isfile(os.path.join(
            self.tempdir.path, 'out0002', 'out0002_input_original.txt')))

    def test_save_data_grid_data_dicts(self):
        # Grid data is saved as CSV by default.
        a = InputFileLoaderCheckerSaver()
        a._grid_data_dicts = {'data': np.arange(6).reshape(3, 2)}
        a.save_data_to_file = True
        a._save_data()
        ok_(os.path.isfile(os.path.join(
            self.tempdir.path, 'out0002', 'out0002.csv')))

    def test_save_data_grid_data_dicts_data_ext(self):
        # data_ext overrides the grid data file extension.
        a = InputFileLoaderCheckerSaver()
        a._grid_data_dicts = {'data': np.arange(6).reshape(3, 2)}
        a.save_data_to_file = True
        a.data_ext = ".txt"
        a._save_data()
        ok_(os.path.isfile(os.path.join(
            self.tempdir.path, 'out0002', 'out0002.txt')))

    def test_save_figures(self):
        # Figures are saved as <stem>_<figure label>.eps by default.
        a = InputFileLoaderCheckerSaver()
        a.save_figures_to_file = True
        fig = matplotlib.pyplot.figure()
        # Use the integer subplot spec; the string form '111' is not a
        # supported argument in current matplotlib.
        ax = fig.add_subplot(111)
        ax.plot(4, 5)
        fig.set_label('sing')
        a._figures = [fig]
        a._save_figures()
        a._figures = None
        matplotlib.pyplot.clf()
        ok_(os.path.isfile(os.path.join(
            self.tempdir.path, 'out0002', 'out0002_sing.eps')))

    def test_save_figures_figure_ext(self):
        # figure_ext overrides the figure file extension.
        a = InputFileLoaderCheckerSaver()
        a.save_figures_to_file = True
        a.figure_ext = '.pdf'
        fig = matplotlib.pyplot.figure()
        ax = fig.add_subplot(111)
        ax.plot(4, 5)
        fig.set_label('sing')
        a._figures = [fig]
        a._save_figures()
        a._figures = None
        matplotlib.pyplot.clf()
        ok_(os.path.isfile(os.path.join(
            self.tempdir.path, 'out0002', 'out0002_sing.pdf')))
if __name__ == '__main__':
    # Run this module's test suite (including doctests) under nose.
    import nose
    nose_args = ['nose', '--verbosity=3', '--with-doctest']
    nose.runmodule(argv=nose_args)
|
rtrwalker/geotecha
|
geotecha/inputoutput/test/test_inputoutput.py
|
Python
|
gpl-3.0
| 38,372
|
[
"VisIt"
] |
db9bf6d0cb800440a7f0d0c5df067711526094f51a03ec0bad00fad911a5865c
|
#
# Copyright 2015 Olli Tapaninen, VTT Technical Research Center of Finland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from meshpy.geometry import generate_extrusion
from matplotlib import pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from meshpy.tet import MeshInfo, build
# Profile of the solid of revolution in the (r, z) plane.
rz = [(0, 0), (1, 0), (1.5, 0.5), (2, 1), (0, 1)]

# Cross-section: 40 samples around the unit circle (endpoint included,
# matching np.linspace's default, so the first point is repeated).
base = [(np.sin(theta), np.cos(theta))
        for theta in np.linspace(0, 2 * np.pi, 40)]

# Extrude the profile around the base shape.
(points, facets,
 facet_holestarts, markers) = generate_extrusion(rz_points=rz, base_shape=base)

# Scatter the extruded points and outline each facet polygon in 3D.
p_array = np.array(points)
xs, ys, zs = p_array[:, 0], p_array[:, 1], p_array[:, 2]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs, ys, zs)
for f in facets:
    outline = list(f[0])
    plt.plot(xs[outline], ys[outline], zs[outline])
plt.show()

for poly_list in facets:
    print(poly_list)

# Tetrahedralise the extruded surface and export it for VisIt/ParaView.
mesh_info = MeshInfo()
mesh_info.set_points(points)
mesh_info.set_facets_ex(facets, facet_holestarts, markers)
mesh = build(mesh_info)
print(mesh.elements)
mesh.write_vtk('test.vtk')
|
ollitapa/MMP-TracerApi
|
Tests/MeshTests/meshConeTest.py
|
Python
|
apache-2.0
| 1,598
|
[
"VTK"
] |
22578206ea0cf09f1c97cc9be08e489de0bf368b8acffce13d2c95d1c7570dbd
|
import logging
from paste.httpexceptions import HTTPBadRequest, HTTPForbidden
from time import strftime
from galaxy import util
from galaxy import web
from galaxy import exceptions
from galaxy.web import _future_expose_api as expose_api
from galaxy.util import json
from galaxy.web.base.controller import BaseAPIController
from tool_shed.galaxy_install.install_manager import InstallRepositoryManager
from tool_shed.galaxy_install.metadata.installed_repository_metadata_manager import InstalledRepositoryMetadataManager
from tool_shed.galaxy_install.repair_repository_manager import RepairRepositoryManager
from tool_shed.util import common_util
from tool_shed.util import encoding_util
from tool_shed.util import hg_util
from tool_shed.util import metadata_util
from tool_shed.util import workflow_util
from tool_shed.util import tool_util
import tool_shed.util.shed_util_common as suc
log = logging.getLogger( __name__ )
def get_message_for_no_shed_tool_config():
    """Return the error message used when this Galaxy instance has no
    shed-related tool panel configuration file configured."""
    parts = (
        'The tool_config_file setting in galaxy.ini must include at least one shed tool configuration file name with a <toolbox> ',
        'tag that includes a tool_path attribute value which is a directory relative to the Galaxy installation directory in order to ',
        'automatically install tools from a tool shed into Galaxy (e.g., the file name shed_tool_conf.xml whose <toolbox> tag is ',
        '<toolbox tool_path="../shed_tools">). For details, see the "Installation of Galaxy tool shed repository tools into a local ',
        'Galaxy instance" section of the Galaxy tool shed wiki at http://wiki.galaxyproject.org/InstallingRepositoriesToGalaxy#',
        'Installing_Galaxy_tool_shed_repository_tools_into_a_local_Galaxy_instance.',
    )
    return ''.join( parts )
class ToolShedRepositoriesController( BaseAPIController ):
    """RESTful controller for interactions with tool shed repositories.

    NOTE: this module uses Python 2 syntax (``except Exception, e``) and
    Python 2 builtins (``map`` returning a list); it is not Python 3 code.
    """
    def __ensure_can_install_repos( self, trans ):
        # Make sure this Galaxy instance is configured with a shed-related tool panel configuration file.
        # NOTE(review): on a missing config this RETURNS an error dict, but on a
        # non-admin user it RAISES - callers only receive the dict if they
        # inspect the return value; verify callers handle both signalling styles.
        if not suc.have_shed_tool_conf_for_install( trans.app ):
            message = get_message_for_no_shed_tool_config()
            log.debug( message )
            return dict( status='error', error=message )
        # Make sure the current user's API key proves he is an admin user in this Galaxy instance.
        if not trans.user_is_admin():
            raise exceptions.AdminRequiredException( 'You are not authorized to request the latest installable revision for a repository in this Galaxy instance.' )
    @expose_api
    def exported_workflows( self, trans, id, **kwd ):
        """
        GET /api/tool_shed_repositories/{encoded_tool_shed_repository_id}/exported_workflows
        Display a list of dictionaries containing information about this tool shed repository's exported workflows.

        :param id: the encoded id of the ToolShedRepository object
        """
        # Example URL: http://localhost:8763/api/tool_shed_repositories/f2db41e1fa331b3e/exported_workflows
        # Since exported workflows are dictionaries with very few attributes that differentiate them from each
        # other, we'll build the list based on the following dictionary of those few attributes.
        exported_workflows = []
        repository = suc.get_tool_shed_repository_by_id( trans.app, id )
        metadata = repository.metadata
        if metadata:
            exported_workflow_tups = metadata.get( 'workflows', [] )
        else:
            exported_workflow_tups = []
        for index, exported_workflow_tup in enumerate( exported_workflow_tups ):
            # The exported_workflow_tup looks like ( relative_path, exported_workflow_dict ), where the value of
            # relative_path is the location on disk (relative to the root of the installed repository) where the
            # exported_workflow_dict file (.ga file) is located.
            exported_workflow_dict = exported_workflow_tup[ 1 ]
            annotation = exported_workflow_dict.get( 'annotation', '' )
            format_version = exported_workflow_dict.get( 'format-version', '' )
            workflow_name = exported_workflow_dict.get( 'name', '' )
            # Since we don't have an in-memory object with an id, we'll identify the exported workflow via its
            # location (i.e., index) in the list.
            display_dict = dict( index=index, annotation=annotation, format_version=format_version, workflow_name=workflow_name )
            exported_workflows.append( display_dict )
        return exported_workflows
    @expose_api
    def get_latest_installable_revision( self, trans, payload, **kwd ):
        """
        POST /api/tool_shed_repositories/get_latest_installable_revision
        Get the latest installable revision of a specified repository from a specified Tool Shed.

        :param key: the current Galaxy admin user's API key
        The following parameters are included in the payload.
        :param tool_shed_url (required): the base URL of the Tool Shed from which to retrieve the Repository revision.
        :param name (required): the name of the Repository
        :param owner (required): the owner of the Repository
        """
        # Get the information about the repository to be installed from the payload.
        tool_shed_url, name, owner = self.__parse_repository_from_payload( payload )
        # Make sure the current user's API key proves he is an admin user in this Galaxy instance.
        if not trans.user_is_admin():
            raise exceptions.AdminRequiredException( 'You are not authorized to request the latest installable revision for a repository in this Galaxy instance.' )
        params = '?name=%s&owner=%s' % ( name, owner )
        url = common_util.url_join( tool_shed_url,
                                    'api/repositories/get_ordered_installable_revisions%s' % params )
        try:
            raw_text = common_util.tool_shed_get( trans.app, tool_shed_url, url )
        except Exception, e:
            message = "Error attempting to retrieve the latest installable revision from tool shed %s for repository %s owned by %s: %s" % \
                ( str( tool_shed_url ), str( name ), str( owner ), str( e ) )
            log.debug( message )
            return dict( status='error', error=message )
        if raw_text:
            # If successful, the response from get_ordered_installable_revisions will be a list of
            # changeset_revision hash strings.
            changeset_revisions = json.loads( raw_text )
            if len( changeset_revisions ) >= 1:
                return changeset_revisions[ -1 ]
        # Fall back to the initial changelog hash when no revisions were returned.
        return hg_util.INITIAL_CHANGELOG_HASH
    def __get_value_mapper( self, trans, tool_shed_repository ):
        # Value mapper used by to_dict()/as_dict(): encodes the database id and
        # normalises a null error_message to the empty string.
        value_mapper={ 'id' : trans.security.encode_id( tool_shed_repository.id ),
                       'error_message' : tool_shed_repository.error_message or '' }
        return value_mapper
    @expose_api
    def import_workflow( self, trans, payload, **kwd ):
        """
        POST /api/tool_shed_repositories/import_workflow
        Import the specified exported workflow contained in the specified installed tool shed repository into Galaxy.

        :param key: the API key of the Galaxy user with which the imported workflow will be associated.
        :param id: the encoded id of the ToolShedRepository object
        The following parameters are included in the payload.
        :param index: the index location of the workflow tuple in the list of exported workflows stored in the metadata for the specified repository
        """
        api_key = kwd.get( 'key', None )
        if api_key is None:
            raise HTTPBadRequest( detail="Missing required parameter 'key' whose value is the API key for the Galaxy user importing the specified workflow." )
        tool_shed_repository_id = kwd.get( 'id', '' )
        if not tool_shed_repository_id:
            raise HTTPBadRequest( detail="Missing required parameter 'id'." )
        index = payload.get( 'index', None )
        if index is None:
            raise HTTPBadRequest( detail="Missing required parameter 'index'." )
        repository = suc.get_tool_shed_repository_by_id( trans.app, tool_shed_repository_id )
        # NOTE(review): exported_workflows() returns a Python list; json.loads here
        # appears to rely on @expose_api serialising the return value - confirm
        # this round-trip actually yields a JSON string at this call site.
        exported_workflows = json.loads( self.exported_workflows( trans, tool_shed_repository_id ) )
        # Since we don't have an in-memory object with an id, we'll identify the exported workflow via its location (i.e., index) in the list.
        exported_workflow = exported_workflows[ int( index ) ]
        workflow_name = exported_workflow[ 'workflow_name' ]
        workflow, status, error_message = workflow_util.import_workflow( trans, repository, workflow_name )
        if status == 'error':
            log.debug( error_message )
            return {}
        return workflow.to_dict( view='element' )
    @expose_api
    def import_workflows( self, trans, **kwd ):
        """
        POST /api/tool_shed_repositories/import_workflows
        Import all of the exported workflows contained in the specified installed tool shed repository into Galaxy.

        :param key: the API key of the Galaxy user with which the imported workflows will be associated.
        :param id: the encoded id of the ToolShedRepository object
        """
        api_key = kwd.get( 'key', None )
        if api_key is None:
            raise HTTPBadRequest( detail="Missing required parameter 'key' whose value is the API key for the Galaxy user importing the specified workflow." )
        tool_shed_repository_id = kwd.get( 'id', '' )
        if not tool_shed_repository_id:
            raise HTTPBadRequest( detail="Missing required parameter 'id'." )
        repository = suc.get_tool_shed_repository_by_id( trans.app, tool_shed_repository_id )
        # NOTE(review): same json.loads-on-a-list pattern as import_workflow above.
        exported_workflows = json.loads( self.exported_workflows( trans, tool_shed_repository_id ) )
        imported_workflow_dicts = []
        for exported_workflow_dict in exported_workflows:
            workflow_name = exported_workflow_dict[ 'workflow_name' ]
            workflow, status, error_message = workflow_util.import_workflow( trans, repository, workflow_name )
            if status == 'error':
                # Failed imports are logged and skipped, not fatal.
                log.debug( error_message )
            else:
                imported_workflow_dicts.append( workflow.to_dict( view='element' ) )
        return imported_workflow_dicts
    @expose_api
    def index( self, trans, **kwd ):
        """
        GET /api/tool_shed_repositories
        Display a list of dictionaries containing information about installed tool shed repositories.
        """
        # Example URL: http://localhost:8763/api/tool_shed_repositories
        tool_shed_repository_dicts = []
        # Iterate installed repositories ordered by name.
        for tool_shed_repository in trans.install_model.context.query( trans.app.install_model.ToolShedRepository ) \
                                                               .order_by( trans.app.install_model.ToolShedRepository.table.c.name ):
            tool_shed_repository_dict = \
                tool_shed_repository.to_dict( value_mapper=self.__get_value_mapper( trans, tool_shed_repository ) )
            tool_shed_repository_dict[ 'url' ] = web.url_for( controller='tool_shed_repositories',
                                                              action='show',
                                                              id=trans.security.encode_id( tool_shed_repository.id ) )
            tool_shed_repository_dicts.append( tool_shed_repository_dict )
        return tool_shed_repository_dicts
    @expose_api
    def install_repository_revision( self, trans, payload, **kwd ):
        """
        POST /api/tool_shed_repositories/install_repository_revision
        Install a specified repository revision from a specified tool shed into Galaxy.

        :param key: the current Galaxy admin user's API key
        The following parameters are included in the payload.
        :param tool_shed_url (required): the base URL of the Tool Shed from which to install the Repository
        :param name (required): the name of the Repository
        :param owner (required): the owner of the Repository
        :param changeset_revision (required): the changeset_revision of the RepositoryMetadata object associated with the Repository
        :param new_tool_panel_section_label (optional): label of a new section to be added to the Galaxy tool panel in which to load
                                                        tools contained in the Repository.  Either this parameter must be an empty string or
                                                        the tool_panel_section_id parameter must be an empty string or both must be an empty
                                                        string (both cannot be used simultaneously).
        :param tool_panel_section_id (optional): id of the Galaxy tool panel section in which to load tools contained in the Repository.
                                                 If this parameter is an empty string and the above new_tool_panel_section_label parameter is an
                                                 empty string, tools will be loaded outside of any sections in the tool panel.  Either this
                                                 parameter must be an empty string or the tool_panel_section_id parameter must be an empty string
                                                 of both must be an empty string (both cannot be used simultaneously).
        :param install_repository_dependencies (optional): Set to True if you want to install repository dependencies defined for the specified
                                                           repository being installed.  The default setting is False.
        :param install_tool_dependencies (optional): Set to True if you want to install tool dependencies defined for the specified repository being
                                                     installed.  The default setting is False.
        :param shed_tool_conf (optional): The shed-related tool panel configuration file configured in the "tool_config_file" setting in the Galaxy config file
                                          (e.g., galaxy.ini).  At least one shed-related tool panel config file is required to be configured. Setting
                                          this parameter to a specific file enables you to choose where the specified repository will be installed because
                                          the tool_path attribute of the <toolbox> from the specified file is used as the installation location
                                          (e.g., <toolbox tool_path="../shed_tools">).  If this parameter is not set, a shed-related tool panel configuration
                                          file will be selected automatically.
        """
        # Get the information about the repository to be installed from the payload.
        tool_shed_url, name, owner, changeset_revision = self.__parse_repository_from_payload( payload, include_changeset=True )
        self.__ensure_can_install_repos( trans )
        install_repository_manager = InstallRepositoryManager( trans.app )
        installed_tool_shed_repositories = install_repository_manager.install( tool_shed_url,
                                                                               name,
                                                                               owner,
                                                                               changeset_revision,
                                                                               payload )
        # Local helper: serialise one installed repository, attaching its API URL.
        def to_dict( tool_shed_repository ):
            tool_shed_repository_dict = tool_shed_repository.as_dict( value_mapper=self.__get_value_mapper( trans, tool_shed_repository ) )
            tool_shed_repository_dict[ 'url' ] = web.url_for( controller='tool_shed_repositories',
                                                              action='show',
                                                              id=trans.security.encode_id( tool_shed_repository.id ) )
            return tool_shed_repository_dict
        # Python 2 map() returns a list here.
        return map( to_dict, installed_tool_shed_repositories )
    @expose_api
    def install_repository_revisions( self, trans, payload, **kwd ):
        """
        POST /api/tool_shed_repositories/install_repository_revisions
        Install one or more specified repository revisions from one or more specified tool sheds into Galaxy.  The received parameters
        must be ordered lists so that positional values in tool_shed_urls, names, owners and changeset_revisions are associated.

        It's questionable whether this method is needed as the above method for installing a single repository can probably cover all
        desired scenarios.  We'll keep this one around just in case...

        :param key: the current Galaxy admin user's API key
        The following parameters are included in the payload.
        :param tool_shed_urls: the base URLs of the Tool Sheds from which to install a specified Repository
        :param names: the names of the Repositories to be installed
        :param owners: the owners of the Repositories to be installed
        :param changeset_revisions: the changeset_revisions of each RepositoryMetadata object associated with each Repository to be installed
        :param new_tool_panel_section_label: optional label of a new section to be added to the Galaxy tool panel in which to load
                                            tools contained in the Repository.  Either this parameter must be an empty string or
                                            the tool_panel_section_id parameter must be an empty string, as both cannot be used.
        :param tool_panel_section_id: optional id of the Galaxy tool panel section in which to load tools contained in the Repository.
                                      If not set, tools will be loaded outside of any sections in the tool panel.  Either this
                                      parameter must be an empty string or the tool_panel_section_id parameter must be an empty string,
                                      as both cannot be used.
        :param install_repository_dependencies (optional): Set to True if you want to install repository dependencies defined for the specified
                                                           repository being installed.  The default setting is False.
        :param install_tool_dependencies (optional): Set to True if you want to install tool dependencies defined for the specified repository being
                                                     installed.  The default setting is False.
        :param shed_tool_conf (optional): The shed-related tool panel configuration file configured in the "tool_config_file" setting in the Galaxy config file
                                          (e.g., galaxy.ini).  At least one shed-related tool panel config file is required to be configured. Setting
                                          this parameter to a specific file enables you to choose where the specified repository will be installed because
                                          the tool_path attribute of the <toolbox> from the specified file is used as the installation location
                                          (e.g., <toolbox tool_path="../shed_tools">).  If this parameter is not set, a shed-related tool panel configuration
                                          file will be selected automatically.
        """
        self.__ensure_can_install_repos( trans )
        # Get the information about all of the repositories to be installed.
        tool_shed_urls = util.listify( payload.get( 'tool_shed_urls', '' ) )
        names = util.listify( payload.get( 'names', '' ) )
        owners = util.listify( payload.get( 'owners', '' ) )
        changeset_revisions = util.listify( payload.get( 'changeset_revisions', '' ) )
        num_specified_repositories = len( tool_shed_urls )
        # The four positional lists must all have the same length.
        if len( names ) != num_specified_repositories or \
                len( owners ) != num_specified_repositories or \
                len( changeset_revisions ) != num_specified_repositories:
            message = 'Error in tool_shed_repositories API in install_repository_revisions: the received parameters must be ordered '
            message += 'lists so that positional values in tool_shed_urls, names, owners and changeset_revisions are associated.'
            log.debug( message )
            return dict( status='error', error=message )
        # Get the information about the Galaxy components (e.g., tool pane section, tool config file, etc) that will contain information
        # about each of the repositories being installed.
        # TODO: we may want to enhance this method to allow for each of the following to be associated with each repository instead of
        # forcing all repositories to use the same settings.
        install_repository_dependencies = payload.get( 'install_repository_dependencies', False )
        install_tool_dependencies = payload.get( 'install_tool_dependencies', False )
        new_tool_panel_section_label = payload.get( 'new_tool_panel_section_label', '' )
        shed_tool_conf = payload.get( 'shed_tool_conf', None )
        tool_panel_section_id = payload.get( 'tool_panel_section_id', '' )
        all_installed_tool_shed_repositories = []
        # Delegate each repository to the single-repository install endpoint.
        for index, tool_shed_url in enumerate( tool_shed_urls ):
            current_payload = {}
            current_payload[ 'tool_shed_url' ] = tool_shed_url
            current_payload[ 'name' ] = names[ index ]
            current_payload[ 'owner' ] = owners[ index ]
            current_payload[ 'changeset_revision' ] = changeset_revisions[ index ]
            current_payload[ 'new_tool_panel_section_label' ] = new_tool_panel_section_label
            current_payload[ 'tool_panel_section_id' ] = tool_panel_section_id
            current_payload[ 'install_repository_dependencies' ] = install_repository_dependencies
            current_payload[ 'install_tool_dependencies' ] = install_tool_dependencies
            current_payload[ 'shed_tool_conf' ] = shed_tool_conf
            installed_tool_shed_repositories = self.install_repository_revision( trans, **current_payload )
            if isinstance( installed_tool_shed_repositories, dict ):
                # We encountered an error.
                return installed_tool_shed_repositories
            elif isinstance( installed_tool_shed_repositories, list ):
                all_installed_tool_shed_repositories.extend( installed_tool_shed_repositories )
        return all_installed_tool_shed_repositories
    @expose_api
    def repair_repository_revision( self, trans, payload, **kwd ):
        """
        POST /api/tool_shed_repositories/repair_repository_revision
        Repair a specified repository revision previously installed into Galaxy.

        :param key: the current Galaxy admin user's API key
        The following parameters are included in the payload.
        :param tool_shed_url (required): the base URL of the Tool Shed from which the Repository was installed
        :param name (required): the name of the Repository
        :param owner (required): the owner of the Repository
        :param changeset_revision (required): the changeset_revision of the RepositoryMetadata object associated with the Repository
        """
        # Get the information about the repository to be installed from the payload.
        tool_shed_url, name, owner, changeset_revision = self.__parse_repository_from_payload( payload, include_changeset=True )
        tool_shed_repositories = []
        tool_shed_repository = suc.get_tool_shed_repository_by_shed_name_owner_changeset_revision( trans.app,
                                                                                                   tool_shed_url,
                                                                                                   name,
                                                                                                   owner,
                                                                                                   changeset_revision )
        rrm = RepairRepositoryManager( trans.app )
        repair_dict = rrm.get_repair_dict( tool_shed_repository )
        ordered_tsr_ids = repair_dict.get( 'ordered_tsr_ids', [] )
        ordered_repo_info_dicts = repair_dict.get( 'ordered_repo_info_dicts', [] )
        if ordered_tsr_ids and ordered_repo_info_dicts:
            # Repair each repository in dependency order.
            for index, tsr_id in enumerate( ordered_tsr_ids ):
                repository = trans.install_model.context.query( trans.install_model.ToolShedRepository ).get( trans.security.decode_id( tsr_id ) )
                repo_info_dict = ordered_repo_info_dicts[ index ]
                # TODO: handle errors in repair_dict.
                repair_dict = rrm.repair_tool_shed_repository( repository,
                                                               encoding_util.tool_shed_encode( repo_info_dict ) )
                repository_dict = repository.to_dict( value_mapper=self.__get_value_mapper( trans, repository ) )
                repository_dict[ 'url' ] = web.url_for( controller='tool_shed_repositories',
                                                        action='show',
                                                        id=trans.security.encode_id( repository.id ) )
                if repair_dict:
                    errors = repair_dict.get( repository.name, [] )
                    repository_dict[ 'errors_attempting_repair' ] = '  '.join( errors )
                tool_shed_repositories.append( repository_dict )
        # Display the list of repaired repositories.
        return tool_shed_repositories
    def __parse_repository_from_payload( self, payload, include_changeset=False ):
        # Get the information about the repository to be installed from the payload.
        # Returns ( tool_shed_url, name, owner ) or, with include_changeset=True,
        # ( tool_shed_url, name, owner, changeset_revision ).
        tool_shed_url = payload.get( 'tool_shed_url', '' )
        if not tool_shed_url:
            raise exceptions.RequestParameterMissingException( "Missing required parameter 'tool_shed_url'." )
        name = payload.get( 'name', '' )
        if not name:
            raise exceptions.RequestParameterMissingException( "Missing required parameter 'name'." )
        owner = payload.get( 'owner', '' )
        if not owner:
            raise exceptions.RequestParameterMissingException( "Missing required parameter 'owner'." )
        if not include_changeset:
            return tool_shed_url, name, owner
        # NOTE(review): the first three missing-parameter cases raise
        # RequestParameterMissingException but this one raises HTTPBadRequest -
        # confirm whether the inconsistency is intentional.
        changeset_revision = payload.get( 'changeset_revision', '' )
        if not changeset_revision:
            raise HTTPBadRequest( detail="Missing required parameter 'changeset_revision'." )
        return tool_shed_url, name, owner, changeset_revision
    @expose_api
    def reset_metadata_on_installed_repositories( self, trans, payload, **kwd ):
        """
        PUT /api/tool_shed_repositories/reset_metadata_on_installed_repositories
        Resets all metadata on all repositories installed into Galaxy in an "orderly fashion".

        :param key: the API key of the Galaxy admin user.
        """
        start_time = strftime( "%Y-%m-%d %H:%M:%S" )
        results = dict( start_time=start_time,
                        successful_count=0,
                        unsuccessful_count=0,
                        repository_status=[] )
        # Make sure the current user's API key proves he is an admin user in this Galaxy instance.
        if not trans.user_is_admin():
            raise HTTPForbidden( detail='You are not authorized to reset metadata on repositories installed into this Galaxy instance.' )
        irmm = InstalledRepositoryMetadataManager( trans.app )
        query = irmm.get_query_for_setting_metadata_on_repositories( order=False )
        # Now reset metadata on all remaining repositories.
        for repository in query:
            try:
                irmm.set_repository( repository )
                irmm.reset_all_metadata_on_installed_repository()
                irmm_invalid_file_tups = irmm.get_invalid_file_tups()
                if irmm_invalid_file_tups:
                    message = tool_util.generate_message_for_invalid_tools( trans.app,
                                                                            irmm_invalid_file_tups,
                                                                            repository,
                                                                            None,
                                                                            as_html=False )
                    results[ 'unsuccessful_count' ] += 1
                else:
                    message = "Successfully reset metadata on repository %s owned by %s" % \
                        ( str( repository.name ), str( repository.owner ) )
                    results[ 'successful_count' ] += 1
            except Exception, e:
                # Per-repository failures are recorded, not fatal for the batch.
                message = "Error resetting metadata on repository %s owned by %s: %s" % \
                    ( str( repository.name ), str( repository.owner ), str( e ) )
                results[ 'unsuccessful_count' ] += 1
            results[ 'repository_status' ].append( message )
        stop_time = strftime( "%Y-%m-%d %H:%M:%S" )
        results[ 'stop_time' ] = stop_time
        return json.dumps( results, sort_keys=True, indent=4 )
    @expose_api
    def show( self, trans, id, **kwd ):
        """
        GET /api/tool_shed_repositories/{encoded_tool_shed_repository_id}
        Display a dictionary containing information about a specified tool_shed_repository.

        :param id: the encoded id of the ToolShedRepository object
        """
        # Example URL: http://localhost:8763/api/tool_shed_repositories/df7a1f0c02a5b08e
        tool_shed_repository = suc.get_tool_shed_repository_by_id( trans.app, id )
        if tool_shed_repository is None:
            log.debug( "Unable to locate tool_shed_repository record for id %s." % ( str( id ) ) )
            return {}
        tool_shed_repository_dict = tool_shed_repository.as_dict( value_mapper=self.__get_value_mapper( trans, tool_shed_repository ) )
        tool_shed_repository_dict[ 'url' ] = web.url_for( controller='tool_shed_repositories',
                                                          action='show',
                                                          id=trans.security.encode_id( tool_shed_repository.id ) )
        return tool_shed_repository_dict
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
|
Python
|
gpl-3.0
| 30,735
|
[
"Galaxy"
] |
3d76b4c911175eaf59804bc2b190264eeceea664815f9403e5522a73226089ae
|
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
MosaicContents: database interface class.
These classes provide an interface between the database and the top-level
ingest algorithm (AbstractIngester and its subclasses). They also provide
the implementation of the database and tile store side of the ingest
process. They are expected to be independent of the structure of any
particular dataset, but will change if the database schema or tile store
format changes.
"""
from __future__ import absolute_import
import logging
import os
import re
import shutil
from eotools.execute import execute
from eotools.utils import log_multiline
from agdc.cube_util import DatasetError, get_file_size_mb, create_directory
from .ingest_db_wrapper import TC_MOSAIC
from osgeo import gdal
import numpy
# Set up logger.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
#
# Constants for PQA mosaic formation:
#
PQA_CONTIGUITY = 256 # contiguity = bit 8
#
# Classes
#
class MosaicContents(object):
    """MosaicContents database interface class.

    This class has 'remove' and 'make_permanent' methods, so can be
    used as a tile_contents object with the collection.Collection and
    collection.Transaction classes.

    A mosaic is built in a temporary directory first and only moved to its
    final location (under a 'mosaic_cache' directory beside the source tile)
    when make_permanent() is called.
    """
    def __init__(self, tile_record_list, tile_type_dict,
                 level_name, temp_tile_dir):
        """Create the mosaic contents.

        tile_record_list -- list of exactly two tile record dicts to mosaic.
        tile_type_dict -- maps tile_type_id to tile type info dicts.
        level_name -- processing level; 'PQA' tiles get a real raster mosaic,
            all other levels get a lightweight VRT.
        temp_tile_dir -- directory in which to build the temporary mosaic.
        """
        assert len(tile_record_list) > 1, \
            "Attempt to make a mosaic out of a single tile."
        assert len(tile_record_list) <= 2, \
            ("Attempt to make a mosaic out of more than 2 tiles.\n" +
             "Handling for this case is not yet implemented.")
        # The first tile record is used as the template for the mosaic's
        # database record and pathname.
        tile_dict = tile_record_list[0]
        tile_type_id = tile_dict['tile_type_id']
        tile_type_info = tile_type_dict[tile_type_id]
        if level_name == 'PQA':
            extension = tile_type_info['file_extension']
        else:
            extension = '.vrt'
        (self.mosaic_temp_path, self.mosaic_final_path) = (
            self.__get_mosaic_paths(tile_dict['tile_pathname'],
                                    extension,
                                    temp_tile_dir))
        if level_name == 'PQA':
            self.__make_mosaic_pqa(tile_record_list,
                                   tile_type_info,
                                   self.mosaic_temp_path)
        else:
            self.__make_mosaic_vrt(tile_record_list,
                                   self.mosaic_temp_path)
        # Database record for the mosaic: a copy of the first tile's record
        # with the id cleared and path/class/size updated.
        self.mosaic_dict = dict(tile_dict)
        self.mosaic_dict['tile_id'] = None
        self.mosaic_dict['tile_pathname'] = self.mosaic_final_path
        self.mosaic_dict['tile_class_id'] = TC_MOSAIC
        self.mosaic_dict['tile_size'] = (
            get_file_size_mb(self.mosaic_temp_path))
    def remove(self):
        """Remove the temporary mosaic file."""
        if os.path.isfile(self.mosaic_temp_path):
            os.remove(self.mosaic_temp_path)
    def make_permanent(self):
        """Move mosaic tile contents to its permanent location."""
        shutil.move(self.mosaic_temp_path, self.mosaic_final_path)
    def get_output_path(self):
        """Return the final location for the mosaic."""
        return self.mosaic_final_path
    def create_record(self, db):
        """Create a record for the mosaic in the database."""
        db.insert_tile_record(self.mosaic_dict)
    @staticmethod
    def __get_mosaic_paths(tile_pathname, extension, temp_tile_dir):
        """Generate the temporary and final pathnames for the mosaic.

        'tile_pathname' is the path to the first tile in the mosaic.
        'extension' is the extension to use for the mosaic filename.
        Returns a tuple (mosaic_temp_path, mosaic_final_path).
        """
        (tile_dir, tile_basename) = os.path.split(tile_pathname)
        # Final mosaics live in a 'mosaic_cache' subdirectory beside the
        # source tile; temporaries in a like-named subdirectory of the
        # temporary tile directory.
        mosaic_final_dir = os.path.join(tile_dir, 'mosaic_cache')
        create_directory(mosaic_final_dir)
        mosaic_temp_dir = os.path.join(temp_tile_dir, 'mosaic_cache')
        create_directory(mosaic_temp_dir)
        # Swap the source tile's extension for the mosaic extension.
        mosaic_basename = re.sub(r'\.\w+$', extension, tile_basename)
        mosaic_temp_path = os.path.join(mosaic_temp_dir, mosaic_basename)
        mosaic_final_path = os.path.join(mosaic_final_dir, mosaic_basename)
        return (mosaic_temp_path, mosaic_final_path)
    @staticmethod
    def __make_mosaic_pqa(tile_record_list, tile_type_info, mosaic_path):
        """From the PQA tiles in tile_record_list, create a mosaic tile
        at mosaic_pathname.

        Contiguous pixels (contiguity bit set) are combined with bitwise AND;
        non-contiguous pixels with bitwise OR.
        """
        LOGGER.info('Creating PQA mosaic file %s', mosaic_path)
        mosaic_file_list = [tr['tile_pathname'] for tr in tile_record_list]
        # The first tile supplies geometry, projection and data type.
        template_dataset = gdal.Open(mosaic_file_list[0])
        gdal_driver = gdal.GetDriverByName(tile_type_info['file_format'])
        #Set datatype formats appropriate to Create() and numpy
        gdal_dtype = template_dataset.GetRasterBand(1).DataType
        numpy_dtype = gdal.GetDataTypeName(gdal_dtype)
        mosaic_dataset = gdal_driver.Create(
            mosaic_path,
            template_dataset.RasterXSize,
            template_dataset.RasterYSize,
            1,
            gdal_dtype,
            tile_type_info['format_options'].split(','),
            )
        if not mosaic_dataset:
            raise DatasetError(
                'Unable to open output dataset %s' % mosaic_dataset)
        mosaic_dataset.SetGeoTransform(template_dataset.GetGeoTransform())
        mosaic_dataset.SetProjection(template_dataset.GetProjection())
        #TODO: make vrt here - not really needed for single-layer file
        # if tile_type_info['file_format'] == 'netCDF':
        #     pass
        output_band = mosaic_dataset.GetRasterBand(1)
        # Set all background values of data_array to FFFF (i.e. all ones)
        # (multiplying ones by -1 wraps to the all-ones bit pattern for an
        # unsigned integer dtype, the identity for bitwise AND).
        data_array = numpy.ones(shape=(template_dataset.RasterYSize,
                                       template_dataset.RasterXSize),
                                dtype=numpy_dtype
                                ) * -1
        # Set all background values of no_data_array to 0 (i.e. all zeroes)
        # (the identity for bitwise OR).
        no_data_array = numpy.zeros(shape=(template_dataset.RasterYSize,
                                           template_dataset.RasterXSize),
                                    dtype=numpy_dtype
                                    )
        # NOTE(review): numpy.bool is an alias of builtin bool; it is removed
        # in NumPy >= 1.24 — confirm target NumPy version.
        overall_data_mask = numpy.zeros((mosaic_dataset.RasterYSize,
                                         mosaic_dataset.RasterXSize),
                                        dtype=numpy.bool
                                        )
        del template_dataset
        # Populate data_array with -masked PQA data
        for pqa_dataset_index in range(len(mosaic_file_list)):
            pqa_dataset_path = mosaic_file_list[pqa_dataset_index]
            pqa_dataset = gdal.Open(pqa_dataset_path)
            if not pqa_dataset:
                raise DatasetError('Unable to open %s' % pqa_dataset_path)
            pqa_array = pqa_dataset.ReadAsArray()
            del pqa_dataset
            LOGGER.debug('Opened %s', pqa_dataset_path)
            # Treat contiguous and non-contiguous pixels separately
            # Set all contiguous pixels to true in data_mask
            pqa_data_mask = (pqa_array & PQA_CONTIGUITY).astype(numpy.bool)
            # Expand overall_data_mask to true for any contiguous pixels
            overall_data_mask = overall_data_mask | pqa_data_mask
            # Perform bitwise-and on contiguous pixels in data_array
            data_array[pqa_data_mask] &= pqa_array[pqa_data_mask]
            # Perform bitwise-or on non-contiguous pixels in no_data_array
            no_data_array[~pqa_data_mask] |= pqa_array[~pqa_data_mask]
        # Set all pixels which don't contain data to combined no-data values
        # (should be same as original no-data values)
        data_array[~overall_data_mask] = no_data_array[~overall_data_mask]
        output_band.WriteArray(data_array)
        mosaic_dataset.FlushCache()
    @staticmethod
    def __make_mosaic_vrt(tile_record_list, mosaic_path):
        """From two or more source tiles create a vrt"""
        LOGGER.info('Creating mosaic VRT file %s', mosaic_path)
        source_file_list = [tr['tile_pathname'] for tr in tile_record_list]
        # Build the VRT with the gdalbuildvrt command-line tool; -q keeps it
        # quiet and -overwrite allows re-runs.
        gdalbuildvrt_cmd = ["gdalbuildvrt",
                            "-q",
                            "-overwrite",
                            "%s" % mosaic_path
                            ]
        gdalbuildvrt_cmd.extend(source_file_list)
        result = execute(gdalbuildvrt_cmd, shell=False)
        if result['stdout']:
            log_multiline(LOGGER.info, result['stdout'],
                          'stdout from %s' % gdalbuildvrt_cmd, '\t')
        if result['stderr']:
            log_multiline(LOGGER.debug, result['stderr'],
                          'stderr from %s' % gdalbuildvrt_cmd, '\t')
        if result['returncode'] != 0:
            raise DatasetError('Unable to perform gdalbuildvrt: ' +
                               '"%s" failed: %s'
                               % (gdalbuildvrt_cmd, result['stderr']))
|
alex-ip/agdc
|
agdc/abstract_ingester/mosaic_contents.py
|
Python
|
bsd-3-clause
| 10,867
|
[
"NetCDF"
] |
556ed5302afc84e18c49d281dd78300e0480f8b12661448d193a3fa0a5f4b527
|
# First-order-logic example knowledge bases.
# Each example dict maps:
#   'kb'      -> definite clauses (facts and implications) to assert
#   'queries' -> goals to prove against the kb, one per line
#   'limit'   -> optional cap on the number of answers to enumerate
#   'Differ'  -> optional list of constants asserted pairwise distinct
farmer = {
    'kb': '''
Farmer(Mac)
Rabbit(Pete)
Mother(MrsMac, Mac)
Mother(MrsRabbit, Pete)
(Rabbit(r) & Farmer(f)) ==> Hates(f, r)
(Mother(m, c)) ==> Loves(m, c)
(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)
(Farmer(f)) ==> Human(f)
(Mother(m, h) & Human(h)) ==> Human(m)
''',
# Note that this order of conjuncts
# would result in infinite recursion:
# '(Human(h) & Mother(m, h)) ==> Human(m)'
    'queries':'''
Human(x)
Hates(x, y)
''',
#    'limit': 1,
}

# The classic "weapons" criminality example (AIMA, Ch. 9).
weapons = {
    'kb': '''
(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)
Owns(Nono, M1)
Missile(M1)
(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)
Missile(x) ==> Weapon(x)
Enemy(x, America) ==> Hostile(x)
American(West)
Enemy(Nono, America)
''',
    'queries':'''
Criminal(x)
''',
}

# A larger food-chain domain: animals, plants and predator/prey relations.
FoodChain = {
    'Differ': '''
Amber
Pete
John
Jerry
Falchion
Skippy
Lisa
Grup
Debbie
Jax
Lassie
''',
    'kb': '''
Fox(Amber)
Fox(Jax)
Brother(Jax, Amber)
Sister(Amber,Jax)
Rabbit(Pete)
Hare(Lassie)
Lion(Steve)
Owl(John)
Mouse(Jerry)
Bird(Falchion)
Grasshopper(Skippy)
Carrot(Lisa)
Grain(Grup)
Grass(Debbie)
DartFrog(Guppy)
Mammal(Amber)
Mammal(Jax)
Mammal(Steve)
Bird(John)
Bird(Falchion)
Herbivore(Lassie)
Herbivore(Jerry)
Herbivore(Skippy)
Herbivore(Pete)
Plants(Grain)
Plants(Grass)
Plants(Carrot)
Poisonous(Guppy)
(Owl(x)) ==> Nocturnal(x)
(Fox(x)) ==> Nimble(x)
(Mammal(f)) & Poisonous(o) ==> Despises(f,o)
(Bird(b)) & Poisonous(o) ==> Despises(b,o)
(Mouse(x)) & Rabbit(d) & Hare(t) ==> Rodents(x,d,t)
(Nocturnal(n)) & Rodents(x,d,t) ==> Hunts(n,x,d,t)
(Hunts(n,x,d,t)) & Bird(n) ==> Apex(n)
(Despises(f,o)) & Rodents(x,d,t) ==> Devours(f,x,d,t)
(Apex(f)) & Herbivore(d) ==> TerrorizesAtNight(d,f)
(Plants(f)) & Herbivore(d) ==> Fears(f,d)
(Herbivore(h)) & Plants(p) ==> Eats(h,p)
(Despises(f,d)) & Eats(h,p) ==> DoesntEat(f,p)
(Bird(b)) & Mammal(f) ==> Carnivores(b,f)
(Brother(b , s)) ==> Siblings(b,s)
''',
    'queries':'''
Despises(Falchion,y)
Despises(x,y)
Fox(x)
Nimble(x)
Apex(x)
Eats(x,y)
Siblings(f,s)
Fears(x,y)
Fears(Carrot, y)
Rodents(x,y,z)
Hunts(x,y,z,q)
TerrorizesAtNight(x,y)
Devours(x,y,z,r)
DoesntEat(x,y)
Carnivores(b,f)
Nocturnal(x)
''',
    'limit': 20,
}

# Registry of all examples exposed by this module, keyed by name.
Examples = {
    'farmer': farmer,
    'weapons': weapons,
    'FoodChain': FoodChain
}
|
WmHHooper/aima-python
|
submissions/Martinez/myLogic.py
|
Python
|
mit
| 2,339
|
[
"Amber"
] |
fdf491b7888a6dd8b11a619dcecf0ec7a7fc92b4636da7e39f342c01ebb14c9c
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import linalg
from ..utils import array2d, check_random_state
from ..utils import shuffle as util_shuffle
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.

    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined in order to add covariance. The clusters
        are then placed on the vertices of the hypercube.

    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float or None, optional (default=0.0)
        Shift all features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float or None, optional (default=1.0)
        Multiply all features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    # Each cluster needs its own hypercube vertex in the informative subspace.
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         "be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    # Infer the last class weight when only n_classes - 1 were given.
    if weights and len(weights) == (n_classes - 1):
        weights.append(1.0 - sum(weights))

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    # Distribute samples among clusters, spreading any remainder round-robin.
    n_samples_per_cluster = []

    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))

    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=np.int)

    # Build the polytope: one vertex of the +-class_sep hypercube per cluster.
    C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))

    if not hypercube:
        # Squash vertices by random factors to obtain a random polytope.
        for k in range(n_clusters):
            C[k, :] *= generator.rand()

        for f in range(n_informative):
            C[:, f] *= generator.rand()

    generator.shuffle(C)

    # Loop over all clusters
    pos = 0
    pos_end = 0

    for k in range(n_clusters):
        # Number of samples in cluster k
        n_samples_k = n_samples_per_cluster[k]

        # Define the range of samples
        pos = pos_end
        pos_end = pos + n_samples_k

        # Assign labels
        y[pos:pos_end] = k % n_classes

        # Draw features at random
        X[pos:pos_end, :n_informative] = generator.randn(n_samples_k,
                                                         n_informative)

        # Multiply by a random matrix to create co-variance of the features
        A = 2 * generator.rand(n_informative, n_informative) - 1
        X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
                                                A)

        # Shift the cluster to a vertex
        X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.int)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly flip labels
    # NOTE(review): a negative flip_y silently disables flipping.
    if flip_y >= 0.0:
        for i in range(n_samples):
            if generator.rand() < flip_y:
                y[i] = generator.randint(n_classes)

    # Randomly shift and scale
    constant_shift = shift is not None
    constant_scale = scale is not None

    for f in range(n_features):
        if not constant_shift:
            shift = (2 * generator.rand() - 1) * class_sep

        if not constant_scale:
            scale = 1 + 100 * generator.rand()

        X[:, f] += shift
        X[:, f] *= scale

    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50,
                                   allow_unlabeled=True, random_state=None):
    """Generate a random multilabel classification problem.

    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)

    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features.

    n_classes : int, optional (default=5)
        The number of classes of the classification problem.

    n_labels : int, optional (default=2)
        The average number of labels per instance. Number of labels follows
        a Poisson distribution that never takes the value 0.

    length : int, optional (default=50)
        Sum of the features (number of words if documents).

    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    Y : list of tuples
        The label sets.
    """
    generator = check_random_state(random_state)
    # Class prior P(c), normalized to sum to 1.
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    # Per-class word distributions P(w|c), columns normalized to sum to 1.
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)

    def sample_example():
        # Draw a single (word-count vector, label list) pair.
        _, n_classes = p_w_c.shape

        # pick a nonzero number of labels per document by rejection sampling
        # (n starts out of range so the loop always runs at least once)
        n = n_classes + 1
        while (not allow_unlabeled and n == 0) or n > n_classes:
            n = generator.poisson(n_labels)

        # pick n classes
        y = []
        while len(y) != n:
            # pick a class with probability P(c)
            c = generator.multinomial(1, p_c).argmax()
            if not c in y:
                y.append(c)

        # pick a non-zero document length by rejection sampling
        k = 0
        while k == 0:
            k = generator.poisson(length)

        # generate a document of length k words
        x = np.zeros(n_features, dtype=int)
        for i in range(k):
            if len(y) == 0:
                # if sample does not belong to any class, generate noise word
                w = generator.randint(n_features)
            else:
                # pick a class and generate an appropriate word
                c = y[generator.randint(len(y))]
                w = generator.multinomial(1, p_w_c[:, c]).argmax()
            x[w] += 1

        return x, y

    X, Y = zip(*[sample_example() for i in range(n_samples)])

    return np.array(X, dtype=np.float64), Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generates data for binary classification used in
    Hastie et al. 2009, Example 10.2.

    The ten features are standard independent Gaussian and the target ``y``
    is defined by::

      y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.

    y : array of shape [n_samples]
        The output values (+1.0 or -1.0).

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.
    """
    rng = check_random_state(random_state)

    # Ten independent standard-normal features per sample.
    X = rng.normal(size=(n_samples, 10))
    # Threshold the squared radius at 9.34 (the chi^2_10 median).
    y = np.where((X ** 2.0).sum(axis=1) > 9.34, 1.0, -1.0)
    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.

    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See the `make_low_rank_matrix` for
    more details.

    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=100)
        The number of features.

    n_informative : int, optional (default=10)
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.

    n_targets : int, optional (default=1)
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.

    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.

    effective_rank : int or None, optional (default=None)
        If not None, approximate number of singular vectors required to
        explain most of the input data by linear combinations; if None, the
        input set is well conditioned, centered and gaussian with unit
        variance.

    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None.

    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are returned.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.

    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.

    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only if
        coef is True.
    """
    generator = check_random_state(random_state)

    if effective_rank is None:
        # Well conditioned: i.i.d. standard normal input.
        X = generator.randn(n_samples, n_features)
    else:
        # Low rank, fat tail singular profile.
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=generator)

    # Ground-truth model: only the first n_informative coefficients are
    # nonzero, so the remaining features are uncorrelated with y (and should
    # be ignored by a sparsifying regularizer such as L1 or elastic net).
    w = np.zeros((n_features, n_targets))
    w[:n_informative, :] = 100 * generator.rand(n_informative, n_targets)

    y = np.dot(X, w) + bias

    # Optional additive gaussian noise on the output.
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)

    # Randomly permute samples and features (keeping w aligned with X).
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

        perm = np.arange(n_features)
        generator.shuffle(perm)
        X[:, :] = X[:, perm]
        w = w[perm]

    y = np.squeeze(y)

    if coef:
        return X, y, np.squeeze(w)
    return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated. If odd, one point fewer is
        generated (each circle receives ``n_samples // 2`` points).

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling shuffling and noise.

    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.

    Raises
    ------
    ValueError
        If ``factor`` is outside [0, 1].
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")

    generator = check_random_state(random_state)

    # Use floor division so array sizes stay ints on Python 3 as well;
    # np.linspace/np.zeros reject non-integer sizes (`/` true-divides on
    # Python 3, which broke this function).
    n_half = n_samples // 2
    # so as not to have the first point = last point, we add one and then
    # remove it.
    linspace = np.linspace(0, 2 * np.pi, n_half + 1)[:-1]
    outer_circ_x = np.cos(linspace)
    outer_circ_y = np.sin(linspace)
    # The inner circle is the outer one scaled towards the origin.
    inner_circ_x = outer_circ_x * factor
    inner_circ_y = outer_circ_y * factor

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_half), np.ones(n_half)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    # np.int was just an alias for the builtin int; use int directly.
    return X, y.astype(int)
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.

    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.

    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling shuffling and noise.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    # Use floor division so the sizes are ints on Python 3 as well;
    # np.linspace/np.zeros reject non-integer sizes (`/` true-divides on
    # Python 3, which broke this function).
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # Label counts must follow the stacking order above: the first
    # n_samples_out rows are the outer moon (label 0) and the remaining
    # n_samples_in rows the inner moon (label 1). The previous code had the
    # two counts swapped, mislabelling one point whenever n_samples is odd
    # (fixed the same way in later scikit-learn releases).
    y = np.hstack([np.zeros(n_samples_out), np.ones(n_samples_in)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    # np.int was just an alias for the builtin int; use int directly.
    return X, y.astype(int)
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.

    n_features : int, optional (default=2)
        The number of features for each sample.

    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.

    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.

    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.

    shuffle : boolean, optional (default=True)
        Shuffle the samples.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    """
    generator = check_random_state(random_state)

    # An integer `centers` means "draw that many centers uniformly inside
    # center_box"; otherwise the given locations are used as-is.
    if isinstance(centers, numbers.Integral):
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        centers = array2d(centers)
        n_features = centers.shape[1]

    # Split n_samples as evenly as possible, handing the remainder out to
    # the first few clusters.
    n_centers = centers.shape[0]
    counts = [int(n_samples // n_centers)] * n_centers
    for i in range(n_samples % n_centers):
        counts[i] += 1

    sample_blocks = []
    labels = []
    for label, count in enumerate(counts):
        sample_blocks.append(centers[label] +
                             generator.normal(scale=cluster_std,
                                              size=(count, n_features)))
        labels += [label] * count

    X = np.concatenate(sample_blocks)
    y = np.array(labels)

    if shuffle:
        order = np.arange(n_samples)
        generator.shuffle(order)
        X = X[order]
        y = y[order]

    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem.

    Inputs ``X`` are independent features uniform on [0, 1].  The target is::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Only the first five of the ``n_features`` columns are informative; the
    rest are independent of ``y``.  ``n_features`` must therefore be >= 5.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features.  Should be at least 5.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")
    rng = check_random_state(random_state)
    X = rng.rand(n_samples, n_features)
    # Only the first five columns enter the target formula.
    y = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
         + 20 * (X[:, 2] - 0.5) ** 2
         + 10 * X[:, 3]
         + 5 * X[:, 4]
         + noise * rng.randn(n_samples))
    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem.

    Inputs ``X`` are 4 independent features uniformly distributed on::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The target is::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
                - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)
    X = rng.rand(n_samples, 4)
    # Map the uniform [0, 1) draws onto the documented intervals.
    X[:, 0] *= 100
    X[:, 1] = 40 * np.pi + X[:, 1] * (520 * np.pi)
    X[:, 3] = 1 + X[:, 3] * 10
    y = np.sqrt(X[:, 0] ** 2
                + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) \
        + noise * rng.randn(n_samples)
    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem.

    Inputs ``X`` are 4 independent features uniformly distributed on::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The target is::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
               / X[:, 0]) + noise * N(0, 1).

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)
    X = rng.rand(n_samples, 4)
    # Map the uniform [0, 1) draws onto the documented intervals.
    X[:, 0] *= 100
    X[:, 1] = 40 * np.pi + X[:, 1] * (520 * np.pi)
    X[:, 3] = 1 + X[:, 3] * 10
    y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
        + noise * rng.randn(n_samples)
    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values.

    Most of the variance is explained by a bell-shaped singular-value
    profile of width ``effective_rank``::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    plus a fat, slowly decaying tail::

        tail_strength * exp(-0.1 * i / effective_rank).

    The bell-shaped part can be seen as the structured signal while the tail
    models noise that cannot be summarized by few linear components.  Such
    profiles are common in practice (face images, TF-IDF document vectors).

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most
        of the data by linear combinations.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    generator = check_random_state(random_state)
    rank = min(n_samples, n_features)
    # Random orthonormal bases for the row and column spaces.
    from ..utils.fixes import qr_economic
    u, _ = qr_economic(generator.randn(n_samples, rank))
    v, _ = qr_economic(generator.randn(n_features, rank))
    # Assemble the singular-value profile: bell-shaped signal + fat tail.
    index = np.arange(rank, dtype=np.float64)
    low_rank = ((1 - tail_strength) *
                np.exp(-1.0 * (index / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * index / effective_rank)
    s = np.identity(rank) * (low_rank + tail)
    return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such as D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Parameters
    ----------
    n_samples : int
        number of samples to generate
    n_components : int
        number of components in the dictionary
    n_features : int
        number of features of the dataset to generate
    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample
    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    generator = check_random_state(random_state)
    # Generate the dictionary and normalize each atom (column) to unit norm.
    D = generator.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))
    # Generate the sparse code: each column gets exactly n_nonzero_coefs
    # non-zero entries at randomly chosen rows.
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.randn(n_nonzero_coefs)
    # Encode the signal.
    Y = np.dot(D, X)
    # Return a tuple of arrays instead of a bare ``map``: under Python 3 a
    # map object is a single-use iterator that supports neither indexing nor
    # repeated unpacking, which breaks the documented "array" return values.
    # Tuple unpacking by existing callers keeps working unchanged.
    return tuple(np.squeeze(a) for a in (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design.

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative; the remaining features are
    useless.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    rng = check_random_state(random_state)
    X = rng.normal(loc=0, scale=1, size=(n_samples, n_features))
    # Only the first four columns are informative.
    target_mean = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
    y = rng.normal(loc=target_mean, scale=np.ones(n_samples))
    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.
    """
    rng = check_random_state(random_state)
    A = rng.rand(n_dim, n_dim)
    U, s, V = linalg.svd(np.dot(A.T, A))
    # Replace the singular values with random values in (1, 2) so the
    # reconstructed matrix is strictly positive definite.
    X = np.dot(np.dot(U, 1.0 + np.diag(rng.rand(n_dim))), V)
    return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric, positive-definite matrix.
    Parameters
    ----------
    dim: integer, optional (default=1)
        The size of the random matrix to generate.
    alpha: float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is non zero (see notes).
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    prec: array of shape = [dim, dim]
    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.
    """
    random_state = check_random_state(random_state)
    # Start from -I so the Cholesky-like factor has a non-zero diagonal;
    # a sparse strictly-lower-triangular part is added below.
    chol = -np.eye(dim)
    aux = random_state.rand(dim, dim)
    # Zero entries below alpha and replace those above alpha with fresh
    # draws from [smallest_coef, largest_coef).  Entries exactly equal to
    # alpha are untouched (a probability-zero event for continuous draws).
    # The zeroing must happen first: it does not change the `aux > alpha`
    # mask used on the next two lines.
    aux[aux < alpha] = 0
    aux[aux > alpha] = (smallest_coef
                        + (largest_coef - smallest_coef)
                        * random_state.rand(np.sum(aux > alpha)))
    # Keep only the strictly lower triangle of the random part.
    aux = np.tril(aux, k=-1)
    # Permute the lines: we don't want to have assymetries in the final
    # SPD matrix
    permutation = random_state.permutation(dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    # prec = chol.T @ chol is symmetric positive definite by construction.
    prec = np.dot(chol.T, chol)
    if norm_diag:
        # Rescale rows and columns so the returned matrix has unit diagonal.
        d = np.diag(prec)
        d = 1. / np.sqrt(d)
        prec *= d
        prec *= d[:, np.newaxis]
    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
    """
    rng = check_random_state(random_state)
    # Main manifold coordinate: an angle uniform on [1.5*pi, 4.5*pi).
    t = 1.5 * np.pi * (1 + 2 * rng.rand(1, n_samples))
    # Stack the spiral (x, z) with a uniform "width" coordinate y.
    X = np.concatenate((t * np.cos(t),
                        21 * rng.rand(1, n_samples),
                        t * np.sin(t)))
    X += noise * rng.randn(3, n_samples)
    return X.T, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    rng = check_random_state(random_state)
    # Main manifold coordinate: an angle uniform on [-1.5*pi, 1.5*pi).
    t = 3 * np.pi * (rng.rand(1, n_samples) - 0.5)
    # Stack the S-shaped (x, z) curve with a uniform "width" coordinate y.
    X = np.concatenate((np.sin(t),
                        2.0 * rng.rand(1, n_samples),
                        np.sign(t) * (np.cos(t) - 1)))
    X += noise * rng.randn(3, n_samples)
    return X.T, np.squeeze(t)
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    r"""Generate isotropic Gaussian and label samples by quantile.

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).
    cov : float, optional (default=1.)
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.
    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.
    n_features : int, optional (default=2)
        The number of features for each sample.
    n_classes : int, optional (default=3)
        The number of classes
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, RandomState instance, or None for the global `np.random` state.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")
    generator = check_random_state(random_state)
    mean = np.zeros(n_features) if mean is None else np.array(mean)
    # Draw from an isotropic multivariate normal centred on ``mean``.
    X = generator.multivariate_normal(mean, cov * np.identity(n_features),
                                      (n_samples,))
    # Order the samples by their squared distance from the mean ...
    idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[idx, :]
    # ... then slice them into n_classes nearly equal shells (inner to
    # outer); any leftover samples are assigned to the outermost class.
    step = n_samples // n_classes
    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
    return X, y
|
florian-f/sklearn
|
sklearn/datasets/samples_generator.py
|
Python
|
bsd-3-clause
| 43,680
|
[
"Gaussian"
] |
58f43fa70720e5ad5cd5eca2da52c541e5dbb0497d7c8be9d4e20037c14934ed
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from mdtraj.testing import eq
from mdtraj import load, load_topology
def test_xml(get_fn):
    """Coordinates loaded from an OpenMM XML state file must match the
    reference PDB frame once both trajectories are centered."""
    topology = load_topology(get_fn('native2.pdb'), no_boxchk=True)
    from_xml = load(get_fn('native2.xml'), top=topology)
    from_pdb = load(get_fn('native2.pdb'), no_boxchk=True)
    for traj in (from_xml, from_pdb):
        traj.center_coordinates()
    assert eq(from_xml.xyz, from_pdb.xyz)
    assert eq(from_xml.unitcell_vectors, from_pdb.unitcell_vectors)
|
dwhswenson/mdtraj
|
tests/test_xml.py
|
Python
|
lgpl-2.1
| 1,415
|
[
"MDTraj"
] |
0af44a41656138eeee4ce0729098908b190a5459737a661ec9417dcd06c0fbc1
|
"""Base class for mixture models."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
import warnings
from abc import ABCMeta, abstractmethod
from time import time
import numpy as np
from .. import cluster
from ..base import BaseEstimator
from ..base import DensityMixin
from ..externals import six
from ..exceptions import ConvergenceWarning
from ..utils import check_array, check_random_state
from ..utils.extmath import logsumexp
def _check_shape(param, param_shape, name):
"""Validate the shape of the input parameter 'param'.
Parameters
----------
param : array
param_shape : tuple
name : string
"""
param = np.array(param)
if param.shape != param_shape:
raise ValueError("The parameter '%s' should have the shape of %s, "
"but got %s" % (name, param_shape, param.shape))
def _check_X(X, n_components=None, n_features=None):
    """Validate the input data X for a mixture model.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    n_components : int or None
        If given, require at least this many samples.
    n_features : int or None
        If given, require exactly this many features.

    Returns
    -------
    X : array, shape (n_samples, n_features)
        The validated, float32/float64 array.
    """
    X = check_array(X, dtype=[np.float64, np.float32])
    n_samples_seen, n_features_seen = X.shape
    if n_components is not None and n_samples_seen < n_components:
        raise ValueError('Expected n_samples >= n_components '
                         'but got n_components = %d, n_samples = %d'
                         % (n_components, n_samples_seen))
    if n_features is not None and n_features_seen != n_features:
        raise ValueError("Expected the input data X have %d features, "
                         "but got %d features"
                         % (n_features, n_features_seen))
    return X
class BaseMixture(six.with_metaclass(ABCMeta, DensityMixin, BaseEstimator)):
    """Base class for mixture models.
    This abstract class specifies an interface for all mixture classes and
    provides basic common methods for mixture models.
    """
    def __init__(self, n_components, tol, reg_covar,
                 max_iter, n_init, init_params, random_state, warm_start,
                 verbose, verbose_interval):
        # Hyper-parameters are stored verbatim; validation is deferred to
        # _check_initial_parameters() at fit time (scikit-learn convention).
        self.n_components = n_components
        self.tol = tol
        self.reg_covar = reg_covar
        self.max_iter = max_iter
        self.n_init = n_init
        self.init_params = init_params
        self.random_state = random_state
        self.warm_start = warm_start
        self.verbose = verbose
        self.verbose_interval = verbose_interval
    def _check_initial_parameters(self, X):
        """Check values of the basic parameters.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        """
        if self.n_components < 1:
            raise ValueError("Invalid value for 'n_components': %d "
                             "Estimation requires at least one component"
                             % self.n_components)
        if self.tol < 0.:
            raise ValueError("Invalid value for 'tol': %.5f "
                             "Tolerance used by the EM must be non-negative"
                             % self.tol)
        if self.n_init < 1:
            raise ValueError("Invalid value for 'n_init': %d "
                             "Estimation requires at least one run"
                             % self.n_init)
        if self.max_iter < 1:
            raise ValueError("Invalid value for 'max_iter': %d "
                             "Estimation requires at least one iteration"
                             % self.max_iter)
        if self.reg_covar < 0.:
            raise ValueError("Invalid value for 'reg_covar': %.5f "
                             "regularization on covariance must be "
                             "non-negative"
                             % self.reg_covar)
        # Check all the parameters values of the derived class
        self._check_parameters(X)
    @abstractmethod
    def _check_parameters(self, X):
        """Check initial parameters of the derived class.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        """
        pass
    def _initialize_parameters(self, X, random_state):
        """Initialize the model parameters.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        random_state : RandomState
            A random number generator instance.
        """
        n_samples, _ = X.shape
        if self.init_params == 'kmeans':
            # Hard (one-hot) responsibilities from a single k-means run.
            resp = np.zeros((n_samples, self.n_components))
            label = cluster.KMeans(n_clusters=self.n_components, n_init=1,
                                   random_state=random_state).fit(X).labels_
            resp[np.arange(n_samples), label] = 1
        elif self.init_params == 'random':
            # Random responsibilities, normalized to sum to 1 per sample.
            resp = random_state.rand(n_samples, self.n_components)
            resp /= resp.sum(axis=1)[:, np.newaxis]
        else:
            raise ValueError("Unimplemented initialization method '%s'"
                             % self.init_params)
        self._initialize(X, resp)
    @abstractmethod
    def _initialize(self, X, resp):
        """Initialize the model parameters of the derived class.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        resp : array-like, shape (n_samples, n_components)
        """
        pass
    def fit(self, X, y=None):
        """Estimate model parameters with the EM algorithm.
        The method fit the model `n_init` times and set the parameters with
        which the model has the largest likelihood or lower bound. Within each
        trial, the method iterates between E-step and M-step for `max_iter`
        times until the change of likelihood or lower bound is less than
        `tol`, otherwise, a `ConvergenceWarning` is raised.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        self
        """
        X = _check_X(X, self.n_components)
        self._check_initial_parameters(X)
        # if we enable warm_start, we will have a unique initialisation
        do_init = not(self.warm_start and hasattr(self, 'converged_'))
        n_init = self.n_init if do_init else 1
        max_lower_bound = -np.infty
        self.converged_ = False
        random_state = check_random_state(self.random_state)
        n_samples, _ = X.shape
        # Run EM n_init times and keep the parameters of the best run.
        for init in range(n_init):
            self._print_verbose_msg_init_beg(init)
            if do_init:
                self._initialize_parameters(X, random_state)
                self.lower_bound_ = -np.infty
            for n_iter in range(self.max_iter):
                prev_lower_bound = self.lower_bound_
                # Alternate E-step and M-step; the lower bound should
                # increase monotonically under EM.
                log_prob_norm, log_resp = self._e_step(X)
                self._m_step(X, log_resp)
                self.lower_bound_ = self._compute_lower_bound(
                    log_resp, log_prob_norm)
                change = self.lower_bound_ - prev_lower_bound
                self._print_verbose_msg_iter_end(n_iter, change)
                if abs(change) < self.tol:
                    self.converged_ = True
                    break
            self._print_verbose_msg_init_end(self.lower_bound_)
            # NOTE(review): if no initialization ever produces a lower bound
            # strictly greater than -inf (or the bound is NaN), best_params
            # below is never assigned and _set_parameters() would raise
            # UnboundLocalError — confirm the bound is always finite here.
            if self.lower_bound_ > max_lower_bound:
                max_lower_bound = self.lower_bound_
                best_params = self._get_parameters()
                best_n_iter = n_iter
        if not self.converged_:
            warnings.warn('Initialization %d did not converged. '
                          'Try different init parameters, '
                          'or increase max_iter, tol '
                          'or check for degenerate data.'
                          % (init + 1), ConvergenceWarning)
        self._set_parameters(best_params)
        self.n_iter_ = best_n_iter
        return self
    def _e_step(self, X):
        """E step.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        log_prob_norm : float
            Mean of the logarithms of the probabilities of each sample in X
        log_responsibility : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        log_prob_norm, log_resp = self._estimate_log_prob_resp(X)
        return np.mean(log_prob_norm), log_resp
    @abstractmethod
    def _m_step(self, X, log_resp):
        """M step.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        log_resp : array-like, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        pass
    @abstractmethod
    def _check_is_fitted(self):
        # Raise NotFittedError (in the derived class) if fit was not called.
        pass
    @abstractmethod
    def _get_parameters(self):
        # Return the current model parameters as an opaque tuple.
        pass
    @abstractmethod
    def _set_parameters(self, params):
        # Restore model parameters previously returned by _get_parameters().
        pass
    def score_samples(self, X):
        """Compute the weighted log probabilities for each sample.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        log_prob : array, shape (n_samples,)
            Log probabilities of each data point in X.
        """
        self._check_is_fitted()
        X = _check_X(X, None, self.means_.shape[1])
        return logsumexp(self._estimate_weighted_log_prob(X), axis=1)
    def score(self, X, y=None):
        """Compute the per-sample average log-likelihood of the given data X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_dimensions)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        log_likelihood : float
            Log likelihood of the Gaussian mixture given X.
        """
        return self.score_samples(X).mean()
    def predict(self, X, y=None):
        """Predict the labels for the data samples in X using trained model.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        self._check_is_fitted()
        X = _check_X(X, None, self.means_.shape[1])
        # Each sample is assigned to the component with the highest
        # weighted log probability (MAP assignment).
        return self._estimate_weighted_log_prob(X).argmax(axis=1)
    def predict_proba(self, X):
        """Predict posterior probability of data per each component.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        resp : array, shape (n_samples, n_components)
            Returns the probability of the sample for each Gaussian
            (state) in the model.
        """
        self._check_is_fitted()
        X = _check_X(X, None, self.means_.shape[1])
        _, log_resp = self._estimate_log_prob_resp(X)
        return np.exp(log_resp)
    def sample(self, n_samples=1):
        """Generate random samples from the fitted Gaussian distribution.
        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        Returns
        -------
        X : array, shape (n_samples, n_features)
            Randomly generated sample
        y : array, shape (nsamples,)
            Component labels
        """
        self._check_is_fitted()
        if n_samples < 1:
            # NOTE(review): the message interpolates self.n_components where
            # n_samples looks intended — confirm and fix upstream.
            raise ValueError(
                "Invalid value for 'n_samples': %d . The sampling requires at "
                "least one sample." % (self.n_components))
        _, n_features = self.means_.shape
        rng = check_random_state(self.random_state)
        # Number of samples drawn from each component, proportional to the
        # mixture weights.
        n_samples_comp = rng.multinomial(n_samples, self.weights_)
        # self.covariance_type / self.covariances_ are assumed to be set by
        # the fitted derived class (e.g. GaussianMixture) — TODO confirm.
        if self.covariance_type == 'full':
            # One covariance matrix per component.
            X = np.vstack([
                rng.multivariate_normal(mean, covariance, int(sample))
                for (mean, covariance, sample) in zip(
                    self.means_, self.covariances_, n_samples_comp)])
        elif self.covariance_type == "tied":
            # A single covariance matrix shared by all components.
            X = np.vstack([
                rng.multivariate_normal(mean, self.covariances_, int(sample))
                for (mean, sample) in zip(
                    self.means_, n_samples_comp)])
        else:
            # Diagonal or spherical: scale standard normals per dimension.
            X = np.vstack([
                mean + rng.randn(sample, n_features) * np.sqrt(covariance)
                for (mean, covariance, sample) in zip(
                    self.means_, self.covariances_, n_samples_comp)])
        y = np.concatenate([j * np.ones(sample, dtype=int)
                            for j, sample in enumerate(n_samples_comp)])
        return (X, y)
    def _estimate_weighted_log_prob(self, X):
        """Estimate the weighted log-probabilities, log P(X | Z) + log weights.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        weighted_log_prob : array, shape (n_samples, n_components)
        """
        return self._estimate_log_prob(X) + self._estimate_log_weights()
    @abstractmethod
    def _estimate_log_weights(self):
        """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.
        Returns
        -------
        log_weight : array, shape (n_components, )
        """
        pass
    @abstractmethod
    def _estimate_log_prob(self, X):
        """Estimate the log-probabilities log P(X | Z).
        Compute the log-probabilities per each component for each sample.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        log_prob : array, shape (n_samples, n_components)
        """
        pass
    def _estimate_log_prob_resp(self, X):
        """Estimate log probabilities and responsibilities for each sample.
        Compute the log probabilities, weighted log probabilities per
        component and responsibilities for each sample in X with respect to
        the current state of the model.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        log_prob_norm : array, shape (n_samples,)
            log p(X)
        log_responsibilities : array, shape (n_samples, n_components)
            logarithm of the responsibilities
        """
        weighted_log_prob = self._estimate_weighted_log_prob(X)
        # log p(x) = logsumexp_k [ log pi_k + log p(x | k) ]
        log_prob_norm = logsumexp(weighted_log_prob, axis=1)
        with np.errstate(under='ignore'):
            # ignore underflow
            log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
        return log_prob_norm, log_resp
    def _print_verbose_msg_init_beg(self, n_init):
        """Print verbose message on initialization."""
        if self.verbose == 1:
            print("Initialization %d" % n_init)
        elif self.verbose >= 2:
            # Same message, but also start the timers used for per-iteration
            # timing reports at verbosity >= 2.
            print("Initialization %d" % n_init)
            self._init_prev_time = time()
            self._iter_prev_time = self._init_prev_time
    def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
        """Print verbose message on initialization."""
        # Only report every verbose_interval iterations.
        if n_iter % self.verbose_interval == 0:
            if self.verbose == 1:
                print("  Iteration %d" % n_iter)
            elif self.verbose >= 2:
                cur_time = time()
                print("  Iteration %d\t time lapse %.5fs\t ll change %.5f" % (
                    n_iter, cur_time - self._iter_prev_time, diff_ll))
                self._iter_prev_time = cur_time
    def _print_verbose_msg_init_end(self, ll):
        """Print verbose message on the end of iteration."""
        if self.verbose == 1:
            print("Initialization converged: %s" % self.converged_)
        elif self.verbose >= 2:
            print("Initialization converged: %s\t time lapse %.5fs\t ll %.5f" %
                  (self.converged_, time() - self._init_prev_time, ll))
|
zuku1985/scikit-learn
|
sklearn/mixture/base.py
|
Python
|
bsd-3-clause
| 16,649
|
[
"Gaussian"
] |
d6870d0f873669882f70c280cd5134f9f4d97ce82422f2833e8720a099fec6a7
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import numpy as np
import stripeline.rng as rng
class FlatRNG:
    """Pseudorandom number generator, uniform over the interval [0, 1[."""

    def __init__(self, x_init=0, y_init=0, z_init=0, w_init=0):
        """Create the generator.

        The four parameters ``x_init``, ``y_init``, ``z_init``, and
        ``w_init`` are the four 32-bit seeds used by the generator.
        """
        self.state = rng.init_rng(x_init, y_init, z_init, w_init)

    def next(self):
        """Return the next pseudorandom number in the sequence."""
        return rng.rand_uniform(self.state)

    def fill_vector(self, array):
        """Overwrite ``array`` with a sequence of pseudorandom numbers."""
        rng.fill_vector_uniform(self.state, array)
class NormalRNG:
    """Random number generator drawing from a Gaussian distribution.

    The Gaussian distribution has mean=0 and sigma=1. It is easy to
    scale the result to an arbitrary mean and sigma:

        rng = NormalRNG()
        mean = 10.0
        sigma = 1.36
        num = mean + rng.next() * sigma
    """

    def __init__(self, x_init=0, y_init=0, z_init=0, w_init=0):
        """Seed the generator with four 32-bit integers."""
        # Uniform-generator state plus two one-element scratch buffers
        # passed through to the rng routines (presumably the flag/value
        # pair caching the second Gaussian deviate — confirm in
        # stripeline.rng).
        self.state = rng.init_rng(x_init, y_init, z_init, w_init)
        self.empty = np.ones(1, dtype='int8')
        self.gset = np.zeros(1, dtype='float64')

    def next(self):
        """Return a new pseudorandom number."""
        return rng.rand_normal(self.state, self.empty, self.gset)

    def fill_vector(self, array):
        """Fill the ``array`` vector with a sequence of pseudorandom numbers."""
        rng.fill_vector_normal(self.state, self.empty, self.gset, array)
class Oof2RNG:
    '''Random number generator with spectral power 1/f^2
    The random numbers have zero mean.
    '''
    def __init__(self, fmin, fknee, fsample,
                 x_init=0, y_init=0, z_init=0, w_init=0):
        # ``fmin``, ``fknee`` and ``fsample`` shape the 1/f^2 spectrum
        # (presumably frequencies in Hz — confirm in stripeline.rng);
        # the four ``*_init`` values seed the underlying uniform
        # generator.
        self.flat_state = rng.init_rng(x_init, y_init, z_init, w_init)
        # One-element scratch buffers handed to the rng routines on
        # every draw.
        self.empty = np.ones(1, dtype='int8')
        self.gset = np.zeros(1, dtype='float64')
        # Opaque state of the 1/f^2 filter.
        self.oof2_state = rng.init_oof2(fmin, fknee, fsample)
    def next(self):
        'Return a new pseudorandom number'
        return rng.rand_oof2(self.flat_state, self.empty,
                             self.gset, self.oof2_state)
    def fill_vector(self, array):
        'Fill the ``array`` vector with a sequence of pseudorandom numbers'
        rng.fill_vector_oof2(self.flat_state, self.empty, self.gset,
                             self.oof2_state, array)
class OofRNG:
    '''Random number generator with spectral power 1/f^alpha
    The random numbers have zero mean. The value of ``alpha`` must be
    in the range [-2, 0).'''
    def __init__(self, alpha, fmin, fknee, fsample,
                 x_init=0, y_init=0, z_init=0, w_init=0):
        # ``alpha`` is the spectral slope; ``fmin``, ``fknee`` and
        # ``fsample`` shape the spectrum; the four ``*_init`` values
        # seed the underlying uniform generator.
        self.flat_state = rng.init_rng(x_init, y_init, z_init, w_init)
        # One-element scratch buffers handed to the rng routines on
        # every draw.
        self.empty = np.ones(1, dtype='int8')
        self.gset = np.zeros(1, dtype='float64')
        # The filter state lives in a caller-allocated float64 array
        # whose required size is reported by rng.oof_state_size().
        self.oof_state = np.empty(rng.oof_state_size(fmin, fknee, fsample),
                                  dtype='float64')
        self.num_of_states = rng.init_oof(alpha, fmin, fknee, fsample,
                                          self.oof_state)
    def next(self):
        'Return a new pseudorandom number'
        return rng.rand_oof(self.flat_state, self.empty,
                            self.gset, self.oof_state, self.num_of_states)
    def fill_vector(self, array):
        'Fill the ``array`` vector with a sequence of pseudorandom numbers'
        rng.fill_vector_oof(self.flat_state, self.empty, self.gset,
                            self.oof_state, self.num_of_states, array)
|
ziotom78/stripeline
|
stripeline/noisegen.py
|
Python
|
mit
| 3,655
|
[
"Gaussian"
] |
da5b40b84c0700a1673d16d3a05577cffa2ccf458ca2f2339b084c875bd2825d
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides a class to fit elliptical isophotes.
"""
import warnings
from astropy.utils.exceptions import AstropyUserWarning
import numpy as np
from .fitter import (DEFAULT_CONVERGENCE, DEFAULT_FFLAG, DEFAULT_MAXGERR,
DEFAULT_MAXIT, DEFAULT_MINIT, CentralEllipseFitter,
EllipseFitter)
from .geometry import EllipseGeometry
from .integrator import BILINEAR
from .isophote import Isophote, IsophoteList
from .sample import CentralEllipseSample, EllipseSample
__all__ = ['Ellipse']
class Ellipse:
    r"""
    Class to fit elliptical isophotes to a galaxy image.
    The isophotes in the image are measured using an iterative method
    described by `Jedrzejewski (1987; MNRAS 226, 747)
    <https://ui.adsabs.harvard.edu/abs/1987MNRAS.226..747J/abstract>`_.
    See the **Notes** section below for details about the algorithm.
    Parameters
    ----------
    image : 2D `~numpy.ndarray`
        The image array.
    geometry : `~photutils.isophote.EllipseGeometry` instance or `None`, optional
        The optional geometry that describes the first ellipse to be
        fitted.  If `None`, a default
        `~photutils.isophote.EllipseGeometry` instance is created
        centered on the image frame with ellipticity of 0.2 and a
        position angle of 90 degrees.
    threshold : float, optional
        The threshold for the object centerer algorithm.  By lowering
        this value the object centerer becomes less strict, in the sense
        that it will accept lower signal-to-noise data.  If set to a very
        large value, the centerer is effectively shut off.  In this case,
        either the geometry information supplied by the ``geometry``
        parameter is used as is, or the fit algorithm will terminate
        prematurely.  Note that once the object centerer runs
        successfully, the (x, y) coordinates in the ``geometry``
        attribute (an `~photutils.isophote.EllipseGeometry` instance)
        are modified in place.  The default is 0.1
    Notes
    -----
    The image is measured using an iterative method described by
    `Jedrzejewski (1987; MNRAS 226, 747)
    <https://ui.adsabs.harvard.edu/abs/1987MNRAS.226..747J/abstract>`_.
    Each isophote is fitted at a pre-defined, fixed semimajor axis
    length.  The algorithm starts from a first-guess elliptical isophote
    defined by approximate values for the (x, y) center coordinates,
    ellipticity, and position angle.  Using these values, the image is
    sampled along an elliptical path, producing a 1-dimensional function
    that describes the dependence of intensity (pixel value) with angle
    (E).  The function is stored as a set of 1D numpy arrays.  The
    harmonic content of this function is analyzed by least-squares
    fitting to the function:
    .. math::
        y = y0 + (A1 * \sin(E)) + (B1 * \cos(E)) + (A2 * \sin(2 * E))
        + (B2 * \cos(2 * E))
    Each one of the harmonic amplitudes (A1, B1, A2, and B2) is related
    to a specific ellipse geometric parameter in the sense that it
    conveys information regarding how much the parameter's current value
    deviates from the "true" one.  To compute this deviation, the image's
    local radial gradient has to be taken into account too.  The
    algorithm picks up the largest amplitude among the four, estimates
    the local gradient, and computes the corresponding increment in the
    associated ellipse parameter.  That parameter is updated, and the
    image is resampled.  This process is repeated until any one of the
    following criteria are met:
    1. the largest harmonic amplitude is less than a given fraction of
       the rms residual of the intensity data around the harmonic fit.
    2. a user-specified maximum number of iterations is reached.
    3. more than a given fraction of the elliptical sample points have no
       valid data in then, either because they lie outside the image
       boundaries or because they were flagged out from the fit by
       sigma-clipping.
    In any case, a minimum number of iterations is always performed.  If
    iterations stop because of reasons 2 or 3 above, then those ellipse
    parameters that generated the lowest absolute values for harmonic
    amplitudes will be used.  At this point, the image data sample coming
    from the best fit ellipse is fitted by the following function:
    .. math::
        y = y0 + (An * sin(n * E)) + (Bn * cos(n * E))
    with :math:`n = 3` and :math:`n = 4`.  The corresponding amplitudes
    (A3, B3, A4, and B4), divided by the semimajor axis length and local
    intensity gradient, measure the isophote's deviations from perfect
    ellipticity (these amplitudes, divided by semimajor axis and
    gradient, are the actual quantities stored in the output
    `~photutils.isophote.Isophote` instance).
    The algorithm then measures the integrated intensity and the number
    of non-flagged pixels inside the elliptical isophote, and also
    inside the corresponding circle with same center and radius equal to
    the semimajor axis length.  These parameters, their errors, other
    associated parameters, and auxiliary information, are stored in the
    `~photutils.isophote.Isophote` instance.
    Errors in intensity and local gradient are obtained directly from
    the rms scatter of intensity data along the fitted ellipse.  Ellipse
    geometry errors are obtained from the errors in the coefficients of
    the first and second simultaneous harmonic fit.  Third and fourth
    harmonic amplitude errors are obtained in the same way, but only
    after the first and second harmonics are subtracted from the raw
    data.  For more details, see the error analysis in `Busko (1996;
    ASPC 101, 139)
    <https://ui.adsabs.harvard.edu/abs/1996ASPC..101..139B/abstract>`_.
    After fitting the ellipse that corresponds to a given value of the
    semimajor axis (by the process described above), the axis length is
    incremented/decremented following a pre-defined rule.  At each step,
    the starting, first-guess, ellipse parameters are taken from the
    previously fitted ellipse that has the closest semimajor axis length
    to the current one.  On low surface brightness regions (those having
    large radii), the small values of the image radial gradient can
    induce large corrections and meaningless values for the ellipse
    parameters.  The algorithm has the ability to stop increasing
    semimajor axis based on several criteria, including signal-to-noise
    ratio.
    See the `~photutils.isophote.Isophote` documentation for the meaning
    of the stop code reported after each fit.
    The fit algorithm provides a k-sigma clipping algorithm for cleaning
    deviant sample points at each isophote, thus improving convergence
    stability against any non-elliptical structure such as stars, spiral
    arms, HII regions, defects, etc.
    The fit algorithm has no way of finding where, in the input image
    frame, the galaxy to be measured is located.  The center (x, y)
    coordinates need to be close to the actual center for the fit to
    work.  An "object centerer" function helps to verify that the
    selected position can be used as starting point.  This function scans
    a 10x10 window centered either on the (x, y) coordinates in the
    `~photutils.isophote.EllipseGeometry` instance passed to the
    constructor of the `~photutils.isophote.Ellipse` class, or, if any
    one of them, or both, are set to `None`, on the input image frame
    center.  In case a successful acquisition takes place, the
    `~photutils.isophote.EllipseGeometry` instance is modified in place
    to reflect the solution of the object centerer algorithm.
    In some cases the object centerer algorithm may fail, even though
    there is enough signal-to-noise to start a fit (e.g., in objects
    with very high ellipticity).  In those cases the sensitivity of the
    algorithm can be decreased by decreasing the value of the object
    centerer threshold parameter.  The centerer works by looking to where
    a quantity akin to a signal-to-noise ratio is maximized within the
    10x10 window.  The centerer can thus be shut off entirely by setting
    the threshold to a large value >> 1 (meaning, no location inside the
    search window will achieve that signal-to-noise ratio).
    A note of caution: the ellipse fitting algorithm was designed
    explicitly with an elliptical galaxy brightness distribution in
    mind.  In particular, a well defined negative radial intensity
    gradient across the region being fitted is paramount for the
    achievement of stable solutions.  Use of the algorithm in other types
    of images (e.g., planetary nebulae) may lead to inability to
    converge to any acceptable solution.
    """
    def __init__(self, image, geometry=None, threshold=0.1):
        self.image = image
        if geometry is not None:
            self._geometry = geometry
        else:
            # Default first guess: ellipse centered on the image frame,
            # sma=10 pixels, ellipticity 0.2, position angle 90 degrees.
            _x0 = image.shape[1] / 2
            _y0 = image.shape[0] / 2
            self._geometry = EllipseGeometry(_x0, _y0, 10., eps=0.2,
                                             pa=np.pi/2)
        self.set_threshold(threshold)
    def set_threshold(self, threshold):
        """
        Modify the threshold value used by the centerer.
        Parameters
        ----------
        threshold : float
            The new threshold value to use.
        """
        self._geometry.centerer_threshold = threshold
    def fit_image(self, sma0=None, minsma=0., maxsma=None, step=0.1,
                  conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT,
                  maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG,
                  maxgerr=DEFAULT_MAXGERR, sclip=3., nclip=0,
                  integrmode=BILINEAR, linear=None, maxrit=None,
                  fix_center=False, fix_pa=False, fix_eps=False):
        # This parameter list is quite large and should in principle be
        # simplified by re-distributing these controls to somewhere else.
        # We keep this design though because it better mimics the flat
        # architecture used in the original STSDAS task `ellipse`.
        """
        Fit multiple isophotes to the image array.
        This method loops over each value of the semimajor axis (sma)
        length (constructed from the input parameters), fitting a single
        isophote at each sma.  The entire set of isophotes is returned
        in an `~photutils.isophote.IsophoteList` instance.
        Note that the fix_XXX parameters act in unison.  Meaning, if one
        of them is set via this call, the others will assume their default
        (False) values.  This effectively overrides any settings that are
        present in the internal `~photutils.isophote.EllipseGeometry`
        instance that is carried along as a property of this class.  If
        an instance of `~photutils.isophote.EllipseGeometry` was passed
        to this class' constructor, that instance will be effectively
        overridden by the fix_XXX parameters in this call.
        Parameters
        ----------
        sma0 : float, optional
            The starting value for the semimajor axis length (pixels).
            This value must not be the minimum or maximum semimajor axis
            length, but something in between.  The algorithm can't start
            from the very center of the galaxy image because the
            modelling of elliptical isophotes on that region is poor and
            it will diverge very easily if not tied to other previously
            fit isophotes.  It can't start from the maximum value either
            because the maximum is not known beforehand, depending on
            signal-to-noise.  The ``sma0`` value should be selected such
            that the corresponding isophote has a good signal-to-noise
            ratio and a clearly defined geometry.  If set to `None` (the
            default), one of two actions will be taken:  if a
            `~photutils.isophote.EllipseGeometry` instance was input to
            the `~photutils.isophote.Ellipse` constructor, its ``sma``
            value will be used.  Otherwise, a default value of 10. will
            be used.
        minsma : float, optional
            The minimum value for the semimajor axis length (pixels).
            The default is 0.
        maxsma : float or `None`, optional
            The maximum value for the semimajor axis length (pixels).
            When set to `None` (default), the algorithm will increase
            the semimajor axis until one of several conditions will
            cause it to stop and revert to fit ellipses with sma <
            ``sma0``.
        step : float, optional
            The step value used to grow/shrink the semimajor axis length
            (pixels if ``linear=True``, or a relative value if
            ``linear=False``).  See the ``linear`` parameter.  The
            default is 0.1.
        conver : float, optional
            The main convergence criterion.  Iterations stop when the
            largest harmonic amplitude becomes smaller (in absolute
            value) than ``conver`` times the harmonic fit rms.  The
            default is 0.05.
        minit : int, optional
            The minimum number of iterations to perform.  A minimum of 10
            (the default) iterations guarantees that, on average, 2
            iterations will be available for fitting each independent
            parameter (the four harmonic amplitudes and the intensity
            level).  For the first isophote, the minimum number of
            iterations is 2 * ``minit`` to ensure that, even departing
            from not-so-good initial values, the algorithm has a better
            chance to converge to a sensible solution.
        maxit : int, optional
            The maximum number of iterations to perform.  The default is
            50.
        fflag : float, optional
            The acceptable fraction of flagged data points in the
            sample.  If the actual fraction of valid data points is
            smaller than this, the iterations will stop and the current
            `~photutils.isophote.Isophote` will be returned.  Flagged
            data points are points that either lie outside the image
            frame, are masked, or were rejected by sigma-clipping.  The
            default is 0.7.
        maxgerr : float, optional
            The maximum acceptable relative error in the local radial
            intensity gradient.  This is the main control for preventing
            ellipses to grow to regions of too low signal-to-noise
            ratio.  It specifies the maximum acceptable relative error
            in the local radial intensity gradient.  `Busko (1996; ASPC
            101, 139)
            <https://ui.adsabs.harvard.edu/abs/1996ASPC..101..139B/abstract>`_
            showed that the fitting precision relates to that relative
            error.  The usual behavior of the gradient relative error is
            to increase with semimajor axis, being larger in outer,
            fainter regions of a galaxy image.  In the current
            implementation, the ``maxgerr`` criterion is triggered only
            when two consecutive isophotes exceed the value specified by
            the parameter.  This prevents premature stopping caused by
            contamination such as stars and HII regions.
            A number of actions may happen when the gradient error
            exceeds ``maxgerr`` (or becomes non-significant and is set
            to `None`).  If the maximum semimajor axis specified by
            ``maxsma`` is set to `None`, semimajor axis growth is
            stopped and the algorithm proceeds inwards to the galaxy
            center.  If ``maxsma`` is set to some finite value, and this
            value is larger than the current semimajor axis length, the
            algorithm enters non-iterative mode and proceeds outwards
            until reaching ``maxsma``.  The default is 0.5.
        sclip : float, optional
            The sigma-clip sigma value.  The default is 3.0.
        nclip : int, optional
            The number of sigma-clip iterations.  The default is 0, which
            means sigma-clipping is skipped.
        integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
            The area integration mode.  The default is 'bilinear'.
        linear : bool, optional
            The semimajor axis growing/shrinking mode.  If `False`
            (default), the geometric growing mode is chosen, thus the
            semimajor axis length is increased by a factor of (1. +
            ``step``), and the process is repeated until either the
            semimajor axis value reaches the value of parameter
            ``maxsma``, or the last fitted ellipse has more than a given
            fraction of its sampled points flagged out (see ``fflag``).
            The process then resumes from the first fitted ellipse (at
            ``sma0``) inwards, in steps of (1./(1. + ``step``)), until
            the semimajor axis length reaches the value ``minsma``.  In
            case of linear growing, the increment or decrement value is
            given directly by ``step`` in pixels.  If ``maxsma`` is set
            to `None`, the semimajor axis will grow until a low
            signal-to-noise criterion is met.  See ``maxgerr``.
        maxrit : float or `None`, optional
            The maximum value of semimajor axis to perform an actual
            fit.  Whenever the current semimajor axis length is larger
            than ``maxrit``, the isophotes will be extracted using the
            current geometry, without being fitted.  This non-iterative
            mode may be useful for sampling regions of very low surface
            brightness, where the algorithm may become unstable and
            unable to recover reliable geometry information.
            Non-iterative mode can also be entered automatically
            whenever the ellipticity exceeds 1.0 or the ellipse center
            crosses the image boundaries.  If `None` (default), then no
            maximum value is used.
        fix_center : bool, optional
            Keep center of ellipse fixed during fit?  The default is False.
        fix_pa : bool, optional
            Keep position angle of semi-major axis of ellipse fixed during fit?
            The default is False.
        fix_eps : bool, optional
            Keep ellipticity of ellipse fixed during fit?  The default is False.
        Returns
        -------
        result : `~photutils.isophote.IsophoteList` instance
            A list-like object of `~photutils.isophote.Isophote`
            instances, sorted by increasing semimajor axis length.
        """
        # multiple fitted isophotes will be stored here
        isophote_list = []
        # get starting sma from appropriate source: keyword parameter,
        # internal EllipseGeometry instance, or fixed default value.
        # NOTE(review): `not sma0` treats sma0 == 0 the same as "not
        # provided"; sma0 must be a positive length anyway.
        if not sma0:
            if self._geometry:
                sma = self._geometry.sma
            else:
                sma = 10.
        else:
            sma = sma0
        # Override geometry instance with parameters set at the call.
        if isinstance(linear, bool):
            self._geometry.linear_growth = linear
        else:
            linear = self._geometry.linear_growth
        if fix_center and fix_pa and fix_eps:
            warnings.warn(': Everything is fixed. Fit not possible.',
                          AstropyUserWarning)
            return IsophoteList([])
        if fix_center or fix_pa or fix_eps:
            # Note that this overrides the geometry instance for good.
            # The center has two components (x0, y0), hence fix_center
            # appears twice in the fix array.
            self._geometry.fix = np.array([fix_center, fix_center, fix_pa, fix_eps])
        # first, go from initial sma outwards until
        # hitting one of several stopping criteria.
        noiter = False
        first_isophote = True
        while True:
            # first isophote runs longer
            minit_a = 2 * minit if first_isophote else minit
            first_isophote = False
            isophote = self.fit_isophote(sma, step, conver, minit_a, maxit,
                                         fflag, maxgerr, sclip, nclip,
                                         integrmode, linear, maxrit,
                                         noniterate=noiter,
                                         isophote_list=isophote_list)
            # check for failed fit.
            if (isophote.stop_code < 0 or isophote.stop_code == 1):
                # in case the fit failed right at the outset, return an
                # empty list.  This is the usual case when the user
                # provides initial guesses that are too way off to enable
                # the fitting algorithm to find any meaningful solution.
                if len(isophote_list) == 1:
                    warnings.warn('No meaningful fit was possible.',
                                  AstropyUserWarning)
                    return IsophoteList([])
                self._fix_last_isophote(isophote_list, -1)
                # get last isophote from the actual list, since the last
                # `isophote` instance in this context may no longer be OK.
                isophote = isophote_list[-1]
                # if two consecutive isophotes failed to fit,
                # shut off iterative mode.  Or, bail out and
                # change to go inwards.
                if len(isophote_list) > 2:
                    if ((isophote.stop_code == 5 and
                            isophote_list[-2].stop_code == 5)
                            or isophote.stop_code == 1):
                        if maxsma and maxsma > isophote.sma:
                            # if a maximum sma value was provided by
                            # user, and the current sma is smaller than
                            # maxsma, keep growing sma in non-iterative
                            # mode until reaching it.
                            noiter = True
                        else:
                            # if no maximum sma, stop growing and change
                            # to go inwards.
                            break
            # reset variable from the actual list, since the last
            # `isophote` instance may no longer be OK.
            isophote = isophote_list[-1]
            # update sma.  If exceeded user-defined
            # maximum, bail out from this loop.
            sma = isophote.sample.geometry.update_sma(step)
            if maxsma and sma >= maxsma:
                break
        # reset sma so as to go inwards.
        first_isophote = isophote_list[0]
        sma, step = first_isophote.sample.geometry.reset_sma(step)
        # now, go from initial sma inwards towards center.
        while True:
            isophote = self.fit_isophote(sma, step, conver, minit, maxit,
                                         fflag, maxgerr, sclip, nclip,
                                         integrmode, linear, maxrit,
                                         going_inwards=True,
                                         isophote_list=isophote_list)
            # if abnormal condition, fix isophote but keep going.
            if isophote.stop_code < 0:
                self._fix_last_isophote(isophote_list, 0)
            # but if we get an error from the scipy fitter, bail out
            # immediately.  This usually happens at very small radii
            # when the number of data points is too small.
            if isophote.stop_code == 3:
                break
            # reset variable from the actual list, since the last
            # `isophote` instance may no longer be OK.
            isophote = isophote_list[-1]
            # figure out next sma; if exceeded user-defined
            # minimum, or too small, bail out from this loop
            sma = isophote.sample.geometry.update_sma(step)
            if sma <= max(minsma, 0.5):
                break
        # if user asked for minsma=0, extract special isophote there
        if minsma == 0.0:
            # isophote is appended to isophote_list
            _ = self.fit_isophote(0.0, isophote_list=isophote_list)
        # sort list of isophotes according to sma
        isophote_list.sort()
        return IsophoteList(isophote_list)
    def fit_isophote(self, sma, step=0.1, conver=DEFAULT_CONVERGENCE,
                     minit=DEFAULT_MINIT, maxit=DEFAULT_MAXIT,
                     fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR,
                     sclip=3., nclip=0, integrmode=BILINEAR,
                     linear=False, maxrit=None, noniterate=False,
                     going_inwards=False, isophote_list=None):
        """
        Fit a single isophote with a given semimajor axis length.
        The ``step`` and ``linear`` parameters are not used to actually
        grow or shrink the current fitting semimajor axis length.  They
        are necessary so the sampling algorithm can know where to start
        the gradient computation and also how to compute the elliptical
        sector areas (when area integration mode is selected).
        Parameters
        ----------
        sma : float
            The semimajor axis length (pixels).
        step : float, optional
            The step value used to grow/shrink the semimajor axis length
            (pixels if ``linear=True``, or a relative value if
            ``linear=False``).  See the ``linear`` parameter.  The
            default is 0.1.
        conver : float, optional
            The main convergence criterion.  Iterations stop when the
            largest harmonic amplitude becomes smaller (in absolute
            value) than ``conver`` times the harmonic fit rms.  The
            default is 0.05.
        minit : int, optional
            The minimum number of iterations to perform.  A minimum of 10
            (the default) iterations guarantees that, on average, 2
            iterations will be available for fitting each independent
            parameter (the four harmonic amplitudes and the intensity
            level).  For the first isophote, the minimum number of
            iterations is 2 * ``minit`` to ensure that, even departing
            from not-so-good initial values, the algorithm has a better
            chance to converge to a sensible solution.
        maxit : int, optional
            The maximum number of iterations to perform.  The default is
            50.
        fflag : float, optional
            The acceptable fraction of flagged data points in the
            sample.  If the actual fraction of valid data points is
            smaller than this, the iterations will stop and the current
            `~photutils.isophote.Isophote` will be returned.  Flagged
            data points are points that either lie outside the image
            frame, are masked, or were rejected by sigma-clipping.  The
            default is 0.7.
        maxgerr : float, optional
            The maximum acceptable relative error in the local radial
            intensity gradient.  When fitting a single isophote by itself
            this parameter doesn't have any effect on the outcome.
        sclip : float, optional
            The sigma-clip sigma value.  The default is 3.0.
        nclip : int, optional
            The number of sigma-clip iterations.  The default is 0, which
            means sigma-clipping is skipped.
        integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
            The area integration mode.  The default is 'bilinear'.
        linear : bool, optional
            The semimajor axis growing/shrinking mode.  When fitting
            just one isophote, this parameter is used only by the code
            that define the details of how elliptical arc segments
            ("sectors") are extracted from the image when using area
            extraction modes (see the ``integrmode`` parameter).
        maxrit : float or `None`, optional
            The maximum value of semimajor axis to perform an actual
            fit.  Whenever the current semimajor axis length is larger
            than ``maxrit``, the isophotes will be extracted using the
            current geometry, without being fitted.  This non-iterative
            mode may be useful for sampling regions of very low surface
            brightness, where the algorithm may become unstable and
            unable to recover reliable geometry information.
            Non-iterative mode can also be entered automatically
            whenever the ellipticity exceeds 1.0 or the ellipse center
            crosses the image boundaries.  If `None` (default), then no
            maximum value is used.
        noniterate : bool, optional
            Whether the fitting algorithm should be bypassed and an
            isophote should be extracted with the geometry taken
            directly from the most recent `~photutils.isophote.Isophote`
            instance stored in the ``isophote_list`` parameter.  This
            parameter is mainly used when running the method in a loop
            over different values of semimajor axis length, and we want
            to change from iterative to non-iterative mode somewhere
            along the sequence of isophotes.  When set to `True`, this
            parameter overrides the behavior associated with parameter
            ``maxrit``.  The default is `False`.
        going_inwards : bool, optional
            Parameter to define the sense of SMA growth.  When fitting
            just one isophote, this parameter is used only by the code
            that defines the details of how elliptical arc segments
            ("sectors") are extracted from the image, when using area
            extraction modes (see the ``integrmode`` parameter).  The
            default is `False`.
        isophote_list : list or `None`, optional
            If not `None` (the default), the fitted
            `~photutils.isophote.Isophote` instance is appended to this
            list.  It must be created and managed by the caller.
        Returns
        -------
        result : `~photutils.isophote.Isophote` instance
            The fitted isophote.  The fitted isophote is also appended to
            the input list input to the ``isophote_list`` parameter.
        """
        geometry = self._geometry
        # if available, geometry from last fitted isophote will be
        # used as initial guess for next isophote.
        if isophote_list:
            geometry = isophote_list[-1].sample.geometry
        # do the fit
        if noniterate or (maxrit and sma > maxrit):
            isophote = self._non_iterative(sma, step, linear, geometry,
                                           sclip, nclip, integrmode)
        else:
            isophote = self._iterative(sma, step, linear, geometry, sclip,
                                       nclip, integrmode, conver, minit,
                                       maxit, fflag, maxgerr, going_inwards)
        # store result in list
        if isophote_list is not None and isophote.valid:
            isophote_list.append(isophote)
        return isophote
    def _iterative(self, sma, step, linear, geometry, sclip, nclip,
                   integrmode, conver, minit, maxit, fflag, maxgerr,
                   going_inwards=False):
        # Fit one isophote iteratively; dispatches to the special-case
        # central fitter when sma == 0.
        if sma > 0.:
            # iterative fitter
            sample = EllipseSample(self.image, sma, astep=step, sclip=sclip,
                                   nclip=nclip, linear_growth=linear,
                                   geometry=geometry, integrmode=integrmode)
            fitter = EllipseFitter(sample)
        else:
            # sma == 0 requires special handling
            sample = CentralEllipseSample(self.image, 0.0, geometry=geometry)
            fitter = CentralEllipseFitter(sample)
        isophote = fitter.fit(conver, minit, maxit, fflag, maxgerr,
                              going_inwards)
        return isophote
    def _non_iterative(self, sma, step, linear, geometry, sclip, nclip,
                       integrmode):
        # Extract an isophote at the given geometry without fitting it
        # (stop code 4 marks non-iterative extraction).
        sample = EllipseSample(self.image, sma, astep=step, sclip=sclip,
                               nclip=nclip, linear_growth=linear,
                               geometry=geometry, integrmode=integrmode)
        sample.update(geometry.fix)
        # build isophote without iterating with an EllipseFitter
        isophote = Isophote(sample, 0, True, stop_code=4)
        return isophote
    @staticmethod
    def _fix_last_isophote(isophote_list, index):
        # Replace the last (bad) isophote in the list with one rebuilt
        # from the geometry of the index-th isophote.
        if isophote_list:
            isophote = isophote_list.pop()
            # check if isophote is bad; if so, fix its geometry
            # to be like the geometry of the index-th isophote
            # in list.
            isophote.fix_geometry(isophote_list[index])
            # force new extraction of raw data, since
            # geometry changed.
            isophote.sample.values = None
            isophote.sample.update(isophote.sample.geometry.fix)
            # we take the opportunity to change an eventual
            # negative stop code to its positive equivalent.
            code = (5 if isophote.stop_code < 0 else isophote.stop_code)
            # build new instance so it can have its attributes
            # populated from the updated sample attributes.
            new_isophote = Isophote(isophote.sample, isophote.niter,
                                    isophote.valid, code)
            # add new isophote to list
            isophote_list.append(new_isophote)
|
astropy/photutils
|
photutils/isophote/ellipse.py
|
Python
|
bsd-3-clause
| 33,805
|
[
"Galaxy"
] |
740526071003fb0b94d236451a6687374bac988e6471710eca0eb84f156649ef
|
#!/usr/bin/env python
# Copyright 2017 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pipelines_utils import parameter_utils, utils
from pipelines_utils_rdkit import rdkit_utils
### start main execution #########################################
def main():
    """Entry point: screen input molecules with the poised reaction SMARTS
    filters and write out the matches together with their synthons.

    Molecules are read via the default I/O args; every reaction filter is run
    over each molecule and, for the ones that match, the generated synthons are
    recorded both as mol properties (used by SDF output) and as MoleculeObject
    dicts (used by JSON output).
    """
    ### command line args definitions ########################################
    parser = argparse.ArgumentParser(description='RDKit rxn smarts filter')
    parameter_utils.add_default_io_args(parser)
    parser.add_argument('-q', '--quiet', action='store_true', help='Quiet mode')
    parser.add_argument('-m', '--multi', action='store_true', help='Output one file for each reaction')
    parser.add_argument('--thin', action='store_true', help='Thin output mode')
    args = parser.parse_args()
    utils.log("Screen Args: ", args)
    if not args.output and args.multi:
        raise ValueError("Must specify output location when writing individual result files")
    ### Define the filter chooser - lots of logic possible
    # SMARTS patterns are defined in poised_filter.py. Currently this is hardcoded.
    # Should make this configurable so that this can be specified by the user at some stage.
    poised_filter = True
    if poised_filter:  # idiomatic truth test (was: == True)
        from .poised_filter import Filter
        filter_to_use = Filter()
    rxn_names = filter_to_use.get_rxn_names()
    utils.log("Using", len(rxn_names), "reaction filters")
    # handle metadata
    source = "rxn_smarts_filter.py"
    datasetMetaProps = {"source": source, "description": "Reaction SMARTS filter"}
    clsMappings = {}
    fieldMetaProps = []
    for name in rxn_names:
        # this is the Java class type for an array of MoleculeObjects
        clsMappings[name] = "[Lorg.squonk.types.MoleculeObject;"
        # "Synthons" spelling fixed (was "Sythons")
        fieldMetaProps.append({"fieldName": name,
                               "values": {"source": source,
                                          "description": "Synthons from " + name + " reaction"}})
    # input_stream/output_stream renamed from input/output to avoid shadowing builtins
    input_stream, output_stream, suppl, writer, output_base = \
        rdkit_utils.default_open_input_output(
            args.input, args.informat, args.output,
            'rxn_smarts_filter', args.outformat, thinOutput=args.thin,
            valueClassMappings=clsMappings, datasetMetaProps=datasetMetaProps,
            fieldMetaProps=fieldMetaProps)
    i = 0      # number of molecules read
    count = 0  # number of molecules matching at least one filter
    if args.multi:
        dir_base = os.path.dirname(args.output)
        writer_dict = filter_to_use.get_writers(dir_base)
    else:
        writer_dict = None
        dir_base = None
    for mol in suppl:
        i += 1
        if mol is None:
            continue
        # dict keyed by reaction name -> list of synthon SMILES for this mol
        filter_pass = filter_to_use.pass_filter(mol)
        if not args.quiet:  # honour the -q flag (previously parsed but never used)
            utils.log("Found", str(len(filter_pass)), "matches")
        if filter_pass:
            props = {}
            count += 1
            for reaction in filter_pass:
                molObjList = []
                # Write the reaction name as a newline separated list of the
                # synthons to the mol object - this is used in SDF output
                mol.SetProp(reaction, "\n".join(filter_pass[reaction]))
                # now write to the props in a way that can be used for the JSON output
                for smiles in filter_pass[reaction]:
                    # generate a dict that generates MoleculeObject JSON
                    mo = utils.generate_molecule_object_dict(smiles, "smiles", None)
                    molObjList.append(mo)
                props[reaction] = molObjList
                if args.multi:
                    writer_dict[reaction].write(mol)
                    writer_dict[reaction].flush()
            # write the output.
            # In JSON format the props will override values set on the mol
            # In SDF format the props are ignored so the values in the mol are used
            writer.write(mol, props)
            writer.flush()
    utils.log("Matched", count, "molecules from a total of", i)
    if dir_base:
        utils.log("Individual SD files found in: " + dir_base)
    writer.flush()
    writer.close()
    if input_stream:
        input_stream.close()
    if output_stream:
        output_stream.close()
    # close the individual writers
    if writer_dict:
        for key in writer_dict:
            writer_dict[key].close()
    # NOTE(review): args.meta is presumably added by add_default_io_args -- confirm
    if args.meta:
        utils.write_metrics(output_base, {'__InputCount__': i,
                                          '__OutputCount__': count,
                                          'RxnSmartsFilter': count})
# Only run the filter when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
InformaticsMatters/pipelines
|
src/python/pipelines/rdkit/rxn_smarts_filter.py
|
Python
|
apache-2.0
| 4,919
|
[
"RDKit"
] |
ce8a5ee79cce3f2367fc4a40fcb60d9660f5c00dcbc3e1e83f0306bf1c3ef671
|
# coding: utf-8
# Copyright (c) 2013 Jorge Javier Araya Navarro <jorgean@lavabit.org>
#
# This file is free software: you may copy, redistribute and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""
SummaNode: the basic element of cocos2d
"""
__docformat__ = 'restructuredtext'
import bisect, copy
import pyglet
from pyglet import gl
from director import director
from camera import Camera
import euclid
import math
import weakref
__all__ = ['SummaNode']
class SummaNode(object):
    """
    SummaNode is the main element. Anything that gets drawn or contains things
    that get drawn is a summanode. The most popular summanodes are scenes,
    layers and sprites.
    The main features of a summanode are:
    - They can contain other cocos nodes (add, get, remove, etc)
    - They can schedule periodic callback (schedule, schedule_interval, etc)
    - They can execute actions (do, pause, stop, etc)
    Some summanodes provide extra functionality for them or their children.
    Subclassing a summanode usually means (one/all) of:
    - overriding __init__ to initialize resources and schedule callbacks
    - create callbacks to handle the advancement of time
    - overriding draw to render the node
    """
    def __init__(self):
        """Initialize an empty, stopped node with identity transforms."""
        # composition stuff
        #: list of (int, child-reference) where int is the z-order, sorted by
        #: ascending z (back to front order)
        self.children = []
        #: dictionary that maps children names with children references
        self.children_names = {}
        # weak reference to the parent node; see the `parent` property
        self._parent = None
        # drawing stuff
        #: x-position of the object relative to its
        # parent's children_anchor_x value.
        #: Default: 0
        self._x = 0
        #: y-position of the object relative to its
        # parent's children_anchor_y value.
        #: Default: 0
        self._y = 0
        #: a float, alters the scale of this node and its children.
        #: Default: 1.0
        self._scale = 1.0
        #: a float, in degrees, alters the rotation
        # of this node and its children.
        #: Default: 0.0
        self._rotation = 0.0
        #: eye, center and up vector for the `Camera`.
        #: gluLookAt() is used with these values.
        #: Default: FOV 60, center of the screen.
        #: IMPORTANT: The camera can perform exactly the same
        #: transformation as ``scale``, ``rotation`` and the
        #: ``x``, ``y`` attributes (with the exception that the
        #: camera can modify also the z-coordinate)
        #: In fact, they all transform the same matrix, so
        #: use either the camera or the other attributes, but not both
        #: since the camera will be overridden by the transformations done
        #: by the other attributes.
        #: You can change the camera manually or by using the `Camera3DAction`
        #: action.
        self.camera = Camera()
        #: offset from (x,0) from where rotation and scale will be applied.
        #: Default: 0
        self.transform_anchor_x = 0
        #: offset from (0,y) from where rotation and scale will be applied.
        #: Default: 0
        self.transform_anchor_y = 0
        #: whether of not the object and his childrens are visible.
        #: Default: True
        self.visible = True
        #: the grid object for the grid actions.
        #: This can be a `Grid3D` or a `TiledGrid3D` object depending
        #: on the action.
        self.grid = None
        # actions stuff
        #: list of `Action` objects that are running
        self.actions = []
        #: list of `Action` objects to be removed
        self.to_remove = []
        #: whether or not the next frame will be skipped
        self.skip_frame = False
        # schedule stuff
        self.scheduled = False # deprecated, soon to be removed
        self.scheduled_calls = [] #: list of scheduled callbacks
        self.scheduled_interval_calls = [] #: list of scheduled interval callbacks
        self.is_running = False #: whether of not the object is running
        # matrix stuff
        # cached local transform and its inverse; rebuilt lazily when the
        # corresponding *_dirty flag is set (see __dirty / get_local_transform)
        self.is_transform_dirty = False
        self.transform_matrix = euclid.Matrix3().identity()
        self.is_inverse_transform_dirty = False
        self.inverse_transform_matrix = euclid.Matrix3().identity()
def make_property(attr):
def set_attr():
def inner(self, value):
setattr(self, "_".join(["transform", attr]), value)
return inner
def get_attr():
def inner(self):
return getattr(self,"_".join(["transform", attr]))
return inner
return property(
get_attr(),
set_attr(),
doc="""a property to get fast access to [transform_|children_]
:type: (int,int)
"""+attr )
#: Anchor point of the object.
#: Children will be added at this point
#: and transformations like scaling and rotation will use this point
#: as the center
anchor = make_property("anchor")
#: Anchor x value for transformations and adding children
anchor_x = make_property("anchor_x")
#: Anchor y value for transformations and adding children
anchor_y = make_property("anchor_y")
def make_property(attr):
def set_attr():
def inner(self, value):
setattr(self, "_".join([attr, "x"]), value[0])
setattr(self, "_".join([attr, "y"]), value[1])
return inner
def get_attr(self):
return (getattr(self, "_".join([attr, "x"])),
getattr(self, "_".join([attr, "y"])))
return property(
get_attr,
set_attr(),
doc='''a property to get fast access to "+attr+"_[x|y]
:type: (int,int)
''')
#: Transformation anchor point.
#: Transformations like scaling and rotation
#: will use this point as it's center
transform_anchor = make_property("transform_anchor")
del make_property
def schedule_interval(self, callback, interval, *args, **kwargs):
"""
Schedule a function to be called every `interval` seconds.
Specifying an interval of 0 prevents the function from being
called again (see `schedule` to call a function as often as possible).
The callback function prototype is the same as for `schedule`.
:Parameters:
`callback` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
This function is a wrapper to pyglet.clock.schedule_interval.
It has the additional benefit that all calllbacks are paused and
resumed when the node leaves or enters a scene.
You should not have to schedule things using pyglet by yourself.
"""
if self.is_running:
pyglet.clock.schedule_interval(callback, interval, *args, **kwargs)
self.scheduled_interval_calls.append(
(callback, interval, args, kwargs))
def schedule(self, callback, *args, **kwargs):
"""
Schedule a function to be called every frame.
The function should have a prototype that includes ``dt`` as the
first argument, which gives the elapsed time, in seconds, since the
last clock tick. Any additional arguments given to this function
are passed on to the callback::
def callback(dt, *args, **kwargs):
pass
:Parameters:
`callback` : function
The function to call each frame.
This function is a wrapper to pyglet.clock.schedule.
It has the additional benefit that all calllbacks are paused and
resumed when the node leaves or enters a scene.
You should not have to schedule things using pyglet by yourself.
"""
if self.is_running:
pyglet.clock.schedule(callback, *args, **kwargs)
self.scheduled_calls.append(
(callback, args, kwargs))
def unschedule(self, callback):
"""
Remove a function from the schedule.
If the function appears in the schedule more than once, all occurances
are removed. If the function was not scheduled, no error is raised.
:Parameters:
`callback` : function
The function to remove from the schedule.
This function is a wrapper to pyglet.clock.unschedule.
It has the additional benefit that all calllbacks are paused and
resumed when the node leaves or enters a scene.
You should not unschedule things using pyglet that where scheduled
by node.schedule/node.schedule_interface.
"""
total_len = len(self.scheduled_calls + self.scheduled_interval_calls)
self.scheduled_calls = [
c for c in self.scheduled_calls if c[0] != callback
]
self.scheduled_interval_calls = [
c for c in self.scheduled_interval_calls if c[0] != callback
]
if self.is_running:
pyglet.clock.unschedule( callback )
def resume_scheduler(self):
"""
Time will continue/start passing for this node and callbacks
will be called, worker actions will be called
"""
for c, i, a, k in self.scheduled_interval_calls:
pyglet.clock.schedule_interval(c, i, *a, **k)
for c, a, k in self.scheduled_calls:
pyglet.clock.schedule(c, *a, **k)
def pause_scheduler(self):
"""
Time will stop passing for this node: scheduled callbacks will
not be called, worker actions will not be called
"""
for f in set(
[ x[0] for x in self.scheduled_interval_calls ] +
[ x[0] for x in self.scheduled_calls ]):
pyglet.clock.unschedule(f)
for arg in self.scheduled_calls:
pyglet.clock.unschedule(arg[0])
@property
def parent(self):
""" The parent of this object
:type: object
"""
if self._parent is None:
return None
else:
return self._parent()
@parent.setter
def parent(self, parent):
if parent is None:
self._parent = None
else:
self._parent = weakref.ref(parent)
def get_ancestor(self, klass):
"""
Walks the nodes tree upwards until it finds a node of the class `klass`
or returns None
:rtype: `SummaNode` or None
"""
if isinstance(self, klass):
return self
parent = self.parent
if parent:
return parent.get_ancestor( klass )
    #
    # Transform properties
    #
    def __dirty(self, transform_dirty=True, inverse_transform=True):
        # Mark the cached transform matrices stale; they are rebuilt lazily
        # by get_local_transform / get_local_inverse.
        self.is_transform_dirty = transform_dirty
        self.is_inverse_transform_dirty = inverse_transform
    @property
    def x(self):
        """ The x coordinate of the object
        """
        return self._x
    @x.setter
    def x(self, value):
        self._x = value
        self.__dirty()
    @property
    def y(self):
        """The y coordinate of the object
        """
        return self._y
    @y.setter
    def y(self, value):
        self._y = value
        self.__dirty()
    @property
    def position(self):
        """ The (X, Y) coordinates of the object
        :type: (int, int)
        """
        return (self._x, self._y)
    @position.setter
    # NOTE(review): tuple parameter unpacking below is Python 2-only syntax;
    # this module cannot run unmodified under Python 3.
    def position(self, (x, y)):
        self._x = x
        self._y = y
        self.__dirty()
    @property
    def scale(self):
        """ The scale of the object
        """
        return self._scale
    @scale.setter
    def scale(self, s):
        self._scale = s
        self.__dirty()
    @property
    def rotation(self):
        """ The rotation of the object
        """
        return self._rotation
    @rotation.setter
    def rotation(self, a):
        self._rotation = a
        self.__dirty()
def add(self, child, z=0, name=None):
"""Adds a child and if it becomes part of the active scene
calls its on_enter method
:Parameters:
`child` : SummaNode
object to be added
`z` : float
the z index of self
`name` : str
Name of the child
:rtype: `SummaNode` instance
:return: self
"""
# child must be a subclass of supported_classes
#if not isinstance( child, self.supported_classes ):
# raise TypeError("%s is not instance of: %s" % (type(child), self.supported_classes) )
if name:
if name in self.children_names:
raise Exception("Name already exists: %s" % name )
else:
self.children_names[name] = child
if not isinstance(z, int):
raise TypeError("Z index is not an int object but {}".format(
type(z)))
child.parent = self
elem = z, child
bisect.insort(self.children, elem)
if self.is_running:
child.on_enter()
return self
def kill(self):
'''Remove this object from its parent, and thus most likely from
everything.
'''
self.parent.remove(self)
def remove( self, obj ):
"""Removes a child given its name or object
If the node was added with name, it is better to remove by name, else
the name will be unavailable for further adds ( and will raise Exception
if add with this same name is attempted)
If the node was part of the active scene, its on_exit method will be called.
:Parameters:
`obj` : string or object
name of the reference to be removed
or object to be removed
"""
if isinstance(obj, str):
if obj in self.children_names:
child = self.children_names.pop( obj )
self._remove( child )
else:
raise Exception("Child not found: {}".format(obj))
else:
self._remove(obj)
def _remove(self, child):
l_old = len(self.children)
self.children = [ (z,c) for (z,c) in self.children if c != child ]
if l_old == len(self.children):
raise Exception("Child not found: %s" % str(child) )
if self.is_running:
child.on_exit()
def get_children(self):
"""Return a list with the node's childs, order is back to front
:rtype: list of SummaNode
:return: childs of this node, ordered back to front
"""
return [ c for (z, c) in self.children ]
def __contains__(self, child):
return child in self.get_children()
def get(self, name):
"""Gets a child given its name
:Parameters:
`name` : string
name of the reference to be get
:rtype: SummaNode
:return: the child named 'name'. Will raise Exception if not present
Warning: if a node is added with name, then removed not by name,
the name cannot be recycled: attempting to add other node with this
name will produce an Exception.
"""
if name in self.children_names:
return self.children_names[name]
else:
raise Exception("Child not found: {}".format(name))
def on_enter(self):
"""
Called every time just before the node enters the stage.
scheduled calls and worker actions begins or continues to perform
Good point to do .push_handlers if you have custom ones
Rule: a handler pushed there is near certain to require a .pop_handlers
in the .on_exit method (else it will be called even after removed from
the active scene, or, if going on stage again will be called multiple
times for each event ocurrence)
"""
self.is_running = True
# start actions
self.resume()
# resume scheduler
self.resume_scheduler()
# propagate
for c in self.get_children():
c.on_enter()
def on_exit(self):
"""
Called every time just before the node leaves the stage
scheduled calls and worker actions are suspended, that is, will not
be called until an on_enter event happens.
Most of the time you will want to .pop_handlers for all explicit
.push_handlers found in on_enter
Consider to release here openGL resources created by this node, like
compiled vertex lists
"""
self.is_running = False
# pause actions
self.pause()
# pause callbacks
self.pause_scheduler()
# propagate
for c in self.get_children():
c.on_exit()
    def transform(self):
        """
        Apply ModelView transformations
        you will most likely want to wrap calls to this function with
        glPushMatrix/glPopMatrix
        """
        # NOTE(review): x and y are never used below -- presumably a leftover;
        # confirm director.get_window_size() has no required side effect.
        x, y = director.get_window_size()
        if not(self.grid and self.grid.active):
            # only apply the camera if the grid is not active
            # otherwise, the camera will be applied inside the grid
            self.camera.locate()
        # translate to the node position, then to the transform anchor so that
        # the rotation and scale below pivot around the anchor point
        gl.glTranslatef( self.position[0], self.position[1], 0 )
        gl.glTranslatef( self.transform_anchor_x, self.transform_anchor_y, 0 )
        if self.rotation != 0.0:
            gl.glRotatef( -self._rotation, 0, 0, 1)
        if self.scale != 1.0:
            gl.glScalef( self._scale, self._scale, 1)
        # undo the anchor translation so drawing happens in node coordinates
        if self.transform_anchor != (0,0):
            gl.glTranslatef(
                - self.transform_anchor_x,
                - self.transform_anchor_y,
                0 )
def walk(self, callback, collect=None):
"""
Executes callback on all the subtree starting at self.
returns a list of all return values that are not none
:Parameters:
`callback` : function
callable, takes a summanode as argument
`collect` : list
list of visited nodes
:rtype: list
:return: the list of not-none return values
"""
if collect is None:
collect = []
r = callback(self)
if r is not None:
collect.append( r )
for node in self.get_children():
node.walk(callback, collect)
return collect
    def visit(self):
        '''
        This function *visits* its children in a recursive
        way.
        It will first *visit* the children that
        have a z-order value less than 0.
        Then it will call the `draw` method to
        draw itself.
        And finally it will *visit* the rest of the
        children (the ones with a z-value bigger
        or equal than 0)
        Before *visiting* any children it will call
        the `transform` method to apply any possible
        transformation.
        '''
        if not self.visible:
            return
        # index of the first child with z >= 0 (found in the loop below)
        position = 0
        if self.grid and self.grid.active:
            self.grid.before_draw()
        # we visit all nodes that should be drawn before ourselves
        if self.children and self.children[0][0] < 0:
            gl.glPushMatrix()
            self.transform()
            for z,c in self.children:
                if z >= 0: break
                position += 1
                c.visit()
            gl.glPopMatrix()
        # we draw ourselves
        self.draw()
        # we visit all the remaining nodes, that are over ourselves
        if position < len(self.children):
            gl.glPushMatrix()
            self.transform()
            for z,c in self.children[position:]:
                c.visit()
            gl.glPopMatrix()
        if self.grid and self.grid.active:
            self.grid.after_draw( self.camera )
    def draw(self, *args, **kwargs):
        """
        This is the function you will have to override if you want your
        subclassed to draw something on screen.
        You *must* respect the position, scale, rotation and anchor attributes.
        If you want OpenGL to do the scaling for you, you can::
            def draw(self):
                glPushMatrix()
                self.transform()
                # ... draw ..
                glPopMatrix()
        """
        # base class draws nothing; subclasses override
        pass
    def do(self, action, target=None):
        '''Executes an *action*.
        When the action finished, it will be removed from the node's actions
        container.

        :Parameters:
            `action` : an `Action` instance
                Action that will be executed.
        :rtype: `Action` instance
        :return: A clone of *action*

        to remove an action you must use the .do return value to
        call .remove_action
        '''
        # deep-copied so the same template action can be handed to many nodes
        a = copy.deepcopy( action )
        if target is None:
            a.target = self
        else:
            a.target = target
        a.start()
        self.actions.append( a )
        # lazily start pumping _step once there is something to run
        if not self.scheduled:
            if self.is_running:
                self.scheduled = True
                pyglet.clock.schedule(self._step)
        return a
    def remove_action(self, action):
        """Removes an action from the node actions container, potentially
        calling action.stop()
        If action was running, action.stop is called
        Mandatory interface to remove actions in the node actions container.
        When skipping this there is the possibility to
        double call the action.stop

        :Parameters:
            `action` : Action
                Action to be removed
                Must be the return value for a .do(...) call
        """
        assert action in self.actions
        # the flag guards against stopping the same action twice; the actual
        # removal from self.actions is deferred to the next _step call
        if not action.scheduled_to_remove:
            action.scheduled_to_remove = True
            action.stop()
            action.target = None
            self.to_remove.append( action )
def pause(self):
"""
Suspends the execution of actions.
"""
if not self.scheduled:
return
self.scheduled = False
pyglet.clock.unschedule( self._step )
def resume(self):
"""
Resumes the execution of actions.
"""
if self.scheduled:
return
self.scheduled = True
pyglet.clock.schedule( self._step )
self.skip_frame = True
def stop(self):
"""
Removes all actions from the running action list
For each action running the stop method will be called,
and the action will be retired from the actions container.
"""
for action in self.actions:
self.remove_action(action)
def are_actions_running(self):
"""
Determine whether any actions are running.
"""
return bool(set(self.actions) - set(self.to_remove))
    def _step(self, dt):
        """pumps all the actions in the node actions container
        The actions scheduled to be removed are removed
        Then an action.step() is called for each action in the
        node actions container, and if the action doesn't need any more step
        calls it will be scheduled to remove. When scheduled to remove,
        the stop method for the action is called.

        :Parameters:
            `dt` : delta_time
                The time that elapsed since that last time this functions was called.
        """
        # flush the actions queued for removal during the previous frame
        for x in self.to_remove:
            if x in self.actions:
                self.actions.remove(x)
        self.to_remove = []
        # one frame is skipped right after resume() -- presumably so actions
        # do not see the dt accumulated while the node was paused
        if self.skip_frame:
            self.skip_frame = False
            return
        # nothing left to run: stop pumping until the next do()
        if len( self.actions ) == 0:
            self.scheduled = False
            pyglet.clock.unschedule(self._step)
        for action in self.actions:
            # an action may have been scheduled to remove by a previous one
            if not action.scheduled_to_remove:
                action.step(dt)
                if action.done():
                    self.remove_action(action)
    # world to local / local to world methods
    def get_local_transform(self):
        '''returns an euclid.Matrix3 with the local transformation matrix
        :rtype: euclid.Matrix3
        '''
        # rebuilt lazily: only recomputed after __dirty() marked it stale
        if self.is_transform_dirty:
            matrix = euclid.Matrix3().identity()
            matrix.translate(self._x, self._y)
            # translate to the anchor, rotate/scale, translate back, so the
            # anchor acts as the pivot point (mirrors transform())
            matrix.translate( self.transform_anchor_x, self.transform_anchor_y )
            matrix.rotate( math.radians(-self.rotation) )
            matrix.scale(self._scale, self._scale)
            matrix.translate( -self.transform_anchor_x, -self.transform_anchor_y )
            self.is_transform_dirty = False
            self.transform_matrix = matrix
        return self.transform_matrix
    def get_world_transform( self ):
        '''returns an euclid.Matrix3 with the world transformation matrix
        :rtype: euclid.Matrix3
        '''
        # compose local matrices up the ancestor chain (parent applied last)
        matrix = self.get_local_transform()
        p = self.parent
        while p != None:
            matrix = p.get_local_transform() * matrix
            p = p.parent
        return matrix
    def point_to_world(self, p):
        '''returns an euclid.Vector2 converted to world space
        :rtype: euclid.Vector2
        '''
        v = euclid.Point2( p[0], p[1] )
        matrix = self.get_world_transform()
        return matrix * v
    def get_local_inverse(self):
        '''returns an euclid.Matrix3 with the local inverse transformation matrix
        :rtype: euclid.Matrix3
        '''
        # cached; invalidated by __dirty()
        if self.is_inverse_transform_dirty:
            matrix = self.get_local_transform().inverse()
            self.inverse_transform_matrix = matrix
            self.is_inverse_transform_dirty = False
        return self.inverse_transform_matrix
    def get_world_inverse(self):
        '''returns an euclid.Matrix3 with the world inverse transformation matrix
        :rtype: euclid.Matrix3
        '''
        # inverse composition: local inverse first, then each ancestor's
        matrix = self.get_local_inverse()
        p = self.parent
        while p != None:
            matrix = matrix * p.get_local_inverse()
            p = p.parent
        return matrix
    def point_to_local( self, p ):
        '''returns an euclid.Vector2 converted to local space
        :rtype: euclid.Vector2
        '''
        v = euclid.Point2( p[0], p[1] )
        matrix = self.get_world_inverse()
        return matrix * v
|
shackra/thomas-aquinas
|
summa/summanode.py
|
Python
|
bsd-3-clause
| 28,940
|
[
"VisIt"
] |
67eccbcfc188d946c35d11906f345b21776c68b186e061b1140aba87ee24bc37
|
# Copyright (c) 2015, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import os,sys
import pickle
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import rdChemReactions, AllChem
from rdkit import Geometry
from rdkit import RDConfig
import itertools, time
test_data = [("good", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
# chemdraw style
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
("fail", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R3 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
]
unused_rlabel_in_product = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
0.1604 0.3798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.1604 -0.3798 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0
M END
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
-1.2690 -1.3345 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1.2690 1.3345 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0
M END
"""
kekule_rxn = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
RDKit 2D
6 6 0 0 0 0 0 0 0 0999 V2000
1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 2 0
3 4 1 0
4 5 2 0
5 6 1 0
6 1 2 0
M END
$MOL
RDKit 2D
6 6 0 0 0 0 0 0 0 0999 V2000
1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 2 0
3 4 1 0
4 5 2 0
5 6 1 0
6 1 2 0
M END
"""
good_res = (0,0,2,1,(((0, 'halogen.bromine.aromatic'),), ((1, 'boronicacid'),)))
bad_res = (3,0,2,1,(((0, 'halogen.bromine.aromatic'),), ((1, 'boronicacid'),)))
class TestCase(unittest.TestCase):
    """Regression tests for reaction preprocessing and sanitization."""

    def test_sanitize(self):
        # For each reference RXN block, run PreprocessReaction both before
        # and after SanitizeRxn and compare against the expected tuples.
        for status, block in test_data:
            print("*" * 44)
            rxna = AllChem.ReactionFromRxnBlock(block)
            rxnb = AllChem.ReactionFromRxnBlock(block)
            rxna.Initialize()
            res = rdChemReactions.PreprocessReaction(rxna)
            print(AllChem.ReactionToRxnBlock(rxna))
            # assertEquals is a deprecated alias; use assertEqual.
            if status == "good":
                self.assertEqual(res, good_res)
            elif status == "bad":
                self.assertEqual(res, bad_res)
            print(">" * 44)
            rxnb.Initialize()
            try:
                rdChemReactions.SanitizeRxn(rxnb)
                res = rdChemReactions.PreprocessReaction(rxnb)
                print(AllChem.ReactionToRxnBlock(rxnb))
                self.assertEqual(res, good_res)
                # For "fail" inputs we *expect* to land in the except branch;
                # the AssertionError below is deliberately caught there.
                assert status != "fail"
            except Exception:
                # Narrowed from a bare except: still catches the AssertionError
                # above, but no longer swallows KeyboardInterrupt/SystemExit.
                print("$RXN Failed")
                if status == "fail":
                    continue
                raise

    def test_unused_rlabel_in_product(self):
        """Regression test: sanitizing a reaction whose product has an
        unmapped R-group label must not crash (was a segfault)."""
        rxn = AllChem.ReactionFromRxnBlock(unused_rlabel_in_product)
        rdChemReactions.SanitizeRxn(rxn)

    def test_only_aromatize_if_possible(self):
        """A kekulized template should only match benzene after
        sanitization aromatizes the reaction templates."""
        rxn = AllChem.ReactionFromRxnBlock(kekule_rxn)
        # Unsanitized kekulized template must not match the aromatic query.
        groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
        print(groups)
        self.assertFalse(len(groups))
        # After normal sanitization the match succeeds.
        rdChemReactions.SanitizeRxn(rxn)
        groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
        self.assertTrue(len(groups[0]))
        # Repeat from a fresh parse to ensure sanitization is not
        # order-dependent on earlier RunReactants calls.
        rxn = AllChem.ReactionFromRxnBlock(kekule_rxn)
        rdChemReactions.SanitizeRxn(rxn)
        groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
        self.assertTrue(len(groups[0]))

    def test_github_4162(self):
        """Regression test: SanitizeRxn must work on copies and on
        reactions round-tripped through ToBinary and pickle."""
        rxn = rdChemReactions.ReactionFromSmarts(
            "[C:1](=[O:2])-[OD1].[N!H0:3]>>[C:1](=[O:2])[N:3]")
        rxn_copy = rdChemReactions.ChemicalReaction(rxn)
        rdChemReactions.SanitizeRxn(rxn)
        rdChemReactions.SanitizeRxn(rxn_copy)
        pkl = rxn.ToBinary()
        rxn_from_pickle = rdChemReactions.ChemicalReaction(pkl)
        rdChemReactions.SanitizeRxn(rxn_from_pickle)
        pkl = pickle.dumps(rxn)
        rxn_from_pickle = pickle.loads(pkl)
        rdChemReactions.SanitizeRxn(rxn_from_pickle)
        pkl = rxn_from_pickle.ToBinary()
        rxn_from_pickle = rdChemReactions.ChemicalReaction(pkl)
        rdChemReactions.SanitizeRxn(rxn_from_pickle)
        pkl = pickle.dumps(rxn_from_pickle)
        rxn_from_pickle = pickle.loads(pkl)
        rdChemReactions.SanitizeRxn(rxn_from_pickle)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
bp-kelley/rdkit
|
Code/GraphMol/ChemReactions/Wrap/testSanitize.py
|
Python
|
bsd-3-clause
| 10,894
|
[
"RDKit"
] |
4de2a2c50bad3de37dab6016441e318e8914374af7780d52762482abb4d8be20
|
'''
module: utils.py
use: contains functions associated with general functionality that are not unique to any particular part of the project
'''
import numpy as np
#from scipy.stats import mode #this isnt actually used i think
def getDifferenceArray(vector):
    '''
    Purpose:
        Takes an m by n array and returns a symmetric m by m array whose
        elements measure the difference between rows of the input:
        array[i,j] = ||vector[i,:] - vector[j,:]||
        NOTE(review): the original docstring claimed ||.||^2, but the code
        computes the (unsquared) Euclidean norm via np.linalg.norm -- confirm
        which was intended before relying on squared distances.
    Inputs:
        vector - m by n ndarray type representing a set of joint positions, for example
    Outputs:
        array - m by m ndarray with the i-jth element equal to the norm of the
        difference between the ith and jth rows of vector
    '''
    vec_len = len(vector)
    array = np.zeros((vec_len,vec_len))
    # Fill only the upper triangle (j >= i); the matrix is symmetric.
    for i in range(0,vec_len):
        for j in range(i,vec_len):
            array[i,j] = np.linalg.norm((vector[i,:]-vector[j,:]))
    # Mirror the upper triangle into the lower triangle.
    array = symmetrize(array)
    return array
def getSimilarityArray(feature_array,similarity_method = 'exp',k_nn = 5):
    '''
    Purpose:
        Computes the similarity array for a given feature set, similarity method, and k_nearest_neighbors value
        Part of the spectral clustering process
    Inputs:
        feature_array - set of features
        similarity_method - method to use for computing the similarity array:
            --'exp' computes W[i,j] = exp(-||xi - xj||^2 / 2)
            --'norm' computes W[i,j] = ||xi - xj||^2
            --'chain' is specifically for the 'chain' generateData type
        k_nn - number of nearest neighbors to consider (k_nn=5 means only the top 5 largest similarity values are kept nonzero); -1 disables the sparsification step
    Outputs:
        sim_array - symmetric array of similarity strength values
    '''
    allowed_methods = ['exp','norm','chain']
    if similarity_method not in allowed_methods:
        # NOTE: returns None implicitly on a bad method (Python 2 print).
        print 'ERROR: Not a valid similarity_method'
        return
    else:
        sim_array = np.zeros((len(feature_array),len(feature_array)))
        # i indexes the outer row, j the inner row; only the upper
        # triangle (i <= j) is computed, then symmetrized below.
        i = 0
        j = 0
        for rowi in feature_array:
            for rowj in feature_array:
                if i <= j:
                    difference = (rowi-rowj).T
                    if similarity_method == 'exp':
                        # Gaussian kernel of the squared distance.
                        sim_array[i,j] = np.exp(-1*((difference.T).dot(difference)))
                    elif similarity_method == 'norm':
                        # Raw squared Euclidean distance.
                        sim_array[i,j] = difference.T.dot(difference)
                    elif similarity_method == 'chain':
                        # Connect only immediate neighbors, leaving a break
                        # in the middle of the chain between the two halves.
                        if np.linalg.norm(difference) <= 1.5:
                            if ((i != int(len(feature_array)/2.)-1) and (j != int(len(feature_array)/2.))):
                                sim_array[i,j] = 1
                            if i == j:
                                sim_array[i,j] = 1
                j += 1
            i += 1
            j = 0
        sim_array = sim_array - np.diag(sim_array.diagonal()) #remove diagonal nonzero values
        # Keep only the k_nn largest entries per row (k_nn == -1 keeps all).
        if k_nn != -1:
            for rowi in sim_array:
                ind = np.argpartition(rowi, -1*k_nn)[(-1*k_nn):]
                for i in range(len(rowi)):
                    if i not in ind:
                        rowi[i] = 0;
        return symmetrize(sim_array)
def symmetrize(array):
    '''
    Purpose:
        Mirror an upper- or lower-triangular array into a full symmetric array.
    Inputs:
        array - upper OR lower triangular ndarray
    Outputs:
        symmetric ndarray of the same shape
    '''
    mirrored = array + array.T
    # The diagonal was counted twice by the sum above, so subtract one copy.
    return mirrored - np.diag(array.diagonal())
def normalize(array,normalizer):
    '''
    Purpose:
        Scale an array by the reciprocal of 'normalizer'.
    Inputs:
        array - an ndarray type
        normalizer - non int-type value
    Outputs:
        the elementwise result of (array / normalizer)
    '''
    # Multiply by the float reciprocal so integer inputs still divide exactly.
    scale = 1.0 / normalizer
    return scale * array
def runningAvg(vector,N):
    '''
    Purpose:
        Smooth a 1d array with an N-point running average.
    Inputs:
        vector - ndarray 1-dimensional array
        N - number of elements to average over
    Outputs:
        array of the same length as vector containing the N-point running
        average (trailing entries average over fewer real samples)
    '''
    # Convolve with a uniform window of weight 1/N, then drop the first
    # N-1 partial entries so the output length matches the input length.
    window = np.ones(N,) / float(N)
    smoothed = np.convolve(vector, window)
    return smoothed[N - 1:]
def numOutsideBounds(_input,bounds):
    '''
    Purpose:
        Count how many elements of an input vector violate elementwise bounds.
    Inputs:
        _input - n-length ndarray
        bounds - list of [lower,upper] where lower and upper are each n-length
        ndarray objects giving the per-element bounds the input should satisfy
    Outputs:
        integer count of elements of _input strictly below the lower bound or
        strictly above the upper bound
    '''
    lower, upper = bounds[0], bounds[1]
    # Boolean comparisons sum to the number of violating elements.
    below = np.sum(_input < lower)
    above = np.sum(_input > upper)
    return below + above
def getBackwardsUniqueOrder(iterable,backward=True):
    '''
    Purpose:
        Returns the unique 'most recently seen' order of iterables. For example
        if the iterable is [0,0,1,3,2,0,1,2,2], this function returns [2,1,0,3].
    Inputs:
        iterable - list or 1D-ndarray with potentially repeated values
        backward - if True, scan from the end of the iterable; if False, scan
        from the start (first-seen order)
    Outputs:
        list of unique values in scan order
    '''
    # Choose scan direction, then keep the first occurrence of each value.
    sequence = iterable[::-1] if backward else iterable
    unique_order = []
    for item in sequence:
        if item not in unique_order:
            unique_order.append(item)
    return unique_order
def softmax(x,alpha=-1.0,rescale_=False):
    '''
    Purpose:
        Compute a softmax distribution over x, scaled by alpha (the default
        alpha = -1.0 weights *smaller* inputs more heavily).
    Inputs:
        x - iterable of numeric values
        alpha - exponent scale factor applied to each element
        rescale_ - if True, rescale() the inputs before exponentiating
    Outputs:
        ndarray of nonnegative weights summing to 1
    '''
    values = rescale(x) if rescale_ else x
    exponentials = np.exp(alpha * np.array(values))
    # Normalize so the weights form a probability distribution.
    return exponentials / float(np.sum(exponentials))
def rescale(x,max_=10):
    '''
    Purpose:
        Linearly rescale values from an assumed [0, 400] range onto [0, max_].
        (Assumes inputs lie in 0..400 -- TODO confirm against callers.)
    Inputs:
        x - iterable of numeric values
        max_ - upper bound of the target range
    Outputs:
        x_scaled - list of rescaled float values
    '''
    # Divide by 400.0 explicitly: under Python 2 the original `k/400`
    # used integer division, truncating every int k < 400 to zero before
    # the scale factor was applied. Float division fixes that while
    # leaving Python 3 behavior unchanged.
    x_scaled = [k / 400.0 * max_ for k in x]
    return x_scaled
def gaussiansMeet(mu1, std1, mu2, std2):
    '''
    Purpose:
        Calculates the intersection points of two gaussian probability
        density functions by solving the quadratic obtained from equating
        their log-densities.
    Inputs:
        mu1, mu2 - mean values of the respective gaussian distributions
        std1, std2 - standard deviation values of the respective gaussian distributions
    Outputs:
        roots - real and/or complex intersection points (np.roots output)
    '''
    var1 = std1 ** 2
    var2 = std2 ** 2
    # Coefficients of a*x^2 + b*x + c = 0 from log N(mu1,std1) = log N(mu2,std2).
    a = 1 / (2. * var1) - 1 / (2. * var2)
    b = mu2 / (1. * var2) - mu1 / (1. * var1)
    c = mu1 ** 2 / (2. * var1) - mu2 ** 2 / (2. * var2) - np.log(std2 / (1. * std1))
    # np.roots handles the equal-variance case (a == 0) by dropping the
    # leading zero coefficient and solving the resulting linear equation.
    return np.roots([a, b, c])
class Subspace(object):
    '''
    Purpose:
        Subspace class that allows for easy projections into the subspace. Used to allow a new set of points that you know would lie in the same subspace (or a similar one) if the number of points/features in the new set of points were the same as the subspace. Thus this is a useful class if there is a structure to be exploited.
    Functions:
        self.projectOnMe(self,X) - allows for a differently shaped matrix X (m by r) to be projected on the same space as the base subspace self.U (n by p) if they share a similar structure but for some reason are not the same number of points.
    callables:
        self.U - n by p basis matrix for the subspace
        self.n - number of elements in the subspace
        self.p - number of features in the subspace
    '''
    def __init__(self,U):
        '''
        Initialize the subspace class with the basis array U (n by p)
        '''
        self.U = U #U is orthogonal subspace ndarray object n by p
        self.n = U.shape[0] #number of elements in the subspace
        self.p = U.shape[1] #number of features in subspace
    def projectOnMe(self,X,onlyshape=False):
        '''
        Purpose:
            Adds or subtracts random points from the matrix X to coincide with the same number of points as self.n. This function uses interpolation between points randomly chosen to add new points to coincide with the dimension of the basis array self.U.
        Inputs:
            X - m by r array with m and r possibly different from self.n and self.p
            onlyshape - if True, return the reshaped X without projecting it
        Outputs:
            Z - m by self.p array in the proper subspace self.U
        NOTE(review): the added/removed indices are chosen with np.random, so
        repeated calls on the same X are not deterministic -- confirm callers
        seed or tolerate this.
        '''
        #project a different subspace Y (m by r, m and r possibly not equal to n and p) onto the space spanned by self.U
        def extendX(X):
            '''
            Purpose:
                Adds the necessary number of points to X to match self.n
            Inputs:
                X - m by r array with m and r possibly different from self.n and self.p
            Outputs:
                X - an updated version of X that is now self.n by r
                inds - array of indices that were added to X to be removed later
            '''
            #inds = np.array([added_ind1, added_ind2, added_ind3, ...]) int between {1,2,...,max_ind-1}
            #X is too small to be projected on U, so need to add additional points
            if len(X) > self.n: #check you didn't use the wrong function (should be done for you already though )
                #print 'whoops, extendX() is not for you'
                # NOTE(review): returns None here, which the caller does not
                # guard against -- unreachable via projectOnMe, but fragile.
                return
            else:
                num_add = self.n-len(X)
                #print 'adding ', num_add, ' elements'
                interps = np.random.randint(len(X)-1, size=num_add) #select interpolation indices at the halfway points ]along the elements of the basis {0.5,1.5,2.5...,max_ind-0.5}
                interps = interps.astype('float64')
                interps += 0.5
                interps = np.sort(interps)
                ceil_interps = np.ceil(interps)
                # Xnew starts as a dummy ones column; real columns are
                # stacked on and the dummy is stripped at the end.
                Xnew = np.ones((len(X)+num_add,1))
                for col in X.T: #for each column of X, interpolate
                    value_add = np.interp(interps,np.arange(len(col)),col)
                    col = np.insert(col,ceil_interps,value_add)
                    # NOTE(review): -99 is used as a sentinel to mark the
                    # inserted rows; a genuine data value of -99 would be
                    # misidentified. Also inds is recomputed per column and
                    # only the last column's result is returned -- confirm
                    # all columns share the same insertion indices.
                    col99 = np.insert(col,ceil_interps,np.ones(len(ceil_interps))*-99) #fills in added entries with -99
                    inds = np.where(col99==-99) #inds which will be removed later
                    Xnew = np.hstack((Xnew,col.reshape(len(col),1)))
                X = Xnew[:,1:] #ignore first column
                return X, inds
        def contractX(X):
            '''
            Purpose:
                Removes the necessary number of points to X to match self.n
            Inputs:
                X - m by r array with m and r possibly different from self.n and self.p
            Outputs:
                X - an updated version of X that is now self.n by r
                inds - array of indices that were removed to X to be added back in later through interpolation in the new basis
            '''
            #X is too large to be projected on U, so need to remove points
            #inds = np.array([added_ind1, added_ind2, added_ind3, ...])
            if len(X) < self.n:
                #print 'whoops, contractX() is not for you'
                return
            else:
                num_remove = len(X) - self.n
                #print 'removing ', num_remove, ' elements'
                # Never remove the first or last point so the endpoints
                # remain available for interpolation later.
                removes = np.random.choice(len(X)-2,size=num_remove,replace=False)+1 #select from {1,2,...max_ind-1} without replacement
                removes = np.sort(removes)
                inds = np.empty_like(removes)
                for i,r in enumerate(removes):
                    # Shift each index left by the number of earlier removals
                    # so it addresses the *contracted* array.
                    inds[i] = r-1-i #index after which to place the new element when adding them back for interpolation
                Xnew = np.ones((len(X)-num_remove,1))
                for col in X.T:
                    col = np.delete(col,removes,axis=0)
                    Xnew = np.hstack((Xnew,col.reshape(len(col),1)))
                X = Xnew[:,1:]
                return X, inds
        def resolveProjection(Z,inds,status):
            '''
            Purpose:
                Resolves the projection process after the newly shaped array has been projected on the new subspace by replacing the proper indices or removing the added indices placed in inds.
            Inputs:
                Z - self.n by self.p array coming from utils.projectToSubspace()
                inds - indices of removed or added points in order to shape the projected subspace into the self.U basis.
                status - (0 = no changes necessary),(+1 = need to remove the unnecessary points that had been added previously),(-1 = need to add in points through interpolation at the appropriate indices)
            Outputs:
                Z - m by self.p array in the proper subspace self.U
            '''
            if status == 0:
                #print 'status is go'
                return Z
            elif status == +1:
                #print 'removing unnecessary additions'
                #remove unnecessary added rows from Z
                Z = np.delete(Z,inds,axis=0)
                return Z
            elif status == -1:
                #add necessary removed points to Z
                #print 'adding the important additions back'
                # Interpolate replacement rows at the half-index positions
                # recorded by contractX().
                interps = inds + 0.5
                Znew = np.ones((len(Z)+len(inds),1))
                for col in Z.T:
                    values = np.interp(inds+0.5, np.arange(len(col)), col)
                    col = np.insert(col,np.ceil(interps),values)
                    Znew = np.hstack((Znew,col.reshape(len(col),1)))
                Z = Znew[:,1:]
                return Z
        status = 0 #default that self.U and X are the same length
        inds = []
        if len(X) < self.n:
            #print 'extending'
            X,inds = extendX(X)
            status = +1 #indices have been added, will need to remove these from the projection later
        elif len(X) > self.n:
            #print 'contracting'
            X,inds = contractX(X)
            status = -1 #indices have been removed, will need to interpolate in projection later
        if onlyshape:
            return X
        else:
            Z = projectToSubspace(X,self.U)
            Z = resolveProjection(Z,inds,status)
            return Z
def projectToSubspace(X,Y):
    '''
    Purpose:
        Embeds a set of features X (in R^(n by k)) onto a reduced dimension
        subspace Y (in R^(n by r)), r < k, via least squares approximation:
        Z = Xw where w = argmin ||Xw - Y||
    Inputs:
        X - n by k feature array (ndarray type)
        Y - n by r feature array, (r<k, ndarray type)
    Outputs:
        Z - n by r ndarray subspace projection of X onto Y
    '''
    # lstsq returns (solution, residuals, rank, singular values); only the
    # solution (the weight matrix) is needed here.
    solution = np.linalg.lstsq(X,Y)
    weights = solution[0]
    return X.dot(weights)
def orderStates(vector):
    '''
    Purpose:
        Relabels states so the first distinct state seen becomes 0, the
        second distinct state becomes 1, and so on.
    Inputs:
        vector - 1 dimensional array of a relatively small number of ints
    Outputs:
        ordered_vector - list the same length as vector with relabeled states
    '''
    seen_states = []
    for position, state in enumerate(vector):
        if position == 0:
            # First element always defines state 0.
            seen_states.append(state)
            ordered_vector = [0]
        else:
            if state not in seen_states:
                # New state: assign it the next label.
                seen_states.append(state)
            ordered_vector.append(seen_states.index(state))
    return ordered_vector
def generateData(N,form='bull',dim=2):
    '''
    Purpose:
        Generates (N by dim) ndarray of a type described by 'form'
        Particularly useful for testing clustering methods
    Inputs:
        N - length of data set
        dim - number of dimensions in dataset (ie dim = 2)
        form - data set type
            --'sep' compiles a dataset with two distinct groups
            --'bull' compiles a dataset of a bullseye shape (one labeled group within a ring of the other group)
            --'chain' compiles a dataset of a linear chain with a label break in between them
    Outputs:
        X - compiled data array of 'form' type
        y - labels (+1/-1) associated with each of the N examples of X
    NOTE(review): 'sep' and 'bull' use np.random without a seed, so output
    is nondeterministic; an unrecognized form returns None implicitly.
    '''
    X = np.zeros((N,dim),dtype = np.float16)
    y = np.zeros((N,1), dtype = np.int_)
    if form == 'sep': #separate clusters of data
        # Two clusters centered at all-ones and the origin, each jittered
        # uniformly within +/-0.5 per coordinate.
        base1 = np.ones((1,dim))
        base2 = np.zeros((1,dim))
        cnt = 0
        while cnt < np.floor(N/2):
            X[cnt,:] = base1 + 0.5*(np.random.rand(1,dim)*2.0-1.)
            y[cnt] = 1
            cnt += 1
        while cnt < N:
            X[cnt,:] = base2 + 0.5*(np.random.rand(1,dim)*2.0-1.)
            y[cnt] = -1
            cnt += 1
        y.shape = (N,)
        return X,y
    elif form == 'bull': #inner cluster surrounded by ring of points
        cnt=0;
        X = np.zeros((N,dim),dtype = np.float16)
        y = np.zeros((N,1), dtype = np.int_)
        totalg1 = 0
        totalg2 = 0
        # Rejection sampling: keep points with norm < 0.15 for the inner
        # cluster (label +1) and points in the 0.5..0.55 annulus for the
        # ring (label -1), capping the group sizes at roughly N/6 and 5N/6.
        while cnt < N :
            x = 2*np.random.rand(1,dim)-1;
            if np.linalg.norm(x) < 0.15 and totalg1<=(N-np.floor(N/1.2)):
                X[cnt,:] = x;
                y[cnt] = +1
                cnt=cnt+1;
                totalg1 +=1
            elif (np.linalg.norm(x) > 0.5 and np.linalg.norm(x) < 0.55) and totalg2<(N-(N-np.floor(N/1.2))):
                X[cnt,:] = x;
                y[cnt] = -1
                cnt=cnt+1;
                totalg2 += 1
        y.shape = (N,)
        return X,y
    elif form == 'chain': #linear chain graph of N points
        # Deterministic: point i sits at coordinate (i, i, ...), with the
        # first half labeled +1 and the second half -1.
        X = np.zeros((N,dim),dtype = np.float16)
        for i in np.arange(N):
            X[i,:] = i
            if i < N/2.:
                y[i] = +1
            else:
                y[i] = -1
        y.shape = (N,)
        return X,y
def loader(handover_starts,data_object,n):
    '''
    Purpose:
        Build the [init_frame, end_frame] pair for handover n of a
        kinectData-style object.
    Inputs:
        handover_starts - list of the frame numbers for all starts of handovers or general tasks
        data_object - kinectData object that has been filled with data from a file
        n - the handover number you would like to get the beginning and end frames of
    Outputs:
        starts - list of [init_frame for handover n, end_frame for handover n]
    '''
    # EAFP: the final handover has no successor entry, so fall back to the
    # last frame index held by the data object.
    try:
        return [handover_starts[n], handover_starts[n + 1]]
    except IndexError:
        return [handover_starts[n], data_object.num_vectors - 1]
def runTasks(handover_starts,data_obj,task_obj,n,max_ind=10):
    '''
    Purpose:
        Performs the task class update step for n randomly chosen tasks from the potential dataset of tasks in handover_starts. In other words, for n = 5, 5 different values from handover_starts will be chosen to be used for the task update on the task_obj using data found in data_obj. max_ind represents the total number of handover options in handover_starts to choose from.
    Inputs:
        handover_starts - list of the frame numbers for all starts of handovers or general tasks
        data_obj - kinectData object
        task_obj - process.Task() object
    n - number of handovers/full tasks to randomly choose
        max_ind - total number of handovers available to be chosen from
    Outputs:
        no return object, but task_obj is updated with the new state values and historical data
    NOTE(review): np.random.randint samples *with* replacement, so the same
    handover may be chosen more than once -- confirm that is intended.
    '''
    inds = np.random.randint(max_ind,size=n)
    for i in inds:
        # loader() converts handover index i into a [start, end] frame pair.
        task_obj.update(data_obj,loader(handover_starts,data_obj,i))
def euclideanDist(point1,point2):
    '''
    Purpose:
        Calculates euclidean distance between two points.
    Inputs:
        point1,point2 - same dimensioned points in some space
    Outputs:
        euclidean distance ||point1 - point2||_2
    '''
    displacement = point1 - point2
    return np.linalg.norm(displacement)
def majorityVote(values):
    '''
    Purpose:
        Outputs the most often seen value from a 1d list of values along with
        the unique values and their counts sorted from least to most frequent.
    Inputs:
        values - 1d list of values that may include redundant values
    Outputs: Two outputs - output1,output2
        output1 - the most often counted value found in values
        output2 - list with two components = [unique values sorted from least
        often to most often, counts sorted ascending]
    '''
    '''test code (place on own as main)
    x1 = [2]*5+[1]*10+[0]*3 # expected list return- [0,2,1], [3,5,10]
    x2 = [1]*5+[2]*10+[0]*3 # [0,1,2], [3,5,10]
    x3 = [0]*5+[1]*10+[2]*3 # [2,0,1], [3,5,10]
    x4 = [0]*5+[2]*10+[1]*3 # [1,0,2], [3,5,10]
    x5 = [2]*5+[0]*10+[1]*3 # [1,2,0], [3,5,10]
    x6 = [2]*10+[1]*5 # [1,2], [5,10]
    def dothings(x):
        print x
        best_val, obj = majorityVote(x)
        print 'most often: ', best_val
        print 'sorted indicies: ', obj[0]
        print 'sorted count values for indicies: ', obj[1]
    dothings(x1)
    dothings(x2)
    dothings(x3)
    dothings(x4)
    dothings(x5)
    dothings(x6)
    '''
    if isinstance(values,list):
        unique_vals = np.unique(values).tolist()
        # Count occurrences of each unique value in the original list.
        counts = [np.sum(np.array(values) == uv) for uv in unique_vals]
        order = np.argsort(counts)
        # The last index in ascending count order is the majority winner.
        winner = unique_vals[order[-1]]
        ranked_vals = [int(unique_vals[idx]) for idx in order]
        ranked_counts = np.sort(counts)
    else:
        # Non-list input is passed through untouched (legacy behavior).
        winner = values
        ranked_vals = values
        ranked_counts = len(values)
    return winner, [ranked_vals, ranked_counts]
def kNN(new_point, history_points, history_labels, k=5):
    '''
    Purpose:
        Performs the k nearest neighbors algorithm using euclidean distances:
        the label of the new point is the majority vote among the k closest
        labeled history points.
    Inputs:
        new_point - 1 by p array representing the new p-featured point in space
        history_points - n by p array representing the known labeled points in space
        history_labels - length n list of labels corresponding to the n history_points examples
        k - number of nearest neighbors whose majority label is returned
    Outputs: Two outputs in a single list object - [vote,counts_info]
        vote - majority vote label from the k closest points to the new point
        counts_info - two element list [sorted labels least-to-most frequent,
        corresponding counts] whose counts sum to k
    '''
    # Distance from the new point to every history point, in order.
    dists = [euclideanDist(new_point, old_point) for old_point in history_points]
    nearest = np.argsort(dists)[0:k]
    candidate_labels = np.array(history_labels)[nearest].tolist()
    vote, counts_info = majorityVote(candidate_labels)
    return [vote, counts_info]
def compareTaskDef(task_obj,new_labels,kinectData_obj):
    '''
    Purpose:
        Print a side-by-side comparison of the expected task path and the
        path implied by new_labels (diagnostic helper; returns None).
    Inputs:
        task_obj - process.Task() object holding the reference path/times
        new_labels - labels to convert into a candidate path via definePath
        kinectData_obj - kinectData object used to build a dummy Task
    '''
    # Local import avoids a circular dependency at module load time --
    # presumably process imports this module; verify.
    import process
    new_path = task_obj.definePath(new_labels)
    dummy_task = process.Task(kinectData_obj) #create dummy task object to printed out the task definition
    dummy_task.path = new_path[0]
    dummy_task.times = new_path[1]
    print 'Expected path (', sum(task_obj.times),'frames ):'
    dummy_var = task_obj.printTaskDef(1)
    print 'New path (', sum(dummy_task.times),'frames ):'
    # Scale factor normalizes the new path's frame counts to the reference.
    new_path_info = dummy_task.printTaskDef(sum(dummy_task.times)/float(sum(task_obj.times))) #prints the new path information in a good way
    return
def plotFeaturesTogether(data_obj,col,starts,tasknums):
    '''
    Purpose:
        Overlay one feature column of several tasks on a single matplotlib
        plot, each task in a different color (diagnostic helper).
    Inputs:
        data_obj - kinectData-style object exposing feat_array (frames x features)
        col - feature column index to plot
        starts - list of task start frame indices (task t spans starts[t]..starts[t+1])
        tasknums - iterable of task indices to overlay (at most 6 -- one per color)
    '''
    # Local import keeps matplotlib optional for non-plotting callers.
    import matplotlib.pyplot as plt
    colors = 'kbgrmy'
    for i,t in enumerate(tasknums):
        # Frame range for task t.
        a,b = starts[t],starts[t+1]
        print a,b
        print colors[i]
        # Plot against a zero-based frame axis so tasks align at the origin.
        plt.plot(np.arange(b-a),data_obj.feat_array[a:b,col],'-',color=colors[i],label='task'+str(t))
    plt.legend()
|
jvahala/lucid-robotics
|
code/python-modules/utils.py
|
Python
|
apache-2.0
| 21,305
|
[
"Gaussian"
] |
a85e88e8b70600ff2642543e1a5a0d1a40278c1ea43b7af93596c6420736cd73
|
"""
# Copyright (C) 2007 Nathan Ramella (nar@remix.net)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Nathan Ramella <nar@remix.net> or visit http://www.liveapi.org
RemixNet Module
This module contains four classes that have been assembled to facilitate
remote control of Ableton Live. It's been an interesting experience learning
Python and has given me a lot of time to think about music and networking
protocols. I used OSC as it's somewhat of an accepted protocol and at least
more flexible than MIDI. It's not the quickest protocol in terms of
pure ops, but it gets the job done.
For most uses all you'll need to do is create an OSCServer object, it
in turn creates an OSCClient and registers a couple default callbacks
for you to test with. Both OSCClient and OSCServer create their own UDP
sockets this is settable on initialization and during runtime if you wish
to change them.
Any input or feedback on this code will always be appreciated and I look
forward to seeing what will come next.
-Nathan Ramella (nar@remix.net)
-Updated 29/04/09 by ST8 (st8@q3f.org)
Works on Mac OSX with Live7/8
The socket module is missing on OS X, and including it from the default Python install doesn't work.
It turns out it's the os module that causes all the problems; removing the dependence on that module and
packaging the script with a modified version of the socket module allows it to run on OS X.
"""
import inspect
import os
import sys
import Live
# Import correct paths for os / version
# Select the right socket module for the platform and the major version of
# Ableton Live (Live's embedded Python needs help finding stdlib on OS X).
version = Live.Application.get_application().get_major_version()
if sys.platform == "win32":
    import socket
else:
    if version > 7:
        # 10.5
        # Probing for a stdlib file decides whether to add the framework
        # python2.5 path. NOTE(review): the handle is opened only as an
        # existence test and is never closed -- a small leak; confirm
        # os.path could not be used here instead (module docstring says the
        # os module is avoided deliberately on OS X).
        try:
            file = open("/usr/lib/python2.5/string.pyc")
        except IOError:
            sys.path.append("/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5")
        # Bundled, patched socket module for Live 8 on OS X.
        import socket_live8 as socket
    else:
        sys.path.append("/usr/lib/python2.5")
        import socket
# OSC
from OSCMessage import OSCMessage
from CallbackManager import CallbackManager
from OSCUtils import *
class UDPServer:
    """
    RemixNet.UDPServer

    This class is a barebones UDP server setup with the ability to
    assign callbacks for incoming data. In the design as is, we use
    an OSC.CallbackManager when we receive any data.

    This class is designed to be used by RemixNet.OSCServer, as it
    will do all the setup for you and register a few default OSCManager
    callbacks.

    NOTE(review): __init__ never initializes self.callbackManager; if
    setCallbackManager() is not called before processIncomingUDP(), the
    resulting AttributeError is silently swallowed by the broad except
    below -- confirm OSCServer always wires the callback manager first.
    """

    def __init__(self, src, srcPort):
        """
        Sets up the UDPServer component of this package. By default
        we listen to all interfaces on port 9000 for incoming requests
        with a 4096 byte buffer.

        src - source host/interface to bind to ('' = all interfaces)
        srcPort - UDP port to listen on (falsy value defaults to 9000)
        """
        if srcPort:
            self.srcPort = srcPort
        else:
            self.srcPort = 9000
        if src:
            self.src = src
        else:
            self.src = ''
        # Receive buffer size in bytes for recvfrom().
        self.buf = 4096

    def processIncomingUDP(self):
        """
        Attempt to process incoming packets in the network buffer. If none are
        available it will return. If there is data, and a callback manager has been
        defined we'll send the data to the callback manager.

        You can specify a callback manager using the UDPServer.setCallbackManager()
        function and passing it a populated OSC.Manager object.
        """
        try:
            # You'd think this while 1 loop would get stuck and block the
            # program. But. As it turns out. It doesn't.
            # (The socket is non-blocking -- see bind() -- so recvfrom()
            # raises when the buffer is empty, which exits via the except.)
            while 1:
                self.data,self.addr = self.UDPSock.recvfrom(self.buf)
                if not self.data:
                    # No data buffered this round!
                    return
                else:
                    if self.data != '\n':
                        # Oh snap, we have data!
                        # If you want to write your own special handlers for dealing
                        # with incoming data, this is the place. self.data contains
                        # the raw data sent to our UDP socket.
                        print('UDP raw: ' + self.data)
                        if self.callbackManager:
                            self.callbackManager.handle(self.data)
        except Exception, e:
            # Python 2 syntax. NOTE(review): this deliberately swallows the
            # "would block" socket error that ends each poll, but it also
            # hides any error raised by the callback manager.
            pass

    def setCallbackManager(self, callbackManager):
        """
        You can specify a callbackManager here as derived from OSC.py.
        We use this function in OSCServer to register the default /remix/
        namespace addresses as utility callbacks.
        """
        self.callbackManager = callbackManager

    def bind(self):
        """
        After initializing you must UDPServer.bind() to bind to the socket
        and accept whatever packets are in the buffer. Since we're binding a
        non-blocking socket, your program (and Ableton Live) will still be
        able to run.
        """
        self.addr = (self.src,self.srcPort)
        self.UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        self.UDPSock.bind(self.addr)
        # Non-blocking mode: recvfrom() raises instead of waiting, which is
        # what lets processIncomingUDP()'s loop terminate.
        self.UDPSock.setblocking(0)

    def close(self):
        """
        Close our UDPSock
        """
        # Closing time!
        self.UDPSock.close()
|
shouldmakemusic/yaas
|
LiveOSC/UDPServer.py
|
Python
|
gpl-2.0
| 6,206
|
[
"VisIt"
] |
9d1db32a84d1e8ff6925685c21afa08967e7639b79942177c30dd67d750ea7a7
|
'''
@author Sumedha Ganjoo
@see LICENSE (MIT style license file).
'''
import os.path
import sys

# Write a confirmation message into the file named by the second CLI
# argument. A context manager guarantees the handle is flushed and closed
# (the original opened the file and never closed it).
with open(sys.argv[2], 'w') as outputfile:
    outputfile.seek(0, 0)
    outputfile.write('\n Tool added...For independent use, find the tool under Web Service Tools on the left side window, \n and for use in workflows find the tool under Web Service Workflow Tools. \n If the tool is not visible click on "Galaxy" on the top left corner of this window to refresh the page.')
|
UGA-WSAG/wsextensions
|
WebServiceToolWorkflow_REST_SOAP/refreshTool.py
|
Python
|
mit
| 469
|
[
"Galaxy"
] |
dff32671e548439627ba6d30c73076cd6c7493d39ac7167f3033a99b64349c71
|
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Checks (rule) patterns associated with rows in tables, and adds an
additional column to each row (in each table) which captures
constraints in rule pattern.
"""
from __future__ import print_function
import dgen_core
# If true, print traces of how patterns are added.
# Useful to trace how patterns are generated for one (or more) tables,
# depending on the value of _restrict_to_tables.
_trace = False
# If defined, do a detailed trace of optimizing the given pattern
# Note: This flag is used to discover the cause of a "Row not reachable"
# or a "Table XXX malformed for pattern YYY" exception. It also can be
# used to see how the $pattern test was generated in the generated decoder
# state.
_trace_detailed_pattern = None
# If defined, only optimize patterns only in the given list of table names
_restrict_to_tables = None
def add_rule_pattern_constraints(decoder):
    """Add a '$pattern' constraint column to every table in the decoder,
    capturing the additional constraints implied by each row's rule
    pattern. Returns the (mutated) decoder for chaining."""
    for tbl in decoder.tables():
        _add_rule_pattern_constraints_to_table(decoder, tbl)
    return decoder
def _process_table(table):
    """Return True if pattern optimization should run for this table:
    either no restriction list is configured, or the table is listed."""
    global _restrict_to_tables
    if not _restrict_to_tables:
        return True
    return table.name in _restrict_to_tables
def _add_rule_pattern_constraints_to_table(decoder, table):
    """Append the '$pattern' bitfield column to the given table and fill
    in a constraint pattern for every row."""
    global _trace
    if _trace and _process_table(table):
        print("*** processing table: %s ***" % table.name)
    # The new column index is one past the current last column.
    new_col = len(table.columns())
    table.add_column(dgen_core.BitField('$pattern', 31, 0))
    for tbl_row in table.rows():
        _add_rule_pattern_constraints_to_row(decoder, table, tbl_row, new_col)
def _add_rule_pattern_constraints_to_row(decoder, table, row, constraint_col):
    """Attach a constraint pattern (stored in column constraint_col) to
    the row, derived from the row's rule pattern when one exists."""
    global _trace
    if _trace and _process_table(table):
        print("consider: %s" % repr(row))
    action = row.action
    if action and action.__class__.__name__ == 'DecoderAction':
        pattern = action.pattern()
        if pattern:
            # Keep this call even though its result is unused on one path:
            # define_pattern registers the pattern with the table.
            rule_pattern = table.define_pattern(pattern, constraint_col)
            if _process_table(table):
                # Work out which pattern bits are still untested on any
                # path reaching this row, and constrain only those.
                reaching = RulePatternLookup.reaching_pattern(
                    decoder, table, row, pattern, constraint_col)
                row.add_pattern(reaching)
            else:
                row.add_pattern(table.define_pattern(pattern, constraint_col))
            return
    # No explicit rule pattern: fall back to the "don't care" pattern.
    row.add_pattern(table.define_pattern('-', constraint_col))
class RulePatternLookup(object):
  """Lookup state for finding what parts of an instruction rule pattern
  survive to the corresponding row of a table. This information is
  used to optimize how rule patterns are added.

  Note: Implements a table stack so that a depth-first
  search can be used. The stack is used to detect cycles,
  and report the problem if detected.

  Note: This data structure also implements a row stack. This
  stack is not really needed. However, when debugging, it can
  be very useful in describing how the current state was reached.
  Hence, it is included for that capability.
  """

  @staticmethod
  def reaching_pattern(decoder, table, row, pattern_text, pattern_column):
    """Given a rule in the given row, of the given table, of the
    given decoder, return the set of bit patterns not already
    handled.

    Raises:
      Exception: if the row cannot be reached from the decoder's
        primary table.
    """
    # Create a look up state and then do a depth-first walk of possible
    # matches, to find possible (unmatched) patterns reaching the
    # given table and row.
    state = RulePatternLookup(decoder, table, row,
                              pattern_text, pattern_column)
    if state._trace_pattern():
      print("*** Tracing pattern: %s ***" % pattern_text)
      print("  table: %s" % table.name)
      print("  row: %s" % repr(row))
    # Do a depth-first walk of possible matches, to find
    # possible (unmatched) patterns reaching the given table and
    # row.
    state._visit_table(decoder.primary)
    # Verify that the row can be reached!
    if not state.is_reachable:
      raise Exception("Row not reachable: %s : %s"
                      % (table.name, repr(row)))
    # Return the pattern of significant bits that could not
    # be ruled out by table (parse) patterns.
    return state.reaching_pattern

  def _trace_pattern(self):
    """Returns True iff detailed tracing applies to this pattern.

    BUG FIX: previously fell off the end (returning None) when
    detailed tracing was disabled; now always returns a bool.
    Truthiness in all call sites is unchanged.
    """
    global _trace_detailed_pattern
    return bool(_trace_detailed_pattern and
                self.pattern_text == _trace_detailed_pattern)

  def __init__(self, decoder, table, row, pattern_text, pattern_column):
    """Create a rule pattern lookup. Arguments are:
       decoder - The decoder being processed.
       table - The table in the decoder the row appears in.
       row - The row we are associating a pattern with.
       pattern_text - The (rule) pattern text associated with the row.
       pattern_column - The column the pattern constraint lives in.

    Uses a depth-first search to find all possible paths
    that can reach the given row in the given table, and
    what bits were already tested in that path.
    """
    self.decoder = decoder
    self.table = table
    self.row = row
    self.pattern_text = pattern_text
    # Define the corresponding pattern for the pattern text.
    self.pattern = table.define_pattern(pattern_text, pattern_column)
    # The following holds the stack of tables visited.
    self.visited_tables = []
    # The following holds the stack of rows (between tables) visited.
    self.visited_rows = []
    # The following holds the significant bits that have been shown
    # as possibly unmatched. Initially, we assume no bits are significant,
    # and let the lookup fill in bits found to be potentially significant.
    self.reaching_pattern = dgen_core.BitPattern.always_matches(
        self.pattern.column)
    # The following holds the part of the current pattern that is still
    # unmatched, or at least only partially matched, and therefore can't
    # be removed.
    self.unmatched_pattern = self.pattern
    # The following defines if the pattern is reachable!
    self.is_reachable = False

  def _visit_table(self, table):
    """Visits the given table, trying to match all rows in the table.

    Raises:
      Exception: if the table is already on the visit stack (i.e. the
        decoder table graph contains a cycle).
    """
    if self._trace_pattern():
      print("-> visit %s" % table.name)
    if table in self.visited_tables:
      # Cycle found, quit. (BUG FIX: an unreachable `return` that
      # followed this raise was removed.)
      raise Exception("Table %s malformed for pattern %s" %
                      (table.name, repr(self.pattern)))
    self.visited_tables.append(table)
    for row in table.rows():
      self._visit_row(row)
    self.visited_tables.pop()
    if self._trace_pattern():
      print("<- visit %s" % table.name)

  def _visit_row(self, row):
    """Visits the given row of a table, and updates the reaching pattern
    if there are unmatched bits for the (self) row being processed.
    """
    global _trace
    self.visited_rows.append(row)
    if self._trace_pattern():
      print('row %s' % row)
    # Before processing the row, use a copy of the unmatched pattern so
    # that we don't pollute other path searches through the tables.
    previous_unmatched = self.unmatched_pattern
    self.unmatched_pattern = self.unmatched_pattern.copy()
    matched = True  # Assume true till proven otherwise.
    # Try to match each pattern in the row, removing matched significant
    # bits from the unmatched pattern.
    for row_pattern in row.patterns:
      match = self.unmatched_pattern.categorize_match(row_pattern)
      if self._trace_pattern():
        print('match %s : %s => %s' % (repr(self.unmatched_pattern),
                                       repr(row_pattern), match))
      if match == 'match':
        # Matches, i.e. all significant bits were used in the match.
        self.unmatched_pattern = (
            self.unmatched_pattern.remove_overlapping_bits(row_pattern))
        if self._trace_pattern():
          print('  unmatched = %s' % repr(self.unmatched_pattern))
      elif match == 'consistent':
        # Can't draw conclusion if any bits of pattern
        # affect the unmatched pattern. Hence, ignore this
        # pattern and continue matching remaining patterns
        # in the row.
        continue
      elif match == 'conflicts':
        # This row can't be followed because it conflicts with
        # the unmatched pattern. Give up.
        matched = False
        break
      else:
        # This should not happen!
        raise Exception("Error matching %s and %s!"
                        % (repr(row_pattern), repr(self.unmatched_pattern)))
    if matched:
      # Row (may) apply. Continue search for paths that can match
      # the pattern.
      if self._trace_pattern():
        print("row matched!")
        print("row: %s" % repr(row))
      if row == self.row:
        # We've reached the row in the table that we are trying to
        # reach. Significant bits remaining in unmatched_pattern
        # still need to be tested. Union them into the reaching pattern.
        old_reaching = self.reaching_pattern.copy()
        self.reaching_pattern = self.reaching_pattern.union_mask_and_value(
            self.unmatched_pattern)
        if self._trace_pattern():
          print("  reaching pattern: %s => %s" % (repr(old_reaching),
                                                  repr(self.reaching_pattern)))
        self.is_reachable = True
        if _trace:
          print("*** pattern inference ***")
          self._print_trace()
          print("implies: %s => %s" % (repr(self.pattern),
                                       repr(self.unmatched_pattern)))
          print("resulting in: %s => %s" % (repr(old_reaching),
                                            repr(self.reaching_pattern)))
      else:
        # If action is to call another table, continue search with that table.
        if row.action and row.action.__class__.__name__ == 'DecoderMethod':
          tbl = self.decoder.get_table(row.action.name)
          if tbl:
            self._visit_table(tbl)
          else:
            raise Exception("Error: action -> %s used, but not defined" %
                            row.action.name)
    # Restore state back to before matching the row.
    self.visited_rows.pop()
    self.unmatched_pattern = previous_unmatched

  def _print_trace(self):
    """Prints the current table/row visit stack (debugging aid)."""
    for i in range(0, len(self.visited_tables)):
      print("Table %s:" % self.visited_tables[i].name)
      if i < len(self.visited_rows):
        print("  %s" % self.visited_rows[i].patterns)
|
endlessm/chromium-browser
|
native_client/src/trusted/validator_arm/dgen_add_patterns.py
|
Python
|
bsd-3-clause
| 11,007
|
[
"VisIt"
] |
00c426769d2d34afef37189cb40bdfb7f93970393ad4c9aafb4a9da6b3d85ff2
|
"""Mixture prior over the count of a given face in N_ROLLS die throws.

The die is "unfair" in the sense that it is one of three dice (12-, 6-, or
4-sided) with known probabilities.  Each hypothesis induces a normal
approximation to the Binomial(N_ROLLS, 1/k) count; the prior is the
probability-weighted mixture.  The script plots the prior PDF/CDF and
prints selected quantiles.

BUG FIX: the original used Python 2 ``print`` statements, which are a
syntax error under Python 3; all prints are now function calls.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
try:
    # cumtrapz was renamed cumulative_trapezoid in SciPy 1.6 and removed
    # in SciPy 1.14; fall back for older installations.
    from scipy.integrate import cumulative_trapezoid as cumtrapz
except ImportError:
    from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d

N_ROLLS = 1000
NPTS = 100

# Unfair die: mixture weights for the 12-, 6- and 4-sided hypotheses.
Pr12 = 0.25
Pr6 = 0.5
Pr4 = 0.25

# Normal approximation to Binomial(n, p): mean n*p, sd sqrt(n*p*(1-p)).
m_12 = N_ROLLS / 12.
s_12 = np.sqrt(N_ROLLS*(1/12.)*(11/12.))
m_6 = N_ROLLS / 6.
s_6 = np.sqrt(N_ROLLS*(1/6.)*(5/6.))
m_4 = N_ROLLS / 4.
s_4 = np.sqrt(N_ROLLS*(1/4.)*(3/4.))

print(m_12, s_12)
print(m_6, s_6)
print(m_4, s_4)

n_12 = norm(m_12, s_12)
n_6 = norm(m_6, s_6)
n_4 = norm(m_4, s_4)

# Evaluation grid covering the bulk (0.1%-99.9%) of all three components.
min_y = np.min([n_12.ppf(0.001), n_6.ppf(0.001), n_4.ppf(0.001)])
max_y = np.max([n_12.ppf(0.999), n_6.ppf(0.999), n_4.ppf(0.999)])
y = np.linspace(min_y, max_y, NPTS)

# the prior is a mixture of three gaussian distributions
prior_y = Pr12*n_12.pdf(y) + Pr6*n_6.pdf(y) + Pr4*n_4.pdf(y)

# the cumulative prior is necessary to get quantiles
cum_prior = cumtrapz(prior_y, x=y, initial=0)
# Invert the CDF numerically: q maps a probability to a count.
q = interp1d(cum_prior, y)

plt.figure()
fig, axes = plt.subplots(1, 2)
fig.set_size_inches(12, 6)

plt.sca(axes[0])
plt.plot(y, prior_y, 'b')
plt.xlabel('Number of throws of a 6')
plt.ylabel('Prior PDF')

plt.sca(axes[1])
plt.plot(y, cum_prior, 'r')
plt.xlabel('Number of throws of a 6')
plt.ylabel('Prior CDF')

plt.savefig('ex_04.png')
plt.close()

# get quantiles:
print(q(0.05), q(0.25), q(0.5), q(0.75), q(0.95))
|
amaggi/bda
|
chapter_02/ex_04.py
|
Python
|
gpl-2.0
| 1,323
|
[
"Gaussian"
] |
97ee5ed03c223421c60920c58148a83ef0de6aaff396e818096b6dd16e04baad
|
#coding=utf-8
"""This module contains the "Viz" objects
These objects represent the backend of all the visualizations that
Caravel can render.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import hashlib
import logging
import uuid
import zlib
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from flask import request
from flask_babel import lazy_gettext as _
from markdown import markdown
import simplejson as json
from six import string_types, PY3
from werkzeug.datastructures import ImmutableMultiDict, MultiDict
from werkzeug.urls import Href
from dateutil import relativedelta as rdelta
from caravel import app, utils, cache, db
from caravel.forms import FormFactory
from caravel.utils import flasher
config = app.config
class BaseViz(object):

    """All visualizations derive this base class"""

    viz_type = None  # Overridden by subclasses; mirrors form_data's viz_type.
    verbose_name = "Base Viz"
    credits = ""
    is_timeseries = False
    # Default form layout; subclasses override with their own fieldsets.
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics', 'groupby',
        )
    },)
    # Per-field overrides applied by FormFactory when building the form.
    form_overrides = {}

    def __init__(self, datasource, form_data, slice_=None):
        """Binds the viz to a datasource and normalizes ``form_data``.

        :param datasource: the table/druid datasource backing the viz
        :param form_data: dict or werkzeug MultiDict of form parameters
        :param slice_: optional Slice model the viz was created from
        :raises Exception: when no datasource is provided
        """
        self.orig_form_data = form_data
        if not datasource:
            raise Exception("Viz is missing a datasource")
        self.datasource = datasource
        self.request = request
        self.viz_type = form_data.get("viz_type")
        self.slice = slice_

        # TODO refactor all form related logic out of here and into forms.py
        ff = FormFactory(self)
        form_class = ff.get_form()
        defaults = form_class().data.copy()
        previous_viz_type = form_data.get('previous_viz_type')
        if isinstance(form_data, (MultiDict, ImmutableMultiDict)):
            form = form_class(form_data)
        else:
            form = form_class(**form_data)
        data = form.data.copy()
        if not form.validate():
            # Surface validation errors to the user, except on json/async
            # requests where flash messages would never be seen.
            for k, v in form.errors.items():
                if not data.get('json') and not data.get('async'):
                    flasher("{}: {}".format(k, " ".join(v)), 'danger')
        if previous_viz_type != self.viz_type:
            # The viz type changed: only keep the explicitly submitted
            # fields that the new form class understands.
            data = {
                k: form.data[k]
                for k in form_data.keys()
                if k in form.data}
        defaults.update(data)
        self.form_data = defaults
        self.query = ""
        self.form_data['previous_viz_type'] = self.viz_type
        self.token = self.form_data.get(
            'token', 'token_' + uuid.uuid4().hex[:8])

        self.metrics = self.form_data.get('metrics') or []
        self.groupby = self.form_data.get('groupby') or []

        self.reassignments()

    @classmethod
    def flat_form_fields(cls):
        """Returns a flat tuple of every field name used in ``fieldsets``."""
        # Renamed local `l` (ambiguous, shadows nothing but unreadable).
        fields = set()
        for d in cls.fieldsets:
            for obj in d['fields']:
                # Entries are either a field name or a (row) tuple of
                # names, possibly padded with None placeholders.
                if obj and isinstance(obj, (tuple, list)):
                    fields |= {a for a in obj if a}
                elif obj:
                    fields.add(obj)
        return tuple(fields)

    def reassignments(self):
        """Hook for subclasses to remap form fields before querying."""
        pass

    def get_url(self, for_cache_key=False, **kwargs):
        """Returns the URL for the viz

        :param for_cache_key: when getting the url as the identifier to hash
            for the cache key
        :type for_cache_key: boolean
        """
        d = self.orig_form_data.copy()
        if 'json' in d:
            del d['json']
        if 'action' in d:
            del d['action']
        d.update(kwargs)
        # Remove unchecked checkboxes because HTML is weird like that
        od = MultiDict()
        for key in sorted(d.keys()):
            # if MultiDict is initialized with MD({key:[emptyarray]}),
            # key is included in d.keys() but accessing it throws
            try:
                if d[key] is False:
                    del d[key]
                    continue
            except IndexError:
                pass
            if isinstance(d, (MultiDict, ImmutableMultiDict)):
                v = d.getlist(key)
            else:
                v = d.get(key)
            if not isinstance(v, list):
                v = [v]
            for item in v:
                od.add(key, item)
        href = Href(
            '/caravel/explore/{self.datasource.type}/'
            '{self.datasource.id}/'.format(**locals()))
        if for_cache_key and 'force' in od:
            # 'force' only bypasses the cache; it must not alter the key.
            del od['force']
        return href(od)

    def get_df(self, query_obj=None):
        """Returns a pandas dataframe based on the query object

        :param query_obj: optional pre-built query dict; defaults to
            ``self.query_obj()``
        :raises Exception: when the query returns no data
        """
        if not query_obj:
            query_obj = self.query_obj()

        self.error_msg = ""
        self.results = None

        timestamp_format = None
        if self.datasource.type == 'table':
            dttm_col = self.datasource.get_col(query_obj['granularity'])
            if dttm_col:
                timestamp_format = dttm_col.python_date_format

        # The datasource here can be different backend but the interface is common
        self.results = self.datasource.query(**query_obj)
        self.query = self.results.query
        df = self.results.df
        # Transform the timestamp we received from database to pandas supported
        # datetime format. If no python_date_format is specified, the pattern will
        # be considered as the default ISO date format
        # If the datetime format is unix, the parse will use the corresponding
        # parsing logic.
        if df is None or df.empty:
            raise Exception("No data, review your incantations!")
        else:
            if 'timestamp' in df.columns:
                if timestamp_format in ("epoch_s", "epoch_ms"):
                    df.timestamp = pd.to_datetime(
                        df.timestamp, utc=False)
                else:
                    df.timestamp = pd.to_datetime(
                        df.timestamp, utc=False, format=timestamp_format)
                if self.datasource.offset:
                    df.timestamp += timedelta(hours=self.datasource.offset)
        # BUG FIX: DataFrame.replace is not in-place by default; the
        # original call discarded its result, leaving +/-inf values in
        # the frame (so the subsequent fillna(0) never touched them).
        df = df.replace([np.inf, -np.inf], np.nan)
        df = df.fillna(0)
        return df

    @property
    def form(self):
        """A WTForm instance bound to the current form_data."""
        return self.form_class(**self.form_data)

    @property
    def form_class(self):
        """The WTForm class generated for this viz by FormFactory."""
        return FormFactory(self).get_form()

    def get_extra_filters(self):
        """Returns dashboard-level filters passed through the URL as a dict."""
        extra_filters = self.form_data.get('extra_filters')
        if not extra_filters:
            return {}
        return json.loads(extra_filters)

    def query_filters(self, is_having_filter=False):
        """Processes the filters for the query

        :param is_having_filter: build HAVING filters ('having_*' fields)
            instead of WHERE filters ('flt_*' fields)
        :returns: list of (column, operator, value) tuples
        """
        form_data = self.form_data
        # Building filters
        filters = []
        field_prefix = 'flt' if not is_having_filter else 'having'
        # The explore form exposes up to 9 numbered filter rows.
        for i in range(1, 10):
            col = form_data.get(field_prefix + "_col_" + str(i))
            op = form_data.get(field_prefix + "_op_" + str(i))
            eq = form_data.get(field_prefix + "_eq_" + str(i))
            if col and op and eq is not None:
                filters.append((col, op, eq))

        if is_having_filter:
            return filters

        # Extra filters (coming from dashboard)
        for col, vals in self.get_extra_filters().items():
            if not (col and vals):
                continue
            elif col in self.datasource.filterable_column_names:
                # Quote values with comma to avoid conflict
                vals = ["'{}'".format(x) if "," in x else x for x in vals]
                filters += [(col, 'in', ",".join(vals))]
        return filters

    def query_obj(self):
        """Building a query object"""
        form_data = self.form_data
        groupby = form_data.get("groupby") or []
        metrics = form_data.get("metrics") or ['count']
        extra_filters = self.get_extra_filters()
        granularity = (
            form_data.get("granularity") or form_data.get("granularity_sqla")
        )
        limit = int(form_data.get("limit", 0))
        row_limit = int(
            form_data.get("row_limit", config.get("ROW_LIMIT")))
        since = (
            extra_filters.get('__from') or form_data.get("since", "1 year ago")
        )
        from_dttm = utils.parse_human_datetime(since)
        now = datetime.now()
        if from_dttm > now:
            # A "from" in the future is mirrored back into the past.
            from_dttm = now - (from_dttm - now)
        until = extra_filters.get('__to') or form_data.get("until", "now")
        to_dttm = utils.parse_human_datetime(until)
        if from_dttm > to_dttm:
            flasher("The date range doesn't seem right.", "danger")
            from_dttm = to_dttm  # Making them identical to not raise

        # extras are used to query elements specific to a datasource type
        # for instance the extra where clause that applies only to Tables
        extras = {
            'where': form_data.get("where", ''),
            'having': form_data.get("having", ''),
            'having_druid': self.query_filters(is_having_filter=True),
            'time_grain_sqla': form_data.get("time_grain_sqla", ''),
            'druid_time_origin': form_data.get("druid_time_origin", ''),
        }
        d = {
            'granularity': granularity,
            'from_dttm': from_dttm,
            'to_dttm': to_dttm,
            'is_timeseries': self.is_timeseries,
            'groupby': groupby,
            'metrics': metrics,
            'row_limit': row_limit,
            'filter': self.query_filters(),
            'timeseries_limit': limit,
            'extras': extras,
        }
        return d

    @property
    def cache_timeout(self):
        """Cache TTL in seconds: slice > datasource > database > config."""
        if self.slice and self.slice.cache_timeout:
            return self.slice.cache_timeout
        if self.datasource.cache_timeout:
            return self.datasource.cache_timeout
        if (
                hasattr(self.datasource, 'database') and
                self.datasource.database.cache_timeout):
            return self.datasource.database.cache_timeout
        return config.get("CACHE_DEFAULT_TIMEOUT")

    def get_json(self, force=False):
        """Handles caching around the json payload retrieval

        :param force: when True, bypass the cache and recompute
        :returns: the serialized json payload (str)
        """
        cache_key = self.cache_key
        payload = None
        force = force if force else self.form_data.get('force') == 'true'
        if not force:
            payload = cache.get(cache_key)

        if payload:
            is_cached = True
            try:
                # Cached payloads are stored zlib-compressed.
                cached_data = zlib.decompress(payload)
                if PY3:
                    cached_data = cached_data.decode('utf-8')
                payload = json.loads(cached_data)
            except Exception as e:
                logging.error("Error reading cache: " +
                              utils.error_msg_from_exception(e))
                payload = None
            logging.info("Serving from cache")

        if not payload:
            is_cached = False
            cache_timeout = self.cache_timeout
            payload = {
                'cache_timeout': cache_timeout,
                'cache_key': cache_key,
                'csv_endpoint': self.csv_endpoint,
                'data': self.get_data(),
                'form_data': self.form_data,
                'json_endpoint': self.json_endpoint,
                'query': self.query,
                'standalone_endpoint': self.standalone_endpoint,
            }
            payload['cached_dttm'] = datetime.now().isoformat().split('.')[0]
            logging.info("Caching for the next {} seconds".format(
                cache_timeout))
            try:
                data = self.json_dumps(payload)
                if PY3:
                    data = bytes(data, 'utf-8')
                cache.set(
                    cache_key,
                    zlib.compress(data),
                    timeout=cache_timeout)
            except Exception as e:
                # cache.set call can fail if the backend is down or if
                # the key is too large or whatever other reasons
                logging.warning("Could not cache key {}".format(cache_key))
                logging.exception(e)
                cache.delete(cache_key)
        payload['is_cached'] = is_cached
        return self.json_dumps(payload)

    def json_dumps(self, obj):
        """Used by get_json, can be overridden to use specific switches"""
        return json.dumps(obj, default=utils.json_int_dttm_ser, ignore_nan=True)

    @property
    def data(self):
        """This is the data object serialized to the js layer"""
        content = {
            'csv_endpoint': self.csv_endpoint,
            'form_data': self.form_data,
            'json_endpoint': self.json_endpoint,
            'standalone_endpoint': self.standalone_endpoint,
            'token': self.token,
            'viz_name': self.viz_type,
            'column_formats': {
                m.metric_name: m.d3format
                for m in self.datasource.metrics
                if m.d3format
            },
        }
        return content

    def get_csv(self):
        """Renders the query results as a CSV string."""
        df = self.get_df()
        # Only emit the index when it carries information (non-default).
        include_index = not isinstance(df.index, pd.RangeIndex)
        return df.to_csv(index=include_index, encoding="utf-8")

    def get_data(self):
        """Chart payload sent to the frontend; overridden by subclasses."""
        return []

    @property
    def json_endpoint(self):
        return self.get_url(json="true")

    @property
    def cache_key(self):
        # 'force' is stripped and json pinned so equivalent requests share
        # a single cache entry.
        url = self.get_url(for_cache_key=True, json="true", force="false")
        return hashlib.md5(url.encode('utf-8')).hexdigest()

    @property
    def csv_endpoint(self):
        return self.get_url(csv="true")

    @property
    def standalone_endpoint(self):
        return self.get_url(standalone="true")

    @property
    def json_data(self):
        return json.dumps(self.data)
class TableViz(BaseViz):

    """A sortable, searchable HTML data table"""

    viz_type = "table"
    verbose_name = _("Table View")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    is_timeseries = False
    fieldsets = ({
        'label': _("GROUP BY"),
        'description': _('Use this section if you want a query that aggregates'),
        'fields': ('groupby', 'metrics')
    }, {
        'label': _("NOT GROUPED BY"),
        'description': _('Use this section if you want to query atomic rows'),
        'fields': ('all_columns', 'order_by_cols'),
    }, {
        'label': _("Options"),
        'fields': (
            'table_timestamp_format',
            'row_limit',
            ('include_search', None),
        )
    })
    form_overrides = {
        'metrics': {
            'default': [],
        },
    }

    def query_obj(self):
        """Builds the query; grouped and atomic-column modes are exclusive."""
        query = super(TableViz, self).query_obj()
        form_data = self.form_data
        all_columns = form_data.get('all_columns')
        if all_columns and (form_data.get('groupby') or form_data.get('metrics')):
            raise Exception(
                "Choose either fields to [Group By] and [Metrics] or "
                "[Columns], not both")
        if all_columns:
            query['columns'] = all_columns
            query['groupby'] = []
            query['orderby'] = [
                json.loads(t) for t in form_data.get('order_by_cols', [])]
        return query

    def get_df(self, query_obj=None):
        """Drops the timestamp column when no time granularity applies."""
        df = super(TableViz, self).get_df(query_obj)
        no_time_grain = self.form_data.get("granularity") == "all"
        if no_time_grain and 'timestamp' in df:
            del df['timestamp']
        return df

    def get_data(self):
        """Returns rows as records plus the ordered column list."""
        df = self.get_df()
        return dict(
            records=df.to_dict(orient="records"),
            columns=list(df.columns),
        )

    def json_dumps(self, obj):
        """Serializes with ISO datetimes (the table renders them as text)."""
        return json.dumps(obj, default=utils.json_iso_dttm_ser)
class PivotTableViz(BaseViz):

    """A pivot table view, define your rows, columns and metrics"""

    viz_type = "pivot_table"
    verbose_name = _("Pivot Table")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'groupby',
            'columns',
            'metrics',
            'pandas_aggfunc',
        )
    },)

    def query_obj(self):
        """Validates the pivot configuration and merges rows + columns
        into a single groupby for the underlying query."""
        query = super(PivotTableViz, self).query_obj()
        groupby = self.form_data.get('groupby') or []
        columns = self.form_data.get('columns') or []
        metrics = self.form_data.get('metrics')
        if not groupby:
            raise Exception("Please choose at least one \"Group by\" field ")
        if not metrics:
            raise Exception("Please choose at least one metric")
        # Rows and columns must be disjoint sets of fields.
        if set(groupby) & set(columns):
            raise Exception("groupby and columns can't overlap")
        query['groupby'] = list(set(groupby) | set(columns))
        return query

    def get_df(self, query_obj=None):
        """Pivots the flat query result into rows x columns of metrics."""
        df = super(PivotTableViz, self).get_df(query_obj)
        form_data = self.form_data
        if (form_data.get("granularity") == "all" and
                'timestamp' in df):
            del df['timestamp']
        return df.pivot_table(
            index=form_data.get('groupby'),
            columns=form_data.get('columns'),
            values=form_data.get('metrics'),
            aggfunc=form_data.get('pandas_aggfunc'),
            margins=True,
        )

    def get_data(self):
        """Renders the pivoted frame directly as a styled HTML table."""
        css_classes = (
            "dataframe table table-striped table-bordered "
            "table-condensed table-hover").split(" ")
        return self.get_df().to_html(na_rep='', classes=css_classes)
class MarkupViz(BaseViz):

    """Renders free-form HTML or markdown supplied by the user"""

    viz_type = "markup"
    verbose_name = _("Markup")
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': ('markup_type', 'code')
    },)

    def rendered(self):
        """Returns the user's code rendered according to its markup type."""
        code = self.form_data.get("code", '')
        markup_type = self.form_data.get("markup_type")
        if markup_type == "markdown":
            return markdown(code)
        if markup_type == "html":
            return code

    def get_data(self):
        return dict(html=self.rendered())
class SeparatorViz(MarkupViz):
    """Use to create section headers in a dashboard, similar to `Markup`"""
    viz_type = "separator"
    verbose_name = _("Separator")
    # Pre-populates the markdown editor with a section-header template so
    # the user only replaces the placeholder text. The string below is
    # runtime data rendered by MarkupViz, not documentation.
    form_overrides = {
        'code': {
            'default': (
                "####Section Title\n"
                "A paragraph describing the section"
                "of the dashboard, right before the separator line "
                "\n\n"
                "---------------"
            ),
        }
    }
class WordCloudViz(BaseViz):

    """Build a colorful word cloud

    Uses the nice library at:
    https://github.com/jasondavies/d3-cloud
    """

    viz_type = "word_cloud"
    verbose_name = _("Word Cloud")
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'series', 'metric', 'limit',
            ('size_from', 'size_to'),
            'rotation',
        )
    },)

    def query_obj(self):
        """Queries a single metric grouped by the chosen series column."""
        query = super(WordCloudViz, self).query_obj()
        form_data = self.form_data
        query['metrics'] = [form_data.get('metric')]
        query['groupby'] = [form_data.get('series')]
        return query

    def get_data(self):
        """Returns [{'text': ..., 'size': ...}] records for d3-cloud."""
        series_col = self.form_data.get('series')
        metric_col = self.form_data.get('metric')
        df = self.get_df()
        # Select the two columns in a fixed order, then relabel them to
        # the uniform json schema expected by the frontend.
        df = df[[series_col, metric_col]]
        df.columns = ['text', 'size']
        return df.to_dict(orient="records")
class TreemapViz(BaseViz):

    """Tree map visualisation for hierarchical data."""

    viz_type = "treemap"
    verbose_name = _("Treemap")
    credits = '<a href="https://d3js.org">d3.js</a>'
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics',
            'groupby',
        ),
    }, {
        'label': _('Chart Options'),
        'fields': (
            'treemap_ratio',
            'number_format',
        )
    },)

    def get_df(self, query_obj=None):
        """Indexes the frame by the groupby columns for recursive nesting."""
        df = super(TreemapViz, self).get_df(query_obj)
        return df.set_index(self.form_data.get("groupby"))

    def _nest(self, metric, df):
        """Recursively converts the (Multi)indexed frame into d3 nodes."""
        if df.index.nlevels == 1:
            # Leaf level: emit {name, value} pairs.
            return [{"name": name, "value": value}
                    for name, value in zip(df.index, df[metric])]
        # Inner level: recurse on each value of the outermost index level.
        return [{"name": level, "children": self._nest(metric, df.loc[level])}
                for level in df.index.levels[0]]

    def get_data(self):
        """One root node per metric, each holding the nested hierarchy."""
        df = self.get_df()
        return [{"name": metric, "children": self._nest(metric, df)}
                for metric in df.columns]
class CalHeatmapViz(BaseViz):

    """Calendar heatmap."""

    viz_type = "cal_heatmap"
    # BUG FIX: label was misspelled "Calender Heatmap".
    verbose_name = _("Calendar Heatmap")
    credits = (
        '<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>')
    is_timeseries = True
    fieldsets = ({
        'label': None,
        'fields': (
            'metric',
            'domain_granularity',
            'subdomain_granularity',
        ),
    },)

    # NOTE: a pass-through get_df override (which only delegated to
    # super) was removed; inheritance provides the same behavior.

    def get_data(self):
        """Builds the cal-heatmap payload: a {epoch_seconds: value} map
        plus the domain/subdomain configuration and domain count.
        """
        df = self.get_df()
        form_data = self.form_data

        df.columns = ["timestamp", "metric"]
        # pandas Timestamp.value is in nanoseconds; cal-heatmap keys on
        # epoch seconds (as strings).
        timestamps = {str(obj["timestamp"].value / 10**9):
                      obj.get("metric") for obj in df.to_dict("records")}

        start = utils.parse_human_datetime(form_data.get("since"))
        end = utils.parse_human_datetime(form_data.get("until"))
        domain = form_data.get("domain_granularity")
        diff_delta = rdelta.relativedelta(end, start)
        diff_secs = (end - start).total_seconds()

        # Number of domain units spanned by [start, end], inclusive.
        if domain == "year":
            range_ = diff_delta.years + 1
        elif domain == "month":
            range_ = diff_delta.years * 12 + diff_delta.months + 1
        elif domain == "week":
            range_ = diff_delta.years * 53 + diff_delta.weeks + 1
        elif domain == "day":
            # BUG FIX: floor-dividing float seconds yields a float
            # (e.g. 365.0); cast so the JSON payload carries an int.
            range_ = int(diff_secs // (24 * 60 * 60) + 1)
        else:
            # Default granularity: hours.
            range_ = int(diff_secs // (60 * 60) + 1)

        return {
            "timestamps": timestamps,
            "start": start,
            "domain": domain,
            "subdomain": form_data.get("subdomain_granularity"),
            "range": range_,
        }

    def query_obj(self):
        """Queries the single configured metric as a timeseries."""
        qry = super(CalHeatmapViz, self).query_obj()
        qry["metrics"] = [self.form_data["metric"]]
        return qry
class NVD3Viz(BaseViz):
    """Base class for all nvd3 vizs"""
    # Shared attribution footer for every NVD3-backed chart.
    credits = '<a href="http://nvd3.org/">NVD3.org</a>'
    # Abstract base: concrete subclasses set a real viz_type/verbose_name.
    viz_type = None
    verbose_name = "Base NVD3 Viz"
    is_timeseries = False
class BoxPlotViz(NVD3Viz):
    """Box plot viz from ND3

    Aggregates each group into the five box-plot statistics (Q1, Q2/median,
    Q3, whisker high/low) plus the set of outliers, then reshapes the
    result into the series format NVD3's boxPlotChart expects.
    """
    viz_type = "box_plot"
    verbose_name = _("Box Plot")
    sort_series = False
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics',
            'groupby', 'limit',
        ),
    }, {
        'label': _('Chart Options'),
        'fields': (
            'whisker_options',
        )
    },)
    def get_df(self, query_obj=None):
        """Groups the raw frame and aggregates each metric into box stats.

        The whisker functions are chosen (as closures) from the
        'whisker_options' form field before aggregation.
        """
        form_data = self.form_data
        df = super(BoxPlotViz, self).get_df(query_obj)
        df = df.fillna(0)
        # conform to NVD3 names
        def Q1(series):  # need to be named functions - can't use lambdas
            return np.percentile(series, 25)
        def Q3(series):
            return np.percentile(series, 75)
        # NOTE(review): if whisker_options is missing (None), the
        # `" percentiles" in whisker_type` test below raises TypeError
        # before reaching the ValueError — confirm the form guarantees
        # a value.
        whisker_type = form_data.get('whisker_options')
        if whisker_type == "Tukey":
            # Tukey whiskers: the data point closest to (but inside)
            # Q3 + 1.5*IQR / Q1 - 1.5*IQR.
            def whisker_high(series):
                upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
                series = series[series <= upper_outer_lim]
                return series[np.abs(series - upper_outer_lim).argmin()]
            def whisker_low(series):
                lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
                # find the closest value above the lower outer limit
                series = series[series >= lower_outer_lim]
                return series[np.abs(series - lower_outer_lim).argmin()]
        elif whisker_type == "Min/max (no outliers)":
            def whisker_high(series):
                return series.max()
            def whisker_low(series):
                return series.min()
        elif " percentiles" in whisker_type:
            # e.g. "5/95 percentiles" -> low=5, high=95.
            low, high = whisker_type.replace(" percentiles", "").split("/")
            def whisker_high(series):
                return np.percentile(series, int(high))
            def whisker_low(series):
                return np.percentile(series, int(low))
        else:
            raise ValueError("Unknown whisker type: {}".format(whisker_type))
        def outliers(series):
            # Everything beyond the whiskers is an outlier.
            above = series[series > whisker_high(series)]
            below = series[series < whisker_low(series)]
            # pandas sometimes doesn't like getting lists back here
            return set(above.tolist() + below.tolist())
        aggregate = [Q1, np.median, Q3, whisker_high, whisker_low, outliers]
        df = df.groupby(form_data.get('groupby')).agg(aggregate)
        return df
    def to_series(self, df, classed='', title_suffix=''):
        """Reshapes the aggregated frame into NVD3 box-plot series.

        Each row index becomes a label; the (metric, stat) column pairs
        become the per-box values, with 'median' renamed to 'Q2'.
        """
        label_sep = " - "
        chart_data = []
        for index_value, row in zip(df.index, df.to_dict(orient="records")):
            if isinstance(index_value, tuple):
                index_value = label_sep.join(index_value)
            boxes = defaultdict(dict)
            for (label, key), value in row.items():
                if key == "median":
                    key = "Q2"
                boxes[label][key] = value
            for label, box in boxes.items():
                if len(self.form_data.get("metrics")) > 1:
                    # need to render data labels with metrics
                    chart_label = label_sep.join([index_value, label])
                else:
                    chart_label = index_value
                chart_data.append({
                    "label": chart_label,
                    "values": box,
                })
        return chart_data
    def get_data(self):
        """Returns the NVD3-formatted box-plot series."""
        df = self.get_df()
        chart_data = self.to_series(df)
        return chart_data
class BubbleViz(NVD3Viz):

    """Based on the NVD3 bubble chart"""

    viz_type = "bubble"
    verbose_name = _("Bubble Chart")
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'series', 'entity',
            'x', 'y',
            'size', 'limit',
        )
    }, {
        'label': _('Chart Options'),
        'fields': (
            ('x_log_scale', 'y_log_scale'),
            ('show_legend', None),
            'max_bubble_size',
            ('x_axis_label', 'y_axis_label'),
        )
    },)

    def query_obj(self):
        """Queries x/y/size metrics grouped by series and entity."""
        form_data = self.form_data
        query = super(BubbleViz, self).query_obj()
        # De-duplicate series/entity (they may be the same column).
        query['groupby'] = list({
            form_data.get('series'),
            form_data.get('entity')
        })
        self.x_metric = form_data.get('x')
        self.y_metric = form_data.get('y')
        self.z_metric = form_data.get('size')
        self.entity = form_data.get('entity')
        self.series = form_data.get('series')
        query['metrics'] = [
            self.z_metric,
            self.x_metric,
            self.y_metric,
        ]
        if not all(query['metrics'] + [self.entity, self.series]):
            raise Exception("Pick a metric for x, y and size")
        return query

    def get_df(self, query_obj=None):
        """Adds the x/y/size/shape/group columns NVD3 expects."""
        df = super(BubbleViz, self).get_df(query_obj)
        df = df.fillna(0)
        df['x'] = df[[self.x_metric]]
        df['y'] = df[[self.y_metric]]
        df['size'] = df[[self.z_metric]]
        df['shape'] = 'circle'
        df['group'] = df[[self.series]]
        return df

    def get_data(self):
        """Buckets records by series ('group') into NVD3 key/values pairs."""
        df = self.get_df()
        grouped = defaultdict(list)
        for record in df.to_dict(orient='records'):
            grouped[record['group']].append(record)
        return [{'key': name, 'values': members}
                for name, members in grouped.items()]
class BigNumberViz(BaseViz):

    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number"
    verbose_name = _("Big Number with Trendline")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    is_timeseries = True
    fieldsets = ({
        'label': None,
        'fields': (
            'metric',
            'compare_lag',
            'compare_suffix',
            'y_axis_format',
        )
    },)
    form_overrides = {
        'y_axis_format': {
            'label': _('Number format'),
        }
    }

    def reassignments(self):
        """Falls back to the legacy 'metrics' field when 'metric' is unset."""
        if not self.form_data.get('metric'):
            self.form_data['metric'] = self.orig_form_data.get('metrics')

    def query_obj(self):
        """Queries the single configured metric as a timeseries."""
        query = super(BigNumberViz, self).query_obj()
        metric = self.form_data.get('metric')
        if not metric:
            raise Exception("Pick a metric!")
        query['metrics'] = [metric]
        self.form_data['metric'] = metric
        return query

    def get_data(self):
        """Returns the time-ordered values plus comparison settings."""
        form_data = self.form_data
        df = self.get_df()
        df.sort_values(by=df.columns[0], inplace=True)
        lag = form_data.get("compare_lag", "")
        # Non-numeric / empty lag values fall back to no comparison.
        lag = int(lag) if lag and lag.isdigit() else 0
        return {
            'data': df.values.tolist(),
            'compare_lag': lag,
            'compare_suffix': form_data.get('compare_suffix', ''),
        }
class BigNumberTotalViz(BaseViz):

    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number_total"
    verbose_name = _("Big Number")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'metric',
            'subheader',
            'y_axis_format',
        )
    },)
    form_overrides = {
        'y_axis_format': {
            'label': _('Number format'),
        }
    }

    def reassignments(self):
        """Falls back to the legacy 'metrics' field when 'metric' is unset."""
        if not self.form_data.get('metric'):
            self.form_data['metric'] = self.orig_form_data.get('metrics')

    def query_obj(self):
        """Queries the single configured metric (no time dimension)."""
        query = super(BigNumberTotalViz, self).query_obj()
        metric = self.form_data.get('metric')
        if not metric:
            raise Exception("Pick a metric!")
        query['metrics'] = [metric]
        self.form_data['metric'] = metric
        return query

    def get_data(self):
        """Returns the sorted values plus the configured subheader."""
        form_data = self.form_data
        df = self.get_df()
        df.sort_values(by=df.columns[0], inplace=True)
        return {
            'data': df.values.tolist(),
            'subheader': form_data.get('subheader', ''),
        }
class NVD3TimeSeriesViz(NVD3Viz):
    """A rich line chart component with tons of options"""
    viz_type = "line"
    verbose_name = _("Time Series - Line Chart")
    # Subclasses (bar, area) flip this to order series by their totals.
    sort_series = False
    is_timeseries = True
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics',
            'groupby', 'limit',
        ),
    }, {
        'label': _('Chart Options'),
        'fields': (
            ('show_brush', 'show_legend'),
            ('rich_tooltip', 'y_axis_zero'),
            ('y_log_scale', 'contribution'),
            ('show_markers', 'x_axis_showminmax'),
            ('line_interpolation', None),
            ('x_axis_format', 'y_axis_format'),
            ('x_axis_label', 'y_axis_label'),
        ),
    }, {
        'label': _('Advanced Analytics'),
        'description': _(
            "This section contains options "
            "that allow for advanced analytical post processing "
            "of query results"),
        'fields': (
            ('rolling_type', 'rolling_periods'),
            'time_compare',
            ('num_period_compare', 'period_ratio_type'),
            None,
            ('resample_how', 'resample_rule',), 'resample_fillmethod'
        ),
    },)

    def get_df(self, query_obj=None):
        """Fetch and post-process the frame for time-series rendering.

        Processing order matters: pivot -> resample -> sort ->
        contribution -> period comparison -> rolling window.
        """
        form_data = self.form_data
        df = super(NVD3TimeSeriesViz, self).get_df(query_obj)
        df = df.fillna(0)
        if form_data.get("granularity") == "all":
            raise Exception("Pick a time granularity for your time series")
        # One column per (metric, groupby-values) combination,
        # indexed by timestamp.
        df = df.pivot_table(
            index="timestamp",
            columns=form_data.get('groupby'),
            values=form_data.get('metrics'))
        fm = form_data.get("resample_fillmethod")
        if not fm:
            # Normalize falsy form values ('' etc.) to None for pandas.
            fm = None
        how = form_data.get("resample_how")
        rule = form_data.get("resample_rule")
        if how and rule:
            # NOTE(review): resample(how=..., fill_method=...) is the legacy
            # pandas resample signature -- confirm the pinned pandas version.
            df = df.resample(rule, how=how, fill_method=fm)
            if not fm:
                df = df.fillna(0)
        if self.sort_series:
            # Reorder columns by descending series total.
            dfs = df.sum()
            dfs.sort_values(ascending=False, inplace=True)
            df = df[dfs.index]
        if form_data.get("contribution"):
            # Express each point as its share of the row total.
            dft = df.T
            df = (dft / dft.sum()).T
        num_period_compare = form_data.get("num_period_compare")
        if num_period_compare:
            num_period_compare = int(num_period_compare)
            prt = form_data.get('period_ratio_type')
            if prt and prt == 'growth':
                df = (df / df.shift(num_period_compare)) - 1
            elif prt and prt == 'value':
                df = df - df.shift(num_period_compare)
            else:
                # Default: plain ratio against the shifted period.
                df = df / df.shift(num_period_compare)
            # Drop the leading rows that have no comparison period.
            df = df[num_period_compare:]
        rolling_periods = form_data.get("rolling_periods")
        rolling_type = form_data.get("rolling_type")
        if rolling_type in ('mean', 'std', 'sum') and rolling_periods:
            # NOTE(review): pd.rolling_* are the pre-0.18 pandas module-level
            # helpers -- confirm the pinned pandas version supports them.
            if rolling_type == 'mean':
                df = pd.rolling_mean(df, int(rolling_periods), min_periods=0)
            elif rolling_type == 'std':
                df = pd.rolling_std(df, int(rolling_periods), min_periods=0)
            elif rolling_type == 'sum':
                df = pd.rolling_sum(df, int(rolling_periods), min_periods=0)
        elif rolling_type == 'cumsum':
            # cumsum needs no window size, hence the separate branch.
            df = df.cumsum()
        return df

    def to_series(self, df, classed='', title_suffix=''):
        """Convert the pivoted frame into a list of NVD3 series dicts."""
        # Replace unlabelable column names so they render in the legend.
        cols = []
        for col in df.columns:
            if col == '':
                cols.append('N/A')
            elif col is None:
                cols.append('NULL')
            else:
                cols.append(col)
        df.columns = cols
        series = df.to_dict('series')
        chart_data = []
        for name in df.T.index.tolist():
            ys = series[name]
            if df[name].dtype.kind not in "biufc":
                # Skip non-numeric columns.
                continue
            df['timestamp'] = pd.to_datetime(df.index, utc=False)
            if isinstance(name, string_types):
                series_title = name
            else:
                # Tuple column name (metric, group values): stringify parts.
                name = ["{}".format(s) for s in name]
                if len(self.form_data.get('metrics')) > 1:
                    series_title = ", ".join(name)
                else:
                    # Single metric: drop the redundant metric prefix.
                    series_title = ", ".join(name[1:])
            if title_suffix:
                series_title += title_suffix
            d = {
                "key": series_title,
                "classed": classed,
                "values": [
                    {'x': ds, 'y': ys[ds] if ds in ys else None}
                    for ds in df.timestamp
                ],
            }
            chart_data.append(d)
        return chart_data

    def get_data(self):
        """Build the chart payload, optionally overlaying a shifted series."""
        df = self.get_df()
        chart_data = self.to_series(df)
        time_compare = self.form_data.get('time_compare')
        if time_compare:
            # Run the same query shifted back by the requested delta and
            # overlay it, then shift its index forward so both align.
            query_object = self.query_obj()
            delta = utils.parse_human_timedelta(time_compare)
            query_object['inner_from_dttm'] = query_object['from_dttm']
            query_object['inner_to_dttm'] = query_object['to_dttm']
            query_object['from_dttm'] -= delta
            query_object['to_dttm'] -= delta
            df2 = self.get_df(query_object)
            df2.index += delta
            chart_data += self.to_series(
                df2, classed='caravel', title_suffix="---")
            chart_data = sorted(chart_data, key=lambda x: x['key'])
        return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
    """A bar chart where the x axis is time"""
    viz_type = "bar"
    # Bars read better with series ordered by their totals.
    sort_series = True
    verbose_name = _("Time Series - Bar Chart")
    # Reuse the parent's data and advanced-analytics panels; only the
    # chart-options panel is replaced with bar-specific fields.
    fieldsets = [NVD3TimeSeriesViz.fieldsets[0]] + [{
        'label': _('Chart Options'),
        'fields': (
            ('show_brush', 'show_legend', 'show_bar_value'),
            ('rich_tooltip', 'y_axis_zero'),
            ('y_log_scale', 'contribution'),
            ('x_axis_format', 'y_axis_format'),
            ('line_interpolation', 'bar_stacked'),
            ('x_axis_showminmax', 'bottom_margin'),
            ('x_axis_label', 'y_axis_label'),
            ('reduce_x_ticks', 'show_controls'),
        ), }] + [NVD3TimeSeriesViz.fieldsets[2]]
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
    """A line chart component where you can compare the % change over time"""
    # Inherits all querying/transform behavior; only the frontend renderer
    # (selected via viz_type) differs.
    viz_type = 'compare'
    verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
    """A rich stack area chart"""
    viz_type = "area"
    verbose_name = _("Time Series - Stacked")
    # Stacked areas read better with series ordered by their totals.
    sort_series = True
    # Reuse the parent's data and advanced-analytics panels; only the
    # chart-options panel is replaced with area-specific fields.
    fieldsets = [NVD3TimeSeriesViz.fieldsets[0]] + [{
        'label': _('Chart Options'),
        'fields': (
            ('show_brush', 'show_legend'),
            ('rich_tooltip', 'y_axis_zero'),
            ('y_log_scale', 'contribution'),
            ('x_axis_format', 'y_axis_format'),
            ('x_axis_showminmax', 'show_controls'),
            ('line_interpolation', 'stacked_style'),
        ), }] + [NVD3TimeSeriesViz.fieldsets[2]]
class DistributionPieViz(NVD3Viz):
    """Annoy visualization snobs with this controversial pie chart"""
    viz_type = "pie"
    verbose_name = _("Distribution - NVD3 - Pie Chart")
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics', 'groupby',
            'limit',
            'pie_label_type',
            ('donut', 'show_legend'),
            'labels_outside',
        )
    },)

    def query_obj(self):
        """Build the query object; pie charts never query a time series."""
        query = super(DistributionPieViz, self).query_obj()
        query['is_timeseries'] = False
        return query

    def get_df(self, query_obj=None):
        """Pivot the result set and order slices by the first metric."""
        base_df = super(DistributionPieViz, self).get_df(query_obj)
        metric = self.metrics[0]
        pivoted = base_df.pivot_table(
            index=self.groupby,
            values=[metric])
        pivoted.sort_values(by=metric, ascending=False, inplace=True)
        return pivoted

    def get_data(self):
        """Return [{'x': label, 'y': value}, ...] records for NVD3."""
        df = self.get_df().reset_index()
        df.columns = ['x', 'y']
        return df.to_dict(orient="records")
class HistogramViz(BaseViz):
    """Histogram over a single numeric column."""
    viz_type = "histogram"
    verbose_name = _("Histogram")
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            ('all_columns_x',),
            'row_limit',
        )
    }, {
        'label': _("Histogram Options"),
        'fields': (
            'link_length',
        )
    },)
    form_overrides = {
        'all_columns_x': {
            'label': _('Numeric Column'),
            'description': _("Select the numeric column to draw the histogram"),
        },
        'link_length': {
            'label': _("No of Bins"),
            'description': _("Select number of bins for the histogram"),
            'default': 5
        }
    }

    def query_obj(self):
        """Returns the query object for this visualization.

        Raises when no numeric column was selected; the single selected
        column is the only one fetched.
        """
        d = super(HistogramViz, self).query_obj()
        d['row_limit'] = self.form_data.get('row_limit', int(config.get('ROW_LIMIT')))
        numeric_column = self.form_data.get('all_columns_x')
        if numeric_column is None:
            raise Exception("Must have one numeric column specified")
        d['columns'] = [numeric_column]
        return d

    def get_df(self, query_obj=None):
        """Returns a pandas dataframe based on the query object."""
        if not query_obj:
            query_obj = self.query_obj()
        self.results = self.datasource.query(**query_obj)
        self.query = self.results.query
        df = self.results.df
        if df is None or df.empty:
            raise Exception("No data, to build histogram")
        # BUG FIX: DataFrame.replace returns a new frame; the previous code
        # discarded the result, so +/-inf values leaked into the chart data.
        df = df.replace([np.inf, -np.inf], np.nan)
        df = df.fillna(0)
        return df

    def get_data(self):
        """Returns the chart data: the raw values of the numeric column."""
        df = self.get_df()
        chart_data = df[df.columns[0]].values.tolist()
        return chart_data
class DistributionBarViz(DistributionPieViz):
    """A good old bar chart"""
    viz_type = "dist_bar"
    verbose_name = _("Distribution - Bar Chart")
    is_timeseries = False
    fieldsets = ({
        'label': _('Chart Options'),
        'fields': (
            'groupby',
            'columns',
            'metrics',
            'row_limit',
            ('show_legend', 'show_bar_value', 'bar_stacked'),
            ('y_axis_format', 'bottom_margin'),
            ('x_axis_label', 'y_axis_label'),
            ('reduce_x_ticks', 'contribution'),
            ('show_controls', None),
        )
    },)
    form_overrides = {
        'groupby': {
            'label': _('Series'),
        },
        'columns': {
            'label': _('Breakdowns'),
            'description': _("Defines how each series is broken down"),
        },
    }

    def query_obj(self):
        """Group by the union of Series and Breakdown columns.

        Deliberately calls the grandparent's query_obj (note the explicit
        ``DistributionPieViz`` super target) to skip the pie-specific one.
        """
        d = super(DistributionPieViz, self).query_obj()  # noqa
        fd = self.form_data
        d['is_timeseries'] = False
        gb = fd.get('groupby') or []
        cols = fd.get('columns') or []
        # NOTE(review): a set is unordered, so the group-by column order in
        # the generated query is not stable -- confirm downstream code does
        # not rely on ordering.
        d['groupby'] = set(gb + cols)
        if len(d['groupby']) < len(gb) + len(cols):
            raise Exception("Can't have overlap between Series and Breakdowns")
        if not self.metrics:
            raise Exception("Pick at least one metric")
        if not self.groupby:
            raise Exception("Pick at least one field for [Series]")
        return d

    def get_df(self, query_obj=None):
        """Pivot the result; order series by the first metric's total."""
        df = super(DistributionPieViz, self).get_df(query_obj)  # noqa
        fd = self.form_data
        # Per-group totals, used only to define the final row ordering.
        row = df.groupby(self.groupby).sum()[self.metrics[0]].copy()
        row.sort_values(ascending=False, inplace=True)
        columns = fd.get('columns') or []
        pt = df.pivot_table(
            index=self.groupby,
            columns=columns,
            values=self.metrics)
        if fd.get("contribution"):
            # Normalize each column so bars show shares instead of values.
            pt = pt.fillna(0)
            pt = pt.T
            pt = (pt / pt.sum()).T
        pt = pt.reindex(row.index)
        return pt

    def get_data(self):
        """Convert the pivoted frame into NVD3 {key, values} series."""
        df = self.get_df()
        chart_data = []
        for name, ys in df.iteritems():
            if df[name].dtype.kind not in "biufc":
                # Skip non-numeric columns.
                continue
            if isinstance(name, string_types):
                series_title = name
            elif len(self.metrics) > 1:
                series_title = ", ".join(name)
            else:
                # Single metric: drop the redundant metric prefix.
                l = [str(s) for s in name[1:]]
                series_title = ", ".join(l)
            d = {
                "key": series_title,
                "values": [
                    {'x': i, 'y': v}
                    for i, v in ys.iteritems()]
            }
            chart_data.append(d)
        return chart_data
class SunburstViz(BaseViz):
    """A multi level sunburst chart"""
    viz_type = "sunburst"
    verbose_name = _("Sunburst")
    is_timeseries = False
    credits = (
        'Kerry Rodden '
        '@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>')
    fieldsets = ({
        'label': None,
        'fields': (
            'groupby',
            'metric', 'secondary_metric',
            'row_limit',
        )
    },)
    form_overrides = {
        'metric': {
            'label': _('Primary Metric'),
            'description': _(
                "The primary metric is used to "
                "define the arc segment sizes"),
        },
        'secondary_metric': {
            'label': _('Secondary Metric'),
            'description': _(
                "This secondary metric is used to "
                "define the color as a ratio against the primary metric. "
                "If the two metrics match, color is mapped level groups"),
        },
        'groupby': {
            'label': _('Hierarchy'),
            'description': _("This defines the level of the hierarchy"),
        },
    }

    def get_df(self, query_obj=None):
        """Return the queried frame unchanged (hook kept for symmetry)."""
        df = super(SunburstViz, self).get_df(query_obj)
        return df

    def get_data(self):
        """Return rows as [hierarchy..., m1, m2] value lists for d3."""
        df = self.get_df()
        # if m1 == m2 duplicate the metric column
        cols = self.form_data.get('groupby')
        metric = self.form_data.get('metric')
        secondary_metric = self.form_data.get('secondary_metric')
        if metric == secondary_metric:
            ndf = df
            ndf.columns = [cols + ['m1', 'm2']]
        else:
            # BUG FIX: previously `cols += [...]` extended the list held in
            # form_data['groupby'] in place, corrupting the form data for
            # any later use; build a new list instead.
            cols = cols + [
                self.form_data['metric'], self.form_data['secondary_metric']]
            ndf = df[cols]
        return json.loads(ndf.to_json(orient="values"))  # TODO fix this nonsense

    def query_obj(self):
        """Query both the primary and secondary metric."""
        qry = super(SunburstViz, self).query_obj()
        qry['metrics'] = [
            self.form_data['metric'], self.form_data['secondary_metric']]
        return qry
class SankeyViz(BaseViz):
    """A Sankey diagram that requires a parent-child dataset"""
    viz_type = "sankey"
    verbose_name = _("Sankey")
    is_timeseries = False
    credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
    fieldsets = ({
        'label': None,
        'fields': (
            'groupby',
            'metric',
            'row_limit',
        )
    },)
    form_overrides = {
        'groupby': {
            'label': _('Source / Target'),
            'description': _("Choose a source and a target"),
        },
    }

    def query_obj(self):
        """Validate that exactly two columns (source, target) were picked."""
        qry = super(SankeyViz, self).query_obj()
        if len(qry['groupby']) != 2:
            raise Exception("Pick exactly 2 columns as [Source / Target]")
        qry['metrics'] = [
            self.form_data['metric']]
        return qry

    def get_data(self):
        """Return source/target/value records, rejecting cyclic graphs."""
        df = self.get_df()
        df.columns = ['source', 'target', 'value']
        recs = df.to_dict(orient='records')
        # Adjacency map: source -> set of targets.
        hierarchy = defaultdict(set)
        for row in recs:
            hierarchy[row['source']].add(row['target'])

        def find_cycle(g):
            """Whether there's a cycle in a directed graph"""
            path = set()

            # Depth-first search; returns the offending edge when the walk
            # revisits a vertex on the current path, else None (implicitly).
            def visit(vertex):
                path.add(vertex)
                for neighbour in g.get(vertex, ()):
                    if neighbour in path or visit(neighbour):
                        return (vertex, neighbour)
                path.remove(vertex)

            for v in g:
                cycle = visit(v)
                if cycle:
                    return cycle

        cycle = find_cycle(hierarchy)
        if cycle:
            raise Exception(
                "There's a loop in your Sankey, please provide a tree. "
                "Here's a faulty link: {}".format(cycle))
        return recs
class DirectedForceViz(BaseViz):
    """An animated directed force layout graph visualization"""
    viz_type = "directed_force"
    verbose_name = _("Directed Force Layout")
    credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'groupby',
            'metric',
            'row_limit',
        )
    }, {
        'label': _('Force Layout'),
        'fields': (
            'link_length',
            'charge',
        )
    },)
    form_overrides = {
        'groupby': {
            'label': _('Source / Target'),
            'description': _("Choose a source and a target"),
        },
    }

    def query_obj(self):
        """Require exactly two group-by columns (source and target)."""
        query = super(DirectedForceViz, self).query_obj()
        groupby = self.form_data['groupby']
        if len(groupby) != 2:
            raise Exception("Pick exactly 2 columns to 'Group By'")
        query['metrics'] = [self.form_data['metric']]
        return query

    def get_data(self):
        """Emit source/target/value records for the d3 force layout."""
        df = self.get_df()
        df.columns = ['source', 'target', 'value']
        return df.to_dict(orient='records')
class WorldMapViz(BaseViz):
    """A country centric world map"""
    viz_type = "world_map"
    verbose_name = _("World Map")
    is_timeseries = False
    credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
    fieldsets = ({
        'label': None,
        'fields': (
            'entity',
            'country_fieldtype',
            'metric',
        )
    }, {
        'label': _('Bubbles'),
        'fields': (
            ('show_bubbles', None),
            'secondary_metric',
            'max_bubble_size',
        )
    })
    form_overrides = {
        'entity': {
            'label': _('Country Field'),
            'description': _("3 letter code of the country"),
        },
        'metric': {
            'label': _('Metric for color'),
            'description': _("Metric that defines the color of the country"),
        },
        'secondary_metric': {
            'label': _('Bubble size'),
            'description': _("Metric that defines the size of the bubble"),
        },
    }

    def query_obj(self):
        """Group by the country column; fetch color and bubble metrics."""
        qry = super(WorldMapViz, self).query_obj()
        qry['metrics'] = [
            self.form_data['metric'], self.form_data['secondary_metric']]
        qry['groupby'] = [self.form_data['entity']]
        return qry

    def get_data(self):
        """Resolve country codes and emit country/m1/m2/lat/lng records."""
        from caravel.data import countries
        df = self.get_df()
        cols = [self.form_data.get('entity')]
        metric = self.form_data.get('metric')
        secondary_metric = self.form_data.get('secondary_metric')
        if metric == secondary_metric:
            ndf = df[cols]
            # df[metric] will be a DataFrame
            # because there are duplicate column names
            ndf['m1'] = df[metric].iloc[:, 0]
            ndf['m2'] = ndf['m1']
        else:
            cols += [metric, secondary_metric]
            ndf = df[cols]
        df = ndf
        df.columns = ['country', 'm1', 'm2']
        d = df.to_dict(orient='records')
        for row in d:
            country = None
            if isinstance(row['country'], string_types):
                country = countries.get(
                    self.form_data.get('country_fieldtype'), row['country'])
            if country:
                # Normalize to the 3-letter code and attach coordinates.
                row['country'] = country['cca3']
                row['latitude'] = country['lat']
                row['longitude'] = country['lng']
                row['name'] = country['name']
            else:
                # Sentinel for unresolvable countries; skipped by the map.
                row['country'] = "XXX"
        return d
class FilterBoxViz(BaseViz):
    """A multi filter, multi-choice filter box to make dashboards interactive"""
    viz_type = "filter_box"
    verbose_name = _("Filters")
    is_timeseries = False
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    fieldsets = ({
        'label': None,
        'fields': (
            ('date_filter', None),
            'groupby',
            'metric',
        )
    },)
    form_overrides = {
        'groupby': {
            'label': _('Filter fields'),
            'description': _("The fields you want to filter on"),
        },
    }

    def query_obj(self):
        """Require at least one filter field unless the date filter is on."""
        query = super(FilterBoxViz, self).query_obj()
        groupby = self.form_data.get('groupby')
        if len(groupby) < 1 and not self.form_data.get('date_filter'):
            raise Exception("Pick at least one filter field")
        query['metrics'] = [self.form_data['metric']]
        return query

    def get_data(self):
        """Run one query per filter field; map each field to its options."""
        query = self.query_obj()
        results = {}
        for field in self.form_data['groupby']:
            # Reuse the same query object, swapping the group-by column.
            query['groupby'] = [field]
            df = super(FilterBoxViz, self).get_df(query)
            options = []
            for row in df.itertuples(index=False):
                options.append({
                    'id': row[0],
                    'text': row[0],
                    'filter': field,
                    'metric': row[1]})
            results[field] = options
        return results
class IFrameViz(BaseViz):
    """You can squeeze just about anything in this iFrame component"""
    viz_type = "iframe"
    verbose_name = _("iFrame")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    is_timeseries = False
    # Purely declarative: the frontend renders the URL directly, so no
    # query/data methods are overridden here.
    fieldsets = ({
        'label': None,
        'fields': ('url',)
    },)
class ParallelCoordinatesViz(BaseViz):
    """Interactive parallel coordinate implementation

    Uses this amazing javascript library
    https://github.com/syntagmatic/parallel-coordinates
    """
    viz_type = "para"
    verbose_name = _("Parallel Coordinates")
    credits = (
        '<a href="https://syntagmatic.github.io/parallel-coordinates/">'
        'Syntagmatic\'s library</a>')
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'series',
            'metrics',
            'secondary_metric',
            'limit',
            ('show_datatable', 'include_series'),
        )
    },)

    def query_obj(self):
        """Query every metric plus the secondary metric, grouped by series."""
        query = super(ParallelCoordinatesViz, self).query_obj()
        form_data = self.form_data
        # Copy so the secondary metric is not appended to the form data list.
        metrics = copy.copy(form_data.get('metrics'))
        secondary = form_data.get('secondary_metric')
        if secondary not in metrics:
            metrics.append(secondary)
        query['metrics'] = metrics
        query['groupby'] = [form_data.get('series')]
        return query

    def get_data(self):
        """Return the raw rows as records for the parallel-coordinates lib."""
        return self.get_df().to_dict(orient="records")
class HeatmapViz(BaseViz):
    """A nice heatmap visualization that support high density through canvas"""
    viz_type = "heatmap"
    verbose_name = _("Heatmap")
    is_timeseries = False
    credits = (
        'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
        'bl.ocks.org</a>')
    fieldsets = ({
        'label': None,
        'fields': (
            'all_columns_x',
            'all_columns_y',
            'metric',
        )
    }, {
        'label': _('Heatmap Options'),
        'fields': (
            'linear_color_scheme',
            ('xscale_interval', 'yscale_interval'),
            'canvas_image_rendering',
            'normalize_across',
        )
    },)

    def query_obj(self):
        """Group by the two axis columns; fetch the single color metric."""
        d = super(HeatmapViz, self).query_obj()
        fd = self.form_data
        d['metrics'] = [fd.get('metric')]
        d['groupby'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
        return d

    def get_data(self):
        """Return x/y/v records plus a 'perc' column normalized per the
        'normalize_across' setting (whole heatmap, or per x/y group)."""
        df = self.get_df()
        fd = self.form_data
        x = fd.get('all_columns_x')
        y = fd.get('all_columns_y')
        v = fd.get('metric')
        if x == y:
            # Same column on both axes: the frame already has the right shape.
            df.columns = ['x', 'y', 'v']
        else:
            df = df[[x, y, v]]
            df.columns = ['x', 'y', 'v']
        norm = fd.get('normalize_across')
        overall = False
        if norm == 'heatmap':
            overall = True
        else:
            # Normalize within each x (or y) group; fall back to the whole
            # heatmap when there is at most one group.
            gb = df.groupby(norm, group_keys=False)
            if len(gb) <= 1:
                overall = True
            else:
                df['perc'] = (
                    gb.apply(
                        lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min()))
                )
        if overall:
            # Min-max scale across the entire frame.
            v = df.v
            min_ = v.min()
            df['perc'] = (v - min_) / (v.max() - min_)
        return df.to_dict(orient="records")
class HorizonViz(NVD3TimeSeriesViz):
    """Horizon chart

    https://www.npmjs.com/package/d3-horizon-chart
    """
    viz_type = "horizon"
    verbose_name = _("Horizon Charts")
    credits = (
        '<a href="https://www.npmjs.com/package/d3-horizon-chart">'
        'd3-horizon-chart</a>')
    # Reuse the time-series data panel; replace the chart-options panel
    # with the two horizon-specific fields. All query/transform behavior
    # is inherited from NVD3TimeSeriesViz.
    fieldsets = [NVD3TimeSeriesViz.fieldsets[0]] + [{
        'label': _('Chart Options'),
        'fields': (
            ('series_height', 'horizon_color_scale'),
        ), }]
class MapboxViz(BaseViz):
    """Rich maps made with Mapbox"""
    viz_type = "mapbox"
    verbose_name = _("Mapbox")
    is_timeseries = False
    credits = (
        '<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>')
    fieldsets = ({
        'label': None,
        'fields': (
            ('all_columns_x', 'all_columns_y'),
            'clustering_radius',
            'row_limit',
            'groupby',
            'render_while_dragging',
        )
    }, {
        'label': _('Points'),
        'fields': (
            'point_radius',
            'point_radius_unit',
        )
    }, {
        'label': _('Labelling'),
        'fields': (
            'mapbox_label',
            'pandas_aggfunc',
        )
    }, {
        'label': _('Visual Tweaks'),
        'fields': (
            'mapbox_style',
            'global_opacity',
            'mapbox_color',
        )
    }, {
        'label': _('Viewport'),
        'fields': (
            'viewport_longitude',
            'viewport_latitude',
            'viewport_zoom',
        )
    },)
    form_overrides = {
        'all_columns_x': {
            'label': _('Longitude'),
            'description': _("Column containing longitude data"),
        },
        'all_columns_y': {
            'label': _('Latitude'),
            'description': _("Column containing latitude data"),
        },
        'pandas_aggfunc': {
            'label': _('Cluster label aggregator'),
            'description': _(
                "Aggregate function applied to the list of points "
                "in each cluster to produce the cluster label."),
        },
        'rich_tooltip': {
            'label': _('Tooltip'),
            'description': _(
                "Show a tooltip when hovering over points and clusters "
                "describing the label"),
        },
        'groupby': {
            'description': _(
                "One or many fields to group by. If grouping, latitude "
                "and longitude columns must be present."),
        },
    }

    def query_obj(self):
        """Build the query object and validate the column configuration.

        Without a group-by, lon/lat (and optionally label/radius) are
        fetched as raw columns; with a group-by, every referenced column
        must itself be part of the group-by.
        """
        d = super(MapboxViz, self).query_obj()
        fd = self.form_data
        label_col = fd.get('mapbox_label')
        if not fd.get('groupby'):
            d['columns'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
            if label_col and len(label_col) >= 1:
                if label_col[0] == "count":
                    # 'count' only makes sense as an aggregate of a group-by.
                    raise Exception(
                        "Must have a [Group By] column to have 'count' as the [Label]")
                d['columns'].append(label_col[0])
            if fd.get('point_radius') != 'Auto':
                d['columns'].append(fd.get('point_radius'))
            # De-duplicate (e.g. label column == radius column).
            d['columns'] = list(set(d['columns']))
        else:
            # Ensuring columns chosen are all in group by
            if (label_col and len(label_col) >= 1 and
                    label_col[0] != "count" and
                    label_col[0] not in fd.get('groupby')):
                raise Exception(
                    "Choice of [Label] must be present in [Group By]")
            if (fd.get("point_radius") != "Auto" and
                    fd.get("point_radius") not in fd.get('groupby')):
                raise Exception(
                    "Choice of [Point Radius] must be present in [Group By]")
            if (fd.get('all_columns_x') not in fd.get('groupby') or
                    fd.get('all_columns_y') not in fd.get('groupby')):
                raise Exception(
                    "[Longitude] and [Latitude] columns must be present in [Group By]")
        return d

    def get_data(self):
        """Build a GeoJSON FeatureCollection plus the frontend map config."""
        df = self.get_df()
        fd = self.form_data
        label_col = fd.get('mapbox_label')
        custom_metric = label_col and len(label_col) >= 1
        metric_col = [None] * len(df.index)
        if custom_metric:
            # The label column may coincide with one of the coordinate
            # columns; resolve to the right series either way.
            if label_col[0] == fd.get('all_columns_x'):
                metric_col = df[fd.get('all_columns_x')]
            elif label_col[0] == fd.get('all_columns_y'):
                metric_col = df[fd.get('all_columns_y')]
            else:
                metric_col = df[label_col[0]]
        point_radius_col = (
            [None] * len(df.index)
            if fd.get("point_radius") == "Auto"
            else df[fd.get("point_radius")])
        # using geoJSON formatting
        geo_json = {
            "type": "FeatureCollection",
            "features": [
                {
                    "type": "Feature",
                    "properties": {
                        "metric": metric,
                        "radius": point_radius,
                    },
                    "geometry": {
                        "type": "Point",
                        "coordinates": [lon, lat],
                    }
                }
                for lon, lat, metric, point_radius
                in zip(
                    df[fd.get('all_columns_x')],
                    df[fd.get('all_columns_y')],
                    metric_col, point_radius_col)
            ]
        }
        return {
            "geoJSON": geo_json,
            "customMetric": custom_metric,
            "mapboxApiKey": config.get('MAPBOX_API_KEY'),
            "mapStyle": fd.get("mapbox_style"),
            "aggregatorName": fd.get("pandas_aggfunc"),
            "clusteringRadius": fd.get("clustering_radius"),
            "pointRadiusUnit": fd.get("point_radius_unit"),
            "globalOpacity": fd.get("global_opacity"),
            "viewportLongitude": fd.get("viewport_longitude"),
            "viewportLatitude": fd.get("viewport_latitude"),
            "viewportZoom": fd.get("viewport_zoom"),
            "renderWhileDragging": fd.get("render_while_dragging"),
            "tooltip": fd.get("rich_tooltip"),
            "color": fd.get("mapbox_color"),
        }
class Ec3BarLinePieViz(BaseViz):
    """ECharts bar/line/pie visualization: raw or grouped records plus options."""
    viz_type = "ec3_barlinepie"
    verbose_name = _("Ec3_BarLinePie_Viz")
    credits = ''
    fieldsets = ({
        'label': _("GROUP BY"),
        'description': _('查询需要用到group by聚合语句'),
        'fields': ('groupby', 'metrics')
    }, {
        'label': _("NOT GROUPED BY"),
        'description': _('查询原始记录不做group by聚合'),
        'fields': ('all_columns', 'order_by_cols'),
    }, {
        'label': _("Options"),
        'description': _('echart options'),
        'fields': (
            'options',
        )
    })
    form_overrides = ({
        'metrics': {
            'default': [],
        },
    })
    is_timeseries = False

    def query_obj(self):
        """Build the query; grouped and raw-column modes are exclusive."""
        d = super(Ec3BarLinePieViz, self).query_obj()
        fd = self.form_data
        if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
            raise Exception(
                "Choose either fields to [Group By] and [Metrics] or "
                "[Columns], not both")
        if fd.get('all_columns'):
            # Raw-record mode: select the listed columns, no aggregation.
            d['columns'] = fd.get('all_columns')
            d['groupby'] = []
            if fd.get('order_by_cols', []):
                # Each order-by entry is a JSON-encoded [column, asc] pair.
                d['orderby'] = [json.loads(t) for t in fd.get('order_by_cols', [])]
        return d

    def get_df(self, query_obj=None):
        """Fetch the frame; drop the timestamp column when not time-based."""
        df = super(Ec3BarLinePieViz, self).get_df(query_obj)
        if (
                self.form_data.get("granularity") == "all" and
                'timestamp' in df):
            del df['timestamp']
        return df

    def get_data(self):
        """Return the rows as records together with the column names."""
        df = self.get_df()
        return dict(
            records=df.to_dict(orient="records"),
            columns=list(df.columns),
        )

    def json_dumps(self, obj):
        """Serialize with ISO datetime handling for timestamp values."""
        return json.dumps(obj, default=utils.json_iso_dttm_ser)
class Ec3Map(BaseViz):
    """ECharts map visualization: records plus an optional custom map URL."""
    viz_type = "ec3_map"
    verbose_name = _("Ec3_Map_Viz")
    credits = ''
    fieldsets = ({
        'label': _("GROUP BY"),
        'description': _('查询需要用到group by聚合语句'),
        'fields': ('groupby', 'metrics')
    }, {
        'label': _("NOT GROUPED BY"),
        'description': _('查询原始记录不做group by聚合'),
        'fields': ('all_columns', 'order_by_cols'),
    }, {
        'label': _("Options"),
        'description': _('echart options'),
        'fields': (
            'custom_map', 'options'
        )
    })
    form_overrides = ({
        'metrics': {
            'default': [],
        },
    })
    is_timeseries = False

    def __init__(self, datasource, form_data, slice_=None):
        """Resolve the configured custom map name to its URL up front."""
        super(Ec3Map, self).__init__(datasource, form_data, slice_)
        fd = self.form_data
        if fd.get('custom_map'):
            from caravel import models
            custom_map = db.session.query(models.EchartMapType)\
                .filter_by(map_name=fd.get('custom_map')).first()
            # BUG FIX: .first() returns None for an unknown map name, which
            # previously raised AttributeError on `.map_url`.
            if custom_map is not None:
                self.form_data["custom_map_url"] = custom_map.map_url

    def query_obj(self):
        """Build the query; grouped and raw-column modes are exclusive."""
        d = super(Ec3Map, self).query_obj()
        fd = self.form_data
        if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
            raise Exception(
                "Choose either fields to [Group By] and [Metrics] or "
                "[Columns], not both")
        if fd.get('all_columns'):
            # Raw-record mode: select the listed columns, no aggregation.
            d['columns'] = fd.get('all_columns')
            d['groupby'] = []
            if fd.get('order_by_cols', []):
                # Each order-by entry is a JSON-encoded [column, asc] pair.
                d['orderby'] = [json.loads(t) for t in fd.get('order_by_cols', [])]
        return d

    def get_df(self, query_obj=None):
        """Fetch the frame; drop the timestamp column when not time-based."""
        df = super(Ec3Map, self).get_df(query_obj)
        if (
                self.form_data.get("granularity") == "all" and
                'timestamp' in df):
            del df['timestamp']
        return df

    def get_data(self):
        """Return the rows as records together with the column names."""
        df = self.get_df()
        return dict(
            records=df.to_dict(orient="records"),
            columns=list(df.columns),
        )

    def json_dumps(self, obj):
        """Serialize with ISO datetime handling for timestamp values."""
        return json.dumps(obj, default=utils.json_iso_dttm_ser)
# Ordered registry of every visualization class exposed in the UI.
viz_types_list = [
    Ec3BarLinePieViz,
    Ec3Map,
    TableViz,
    PivotTableViz,
    NVD3TimeSeriesViz,
    NVD3CompareTimeSeriesViz,
    NVD3TimeSeriesStackedViz,
    NVD3TimeSeriesBarViz,
    DistributionBarViz,
    DistributionPieViz,
    BubbleViz,
    MarkupViz,
    WordCloudViz,
    BigNumberViz,
    BigNumberTotalViz,
    SunburstViz,
    DirectedForceViz,
    SankeyViz,
    WorldMapViz,
    FilterBoxViz,
    IFrameViz,
    ParallelCoordinatesViz,
    HeatmapViz,
    BoxPlotViz,
    TreemapViz,
    CalHeatmapViz,
    HorizonViz,
    MapboxViz,
    HistogramViz,
    SeparatorViz,
]

# Map of viz_type identifier -> viz class, excluding any types that the
# deployment blacklisted via config['VIZ_TYPE_BLACKLIST'].
viz_types = OrderedDict([(v.viz_type, v) for v in viz_types_list
                         if v.viz_type not in config.get('VIZ_TYPE_BLACKLIST')])
|
wbsljh/caravel
|
caravel/viz.py
|
Python
|
apache-2.0
| 67,464
|
[
"VisIt"
] |
dc3ff4ec182a5a641b5cf3bef4eb67e4d1593d243981c419a4abce69b0c8e083
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Google Chrome History database plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import chrome_history
from tests.parsers.sqlite_plugins import test_lib
class GoogleChrome8HistoryPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Google Chrome 8 history SQLite database plugin."""

  def testProcess(self):
    """Tests the Process function on a Chrome History database file."""
    plugin = chrome_history.GoogleChrome8HistoryPlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        ['History'], plugin)

    self.assertEqual(storage_writer.number_of_warnings, 0)

    # The History file contains 71 events (69 page visits, 1 file downloads).
    self.assertEqual(storage_writer.number_of_events, 71)

    events = list(storage_writer.GetEvents())

    # Check the first page visited entry.
    expected_event_values = {
        'data_type': 'chrome:history:page_visited',
        'page_transition_type': 0,
        'timestamp': '2011-04-07 12:03:11.000000',
        'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
        'title': 'Ubuntu Start Page',
        'typed_count': 0,
        'url': 'http://start.ubuntu.com/10.04/Google/',
        'visit_source': 3}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)

    # Check the first file downloaded entry.
    # Download events are appended after the visit events, hence index 69.
    expected_event_values = {
        'data_type': 'chrome:history:file_downloaded',
        'full_path': '/home/john/Downloads/funcats_scr.exe',
        'received_bytes': 1132155,
        'timestamp': '2011-05-23 08:35:30.000000',
        'timestamp_desc': definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
        'total_bytes': 1132155,
        'url': 'http://fatloss4idiotsx.com/download/funcats/funcats_scr.exe'}

    self.CheckEventValues(storage_writer, events[69], expected_event_values)
class GoogleChrome27HistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Chrome 27 history SQLite database plugin."""
def testProcess57(self):
"""Tests the Process function on a Google Chrome 57 History database."""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-57.0.2987.133'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
# The History file contains 2 events (1 page visits, 1 file downloads).
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
# Check the page visit event.
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
expected_event_values = {
'data_type': 'chrome:history:page_visited',
'timestamp': '2018-01-21 14:09:53.885478',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': '',
'typed_count': 0,
'url': expected_url}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the file downloaded event.
expected_event_values = {
'data_type': 'chrome:history:file_downloaded',
'full_path': '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi',
'received_bytes': 3080192,
'timestamp': '2018-01-21 14:09:53.900399',
'timestamp_desc': definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
'total_bytes': 3080192,
'url': (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
def testProcess58(self):
"""Tests the Process function on a Google Chrome 58 History database."""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-58.0.3029.96'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
# The History file contains 2 events (1 page visits, 1 file downloads).
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
# Check the page visit event.
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
expected_event_values = {
'data_type': 'chrome:history:page_visited',
'timestamp': '2018-01-21 14:09:27.315765',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': '',
'typed_count': 0,
'url': expected_url}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the file downloaded event.
expected_event_values = {
'data_type': 'chrome:history:file_downloaded',
'full_path': '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi',
'received_bytes': 3080192,
'timestamp': '2018-01-21 14:09:27.200398',
'timestamp_desc': definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
'total_bytes': 3080192,
'url': (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
def testProcess59(self):
"""Tests the Process function on a Google Chrome 59 History database."""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-59.0.3071.86'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
# The History file contains 2 events (1 page visits, 1 file downloads).
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
# Check the page visit event.
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
expected_event_values = {
'data_type': 'chrome:history:page_visited',
'timestamp': '2018-01-21 14:08:52.037692',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': '',
'typed_count': 0,
'url': expected_url}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the file downloaded event.
expected_event_values = {
'data_type': 'chrome:history:file_downloaded',
'full_path': '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi',
'received_bytes': 3080192,
'timestamp': '2018-01-21 14:08:51.811123',
'timestamp_desc': definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
'total_bytes': 3080192,
'url': (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
def testProcess59ExtraColumn(self):
    """Tests the Process function on a Google Chrome 59 History database,
    manually modified to have an unexpected column.
    """
    plugin = chrome_history.GoogleChrome27HistoryPlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        ['History-59_added-fake-column'], plugin)

    self.assertEqual(storage_writer.number_of_warnings, 0)

    # One page-visit event plus one file-download event.
    self.assertEqual(storage_writer.number_of_events, 2)

    events = list(storage_writer.GetEvents())

    # Event 0: the page visit.
    page_visit_values = {
        'data_type': 'chrome:history:page_visited',
        'timestamp': '2018-01-21 14:08:52.037692',
        'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
        'title': '',
        'typed_count': 0,
        'url': (
            'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/'
            'master/generate-specimens.sh')}
    self.CheckEventValues(storage_writer, events[0], page_visit_values)

    # Event 1: the file download.
    download_values = {
        'data_type': 'chrome:history:file_downloaded',
        'full_path': '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi',
        'received_bytes': 3080192,
        'timestamp': '2018-01-21 14:08:51.811123',
        'timestamp_desc': definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
        'total_bytes': 3080192,
        'url': (
            'https://raw.githubusercontent.com/log2timeline/l2tbinaries/'
            'master/win32/plaso-20171231.1.win32.msi')}
    self.CheckEventValues(storage_writer, events[1], download_values)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
Onager/plaso
|
tests/parsers/sqlite_plugins/chrome_history.py
|
Python
|
apache-2.0
| 8,746
|
[
"VisIt"
] |
efa2246df9612aba0e1eb77ceda33d1856e5a812d18d43441ca5f49492fd8c0d
|
"""
The B{0install add-feed} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import SafeException, _
from zeroinstall.support import tasks, raw_input
from zeroinstall.cmd import UsageError
from zeroinstall.injector import model, writer
# Usage summary shown in the command's help output.
syntax = "[INTERFACE] NEW-FEED"
def add_options(parser):
    """Register the command-line options accepted by the add-feed command."""
    parser.add_option(
        "-o", "--offline",
        action='store_true',
        help=_("try to avoid using the network"))
def find_feed_import(iface, feed_url):
    """Return the extra-feed entry on *iface* whose URI is *feed_url*.

    Returns None when no registered extra feed matches.
    """
    matching = (feed for feed in iface.extra_feeds if feed.uri == feed_url)
    return next(matching, None)
def handle(config, options, args, add_ok = True, remove_ok = False):
    """Handle the add-feed command (also reused for remove-feed via the
    add_ok/remove_ok flags).

    Two arguments: register NEW-FEED directly on INTERFACE and return.
    One argument: refresh the feed if stale, list the interfaces the feed
    claims to extend, ask the user to pick one, then toggle the feed's
    registration on that interface and save it.

    @param add_ok: offer to add the feed to interfaces not yet using it
    @param remove_ok: offer to remove the feed from interfaces already using it
    """
    if len(args) == 2:
        # Direct form: "add-feed INTERFACE NEW-FEED" — no prompting.
        iface = config.iface_cache.get_interface(model.canonical_iface_uri(args[0]))
        feed_url = model.canonical_iface_uri(args[1])

        feed_import = find_feed_import(iface, feed_url)
        if feed_import:
            raise SafeException(_('Interface %(interface)s already has a feed %(feed)s') %
                    {'interface': iface.uri, 'feed': feed_url})

        iface.extra_feeds.append(model.Feed(feed_url, arch = None, user_override = True))
        writer.save_interface(iface)
        return
    elif len(args) != 1: raise UsageError()

    x = args[0]

    print(_("Feed '%s':") % x + '\n')
    x = model.canonical_iface_uri(x)

    if options.offline:
        config.network_use = model.network_offline

    # Refresh the feed first, unless we are offline or it is still fresh.
    if config.network_use != model.network_offline and config.iface_cache.is_stale(x, config.freshness):
        blocker = config.fetcher.download_and_import_feed(x, config.iface_cache)
        print(_("Downloading feed; please wait..."))
        tasks.wait_for_blocker(blocker)
        print(_("Done"))

    # Interfaces for which this feed claims to provide implementations.
    candidate_interfaces = config.iface_cache.get_feed_targets(x)
    assert candidate_interfaces
    interfaces = []

    # Build and print the numbered add/remove menu; only interfaces with an
    # applicable action end up in `interfaces`.
    for i in range(len(candidate_interfaces)):
        iface = candidate_interfaces[i]
        if find_feed_import(iface, x):
            # Feed already registered on this interface — removal candidate.
            if remove_ok:
                print(_("%(index)d) Remove as feed for '%(uri)s'") % {'index': i + 1, 'uri': iface.uri})
                interfaces.append(iface)
        else:
            if add_ok:
                print(_("%(index)d) Add as feed for '%(uri)s'") % {'index': i + 1, 'uri': iface.uri})
                interfaces.append(iface)
    if not interfaces:
        if remove_ok:
            raise SafeException(_("%(feed)s is not registered as a feed for %(interface)s") %
                    {'feed': x, 'interface': candidate_interfaces[0]})
        else:
            raise SafeException(_("%(feed)s already registered as a feed for %(interface)s") %
                    {'feed': x, 'interface': candidate_interfaces[0]})
    print()

    # Prompt until a valid 1-based selection is entered (empty input = 1;
    # any non-numeric or out-of-range entry re-prompts).
    while True:
        try:
            i = raw_input(_('Enter a number, or CTRL-C to cancel [1]: ')).strip()
        except KeyboardInterrupt:
            print()
            raise SafeException(_("Aborted at user request."))
        if i == '':
            i = 1
        else:
            try:
                i = int(i)
            except ValueError:
                i = 0
        if i > 0 and i <= len(interfaces):
            break
        print(_("Invalid number. Try again. (1 to %d)") % len(interfaces))
    iface = interfaces[i - 1]

    # Toggle: remove the feed if it was registered, otherwise add it.
    feed_import = find_feed_import(iface, x)
    if feed_import:
        iface.extra_feeds.remove(feed_import)
    else:
        iface.extra_feeds.append(model.Feed(x, arch = None, user_override = True))

    writer.save_interface(iface)
    print('\n' + _("Feed list for interface '%s' is now:") % iface.get_name())
    if iface.extra_feeds:
        for f in iface.extra_feeds:
            print("- " + f.uri)
    else:
        print(_("(no feeds)"))
def complete(completion, args, cword):
    """Shell-completion hook: complete interface URIs for the first
    argument and file names for the second; nothing beyond that."""
    if cword > 1:
        return
    if cword == 0:
        completion.expand_interfaces()
    else:
        completion.expand_files()
|
dsqmoore/0install
|
zeroinstall/cmd/add_feed.py
|
Python
|
lgpl-2.1
| 3,461
|
[
"VisIt"
] |
0085138101a82692a54987abe5d6553e94be63893c8093667c824f70be9eed78
|
import re
from pygments.lexers.theorem import IsabelleLexer
from pygments.lexer import RegexLexer, inherit, bygroups, words
from pygments.token import *
import encoding
__all__ = ['IsarLexer']
class IsarLexer(IsabelleLexer):
    """Pygments lexer for Isabelle/Isar theory text.

    Extends pygments' IsabelleLexer with states for cartouche-delimited
    document text (the text/section/... commands), document antiquotations
    and \\<open>...\\<close> blocks, and decodes Isabelle symbol escapes to
    Unicode in the emitted token values.
    """
    name = 'Isabelle/Isar'

    # Commands whose following cartouche contains document text, not code.
    keyword_cartouche_text = ('text', 'txt', 'text_raw',
        'chapter', 'section', 'subsection', 'subsubsection',
        'paragraph', 'subparagraph',
    )

    tokens = {
        'root': [
            # Text command, optional '%tag', then the opening cartouche;
            # switch to the document-text state.
            (words(keyword_cartouche_text, prefix=r'\b', suffix=r'(%\w+)?(\s*\\<open>)'), bygroups(Keyword,Comment.Preproc,Comment), 'cartouche-text'),
            (r'\\<comment>.*$', Comment),
            (r'%\w+', Comment.Preproc),
            # A bare cartouche outside a text command opens a 'fact' block.
            (r'\\<open>', String.Other, 'fact'),
            inherit,
        ],
        'cartouche-text': [
            (r'[^\\@]', Comment),
            # '@{name ...}' document antiquotation.
            (r'(@\{)(\w+)', bygroups(String.Other, Keyword), 'antiquotation'),
            # Nested cartouches push/pop this same state.
            (r'\\<open>', Text, '#push'),
            (r'\\<close>', Comment, '#pop'),
            (r'\\<[\^\w]+>', Comment.Symbol),
            (r'\\', Comment),
        ],
        'antiquotation': [
            (r'[^\{\}\\]', Text),
            # Track nested braces so the antiquotation ends on the matching '}'.
            (r'\{', String.Other, '#push'),
            (r'\}', String.Other, '#pop'),
            (r'\\<[\^\w]+>', String.Symbol),
            (r'\\', Text),
        ],
        'fact': [
            (r'\\<close>', String.Other, '#pop'),
            inherit,
        ],
    }

    def get_tokens_unprocessed(self, text):
        # Post-process every token value, replacing Isabelle symbol escapes
        # (\<alpha>, \<Rightarrow>, ...) with their Unicode equivalents.
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            value = isar_decode(value)
            yield index, token, value
def isar_decode(raw):
    """Replace Isabelle symbol escapes (e.g. ``\\<alpha>``) in *raw* with
    the corresponding Unicode characters.

    The lookup table is parsed lazily from the ``symbols_raw`` listing below
    on first call and cached in the module-level ``symbol_table``.
    Unknown escapes are left untouched.
    """
    global symbol_table
    if symbol_table is None:
        # First call: parse the Isabelle symbols listing into a dict.
        symbol_table = {}
        for line in symbols_raw.splitlines():
            if line:
                # Skip commented-out entries (lines starting with '#').
                if re.match(r"^#", line):
                    continue
                m = re.match(r"^(\\<.*>)\s+code:\s+0x([0-9a-f]+).*$", line)
                assert m, "Failed to parse " + line
                n = int(m.group(2),16)
                # Only map BMP characters; unichr() fails above 0xFFFF on
                # narrow builds (NOTE: unichr makes this Python 2 only).
                if n < 0x10000:
                    symbol_table[m.group(1)] = unichr(n)
    if isinstance(raw, str):
        # Byte string input: decode to unicode via the project helper first.
        raw = encoding.get_unicode(raw)
    def repl(m):
        # Substitute known symbols; pass unknown ones through unchanged.
        if m.group(0) in symbol_table:
            return symbol_table[m.group(0)]
        else:
            return m.group(0)
    return re.sub(r"\\<[\^a-zA-Z]+>", repl, raw)
# ~~/etc/symbols from Isabelle2016
# Lazily-built map from Isabelle symbol escapes to Unicode characters;
# populated on first call to isar_decode() from the symbols_raw table below.
symbol_table = None
symbols_raw = """
\<zero> code: 0x01d7ec group: digit
\<one> code: 0x01d7ed group: digit
\<two> code: 0x01d7ee group: digit
\<three> code: 0x01d7ef group: digit
\<four> code: 0x01d7f0 group: digit
\<five> code: 0x01d7f1 group: digit
\<six> code: 0x01d7f2 group: digit
\<seven> code: 0x01d7f3 group: digit
\<eight> code: 0x01d7f4 group: digit
\<nine> code: 0x01d7f5 group: digit
\<A> code: 0x01d49c group: letter
\<B> code: 0x00212c group: letter
\<C> code: 0x01d49e group: letter
\<D> code: 0x01d49f group: letter
\<E> code: 0x002130 group: letter
\<F> code: 0x002131 group: letter
\<G> code: 0x01d4a2 group: letter
\<H> code: 0x00210b group: letter
\<I> code: 0x002110 group: letter
\<J> code: 0x01d4a5 group: letter
\<K> code: 0x01d4a6 group: letter
\<L> code: 0x002112 group: letter
\<M> code: 0x002133 group: letter
\<N> code: 0x01d4a9 group: letter
\<O> code: 0x01d4aa group: letter
\<P> code: 0x01d4ab group: letter
\<Q> code: 0x01d4ac group: letter
\<R> code: 0x00211b group: letter
\<S> code: 0x01d4ae group: letter
\<T> code: 0x01d4af group: letter
\<U> code: 0x01d4b0 group: letter
\<V> code: 0x01d4b1 group: letter
\<W> code: 0x01d4b2 group: letter
\<X> code: 0x01d4b3 group: letter
\<Y> code: 0x01d4b4 group: letter
\<Z> code: 0x01d4b5 group: letter
\<a> code: 0x01d5ba group: letter
\<b> code: 0x01d5bb group: letter
\<c> code: 0x01d5bc group: letter
\<d> code: 0x01d5bd group: letter
\<e> code: 0x01d5be group: letter
\<f> code: 0x01d5bf group: letter
\<g> code: 0x01d5c0 group: letter
\<h> code: 0x01d5c1 group: letter
\<i> code: 0x01d5c2 group: letter
\<j> code: 0x01d5c3 group: letter
\<k> code: 0x01d5c4 group: letter
\<l> code: 0x01d5c5 group: letter
\<m> code: 0x01d5c6 group: letter
\<n> code: 0x01d5c7 group: letter
\<o> code: 0x01d5c8 group: letter
\<p> code: 0x01d5c9 group: letter
\<q> code: 0x01d5ca group: letter
\<r> code: 0x01d5cb group: letter
\<s> code: 0x01d5cc group: letter
\<t> code: 0x01d5cd group: letter
\<u> code: 0x01d5ce group: letter
\<v> code: 0x01d5cf group: letter
\<w> code: 0x01d5d0 group: letter
\<x> code: 0x01d5d1 group: letter
\<y> code: 0x01d5d2 group: letter
\<z> code: 0x01d5d3 group: letter
\<AA> code: 0x01d504 group: letter
\<BB> code: 0x01d505 group: letter
\<CC> code: 0x00212d group: letter
\<DD> code: 0x01d507 group: letter
\<EE> code: 0x01d508 group: letter
\<FF> code: 0x01d509 group: letter
\<GG> code: 0x01d50a group: letter
\<HH> code: 0x00210c group: letter
\<II> code: 0x002111 group: letter
\<JJ> code: 0x01d50d group: letter
\<KK> code: 0x01d50e group: letter
\<LL> code: 0x01d50f group: letter
\<MM> code: 0x01d510 group: letter
\<NN> code: 0x01d511 group: letter
\<OO> code: 0x01d512 group: letter
\<PP> code: 0x01d513 group: letter
\<QQ> code: 0x01d514 group: letter
\<RR> code: 0x00211c group: letter
\<SS> code: 0x01d516 group: letter
\<TT> code: 0x01d517 group: letter
\<UU> code: 0x01d518 group: letter
\<VV> code: 0x01d519 group: letter
\<WW> code: 0x01d51a group: letter
\<XX> code: 0x01d51b group: letter
\<YY> code: 0x01d51c group: letter
\<ZZ> code: 0x002128 group: letter
\<aa> code: 0x01d51e group: letter
\<bb> code: 0x01d51f group: letter
\<cc> code: 0x01d520 group: letter
\<dd> code: 0x01d521 group: letter
\<ee> code: 0x01d522 group: letter
\<ff> code: 0x01d523 group: letter
\<gg> code: 0x01d524 group: letter
\<hh> code: 0x01d525 group: letter
\<ii> code: 0x01d526 group: letter
\<jj> code: 0x01d527 group: letter
\<kk> code: 0x01d528 group: letter
\<ll> code: 0x01d529 group: letter
\<mm> code: 0x01d52a group: letter
\<nn> code: 0x01d52b group: letter
\<oo> code: 0x01d52c group: letter
\<pp> code: 0x01d52d group: letter
\<qq> code: 0x01d52e group: letter
\<rr> code: 0x01d52f group: letter
\<ss> code: 0x01d530 group: letter
\<tt> code: 0x01d531 group: letter
\<uu> code: 0x01d532 group: letter
\<vv> code: 0x01d533 group: letter
\<ww> code: 0x01d534 group: letter
\<xx> code: 0x01d535 group: letter
\<yy> code: 0x01d536 group: letter
\<zz> code: 0x01d537 group: letter
\<alpha> code: 0x0003b1 group: greek
\<beta> code: 0x0003b2 group: greek
\<gamma> code: 0x0003b3 group: greek
\<delta> code: 0x0003b4 group: greek
\<epsilon> code: 0x0003b5 group: greek
\<zeta> code: 0x0003b6 group: greek
\<eta> code: 0x0003b7 group: greek
\<theta> code: 0x0003b8 group: greek
\<iota> code: 0x0003b9 group: greek
\<kappa> code: 0x0003ba group: greek
\<lambda> code: 0x0003bb group: greek abbrev: %
\<mu> code: 0x0003bc group: greek
\<nu> code: 0x0003bd group: greek
\<xi> code: 0x0003be group: greek
\<pi> code: 0x0003c0 group: greek
\<rho> code: 0x0003c1 group: greek
\<sigma> code: 0x0003c3 group: greek
\<tau> code: 0x0003c4 group: greek
\<upsilon> code: 0x0003c5 group: greek
\<phi> code: 0x0003c6 group: greek
\<chi> code: 0x0003c7 group: greek
\<psi> code: 0x0003c8 group: greek
\<omega> code: 0x0003c9 group: greek
\<Gamma> code: 0x000393 group: greek
\<Delta> code: 0x000394 group: greek
\<Theta> code: 0x000398 group: greek
\<Lambda> code: 0x00039b group: greek
\<Xi> code: 0x00039e group: greek
\<Pi> code: 0x0003a0 group: greek
\<Sigma> code: 0x0003a3 group: greek
\<Upsilon> code: 0x0003a5 group: greek
\<Phi> code: 0x0003a6 group: greek
\<Psi> code: 0x0003a8 group: greek
\<Omega> code: 0x0003a9 group: greek
\<bool> code: 0x01d539 group: letter
\<complex> code: 0x002102 group: letter
\<nat> code: 0x002115 group: letter
\<rat> code: 0x00211a group: letter
\<real> code: 0x00211d group: letter
\<int> code: 0x002124 group: letter
\<leftarrow> code: 0x002190 group: arrow abbrev: <.
\<longleftarrow> code: 0x0027f5 group: arrow abbrev: <.
\<longlongleftarrow> code: 0x00290e group: arrow abbrev: <.
\<longlonglongleftarrow> code: 0x0021e0 group: arrow abbrev: <.
\<rightarrow> code: 0x002192 group: arrow abbrev: .> abbrev: ->
\<longrightarrow> code: 0x0027f6 group: arrow abbrev: .> abbrev: -->
\<longlongrightarrow> code: 0x00290f group: arrow abbrev: .> abbrev: --->
\<longlonglongrightarrow> code: 0x0021e2 group: arrow abbrev: .> abbrev: --->
\<Leftarrow> code: 0x0021d0 group: arrow abbrev: <.
\<Longleftarrow> code: 0x0027f8 group: arrow abbrev: <.
\<Lleftarrow> code: 0x0021da group: arrow abbrev: <.
\<Rightarrow> code: 0x0021d2 group: arrow abbrev: .> abbrev: =>
\<Longrightarrow> code: 0x0027f9 group: arrow abbrev: .> abbrev: ==>
\<Rrightarrow> code: 0x0021db group: arrow abbrev: .>
\<leftrightarrow> code: 0x002194 group: arrow abbrev: <> abbrev: <->
\<longleftrightarrow> code: 0x0027f7 group: arrow abbrev: <> abbrev: <-> abbrev: <-->
\<Leftrightarrow> code: 0x0021d4 group: arrow abbrev: <>
\<Longleftrightarrow> code: 0x0027fa group: arrow abbrev: <>
\<mapsto> code: 0x0021a6 group: arrow abbrev: .> abbrev: |->
\<longmapsto> code: 0x0027fc group: arrow abbrev: .> abbrev: |-->
\<midarrow> code: 0x002500 group: arrow abbrev: <>
\<Midarrow> code: 0x002550 group: arrow abbrev: <>
\<hookleftarrow> code: 0x0021a9 group: arrow abbrev: <.
\<hookrightarrow> code: 0x0021aa group: arrow abbrev: .>
\<leftharpoondown> code: 0x0021bd group: arrow abbrev: <.
\<rightharpoondown> code: 0x0021c1 group: arrow abbrev: .>
\<leftharpoonup> code: 0x0021bc group: arrow abbrev: <.
\<rightharpoonup> code: 0x0021c0 group: arrow abbrev: .>
\<rightleftharpoons> code: 0x0021cc group: arrow abbrev: <> abbrev: ==
\<leadsto> code: 0x00219d group: arrow abbrev: .> abbrev: ~>
\<downharpoonleft> code: 0x0021c3 group: arrow
\<downharpoonright> code: 0x0021c2 group: arrow
\<upharpoonleft> code: 0x0021bf group: arrow
#\<upharpoonright> code: 0x0021be group: arrow
\<restriction> code: 0x0021be group: punctuation
\<Colon> code: 0x002237 group: punctuation
\<up> code: 0x002191 group: arrow
\<Up> code: 0x0021d1 group: arrow
\<down> code: 0x002193 group: arrow
\<Down> code: 0x0021d3 group: arrow
\<updown> code: 0x002195 group: arrow
\<Updown> code: 0x0021d5 group: arrow
\<langle> code: 0x0027e8 group: punctuation abbrev: <<
\<rangle> code: 0x0027e9 group: punctuation abbrev: >>
\<lceil> code: 0x002308 group: punctuation abbrev: [.
\<rceil> code: 0x002309 group: punctuation abbrev: .]
\<lfloor> code: 0x00230a group: punctuation abbrev: [.
\<rfloor> code: 0x00230b group: punctuation abbrev: .]
\<lparr> code: 0x002987 group: punctuation abbrev: (|
\<rparr> code: 0x002988 group: punctuation abbrev: |)
\<lbrakk> code: 0x0027e6 group: punctuation abbrev: [|
\<rbrakk> code: 0x0027e7 group: punctuation abbrev: |]
\<lbrace> code: 0x002983 group: punctuation abbrev: {|
\<rbrace> code: 0x002984 group: punctuation abbrev: |}
\<guillemotleft> code: 0x0000ab group: punctuation abbrev: <<
\<guillemotright> code: 0x0000bb group: punctuation abbrev: >>
\<bottom> code: 0x0022a5 group: logic
\<top> code: 0x0022a4 group: logic
\<and> code: 0x002227 group: logic abbrev: /\ abbrev: &
\<And> code: 0x0022c0 group: logic abbrev: !!
\<or> code: 0x002228 group: logic abbrev: \/ abbrev: |
\<Or> code: 0x0022c1 group: logic abbrev: ??
\<forall> code: 0x002200 group: logic abbrev: ! abbrev: ALL
\<exists> code: 0x002203 group: logic abbrev: ? abbrev: EX
\<nexists> code: 0x002204 group: logic abbrev: ~?
\<not> code: 0x0000ac group: logic abbrev: ~
\<box> code: 0x0025a1 group: logic
\<diamond> code: 0x0025c7 group: logic
\<diamondop> code: 0x0022c4 group: operator
\<turnstile> code: 0x0022a2 group: relation abbrev: |-
\<Turnstile> code: 0x0022a8 group: relation abbrev: |=
\<tturnstile> code: 0x0022a9 group: relation abbrev: |-
\<TTurnstile> code: 0x0022ab group: relation abbrev: |=
\<stileturn> code: 0x0022a3 group: relation abbrev: -|
\<surd> code: 0x00221a group: relation
\<le> code: 0x002264 group: relation abbrev: <=
\<ge> code: 0x002265 group: relation abbrev: >=
\<lless> code: 0x00226a group: relation abbrev: <<
\<ggreater> code: 0x00226b group: relation abbrev: >>
\<lesssim> code: 0x002272 group: relation
\<greatersim> code: 0x002273 group: relation
\<lessapprox> code: 0x002a85 group: relation
\<greaterapprox> code: 0x002a86 group: relation
\<in> code: 0x002208 group: relation abbrev: :
\<notin> code: 0x002209 group: relation abbrev: ~:
\<subset> code: 0x002282 group: relation
\<supset> code: 0x002283 group: relation
\<subseteq> code: 0x002286 group: relation abbrev: (=
\<supseteq> code: 0x002287 group: relation abbrev: )=
\<sqsubset> code: 0x00228f group: relation
\<sqsupset> code: 0x002290 group: relation
\<sqsubseteq> code: 0x002291 group: relation abbrev: [=
\<sqsupseteq> code: 0x002292 group: relation abbrev: ]=
\<inter> code: 0x002229 group: operator abbrev: Int
\<Inter> code: 0x0022c2 group: operator abbrev: Inter abbrev: INT
\<union> code: 0x00222a group: operator abbrev: Un
\<Union> code: 0x0022c3 group: operator abbrev: Union abbrev: UN
\<squnion> code: 0x002294 group: operator
\<Squnion> code: 0x002a06 group: operator abbrev: SUP
\<sqinter> code: 0x002293 group: operator
\<Sqinter> code: 0x002a05 group: operator abbrev: INF
\<setminus> code: 0x002216 group: operator
\<propto> code: 0x00221d group: operator
\<uplus> code: 0x00228e group: operator
\<Uplus> code: 0x002a04 group: operator
\<noteq> code: 0x002260 group: relation abbrev: ~=
\<sim> code: 0x00223c group: relation
\<doteq> code: 0x002250 group: relation abbrev: .=
\<simeq> code: 0x002243 group: relation
\<approx> code: 0x002248 group: relation
\<asymp> code: 0x00224d group: relation
\<cong> code: 0x002245 group: relation
\<smile> code: 0x002323 group: relation
\<equiv> code: 0x002261 group: relation abbrev: ==
\<frown> code: 0x002322 group: relation
\<Join> code: 0x0022c8
\<bowtie> code: 0x002a1d
\<prec> code: 0x00227a group: relation
\<succ> code: 0x00227b group: relation
\<preceq> code: 0x00227c group: relation
\<succeq> code: 0x00227d group: relation
\<parallel> code: 0x002225 group: punctuation abbrev: ||
\<bar> code: 0x0000a6 group: punctuation abbrev: ||
\<plusminus> code: 0x0000b1 group: operator
\<minusplus> code: 0x002213 group: operator
\<times> code: 0x0000d7 group: operator abbrev: <*>
\<div> code: 0x0000f7 group: operator
\<cdot> code: 0x0022c5 group: operator
\<star> code: 0x0022c6 group: operator
\<bullet> code: 0x002219 group: operator
\<circ> code: 0x002218 group: operator
\<dagger> code: 0x002020
\<ddagger> code: 0x002021
\<lhd> code: 0x0022b2 group: relation
\<rhd> code: 0x0022b3 group: relation
\<unlhd> code: 0x0022b4 group: relation
\<unrhd> code: 0x0022b5 group: relation
\<triangleleft> code: 0x0025c3 group: relation
\<triangleright> code: 0x0025b9 group: relation
\<triangle> code: 0x0025b3 group: relation
\<triangleq> code: 0x00225c group: relation
\<oplus> code: 0x002295 group: operator
\<Oplus> code: 0x002a01 group: operator
\<otimes> code: 0x002297 group: operator
\<Otimes> code: 0x002a02 group: operator
\<odot> code: 0x002299 group: operator
\<Odot> code: 0x002a00 group: operator
\<ominus> code: 0x002296 group: operator
\<oslash> code: 0x002298 group: operator
\<dots> code: 0x002026 group: punctuation abbrev: ...
\<cdots> code: 0x0022ef group: punctuation
\<Sum> code: 0x002211 group: operator abbrev: SUM
\<Prod> code: 0x00220f group: operator abbrev: PROD
\<Coprod> code: 0x002210 group: operator
\<infinity> code: 0x00221e
\<integral> code: 0x00222b group: operator
\<ointegral> code: 0x00222e group: operator
\<clubsuit> code: 0x002663
\<diamondsuit> code: 0x002662
\<heartsuit> code: 0x002661
\<spadesuit> code: 0x002660
\<aleph> code: 0x002135
\<emptyset> code: 0x002205
\<nabla> code: 0x002207
\<partial> code: 0x002202
\<flat> code: 0x00266d
\<natural> code: 0x00266e
\<sharp> code: 0x00266f
\<angle> code: 0x002220
\<copyright> code: 0x0000a9
\<registered> code: 0x0000ae
\<hyphen> code: 0x0000ad group: punctuation
\<inverse> code: 0x0000af group: punctuation
\<onequarter> code: 0x0000bc group: digit
\<onehalf> code: 0x0000bd group: digit
\<threequarters> code: 0x0000be group: digit
\<ordfeminine> code: 0x0000aa
\<ordmasculine> code: 0x0000ba
\<section> code: 0x0000a7
\<paragraph> code: 0x0000b6
\<exclamdown> code: 0x0000a1
\<questiondown> code: 0x0000bf
\<euro> code: 0x0020ac
\<pounds> code: 0x0000a3
\<yen> code: 0x0000a5
\<cent> code: 0x0000a2
\<currency> code: 0x0000a4
\<degree> code: 0x0000b0
\<amalg> code: 0x002a3f group: operator
\<mho> code: 0x002127 group: operator
\<lozenge> code: 0x0025ca
\<wp> code: 0x002118
\<wrong> code: 0x002240 group: relation
\<acute> code: 0x0000b4
\<index> code: 0x000131
\<dieresis> code: 0x0000a8
\<cedilla> code: 0x0000b8
\<hungarumlaut> code: 0x0002dd
\<bind> code: 0x00291c abbrev: >>=
\<then> code: 0x002aa2 abbrev: >>
\<some> code: 0x0003f5
\<hole> code: 0x002311
\<newline> code: 0x0023ce
\<comment> code: 0x002015 group: document font: IsabelleText
\<open> code: 0x002039 group: punctuation font: IsabelleText abbrev: <<
\<close> code: 0x00203a group: punctuation font: IsabelleText abbrev: >>
\<here> code: 0x002302 font: IsabelleText
\<^undefined> code: 0x002756 font: IsabelleText
\<^noindent> code: 0x0021e4 group: document font: IsabelleText
\<^smallskip> code: 0x002508 group: document font: IsabelleText
\<^medskip> code: 0x002509 group: document font: IsabelleText
\<^bigskip> code: 0x002501 group: document font: IsabelleText
\<^item> code: 0x0025aa group: document font: IsabelleText
\<^enum> code: 0x0025b8 group: document font: IsabelleText
\<^descr> code: 0x0027a7 group: document font: IsabelleText
\<^footnote> code: 0x00204b group: document font: IsabelleText
\<^verbatim> code: 0x0025a9 group: document font: IsabelleText
\<^theory_text> code: 0x002b1a group: document font: IsabelleText
\<^emph> code: 0x002217 group: document font: IsabelleText
\<^bold> code: 0x002759 group: control group: document font: IsabelleText
\<^sub> code: 0x0021e9 group: control font: IsabelleText
\<^sup> code: 0x0021e7 group: control font: IsabelleText
\<^bsub> code: 0x0021d8 group: control_block font: IsabelleText abbrev: =_(
\<^esub> code: 0x0021d9 group: control_block font: IsabelleText abbrev: =_)
\<^bsup> code: 0x0021d7 group: control_block font: IsabelleText abbrev: =^(
\<^esup> code: 0x0021d6 group: control_block font: IsabelleText abbrev: =^)
"""
|
lohner/Praktomat
|
src/utilities/isar_lexer.py
|
Python
|
gpl-2.0
| 23,943
|
[
"Bowtie"
] |
92a7d32658d3c52fcccde55e86f23aa2f3eac24e337efa751701996884a107e3
|
# -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
import http.client
import urllib.parse
from pkg_resources import iter_entry_points
from .upload import upload
def _encode(s):
return s.encode('utf-8', 'surrogateescape')
class upload_docs(upload):
    """Distutils command that zips up built documentation and POSTs it to
    PyPI's legacy documentation-hosting endpoint (pythonhosted.org)."""

    # override the default repository as upload_docs isn't
    # supported by Warehouse (and won't be).
    DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'

    description = 'Upload documentation to PyPI'

    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server'),
        ('upload-dir=', None, 'directory to upload'),
    ]
    boolean_options = upload.boolean_options

    def has_sphinx(self):
        # True when no explicit upload dir was given AND a 'build_sphinx'
        # command entry point is installed; implicitly returns None otherwise.
        if self.upload_dir is None:
            for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
                return True

    # Run build_sphinx first when available (predicate above).
    sub_commands = [('build_sphinx', has_sphinx)]

    def initialize_options(self):
        upload.initialize_options(self)
        # upload_dir: user-supplied source directory (optional).
        self.upload_dir = None
        # target_dir: resolved directory actually zipped and uploaded.
        self.target_dir = None

    def finalize_options(self):
        upload.finalize_options(self)
        if self.upload_dir is None:
            # No explicit dir: prefer the sphinx build output, falling back
            # to <build_base>/docs.
            if self.has_sphinx():
                build_sphinx = self.get_finalized_command('build_sphinx')
                self.target_dir = build_sphinx.builder_target_dir
            else:
                build = self.get_finalized_command('build')
                self.target_dir = os.path.join(build.build_base, 'docs')
        else:
            self.ensure_dirname('upload_dir')
            self.target_dir = self.upload_dir
        if 'pypi.python.org' in self.repository:
            log.warn("Upload_docs command is deprecated. Use RTD instead.")
        self.announce('Using upload directory %s' % self.target_dir)

    def create_zipfile(self, filename):
        """Zip the whole target_dir tree into *filename*, storing paths
        relative to target_dir. Raises DistutilsOptionError when the top
        level contains no files."""
        zip_file = zipfile.ZipFile(filename, "w")
        try:
            self.mkpath(self.target_dir)  # just in case
            for root, dirs, files in os.walk(self.target_dir):
                if root == self.target_dir and not files:
                    tmpl = "no files found in upload directory '%s'"
                    raise DistutilsOptionError(tmpl % self.target_dir)
                for name in files:
                    full = os.path.join(root, name)
                    relative = root[len(self.target_dir):].lstrip(os.path.sep)
                    dest = os.path.join(relative, name)
                    zip_file.write(full, dest)
        finally:
            zip_file.close()

    def run(self):
        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        # Build the zip in a temp dir, upload it, then clean up regardless
        # of success.
        tmp_dir = tempfile.mkdtemp()
        name = self.distribution.metadata.get_name()
        zip_file = os.path.join(tmp_dir, "%s.zip" % name)
        try:
            self.create_zipfile(zip_file)
            self.upload_file(zip_file)
        finally:
            shutil.rmtree(tmp_dir)

    @staticmethod
    def _build_part(item, sep_boundary):
        """Yield the byte chunks of one multipart/form-data part for a
        (key, value-or-list) item."""
        key, values = item
        title = '\nContent-Disposition: form-data; name="%s"' % key
        # handle multiple entries for the same name
        if not isinstance(values, list):
            values = [values]
        for value in values:
            if isinstance(value, tuple):
                # (filename, content) tuple: a file upload field.
                title += '; filename="%s"' % value[0]
                value = value[1]
            else:
                value = _encode(value)
            yield sep_boundary
            yield _encode(title)
            yield b"\n\n"
            yield value
            if value and value[-1:] == b'\r':
                yield b'\n'  # write an extra newline (lurve Macs)

    @classmethod
    def _build_multipart(cls, data):
        """
        Build up the MIME payload for the POST data
        """
        boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = b'\n--' + boundary.encode('ascii')
        end_boundary = sep_boundary + b'--'
        end_items = end_boundary, b"\n",
        builder = functools.partial(
            cls._build_part,
            sep_boundary=sep_boundary,
        )
        part_groups = map(builder, data.items())
        parts = itertools.chain.from_iterable(part_groups)
        body_items = itertools.chain(parts, end_items)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return b''.join(body_items), content_type

    def upload_file(self, filename):
        """POST *filename* to the repository as a 'doc_upload' action with
        HTTP Basic auth, reporting the server's response."""
        with open(filename, 'rb') as f:
            content = f.read()
        meta = self.distribution.metadata
        data = {
            ':action': 'doc_upload',
            'name': meta.get_name(),
            'content': (os.path.basename(filename), content),
        }
        # set up the authentication
        credentials = _encode(self.username + ':' + self.password)
        credentials = standard_b64encode(credentials).decode('ascii')
        auth = "Basic " + credentials

        body, ct = self._build_multipart(data)

        msg = "Submitting documentation to %s" % (self.repository)
        self.announce(msg, log.INFO)

        # build the Request
        # We can't use urllib2 since we need to send the Basic
        # auth right with the first request
        schema, netloc, url, params, query, fragments = \
            urllib.parse.urlparse(self.repository)
        assert not params and not query and not fragments
        if schema == 'http':
            conn = http.client.HTTPConnection(netloc)
        elif schema == 'https':
            conn = http.client.HTTPSConnection(netloc)
        else:
            raise AssertionError("unsupported schema " + schema)

        data = ''
        try:
            conn.connect()
            conn.putrequest("POST", url)
            content_type = ct
            conn.putheader('Content-type', content_type)
            conn.putheader('Content-length', str(len(body)))
            conn.putheader('Authorization', auth)
            conn.endheaders()
            conn.send(body)
        except socket.error as e:
            # Network failure: report and bail out without a response.
            self.announce(str(e), log.ERROR)
            return

        r = conn.getresponse()
        if r.status == 200:
            msg = 'Server response (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.INFO)
        elif r.status == 301:
            # Redirect means the upload succeeded; show where the docs live.
            location = r.getheader('Location')
            if location is None:
                location = 'https://pythonhosted.org/%s/' % meta.get_name()
            msg = 'Upload successful. Visit %s' % location
            self.announce(msg, log.INFO)
        else:
            msg = 'Upload failed (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.ERROR)
        if self.show_response:
            print('-' * 75, r.read(), '-' * 75)
|
RalfBarkow/Zettelkasten
|
venv/lib/python3.9/site-packages/setuptools/command/upload_docs.py
|
Python
|
gpl-3.0
| 7,151
|
[
"VisIt"
] |
e3cd8177b92c0417f37689318cd42f9d1d00e9d83c5ce391120f700790702ad5
|
r"""protocols is a module that contains a set of VTK Web related
protocols that can be combined together to provide a flexible way to define
very specific web application.
"""
from time import time
import os, sys, logging, types, inspect, traceback, logging, re
from vtkWebCorePython import vtkWebApplication, vtkWebInteractionEvent
from autobahn.wamp import register as exportRpc
# =============================================================================
#
# Base class for any VTK Web based protocol
#
# =============================================================================
class vtkWebProtocol(object):
    """Base class for VTK Web protocols: convenience accessors tying a
    protocol instance to a vtkWebApplication and its global object-id map."""

    def setApplication(self, app):
        """Attach the vtkWebApplication this protocol operates on."""
        self.Application = app

    def getApplication(self):
        """Return the attached vtkWebApplication."""
        return self.Application

    def mapIdToObject(self, id):
        """
        Maps global-id for a vtkObject to the vtkObject instance. May return None if the
        id is not valid.
        """
        key = int(id)
        if key <= 0:
            return None
        return self.Application.GetObjectIdMap().GetVTKObject(key)

    def getGlobalId(self, obj):
        """
        Return the id for a given vtkObject
        """
        id_map = self.Application.GetObjectIdMap()
        return id_map.GetGlobalId(obj)

    def getView(self, vid):
        """
        Returns the view for a given view ID, if vid is None then return the
        current active view.
        :param vid: The view ID
        :type vid: str
        """
        view = self.mapIdToObject(vid)
        if view:
            return view
        # Fall back to the active view when the id does not resolve.
        view = self.Application.GetObjectIdMap().GetActiveObject("VIEW")
        if view:
            return view
        raise Exception("no view provided: " + vid)

    def setActiveView(self, view):
        """
        Set a vtkRenderWindow to be the active one
        """
        self.Application.GetObjectIdMap().SetActiveObject("VIEW", view)
# =============================================================================
#
# Handle Mouse interaction on any type of view
#
# =============================================================================
class vtkWebMouseHandler(vtkWebProtocol):
    """Handle mouse interaction events on any type of view."""

    @exportRpc("viewport.mouse.interaction")
    def mouseInteraction(self, event):
        """
        RPC Callback for mouse interactions.

        `event` is a dict sent by the client; it carries the button flags,
        modifier-key flags, 'action', 'view' id and optionally 'x', 'y' and
        'scroll'. Returns the result of HandleInteractionEvent.
        """
        view = self.getView(event['view'])

        # Translate the client's button flags into the VTK bitmask.
        buttons = 0
        if event["buttonLeft"]:
            buttons |= vtkWebInteractionEvent.LEFT_BUTTON
        if event["buttonMiddle"]:
            buttons |= vtkWebInteractionEvent.MIDDLE_BUTTON
        if event["buttonRight"]:
            buttons |= vtkWebInteractionEvent.RIGHT_BUTTON

        # Translate modifier-key flags into the VTK bitmask.
        modifiers = 0
        if event["shiftKey"]:
            modifiers |= vtkWebInteractionEvent.SHIFT_KEY
        if event["ctrlKey"]:
            modifiers |= vtkWebInteractionEvent.CTRL_KEY
        if event["altKey"]:
            modifiers |= vtkWebInteractionEvent.ALT_KEY
        if event["metaKey"]:
            modifiers |= vtkWebInteractionEvent.META_KEY

        pvevent = vtkWebInteractionEvent()
        pvevent.SetButtons(buttons)
        pvevent.SetModifiers(modifiers)
        # Use the membership test instead of the deprecated dict.has_key()
        # (removed in Python 3; 'in' works identically on Python 2).
        if "x" in event:
            pvevent.SetX(event["x"])
        if "y" in event:
            pvevent.SetY(event["y"])
        if "scroll" in event:
            pvevent.SetScroll(event["scroll"])
        if event["action"] == 'dblclick':
            pvevent.SetRepeatCount(2)
        #pvevent.SetKeyCode(event["charCode"])
        retVal = self.getApplication().HandleInteractionEvent(view, pvevent)
        del pvevent

        return retVal
# =============================================================================
#
# Basic 3D Viewport API (Camera + Orientation + CenterOfRotation
#
# =============================================================================
class vtkWebViewPort(vtkWebProtocol):
    """Basic 3D viewport API: camera reset/update and axes visibility."""

    @exportRpc("viewport.camera.reset")
    def resetCamera(self, viewId):
        """
        RPC callback to reset camera.

        :param viewId: id of the target view (falls back to the active view).
        :return: the global id of the view as a string.
        """
        view = self.getView(viewId)
        camera = view.GetRenderer().GetActiveCamera()
        camera.ResetCamera()
        try:
            # FIXME seb: view.CenterOfRotation = camera.GetFocalPoint()
            # print() call form: valid Python 2 and 3 (the original used the
            # Python-2-only print statement, a syntax error under Python 3).
            print("FIXME")
        except Exception:
            # Best-effort placeholder; continue even if the FIXME path fails.
            pass

        self.getApplication().InvalidateCache(view)
        return str(self.getGlobalId(view))

    @exportRpc("viewport.axes.orientation.visibility.update")
    def updateOrientationAxesVisibility(self, viewId, showAxis):
        """
        RPC callback to show/hide OrientationAxis.

        Currently only invalidates the image cache; the visibility toggle
        itself is still pending (see FIXME).
        """
        view = self.getView(viewId)
        # FIXME seb: view.OrientationAxesVisibility = (showAxis if 1 else 0);
        self.getApplication().InvalidateCache(view)
        return str(self.getGlobalId(view))

    @exportRpc("viewport.axes.center.visibility.update")
    def updateCenterAxesVisibility(self, viewId, showAxis):
        """
        RPC callback to show/hide CenterAxesVisibility.

        Currently only invalidates the image cache; the visibility toggle
        itself is still pending (see FIXME).
        """
        view = self.getView(viewId)
        # FIXME seb: view.CenterAxesVisibility = (showAxis if 1 else 0);
        self.getApplication().InvalidateCache(view)
        return str(self.getGlobalId(view))

    @exportRpc("viewport.camera.update")
    def updateCamera(self, view_id, focal_point, view_up, position):
        """RPC callback to reposition the active camera of a view."""
        view = self.getView(view_id)

        camera = view.GetRenderer().GetActiveCamera()
        camera.SetFocalPoint(focal_point)
        # NOTE(review): vtkCamera's setters are normally SetViewUp/SetPosition;
        # SetCameraViewUp/SetCameraPosition look suspicious — confirm against
        # the actual camera API before relying on this path.
        camera.SetCameraViewUp(view_up)
        camera.SetCameraPosition(position)
        self.getApplication().InvalidateCache(view)
# =============================================================================
#
# Provide Image delivery mechanism
#
# =============================================================================
class vtkWebViewPortImageDelivery(vtkWebProtocol):
    """Deliver rendered view images (base64 jpeg) to web clients."""

    @exportRpc("viewport.image.render")
    def stillRender(self, options):
        """
        RPC Callback to render a view and obtain the rendered image.

        :param options: dict with required key ``view`` and optional keys
            ``size``, ``mtime``, ``quality``, ``localTime``.
        :return: dict with the image payload plus metadata (mtime, size,
            staleness flag, round-trip timing).
        """
        beginTime = int(round(time() * 1000))
        view = self.getView(options["view"])
        size = [view.GetSize()[0], view.GetSize()[1]]
        resize = size != options.get("size", size)
        if resize:
            size = options["size"]
            if size[0] > 0 and size[1] > 0:
                view.SetSize(size)
        # "key in dict" replaces dict.has_key(), removed in Python 3.
        t = 0
        if options and "mtime" in options:
            t = options["mtime"]
        quality = 100
        if options and "quality" in options:
            quality = options["quality"]
        localTime = 0
        if options and "localTime" in options:
            localTime = options["localTime"]
        reply = {}
        app = self.getApplication()
        if t == 0:
            # mtime 0 means "no client-side cache": force a fresh render.
            app.InvalidateCache(view)
        reply["image"] = app.StillRenderToString(view, t, quality)
        # Check that we are getting image size we have set if not wait until we
        # do. The render call will set the actual window size.
        tries = 10
        while resize and list(view.GetSize()) != size \
                and size != [0, 0] and tries > 0:
            app.InvalidateCache(view)
            reply["image"] = app.StillRenderToString(view, t, quality)
            tries -= 1
        reply["stale"] = app.GetHasImagesBeingProcessed(view)
        reply["mtime"] = app.GetLastStillRenderToStringMTime()
        reply["size"] = [view.GetSize()[0], view.GetSize()[1]]
        reply["format"] = "jpeg;base64"
        reply["global_id"] = str(self.getGlobalId(view))
        reply["localTime"] = localTime
        endTime = int(round(time() * 1000))
        reply["workTime"] = (endTime - beginTime)
        return reply
# =============================================================================
#
# Provide Geometry delivery mechanism (WebGL)
#
# =============================================================================
class vtkWebViewPortGeometryDelivery(vtkWebProtocol):
    """Serve WebGL scene metadata and binary geometry pieces for a view."""

    @exportRpc("viewport.webgl.metadata")
    def getSceneMetaData(self, view_id):
        """Return the WebGL scene description for the given view."""
        target_view = self.getView(view_id)
        return self.getApplication().GetWebGLSceneMetaData(target_view)

    @exportRpc("viewport.webgl.data")
    def getWebGLData(self, view_id, object_id, part):
        """Return one binary piece of a WebGL object (*part* is 1-based)."""
        target_view = self.getView(view_id)
        return self.getApplication().GetWebGLBinaryData(
            target_view, str(object_id), part - 1)
# =============================================================================
#
# Provide File/Directory listing
#
# =============================================================================
class vtkWebFileBrowser(vtkWebProtocol):
    """Expose server-side file/directory listings over RPC."""

    def __init__(self, basePath, name, excludeRegex=r"^\.|~$|^\$", groupRegex=r"[0-9]+\."):
        """
        Configure the way the WebFile browser will expose the server content.
        - basePath: specify the base directory that we should start with
        - name: Name of that base directory that will show up on the web
        - excludeRegex: Regular expression of what should be excluded from the list of files/directories
        - groupRegex: pattern used to collapse numbered file sequences into groups
        """
        self.baseDirectory = basePath
        self.rootName = name
        self.pattern = re.compile(excludeRegex)
        self.gPattern = re.compile(groupRegex)

    @exportRpc("file.server.directory.list")
    def listServerDirectory(self, relativeDir='.'):
        """
        RPC Callback to list a server directory relative to the basePath
        provided at start-up.

        :return: dict with 'label', 'files', 'dirs', 'groups' and 'path'.
        """
        path = [self.rootName]
        if len(relativeDir) > len(self.rootName):
            relativeDir = relativeDir[len(self.rootName) + 1:]
            path += relativeDir.replace('\\', '/').split('/')

        currentPath = os.path.join(self.baseDirectory, relativeDir)
        result = {'label': relativeDir, 'files': [], 'dirs': [], 'groups': [], 'path': path}
        if relativeDir == '.':
            result['label'] = self.rootName
        # 'entry' instead of 'file': avoids shadowing the Python 2 builtin.
        for entry in os.listdir(currentPath):
            if os.path.isfile(os.path.join(currentPath, entry)) and not re.search(self.pattern, entry):
                result['files'].append({'label': entry, 'size': -1})
            elif os.path.isdir(os.path.join(currentPath, entry)) and not re.search(self.pattern, entry):
                result['dirs'].append(entry)

        # Filter files to create groups
        files = result['files']
        # Sort by label: comparing the dicts themselves is undefined on
        # Python 3 and gave an arbitrary dict ordering on Python 2.
        files.sort(key=lambda meta: meta['label'])
        groups = result['groups']
        groupIdx = {}
        filesToRemove = []
        for meta in files:
            fileSplit = re.split(self.gPattern, meta['label'])
            if len(fileSplit) == 2:
                filesToRemove.append(meta)
                gName = '*.'.join(fileSplit)
                # "key in dict" replaces dict.has_key(), removed in Python 3.
                if gName in groupIdx:
                    groupIdx[gName]['files'].append(meta['label'])
                else:
                    groupIdx[gName] = {'files': [meta['label']], 'label': gName}
                    groups.append(groupIdx[gName])

        # Singleton "groups" are dissolved back into plain files.
        for meta in filesToRemove:
            gName = '*.'.join(re.split(self.gPattern, meta['label']))
            if len(groupIdx[gName]['files']) > 1:
                files.remove(meta)
            else:
                groups.remove(groupIdx[gName])

        return result
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Web/Python/vtk/web/protocols.py
|
Python
|
bsd-3-clause
| 11,521
|
[
"VTK"
] |
91892ff4f7b2e4323b8aab2a68380f3d03fb051544a09b8222b2366189fe740c
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 10:08:31 2015
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
@author: ycasg
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import constants
class OneDBeam_highV_WG:
    """Propagation helper for a high-V-number waveguide.

    Holds the beam shape and propagation-axis information. The effective
    mode area is a single constant shared by all colors and does not vary
    along z.
    """
    _Aeff = 1.0
    _lambda0 = None
    _crystal_ID = None
    _n_s_cache = None

    def __init__(self, Aeff_squm=10.0, this_pulse=None, axis=None):
        """Initialize from an effective area given in square microns."""
        self._lambda0 = this_pulse.wl_mks
        self.axis = axis
        self.set_Aeff(Aeff_squm * 1e-12)  # convert um^2 -> m^2

    def set_Aeff(self, Aeff):
        self._Aeff = Aeff

    def _get_Aeff(self):
        return self._Aeff

    Aeff = property(_get_Aeff)

    def calculate_gouy_phase(self, z, n_s):
        """Gouy phase factor; constant (1.0) inside a waveguide."""
        return 1.0

    def _rtP_to_a(self, n_s, z, waist=None):
        """Field-from-sqrt(power) conversion constant: A = P_to_a * rtP."""
        denominator = np.sqrt(
            self._Aeff * n_s * constants.epsilon_0 * constants.speed_of_light)
        return 1.0 / denominator

    def rtP_to_a(self, n_s, z=None):
        """Conversion constant from a refractive index: A**2 = rtP_to_a**2 * P."""
        return self._rtP_to_a(n_s, z)

    def rtP_to_a_2(self, pulse_instance, crystal_instance, z=None, waist=None):
        """Same conversion, pulling the index from pulse/crystal instances."""
        refractive_index = self.get_n_in_crystal(pulse_instance, crystal_instance)
        return self._rtP_to_a(refractive_index, z)

    def calc_overlap_integral(self, z, this_pulse, othr_pulse, othr_beam,
                              crystal_instance, reverse_order=False):
        """Field-square overlap with a second beam.

        In a high-V waveguide all modes share the same size, so the overlap
        is identically 1.0.
        """
        return 1.0

    def get_n_in_crystal(self, pulse_instance, crystal_instance):
        return crystal_instance.get_pulse_n(pulse_instance, self.axis)

    def get_k_in_crystal(self, pulse_instance, crystal_instance):
        return crystal_instance.get_pulse_k(pulse_instance, self.axis)
|
ycasg/PyNLO
|
src/pynlo/light/high_V_waveguide.py
|
Python
|
gpl-3.0
| 3,514
|
[
"CRYSTAL"
] |
ee4ad13c6ab6d0749a9ee0593b87185330073ddb87fc6a3504cc67b4caef84e4
|
import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
# Widen pandas console output so result tables are not wrapped.
pd.set_option('display.width', 1000)

# Prebuilt test system to load: charged, switched, accurate LJ box.
sysname = "chargedswitchedaccurateljbox"
system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)
# Minimize then equilibrate before the debugging run below.
positions, boxes = lb_loader.equilibrate(testsystem, temperature, timestep, steps=equil_steps, minimize=True)
collision_rate = None
#del simulation, integrator
# Override loader defaults for this XC-GHMC debugging experiment.
timestep = 40. * u.femtoseconds
extra_chances = 2
steps_per_hmc = 50
output_frequency = 1
integrator = hmc_integrators.XCGHMCIntegrator(temperature=temperature, steps_per_hmc=steps_per_hmc, timestep=timestep, extra_chances=extra_chances, collision_rate=collision_rate)
itype = type(integrator).__name__
simulation = lb_loader.build(testsystem, integrator, temperature)
for i in range(1):
    simulation.step(100)
    print("i=%d" % i)
    print("Counts")
    print(integrator.all_counts)
    # NOTE(review): assumes the integrator defines the global variables
    # "nflip" and "terminal_chance" — confirm against hmc_integrators.
    print(integrator.getGlobalVariableByName("nflip"))
    print(integrator.getGlobalVariableByName("terminal_chance"))
|
kyleabeauchamp/HMCNotes
|
code/misc/debugging_xc.py
|
Python
|
gpl-2.0
| 1,183
|
[
"OpenMM"
] |
2c8d92f6a325385158204527fb1abcd2021b0ad11cc9973d64da980ceff3c0f6
|
#!/usr/bin/python
import music
from mpi4py import MPI
from matplotlib import mlab
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
plt.ion()
class DynamicUpdate():
    """Live two-panel figure: spike raster (left) and 3D PCA trace (right)."""
    # Suppose we know the x range (shared limits for the 3D panel).
    ax_min = -20
    ax_max = 20

    def on_launch(self):
        """Create the figure with a 2D raster axis and a 3D projection axis."""
        # Set up plot
        self.figure = plt.figure("Network activity")
        self.ax0 = self.figure.add_subplot(121)
        self.ax1 = self.figure.add_subplot(122, projection='3d')
        self.lines, = self.ax0.plot([], [], 'o')
        # Other stuff
        self.ax0.grid()
        self.ax0.set_xlabel("time [s]")
        self.ax0.set_ylabel("neuron id")

    def on_running(self, xdata, ydata, pca):
        """Refresh both panels with new spike data and PCA projections."""
        self.ax0.set_xlim(min(xdata), max(xdata))
        # Update data (with the new _and_ the old points)
        self.lines.set_xdata(xdata)
        self.lines.set_ydata(ydata)
        self.ax1.clear()
        self.ax1.set_xlabel("PC 1")
        self.ax1.set_ylabel("PC 2")
        self.ax1.set_zlabel("PC 3")
        self.ax1.set_xlim(self.ax_min, self.ax_max)
        self.ax1.set_ylim(self.ax_min, self.ax_max)
        # BUG FIX: set_ylim was called twice; the second call was clearly
        # meant to bound the z axis (see the zlim comment in the original).
        self.ax1.set_zlim(self.ax_min, self.ax_max)
        try:
            self.ax1.plot(pca[:, 0], pca[:, 1], pca[:, 2])
        except Exception:
            # PCA data may be absent or ragged before enough history exists;
            # keep the raster panel updating regardless.
            pass
        # Need both of these in order to rescale
        self.ax0.relim()
        self.ax0.autoscale_view()
        # We need to draw *and* flush
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()
comm = MPI.COMM_WORLD
# NOTE(review): a module-level ``global`` statement is a no-op in Python;
# kept only because the functions below re-declare these names anyway.
global DEFAULT_TIMESTEP, timestep, setup, runtime, stoptime, tau, DEFAULT_TAU, state, state_hist, PCA_HIST_LENGTH, PROJ_HIST_LENGTH, spikes
DEFAULT_TIMESTEP = 0.001  # fallback MUSIC tick length [s]
DEFAULT_TAU = 0.18  # fallback decay constant of spike traces [s]
PCA_HIST_LENGTH = 20 # in sec
SPIKE_HIST_LENGTH = 1 # seconds
PROJ_HIST_LENGTH = 5 #in sec
# Ring buffers for the raster plot, seeded with a dummy entry at t=0.
spikes = {'times': np.array([0]), 'senders': np.array([0])}
def main():
    """Entry point: initialize, connect to MUSIC, run the main loop."""
    init()
    initMUSIC()
    runMUSIC()

def init():
    """Announce startup (no other initialization happens here)."""
    print("initializing PCA adapter")

def eventfunc(d, t, i):
    """MUSIC spike-event callback.

    :param d: event time stamp stored in the spike history
        (presumably seconds — TODO confirm against the MUSIC port).
    :param i: sender (neuron) id.
    :param t: unused here.
    """
    global state, tau, spikes
    # Each incoming spike bumps the sender's decaying trace by tau.
    state[i] += tau
    spikes['times'] = np.append(d, spikes['times'])
    spikes['senders'] = np.append(i, spikes['senders'])
    #print "inc spike", i, d, t
def initMUSIC():
    """Set up the MUSIC input port, state buffers, and the live plot."""
    global DEFAULT_TIMESTEP, timestep, setup, stoptime, runtime, tau, DEFAULT_TAU, state, state_hist, d, num_neurons, proj_hist
    setup = music.Setup()
    try:
        timestep = setup.config("music_timestep")
    except:
        # Config key absent: fall back to the default tick length.
        timestep = DEFAULT_TIMESTEP
    try:
        tau = setup.config("tau")
    except:
        tau = DEFAULT_TAU
    stoptime = setup.config("stoptime")
    port_in = setup.publishEventInput("in")
    # Route every incoming spike to eventfunc; one channel per neuron.
    port_in.map(eventfunc,
                music.Index.GLOBAL,
                base=0,
                size=port_in.width(),
                maxBuffered=1)
    state = np.ones(port_in.width())
    state_hist = {"states": [np.array(state)], "times": [0]}
    # Pre-fill the history so PCA sees a full window from the start.
    for i in range(100):
        state_hist['states'] = np.append(state_hist['states'], [state], axis = 0)
        state_hist['times'] = np.append(state_hist['times'], [0], axis = 0)
    proj = np.zeros(3)
    proj_hist = {"projs": [np.array(proj)], "times": [0]}
    d = DynamicUpdate()
    d.on_launch()
    num_neurons = port_in.width()
    # Synchronize all MPI ranks before entering the MUSIC runtime.
    comm.Barrier()
    runtime = music.Runtime(setup, timestep)
def runMUSIC():
    """Main loop: decay the state vector each tick, maintain sliding-window
    histories of states / PCA projections / spikes, and refresh the plot.

    Fix: the original used the Python-2-only ``print`` statement, a syntax
    error under Python 3; print() works on both.
    """
    global runtime, stoptime, timestep, state, state_hist, d, PCA_HIST_LENGTH, spikes, SPIKE_HIST_LENGTH, proj_hist
    print("running PCA adapter")
    t = 0
    pca_created = False
    while runtime.time() < stoptime:
        # Fit the PCA basis once, after enough history has accumulated.
        if runtime.time() > PCA_HIST_LENGTH and not pca_created:
            pca = mlab.PCA(state_hist['states'])
            pca_created = True
        # Exponential decay of the spike-driven state trace.
        state = state * np.exp(-timestep / tau)
        if t % 50 == 0:
            if runtime.time() < PCA_HIST_LENGTH:
                state_hist['states'] = np.append(state_hist['states'], [state], axis=0)
                state_hist['times'] = np.append(state_hist['times'], [runtime.time()], axis=0)
                # Keep only the last PCA_HIST_LENGTH seconds of history.
                state_hist_mask = np.where(state_hist['times'] > max(state_hist['times']) - PCA_HIST_LENGTH)
                state_hist['times'] = state_hist['times'][state_hist_mask]
                state_hist['states'] = state_hist['states'][state_hist_mask]
            if runtime.time() > PCA_HIST_LENGTH:
                # Project the current state onto the first three PCs.
                projection = pca.project(state)
                projection = projection[:3]
                proj_hist['projs'] = np.append(proj_hist['projs'], [projection], axis=0)
                proj_hist['times'] = np.append(proj_hist['times'], [runtime.time()], axis=0)
                proj_hist_mask = np.where(proj_hist['times'] > max(proj_hist['times']) - PROJ_HIST_LENGTH)
                proj_hist['times'] = proj_hist['times'][proj_hist_mask]
                proj_hist['projs'] = proj_hist['projs'][proj_hist_mask]
            # Trim the raster to the last SPIKE_HIST_LENGTH seconds and redraw.
            spike_hist_mask = np.where(spikes['times'] > max(spikes['times']) - SPIKE_HIST_LENGTH)
            spikes['times'] = spikes['times'][spike_hist_mask]
            spikes['senders'] = spikes['senders'][spike_hist_mask]
            d.on_running(spikes['times'], spikes['senders'], proj_hist['projs'])
        runtime.tick()
        t += 1
if __name__ == "__main__":
main()
|
weidel-p/ros_music_adapter
|
adapters/pca.py
|
Python
|
gpl-3.0
| 5,580
|
[
"NEURON"
] |
63bd2ef9571a582e0dbb077b8c16f673232f9c6bcdd31187c752461b07f848c6
|
#! /usr/bin/env python
"""Create landlab model grids."""
from ..core import load_params
from ..io import read_esri_ascii
from ..io.netcdf import read_netcdf
from ..values import constant, plane, random, sine
from .hex import HexModelGrid
from .network import NetworkModelGrid
from .radial import RadialModelGrid
from .raster import RasterModelGrid
from .voronoi import VoronoiDelaunayGrid
_MODEL_GRIDS = {
"RasterModelGrid": RasterModelGrid,
"HexModelGrid": HexModelGrid,
"VoronoiDelaunayGrid": VoronoiDelaunayGrid,
"NetworkModelGrid": NetworkModelGrid,
"RadialModelGrid": RadialModelGrid,
}
_SYNTHETIC_FIELD_CONSTRUCTORS = {
"plane": plane,
"random": random,
"sine": sine,
"constant": constant,
}
# Root of this module's exception hierarchy; catch this to handle any
# grid-creation failure raised here.
class Error(Exception):
    """Base class for exceptions from this module."""

    pass
class BadGridTypeError(Error):
    """Raised when a grid description names an unknown grid type."""

    def __init__(self, grid_type):
        # Keep only the offending name, coerced to text, for display.
        self._type = str(grid_type)  # TODO: not tested.

    def __str__(self):
        return self._type  # TODO: not tested.
def grid_from_dict(grid_type, params):
    """Create a grid from a dictionary of parameters.

    ``grid_type`` must be one of the keys of ``_MODEL_GRIDS``; ``params``
    is split into positional and keyword arguments for its constructor.
    """
    if grid_type not in _MODEL_GRIDS:
        raise ValueError("unknown grid type ({0})".format(grid_type))
    grid_cls = _MODEL_GRIDS[grid_type]
    args, kwargs = _parse_args_kwargs(params)
    return grid_cls(*args, **kwargs)
def grids_from_file(file_like, section=None):
    """Create grids from a file.

    Parameters
    ----------
    file_like : file_like or str
        Anything ``load_params`` accepts (path, file object, YAML text).
    section : str, optional
        If given, only this top-level section of the parameter dictionary
        is used; a missing section raises ``ValueError``.

    Returns
    -------
    list
        The newly created grids, one per description entry.
    """
    params = load_params(file_like)
    if section:
        try:
            grids = params[section]
        except KeyError:  # TODO: not tested.
            raise ValueError(
                "missing required section ({0})".format(section)
            )  # TODO: not tested.
    else:  # TODO: not tested.
        grids = params  # TODO: not tested.
    new_grids = []
    # Each entry is a (grid_type, description) pair; build them in order.
    for grid_type, grid_desc in as_list_of_tuples(grids):
        new_grids.append(grid_from_dict(grid_type, grid_desc))
    return new_grids
def add_fields_from_dict(grid, fields):
    """Attach fields to *grid* from a nested dictionary.

    Keys of *fields* name grid elements ("node", "link", ...); each maps
    field names to the creation recipe handed to
    ``add_field_from_function``. Returns the grid for chaining.
    """
    fields = dict(fields)
    unknown_locations = set(fields) - set(grid.VALID_LOCATIONS)
    if unknown_locations:
        raise ValueError(
            "unknown field locations ({0})".format(", ".join(unknown_locations))
        )
    for where, specs_at_location in fields.items():
        for field_name, recipe in specs_at_location.items():
            add_field_from_function(grid, field_name, recipe, at=where)
    return grid
def add_field_from_function(grid, name, functions, at="node"):
    """Add a field to a grid as functions.

    Parameters
    ----------
    grid : ModelGrid
        A landlab grid to add fields to.
    name : str
        Name of the new field.
    functions : *(func_name, func_args)* or iterable of *(func_name, func_args)*
        The functions to apply to the field. Functions are applied in the order
        the appear in the list.
    at : str
        The grid element to which the field will be added.

    Returns
    -------
    ModelGrid
        The grid with the new field.

    Raises
    ------
    ValueError
        If a function name is not a synthetic-value constructor or one of
        the supported readers.
    """
    valid_functions = set(_SYNTHETIC_FIELD_CONSTRUCTORS) | set(
        ["read_esri_ascii", "read_netcdf"]
    )
    # Functions are applied in order; later ones can build on earlier values.
    for func_name, func_args in as_list_of_tuples(functions):
        if func_name not in valid_functions:
            raise ValueError("function not understood ({0})".format(func_name))
        args, kwargs = _parse_args_kwargs(func_args)
        if func_name in _SYNTHETIC_FIELD_CONSTRUCTORS:
            # if any args, raise an error, there shouldn't be any.
            synth_function = _SYNTHETIC_FIELD_CONSTRUCTORS[func_name]
            synth_function(grid, name, at=at, **kwargs)
        elif func_name == "read_esri_ascii":
            read_esri_ascii(*args, grid=grid, name=name, **kwargs)
        elif func_name == "read_netcdf":
            read_netcdf(*args, grid=grid, name=name, **kwargs)
    return grid
def add_boundary_conditions(grid, boundary_conditions=()):
    """Apply boundary-condition setters to *grid*.

    Each entry of *boundary_conditions* is a (method_name, args) pair; the
    named bound method of the grid is looked up and called in order.

    Raises
    ------
    ValueError
        If the grid has no method with the requested name.
    """
    for bc_name, bc_args in as_list_of_tuples(boundary_conditions):
        args, kwargs = _parse_args_kwargs(bc_args)
        try:
            func = getattr(grid, bc_name)
        except AttributeError:
            raise ValueError(
                "create_grid: No function {func} exists for grid types {grid}."
                "If you think this type of grid should have such a "
                "function. Please create a GitHub Issue to discuss "
                "contributing it to the Landlab codebase.".format(
                    func=bc_name, grid=grid.__class__.__name__
                )
            )
        else:
            # Only call when the lookup succeeded.
            func(*args, **kwargs)
def as_list_of_tuples(items):
    """Convert a collection of key/values to a list of tuples.

    Accepts a mapping, a single ``(key, value)`` pair, or an iterable that
    mixes pairs and mappings; mappings are expanded in iteration order.

    Examples
    --------
    >>> from landlab.grid.create import as_list_of_tuples
    >>> as_list_of_tuples({"eric": "idle"})
    [('eric', 'idle')]
    >>> as_list_of_tuples([("john", "cleese"), {"eric": "idle"}])
    [('john', 'cleese'), ('eric', 'idle')]
    """
    if hasattr(items, "items"):
        items = list(items.items())
    else:
        items = list(items)

    # A bare two-element pair whose first member is a string is treated as
    # one (key, value) entry, not as two entries.
    if len(items) == 2 and isinstance(items[0], str):
        items = [items]

    tuples = []
    for entry in items:
        if hasattr(entry, "items"):
            tuples.extend(list(entry.items()))
        else:
            tuples.append(tuple(entry))
    return tuples
def create_grid(file_like, section=None):
    """Create grid, initialize fields, and set boundary conditions.

    **create_grid** expects a dictionary with three keys: "grid", "fields", and
    "boundary_conditions".

    **Dictionary Section "grid"**

    The value associated with the "grid" key should itself be a dictionary
    containing the name of a Landlab model grid type as its only key. The
    following grid types are valid:

    - :py:class:`~landlab.grid.raster.RasterModelGrid`
    - :py:class:`~landlab.grid.voronoi.VoronoiDelaunayGrid`
    - :py:class:`~landlab.grid.hex.HexModelGrid`
    - :py:class:`~landlab.grid.radial.RadialModelGrid`
    - :py:class:`~landlab.grid.network.NetworkModelGrid`

    The value associated with the grid name key is a list containing the
    arguments. If any keyword arguments are passed, they should be passed as
    the last element of the list. For example the following code block is a
    yaml file indicating a RasterModelGrid with shape (4, 5) and xy-spacing of
    (3, 4).

    .. code-block:: yaml

        grid:
          RasterModelGrid:
            - [4, 5]
            - xy_spacing: [3, 4]

    These arguments and keyword arguments will be passed to the ``__init__``
    constructor of the specified model grid. Refer to the documentation for
    each grid to determine its requirements.

    **Dictionary Section "fields"**

    Fields can be created by reading from files or by creating synthetic
    values.

    The value associated with the "fields" key is a nested set of dictionaries
    indicating where the fields are created, what the field names are, and how
    to create the fields. As part of a grid's description, the value
    associated with the "fields" key must be a dictionary with keys indicating
    at which grid elements fields should be created (e.g. to create fields at
    node, use "node").

    The value associated with each "xxx" (i.e. "node", "link", "patch", etc.)
    value is itself a dictionary
    indicating the name of the field and how it should be created. A field can
    either be created by reading from a file or creating synthetic values. The
    :py:func:`~landlab.io.netcdf.read.read_netcdf` and
    :py:func:`~landlab.io.esri_ascii.read_esri_ascii` functions, and the
    :py:mod:`synthetic fields <landlab.values.synthetic>`
    package are currently supported methods to create fields. These may be
    chained together (as is shown in the Example section below). If these
    functions do not meet your needs, we welcome contributions that extend the
    capabilities of this function.

    The following example would use the
    :py:func:`~landlab.values.synthetic.plane` function from the synthetic
    values package to create a *node* value for the field
    *topographic__elevation*. The plane function adds values to a Landlab model
    grid field that lie on a plane specified by a point and a normal vector. In
    the below example the plane goes through the point (1.0, 1.0, 1.0) and has
    a normal of (-2.0, -1.0, 1.0).

    .. code-block:: yaml

        grid:
          RasterModelGrid:
            - [4, 5]
            - xy_spacing: [3, 4]
            - fields:
                node:
                  topographic__elevation:
                    plane:
                      - point: [1, 1, 1]
                        normal: [-2, -1, 1]

    **Dictionary Section "boundary_conditions"**

    The final portion of the input dictionary calls bound functions of the
    model grid to set boundary conditions. Any valid bound function can be
    called. The specified functions are provided in a list, and called in
    order. If required, multiple functions may be called.

    Each entry to the list is a dictionary with a single key, the name of the
    bound function. The value associated with that key is a list of arguments
    and keyword arguments, similar in structure to those described above.

    As with the "fields" section, the "boundary_conditions" section must be
    described under its associated grid description.

    For example, the following sets closed boundaries at all sides of the grid.

    .. code-block:: yaml

        grid:
          RasterModelGrid:
            - [4, 5]
            - xy_spacing: [3, 4]
            - boundary_conditions:
              - set_closed_boundaries_at_grid_edges:
                - True
                - True
                - True
                - True

    Parameters
    ----------
    file_like : file_like or str
        Dictionary, contents of a dictionary as a string, a file-like object,
        or the path to a file containing a YAML dictionary.
    section : str, optional
        Name of the top-level section of the parameters holding the grid
        description(s).

    Returns
    -------
    ModelGrid or list of ModelGrid
        A single grid when one description is given, otherwise a list.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab import create_grid
    >>> np.random.seed(42)
    >>> p = {
    ...     "grid": {
    ...         "RasterModelGrid": [
    ...             (4, 5),
    ...             {"xy_spacing": (3, 4)},
    ...             {
    ...                 "fields": {
    ...                     "node": {
    ...                         "spam": {
    ...                             "plane": [{"point": (1, 1, 1), "normal": (-2, -1, 1)}],
    ...                             "random": [
    ...                                 {"distribution": "uniform", "low": 1, "high": 4}
    ...                             ],
    ...                         }
    ...                     },
    ...                     "link": {
    ...                         "eggs": {"constant": [{"where": "ACTIVE_LINK", "value": 12}]}
    ...                     },
    ...                 }
    ...             },
    ...             {
    ...                 "boundary_conditions": [
    ...                     {"set_closed_boundaries_at_grid_edges": [True, True, True, True]}
    ...                 ]
    ...             },
    ...         ]
    ...     }
    ... }
    >>> mg = create_grid(p, section="grid")
    >>> mg.number_of_nodes
    20
    >>> "spam" in mg.at_node
    True
    >>> "eggs" in mg.at_link
    True
    >>> mg.x_of_node
    array([  0.,   3.,   6.,   9.,  12.,
             0.,   3.,   6.,   9.,  12.,
             0.,   3.,   6.,   9.,  12.,
             0.,   3.,   6.,   9.,  12.])
    >>> mg.status_at_node
    array([4, 4, 4, 4, 4,
           4, 0, 0, 0, 4,
           4, 0, 0, 0, 4,
           4, 4, 4, 4, 4], dtype=uint8)
    >>> np.round(mg.at_node['spam'].reshape(mg.shape), decimals=2)
    array([[  0.12,   7.85,  13.2 ,  18.8 ,  23.47],
           [  3.47,   9.17,  17.6 ,  22.8 ,  29.12],
           [  7.06,  15.91,  21.5 ,  25.64,  31.55],
           [ 11.55,  17.91,  24.57,  30.3 ,  35.87]])
    """
    # Accept a ready-made dict directly; anything else goes through the
    # YAML/file loader.
    if isinstance(file_like, dict):
        params = file_like
    else:
        params = load_params(file_like)
    if section:
        grids = params[section]
    else:
        grids = params
    new_grids = []
    for grid_type, grid_desc in as_list_of_tuples(grids):
        # Normalize so "fields"/"boundary_conditions" can be popped off
        # before the remaining entries go to the grid constructor.
        grid_desc = norm_grid_description(grid_desc)
        fields = grid_desc.pop("fields", {})
        boundary_conditions = grid_desc.pop("boundary_conditions", {})
        grid = grid_from_dict(grid_type, grid_desc)
        add_fields_from_dict(grid, fields)
        add_boundary_conditions(grid, boundary_conditions)
        new_grids.append(grid)
    # Single description -> single grid, for convenience.
    if len(new_grids) == 1:
        return new_grids[0]
    else:
        return new_grids
def norm_grid_description(grid_desc):
    """Normalize a grid description into a canonical form.

    A dict passes through unchanged. A sequence is split into positional
    arguments (stored under the "args" key) and keyword entries; a trailing
    dict or any dict carrying "fields"/"boundary_conditions" contributes
    keywords.

    Examples
    --------
    >>> from landlab.grid.create import norm_grid_description
    >>> grid_desc = [
    ...     (3, 4), {"xy_spacing": 4.0, "xy_of_lower_left": (1.0, 2.0)}
    ... ]
    >>> sorted(norm_grid_description(grid_desc).items())
    [('args', [(3, 4)]), ('xy_of_lower_left', (1.0, 2.0)), ('xy_spacing', 4.0)]
    """
    if isinstance(grid_desc, dict):
        return grid_desc

    special_keys = {"fields", "boundary_conditions"}
    positional, keywords = [], {}
    for entry in grid_desc:
        if isinstance(entry, dict) and special_keys & set(entry.keys()):
            keywords.update(entry)
        else:
            positional.append(entry)
    if isinstance(positional[-1], dict):
        keywords.update(positional.pop())
    keywords.update({"args": positional})
    return keywords
def _parse_args_kwargs(list_of_args_kwargs):
if isinstance(list_of_args_kwargs, dict):
args, kwargs = list_of_args_kwargs.pop("args", ()), list_of_args_kwargs
if not isinstance(args, (tuple, list)):
args = (args,)
else:
args, kwargs = [], {}
for arg in list(list_of_args_kwargs):
if isinstance(arg, dict) and {"fields", "boundary_conditions"} & set(
arg.keys()
):
kwargs.update(arg) # TODO: not tested.
else:
args.append(arg)
if isinstance(args[-1], dict):
kwargs.update(args.pop())
return tuple(args), kwargs
|
amandersillinois/landlab
|
landlab/grid/create.py
|
Python
|
mit
| 14,648
|
[
"NetCDF"
] |
a1a4a58ce517dab41e089eba9e427f4b53b0cf2f0fb1ff5fc78d468d3b1d5012
|
# coding: utf-8
# Copyright 2014-2020 Álvaro Justen <https://github.com/turicas/rows/>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
import json
import platform
import unittest
import uuid
from base64 import b64encode
from decimal import Decimal
import six
import rows
from rows import fields
# Locale used by the locale-aware number tests below: Windows spells
# Brazilian Portuguese differently from POSIX systems.
if platform.system() == "Windows":
    locale_name = "ptb_bra"
else:
    locale_name = "pt_BR.UTF-8"
class FieldsTestCase(unittest.TestCase):
def test_Field(self):
    """Base Field passes text through and serializes None as ''."""
    self.assertEqual(fields.Field.TYPE, (type(None),))
    self.assertIs(fields.Field.deserialize(None), None)
    self.assertEqual(fields.Field.deserialize("Álvaro"), "Álvaro")
    self.assertEqual(fields.Field.serialize(None), "")
    # Serialized output must always be a text (unicode) type.
    self.assertIs(type(fields.Field.serialize(None)), six.text_type)
    self.assertEqual(fields.Field.serialize("Álvaro"), "Álvaro")
    self.assertIs(type(fields.Field.serialize("Álvaro")), six.text_type)
def test_BinaryField(self):
    """BinaryField round-trips bytes through base64 text serialization."""
    deserialized = "Álvaro".encode("utf-8")
    serialized = b64encode(deserialized).decode("ascii")
    self.assertEqual(type(deserialized), six.binary_type)
    self.assertEqual(type(serialized), six.text_type)
    self.assertEqual(fields.BinaryField.TYPE, (bytes,))
    self.assertEqual(fields.BinaryField.serialize(None), "")
    self.assertIs(type(fields.BinaryField.serialize(None)), six.text_type)
    self.assertEqual(fields.BinaryField.serialize(deserialized), serialized)
    self.assertIs(type(fields.BinaryField.serialize(deserialized)), six.text_type)
    # serialize() accepts only bytes; other types must be rejected.
    with self.assertRaises(ValueError):
        fields.BinaryField.serialize(42)
    with self.assertRaises(ValueError):
        fields.BinaryField.serialize(3.14)
    with self.assertRaises(ValueError):
        fields.BinaryField.serialize("Álvaro")
    with self.assertRaises(ValueError):
        fields.BinaryField.serialize("123")
    self.assertIs(fields.BinaryField.deserialize(None), b"")
    self.assertEqual(fields.BinaryField.deserialize(serialized), deserialized)
    self.assertIs(type(fields.BinaryField.deserialize(serialized)), six.binary_type)
    # deserialize() rejects non-bytes/non-base64 inputs.
    with self.assertRaises(ValueError):
        fields.BinaryField.deserialize(42)
    with self.assertRaises(ValueError):
        fields.BinaryField.deserialize(3.14)
    with self.assertRaises(ValueError):
        fields.BinaryField.deserialize("Álvaro")
    # Raw bytes and base64 bytes are passed through unchanged.
    self.assertEqual(fields.BinaryField.deserialize(deserialized), deserialized)
    self.assertEqual(fields.BinaryField.deserialize(serialized), deserialized)
    self.assertEqual(
        fields.BinaryField.deserialize(serialized.encode("ascii")),
        serialized.encode("ascii"),
    )
def test_BoolField(self):
    """BoolField maps yes/no/true/false strings to bool and back."""
    self.assertEqual(fields.BoolField.TYPE, (bool,))
    self.assertEqual(fields.BoolField.serialize(None), "")
    false_values = ("False", "false", "no", False)
    for value in false_values:
        self.assertIs(fields.BoolField.deserialize(value), False)
    # None and the empty string stay "unknown", not False.
    self.assertIs(fields.BoolField.deserialize(None), None)
    self.assertEqual(fields.BoolField.deserialize(""), None)
    true_values = ("True", "true", "yes", True)
    for value in true_values:
        self.assertIs(fields.BoolField.deserialize(value), True)
    self.assertEqual(fields.BoolField.serialize(False), "false")
    self.assertIs(type(fields.BoolField.serialize(False)), six.text_type)
    self.assertEqual(fields.BoolField.serialize(True), "true")
    self.assertIs(type(fields.BoolField.serialize(True)), six.text_type)
    # '0' and '1' should be not accepted as boolean values because the
    # sample could not contain other integers but the actual type could be
    # integer
    with self.assertRaises(ValueError):
        fields.BoolField.deserialize("0")
    with self.assertRaises(ValueError):
        fields.BoolField.deserialize(b"0")
    with self.assertRaises(ValueError):
        fields.BoolField.deserialize("1")
    with self.assertRaises(ValueError):
        fields.BoolField.deserialize(b"1")
def test_IntegerField(self):
    """IntegerField parses plain and locale-grouped integers, nothing else."""
    self.assertEqual(fields.IntegerField.TYPE, (int,))
    null_serialized = fields.IntegerField.serialize(None)
    self.assertEqual(null_serialized, "")
    self.assertIs(type(null_serialized), six.text_type)
    self.assertIn(
        type(fields.IntegerField.deserialize("42")), fields.IntegerField.TYPE
    )
    self.assertEqual(fields.IntegerField.deserialize("42"), 42)
    self.assertEqual(fields.IntegerField.deserialize(42), 42)
    serialized = fields.IntegerField.serialize(42)
    self.assertEqual(serialized, "42")
    self.assertIs(type(serialized), six.text_type)
    self.assertEqual(fields.IntegerField.deserialize(None), None)
    self.assertEqual(
        fields.IntegerField.deserialize("10152709355006317"), 10152709355006317
    )
    with rows.locale_context(locale_name):
        # In this locale "." groups thousands, so "42.000" is 42000
        grouped = fields.IntegerField.serialize(42000)
        self.assertEqual(grouped, "42000")
        self.assertIs(type(grouped), six.text_type)
        self.assertEqual(
            fields.IntegerField.serialize(42000, grouping=True), "42.000"
        )
        self.assertEqual(fields.IntegerField.deserialize("42.000"), 42000)
        self.assertEqual(fields.IntegerField.deserialize(42), 42)
        self.assertEqual(fields.IntegerField.deserialize(42.0), 42)
    # Non-integral floats and zero-padded strings must be rejected
    for invalid in (1.23, "013"):
        with self.assertRaises(ValueError):
            fields.IntegerField.deserialize(invalid)
    self.assertEqual(fields.IntegerField.deserialize("0"), 0)
def test_FloatField(self):
    """FloatField must parse plain and locale-formatted floats.

    Integers are accepted on deserialization and promoted to float.
    """
    self.assertEqual(fields.FloatField.TYPE, (float,))
    self.assertEqual(fields.FloatField.serialize(None), "")
    self.assertIs(type(fields.FloatField.serialize(None)), six.text_type)
    self.assertIn(
        type(fields.FloatField.deserialize("42.0")), fields.FloatField.TYPE
    )
    self.assertEqual(fields.FloatField.deserialize("42.0"), 42.0)
    self.assertEqual(fields.FloatField.deserialize(42.0), 42.0)
    self.assertEqual(fields.FloatField.deserialize(42), 42.0)
    self.assertEqual(fields.FloatField.deserialize(None), None)
    self.assertEqual(fields.FloatField.serialize(42.0), "42.0")
    self.assertIs(type(fields.FloatField.serialize(42.0)), six.text_type)
    # NOTE(review): assumes locale_name uses "," as the decimal separator
    # and "." for thousands grouping — confirm at module level.
    with rows.locale_context(locale_name):
        self.assertEqual(fields.FloatField.serialize(42000.0), "42000,000000")
        self.assertIs(type(fields.FloatField.serialize(42000.0)), six.text_type)
        self.assertEqual(
            fields.FloatField.serialize(42000, grouping=True), "42.000,000000"
        )
        self.assertEqual(fields.FloatField.deserialize("42.000,00"), 42000.0)
        self.assertEqual(fields.FloatField.deserialize(42), 42.0)
        self.assertEqual(fields.FloatField.deserialize(42.0), 42.0)
def test_DecimalField(self):
    """DecimalField must round-trip Decimal values with exact precision.

    Both plain and locale-formatted representations are covered; ints and
    floats are accepted on deserialization.
    """
    deserialized = Decimal("42.010")
    self.assertEqual(fields.DecimalField.TYPE, (Decimal,))
    self.assertEqual(fields.DecimalField.serialize(None), "")
    self.assertIs(type(fields.DecimalField.serialize(None)), six.text_type)
    self.assertEqual(fields.DecimalField.deserialize(""), None)
    self.assertIn(
        type(fields.DecimalField.deserialize("42.0")), fields.DecimalField.TYPE
    )
    self.assertEqual(fields.DecimalField.deserialize("42.0"), Decimal("42.0"))
    self.assertEqual(fields.DecimalField.deserialize(deserialized), deserialized)
    # Trailing zeros survive serialization ("42.010", not "42.01")
    self.assertEqual(fields.DecimalField.serialize(deserialized), "42.010")
    self.assertEqual(
        type(fields.DecimalField.serialize(deserialized)), six.text_type
    )
    self.assertEqual(
        fields.DecimalField.deserialize("21.21657469231"), Decimal("21.21657469231")
    )
    self.assertEqual(fields.DecimalField.deserialize("-21.34"), Decimal("-21.34"))
    self.assertEqual(fields.DecimalField.serialize(Decimal("-21.34")), "-21.34")
    self.assertEqual(fields.DecimalField.deserialize(None), None)
    # NOTE(review): assumes locale_name uses "," as the decimal separator
    # and "." for thousands grouping — confirm at module level.
    with rows.locale_context(locale_name):
        self.assertEqual(
            six.text_type, type(fields.DecimalField.serialize(deserialized))
        )
        self.assertEqual(fields.DecimalField.serialize(Decimal("4200")), "4200")
        self.assertEqual(fields.DecimalField.serialize(Decimal("42.0")), "42,0")
        self.assertEqual(
            fields.DecimalField.serialize(Decimal("42000.0")), "42000,0"
        )
        self.assertEqual(fields.DecimalField.serialize(Decimal("-42.0")), "-42,0")
        self.assertEqual(
            fields.DecimalField.deserialize("42.000,00"), Decimal("42000.00")
        )
        self.assertEqual(
            fields.DecimalField.deserialize("-42.000,00"), Decimal("-42000.00")
        )
        self.assertEqual(
            fields.DecimalField.serialize(Decimal("42000.0"), grouping=True),
            "42.000,0",
        )
        self.assertEqual(fields.DecimalField.deserialize(42000), Decimal("42000"))
        self.assertEqual(fields.DecimalField.deserialize(42000.0), Decimal("42000"))
def test_PercentField(self):
    """PercentField stores percentages as Decimal fractions.

    "42.0%" deserializes to Decimal("0.420"); serialization multiplies
    back by 100 and appends "%".
    """
    deserialized = Decimal("0.42010")
    self.assertEqual(fields.PercentField.TYPE, (Decimal,))
    self.assertIn(
        type(fields.PercentField.deserialize("42.0%")), fields.PercentField.TYPE
    )
    self.assertEqual(fields.PercentField.deserialize("42.0%"), Decimal("0.420"))
    # Already-deserialized Decimals pass through unchanged
    self.assertEqual(
        fields.PercentField.deserialize(Decimal("0.420")), Decimal("0.420")
    )
    self.assertEqual(fields.PercentField.deserialize(deserialized), deserialized)
    self.assertEqual(fields.PercentField.deserialize(None), None)
    self.assertEqual(fields.PercentField.serialize(deserialized), "42.010%")
    self.assertEqual(
        type(fields.PercentField.serialize(deserialized)), six.text_type
    )
    self.assertEqual(fields.PercentField.serialize(Decimal("42.010")), "4201.0%")
    self.assertEqual(fields.PercentField.serialize(Decimal("0")), "0.00%")
    self.assertEqual(fields.PercentField.serialize(None), "")
    self.assertEqual(fields.PercentField.serialize(Decimal("0.01")), "1%")
    # NOTE(review): assumes locale_name uses "," as the decimal separator
    # and "." for thousands grouping — confirm at module level.
    with rows.locale_context(locale_name):
        self.assertEqual(
            type(fields.PercentField.serialize(deserialized)), six.text_type
        )
        self.assertEqual(fields.PercentField.serialize(Decimal("42.0")), "4200%")
        self.assertEqual(
            fields.PercentField.serialize(Decimal("42000.0")), "4200000%"
        )
        self.assertEqual(
            fields.PercentField.deserialize("42.000,00%"), Decimal("420.0000")
        )
        self.assertEqual(
            fields.PercentField.serialize(Decimal("42000.00"), grouping=True),
            "4.200.000%",
        )
    # Bare integers are ambiguous (42 vs 42%) and must be rejected
    with self.assertRaises(ValueError):
        fields.PercentField.deserialize(42)
def test_DateField(self):
    """DateField round-trips ISO-format dates and rejects anything else."""
    # TODO: test timezone-aware datetime.date
    as_text = "2015-05-27"
    as_date = datetime.date(2015, 5, 27)
    self.assertEqual(fields.DateField.TYPE, (datetime.date,))
    null_serialized = fields.DateField.serialize(None)
    self.assertEqual(null_serialized, "")
    self.assertIs(type(null_serialized), six.text_type)
    self.assertIn(
        type(fields.DateField.deserialize(as_text)), fields.DateField.TYPE
    )
    self.assertEqual(fields.DateField.deserialize(as_text), as_date)
    self.assertEqual(fields.DateField.deserialize(as_date), as_date)
    self.assertEqual(fields.DateField.deserialize(None), None)
    self.assertEqual(fields.DateField.deserialize(""), None)
    round_tripped = fields.DateField.serialize(as_date)
    self.assertEqual(round_tripped, as_text)
    self.assertIs(type(round_tripped), six.text_type)
    # Integers, timestamps, arbitrary text and bytes are all invalid
    for invalid in (42, as_text + "T00:00:00", "Álvaro", as_text.encode("utf-8")):
        with self.assertRaises(ValueError):
            fields.DateField.deserialize(invalid)
def test_DatetimeField(self):
    """DatetimeField round-trips ISO timestamps and rejects other input."""
    # TODO: test timezone-aware datetime.date
    as_text = "2015-05-27T01:02:03"
    as_datetime = datetime.datetime(2015, 5, 27, 1, 2, 3)
    self.assertEqual(fields.DatetimeField.TYPE, (datetime.datetime,))
    parsed = fields.DatetimeField.deserialize(as_text)
    self.assertIn(type(parsed), fields.DatetimeField.TYPE)
    null_serialized = fields.DatetimeField.serialize(None)
    self.assertEqual(null_serialized, "")
    self.assertIs(type(null_serialized), six.text_type)
    self.assertEqual(fields.DatetimeField.deserialize(as_text), as_datetime)
    self.assertEqual(fields.DatetimeField.deserialize(parsed), parsed)
    self.assertEqual(fields.DatetimeField.deserialize(None), None)
    round_tripped = fields.DatetimeField.serialize(as_datetime)
    self.assertEqual(round_tripped, as_text)
    self.assertIs(type(round_tripped), six.text_type)
    # Integers, date-only strings, arbitrary text and bytes are all invalid
    for invalid in (42, "2015-01-01", "Álvaro", as_text.encode("utf-8")):
        with self.assertRaises(ValueError):
            fields.DatetimeField.deserialize(invalid)
def test_EmailField(self):
    """EmailField accepts e-mail addresses as text and rejects everything else."""
    # TODO: accept spaces also
    serialized = "test@domain.com"
    self.assertEqual(fields.EmailField.TYPE, (six.text_type,))
    deserialized = fields.EmailField.deserialize(serialized)
    self.assertIn(type(deserialized), fields.EmailField.TYPE)
    self.assertEqual(fields.EmailField.serialize(None), "")
    self.assertIs(type(fields.EmailField.serialize(None)), six.text_type)
    # A valid address round-trips unchanged
    self.assertEqual(fields.EmailField.serialize(serialized), serialized)
    self.assertEqual(fields.EmailField.deserialize(serialized), serialized)
    self.assertEqual(fields.EmailField.deserialize(None), None)
    self.assertEqual(fields.EmailField.deserialize(""), None)
    self.assertIs(type(fields.EmailField.serialize(serialized)), six.text_type)
    # Non-text, non-address text and bytes (even of a valid address) fail
    with self.assertRaises(ValueError):
        fields.EmailField.deserialize(42)
    with self.assertRaises(ValueError):
        fields.EmailField.deserialize("2015-01-01")
    with self.assertRaises(ValueError):
        fields.EmailField.deserialize("Álvaro")
    with self.assertRaises(ValueError):
        fields.EmailField.deserialize("test@example.com".encode("utf-8"))
def test_TextField(self):
    """TextField passes unicode text through and refuses binary input."""
    self.assertEqual(fields.TextField.TYPE, (six.text_type,))
    null_serialized = fields.TextField.serialize(None)
    self.assertEqual(null_serialized, "")
    self.assertIs(type(null_serialized), six.text_type)
    self.assertIn(type(fields.TextField.deserialize("test")), fields.TextField.TYPE)
    self.assertEqual(fields.TextField.deserialize("Álvaro"), "Álvaro")
    self.assertIs(fields.TextField.deserialize(None), None)
    self.assertIs(fields.TextField.deserialize(""), "")
    text_serialized = fields.TextField.serialize("Álvaro")
    self.assertEqual(text_serialized, "Álvaro")
    self.assertIs(type(text_serialized), six.text_type)
    # Bytes are rejected with a specific error message
    with self.assertRaises(ValueError) as ctx:
        fields.TextField.deserialize("Álvaro".encode("utf-8"))
    self.assertEqual(ctx.exception.args[0], "Binary is not supported")
def test_JSONField(self):
    """JSONField deserializes JSON documents into lists and dicts."""
    self.assertEqual(fields.JSONField.TYPE, (list, dict))
    self.assertEqual(type(fields.JSONField.deserialize("[]")), list)
    self.assertEqual(type(fields.JSONField.deserialize("{}")), dict)
    expected = {"a": 123, "b": 3.14, "c": [42, 24]}
    self.assertEqual(fields.JSONField.deserialize(json.dumps(expected)), expected)
def test_UUIDField(self):
    """UUIDField accepts UUID objects and hex strings, with or without dashes."""
    # Fix: the context-manager result was bound to an unused variable
    # (`as exception_context`) twice; plain assertRaises suffices when the
    # exception object is never inspected.
    with self.assertRaises(ValueError):
        fields.UUIDField.deserialize("not an UUID value")
    with self.assertRaises(ValueError):
        # "z" not hex
        fields.UUIDField.deserialize("z" * 32)
    fields.UUIDField.deserialize("a" * 32)  # no exception should be raised
    data = uuid.uuid4()
    # Use unittest assertions (not bare `assert`) for consistency with the
    # rest of this test case and better failure messages.
    self.assertEqual(fields.UUIDField.deserialize(data), data)
    self.assertEqual(fields.UUIDField.deserialize(str(data)), data)
    self.assertEqual(fields.UUIDField.deserialize(str(data).replace("-", "")), data)
class FieldUtilsTestCase(unittest.TestCase):
    # Tests for the module-level helpers in rows.fields: slug() and
    # detect_types().

    maxDiff = None

    def setUp(self):
        # Sample CSV with one column per supported field type: the first
        # line is the header, remaining lines are sample values.
        with open("tests/data/all-field-types.csv", "rb") as fobj:
            data = fobj.read().decode("utf-8")
        lines = [line.split(",") for line in data.splitlines()]

        self.fields = lines[0]
        self.data = lines[1:]
        # Expected detection result for each column of the sample file.
        # NOTE(review): decimal_column maps to FloatField, not DecimalField —
        # presumably detection prefers float for plain decimals; confirm.
        self.expected = {
            "bool_column": fields.BoolField,
            "integer_column": fields.IntegerField,
            "float_column": fields.FloatField,
            "decimal_column": fields.FloatField,
            "percent_column": fields.PercentField,
            "date_column": fields.DateField,
            "datetime_column": fields.DatetimeField,
            "unicode_column": fields.TextField,
        }

    def test_slug(self):
        # slug() lowercases, transliterates accents and collapses runs of
        # non-permitted characters into a single separator.
        self.assertEqual(fields.slug(None), "")
        self.assertEqual(fields.slug("Álvaro Justen"), "alvaro_justen")
        self.assertEqual(fields.slug("Moe's Bar"), "moe_s_bar")
        self.assertEqual(fields.slug("-----te-----st------"), "te_st")
        self.assertEqual(
            fields.slug("first line\nsecond line"), "first_line_second_line"
        )
        self.assertEqual(fields.slug("first/second"), "first_second")
        # \xa0 is a non-breaking space
        self.assertEqual(fields.slug("first\xa0second"), "first_second")
        # As in <https://github.com/turicas/rows/issues/179>
        self.assertEqual(
            fields.slug('Query Occurrence"( % ),"First Seen'),
            "query_occurrence_first_seen",
        )
        self.assertEqual(fields.slug(" ÁLVARO justen% "), "alvaro_justen")
        self.assertEqual(fields.slug(42), "42")  # non-strings are stringified
        self.assertEqual(fields.slug("^test"), "test")
        # Extra characters can be allowed via permitted_chars
        self.assertEqual(
            fields.slug("^test", permitted_chars=fields.SLUG_CHARS + "^"), "^test"
        )
        # The separator itself is configurable
        self.assertEqual(
            fields.slug("this/is\ta\ntest", separator="-"), "this-is-a-test"
        )

    def test_detect_types_no_sample(self):
        # With no sample rows every field falls back to TextField.
        expected = {key: fields.TextField for key in self.expected.keys()}
        result = fields.detect_types(self.fields, [])
        self.assertDictEqual(dict(result), expected)

    def test_detect_types_binary(self):
        # first, try values as (`bytes`/`str`)
        expected = {key: fields.BinaryField for key in self.expected.keys()}
        values = [
            [b"some binary data" for _ in range(len(self.data[0]))] for __ in range(20)
        ]
        result = fields.detect_types(self.fields, values)
        self.assertDictEqual(dict(result), expected)

        # second, try base64-encoded values (as `str`/`unicode`)
        expected = {key: fields.TextField for key in self.expected.keys()}
        values = [
            [b64encode(value.encode("utf-8")).decode("ascii") for value in row]
            for row in self.data
        ]
        result = fields.detect_types(self.fields, values)
        self.assertDictEqual(dict(result), expected)

    def test_detect_types(self):
        # The full sample must yield exactly the expected type per column.
        result = fields.detect_types(self.fields, self.data)
        self.assertDictEqual(dict(result), self.expected)

    def test_detect_types_different_number_of_fields(self):
        # Extra data columns get synthesized names ("field_<index>").
        result = fields.detect_types(["f1", "f2"], [["a", "b", "c"]])
        self.assertEqual(list(result.keys()), ["f1", "f2", "field_2"])

    def test_empty_sequences_should_not_be_bool(self):
        # A column of empty strings must not be mistaken for booleans.
        result = fields.detect_types(["field_1"], [[""], [""]])["field_1"]
        expected = fields.TextField
        self.assertEqual(result, expected)

    def test_precedence(self):
        # One column per field type; with field_types passed explicitly,
        # detection must pick each column's declared type.
        field_types = [
            ("bool", fields.BoolField),
            ("integer", fields.IntegerField),
            ("float", fields.FloatField),
            ("datetime", fields.DatetimeField),
            ("date", fields.DateField),
            ("float", fields.FloatField),
            ("percent", fields.PercentField),
            ("json", fields.JSONField),
            ("email", fields.EmailField),
            ("binary1", fields.BinaryField),
            ("binary2", fields.BinaryField),
            ("text", fields.TextField),
        ]
        data = [
            [
                "false",
                "42",
                "3.14",
                "2016-08-15T05:21:10",
                "2016-08-15",
                "2.71",
                "76.38%",
                '{"key": "value"}',
                "test@example.com",
                b"cHl0aG9uIHJ1bGVz",
                b"python rules",
                "Álvaro Justen",
            ]
        ]
        result = fields.detect_types(
            [item[0] for item in field_types],
            data,
            field_types=[item[1] for item in field_types],
        )
        self.assertDictEqual(dict(result), dict(field_types))
class FieldsFunctionsTestCase(unittest.TestCase):
    # Tests for the helper functions is_null(), as_string() and get_items().

    def test_is_null(self):
        # Null markers: None, empty/whitespace-only strings and common
        # textual null spellings.
        self.assertTrue(fields.is_null(None))
        self.assertTrue(fields.is_null(""))
        self.assertTrue(fields.is_null(" \t "))
        self.assertTrue(fields.is_null("null"))
        self.assertTrue(fields.is_null("nil"))
        self.assertTrue(fields.is_null("none"))
        self.assertTrue(fields.is_null("-"))
        # Real text — including its bytes form — is not null
        self.assertFalse(fields.is_null("Álvaro"))
        self.assertFalse(fields.is_null("Álvaro".encode("utf-8")))

    def test_as_string(self):
        # as_string() stringifies scalars but refuses binary input.
        self.assertEqual(fields.as_string(None), "None")
        self.assertEqual(fields.as_string(42), "42")
        self.assertEqual(fields.as_string(3.141592), "3.141592")
        self.assertEqual(fields.as_string("Álvaro"), "Álvaro")
        with self.assertRaises(ValueError) as exception_context:
            fields.as_string("Álvaro".encode("utf-8"))
        self.assertEqual(exception_context.exception.args[0], "Binary is not supported")

    def test_get_items(self):
        # get_items(*indexes) builds a getter that returns a tuple of the
        # requested positions; out-of-range positions come back as None.
        func = fields.get_items(2)
        self.assertEqual(func("a b c d e f".split()), ("c",))

        func = fields.get_items(0, 2, 3)
        self.assertEqual(func("a b c d e f".split()), ("a", "c", "d"))
        self.assertEqual(func("a b c".split()), ("a", "c", None))
|
turicas/rows
|
tests/tests_fields.py
|
Python
|
lgpl-3.0
| 24,002
|
[
"MOE"
] |
f7f8e4d0a69bbeb1b75dd872813603e713b33ca761629f8848d1f615462121ab
|
"""
Test courseware search
"""
import json
import uuid
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.instructor_dashboard import InstructorDashboardPage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage
from common.test.acceptance.pages.studio.xblock_editor import XBlockVisibilityEditorView
from common.test.acceptance.tests.discussion.helpers import CohortTestMixin
from common.test.acceptance.tests.helpers import remove_file
from common.test.acceptance.tests.studio.base_studio_test import ContainerBase
class CoursewareSearchCohortTest(ContainerBase, CohortTestMixin):
    """
    Test courseware search.

    Verifies that courseware search results respect content-group
    restrictions: each cohorted student only finds content visible to
    their cohort, while staff can see everything (unless masquerading).
    """
    shard = 1

    # Throwaway file holding the search index for this test run.
    TEST_INDEX_FILENAME = "test_root/index_file.dat"

    def setUp(self, is_staff=True):
        """
        Create search page and course content to search
        """
        # create test file in which index for this test will live
        with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
            json.dump({}, index_file)
        self.addCleanup(remove_file, self.TEST_INDEX_FILENAME)

        super(CoursewareSearchCohortTest, self).setUp(is_staff=is_staff)
        self.staff_user = self.user

        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        self.content_group_a = "Content Group A"
        self.content_group_b = "Content Group B"

        # Create a student who will be in "Cohort A"
        self.cohort_a_student_username = "cohort_a_" + str(uuid.uuid4().hex)[:12]
        self.cohort_a_student_email = self.cohort_a_student_username + "@example.com"
        AutoAuthPage(
            self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
        ).visit()

        # Create a student who will be in "Cohort B"
        self.cohort_b_student_username = "cohort_b_" + str(uuid.uuid4().hex)[:12]
        self.cohort_b_student_email = self.cohort_b_student_username + "@example.com"
        AutoAuthPage(
            self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
        ).visit()

        # Create a student who will end up in the default cohort group
        self.cohort_default_student_username = "cohort_default_student"
        self.cohort_default_student_email = "cohort_default_student@example.com"
        AutoAuthPage(
            self.browser, username=self.cohort_default_student_username,
            email=self.cohort_default_student_email, no_login=True
        ).visit()

        self.course_home_page = CourseHomePage(self.browser, self.course_id)

        # Enable Cohorting and assign cohorts and content groups
        self._auto_auth(self.staff_user["username"], self.staff_user["email"], True)
        self.enable_cohorting(self.course_fixture)
        self.create_content_groups()
        self.link_html_to_content_groups_and_publish()
        self.create_cohorts_and_assign_students()
        self._studio_reindex()

    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.
        """
        LogoutPage(self.browser).visit()
        AutoAuthPage(self.browser, username=username, email=email,
                     course_id=self.course_id, staff=staff).visit()

    def _studio_reindex(self):
        """
        Reindex course content on studio course page
        """
        # Reindexing requires staff access, so re-authenticate first.
        self._auto_auth(self.staff_user["username"], self.staff_user["email"], True)
        self.studio_course_outline.visit()
        self.studio_course_outline.start_reindex()
        self.studio_course_outline.wait_for_ajax()

    def _goto_staff_page(self):
        """
        Open staff page with assertion
        """
        self.course_home_page.visit()
        self.course_home_page.resume_course_from_header()
        staff_page = StaffCoursewarePage(self.browser, self.course_id)
        self.assertEqual(staff_page.staff_view_mode, 'Staff')
        return staff_page

    def _search_for_term(self, term):
        """
        Search for term in course and return results.
        """
        self.course_home_page.visit()
        course_search_results_page = self.course_home_page.search_for_term(term)
        results = course_search_results_page.search_results.html
        # Returns the first result's HTML, or an empty list when there are
        # no hits; callers only use the `in` operator, which works for both.
        return results[0] if len(results) > 0 else []

    def populate_course_fixture(self, course_fixture):
        """
        Populate the children of the test course fixture.

        Four HTML blocks with distinctive, searchable bodies are added to
        a single unit; three of them are later restricted by content group.
        """
        self.group_a_html = 'GROUPACONTENT'
        self.group_b_html = 'GROUPBCONTENT'
        self.group_a_and_b_html = 'GROUPAANDBCONTENT'
        self.visible_to_all_html = 'VISIBLETOALLCONTENT'

        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('html', self.group_a_html, data='<html>GROUPACONTENT</html>'),
                        XBlockFixtureDesc('html', self.group_b_html, data='<html>GROUPBCONTENT</html>'),
                        XBlockFixtureDesc('html', self.group_a_and_b_html, data='<html>GROUPAANDBCONTENT</html>'),
                        XBlockFixtureDesc('html', self.visible_to_all_html, data='<html>VISIBLETOALLCONTENT</html>')
                    )
                )
            )
        )

    def create_content_groups(self):
        """
        Creates two content groups in Studio Group Configurations Settings.
        """
        group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        group_configurations_page.visit()

        group_configurations_page.create_first_content_group()
        config = group_configurations_page.content_groups[0]
        config.name = self.content_group_a
        config.save()

        group_configurations_page.add_content_group()
        config = group_configurations_page.content_groups[1]
        config.name = self.content_group_b
        config.save()

    def link_html_to_content_groups_and_publish(self):
        """
        Updates 3 of the 4 existing html to limit their visibility by content group.
        Publishes the modified units.
        """
        container_page = self.go_to_unit_page()

        def set_visibility(html_block_index, groups):
            """
            Set visibility on html blocks to specified groups.
            """
            html_block = container_page.xblocks[html_block_index]
            html_block.edit_visibility()
            visibility_dialog = XBlockVisibilityEditorView(self.browser, html_block.locator)
            visibility_dialog.select_groups_in_partition_scheme(visibility_dialog.CONTENT_GROUP_PARTITION, groups)

        # NOTE(review): index 0 is never restricted — presumably xblocks[0]
        # is a container wrapper rather than the first HTML block; confirm
        # against container_page.xblocks.
        set_visibility(1, [self.content_group_a])
        set_visibility(2, [self.content_group_b])
        set_visibility(3, [self.content_group_a, self.content_group_b])

        container_page.publish()

    def create_cohorts_and_assign_students(self):
        """
        Adds 2 manual cohorts, linked to content groups, to the course.
        Each cohort is assigned one student.
        """
        instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
        instructor_dashboard_page.visit()
        cohort_management_page = instructor_dashboard_page.select_cohort_management()

        def add_cohort_with_student(cohort_name, content_group, student):
            """
            Create cohort and assign student to it.
            """
            cohort_management_page.add_cohort(cohort_name, content_group=content_group)
            cohort_management_page.add_students_to_selected_cohort([student])

        add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
        add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)
        cohort_management_page.wait_for_ajax()

    def test_cohorted_search_user_a_a_content(self):
        """
        Test user can search content restricted to his cohort.
        """
        self._auto_auth(self.cohort_a_student_username, self.cohort_a_student_email, False)
        search_results = self._search_for_term(self.group_a_html)
        assert self.group_a_html in search_results

    def test_cohorted_search_user_b_a_content(self):
        """
        Test user can not search content restricted to his cohort.
        """
        self._auto_auth(self.cohort_b_student_username, self.cohort_b_student_email, False)
        search_results = self._search_for_term(self.group_a_html)
        assert self.group_a_html not in search_results

    def test_cohorted_search_user_staff_all_content(self):
        """
        Test staff user can search all public content if cohorts used on course.
        """
        self._auto_auth(self.staff_user["username"], self.staff_user["email"], False)
        self._goto_staff_page().set_staff_view_mode('Staff')

        search_results = self._search_for_term(self.visible_to_all_html)
        assert self.visible_to_all_html in search_results

        search_results = self._search_for_term(self.group_a_and_b_html)
        assert self.group_a_and_b_html in search_results

        search_results = self._search_for_term(self.group_a_html)
        assert self.group_a_html in search_results

        search_results = self._search_for_term(self.group_b_html)
        assert self.group_b_html in search_results

    def test_cohorted_search_user_staff_masquerade_student_content(self):
        """
        Test staff user can search just student public content if selected from preview menu.

        NOTE: Although it would be wise to combine these masquerading tests into
        a single test due to expensive setup, doing so revealed a very low
        priority bug where searching seems to stick/cache the access of the
        first user who searches for future searches.
        """
        self._auto_auth(self.staff_user["username"], self.staff_user["email"], False)
        self._goto_staff_page().set_staff_view_mode('Learner')

        search_results = self._search_for_term(self.visible_to_all_html)
        assert self.visible_to_all_html in search_results

        search_results = self._search_for_term(self.group_a_and_b_html)
        assert self.group_a_and_b_html not in search_results

        search_results = self._search_for_term(self.group_a_html)
        assert self.group_a_html not in search_results

        search_results = self._search_for_term(self.group_b_html)
        assert self.group_b_html not in search_results

    def test_cohorted_search_user_staff_masquerade_cohort_content(self):
        """
        Test staff user can search cohort and public content if selected from preview menu.
        """
        self._auto_auth(self.staff_user["username"], self.staff_user["email"], False)
        self._goto_staff_page().set_staff_view_mode('Learner in ' + self.content_group_a)

        search_results = self._search_for_term(self.visible_to_all_html)
        assert self.visible_to_all_html in search_results

        search_results = self._search_for_term(self.group_a_and_b_html)
        assert self.group_a_and_b_html in search_results

        search_results = self._search_for_term(self.group_a_html)
        assert self.group_a_html in search_results

        search_results = self._search_for_term(self.group_b_html)
        assert self.group_b_html not in search_results
|
cpennington/edx-platform
|
common/test/acceptance/tests/lms/test_lms_cohorted_courseware_search.py
|
Python
|
agpl-3.0
| 12,205
|
[
"VisIt"
] |
4b520fccdb9405a2403afd170e91a3258583233c7a22d4dc9d97b7dec03ad8b8
|
#! /usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division
import numpy as np
from scipy.stats import rankdata
from .base import CategoricalStats
class ANOSIM(CategoricalStats):
    """Run Analysis of Similarities (ANOSIM) on a distance matrix.

    ANOSIM is a non-parametric test of whether two or more groups of
    objects differ significantly under a categorical factor.  The
    distances are rank-transformed and summarized as an R statistic in
    the range [-1, +1]: +1 indicates strong within-group clustering,
    0 random grouping, and -1 anti-grouping.

    Notes
    -----
    The original method is described in [1]_.  The general algorithm and
    interface are similar to ``vegan::anosim``, available in R's vegan
    package [2]_.

    References
    ----------
    .. [1] Clarke, KR. "Non-parametric multivariate analyses of changes in
       community structure." Australian journal of ecology 18.1 (1993):
       117-143.

    .. [2] http://cran.r-project.org/web/packages/vegan/index.html

    """
    short_method_name = 'ANOSIM'
    long_method_name = 'Analysis of Similarities'
    test_statistic_name = 'R statistic'

    def __init__(self, distance_matrix, grouping):
        super(ANOSIM, self).__init__(distance_matrix, grouping)

        sample_count = self._dm.shape[0]
        # Normalizing constant from Clarke (1993): n * (n - 1) / 4
        self._divisor = sample_count * ((sample_count - 1) / 4)
        self._ranked_dists = rankdata(self._dm.condensed_form(),
                                      method='average')

    def _run(self, grouping):
        """Compute ANOSIM R statistic (between -1 and +1)."""
        # True where both objects belong to the same group.  np.equal.outer
        # requires a numeric grouping vector (it won't work on strings).
        same_group = np.equal.outer(grouping, grouping)

        # Take the upper triangle (diagonal excluded) so the entries line
        # up with the condensed-form order used for self._ranked_dists.
        return self._compute_r_stat(same_group[self._tri_idxs])

    def _compute_r_stat(self, grouping_tri):
        # Mean rank of within-group vs. between-group distances.
        mean_within = np.mean(self._ranked_dists[grouping_tri])
        mean_between = np.mean(self._ranked_dists[~grouping_tri])
        return (mean_between - mean_within) / self._divisor
|
Jorge-C/bipy
|
skbio/maths/stats/distance/anosim.py
|
Python
|
bsd-3-clause
| 2,908
|
[
"scikit-bio"
] |
0d25e7b82f1e5f8294dfd6d071e9241dac7c509dfc08b12c170914af209b57f3
|
"""
This migration script adds the history_dataset_association_display_at_authorization table,
which allows 'private' datasets to be displayed at external sites without making them public.
If using mysql, this script will display the following error, which is corrected in the next
migration script:
history_dataset_association_display_at_authorization table failed: (OperationalError)
(1059, "Identifier name 'ix_history_dataset_association_display_at_authorization_update_time'
is too long
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.exc import *
from migrate import *
from migrate.changeset import *
import datetime
now = datetime.datetime.utcnow
import sys, logging
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
metadata = MetaData( migrate_engine )
db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
def display_migration_details():
    # Print a human-readable banner describing this migration; called from
    # upgrade() before any DDL runs.  (Python 2 print statements — this
    # migration predates the py3 transition.)
    print "========================================"
    print "This migration script adds the history_dataset_association_display_at_authorization table, which"
    print "allows 'private' datasets to be displayed at external sites without making them public."
    print ""
    print "If using mysql, this script will display the following error, which is corrected in the next migration"
    print "script: history_dataset_association_display_at_authorization table failed: (OperationalError)"
    print "(1059, 'Identifier name 'ix_history_dataset_association_display_at_authorization_update_time'"
    print "is too long."
    print "========================================"
# Association table granting a user permission to display a specific
# (possibly private) history dataset association at an external site
# without making the dataset public.
HistoryDatasetAssociationDisplayAtAuthorization_table = Table( "history_dataset_association_display_at_authorization", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
    Column( "history_dataset_association_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "site", TrimmedString( 255 ) ) )
def upgrade():
    # Announce the migration, then create the new table.  Failures are
    # logged rather than raised so the migration framework can continue
    # (e.g. the known mysql identifier-length error noted in the module
    # docstring).
    display_migration_details()
    # Load existing tables
    metadata.reflect()
    try:
        HistoryDatasetAssociationDisplayAtAuthorization_table.create()
    except Exception, e:
        log.debug( "Creating history_dataset_association_display_at_authorization table failed: %s" % str( e ) )
def downgrade():
    """Drop the history_dataset_association_display_at_authorization table."""
    # Load existing tables
    metadata.reflect()
    try:
        HistoryDatasetAssociationDisplayAtAuthorization_table.drop()
    except Exception, e:
        # Best-effort: log and continue (mirrors upgrade()).
        log.debug( "Dropping history_dataset_association_display_at_authorization table failed: %s" % str( e ) )
|
volpino/Yeps-EURAC
|
lib/galaxy/model/migrate/versions/0010_hda_display_at_authz_table.py
|
Python
|
mit
| 3,090
|
[
"Galaxy"
] |
b6a366e611c136212b27d58585540ae69ff541ce79b1e54e3d2c03ef9ef7fed3
|
# coding=utf-8
"""**Utilities for storage module**
"""
import os
import re
import copy
import numpy
import math
from ast import literal_eval
from osgeo import ogr
from geometry import Polygon
from safe.common.numerics import ensure_numeric
from safe.common.utilities import verify
from safe.common.exceptions import BoundingBoxError, InaSAFEError
# Default attribute to assign to vector layers
from safe.common.utilities import ugettext as tr
DEFAULT_ATTRIBUTE = 'inapolygon'
# Spatial layer file extensions that are recognised in Risiko
# FIXME: Perhaps add '.gml', '.zip', ...
LAYER_TYPES = ['.shp', '.asc', '.tif', '.tiff', '.geotif', '.geotiff']
# Map between extensions and OGR drivers
DRIVER_MAP = {'.sqlite': 'SQLITE',
              '.shp': 'ESRI Shapefile',
              '.gml': 'GML',
              '.tif': 'GTiff',
              '.asc': 'AAIGrid'}
# Map between Python types and OGR field types
# FIXME (Ole): I can't find a double precision type for OGR
TYPE_MAP = {type(None): ogr.OFTString,  # What else should this be?
            type(''): ogr.OFTString,
            type(True): ogr.OFTInteger,
            type(0): ogr.OFTInteger,
            type(0.0): ogr.OFTReal,
            type(numpy.array([0.0])[0]): ogr.OFTReal,  # numpy.float64
            type(numpy.array([[0.0]])[0]): ogr.OFTReal}  # numpy.ndarray
# Map between verbose type names and OGR geometry type constants
INVERSE_GEOMETRY_TYPE_MAP = {'point': ogr.wkbPoint,
                             'line': ogr.wkbLineString,
                             'polygon': ogr.wkbPolygon}
# Miscellaneous auxiliary functions
def _keywords_to_string(keywords, sublayer=None):
    """Render a flat keywords dict as 'key: value' lines.

    :param keywords: Simple (non-nested) dictionary to stringify. Keys
        must be strings without the ':' character; values must be
        convertible to str.
    :param sublayer: Optional sub layer group name. When given, an
        ini-style '[sublayer]' header precedes the keyword lines.

    :returns: The rendered keywords as a single string.

    :raises: Exception (via verify) on invalid keys or unconvertible values.

    .. note: Only simple keyword dicts should be passed here, not
        multilayer dicts.
    """
    parts = []
    if sublayer is not None:
        parts.append('[%s]\n' % sublayer)
    for k, v in keywords.items():
        # Validate the key
        msg = ('Key in keywords dictionary must be a string. '
               'I got %s with type %s' % (k, str(type(k))[1:-1]))
        verify(isinstance(k, basestring), msg)
        key = k
        msg = ('Key in keywords dictionary must not contain the ":" '
               'character. I got "%s"' % key)
        verify(':' not in key, msg)
        # Validate / convert the value
        msg = ('Value in keywords dictionary must be convertible to a string. '
               'For key %s, I got %s with type %s'
               % (k, v, str(type(v))[1:-1]))
        try:
            val = str(v)
        except:
            raise Exception(msg)
        # Emit one 'key: value' line per entry
        parts.append('%s: %s\n' % (key, val))
    return ''.join(parts)
def write_keywords(keywords, filename, sublayer=None):
    """Write keywords dictionary to file

    :param keywords: Dictionary of keyword, value pairs
    :type keywords: dict

    :param filename: Name of keywords file. Extension expected to be .keywords
    :type filename: str

    :param sublayer: Optional sublayer applicable only to multilayer formats
        such as sqlite or netcdf which can potentially hold more than
        one layer. The string should map to the layer group as per the
        example below. **If the keywords file contains sublayer
        definitions but no sublayer was defined, keywords file content
        will be removed and replaced with only the keywords provided
        here.**
    :type sublayer: str

    A keyword file with sublayers may look like this:

        [osm_buildings]
        datatype: osm
        category: exposure
        subcategory: building
        purpose: dki
        title: buildings_osm_4326

        [osm_flood]
        datatype: flood
        category: hazard
        subcategory: building
        title: flood_osm_4326

    Keys must be strings not containing the ":" character.
    Values can be anything that can be converted to a string (using
    Python's str function).

    Surrounding whitespace is removed from values, but keys are unmodified.
    The reason being that keys must always be valid for the dictionary they
    came from. For values we have decided to be flexible and treat entries
    like 'unit:m' the same as 'unit: m', or indeed 'unit: m '.
    Otherwise, unintentional whitespace in values would lead to surprising
    errors in the application.
    """
    # Input checks
    basename, ext = os.path.splitext(filename)
    msg = ('Unknown extension for file %s. '
           'Expected %s.keywords' % (filename, basename))
    verify(ext == '.keywords', msg)
    # First read any keywords out of the file so that we can retain
    # keywords for other sublayers
    existing_keywords = read_keywords(filename, all_blocks=True)
    first_value = None
    if len(existing_keywords) > 0:
        # NOTE(review): dict.keys()[0] and iteritems below are Python 2
        # only; keys() is a non-indexable view under Python 3.
        first_value = existing_keywords[existing_keywords.keys()[0]]
    # A multilayer file maps sublayer name -> keywords dict, so a dict
    # as the first value marks a multilayer file
    multilayer_flag = type(first_value) == dict
    # 'file' is the Python 2 builtin; mode 'w' truncates any existing file
    handle = file(filename, 'w')
    if multilayer_flag:
        if sublayer is not None and sublayer != '':
            # replace existing keywords / add new for this layer
            existing_keywords[sublayer] = keywords
            for key, value in existing_keywords.iteritems():
                handle.write(_keywords_to_string(value, sublayer=key))
                handle.write('\n')
        else:
            # It is currently a multilayer but we will replace it with
            # a single keyword block since the user passed no sublayer
            handle.write(_keywords_to_string(keywords))
    else:
        # currently a simple layer so replace it with our content
        handle.write(_keywords_to_string(keywords, sublayer=sublayer))
    handle.close()
def read_keywords(filename, sublayer=None, all_blocks=False):
    """Read keywords dictionary from file

    :param filename: Name of keywords file. Extension expected to be
        .keywords. The format of one line is expected to be either
        string: string or string
    :type filename: str

    :param sublayer: Optional sublayer applicable only to multilayer formats
        such as sqlite or netcdf which can potentially hold more than
        one layer. The string should map to the layer group as per the
        example below. If the keywords file contains sublayer definitions
        but no sublayer was defined, the first layer group will be
        returned.
    :type sublayer: str

    :param all_blocks: Optional, defaults to False. If True will return
        a dict of dicts, where the top level dict entries each represent
        a sublayer, and the values of that dict will be dicts of keyword
        entries.
    :type all_blocks: bool

    :returns: keywords: Dictionary of keyword, value pairs

    A keyword layer with sublayers may look like this:

        [osm_buildings]
        datatype: osm
        category: exposure
        subcategory: building
        purpose: dki
        title: buildings_osm_4326

        [osm_flood]
        datatype: flood
        category: hazard
        subcategory: building
        title: flood_osm_4326

    Whereas a simple keywords file would look like this:

        datatype: flood
        category: hazard
        subcategory: building
        title: flood_osm_4326

    If filename does not exist, an empty dictionary is returned.
    Blank lines are ignored.
    Surrounding whitespace is removed from values, but keys are unmodified.
    If there are no ':', then the keyword is treated as a key with no value.
    """
    # Input checks
    basename, ext = os.path.splitext(filename)
    msg = ('Unknown extension for file %s. '
           'Expected %s.keywords' % (filename, basename))
    verify(ext == '.keywords', msg)
    # A missing file is not an error: return empty keywords
    if not os.path.isfile(filename):
        return {}
    # Read all entries
    blocks = {}  # sublayer name -> keywords dict
    keywords = {}  # keywords of the block currently being parsed
    fid = open(filename, 'r')
    current_block = None
    first_keywords = None
    for line in fid.readlines():
        # Remove trailing (but not preceeding!) whitespace
        # FIXME: Can be removed altogether
        text = line.rstrip()
        # Ignore blank lines
        if text == '':
            continue
        # Check if it is an ini style group header
        block_flag = re.search(r'^\[.*]$', text, re.M | re.I)
        if block_flag:
            # Write the old block if it exists - must have a current
            # block to prevent orphans
            if len(keywords) > 0 and current_block is not None:
                blocks[current_block] = keywords
            if first_keywords is None and len(keywords) > 0:
                first_keywords = keywords
            # Now set up for a new block
            current_block = text[1:-1]
            # Reset the keywords each time we encounter a new block
            # until we know we are on the desired one
            keywords = {}
            continue
        if ':' not in text:
            # Bare keyword with no value
            key = text.strip()
            val = None
        else:
            # Get splitting point
            idx = text.find(':')
            # Take key as everything up to the first ':'
            key = text[:idx]
            # Take value as everything after the first ':'
            textval = text[idx + 1:].strip()
            try:
                # Take care of python structures like
                # booleans, None, lists, dicts etc
                val = literal_eval(textval)
            except (ValueError, SyntaxError):
                # Not a python literal: keep the raw (stripped) string
                val = textval
        # Add entry to dictionary
        keywords[key] = val
    fid.close()
    # Write out any unfinalised block data
    if len(keywords) > 0 and current_block is not None:
        blocks[current_block] = keywords
    if first_keywords is None:
        first_keywords = keywords
    # Ok we have generated a structure that looks like this:
    # blocks = {{ 'foo' : { 'a': 'b', 'c': 'd'},
    #           { 'bar' : { 'd': 'e', 'f': 'g'}}
    # where foo and bar are sublayers and their dicts are the sublayer keywords
    if all_blocks:
        return blocks
    if sublayer is not None:
        if sublayer in blocks:
            return blocks[sublayer]
        # NOTE(review): a requested-but-missing sublayer falls through to
        # an implicit None return (not first_keywords) - confirm intended.
    else:
        return first_keywords
# noinspection PyExceptionInherit
def check_geotransform(geotransform):
    """Check that geotransform is valid

    :param geotransform: GDAL geotransform (6-tuple).
        (top left x, w-e pixel resolution, rotation,
        top left y, rotation, n-s pixel resolution).
        See e.g. http://www.gdal.org/gdal_tutorial.html
    :type geotransform: tuple

    :raises: InaSAFEError if an element is not numeric; verify() failures
        for out-of-range values.

    .. note::
        This assumes that the spatial reference uses geographic coordinates,
        so will not work for projected coordinate systems.
    """
    msg = ('Supplied geotransform must be a tuple with '
           '6 numbers. I got %s' % str(geotransform))
    verify(len(geotransform) == 6, msg)
    for x in geotransform:
        try:
            float(x)
        except (TypeError, ValueError):
            # Bug fix: float('abc') raises ValueError (not TypeError), so
            # non-numeric strings must be caught here as well.
            raise InaSAFEError(msg)
    # Check longitude
    msg = ('Element in 0 (first) geotransform must be a valid '
           'longitude. I got %s' % geotransform[0])
    verify(-180 <= geotransform[0] <= 180, msg)
    # Check latitude
    msg = ('Element 3 (fourth) in geotransform must be a valid '
           'latitude. I got %s' % geotransform[3])
    verify(-90 <= geotransform[3] <= 90, msg)
    # Check cell size
    msg = ('Element 1 (second) in geotransform must be a positive '
           'number. I got %s' % geotransform[1])
    verify(geotransform[1] > 0, msg)
    # Bug fix: report element 5 itself (previously showed geotransform[1])
    msg = ('Element 5 (sixth) in geotransform must be a negative '
           'number. I got %s' % geotransform[5])
    verify(geotransform[5] < 0, msg)
def geotransform_to_bbox(geotransform, columns, rows):
    """Convert geotransform to bounding box

    :param geotransform: GDAL geotransform (6-tuple).
        (top left x, w-e pixel resolution, rotation,
        top left y, rotation, n-s pixel resolution).
        See e.g. http://www.gdal.org/gdal_tutorial.html
    :type geotransform: tuple

    :param columns: Number of columns in grid
    :type columns: int

    :param rows: Number of rows in grid
    :type rows: int

    :returns: bbox: Bounding box as a list of geographic coordinates
        [west, south, east, north]

    .. note::
        Rows and columns are needed to determine eastern and northern bounds.
        FIXME: Not sure if the pixel vs gridline registration issue is
        observed correctly here. Need to check against gdal > v1.7
    """
    top_left_x = geotransform[0]
    top_left_y = geotransform[3]
    we_resolution = geotransform[1]
    ns_resolution = geotransform[5]  # negative for north-up rasters
    # The origin is the upper-left corner, so west/north come directly
    # from it while east/south are offset by the grid extent.
    west = top_left_x
    north = top_left_y
    east = top_left_x + columns * we_resolution
    south = top_left_y + rows * ns_resolution
    return [west, south, east, north]
def geotransform_to_resolution(geotransform, isotropic=False):
    """Convert geotransform to resolution

    :param geotransform: GDAL geotransform (6-tuple).
        (top left x, w-e pixel resolution, rotation,
        top left y, rotation, n-s pixel resolution).
        See e.g. http://www.gdal.org/gdal_tutorial.html
    :type geotransform: tuple

    :param isotropic: If True, return the average (dx + dy) / 2
    :type isotropic: bool

    :returns: resolution: grid spacing (res_x, res_y) in (positive) decimal
        degrees ordered as longitude first, then latitude,
        or (res_x + res_y) / 2 if isotropic is True.
    """
    dx = geotransform[1]  # w-e pixel resolution
    dy = -geotransform[5]  # flip sign: stored negative for north-up rasters
    return (dx + dy) / 2 if isotropic else (dx, dy)
def raster_geometry_to_geotransform(longitudes, latitudes):
    """Convert vectors of longitudes and latitudes to geotransform

    This is the inverse operation of Raster.get_geometry().

    :param longitudes: Vector of geographic x coordinates (pixel centres).

    :param latitudes: Vector of geographic y coordinates (pixel centres).

    :returns: geotransform: 6-tuple (top left x, w-e pixel resolution,
        rotation, top left y, rotation, n-s pixel resolution)
    """
    msg = ('You must specify more than 1 longitude to make geotransform: '
           'I got %s' % str(longitudes))
    verify(len(longitudes) > 1, msg)
    msg = ('You must specify more than 1 latitude to make geotransform: '
           'I got %s' % str(latitudes))
    verify(len(latitudes) > 1, msg)
    # Grid spacings; dy comes out negative when latitudes ascend, matching
    # GDAL's north-up convention
    dx = float(longitudes[1] - longitudes[0])
    dy = float(latitudes[0] - latitudes[1])
    # Offset by half a cell so the geotransform refers to pixel corners
    # (pixel registration) rather than the supplied pixel centres
    half_dx = dx / 2
    half_dy = dy / 2
    return (longitudes[0] - half_dx,  # Longitude of upper left corner
            dx,                       # w-e pixel resolution
            0,                        # rotation
            latitudes[-1] - half_dy,  # Latitude of upper left corner
            0,                        # rotation
            dy)                       # n-s pixel resolution
# noinspection PyExceptionInherit
def bbox_intersection(*args):
    """Compute intersection between two or more bounding boxes

    :param args: two or more bounding boxes.
        Each is assumed to be a list or a tuple with
        four coordinates (W, S, E, N)

    :returns: The minimal common bounding box, or None when the supplied
        boxes do not overlap.

    :raises: BoundingBoxError for malformed boxes.
    """
    msg = 'Function bbox_intersection must take at least 2 arguments.'
    verify(len(args) > 1, msg)
    # Start from the whole globe and shrink it with each box
    result = [-180, -90, 180, 90]
    for a in args:
        # None entries are skipped (treated as 'no constraint')
        if a is None:
            continue
        msg = ('Bounding box expected to be a list of the '
               'form [W, S, E, N]. '
               'Instead i got "%s"' % str(a))
        try:
            box = list(a)
        except:
            raise Exception(msg)
        if not len(box) == 4:
            raise BoundingBoxError(msg)
        msg = ('Western boundary must be less than or equal to eastern. '
               'I got %s' % box)
        if not box[0] <= box[2]:
            raise BoundingBoxError(msg)
        msg = ('Southern boundary must be less than or equal to northern. '
               'I got %s' % box)
        if not box[1] <= box[3]:
            raise BoundingBoxError(msg)
        # Compute intersection
        # West and South
        for i in [0, 1]:
            result[i] = max(result[i], box[i])
        # East and North
        for i in [2, 3]:
            result[i] = min(result[i], box[i])
    # Check validity and return
    if result[0] <= result[2] and result[1] <= result[3]:
        return result
    else:
        return None
def minimal_bounding_box(bbox, min_res, eps=1.0e-6):
    """Grow bounding box to exceed specified resolution if needed

    :param bbox: Bounding box with format [W, S, E, N]
    :type bbox: list

    :param min_res: Minimal acceptable resolution to exceed
    :type min_res: float

    :param eps: Optional tolerance that will be applied to 'buffer' result
    :type eps: float

    :returns: Adjusted bounding box guaranteed to exceed specified resolution
    """
    # FIXME (Ole): Probably obsolete now
    result = list(bbox)  # work on a copy; the input is left untouched
    width = result[2] - result[0]
    height = result[3] - result[1]
    # Pad each undersized dimension symmetrically so its extent reaches
    # min_res (plus eps for safety)
    if width < min_res:
        pad = (min_res - width) / 2 + eps
        result[0] -= pad
        result[2] += pad
    if height < min_res:
        pad = (min_res - height) / 2 + eps
        result[1] -= pad
        result[3] += pad
    return result
def buffered_bounding_box(bbox, resolution):
    """Grow bounding box with one unit of resolution in each direction

    This ensures there are enough pixels to robustly provide interpolated
    values without having to painstakingly deal with all corner cases
    such as 1 x 1, 1 x 2 and 2 x 1 arrays. The border also makes sure
    that points that would otherwise fall outside the domain (as defined
    by a tight bounding box) get assigned values.

    :param bbox: Bounding box with format [W, S, E, N]
    :type bbox: list

    :param resolution: (resx, resy) - raster resolution in each direction,
        or a single number used for both directions.
        If resolution is None, bbox is returned unchanged.
    :type resolution: tuple

    :returns: Adjusted bounding box

    .. note::
        Case in point: an interpolation point just outside a tight domain
        would be unsupported even though there are enough grid points.
    """
    result = list(bbox)  # always return a copy
    if resolution is None:
        return result
    try:
        pad_x, pad_y = resolution
    except TypeError:
        # Scalar resolution: same padding in both directions
        pad_x = pad_y = resolution
    result[0] -= pad_x
    result[1] -= pad_y
    result[2] += pad_x
    result[3] += pad_y
    return result
def get_geometry_type(geometry, geometry_type):
    """Determine geometry type based on data

    :param geometry: A list of either point coordinates [lon, lat] or polygons
        which are assumed to be numpy arrays of coordinates
    :type geometry: list

    :param geometry_type: Optional type - 'point', 'line', 'polygon' or None
    :type geometry_type: str, None

    :returns: geometry_type: Either ogr.wkbPoint, ogr.wkbLineString or
        ogr.wkbPolygon

    .. note::
        If geometry type cannot be determined an Exception is raised.
        There is no consistency check across all entries of the
        geometry list, only the first element is used in this determination.
    """
    # FIXME (Ole): Perhaps use OGR's own symbols
    msg = ('Argument geometry_type must be either "point", "line", '
           '"polygon" or None')
    # NOTE(review): an int other than 1, 2 or 3 reaches the .lower() call
    # below and raises AttributeError rather than failing the verify -
    # confirm whether that is acceptable.
    verify(geometry_type is None or
           geometry_type in [1, 2, 3] or
           geometry_type.lower() in ['point', 'line', 'polygon'], msg)
    if geometry_type is not None:
        if isinstance(geometry_type, basestring):
            # Map verbose name ('point', ...) to its OGR constant
            return INVERSE_GEOMETRY_TYPE_MAP[geometry_type.lower()]
        else:
            # Already a numeric OGR type
            return geometry_type
    # FIXME (Ole): Should add some additional checks to see if choice
    # makes sense
    msg = 'Argument geometry must be a sequence. I got %s ' % type(geometry)
    verify(is_sequence(geometry), msg)
    if len(geometry) == 0:
        # Default to point if there is no data
        return ogr.wkbPoint
    msg = ('The first element in geometry must be a sequence of length > 2. '
           'I got %s ' % str(geometry[0]))
    verify(is_sequence(geometry[0]), msg)
    verify(len(geometry[0]) >= 2, msg)
    if len(geometry[0]) == 2:
        try:
            float(geometry[0][0])
            float(geometry[0][1])
        except (ValueError, TypeError, IndexError):
            pass
        else:
            # This geometry appears to be point data
            geometry_type = ogr.wkbPoint
    elif len(geometry[0]) > 2:
        try:
            x = numpy.array(geometry[0])
        except ValueError:
            pass
        else:
            # This geometry appears to be polygon data
            if x.shape[0] > 2 and x.shape[1] == 2:
                geometry_type = ogr.wkbPolygon
    if geometry_type is None:
        msg = 'Could not determine geometry type'
        raise Exception(msg)
    return geometry_type
def is_sequence(x):
    """Determine if x behaves like a true sequence but not a string

    :param x: Sequence like object
    :type x: object

    :returns: Test result
    :rtype: bool

    .. note::
        This will for example return True for lists, tuples and numpy
        arrays but False for strings.
    """
    # NOTE(review): dicts are iterable, so list(d) succeeds and this
    # returns True for dictionaries, contradicting the original docstring
    # claim - confirm which behaviour callers rely on.
    if isinstance(x, basestring):
        # Strings are iterable but must not count as sequences here
        # (basestring is Python 2 only)
        return False
    try:
        list(x)
    except TypeError:
        return False
    else:
        return True
def array_to_line(A, geometry_type=ogr.wkbLinearRing):
    """Convert coordinates to linear_ring

    :param A: Nx2 Array of coordinates representing either a polygon or a
        line. A can be either a numpy array or a list of coordinates.
    :type A: numpy.ndarray, list

    :param geometry_type: A valid OGR geometry type.
        Default type ogr.wkbLinearRing
    :type geometry_type: ogr.wkbLinearRing, include ogr.wkbLineString

    :returns: ring: OGR line geometry

    .. note::
        Based on http://www.packtpub.com/article/working-geospatial-data-python
    """
    try:
        A = ensure_numeric(A, numpy.float)
    except Exception, e:
        msg = ('Array (%s) could not be converted to numeric array. '
               'I got type %s. Error message: %s'
               % (A, str(type(A)), e))
        raise Exception(msg)
    msg = 'Array must be a 2d array of vertices. I got %s' % (str(A.shape))
    verify(len(A.shape) == 2, msg)
    # NOTE(review): the message reports A.shape[0] but the check is on
    # A.shape[1] (column count) - message content may mislead.
    msg = 'A array must have two columns. I got %s' % (str(A.shape[0]))
    verify(A.shape[1] == 2, msg)
    N = A.shape[0]  # Number of vertices
    # Copy each vertex into a new OGR geometry of the requested type
    line = ogr.Geometry(geometry_type)
    for i in range(N):
        line.AddPoint(A[i, 0], A[i, 1])
    return line
def rings_equal(x, y, rtol=1.0e-6, atol=1.0e-8):
    """Compare two linear rings as numpy arrays

    :param x: A 2d array of the first ring
    :type x: numpy.ndarray

    :param y: A 2d array of the second ring
    :type y: numpy.ndarray

    :param rtol: The relative tolerance parameter
    :type rtol: float

    :param atol: The absolute tolerance parameter
    :type atol: float

    :returns: True if x == y or x' == y (up to the specified tolerance),
        where x' is x reversed in the first dimension. This corresponds to
        linear rings being seen as equal irrespective of whether they are
        organised in clock wise or counter clock wise order.
    """
    x = ensure_numeric(x, numpy.float)
    y = ensure_numeric(y, numpy.float)
    msg = 'Arrays must a 2d arrays of vertices. I got %s and %s' % (x, y)
    verify(len(x.shape) == 2 and len(y.shape) == 2, msg)
    msg = 'Arrays must have two columns. I got %s and %s' % (x, y)
    verify(x.shape[1] == 2 and y.shape[1] == 2, msg)
    # Equal either directly or with one ring reversed (orientation-agnostic)
    if (numpy.allclose(x, y, rtol=rtol, atol=atol) or
        numpy.allclose(x, y[::-1], rtol=rtol, atol=atol)):
        return True
    else:
        return False
# FIXME (Ole): We can retire this messy function now
# Positive: Delete it :-)
def array_to_wkt(A, geom_type='POLYGON'):
"""Convert coordinates to wkt format
:param A: Nx2 Array of coordinates representing either a polygon or a line.
A can be either a numpy array or a list of coordinates.
:type A: numpy.array
:param geom_type: Determines output keyword 'POLYGON' or 'LINESTRING'
:type geom_type: str
:returns: wkt: geometry in the format known to ogr: Examples
Note:
POLYGON((1020 1030,1020 1045,1050 1045,1050 1030,1020 1030))
LINESTRING(1000 1000, 1100 1050)
"""
try:
A = ensure_numeric(A, numpy.float)
except Exception, e:
msg = ('Array (%s) could not be converted to numeric array. '
'I got type %s. Error message: %s'
% (geom_type, str(type(A)), e))
raise Exception(msg)
msg = 'Array must be a 2d array of vertices. I got %s' % (str(A.shape))
verify(len(A.shape) == 2, msg)
msg = 'A array must have two columns. I got %s' % (str(A.shape[0]))
verify(A.shape[1] == 2, msg)
if geom_type == 'LINESTRING':
# One bracket
n = 1
elif geom_type == 'POLYGON':
# Two brackets (tsk tsk)
n = 2
else:
msg = 'Unknown geom_type: %s' % geom_type
raise Exception(msg)
wkt_string = geom_type + '(' * n
N = len(A)
for i in range(N):
# Works for both lists and arrays
wkt_string += '%f %f, ' % tuple(A[i])
return wkt_string[:-2] + ')' * n
# Map of ogr numerical geometry types to their textual representation
# (used by geometry_type_to_string below)
# FIXME (Ole): Some of them don't exist, even though they show up
# when doing dir(ogr) - Why?:
geometry_type_map = {ogr.wkbPoint: 'Point',
                     ogr.wkbPoint25D: 'Point25D',
                     ogr.wkbPolygon: 'Polygon',
                     ogr.wkbPolygon25D: 'Polygon25D',
                     #ogr.wkbLinePoint: 'LinePoint',  # ??
                     ogr.wkbGeometryCollection: 'GeometryCollection',
                     ogr.wkbGeometryCollection25D: 'GeometryCollection25D',
                     ogr.wkbLineString: 'LineString',
                     ogr.wkbLineString25D: 'LineString25D',
                     ogr.wkbLinearRing: 'LinearRing',
                     ogr.wkbMultiLineString: 'MultiLineString',
                     ogr.wkbMultiLineString25D: 'MultiLineString25D',
                     ogr.wkbMultiPoint: 'MultiPoint',
                     ogr.wkbMultiPoint25D: 'MultiPoint25D',
                     ogr.wkbMultiPolygon: 'MultiPolygon',
                     ogr.wkbMultiPolygon25D: 'MultiPolygon25D',
                     ogr.wkbNDR: 'NDR',
                     ogr.wkbNone: 'None',
                     ogr.wkbUnknown: 'Unknown'}
def geometry_type_to_string(g_type):
    """Provide a string representation of a numeric OGR geometry type.

    :param g_type: OGR geometry type constant (ogr.wkb*) or None.

    :returns: Text name for the type, or a fallback description for
        None / unknown values.
    """
    if g_type is None:
        return 'No geometry type assigned'
    try:
        return geometry_type_map[g_type]
    except KeyError:
        return 'Unknown geometry type: %s' % str(g_type)
# FIXME: Move to common numerics area along with polygon.py
def calculate_polygon_area(polygon, signed=False):
    """Calculate the (signed) area of a non-self-intersecting polygon.

    :param polygon: Numeric Nx2 array of (longitude, latitude) points,
        assumed closed (first and last points identical).
    :type polygon: numpy.ndarray

    :param signed: When True, keep the sign of the shoelace sum: positive
        for counter clockwise ordering, negative for clockwise. When
        False (default), always return the absolute area.
    :type signed: bool

    :returns: Area of polygon (subject to the value of argument signed)

    Sources:
        http://paulbourke.net/geometry/polyarea/
        http://en.wikipedia.org/wiki/Centroid
    """
    # Make sure input is numeric
    vertices = numpy.array(polygon)
    msg = ('Polygon is assumed to consist of coordinate pairs. '
           'I got second dimension %i instead of 2' % vertices.shape[1])
    verify(vertices.shape[1] == 2, msg)
    xs = vertices[:, 0]
    ys = vertices[:, 1]
    # Shoelace formula: 0.5 * sum_{i=0}^{N-1} (x_i y_{i+1} - x_{i+1} y_i)
    cross_terms = xs[:-1] * ys[1:] - ys[:-1] * xs[1:]
    area = numpy.sum(cross_terms) / 2.
    return area if signed else abs(area)
def calculate_polygon_centroid(polygon):
    """Calculate the centroid of non-self-intersecting polygon

    :param polygon: Numeric array of points (longitude, latitude). It is
        assumed to be closed, i.e. first and last points are identical
    :type polygon: numpy.ndarray

    :returns: calculated centroid
    :rtype: numpy.ndarray

    .. note::
        Sources
            http://paulbourke.net/geometry/polyarea/
            http://en.wikipedia.org/wiki/Centroid
    """
    # Make sure it is numeric
    P = numpy.array(polygon)
    # Normalise to ensure numerical accuracy.
    # This requirement is backed by tests in test_io.py and without it
    # centroids at building footprint level may get shifted outside the
    # polygon!
    P_origin = numpy.amin(P, axis=0)
    P = P - P_origin
    # Get area. This calculation could be incorporated to save time
    # if necessary as the two formulas are very similar.
    A = calculate_polygon_area(polygon, signed=True)
    x = P[:, 0]
    y = P[:, 1]
    # Calculate
    # Cx = sum_{i=0}^{N-1} (x_i + x_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)
    # Cy = sum_{i=0}^{N-1} (y_i + y_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)
    a = x[:-1] * y[1:]
    b = y[:-1] * x[1:]
    cx = x[:-1] + x[1:]
    cy = y[:-1] + y[1:]
    Cx = numpy.sum(cx * (a - b)) / (6. * A)
    Cy = numpy.sum(cy * (a - b)) / (6. * A)
    # Translate back to real location
    C = numpy.array([Cx, Cy]) + P_origin
    return C
def points_between_points(point1, point2, delta):
    """Create an array of points between two points given a spacing delta.

    :param point1: The first point
    :type point1: numpy.ndarray

    :param point2: The second point
    :type point2: numpy.ndarray

    :param delta: The increment between inserted points
    :type delta: float

    :returns: Array of points, starting at point1 and stepping towards
        point2 in units of delta along the connecting line.
    :rtype: numpy.ndarray

    .. note::
        With u = (x1-x0, y1-y0)/L and L = sqrt((x1-x0)^2 + (y1-y0)^2),
        the points are (x0, y0) + u * n * delta for n = 1, 2, ...
        while n * delta <= L.
    """
    x0, y0 = point1
    x1, y1 = point2
    length = math.sqrt(math.pow((x1 - x0), 2) + math.pow((y1 - y0), 2))
    n_steps = int(length / delta)
    direction = numpy.array([x1 - x0, y1 - y0]) / length
    result = [point1]
    for step in range(1, n_steps + 1):
        result.append(point1 + direction * step * delta)
    return numpy.array(result)
def points_along_line(line, delta):
    """Calculate a list of points along a line with a given delta

    :param line: Numeric array of points (longitude, latitude).
    :type line: numpy.ndarray

    :param delta: Decimal number to be used as step
    :type delta: float

    :returns: Numeric array of points (longitude, latitude) obtained by
        densifying each segment of the input line with spacing delta.
    :rtype: numpy.ndarray
    """
    # Make sure it is numeric
    P = numpy.array(line)
    points = []
    # Densify each consecutive segment of the polyline
    for i in range(len(P) - 1):
        pts = points_between_points(P[i], P[i + 1], delta)
        # If the first point of this list is the same
        # as the last one recorded, do not use it
        if len(points) > 0:
            if numpy.allclose(points[-1], pts[0]):
                pts = pts[1:]
        points.extend(pts)
    C = numpy.array(points)
    return C
def combine_polygon_and_point_layers(layers):
    """Combine polygon and point layers

    :param layers: List of vector layers of type polygon or point
    :type layers: list

    :returns: One point layer with all input point layers and centroids from
        all input polygon layers.
    :rtype: numpy.ndarray

    :raises: InaSAFEError (in case attribute names are not the same.)

    .. note::
        Not yet implemented - see issue #276. The print below is a
        Python 2 placeholder only.
    """
    # This is to implement issue #276
    print layers
def get_ring_data(ring):
    """Extract vertex coordinates from an OGR ring object.

    :param ring: OGR ring object (exposes GetPointCount, GetX, GetY).

    :returns: Nx2 numpy array of vertex coordinates (lon, lat).
    :rtype: numpy.ndarray
    """
    n_vertices = ring.GetPointCount()
    # noinspection PyTypeChecker
    coordinates = numpy.zeros((n_vertices, 2), dtype='d')
    # FIXME (Ole): Is there any way to get the entire data vectors?
    for vertex in range(n_vertices):
        coordinates[vertex, :] = ring.GetX(vertex), ring.GetY(vertex)
    return coordinates
def get_polygon_data(G):
    """Extract polygon data from OGR geometry

    :param G: OGR polygon geometry
    :return: InaSAFE Polygon instance built from the outer ring and any
        inner rings (holes)
    """
    # Get outer ring, then inner rings
    # http://osgeo-org.1560.n6.nabble.com/
    # gdal-dev-Polygon-topology-td3745761.html
    number_of_rings = G.GetGeometryCount()
    # Get outer ring (by OGR convention the first sub-geometry)
    outer_ring = get_ring_data(G.GetGeometryRef(0))
    # Get inner rings (holes) if any
    inner_rings = []
    if number_of_rings > 1:
        for i in range(1, number_of_rings):
            inner_ring = get_ring_data(G.GetGeometryRef(i))
            inner_rings.append(inner_ring)
    # Return Polygon instance
    return Polygon(outer_ring=outer_ring,
                   inner_rings=inner_rings)
def safe_to_qgis_layer(layer):
    """Helper function to make a QgsMapLayer from a safe read_layer layer.

    :param layer: Layer object as provided by InaSAFE engine.
    :type layer: read_layer

    :returns: A validated QGIS layer or None. Returns None when QGIS is not
        available.
    :rtype: QgsMapLayer, QgsVectorLayer, QgsRasterLayer, None

    :raises: Exception if layer is not valid.
    """
    try:
        from qgis.core import QgsVectorLayer, QgsRasterLayer
    except ImportError:
        # Headless / QGIS-free environment: signal by returning None
        return None
    # noinspection PyUnresolvedReferences
    message = tr(
        'Input layer must be a InaSAFE spatial object. I got %s'
    ) % (str(type(layer)))
    if not hasattr(layer, 'is_inasafe_spatial_object'):
        raise Exception(message)
    if not layer.is_inasafe_spatial_object:
        raise Exception(message)
    # Get associated filename and symbolic name
    filename = layer.get_filename()
    name = layer.get_name()
    qgis_layer = None
    # Read layer
    if layer.is_vector:
        qgis_layer = QgsVectorLayer(filename, name, 'ogr')
    elif layer.is_raster:
        qgis_layer = QgsRasterLayer(filename, name)
    # Verify that new qgis layer is valid
    # NOTE(review): if the layer is neither vector nor raster, qgis_layer
    # stays None and isValid() raises AttributeError - confirm that case
    # cannot occur for InaSAFE spatial objects.
    if qgis_layer.isValid():
        return qgis_layer
    else:
        # noinspection PyUnresolvedReferences
        message = tr('Loaded impact layer "%s" is not valid') % filename
        raise Exception(message)
|
drayanaindra/inasafe
|
safe/storage/utilities.py
|
Python
|
gpl-3.0
| 36,585
|
[
"NetCDF"
] |
70309fdff29a117a94e83608acb8a4a12f1e7c8d11b001500d8cd3133619a2d1
|
"""
picasso.simulate
~~~~~~~~~~~~~~~~
Simulate single molcule fluorescence data
:author: Maximilian Thomas Strauss, 2016-2018
:copyright: Copyright (c) 2016-2018 Jungmann Lab, MPI of Biochemistry
"""
import numpy as _np
from . import io as _io
from numba import njit
magfac = 0.79
@njit
def calculate_zpsf(z, cx, cy):
    """Evaluate the astigmatic PSF widths (wx, wy) at axial position z.

    Both widths are degree-6 polynomials in the magnification-corrected
    z position; cx and cy hold the coefficients ordered from the z**6
    term down to the constant term.
    """
    zz = z / magfac
    # Build the monomials by successive multiplication (numba-friendly,
    # identical rounding to repeated products)
    zz2 = zz * zz
    zz3 = zz * zz2
    zz4 = zz * zz3
    zz5 = zz * zz4
    zz6 = zz * zz5
    wx = (cx[0] * zz6 + cx[1] * zz5 + cx[2] * zz4
          + cx[3] * zz3 + cx[4] * zz2 + cx[5] * zz + cx[6])
    wy = (cy[0] * zz6 + cy[1] * zz5 + cy[2] * zz4
          + cy[3] * zz3 + cy[4] * zz2 + cy[5] * zz + cy[6])
    return (wx, wy)
def test_calculate_zpsf():
    # Regression test: evaluate the PSF width polynomial with identical
    # coefficient vectors (cx == cy); expected wx values were precomputed
    # for magfac = 0.79.
    cx = _np.array([1, 2, 3, 4, 5, 6, 7])
    cy = _np.array([1, 2, 3, 4, 5, 6, 7])
    z = _np.array([1, 2, 3, 4, 5, 6, 7])
    wx, wy = calculate_zpsf(z, cx, cy)
    result = [4.90350522e+01, 7.13644987e+02, 5.52316597e+03,
              2.61621620e+04, 9.06621337e+04, 2.54548124e+05,
              6.14947219e+05]
    delta = wx - result
    # Allow a tiny numerical tolerance on the squared error
    assert sum(delta**2) < 0.001
def saveInfo(filename, info):
    # Thin wrapper: persist a single info dict (wrapped in a list) via
    # picasso's io helper. default_flow_style=True presumably selects
    # compact YAML flow style (a yaml.dump option) - confirm in io module.
    _io.save_info(filename, [info], default_flow_style=True)
def noisy(image, mu, sigma):
"""
Add gaussian noise to an image.
"""
row, col = image.shape # Variance for _np.random is 1
gauss = sigma * _np.random.normal(0, 1, (row, col)) + mu
gauss = gauss.reshape(row, col)
noisy = image + gauss
noisy[noisy < 0] = 0
return noisy
def noisy_p(image, mu):
"""
# Add poissonian noise to an image or movie
"""
poiss = _np.random.poisson(mu, image.shape).astype(float)
noisy = image + poiss
return noisy
def check_type(movie):
movie[movie >= (2 ** 16) - 1] = (2 ** 16) - 1
movie = movie.astype("<u2") # little-endian 16-bit unsigned int
return movie
def paintgen(
meandark, meanbright, frames, time, photonrate, photonratestd, photonbudget
):
"""
Paint-Generator:
Generates on and off-traces for given parameters.
Calculates the number of Photons in each frame for a binding site.
"""
meanlocs = 4 * int(
_np.ceil(frames * time / (meandark + meanbright))
) # This is an estimate for the total number of binding events
if meanlocs < 10:
meanlocs = meanlocs * 10
dark_times = _np.random.exponential(meandark, meanlocs)
bright_times = _np.random.exponential(meanbright, meanlocs)
events = _np.vstack((dark_times, bright_times)).reshape(
(-1,), order="F"
) # Interweave dark_times and bright_times [dt,bt,dt,bt..]
eventsum = _np.cumsum(events)
maxloc = _np.argmax(
eventsum > (frames * time)
) # Find the first event that exceeds the total integration time
simulatedmeandark = _np.mean(events[:maxloc:2])
simulatedmeanbright = _np.mean(events[1:maxloc:2])
# check trace
if _np.mod(maxloc, 2): # uneven -> ends with an OFF-event
onevents = int(_np.floor(maxloc / 2))
else: # even -> ends with bright event
onevents = int(maxloc / 2)
bright_events = _np.floor(maxloc / 2) # number of bright_events
photonsinframe = _np.zeros(
int(frames + _np.ceil(meanbright / time * 20))
) # an on-event might be longer than the movie, so allocate more memory
# calculate photon numbers
for i in range(1, maxloc, 2):
if photonratestd == 0:
photons = _np.round(photonrate * time)
else:
photons = _np.round(
_np.random.normal(photonrate, photonratestd) * time
) # Number of Photons that are emitted in one frame
if photons < 0:
photons = 0
tempFrame = int(
_np.floor(eventsum[i - 1] / time)
) # Get the first frame in which something happens in on-event
onFrames = int(
_np.ceil((eventsum[i] - tempFrame * time) / time)
) # Number of frames in which photon emittance happens
if photons * onFrames > photonbudget:
onFrames = int(
_np.ceil(photonbudget / (photons * onFrames) * onFrames)
) # Reduce the number of on-frames if the photonbudget is reached
for j in range(0, (onFrames)):
if onFrames == 1: # CASE 1: all photons are emitted in one frame
photonsinframe[1 + tempFrame] = int(
_np.random.poisson(
((tempFrame + 1) * time - eventsum[i - 1])
/ time
* photons
)
)
elif (
onFrames == 2
): # CASE 2: all photons are emitted in two frames
emittedphotons = (
((tempFrame + 1) * time - eventsum[i - 1]) / time * photons
)
if j == 1: # photons in first onframe
photonsinframe[1 + tempFrame] = int(
_np.random.poisson(
((tempFrame + 1) * time - eventsum[i - 1])
/ time
* photons
)
)
else: # photons in second onframe
photonsinframe[2 + tempFrame] = int(
_np.random.poisson(
(eventsum[i] - (tempFrame + 1) * time)
/ time
* photons
)
)
else: # CASE 3: all photons are mitted in three or more frames
if j == 1:
photonsinframe[1 + tempFrame] = int(
_np.random.poisson(
((tempFrame + 1) * time - eventsum[i - 1])
/ time
* photons
)
) # Indexing starts with 0
elif j == onFrames:
photonsinframe[onFrames + tempFrame] = int(
_np.random.poisson(
(eventsum(i) - (tempFrame + onFrames - 1) * time)
/ time
* photons
)
)
else:
photonsinframe[tempFrame + j] = int(
_np.random.poisson(photons)
)
totalphotons = _np.sum(
photonsinframe[1 + tempFrame: tempFrame + 1 + onFrames]
)
if totalphotons > photonbudget:
photonsinframe[onFrames + tempFrame] = int(
photonsinframe[onFrames + tempFrame]
- (totalphotons - photonbudget)
)
photonsinframe = photonsinframe[0:frames]
timetrace = events[0:maxloc]
if onevents > 0:
spotkinetics = [
onevents,
sum(photonsinframe > 0),
simulatedmeandark,
simulatedmeanbright,
]
else:
spotkinetics = [0, sum(photonsinframe > 0), 0, 0]
return photonsinframe, timetrace, spotkinetics
def distphotons(
    structures,
    itime,
    frames,
    taud,
    taub,
    photonrate,
    photonratestd,
    photonbudget,
):
    """
    Distribute photons over frames for one binding site.

    Thin wrapper around paintgen: converts the mean dark (taud) and
    bright (taub) times to integers and forwards the acquisition
    parameters.  *structures* is accepted for interface compatibility
    but is not used here (the original also computed unused binding-site
    arrays and a dead `photonposall` variable; both removed).
    """
    return paintgen(
        int(taud),
        int(taub),
        frames,
        itime,
        photonrate,
        photonratestd,
        photonbudget,
    )
def distphotonsxy(runner, photondist, structures, psf, mode3Dstate, cx, cy):
bindingsitesx = structures[0, :]
bindingsitesy = structures[1, :]
bindingsitesz = structures[4, :]
nosites = len(bindingsitesx) # number of binding sites in image
tempphotons = _np.array(photondist[:, runner]).astype(int)
n_photons = _np.sum(tempphotons)
n_photons_step = _np.cumsum(tempphotons)
n_photons_step = _np.insert(n_photons_step, 0, 0)
# Allocate memory
photonposframe = _np.zeros((n_photons, 2))
for i in range(0, nosites):
photoncount = int(photondist[i, runner])
if mode3Dstate:
wx, wy = calculate_zpsf(bindingsitesz[i], cx, cy)
cov = [[wx * wx, 0], [0, wy * wy]]
else:
cov = [[psf * psf, 0], [0, psf * psf]]
if photoncount > 0:
mu = [bindingsitesx[i], bindingsitesy[i]]
photonpos = _np.random.multivariate_normal(mu, cov, photoncount)
photonposframe[
n_photons_step[i]: n_photons_step[i + 1], :
] = photonpos
return photonposframe
def convertMovie(
    runner,
    photondist,
    structures,
    imagesize,
    frames,
    psf,
    photonrate,
    background,
    noise,
    mode3Dstate,
    cx,
    cy,
):
    """Render one simulated camera frame.

    Photon positions for frame *runner* are histogrammed onto an
    imagesize x imagesize pixel grid and flipped vertically to match the
    render orientation.  Several parameters (frames, photonrate,
    background, noise) are unused here and kept for interface
    compatibility.
    """
    positions = distphotonsxy(
        runner, photondist, structures, psf, mode3Dstate, cx, cy
    )
    if len(positions) == 0:
        return _np.zeros((imagesize, imagesize))
    bin_edges = range(0, imagesize + 1)
    frame_img, _, _ = _np.histogram2d(
        positions[:, 1], positions[:, 0], bins=(bin_edges, bin_edges)
    )
    return _np.flipud(frame_img)  # to be consistent with render
def saveMovie(filename, movie, info):
    # Write the simulated movie as raw binary plus a single-entry info list.
    _io.save_raw(filename, movie, [info])
# Function to store the coordinates of a structure in a container.
# The coordinates will be adjusted so that the center of mass is at the origin.
def defineStructure(
    structurexxpx,
    structureyypx,
    structureex,
    structure3d,
    pixelsize,
    mean=True,
):
    """Assemble a structure definition array from coordinate lists.

    Parameters
    ----------
    structurexxpx, structureyypx : x/y coordinates, divided by *pixelsize*
        below.  NOTE(review): despite the "px" suffix these are presumably
        given in nm and converted to camera pixels -- confirm units.
    structureex : exchange-round label per site.
    structure3d : z information per site.
    pixelsize : conversion factor applied to the coordinates.
    mean : if True, center the coordinates on their center of mass first.

    Returns
    -------
    _np.ndarray with rows [x, y, exchange, 3d].
    """
    if mean:
        structurexxpx = structurexxpx - _np.mean(structurexxpx)
        structureyypx = structureyypx - _np.mean(structureyypx)
    # Convert coordinates (comprehensions instead of append loops).
    structurexx = [x / pixelsize for x in structurexxpx]
    structureyy = [y / pixelsize for y in structureyypx]
    return _np.array(
        [structurexx, structureyy, structureex, structure3d]
    )  # FORMAT: x-pos,y-pos,exchange information
def generatePositions(number, imagesize, frame, arrangement):
    """Generate a set of positions where structures will be placed.

    arrangement == 0 places them on a regular grid with a margin of
    *frame* pixels; any other value scatters them uniformly at random.
    Returns an array of shape (number, 2).
    """
    if arrangement != 0:
        return (imagesize - 2 * frame) * _np.random.rand(number, 2) + frame
    side = int(_np.ceil((number ** 0.5)))
    axis = _np.linspace(frame, imagesize - frame, side)
    grid_x, grid_y = _np.meshgrid(axis, axis)
    coords = _np.vstack((
        _np.ravel(grid_x)[0:number],
        _np.ravel(grid_y)[0:number],
    ))
    return _np.transpose(coords)
def rotateStructure(structure):
"""
Rotate a structure randomly
"""
angle_rad = _np.random.rand(1) * 2 * _np.pi
newstructure = _np.array(
[
(structure[0, :]) * _np.cos(angle_rad)
- (structure[1, :]) * _np.sin(angle_rad),
(structure[0, :]) * _np.sin(angle_rad)
+ (structure[1, :]) * _np.cos(angle_rad),
structure[2, :],
structure[3, :],
]
)
return newstructure
def incorporateStructure(structure, incorporation):
"""
Returns a subset of the strucutre to reflect incorporation of stpales
"""
newstructure = structure[
:, (_np.random.rand(structure.shape[1]) < incorporation)
]
return newstructure
def randomExchange(pos):
"""
Randomly shuffle exchange parameters for rnadom labeling
"""
arraytoShuffle = pos[2, :]
_np.random.shuffle(arraytoShuffle)
newpos = _np.array([pos[0, :], pos[1, :], arraytoShuffle, pos[3, :]])
return newpos
def prepareStructures(
structure, gridpos, orientation, number, incorporation, exchange
):
"""
prepareStructures:
Input positions, the structure definitionconsider rotation etc.
"""
newpos = []
oldstructure = _np.array(
[structure[0, :], structure[1, :], structure[2, :], structure[3, :]]
)
for i in range(0, len(gridpos)):
if orientation == 0:
structure = oldstructure
else:
structure = rotateStructure(oldstructure)
if incorporation == 1:
pass
else:
structure = incorporateStructure(structure, incorporation)
newx = structure[0, :] + gridpos[i, 0]
newy = structure[1, :] + gridpos[i, 1]
newstruct = _np.array(
[
newx,
newy,
structure[2, :],
structure[2, :] * 0 + i,
structure[3, :],
]
)
if i == 0:
newpos = newstruct
else:
newpos = _np.concatenate((newpos, newstruct), axis=1)
if exchange == 1:
newpos = randomExchange(newpos)
return newpos
|
jungmannlab/picasso
|
picasso/simulate.py
|
Python
|
mit
| 13,389
|
[
"Gaussian"
] |
cf86ca7025a76084a85e9f3600808e2fe4384cf182f894c02744a71b79643be2
|
import json
import logging
import networkx as nx
import pytz
from itertools import imap
from functools import partial
from collections import defaultdict
from math import sqrt
from datetime import datetime
from django.core.serializers.json import DjangoJSONEncoder
from django.db import connection
from django.http import HttpResponse
from rest_framework.decorators import api_view
from catmaid.models import UserRole, ClassInstance, Treenode, \
TreenodeClassInstance, ConnectorClassInstance, Review
from catmaid.control import export_NeuroML_Level3
from catmaid.control.authentication import requires_user_role
from catmaid.control.common import get_relation_to_id_map
from catmaid.control.review import get_treenodes_to_reviews, \
get_treenodes_to_reviews_with_time
from tree_util import edge_count_to_root, partition
try:
from exportneuroml import neuroml_single_cell, neuroml_network
except ImportError:
logging.getLogger(__name__).warn("NeuroML module could not be loaded.")
def get_treenodes_qs(project_id=None, skeleton_id=None, with_labels=True):
    """Build querysets for a skeleton's treenodes and, optionally, for the
    'labeled_as' tags on its treenodes and connectors.

    Returns (treenode_qs, labels_qs, labelconnector_qs); the last two are
    empty lists when with_labels is False.
    """
    treenodes = Treenode.objects.filter(skeleton_id=skeleton_id)
    if not with_labels:
        return treenodes, [], []
    node_labels = TreenodeClassInstance.objects.filter(
        relation__relation_name='labeled_as',
        treenode__skeleton_id=skeleton_id).select_related('treenode', 'class_instance')
    connector_labels = ConnectorClassInstance.objects.filter(
        relation__relation_name='labeled_as',
        connector__treenodeconnector__treenode__skeleton_id=skeleton_id).select_related('connector', 'class_instance')
    return treenodes, node_labels, connector_labels
def get_swc_string(treenodes_qs):
    """Serialize treenodes to SWC text.

    Each line holds: id, type (always 0), x, y, z, radius (negative radii
    are emitted as 0), and the parent id (-1 for the root).
    """
    lines = []
    for tn in treenodes_qs:
        row = (
            tn.id,
            0,
            tn.location_x,
            tn.location_y,
            tn.location_z,
            max(tn.radius, 0),
            -1 if tn.parent_id is None else tn.parent_id,
        )
        lines.append(" ".join(map(str, row)) + "\n")
    # join once instead of repeated string += (quadratic in row count)
    return "".join(lines)
def export_skeleton_response(request, project_id=None, skeleton_id=None, format=None):
    """Export a skeleton as SWC or JSON, selected by *format*.

    Raises for unknown formats.
    NOTE(review): the 'json' branch calls get_json_string, which is not
    defined in this module -- confirm it is provided elsewhere, otherwise
    that branch raises a NameError.
    """
    treenode_qs, labels_qs, labelconnector_qs = get_treenodes_qs(project_id, skeleton_id)
    if format == 'swc':
        return HttpResponse(get_swc_string(treenode_qs), content_type='text/plain')
    elif format == 'json':
        return HttpResponse(get_json_string(treenode_qs), content_type='application/json')
    else:
        # Fixed: the original used the Python-2-only "raise Exception, msg"
        # statement form; the call form below behaves identically and is
        # valid on both Python 2 and 3.
        raise Exception("Unknown format ('%s') in export_skeleton_response" % (format,))
@requires_user_role(UserRole.Browse)
def compact_skeleton(request, project_id=None, skeleton_id=None, with_connectors=None, with_tags=None):
    """
    Performance-critical function. Do not edit unless to improve performance.
    Returns, in JSON, [[nodes], [connectors], {nodeID: [tags]}], with connectors and tags being empty when 0 == with_connectors and 0 == with_tags, respectively
    """
    # Sanitize (also guarantees the values below are plain ints)
    project_id = int(project_id)
    skeleton_id = int(skeleton_id)
    with_connectors = int(with_connectors)
    with_tags = int(with_tags)
    cursor = connection.cursor()
    # Parameterized SQL (driver-side quoting) instead of Python string
    # interpolation: equivalent for these int-sanitized values, but safe
    # by construction.
    cursor.execute('''
        SELECT id, parent_id, user_id,
            location_x, location_y, location_z,
            radius, confidence
        FROM treenode
        WHERE skeleton_id = %s
        ''', (skeleton_id,))
    nodes = tuple(cursor.fetchall())
    if 0 == len(nodes):
        # Check if the skeleton exists
        if 0 == ClassInstance.objects.filter(pk=skeleton_id).count():
            raise Exception("Skeleton #%s doesn't exist" % skeleton_id)
        # Otherwise returns an empty list of nodes
    connectors = ()
    tags = defaultdict(list)
    if 0 != with_connectors or 0 != with_tags:
        # postgres is caching this query
        cursor.execute(
            "SELECT relation_name, id FROM relation WHERE project_id=%s",
            (project_id,))
        relations = dict(cursor.fetchall())
        if 0 != with_connectors:
            # Fetch all connectors with their partner treenode IDs
            pre = relations['presynaptic_to']
            post = relations['postsynaptic_to']
            gj = relations.get('gapjunction_with', -1)  # optional relation
            cursor.execute('''
                SELECT tc.treenode_id, tc.connector_id, tc.relation_id,
                    c.location_x, c.location_y, c.location_z
                FROM treenode_connector tc,
                    connector c
                WHERE tc.skeleton_id = %s
                  AND tc.connector_id = c.id
                  AND (tc.relation_id = %s OR tc.relation_id = %s OR tc.relation_id = %s)
                ''', (skeleton_id, pre, post, gj))
            # Encode relation as 0 (pre), 1 (post), 2 (gap junction); -1 otherwise
            relation_index = {pre: 0, post: 1, gj: 2}
            connectors = tuple((row[0], row[1], relation_index.get(row[2], -1), row[3], row[4], row[5]) for row in cursor.fetchall())
        if 0 != with_tags:
            # Fetch all node tags
            cursor.execute('''
                SELECT c.name, tci.treenode_id
                FROM treenode t,
                    treenode_class_instance tci,
                    class_instance c
                WHERE t.skeleton_id = %s
                  AND t.id = tci.treenode_id
                  AND tci.relation_id = %s
                  AND c.id = tci.class_instance_id
                ''', (skeleton_id, relations['labeled_as']))
            for row in cursor.fetchall():
                tags[row[0]].append(row[1])
    return HttpResponse(json.dumps((nodes, connectors, tags), separators=(',', ':')))
@requires_user_role(UserRole.Browse)
def compact_arbor(request, project_id=None, skeleton_id=None, with_nodes=None, with_connectors=None, with_tags=None):
    """
    Performance-critical function. Do not edit unless to improve performance.
    Returns, in JSON, [[nodes], [connections], {nodeID: [tags]}],
    with connections being empty when 0 == with_connectors,
    and the dict of node tags being empty 0 == with_tags, respectively.
    The difference between this function and the compact_skeleton function is that
    the connections contain the whole chain from the skeleton of interest to the
    partner skeleton:
    [treenode_id, confidence,
     connector_id,
     confidence, treenode_id, skeleton_id,
     relation_id, relation_id]
    where the first 2 values are from the given skeleton_id,
    then the connector_id,
    then the next 3 values are from the partner skeleton,
    and finally the two relations: first for the given skeleton_id and then for the other skeleton.
    The relation_id is 0 for pre and 1 for post.
    """
    # Sanitize
    project_id = int(project_id)
    skeleton_id = int(skeleton_id)
    with_nodes = int(with_nodes)
    with_connectors = int(with_connectors)
    with_tags = int(with_tags)
    cursor = connection.cursor()
    nodes = ()
    connectors = []
    tags = defaultdict(list)
    if 0 != with_nodes:
        # Parameterized SQL instead of interpolating even sanitized ints.
        cursor.execute('''
            SELECT id, parent_id, user_id,
                location_x, location_y, location_z,
                radius, confidence
            FROM treenode
            WHERE skeleton_id = %s
            ''', (skeleton_id,))
        nodes = tuple(cursor.fetchall())
        if 0 == len(nodes):
            # Check if the skeleton exists
            if 0 == ClassInstance.objects.filter(pk=skeleton_id).count():
                raise Exception("Skeleton #%s doesn't exist" % skeleton_id)
            # Otherwise returns an empty list of nodes
    if 0 != with_connectors or 0 != with_tags:
        # postgres is caching this query
        cursor.execute(
            "SELECT relation_name, id FROM relation WHERE project_id=%s",
            (project_id,))
        relations = dict(cursor.fetchall())
        if 0 != with_connectors:
            # Fetch all inputs and outputs
            pre = relations['presynaptic_to']
            post = relations['postsynaptic_to']
            cursor.execute('''
                SELECT tc1.treenode_id, tc1.confidence,
                    tc1.connector_id,
                    tc2.confidence, tc2.treenode_id, tc2.skeleton_id,
                    tc1.relation_id, tc2.relation_id
                FROM treenode_connector tc1,
                    treenode_connector tc2
                WHERE tc1.skeleton_id = %s
                  AND tc1.id != tc2.id
                  AND tc1.connector_id = tc2.connector_id
                  AND (tc1.relation_id = %s OR tc1.relation_id = %s)
                ''', (skeleton_id, pre, post))
            for row in cursor.fetchall():
                # Ignore all other kinds of relation pairs (there shouldn't be any)
                if row[6] == pre and row[7] == post:
                    connectors.append((row[0], row[1], row[2], row[3], row[4], row[5], 0, 1))
                elif row[6] == post and row[7] == pre:
                    connectors.append((row[0], row[1], row[2], row[3], row[4], row[5], 1, 0))
        if 0 != with_tags:
            # Fetch all node tags
            cursor.execute('''
                SELECT c.name, tci.treenode_id
                FROM treenode t,
                    treenode_class_instance tci,
                    class_instance c
                WHERE t.skeleton_id = %s
                  AND t.id = tci.treenode_id
                  AND tci.relation_id = %s
                  AND c.id = tci.class_instance_id
                ''', (skeleton_id, relations['labeled_as']))
            for row in cursor.fetchall():
                tags[row[0]].append(row[1])
    return HttpResponse(json.dumps((nodes, connectors, tags), separators=(',', ':')))
@requires_user_role([UserRole.Browse])
def treenode_time_bins(request, project_id=None, skeleton_id=None):
    """ Return a map of time bins (minutes) vs. list of nodes. """
    bins = defaultdict(list)
    epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
    rows = Treenode.objects.filter(
        skeleton_id=int(skeleton_id)).values_list('id', 'creation_time')
    for node_id, created in rows:
        minute = int((created - epoch).total_seconds() / 60)
        bins[minute].append(node_id)
    return HttpResponse(json.dumps(bins, separators=(',', ':')))
@requires_user_role([UserRole.Browse])
def compact_arbor_with_minutes(request, project_id=None, skeleton_id=None, with_nodes=None, with_connectors=None, with_tags=None):
    # Compose the compact_arbor JSON with the per-minute time bins by splicing
    # the two response bodies: drop compact_arbor's trailing ']' and append the
    # minutes map as an extra top-level element.
    # NOTE(review): this relies on both responses being well-formed JSON with
    # the expected delimiters -- confirm before changing either serializer.
    r = compact_arbor(request, project_id=project_id, skeleton_id=skeleton_id, with_nodes=with_nodes, with_connectors=with_connectors, with_tags=with_tags)
    r.content = "%s, %s]" % (r.content[:-1], treenode_time_bins(request, project_id=project_id, skeleton_id=skeleton_id).content)
    return r
# DEPRECATED. Will be removed.
def _skeleton_for_3d_viewer(skeleton_id, project_id, with_connectors=True, lean=0, all_field=False):
    """ with_connectors: when False, connectors are not returned
    lean: when not zero, both connectors and tags are returned as empty arrays.
    all_field: when True, creation/edition times are included.
    Returns (name, nodes, tags, connectors, reviews). """
    skeleton_id = int(skeleton_id) # sanitize
    cursor = connection.cursor()
    # Fetch the neuron name
    cursor.execute(
        '''SELECT name
        FROM class_instance ci,
             class_instance_class_instance cici
        WHERE cici.class_instance_a = %s
          AND cici.class_instance_b = ci.id
        ''' % skeleton_id)
    row = cursor.fetchone()
    if not row:
        # Check that the skeleton exists
        cursor.execute('''SELECT id FROM class_instance WHERE id=%s''' % skeleton_id)
        if not cursor.fetchone():
            raise Exception("Skeleton #%s doesn't exist!" % skeleton_id)
        else:
            raise Exception("No neuron found for skeleton #%s" % skeleton_id)
    name = row[0]
    if all_field:
        added_fields = ', creation_time, edition_time'
    else:
        added_fields = ''
    # Fetch all nodes, with their tags if any
    cursor.execute(
        '''SELECT id, parent_id, user_id, location_x, location_y, location_z, radius, confidence %s
        FROM treenode
        WHERE skeleton_id = %s
        ''' % (added_fields, skeleton_id) )
    # array of properties: id, parent_id, user_id, x, y, z, radius, confidence
    nodes = tuple(cursor.fetchall())
    tags = defaultdict(list) # node ID vs list of tags
    connectors = []
    # Get all reviews for this skeleton
    if all_field:
        reviews = get_treenodes_to_reviews_with_time(skeleton_ids=[skeleton_id])
    else:
        reviews = get_treenodes_to_reviews(skeleton_ids=[skeleton_id])
    if 0 == lean: # meaning not lean
        # Text tags
        cursor.execute("SELECT id FROM relation WHERE project_id=%s AND relation_name='labeled_as'" % int(project_id))
        labeled_as = cursor.fetchall()[0][0]
        cursor.execute(
            ''' SELECT treenode_class_instance.treenode_id, class_instance.name
            FROM treenode, class_instance, treenode_class_instance
            WHERE treenode.skeleton_id = %s
              AND treenode.id = treenode_class_instance.treenode_id
              AND treenode_class_instance.class_instance_id = class_instance.id
              AND treenode_class_instance.relation_id = %s
            ''' % (skeleton_id, labeled_as))
        for row in cursor.fetchall():
            tags[row[1]].append(row[0])
        if with_connectors:
            if all_field:
                added_fields = ', c.creation_time'
            else:
                added_fields = ''
            # Fetch all connectors with their partner treenode IDs
            cursor.execute(
                ''' SELECT tc.treenode_id, tc.connector_id, r.relation_name,
                    c.location_x, c.location_y, c.location_z %s
                FROM treenode_connector tc,
                     connector c,
                     relation r
                WHERE tc.skeleton_id = %s
                  AND tc.connector_id = c.id
                  AND tc.relation_id = r.id
                ''' % (added_fields, skeleton_id) )
            # Above, purposefully ignoring connector tags. Would require a left outer join on the inner join of connector_class_instance and class_instance, and frankly connector tags are pointless in the 3d viewer.
            # List of (treenode_id, connector_id, relation_id, x, y, z)n with relation_id replaced by 0 (presynaptic) or 1 (postsynaptic)
            # 'presynaptic_to' has an 'r' at position 1:
            for row in cursor.fetchall():
                x, y, z = imap(float, (row[3], row[4], row[5]))
                connectors.append((row[0],
                                   row[1],
                                   0 if 'r' == row[2][1] else 1,
                                   x, y, z,
                                   row[6] if all_field else None))
    # Fixed: the original contained an identical, unreachable duplicate of
    # this return statement immediately after it.
    return name, nodes, tags, connectors, reviews
# DEPRECATED. Will be removed.
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_for_3d_viewer(request, project_id=None, skeleton_id=None):
    # JSON endpoint wrapping _skeleton_for_3d_viewer; POST flags select
    # connectors ('with_connectors'), lean mode ('lean') and the extended
    # field set ('all_fields').
    return HttpResponse(json.dumps(_skeleton_for_3d_viewer(skeleton_id, project_id, with_connectors=request.POST.get('with_connectors', True), lean=int(request.POST.get('lean', 0)), all_field=request.POST.get('all_fields', False)), separators=(',', ':')))
# DEPRECATED. Will be removed.
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_with_metadata(request, project_id=None, skeleton_id=None):
    # Export the full skeleton (nodes, tags, connectors, reviews) including
    # creation/edition timestamps, serialized as JSON.
    def default(obj):
        """Default JSON serializer."""
        # Convert aware datetimes to UTC and emit epoch milliseconds.
        import calendar, datetime
        if isinstance(obj, datetime.datetime):
            if obj.utcoffset() is not None:
                obj = obj - obj.utcoffset()
            millis = int(
                calendar.timegm(obj.timetuple()) * 1000 +
                obj.microsecond / 1000
            )
        # NOTE(review): if obj is not a datetime, millis is never assigned
        # and this raises a NameError -- presumably only datetimes reach
        # this serializer; confirm.
        return millis
    return HttpResponse(json.dumps(_skeleton_for_3d_viewer(skeleton_id, project_id, \
        with_connectors=True, lean=0, all_field=True), separators=(',', ':'), default=default))
def _measure_skeletons(skeleton_ids):
    """Compute morphology statistics for each given skeleton ID.

    Returns a dict of skeleton ID vs a Skeleton accumulator holding raw
    and smoothed cable length, principal-branch cable, end/branch counts
    and synapse counts.
    """
    if not skeleton_ids:
        raise Exception("Must provide the ID of at least one skeleton.")
    # IDs come in as ints, so joining them into the IN (...) clause is safe.
    skids_string = ",".join(map(str, skeleton_ids))
    cursor = connection.cursor()
    cursor.execute('''
    SELECT id, parent_id, skeleton_id, location_x, location_y, location_z
    FROM treenode
    WHERE skeleton_id IN (%s)
    ''' % skids_string)
    # TODO should be all done with numpy,
    # TODO by partitioning the skeleton into sequences of x,y,z representing the slabs
    # TODO and then convolving them.

    class Skeleton():
        # Per-skeleton measurement accumulator.
        def __init__(self):
            self.nodes = {}
            self.raw_cable = 0
            self.smooth_cable = 0
            self.principal_branch_cable = 0
            self.n_ends = 0
            self.n_branch = 0
            self.n_pre = 0
            self.n_post = 0

    class Node():
        # One treenode: position, smoothed position and child distances.
        def __init__(self, parent_id, x, y, z):
            self.parent_id = parent_id
            self.x = x
            self.y = y
            self.z = z
            self.wx = x # weighted average of itself and neighbors
            self.wy = y
            self.wz = z
            self.children = {} # node ID vs distance

    skeletons = defaultdict(dict) # skeleton ID vs (node ID vs Node)
    for row in cursor.fetchall():
        skeleton = skeletons.get(row[2])
        if not skeleton:
            skeleton = Skeleton()
            skeletons[row[2]] = skeleton
        skeleton.nodes[row[0]] = Node(row[1], row[3], row[4], row[5])
    for skeleton in skeletons.itervalues():
        nodes = skeleton.nodes
        tree = nx.DiGraph()
        root = None
        # Accumulate children
        for nodeID, node in nodes.iteritems():
            if not node.parent_id:
                root = nodeID
                continue
            tree.add_edge(node.parent_id, nodeID)
            parent = nodes[node.parent_id]
            distance = sqrt( pow(node.x - parent.x, 2)
                           + pow(node.y - parent.y, 2)
                           + pow(node.z - parent.z, 2))
            parent.children[nodeID] = distance
            # Measure raw cable, given that we have the parent already
            skeleton.raw_cable += distance
        # Utilize accumulated children and the distances to them
        for nodeID, node in nodes.iteritems():
            # Count end nodes and branch nodes
            n_children = len(node.children)
            if not node.parent_id:
                if 1 == n_children:
                    skeleton.n_ends += 1
                    continue
                if n_children > 2:
                    skeleton.n_branch += 1
                    continue
                # Else, if 2 == n_children, the root node is in the middle of the skeleton, being a slab node
            elif 0 == n_children:
                skeleton.n_ends += 1
                continue
            elif n_children > 1:
                skeleton.n_branch += 1
                continue
            # Compute weighted position for slab nodes only
            # (root, branch and end nodes do not move)
            oids = node.children.copy()
            if node.parent_id:
                oids[node.parent_id] = skeleton.nodes[node.parent_id].children[nodeID]
            sum_distances = sum(oids.itervalues())
            wx, wy, wz = 0, 0, 0
            for oid, distance in oids.iteritems():
                other = skeleton.nodes[oid]
                w = distance / sum_distances if sum_distances != 0 else 0
                wx += other.x * w
                wy += other.y * w
                wz += other.z * w
            # Blend 40% of the node's own position with 60% of the
            # distance-weighted neighbor average.
            node.wx = node.x * 0.4 + wx * 0.6
            node.wy = node.y * 0.4 + wy * 0.6
            node.wz = node.z * 0.4 + wz * 0.6
        # Find out nodes that belong to the principal branch
        principal_branch_nodes = set(sorted(partition(tree, root), key=len)[-1])
        # Compute smoothed cable length, also for principal branch
        for nodeID, node in nodes.iteritems():
            if not node.parent_id:
                # root node
                continue
            parent = nodes[node.parent_id]
            length = sqrt( pow(node.wx - parent.wx, 2)
                         + pow(node.wy - parent.wy, 2)
                         + pow(node.wz - parent.wz, 2))
            skeleton.smooth_cable += length
            if nodeID in principal_branch_nodes:
                skeleton.principal_branch_cable += length
    # Count inputs
    # NOTE(review): this query counts rows where the skeleton is
    # postsynaptic (incoming synapses) but stores the result in .n_pre,
    # while the 'outputs' query below fills .n_post -- confirm the intended
    # naming before relying on these fields.
    cursor.execute('''
    SELECT tc.skeleton_id, count(tc.skeleton_id)
    FROM treenode_connector tc,
         relation r
    WHERE tc.skeleton_id IN (%s)
      AND tc.relation_id = r.id
      AND r.relation_name = 'postsynaptic_to'
    GROUP BY tc.skeleton_id
    ''' % skids_string)
    for row in cursor.fetchall():
        skeletons[row[0]].n_pre = row[1]
    # Count outputs
    cursor.execute('''
    SELECT tc1.skeleton_id, count(tc1.skeleton_id)
    FROM treenode_connector tc1,
         treenode_connector tc2,
         relation r1,
         relation r2
    WHERE tc1.skeleton_id IN (%s)
      AND tc1.connector_id = tc2.connector_id
      AND tc1.relation_id = r1.id
      AND r1.relation_name = 'presynaptic_to'
      AND tc2.relation_id = r2.id
      AND r2.relation_name = 'postsynaptic_to'
    GROUP BY tc1.skeleton_id
    ''' % skids_string)
    for row in cursor.fetchall():
        skeletons[row[0]].n_post = row[1]
    return skeletons
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def measure_skeletons(request, project_id=None):
    """Measure the skeletons given via POST 'skeleton_ids[...]' keys and
    return one JSON row per skeleton."""
    skeleton_ids = tuple(
        int(v) for k, v in request.POST.iteritems()
        if k.startswith('skeleton_ids['))
    def as_row(skid, sk):
        # skid, raw cable, smooth cable, inputs, outputs, node count,
        # branch count, end count, principal-branch cable
        return (skid, int(sk.raw_cable), int(sk.smooth_cable), sk.n_pre,
                sk.n_post, len(sk.nodes), sk.n_branch, sk.n_ends,
                sk.principal_branch_cable)
    rows = [as_row(skid, sk)
            for skid, sk in _measure_skeletons(skeleton_ids).iteritems()]
    return HttpResponse(json.dumps(rows))
def _skeleton_neuroml_cell(skeleton_id, preID, postID):
    """Fetch one skeleton's nodes and synaptic relations and convert them
    into a NeuroML Cell via neuroml_single_cell.

    preID/postID are the relation IDs for 'presynaptic_to' and
    'postsynaptic_to'.
    """
    skeleton_id = int(skeleton_id) # sanitize
    cursor = connection.cursor()
    # Parameterized SQL instead of % interpolation (safe by construction).
    cursor.execute('''
    SELECT id, parent_id, location_x, location_y, location_z, radius
    FROM treenode
    WHERE skeleton_id = %s
    ''', (skeleton_id,))
    nodes = {row[0]: (row[1], (row[2], row[3], row[4]), row[5]) for row in cursor.fetchall()}
    cursor.execute('''
    SELECT tc.treenode_id, tc.connector_id, tc.relation_id
    FROM treenode_connector tc
    WHERE tc.skeleton_id = %s
      AND (tc.relation_id = %s OR tc.relation_id = %s)
    ''', (skeleton_id, preID, postID))
    pre = defaultdict(list) # treenode ID vs list of connector ID
    post = defaultdict(list)
    for row in cursor.fetchall():
        if row[2] == preID:
            pre[row[0]].append(row[1])
        else:
            post[row[0]].append(row[1])
    return neuroml_single_cell(skeleton_id, nodes, pre, post)
@requires_user_role(UserRole.Browse)
def skeletons_neuroml(request, project_id=None):
    """ Export a list of skeletons each as a Cell in NeuroML. """
    project_id = int(project_id) # sanitize
    # Collect skeleton IDs from POST keys of the form 'skids[n]'.
    skeleton_ids = tuple(int(v) for k,v in request.POST.iteritems() if k.startswith('skids['))
    cursor = connection.cursor()
    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    preID = relations['presynaptic_to']
    postID = relations['postsynaptic_to']
    # TODO could certainly fetch all nodes and synapses in one single query and then split them up.
    # Generator: cells are built lazily while the response streams.
    cells = (_skeleton_neuroml_cell(skeleton_id, preID, postID) for skeleton_id in skeleton_ids)
    response = HttpResponse(content_type='text/txt')
    response['Content-Disposition'] = 'attachment; filename="data.neuroml"'
    neuroml_network(cells, response)
    return response
@requires_user_role(UserRole.Browse)
def export_neuroml_level3_v181(request, project_id=None):
    """Export the NeuroML Level 3 version 1.8.1 representation of one or more skeletons.
    Considers synapses among the requested skeletons only.

    POST parameters:
      skids[] -- skeleton IDs (sanitized to int below);
      mode    -- 0: mutual network of all given skeletons;
                 1: a single skeleton with all its inputs;
                 2: a single skeleton with inputs restricted to inputs[].
    """
    skeleton_ids = tuple(int(v) for v in request.POST.getlist('skids[]'))
    mode = int(request.POST.get('mode'))
    # IDs are int-derived, so interpolating the joined string into IN (...)
    # clauses below is safe.
    skeleton_strings = ",".join(map(str, skeleton_ids))
    cursor = connection.cursor()
    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    presynaptic_to = relations['presynaptic_to']
    postsynaptic_to = relations['postsynaptic_to']
    # Map skeleton ID to neuron name via the 'model_of' relation.
    cursor.execute('''
    SELECT cici.class_instance_a, ci.name
    FROM class_instance_class_instance cici,
         class_instance ci,
         relation r
    WHERE cici.class_instance_a IN (%s)
      AND cici.class_instance_b = ci.id
      AND cici.relation_id = r.id
      AND r.relation_name = 'model_of'
    ''' % skeleton_strings)
    neuron_names = dict(cursor.fetchall())
    skeleton_query = '''
        SELECT id, parent_id, location_x, location_y, location_z,
               radius, skeleton_id
        FROM treenode
        WHERE skeleton_id IN (%s)
        ORDER BY skeleton_id
        ''' % skeleton_strings
    if 0 == mode:
        cursor.execute('''
        SELECT treenode_id, connector_id, relation_id, skeleton_id
        FROM treenode_connector
        WHERE skeleton_id IN (%s)
          AND (relation_id = %s OR relation_id = %s)
        ''' % (skeleton_strings, presynaptic_to, postsynaptic_to))
        # Dictionary of connector ID vs map of relation_id vs list of treenode IDs
        connectors = defaultdict(partial(defaultdict, list))
        for row in cursor.fetchall():
            connectors[row[1]][row[2]].append((row[0], row[3]))
        # Dictionary of presynaptic skeleton ID vs map of postsynaptic skeleton ID vs list of tuples with presynaptic treenode ID and postsynaptic treenode ID.
        connections = defaultdict(partial(defaultdict, list))
        for connectorID, m in connectors.iteritems():
            for pre_treenodeID, skID1 in m[presynaptic_to]:
                for post_treenodeID, skID2 in m[postsynaptic_to]:
                    connections[skID1][skID2].append((pre_treenodeID, post_treenodeID))
        cursor.execute(skeleton_query)
        generator = export_NeuroML_Level3.exportMutual(neuron_names, cursor.fetchall(), connections)
    else:
        if len(skeleton_ids) > 1:
            raise Exception("Expected a single skeleton for mode %s!" % mode)
        input_ids = tuple(int(v) for v in request.POST.getlist('inputs[]', []))
        input_strings = ",".join(map(str, input_ids))
        if 2 == mode:
            constraint = "AND tc2.skeleton_id IN (%s)" % input_strings
        elif 1 == mode:
            constraint = ""
        else:
            raise Exception("Unknown mode %s" % mode)
        # tc1 is the requested (postsynaptic) skeleton's side of each
        # connector; tc2 the presynaptic partner providing the input.
        cursor.execute('''
        SELECT tc2.skeleton_id, tc1.treenode_id
        FROM treenode_connector tc1,
             treenode_connector tc2
        WHERE tc1.skeleton_id = %s
          AND tc1.connector_id = tc2.connector_id
          AND tc1.treenode_id != tc2.treenode_id
          AND tc1.relation_id = %s
          AND tc2.relation_id = %s
          %s
        ''' % (skeleton_strings, postsynaptic_to, presynaptic_to, constraint))
        # Dictionary of skeleton ID vs list of treenode IDs at which the neuron receives inputs
        inputs = defaultdict(list)
        for row in cursor.fetchall():
            inputs[row[0]].append(row[1])
        cursor.execute(skeleton_query)
        generator = export_NeuroML_Level3.exportSingle(neuron_names, cursor.fetchall(), inputs)
    response = HttpResponse(generator, content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename=neuronal-circuit.neuroml'
    return response
@requires_user_role(UserRole.Browse)
def skeleton_swc(*args, **kwargs):
    # Convenience view: force SWC output and delegate to the generic exporter.
    kwargs['format'] = 'swc'
    return export_skeleton_response(*args, **kwargs)
def _export_review_skeleton(project_id=None, skeleton_id=None,
                            subarbor_node_id=None):
    """ Returns a list of segments for the requested skeleton. Each segment
    contains information about the review status of this part of the skeleton.
    If a valid subarbor_node_id is given, only data for the sub-arbor is
    returned that starts at this node.
    """
    # Get all treenodes of the requested skeleton, together with any
    # suppressed virtual treenode info attached to them.
    cursor = connection.cursor()
    cursor.execute("""
        SELECT
            t.id,
            t.parent_id,
            t.location_x,
            t.location_y,
            t.location_z,
            ARRAY_AGG(svt.orientation),
            ARRAY_AGG(svt.location_coordinate)
        FROM treenode t
        LEFT OUTER JOIN suppressed_virtual_treenode svt
          ON (t.id = svt.child_id)
        WHERE t.skeleton_id = %s
        GROUP BY t.id;
        """, (skeleton_id,))
    treenodes = cursor.fetchall()
    # Get all reviews for the requested skeleton
    reviews = get_treenodes_to_reviews_with_time(skeleton_ids=[skeleton_id])
    if 0 == len(treenodes):
        return []
    # The root node will be assigned below, depending on retrieved nodes and
    # sub-arbor requests
    root_id = None
    # Add each treenode to a networkx graph and attach reviewer information to
    # it.
    g = nx.DiGraph()
    reviewed = set()
    for t in treenodes:
        # While at it, send the reviewer IDs, which is useful to iterate fwd
        # to the first unreviewed node in the segment.
        g.add_node(t[0], {'id': t[0],
                          'x': t[2],
                          'y': t[3],
                          'z': t[4],
                          'rids': reviews[t[0]],
                          # Keep only rows where a suppressed virtual treenode
                          # actually exists (orientation is not NULL).
                          'sup': [[o, l] for [o, l] in zip(t[5], t[6]) if o is not None]})
        if reviews[t[0]]:
            reviewed.add(t[0])
        if t[1]:  # if parent
            g.add_edge(t[1], t[0])  # edge from parent to child
        else:
            root_id = t[0]
    if subarbor_node_id and subarbor_node_id != root_id:
        # Make sure the subarbor node ID (if any) is part of this skeleton
        if subarbor_node_id not in g:
            raise ValueError("Supplied subarbor node ID (%s) is not part of "
                             "provided skeleton (%s)" % (subarbor_node_id, skeleton_id))
        # Remove connection to parent
        parent = g.predecessors(subarbor_node_id)[0]
        g.remove_edge(parent, subarbor_node_id)
        # Remove all nodes that are upstream from the subarbor node
        to_delete = set()
        to_lookat = [root_id]
        while to_lookat:
            n = to_lookat.pop()
            to_lookat.extend(g.successors(n))
            to_delete.add(n)
        g.remove_nodes_from(to_delete)
        # Replace root id with sub-arbor ID
        root_id = subarbor_node_id
    if not root_id:
        # Fix: the two messages were swapped — the subarbor-specific message
        # was raised on the branch where subarbor_node_id is None (and vice
        # versa), so it always interpolated "subarbor (None)".
        if subarbor_node_id:
            raise ValueError("Couldn't find a reference root node for provided "
                             "subarbor (%s) in provided skeleton (%s)" % (subarbor_node_id, skeleton_id))
        else:
            raise ValueError("Couldn't find a reference root node in provided "
                             "skeleton (%s)" % (skeleton_id,))
    # Create all sequences, as long as possible and always from end towards root
    distances = edge_count_to_root(g, root_node=root_id)  # distance in number of edges from root
    seen = set()
    sequences = []
    # Iterate end nodes sorted from highest to lowest distance to root
    endNodeIDs = (nID for nID in g.nodes() if 0 == len(g.successors(nID)))
    for nodeID in sorted(endNodeIDs, key=distances.get, reverse=True):
        sequence = [g.node[nodeID]]
        parents = g.predecessors(nodeID)
        while parents:
            parentID = parents[0]
            sequence.append(g.node[parentID])
            # Stop extending once we hit a node already covered by another
            # (longer) sequence, so segments don't overlap beyond one node.
            if parentID in seen:
                break
            seen.add(parentID)
            parents = g.predecessors(parentID)
        if len(sequence) > 1:
            sequences.append(sequence)
    # Calculate status
    segments = []
    for sequence in sorted(sequences, key=len, reverse=True):
        segments.append({
            'id': len(segments),
            'sequence': sequence,
            'status': '%.2f' % (100.0 * sum(1 for node in sequence if node['id'] in reviewed) / len(sequence)),
            'nr_nodes': len(sequence)
        })
    return segments
@api_view(['POST'])
@requires_user_role(UserRole.Browse)
def export_review_skeleton(request, project_id=None, skeleton_id=None):
    """Export skeleton as a set of segments with per-node review information.

    Export the skeleton as a list of segments of non-branching node paths,
    with detailed information on reviewers and review times for each node.
    ---
    parameters:
    - name: subarbor_node_id
      description: |
        If provided, only the subarbor starting at this treenode is returned.
      required: false
      type: integer
      paramType: form
    models:
      export_review_skeleton_segment:
        id: export_review_skeleton_segment
        properties:
          status:
            description: |
              Percentage of nodes in this segment reviewed by the request user
            type: number
            format: double
            required: true
          id:
            description: |
              Index of this segment in the list (order by descending segment
              node count)
            type: integer
            required: true
          nr_nodes:
            description: Number of nodes in this segment
            type: integer
            required: true
          sequence:
            description: Detail for nodes in this segment
            type: array
            items:
              type: export_review_skeleton_segment_node
            required: true
      export_review_skeleton_segment_node:
        id: export_review_skeleton_segment_node
        properties:
          id:
            description: ID of this treenode
            type: integer
            required: true
          x:
            type: double
            required: true
          y:
            type: double
            required: true
          z:
            type: double
            required: true
          rids:
            type: array
            items:
              type: export_review_skeleton_segment_node_review
            required: true
          sup:
            type: array
            items:
              type: export_review_skeleton_segment_node_sup
            required: true
      export_review_skeleton_segment_node_review:
        id: export_review_skeleton_segment_node_review
        properties:
        - description: Reviewer ID
          type: integer
          required: true
        - description: Review timestamp
          type: string
          format: date-time
          required: true
      export_review_skeleton_segment_node_sup:
        id: export_review_skeleton_segment_node_sup
        properties:
        - description: |
            Stack orientation to determine which axis is the coordinate of the
            plane where virtual nodes are suppressed. 0 for z, 1 for y, 2 for x.
          required: true
          type: integer
        - description: |
            Coordinate along the edge from this node to its parent where
            virtual nodes are suppressed.
          required: true
          type: number
          format: double
    type:
    - type: array
      items:
        type: export_review_skeleton_segment
      required: true
    """
    # An absent or non-numeric 'subarbor_node_id' parameter selects the whole
    # skeleton (int('') raises ValueError, yielding None).
    try:
        subarbor_node_id = int(request.POST.get('subarbor_node_id', ''))
    except ValueError:
        subarbor_node_id = None
    segments = _export_review_skeleton(project_id, skeleton_id, subarbor_node_id)
    return HttpResponse(json.dumps(segments, cls=DjangoJSONEncoder),
                        content_type='application/json')
@requires_user_role(UserRole.Browse)
def skeleton_connectors_by_partner(request, project_id):
    """ Return a dict of requested skeleton vs relation vs partner skeleton vs list of connectors.
    Connectors lacking a skeleton partner will of course not be included. """
    # Requested skeletons arrive as POST keys of the form 'skids[n]'; int()
    # both parses and sanitizes the values before SQL interpolation below.
    skeleton_ids = set(int(v) for k,v in request.POST.iteritems() if k.startswith('skids['))
    cursor = connection.cursor()
    relations = get_relation_to_id_map(project_id, ('presynaptic_to', 'postsynaptic_to'), cursor)
    pre = relations['presynaptic_to']
    post = relations['postsynaptic_to']
    # Self-join of treenode_connector: tc1 is a treenode of a requested
    # skeleton, tc2 a treenode of a *different* skeleton sharing the same
    # connector with the opposite relation (pre vs post). All interpolated
    # values are integers, so string formatting is safe here.
    cursor.execute('''
        SELECT tc1.skeleton_id, tc1.relation_id,
               tc2.skeleton_id, tc1.connector_id
        FROM treenode_connector tc1,
             treenode_connector tc2
        WHERE tc1.skeleton_id IN (%s)
          AND tc1.connector_id = tc2.connector_id
          AND tc1.skeleton_id != tc2.skeleton_id
          AND tc1.relation_id != tc2.relation_id
          AND (tc1.relation_id = %s OR tc1.relation_id = %s)
          AND (tc2.relation_id = %s OR tc2.relation_id = %s)
        ''' % (','.join(map(str, skeleton_ids)), pre, post, pre, post))
    # Dict of skeleton vs relation vs skeleton vs list of connectors
    partners = defaultdict(partial(defaultdict, partial(defaultdict, list)))
    for row in cursor.fetchall():
        # row[1] is the relation of the *requested* skeleton's treenode.
        relation_name = 'presynaptic_to' if row[1] == pre else 'postsynaptic_to'
        partners[row[0]][relation_name][row[2]].append(row[3])
    return HttpResponse(json.dumps(partners))
@requires_user_role(UserRole.Browse)
def export_skeleton_reviews(request, project_id=None, skeleton_id=None):
    """ Return a map of treenode ID vs list of reviewer IDs,
    without including any unreviewed treenode. """
    reviews = defaultdict(list)
    review_rows = Review.objects.filter(
        skeleton_id=int(skeleton_id)).values_list(
        'treenode_id', 'reviewer_id', 'review_time')
    for treenode_id, reviewer_id, review_time in review_rows.iterator():
        reviews[treenode_id].append((reviewer_id, review_time))
    return HttpResponse(json.dumps(reviews, separators=(',', ':'), cls=DjangoJSONEncoder))
@requires_user_role(UserRole.Browse)
def partners_by_connector(request, project_id=None):
    """ Return a list of skeleton IDs related to the given list of connector IDs of the given skeleton ID.
    Will optionally filter for only presynaptic (relation=0) or only postsynaptic (relation=1). """
    skid = request.POST.get('skid', None)
    if not skid:
        raise Exception("Need a reference skeleton ID!")
    skid = int(skid)
    connectors = tuple(int(v) for k,v in request.POST.iteritems() if k.startswith('connectors['))
    rel_type = int(request.POST.get("relation", 0))
    size_mode = int(request.POST.get("size_mode", 0))
    # Fix: an empty connector list would produce "IN ()", which is invalid
    # SQL. Without connectors there can be no partners, so return early.
    if not connectors:
        return HttpResponse(json.dumps(()))
    # All interpolated values are integers (parsed above), so string
    # formatting cannot inject arbitrary SQL here.
    query = '''
        SELECT DISTINCT tc2.skeleton_id
        FROM treenode_connector tc1,
             treenode_connector tc2
        WHERE tc1.project_id = %s
          AND tc1.skeleton_id = %s
          AND tc1.connector_id = tc2.connector_id
          AND tc1.skeleton_id != tc2.skeleton_id
          AND tc1.relation_id != tc2.relation_id
          AND tc1.connector_id IN (%s)
        ''' % (project_id, skid, ",".join(str(x) for x in connectors))
    # Constrain the relation of the second part
    if 0 == rel_type or 1 == rel_type:
        query += "AND tc2.relation_id = (SELECT id FROM relation WHERE project_id = %s AND relation_name = '%s')" % (project_id, 'presynaptic_to' if 1 == rel_type else 'postsynaptic_to')
    cursor = connection.cursor()
    cursor.execute(query)
    if 0 == size_mode or 1 == size_mode:
        # Filter by size: only those with more than one treenode (size_mode 0)
        # or with exactly one (size_mode 1).
        partner_skids = [row[0] for row in cursor.fetchall()]
        # Fix: guard against an empty partner set, which would again build an
        # invalid "IN ()" clause.
        if not partner_skids:
            return HttpResponse(json.dumps(()))
        cursor.execute('''
            SELECT skeleton_id
            FROM treenode
            WHERE skeleton_id IN (%s)
            GROUP BY skeleton_id
            HAVING count(*) %s 1
            ''' % (",".join(map(str, partner_skids)), ">" if 0 == size_mode else "="))
    return HttpResponse(json.dumps(tuple(row[0] for row in cursor.fetchall())))
|
catsop/CATMAID
|
django/applications/catmaid/control/skeletonexport.py
|
Python
|
gpl-3.0
| 39,451
|
[
"NEURON"
] |
5045383d243f7d194fda7038dd4c769c025223984f86a029b93ef22d794acba3
|
import brain_state_calculate_c as bsc
import numpy as np
import copy
import random as rnd
import pickle
import matplotlib.pyplot as plt
from collections import OrderedDict
from cpp_file_tools_c import cpp_file_tools
class ChangeObs:
    """Simulates day-to-day drift of a neural recording.

    On construction (with a fixed RNG seed for reproducibility) it randomly
    selects channel swaps and per-channel offsets from the given observation
    matrix; change() then applies that same perturbation to any matrix.
    """
    def __init__(self, l_obs):
        # l_obs: 2-D observations (rows = samples, columns = channels).
        rnd.seed(42)
        #which col we should move
        self.move_chan = []
        #where we should move the col
        self.move_chan_to = []
        #how much we should modulate the given col
        self.value_modulate = []
        #params for the mean (offset) modulation
        mu = 0
        sigma = 1
        l_obs = np.array(l_obs)
        #indices of modulated channels (non-zero column sums)
        self.mod_chan = l_obs.sum(0).nonzero()[0]
        #number of channels
        self.nbchan = len(l_obs[0])
        #params for number of chan to move:
        #35% of modulated chan lost or gained per day with 28% of std
        mean_move = 0.35 * self.mod_chan.shape[0]
        std_move = 0.28 * self.mod_chan.shape[0]
        change_x_chan = 0
        # Redraw until at least one channel is selected to move.
        while change_x_chan < 1:
            change_x_chan = self.f2i(rnd.gauss(mean_move, std_move))
        for i in range(change_x_chan):
            # NOTE(review): drawn in [0, len(mod_chan)-1] but later compared
            # against raw column indices in change() -- confirm intended.
            self.move_chan.append(self.f2i(rnd.uniform(0, self.mod_chan.shape[0]-1)))
            self.move_chan_to.append(self.f2i(rnd.uniform(0, self.nbchan-1)))
        for i in range(self.nbchan):
            self.value_modulate.append(self.f2i(rnd.gauss(mu, sigma)))
        # Python 2 print statements (file-wide convention).
        print self.mod_chan
        print self.move_chan
        print self.move_chan_to
        print self.value_modulate

    def change(self, l_obs):
        """Apply the precomputed drift to l_obs; returns a new numpy array."""
        l_obs = np.array(l_obs)
        save_obs=copy.copy(l_obs)  # NOTE(review): appears unused
        for c in range(l_obs.shape[1]):
            if c in self.mod_chan:
                # Additive offset on modulated channels.
                l_obs[:, c] = l_obs[:, c]+self.value_modulate[c]
            if c in self.move_chan:
                # Swap column c with its assigned destination column.
                ind = self.move_chan.index(c)
                move_to = self.move_chan_to[ind]
                tmp = copy.copy(l_obs[:, move_to])
                l_obs[:, move_to] = l_obs[:, c]
                l_obs[:, c] = tmp
        #we allow burst count to be negative in order to avoid all value set to zero after X "day"
        #l_obs[l_obs < 0] = 0
        return l_obs

    @staticmethod
    def f2i(number):
        #convert float to the nearest int
        return int(round(number, 0))

    @staticmethod
    def expand_walk(l_res, extend_before, extend_after):
        #expand walk if we want to simulate cue
        start_after = []
        for i in range(len(l_res)-1):
            if l_res[i] != l_res[i+1]:
                if l_res[i] == [1, 0]:
                    # rest -> walk transition: extend the walk state backwards
                    for n in range(i-extend_before, i+1):
                        if 0 < n < len(l_res):
                            l_res[n] = [0, 1]
                else:
                    # walk -> rest transition: remember it, extend forwards later
                    start_after.append(i)
        for i in start_after:
            for n in range(i, i+extend_after):
                if 0 < n < len(l_res):
                    l_res[n] = [0, 1]
        return l_res
# Custom exception raised by abstract benchmark methods.
class NotImplementedException(Exception):
    """Raised by Benchmark base-class stubs that subclasses must override."""

    def __init__(self, value):
        # Keep the payload on the instance; __str__ echoes its repr.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
#do some plot and analysis
class Analyse_Result:
    """Post-processing of benchmark result dictionaries: success rate and
    accuracy per trial/day, plus matplotlib plots."""

    # Keys stored at the per-rat level that are *not* dates.
    def __init__(self, nb_chan, group_by):
        # Plot/output options.
        self.ext_img = '.png'
        self.save_img = True
        self.show = False
        self.img_save_path = 'benchmark_img/'
        # Result keys treated as ground truth (never scored as classifiers).
        self.ground_truth = ['gnd_truth']
        self.my_cft = cpp_file_tools(nb_chan, group_by, self.ext_img, self.save_img, self.show,ion=False)

    @staticmethod
    def import_file(filename):
        """Unpickle and return a previously saved result dictionary."""
        with open(filename, 'rb') as my_file:
            return pickle.load(my_file)

    def success_rate_over_day(self, res_dict, group_by=1):
        #compute success rate for each trial. Trials can be grouped if group_by>1
        for rat in res_dict:
            #for each rat in the dictionary we compute success rate for each classifier
            for date in res_dict[rat]:
                # Skip the bookkeeping keys that live beside the dates.
                if date not in ['success_rate', 'success_rate_mean', 'date_change', 'accuracy', 'accuracy_mean']:
                    res_dict[rat][date]['success_rate']={}
                    for lor in res_dict[rat][date]['l_of_res']:
                        for res in lor:
                            if res not in self.ground_truth:
                                success_rate = self.my_cft.success_rate(lor[res], lor[self.ground_truth[0]])
                                # EAFP: first hit creates the list.
                                try:
                                    res_dict[rat][date]['success_rate'][res].append(success_rate)
                                except:
                                    res_dict[rat][date]['success_rate'][res] = [success_rate]
        #success rates are in the date layer and we want them on the rat layer to plot more easily
        return self.group_day(res_dict, 'success_rate', group_by=group_by)

    def success_rate_mean_day(self, res_dict):
        """Add per-day mean success rate under res_dict[rat]['success_rate_mean']."""
        self.success_rate_over_day(res_dict)
        for rat in res_dict:
            #for each rat we compute the mean success rate of each day
            res_dict[rat]['success_rate_mean'] = {}
            for date in res_dict[rat]:
                if date not in ['success_rate', 'success_rate_mean', 'date_change', 'accuracy', 'accuracy_mean']:
                    for res in res_dict[rat][date]['success_rate']:
                        mean = np.array(res_dict[rat][date]['success_rate'][res]).mean()
                        try:
                            res_dict[rat]['success_rate_mean'][res].append(mean)
                        except:
                            res_dict[rat]['success_rate_mean'][res] = [mean]
        return res_dict

    def accuracy_over_day(self, res_dict, group_by=1):
        #same as success_rate but for accuracy
        #accuracy is (%correct_walk + %correct_rest)/2
        for rat in res_dict:
            for date in res_dict[rat]:
                if date not in ['success_rate', 'success_rate_mean', 'date_change', 'accuracy', 'accuracy_mean']:
                    res_dict[rat][date]['accuracy'] = {}
                    for lor in res_dict[rat][date]['l_of_res']:
                        for res in lor:
                            if res not in self.ground_truth:
                                # NOTE(review): uses ground_truth[-1] where
                                # success_rate uses [0]; identical while the
                                # list has one element.
                                accuracy = self.my_cft.accuracy(lor[res], lor[self.ground_truth[-1]])
                                try:
                                    res_dict[rat][date]['accuracy'][res].append(accuracy)
                                except:
                                    res_dict[rat][date]['accuracy'][res] = [accuracy]
        return self.group_day(res_dict, 'accuracy', group_by=group_by)

    def accuracy_mean_day(self, res_dict):
        #same as success rate but for accuracy
        self.accuracy_over_day(res_dict)
        for rat in res_dict:
            res_dict[rat]['accuracy_mean'] = {}
            for date in res_dict[rat]:
                if date not in ['success_rate', 'success_rate_mean', 'date_change', 'accuracy', 'accuracy_mean']:
                    for res in res_dict[rat][date]['accuracy']:
                        mean = np.array(res_dict[rat][date]['accuracy'][res]).mean()
                        try:
                            res_dict[rat]['accuracy_mean'][res].append(mean)
                        except:
                            res_dict[rat]['accuracy_mean'][res] = [mean]
        return res_dict

    def group_day(self, res_dict, key, group_by=1):
        """Lift per-date lists under `key` to the rat layer, averaging every
        `group_by` consecutive trials; also records day boundaries in
        res_dict[rat]['date_change']."""
        for rat in res_dict:
            res_dict[rat][key] = {}
            res_dict[rat]['date_change'] = []
            cpt = 0
            i = 0
            #we search the name of one classifier to compute the number of trials per day
            # (Python 2: dict.keys() returns an indexable list.)
            while True:
                first_date = res_dict[rat].keys()[i]
                i += 1
                if first_date not in ['success_rate', 'success_rate_mean', 'date_change', 'accuracy', 'accuracy_mean']:
                    break
            i = 0
            while True:
                first_res = res_dict[rat][first_date][key].keys()[i]
                i += 1
                if first_res not in self.ground_truth:
                    break
            for date in res_dict[rat]:
                #exclude keys that are not dates
                if date not in ['success_rate', 'success_rate_mean', 'date_change', 'accuracy', 'accuracy_mean']:
                    for res in res_dict[rat][date][key]:
                        tmp = res_dict[rat][date][key][res]
                        tmp_val = 0
                        for i in range(len(tmp)):
                            tmp_val += tmp[i]
                            # Emit the mean of each full group of `group_by` trials.
                            if (i+1) % group_by == 0:
                                tmp_val /= float(group_by)
                                try:
                                    res_dict[rat][key][res].append(tmp_val)
                                except:
                                    res_dict[rat][key][res] = [tmp_val]
                                tmp_val = 0
                                if res == first_res:
                                    cpt += 1
                        #if at the end there are not enough trials to fill group_by
                        if tmp_val != 0:
                            tmp_val /= float(len(tmp) % group_by)
                            if res == first_res:
                                cpt += 1
                            try:
                                res_dict[rat][key][res].append(tmp_val)
                            except:
                                res_dict[rat][key][res] = [tmp_val]
                        # Record the day boundary once per date (when we pass
                        # the reference classifier). NOTE(review): nesting
                        # reconstructed from a whitespace-stripped source --
                        # confirm against the original file.
                        if res == first_res:
                            res_dict[rat]['date_change'].append(cpt-0.5)
        return res_dict

    def plot_over_day(self, res_dict, key, exclude_res=None, width=0, height=0):
        """One figure per rat, one subplot per classifier, with day boundaries
        drawn as vertical lines."""
        color = ['b', 'r', 'g', 'c', 'm', 'y', 'k']
        if exclude_res is None:
            exclude_res = []
        for rat in res_dict:
            if width == 0 and height == 0:
                plt.figure()
            else:
                plt.figure(figsize=(width, height))
            cpt = 0
            res_count = len(res_dict[rat][key].keys())
            for res in res_dict[rat][key]:
                # NOTE(review): despite its name, a non-empty exclude_res acts
                # as an *include* filter (`res in exclude_res`).
                if len(exclude_res) == 0 or res in exclude_res:
                    plt.subplot(res_count, 1, cpt)
                    plt.plot(res_dict[rat][key][res], color[cpt % len(color)]+'o-', label=res)
                    cpt += 1
                    plt.ylabel(res)
                    plt.ylim(-0.1, 1.1)
                    for end in res_dict[rat]['date_change']:
                        plt.vlines(end, -0.1, 1.1)
            plt.tight_layout()
            if self.save_img:
                plt.savefig(self.img_save_path+'evo_'+key+'_over_day_'+rat+self.ext_img)
            if self.show:
                plt.show()
            else:
                plt.close()

    def plot_mean(self, res_dict, key, exclude_res=None):
        """One figure per rat with all classifier curves for `key` overlaid."""
        if exclude_res is None:
            exclude_res=[]
        for rat in res_dict:
            plt.figure()
            for res in res_dict[rat][key]:
                # Same include-filter semantics as plot_over_day.
                if len(exclude_res) == 0 or res in exclude_res:
                    plt.plot(res_dict[rat][key][res], label=res)
            plt.ylim(-0.1, 1.1)
            plt.legend()
            if self.save_img:
                plt.savefig(self.img_save_path+'evo_'+key+'_mean_'+rat+self.ext_img)
            if self.show:
                plt.show()
            else:
                plt.close()
class Benchmark(object):
    """Base driver for walk-classifier benchmarks on recorded (SCI) or
    simulated data. Subclasses provide the classifier via the init/test/train
    hooks at the bottom of the class."""

    def __init__(self, nb_chan, group_by):
        #general options
        self.save_obj = False
        self.ext_img = '.png'
        self.save_img = True
        self.show = False
        self.img_save_path = 'benchmark_img/'
        self.my_cft = cpp_file_tools(nb_chan, group_by, self.ext_img, self.save_img, self.show, ion=False)
        self.res_dict = {}
        #simulated benchmark options
        self.simulated_dir_name = '../data/RT_classifier/BMIOutputs/0423_r600/'
        simulated_iteration = 5
        self.simulated_files = [2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14]
        self.simulated_date = 't_0423'
        self.simulated_rat = 'r0'
        self.simulated_corename = 'healthyOutput_'
        self.simulated_change_every = len(self.simulated_files)
        self.simulated_first_train = 3
        # Repeat the file list `simulated_iteration` times to simulate days.
        tmp = []
        for i in range(simulated_iteration):
            tmp += self.simulated_files
        self.simulated_files = tmp
        #SCI benchmark options
        self.SCI_dir_name = '../data/RT_classifier/BMIOutputs/BMISCIOutputs/'
        self.SCI_corename = 'SCIOutput_'
        self.SCI_first_train = 5
        self.SCI_min_obs = 10
        # Per-rat mapping of day (December date) to trial file numbers.
        self.SCI_files = {'r31': OrderedDict([
                              ('03', range(1, 25)+range(52, 58)),
                              ('04', range(1, 45)),
                              ('06', range(78, 113)),
                              ('07', range(27, 51)),
                              ('10', range(6, 31)),
                              ('11', range(1, 16)),
                              ('12', range(1, 27)),
                              ('13', range(63, 89)),
                              ('14', range(1, 23))]),
                          'r32': OrderedDict([
                              ('03', range(25, 52)),
                              ('04', range(45, 83)),
                              ('06', range(42, 78)),
                              ('07', range(51, 82)),
                              ('10', range(31, 69)),
                              ('11', range(1, 36)),
                              ('12', range(27, 54)),
                              ('13', range(32, 63))]),
                          'r34': OrderedDict([
                              ('06', range(1, 42)),
                              ('07', range(1, 27)),
                              ('11', range(1, 31)),
                              ('12', range(54, 87)),
                              ('13', range(1, 32)),
                              ('14', range(23, 48))])
                          }

    def benchmark_SCI_data(self, shuffle_obs=False):
        """Run the benchmark over the recorded SCI sessions; returns the
        result dictionary (rat -> date -> list of per-trial results)."""
        self.res_dict = {}
        for rat in self.SCI_files.keys():
            init_networks = True
            self.res_dict[rat] = {}
            for date in self.SCI_files[rat].keys():
                dir_name = self.SCI_dir_name + 'Dec' + date + '/' + rat + '/'
                fulldate = '12'+date
                self.res_dict[rat][fulldate] = {'l_of_res': []}
                print('---------- ' + rat + ' ' + date + ' ----------')
                files = self.my_cft.convert_to_filename_list(dir_name, fulldate, self.SCI_files[rat][date][0:self.SCI_first_train], self.SCI_corename)
                # Build the classifier once per rat, from the first trials.
                if init_networks:
                    init_networks = False
                    self.init_classifier()
                    self.init_test(files)
                new_date = True
                #for each file of the day (=date)
                for n in range(self.SCI_first_train, len(self.SCI_files[rat][date])-1):
                    print('### ### ### ### ### ### ### ### ###')
                    print(rat+'_'+str(fulldate)+'_'+str(n)+str(self.SCI_files[rat][date][n:n+1]))
                    #get obs
                    files = self.my_cft.convert_to_filename_list(dir_name, fulldate, self.SCI_files[rat][date][n:n+1], self.SCI_corename)
                    l_res, l_obs = self.my_cft.read_cpp_files(files, use_classifier_result=False, cut_after_cue=True)
                    #if the trial is too short or has no neuron modulated we don't train
                    if len(l_obs) > self.SCI_min_obs and np.array(l_obs).sum() > 0:
                        if shuffle_obs:
                            l_obs = self.shuffle_obs(l_obs)
                        l_of_res = self.test_network_with_obs(l_obs, l_res)
                        self.res_dict[rat][fulldate]['l_of_res'].append(l_of_res)
                        if self.save_img or self.show:
                            self.my_cft.plot_result(l_of_res, 'SCI_data_'+rat+'_'+str(fulldate)+'_'+str(n)+str(self.SCI_files[rat][date][n:n+1]), self.img_save_path)
                        #when a new day starts, first learn with mod_chan
                        try:
                            self.train_with_obs(l_obs, l_res, new_date)
                            if new_date:
                                new_date = False
                        except ValueError:
                            print('goto the next trial')
        print('###############')
        print('#### END ####')
        return self.res_dict

    def benchmark_simulated_data_from_healthy(self, shuffle_obs=False):
        """Run the benchmark on healthy recordings, injecting simulated
        day-to-day drift via ChangeObs; returns the result dictionary."""
        #save the res
        chg_obs = []
        rnd.seed(42)
        rat = self.simulated_rat
        self.res_dict = {rat: {str(len(chg_obs)): {'l_of_res': []}}}
        date = 0
        self.init_classifier()
        #init net
        files = self.my_cft.convert_to_filename_list(self.simulated_dir_name, self.simulated_date, self.simulated_files[0:self.simulated_first_train], self.simulated_corename)
        self.init_test(files)
        for i in range(self.simulated_first_train, len(self.simulated_files)):
            files = self.my_cft.convert_to_filename_list(self.simulated_dir_name, self.simulated_date, self.simulated_files[i:i+1], self.simulated_corename)
            l_res, l_obs = self.my_cft.read_cpp_files(files, use_classifier_result=False, cut_after_cue=False)
            #apply all accumulated drift transforms
            for chg in chg_obs:
                l_obs = chg.change(l_obs)
            #every `simulated_change_every` trials, add a new drift ("new day")
            if i % self.simulated_change_every == 0:
                chg_obs.append(ChangeObs(l_obs))
                l_obs = chg_obs[-1].change(l_obs)
                print('change obs:'+str(len(chg_obs)))
                date = str(len(chg_obs))
                self.res_dict[rat][date] = {'l_of_res': []}
            #to simulate the cue we extend the walk periods
            extend_before = ChangeObs.f2i(rnd.gauss(0.4/0.1, 0.5))
            extend_after = ChangeObs.f2i(rnd.uniform(10, 30))
            l_res = ChangeObs.expand_walk(l_res, extend_before, extend_after)
            if shuffle_obs:
                l_obs = self.shuffle_obs(l_obs)
            print('### ### ### ### ### ### ### ### ###')
            print(rat+'_'+str(date)+'_'+str(i)+str(self.simulated_files[i:i+1]))
            l_of_res = self.test_network_with_obs(l_obs, l_res)
            l_res_gnd_truth, l_obs_trash = self.my_cft.convert_cpp_file(self.simulated_dir_name, self.simulated_date,
                                                                        self.simulated_files[i:i + 1],
                                                                        use_classifier_result=False,
                                                                        file_core_name=self.simulated_corename,
                                                                        cut_after_cue=False)
            l_of_res['real_gnd_truth'] = np.array(l_res_gnd_truth).argmax(1)
            self.res_dict[rat][str(len(chg_obs))]['l_of_res'].append(l_of_res)
            if self.save_img or self.show:
                self.my_cft.plot_result(l_of_res, 'simulated_data_'+rat+'_'+str(date)+'_'+str(i)+str(self.simulated_files[i:i+1]), self.img_save_path)
            try:
                if i % self.simulated_change_every == 0:
                    self.train_with_obs(l_obs, l_res, True)
                else:
                    self.train_with_obs(l_obs, l_res, False)
            except ValueError:
                print('goto the next trial')
        print('###############')
        print('#### END ####')
        return self.res_dict

    def save_result(self, path='', extra_txt=''):
        """Pickle self.res_dict to <path>result_<extra_txt>.pyObj."""
        filename = path+'result_'+extra_txt+'.pyObj'
        with open(filename, 'wb') as my_file:
            my_pickler = pickle.Pickler(my_file)
            my_pickler.dump(self.res_dict)

    @staticmethod
    def shuffle_obs(l_obs):
        """Shuffle the observations in place and return them.

        Fix: random.shuffle() shuffles in place and returns None; callers do
        `l_obs = self.shuffle_obs(l_obs)`, which previously replaced the data
        with None. Returning the list preserves those call sites.
        """
        rnd.shuffle(l_obs)
        return l_obs

    def change_chan_group_by(self, nb_chan, group_by):
        # Rebuild the file-tools helper with a new channel/grouping config.
        self.my_cft = cpp_file_tools(nb_chan, group_by, self.ext_img, self.save_img, self.show, ion=False)

    # --- Hooks that concrete benchmark subclasses must implement. ---
    def init_classifier(self):
        raise NotImplementedException("Subclasses are responsible for creating this method")

    def init_test(self, files):
        raise NotImplementedException("Subclasses are responsible for creating this method")

    def test_network_with_files(self, files):
        raise NotImplementedException("Subclasses are responsible for creating this method")

    def test_network_with_obs(self, l_obs, l_res):
        raise NotImplementedException("Subclasses are responsible for creating this method")

    def train_with_file(self, files, new_day):
        raise NotImplementedException("Subclasses are responsible for creating this method")

    def train_with_obs(self, l_obs, l_res, new_day):
        raise NotImplementedException("Subclasses are responsible for creating this method")
class Benchmark_Koho(Benchmark):
    """Benchmark driver backed by the Kohonen-based brain_state_calculate
    classifier."""

    def __init__(self, nb_chan, group_by, input_classifier):
        super(Benchmark_Koho, self).__init__(nb_chan, group_by)
        self.input_count_classifier = input_classifier

    def init_classifier(self):
        # Fresh Kohonen classifier sharing this benchmark's plot settings.
        self.classifier = bsc.brain_state_calculate(
            self.input_count_classifier, 'koho', self.ext_img,
            self.save_img, self.show)

    def init_test(self, files):
        self.classifier.init_networks(files, self.my_cft, train_mod_chan=True)

    def test_network_with_files(self, files):
        expected, observations = self.my_cft.read_cpp_files(
            files, use_classifier_result=False, cut_after_cue=True)
        return self.test_network_with_obs(observations, expected)

    def test_network_with_obs(self, l_obs, l_res):
        # Only the per-sample results matter here; the aggregate success
        # score returned by the classifier is discarded.
        _, l_of_res = self.classifier.test(l_obs, l_res)
        return l_of_res

    def train_with_file(self, files, new_day):
        expected, observations = self.my_cft.read_cpp_files(
            files, use_classifier_result=False, cut_after_cue=True)
        self.train_with_obs(observations, expected, new_day)

    def train_with_obs(self, l_obs, l_res, new_day):
        # On a day change, run the dedicated new-day adaptation first.
        if new_day:
            self.classifier.train_nets_new_day(l_obs, l_res, self.my_cft)
        self.classifier.train_nets(l_obs, l_res, self.my_cft, with_RL=True,
                                   obs_to_add=0, train_mod_chan=True)
|
scauglog/brain_record_toolbox
|
benchmark_walk_classifier.py
|
Python
|
mit
| 22,052
|
[
"NEURON"
] |
147039419a310093978f273b06c5fdeb6092856f64139db396d48023eb81b5a3
|
#!/usr/bin/env python
from traits.api import \
HasTraits, Str, Int, List, Button, File, Instance, Dict,Enum, \
on_trait_change, Array, Bool, Color, Tuple, Button
from traitsui.api import Group, View, Handler, Item, \
OKButton, CancelButton, EnumEditor, TableEditor, \
CheckListEditor, ObjectColumn
import numpy as np
import os
import nibabel as nib
# Mayavi classes
from mayavi import mlab
from mayavi.core.api import PipelineBase, Source
from mayavi.core.ui.api import SceneEditor
from mayavi.tools.mlab_scene_model import MlabSceneModel
from tvtk.pyface.scene import Scene
from tvtk.api import tvtk
class ScalarVolume(HasTraits):
    """One NIfTI scalar volume rendered into a Mayavi scene as cubes or a
    Gaussian-splatter volume."""
    # Data
    filepath = File("")
    ijk = Tuple
    scalars = Array
    indices = Array
    # Holds the mayavi objects
    source = Instance(Source)
    glyph = Instance(PipelineBase)
    splatter = Instance(PipelineBase)
    # MayaVi data options
    color_map = Enum(
        [ "Blues", "Oranges", "pink", "Greens"] )
    render_type = Enum(["sized_cubes","static_cubes","splatter"])
    static_color = Color
    visible = Bool(True)
    b_render = Button(label="Render")

    # Traits static handler: fires when the Render button is pressed.
    def _b_render_fired(self):
        self.clear()
        self.render()

    # Traits static handler: load voxel coordinates/values on path change.
    def _filepath_changed(self):
        data = nib.load(self.filepath).get_data()
        self.indices = np.nonzero(data)
        self.scalars = data[self.indices]

    # Traits static handler: toggle visibility of the rendered glyph.
    def _visible_changed(self):
        if self.glyph is not None:
            self.glyph.visible = self.visible

    def clear(self):
        """Remove any existing glyph/splatter from the pipeline, ignoring
        (but printing) removal failures. Python 2 except syntax."""
        if self.glyph is not None:
            try:
                self.glyph.remove()
            except Exception, e:
                print e
        if not self.splatter is None:
            try:
                self.splatter.remove()
            except Exception, e:
                print e

    def render(self):
        """Build the Mayavi pipeline for this volume per render_type."""
        if not self.visible: return
        # static_color may be a QColor (toTuple) or already a tuple.
        try:
            color = self.static_color.toTuple()
        except:
            color = self.static_color
        # Normalize 0-255 RGB to the 0-1 floats Mayavi expects.
        static_color = color[0]/255., color[1]/255., color[2]/255.
        self.source = mlab.pipeline.scalar_scatter(
            self.indices[0],self.indices[1],self.indices[2],self.scalars)
        if self.render_type == "sized_cubes":
            self.glyph = mlab.pipeline.glyph(
                self.source, colormap=self.color_map, mode="cube" )
        elif self.render_type == "splatter":
            self.splatter = mlab.pipeline.gaussian_splatter(self.source)
            self.glyph = mlab.pipeline.volume(
                self.splatter,
                color=static_color)
        # Separate if (not elif): rebuilds the source without scalars so all
        # cubes get the same size/color. Mutually exclusive with the above.
        if self.render_type == "static_cubes":
            self.source = mlab.pipeline.scalar_scatter(
                self.indices[0],self.indices[1],self.indices[2])
            self.glyph = mlab.pipeline.glyph(
                self.source, color=static_color, mode="cube" )

    # Traits static handler: re-render with the newly selected colormap.
    def _color_map_changed(self):
        self.clear()
        self.render()

    # Per-instance editing view opened from the volume table.
    instance_view = View(
        Group(
            Item("filepath"),
            Group(Item("visible"),Item("glyph"),Item("splatter"),Item("source"),orientation="horizontal"),
            Item("static_color"),
            Item("b_render"),
            orientation="vertical")
        )
# Module-level table editor for a list of ScalarVolume instances; rows open
# the volume's own `instance_view` for detailed editing, and the row factory
# lets the table's "add" action create new ScalarVolume objects.
volume_table = TableEditor(
    columns = [
        ObjectColumn(name="color_map", editable=True),
        ObjectColumn(name="static_color", editable=True),
        ObjectColumn(name="render_type", editable=True),
        ObjectColumn(name="visible",editable=True),
        ObjectColumn(name="filepath", editable=True),
    ],
    deletable = True,
    auto_size = True,
    show_toolbar = True,
    edit_view="instance_view",
    row_factory=ScalarVolume,
    orientation="vertical"
    )
class ScalarVolumes(HasTraits):
    """Collection of ScalarVolume overlays rendered into one shared Mayavi
    scene."""
    volumes = List(Instance(ScalarVolume))
    scene3d = Instance(MlabSceneModel)

    # Traits default initializer: create the scene model lazily.
    def _scene3d_default(self):
        return MlabSceneModel()

    def render_regions(self):
        # Suspend scene re-rendering while every volume is added, then draw
        # once at the end.
        self.scene3d.disable_render = True
        for volume in self.volumes:
            volume.render()
        self.scene3d.disable_render = False

    # Debug/test layout: table plus embedded 3D scene.
    test_view = View(
        Item("volumes",editor=volume_table),
        Group(
            Item("scene3d",
                 editor=SceneEditor(scene_class=Scene),
                 height=500, width=500),
            show_labels=False),
        resizable=True
        )
    # Default layout: table only.
    traits_view = View(
        Group(
            Item("volumes",editor=volume_table),
            show_labels=False
            ),
        resizable=True
        )
|
mattcieslak/DSI2
|
dsi2/volumes/scalar_volume.py
|
Python
|
gpl-3.0
| 4,498
|
[
"Mayavi"
] |
3501a0fbff79a75d495f3618e1eb5ab64abee60b230af8e5539c593d82a17848
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Views tests for the OSF.'''
from __future__ import absolute_import
import unittest
import json
import datetime as dt
import mock
import httplib as http
import math
import time
from nose.tools import * # noqa PEP8 asserts
from tests.test_features import requires_search
from modularodm import Q, fields
from modularodm.exceptions import ValidationError
from dateutil.parser import parse as parse_date
from framework import auth
from framework.exceptions import HTTPError
from framework.auth import User, Auth
from framework.auth.utils import impute_names_model
from framework.auth.exceptions import InvalidTokenError
from framework.tasks import handlers
from website import mailchimp_utils
from website.views import _rescale_ratio
from website.util import permissions
from website.models import Node, Pointer, NodeLog
from website.project.model import ensure_schemas, has_anonymous_link
from website.project.views.contributor import (
send_claim_email,
deserialize_contributors,
send_claim_registered_email,
notify_added_contributor
)
from website.profile.utils import add_contributor_json, serialize_unregistered
from website.profile.views import fmt_date_or_none
from website.util import api_url_for, web_url_for
from website import mails, settings
from website.util import rubeus
from website.project.views.node import _view_project, abbrev_authors, _should_show_wiki_widget
from website.project.views.comment import serialize_comment
from website.project.decorators import check_can_access
from website.project.signals import contributor_added
from website.addons.github.model import AddonGitHubOauthSettings
from website.archiver import utils as archiver_utils
from tests.base import (
OsfTestCase,
fake,
capture_signals,
assert_is_redirect,
assert_datetime_equal,
)
from tests.factories import (
UserFactory, ApiOAuth2ApplicationFactory, ProjectFactory, WatchConfigFactory,
NodeFactory, NodeLogFactory, AuthUserFactory, UnregUserFactory,
RegistrationFactory, CommentFactory, PrivateLinkFactory, UnconfirmedUserFactory, DashboardFactory, FolderFactory,
ProjectWithAddonFactory, MockAddonNodeSettings,
)
from website.settings import ALL_MY_REGISTRATIONS_ID, ALL_MY_PROJECTS_ID
class Addon(MockAddonNodeSettings):
    """Mock node addon used in tests: always configured, always reports an
    archive error."""
    @property
    def complete(self):
        # Report the addon as fully configured.
        return True

    def archive_errors(self):
        # Dummy archive error message.
        return 'Error'
class Addon2(MockAddonNodeSettings):
    """Second mock node addon, behaviorally identical to Addon; kept distinct
    so tests can attach two different addon types to one node."""
    @property
    def complete(self):
        # Report the addon as fully configured.
        return True

    def archive_errors(self):
        # Dummy archive error message.
        return 'Error'
class TestViewingProjectWithPrivateLink(OsfTestCase):
    """Access to a private project through a view-only ("private") link.

    ``self.user`` is deliberately NOT a contributor on ``self.project``,
    so any successful access must come from the link key itself.
    """

    def setUp(self):
        super(TestViewingProjectWithPrivateLink, self).setUp()
        self.user = AuthUserFactory()  # Is NOT a contributor
        self.project = ProjectFactory(is_public=False)
        self.link = PrivateLinkFactory()
        self.link.nodes.append(self.project)
        self.link.save()
        self.project_url = self.project.web_url_for('view_project')

    def test_not_anonymous_for_public_project(self):
        # An anonymous link loses its anonymizing effect once the
        # project is made public.
        anonymous_link = PrivateLinkFactory(anonymous=True)
        anonymous_link.nodes.append(self.project)
        anonymous_link.save()
        self.project.set_privacy('public')
        self.project.save()
        self.project.reload()
        auth = Auth(user=self.user, private_key=anonymous_link.key)
        assert_false(has_anonymous_link(self.project, auth))

    def test_has_private_link_key(self):
        # A valid view-only key grants read access without login.
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_equal(res.status_code, 200)

    def test_not_logged_in_no_key(self):
        # No key and no session -> redirected to the login page.
        res = self.app.get(self.project_url, {'view_only': None})
        assert_is_redirect(res)
        res = res.follow(expect_errors=True)
        assert_equal(res.status_code, 301)
        assert_equal(
            res.request.path,
            '/login'
        )

    def test_logged_in_no_private_key(self):
        # Logged in but neither contributor nor key holder -> 403.
        res = self.app.get(self.project_url, {'view_only': None}, auth=self.user.auth,
                           expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_logged_in_has_key(self):
        res = self.app.get(
            self.project_url, {'view_only': self.link.key}, auth=self.user.auth)
        assert_equal(res.status_code, 200)

    @unittest.skip('Skipping for now until we find a way to mock/set the referrer')
    def test_prepare_private_key(self):
        # The key supplied on the first request should be carried through
        # subsequent in-site navigation (here: the Registrations tab).
        res = self.app.get(self.project_url, {'key': self.link.key})
        res = res.click('Registrations')
        assert_is_redirect(res)
        res = res.follow()
        assert_equal(res.status_code, 200)
        assert_equal(res.request.GET['key'], self.link.key)

    def test_check_can_access_valid(self):
        contributor = AuthUserFactory()
        self.project.add_contributor(contributor, auth=Auth(self.project.creator))
        self.project.save()
        assert_true(check_can_access(self.project, contributor))

    def test_check_user_access_invalid(self):
        # Non-contributors are rejected with an HTTPError.
        noncontrib = AuthUserFactory()
        with assert_raises(HTTPError):
            check_can_access(self.project, noncontrib)

    def test_check_user_access_if_user_is_None(self):
        # Anonymous (None) user returns False rather than raising.
        assert_false(check_can_access(self.project, None))
class TestProjectViews(OsfTestCase):
    """API/view tests for project CRUD, contributors, permissions, tags,
    registrations, forks, logs, private links, and watching.

    Fixture: ``self.project`` is created by ``self.user1`` (admin) and
    has ``self.user2`` added as a second contributor, so every test
    starts from a two-contributor project.
    """

    # Register two mock addons so archival-error paths can be exercised.
    ADDONS_UNDER_TEST = {
        'addon1': {
            'node_settings': Addon,
        },
        'addon2': {
            'node_settings': Addon2,
        },
    }

    def setUp(self):
        super(TestProjectViews, self).setUp()
        ensure_schemas()
        self.user1 = AuthUserFactory()
        self.user1.save()
        self.consolidate_auth1 = Auth(user=self.user1)
        self.auth = self.user1.auth
        self.user2 = UserFactory()
        # A project has 2 contributors
        self.project = ProjectFactory(
            title="Ham",
            description='Honey-baked',
            creator=self.user1
        )
        self.project.add_contributor(self.user2, auth=Auth(self.user1))
        self.project.save()

    def test_cannot_remove_only_visible_contributor_before_remove_contributor(self):
        # Make user2 the sole bibliographic (visible) contributor, then
        # verify the pre-removal check refuses to drop them.
        self.project.visible_contributor_ids.remove(self.user1._id)
        self.project.save()
        url = self.project.api_url_for('project_before_remove_contributor')
        res = self.app.post_json(
            url, {'id': self.user2._id}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor')

    def test_cannot_remove_only_visible_contributor_remove_contributor(self):
        # Same invariant enforced on the actual removal endpoint.
        self.project.visible_contributor_ids.remove(self.user1._id)
        self.project.save()
        url = self.project.api_url_for('project_removecontributor')
        res = self.app.post_json(
            url, {'id': self.user2._id}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor')
        assert_true(self.project.is_contributor(self.user2))

    def test_remove_only_visible_contributor_return_false(self):
        # Model-level removal of the last visible contributor returns
        # False and leaves the contributor in place.
        self.project.visible_contributor_ids.remove(self.user1._id)
        self.project.save()
        ret = self.project.remove_contributor(contributor=self.user2, auth=self.consolidate_auth1)
        assert_false(ret)
        self.project.reload()
        assert_true(self.project.is_contributor(self.user2))

    def test_can_view_nested_project_as_admin(self):
        # Admin of the top-level project can view a private grandchild.
        self.parent_project = NodeFactory(
            title='parent project',
            category='project',
            parent=self.project,
            is_public=False
        )
        self.parent_project.save()
        self.child_project = NodeFactory(
            title='child project',
            category='project',
            parent=self.parent_project,
            is_public=False
        )
        self.child_project.save()
        url = self.child_project.web_url_for('view_project')
        res = self.app.get(url, auth=self.auth)
        assert_not_in('Private Project', res.body)
        assert_in('parent project', res.body)

    def test_edit_description(self):
        url = "/api/v1/project/{0}/edit/".format(self.project._id)
        self.app.post_json(url,
                           {"name": "description", "value": "Deep-fried"},
                           auth=self.auth)
        self.project.reload()
        assert_equal(self.project.description, "Deep-fried")

    def test_project_api_url(self):
        # The node serialization exposed at the project's API URL.
        url = self.project.api_url
        res = self.app.get(url, auth=self.auth)
        data = res.json
        assert_equal(data['node']['category'], 'Project')
        assert_equal(data['node']['node_type'], 'project')
        assert_equal(data['node']['title'], self.project.title)
        assert_equal(data['node']['is_public'], self.project.is_public)
        assert_equal(data['node']['is_registration'], False)
        assert_equal(data['node']['id'], self.project._primary_key)
        assert_equal(data['node']['watched_count'], 0)
        assert_true(data['user']['is_contributor'])
        assert_equal(data['node']['description'], self.project.description)
        assert_equal(data['node']['url'], self.project.url)
        assert_equal(data['node']['tags'], [t._primary_key for t in self.project.tags])
        assert_in('forked_date', data['node'])
        assert_in('watched_count', data['node'])
        assert_in('registered_from_url', data['node'])
        # TODO: Test "parent" and "user" output

    def test_api_get_folder_pointers(self):
        dashboard = DashboardFactory(creator=self.user1)
        project_one = ProjectFactory(creator=self.user1)
        project_two = ProjectFactory(creator=self.user1)
        url = dashboard.api_url_for("get_folder_pointers")
        dashboard.add_pointer(project_one, auth=self.consolidate_auth1)
        dashboard.add_pointer(project_two, auth=self.consolidate_auth1)
        res = self.app.get(url, auth=self.auth)
        pointers = res.json
        assert_in(project_one._id, pointers)
        assert_in(project_two._id, pointers)
        assert_equal(len(pointers), 2)

    def test_api_get_folder_pointers_from_non_folder(self):
        # Pointer listing is folder-specific; a regular project yields [].
        project_one = ProjectFactory(creator=self.user1)
        project_two = ProjectFactory(creator=self.user1)
        url = project_one.api_url_for("get_folder_pointers")
        project_one.add_pointer(project_two, auth=self.consolidate_auth1)
        res = self.app.get(url, auth=self.auth)
        pointers = res.json
        assert_equal(len(pointers), 0)

    def test_new_user_gets_dashboard_on_dashboard_path(self):
        # Hitting the dashboard endpoint lazily creates the user's
        # dashboard node if it does not exist yet.
        my_user = AuthUserFactory()
        dashboard = my_user.node__contributed.find(Q('is_dashboard', 'eq', True))
        assert_equal(dashboard.count(), 0)
        url = api_url_for('get_dashboard')
        self.app.get(url, auth=my_user.auth)
        my_user.reload()
        dashboard = my_user.node__contributed.find(Q('is_dashboard', 'eq', True))
        assert_equal(dashboard.count(), 1)

    def test_add_contributor_post(self):
        # Two users are added as a contributor via a POST request
        project = ProjectFactory(creator=self.user1, is_public=True)
        user2 = UserFactory()
        user3 = UserFactory()
        url = "/api/v1/project/{0}/contributors/".format(project._id)
        dict2 = add_contributor_json(user2)
        dict3 = add_contributor_json(user3)
        dict2.update({
            'permission': 'admin',
            'visible': True,
        })
        dict3.update({
            'permission': 'write',
            'visible': False,
        })
        self.app.post_json(
            url,
            {
                'users': [dict2, dict3],
                'node_ids': [project._id],
            },
            content_type="application/json",
            auth=self.auth,
        ).maybe_follow()
        project.reload()
        assert_in(user2._id, project.contributors)
        # A log event was added
        assert_equal(project.logs[-1].action, "contributor_added")
        assert_equal(len(project.contributors), 3)
        # Requested permission levels are expanded to full permission lists.
        assert_in(user2._id, project.permissions)
        assert_in(user3._id, project.permissions)
        assert_equal(project.permissions[user2._id], ['read', 'write', 'admin'])
        assert_equal(project.permissions[user3._id], ['read', 'write'])

    def test_manage_permissions(self):
        url = self.project.api_url + 'contributors/manage/'
        self.app.post_json(
            url,
            {
                'contributors': [
                    {'id': self.project.creator._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                    {'id': self.user1._id, 'permission': 'read',
                        'registered': True, 'visible': True},
                    {'id': self.user2._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                ]
            },
            auth=self.auth,
        )
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user1), ['read'])
        assert_equal(self.project.get_permissions(self.user2), ['read', 'write', 'admin'])

    def test_manage_permissions_again(self):
        # Permissions can be raised and then lowered in a second request.
        url = self.project.api_url + 'contributors/manage/'
        self.app.post_json(
            url,
            {
                'contributors': [
                    {'id': self.user1._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                    {'id': self.user2._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                ]
            },
            auth=self.auth,
        )
        self.project.reload()
        self.app.post_json(
            url,
            {
                'contributors': [
                    {'id': self.user1._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                    {'id': self.user2._id, 'permission': 'read',
                        'registered': True, 'visible': True},
                ]
            },
            auth=self.auth,
        )
        self.project.reload()
        assert_equal(self.project.get_permissions(self.user2), ['read'])
        assert_equal(self.project.get_permissions(self.user1), ['read', 'write', 'admin'])

    def test_contributor_manage_reorder(self):
        # Two users are added as a contributor via a POST request
        project = ProjectFactory(creator=self.user1, is_public=True)
        reg_user1, reg_user2 = UserFactory(), UserFactory()
        project.add_contributors(
            [
                {'user': reg_user1, 'permissions': [
                 'read', 'write', 'admin'], 'visible': True},
                {'user': reg_user2, 'permissions': [
                 'read', 'write', 'admin'], 'visible': False},
            ]
        )
        # Add a non-registered user
        unregistered_user = project.add_unregistered_contributor(
            fullname=fake.name(), email=fake.email(),
            auth=self.consolidate_auth1,
            save=True,
        )
        url = project.api_url + 'contributors/manage/'
        # Submit contributors in a new order; the manage endpoint should
        # persist that ordering.
        self.app.post_json(
            url,
            {
                'contributors': [
                    {'id': reg_user2._id, 'permission': 'admin',
                        'registered': True, 'visible': False},
                    {'id': project.creator._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                    {'id': unregistered_user._id, 'permission': 'admin',
                        'registered': False, 'visible': True},
                    {'id': reg_user1._id, 'permission': 'admin',
                        'registered': True, 'visible': True},
                ]
            },
            auth=self.auth,
        )
        project.reload()
        assert_equal(
            # Note: Cast ForeignList to list for comparison
            list(project.contributors),
            [reg_user2, project.creator, unregistered_user, reg_user1]
        )
        assert_equal(
            project.visible_contributors,
            [project.creator, unregistered_user, reg_user1]
        )

    def test_project_remove_contributor(self):
        url = "/api/v1/project/{0}/removecontributors/".format(self.project._id)
        # User 1 removes user2
        self.app.post(url, json.dumps({"id": self.user2._id}),
                      content_type="application/json",
                      auth=self.auth).maybe_follow()
        self.project.reload()
        assert_not_in(self.user2._id, self.project.contributors)
        # A log event was added
        assert_equal(self.project.logs[-1].action, "contributor_removed")

    def test_get_contributors_abbrev(self):
        # create a project with 3 registered contributors
        project = ProjectFactory(creator=self.user1, is_public=True)
        reg_user1, reg_user2 = UserFactory(), UserFactory()
        project.add_contributors(
            [
                {'user': reg_user1, 'permissions': [
                 'read', 'write', 'admin'], 'visible': True},
                {'user': reg_user2, 'permissions': [
                 'read', 'write', 'admin'], 'visible': True},
            ]
        )
        # add an unregistered contributor
        project.add_unregistered_contributor(
            fullname=fake.name(), email=fake.email(),
            auth=self.consolidate_auth1,
            save=True,
        )
        url = project.api_url_for('get_node_contributors_abbrev')
        res = self.app.get(url, auth=self.auth)
        assert_equal(len(project.contributors), 4)
        assert_equal(len(res.json['contributors']), 3)
        # NOTE(review): len() on 'others_count' assumes the endpoint
        # returns a sized value (e.g. a string), not an int — confirm.
        assert_equal(len(res.json['others_count']), 1)
        assert_equal(res.json['contributors'][0]['separator'], ',')
        assert_equal(res.json['contributors'][1]['separator'], ',')
        assert_equal(res.json['contributors'][2]['separator'], ' &')

    def test_edit_node_title(self):
        url = "/api/v1/project/{0}/edit/".format(self.project._id)
        # The title is changed though posting form data
        self.app.post_json(url, {"name": "title", "value": "Bacon"},
                           auth=self.auth).maybe_follow()
        self.project.reload()
        # The title was changed
        assert_equal(self.project.title, "Bacon")
        # A log event was saved
        assert_equal(self.project.logs[-1].action, "edit_title")

    def test_make_public(self):
        self.project.is_public = False
        self.project.save()
        url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        self.project.reload()
        assert_true(self.project.is_public)
        assert_equal(res.json['status'], 'success')

    def test_make_private(self):
        self.project.is_public = True
        self.project.save()
        url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        self.project.reload()
        assert_false(self.project.is_public)
        assert_equal(res.json['status'], 'success')

    def test_cant_make_public_if_not_admin(self):
        # Privacy toggles require admin permission; read/write is not enough.
        non_admin = AuthUserFactory()
        self.project.add_contributor(non_admin, permissions=['read', 'write'])
        self.project.is_public = False
        self.project.save()
        url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
        res = self.app.post_json(
            url, {}, auth=non_admin.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_false(self.project.is_public)

    def test_cant_make_private_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.project.add_contributor(non_admin, permissions=['read', 'write'])
        self.project.is_public = True
        self.project.save()
        url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
        res = self.app.post_json(
            url, {}, auth=non_admin.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_true(self.project.is_public)

    def test_add_tag(self):
        # Tag text with punctuation/special characters is preserved verbatim.
        url = self.project.api_url_for('project_add_tag')
        self.app.post_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth)
        self.project.reload()
        assert_in("foo'ta#@%#%^&g?", self.project.tags)
        assert_equal("foo'ta#@%#%^&g?", self.project.logs[-1].params['tag'])

    def test_remove_tag(self):
        self.project.add_tag("foo'ta#@%#%^&g?", auth=self.consolidate_auth1, save=True)
        assert_in("foo'ta#@%#%^&g?", self.project.tags)
        url = self.project.api_url_for("project_remove_tag")
        self.app.delete_json(url, {"tag": "foo'ta#@%#%^&g?"}, auth=self.auth)
        self.project.reload()
        assert_not_in("foo'ta#@%#%^&g?", self.project.tags)
        assert_equal("tag_removed", self.project.logs[-1].action)
        assert_equal("foo'ta#@%#%^&g?", self.project.logs[-1].params['tag'])

    @mock.patch('website.archiver.tasks.archive')
    def test_register_template_page(self, mock_archive):
        # Archival is mocked out so registration completes synchronously.
        url = "/api/v1/project/{0}/register/Replication_Recipe_(Brandt_et_al.,_2013):_Post-Completion/".format(
            self.project._primary_key)
        self.app.post_json(url, {'registrationChoice': 'Make registration public immediately'}, auth=self.auth)
        self.project.reload()
        # A registration was added to the project's registration list
        assert_equal(len(self.project.node__registrations), 1)
        # A log event was saved
        assert_equal(self.project.logs[-1].action, "registration_initiated")
        # Most recent node is a registration
        reg = Node.load(self.project.node__registrations[-1])
        assert_true(reg.is_registration)

    @mock.patch('website.archiver.tasks.archive')
    def test_register_template_with_embargo_creates_embargo(self, mock_archive):
        url = "/api/v1/project/{0}/register/Replication_Recipe_(Brandt_et_al.,_2013):_Post-Completion/".format(
            self.project._primary_key)
        self.app.post_json(
            url,
            {
                'registrationChoice': 'embargo',
                # End date one year in the future so the embargo is valid.
                'embargoEndDate': "Fri, 01 Jan {year} 05:00:00 GMT".format(year=str(dt.date.today().year + 1))
            },
            auth=self.auth)
        self.project.reload()
        # Most recent node is a registration
        reg = Node.load(self.project.node__registrations[-1])
        assert_true(reg.is_registration)
        # The registration created is not public
        assert_false(reg.is_public)
        # The registration is pending an embargo that has not been approved
        assert_true(reg.is_pending_embargo)

    def test_register_template_page_with_invalid_template_name(self):
        url = self.project.web_url_for('node_register_template_page', template='invalid')
        res = self.app.get(url, expect_errors=True, auth=self.auth)
        assert_equal(res.status_code, 404)
        assert_in('Template not found', res)

    def test_register_project_with_multiple_errors(self):
        # Both mock addons report archive errors; the project-level addon
        # and the component's addons each contribute to the error list.
        self.project.add_addon('addon1', auth=Auth(self.user1))
        component = NodeFactory(parent=self.project, creator=self.user1)
        component.add_addon('addon1', auth=Auth(self.user1))
        component.add_addon('addon2', auth=Auth(self.user1))
        self.project.save()
        component.save()
        url = self.project.api_url_for('project_before_register')
        res = self.app.get(url, auth=self.auth)
        data = res.json
        assert_equal(res.status_code, 200)
        assert_equal(len(data['errors']), 2)

    # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1478
    @mock.patch('website.archiver.tasks.archive')
    def test_registered_projects_contributions(self, mock_archive):
        # register a project
        self.project.register_node(None, Auth(user=self.project.creator), '', None)
        # get the first registered project of a project
        url = self.project.api_url_for('get_registrations')
        res = self.app.get(url, auth=self.auth)
        data = res.json
        pid = data['nodes'][0]['id']
        url2 = api_url_for('get_summary', pid=pid)
        # count contributions
        res2 = self.app.get(url2, {'rescale_ratio': data['rescale_ratio']}, auth=self.auth)
        data = res2.json
        assert_is_not_none(data['summary']['nlogs'])

    def test_forks_contributions(self):
        # fork a project
        self.project.fork_node(Auth(user=self.project.creator))
        # get the first forked project of a project
        url = self.project.api_url_for('get_forks')
        res = self.app.get(url, auth=self.auth)
        data = res.json
        pid = data['nodes'][0]['id']
        url2 = api_url_for('get_summary', pid=pid)
        # count contributions
        res2 = self.app.get(url2, {'rescale_ratio': data['rescale_ratio']}, auth=self.auth)
        data = res2.json
        assert_is_not_none(data['summary']['nlogs'])

    @mock.patch('framework.transactions.commands.begin')
    @mock.patch('framework.transactions.commands.rollback')
    @mock.patch('framework.transactions.commands.commit')
    def test_get_logs(self, *mock_commands):
        # GET of logs must be read-only: none of the transaction
        # commands (begin/rollback/commit) may fire.
        # Add some logs
        for _ in range(5):
            self.project.logs.append(
                NodeLogFactory(
                    user=self.user1,
                    action='file_added',
                    params={'node': self.project._id}
                )
            )
        self.project.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url, auth=self.auth)
        for mock_command in mock_commands:
            assert_false(mock_command.called)
        self.project.reload()
        data = res.json
        assert_equal(len(data['logs']), len(self.project.logs))
        assert_equal(data['total'], len(self.project.logs))
        assert_equal(data['page'], 0)
        assert_equal(data['pages'], 1)
        most_recent = data['logs'][0]
        assert_equal(most_recent['action'], 'file_added')

    def test_get_logs_invalid_page_input(self):
        url = self.project.api_url_for('get_logs')
        invalid_input = 'invalid page'
        res = self.app.get(
            url, {'page': invalid_input}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "page".'
        )

    def test_get_logs_negative_page_num(self):
        url = self.project.api_url_for('get_logs')
        invalid_input = -1
        res = self.app.get(
            url, {'page': invalid_input}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "page".'
        )

    def test_get_logs_page_num_beyond_limit(self):
        # Requesting the first page past the end is rejected as invalid.
        url = self.project.api_url_for('get_logs')
        size = 10
        page_num = math.ceil(len(self.project.logs)/ float(size))
        res = self.app.get(
            url, {'page': page_num}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "page".'
        )

    def test_get_logs_with_count_param(self):
        # Add some logs
        for _ in range(5):
            self.project.logs.append(
                NodeLogFactory(
                    user=self.user1,
                    action='file_added',
                    params={'node': self.project._id}
                )
            )
        self.project.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url, {'count': 3}, auth=self.auth)
        assert_equal(len(res.json['logs']), 3)
        # 1 project create log, 1 add contributor log, then 5 generated logs
        assert_equal(res.json['total'], 5 + 2)
        assert_equal(res.json['page'], 0)
        assert_equal(res.json['pages'], 3)

    def test_get_logs_defaults_to_ten(self):
        # Add some logs
        for _ in range(12):
            self.project.logs.append(
                NodeLogFactory(
                    user=self.user1,
                    action='file_added',
                    params={'node': self.project._id}
                )
            )
        self.project.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url, auth=self.auth)
        assert_equal(len(res.json['logs']), 10)
        # 1 project create log, 1 add contributor log, then 12 generated logs
        assert_equal(res.json['total'], 12 + 2)
        assert_equal(res.json['page'], 0)
        assert_equal(res.json['pages'], 2)

    def test_get_more_logs(self):
        # Add some logs
        for _ in range(12):
            self.project.logs.append(
                NodeLogFactory(
                    user=self.user1,
                    action="file_added",
                    params={"node": self.project._id}
                )
            )
        self.project.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url, {"page": 1}, auth=self.auth)
        assert_equal(len(res.json['logs']), 4)
        #1 project create log, 1 add contributor log, then 12 generated logs
        assert_equal(res.json['total'], 12 + 2)
        assert_equal(res.json['page'], 1)
        assert_equal(res.json['pages'], 2)

    def test_logs_private(self):
        """Add logs to a public project, then to its private component. Get
        the ten most recent logs; assert that ten logs are returned and that
        all belong to the project and not its component.
        """
        # Add some logs
        for _ in range(15):
            self.project.add_log(
                auth=self.consolidate_auth1,
                action='file_added',
                params={'node': self.project._id}
            )
        self.project.is_public = True
        self.project.save()
        child = NodeFactory(parent=self.project)
        for _ in range(5):
            child.add_log(
                auth=self.consolidate_auth1,
                action='file_added',
                params={'node': child._id}
            )
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url).maybe_follow()
        assert_equal(len(res.json['logs']), 10)
        # 1 project create log, 1 add contributor log, then 15 generated logs
        assert_equal(res.json['total'], 15 + 2)
        assert_equal(res.json['page'], 0)
        assert_equal(res.json['pages'], 2)
        assert_equal(
            [self.project._id] * 10,
            [
                log['params']['node']
                for log in res.json['logs']
            ]
        )

    def test_can_view_public_log_from_private_project(self):
        # Logs created while a project was public remain visible on a
        # fork even after the source project is made private.
        project = ProjectFactory(is_public=True)
        fork = project.fork_node(auth=self.consolidate_auth1)
        url = fork.api_url_for('get_logs')
        res = self.app.get(url, auth=self.auth)
        assert_equal(
            [each['action'] for each in res.json['logs']],
            ['node_forked', 'project_created'],
        )
        project.is_public = False
        project.save()
        res = self.app.get(url, auth=self.auth)
        assert_equal(
            [each['action'] for each in res.json['logs']],
            ['node_forked', 'project_created'],
        )

    def test_for_private_component_log(self):
        # Logs belonging to a private child must be filtered out of the
        # public parent's log feed for anonymous viewers.
        for _ in range(5):
            self.project.add_log(
                auth=self.consolidate_auth1,
                action='file_added',
                params={'node': self.project._id}
            )
        self.project.is_public = True
        self.project.save()
        child = NodeFactory(parent=self.project)
        child.is_public = False
        child.set_title("foo", auth=self.consolidate_auth1)
        child.set_title("bar", auth=self.consolidate_auth1)
        child.save()
        url = self.project.api_url_for('get_logs')
        res = self.app.get(url).maybe_follow()
        assert_equal(len(res.json['logs']), 7)
        assert_not_in(
            child._id,
            [
                log['params']['node']
                for log in res.json['logs']
            ]
        )

    def test_remove_project(self):
        url = self.project.api_url
        res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
        self.project.reload()
        assert_equal(self.project.is_deleted, True)
        assert_in('url', res.json)
        assert_equal(res.json['url'], '/dashboard/')

    def test_private_link_edit_name(self):
        link = PrivateLinkFactory()
        link.nodes.append(self.project)
        link.save()
        assert_equal(link.name, "link")
        url = self.project.api_url + 'private_link/edit/'
        self.app.put_json(
            url,
            {'pk': link._id, "value": "new name"},
            auth=self.auth,
        ).maybe_follow()
        self.project.reload()
        link.reload()
        assert_equal(link.name, "new name")

    def test_remove_private_link(self):
        # Removal is a soft delete: the link is flagged, not destroyed.
        link = PrivateLinkFactory()
        link.nodes.append(self.project)
        link.save()
        url = self.project.api_url_for('remove_private_link')
        self.app.delete_json(
            url,
            {'private_link_id': link._id},
            auth=self.auth,
        ).maybe_follow()
        self.project.reload()
        link.reload()
        assert_true(link.is_deleted)

    def test_remove_component(self):
        # Deleting a component redirects back to its parent project.
        node = NodeFactory(parent=self.project, creator=self.user1)
        url = node.api_url
        res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
        node.reload()
        assert_equal(node.is_deleted, True)
        assert_in('url', res.json)
        assert_equal(res.json['url'], self.project.url)

    def test_cant_remove_component_if_not_admin(self):
        node = NodeFactory(parent=self.project, creator=self.user1)
        non_admin = AuthUserFactory()
        node.add_contributor(
            non_admin,
            permissions=['read', 'write'],
            save=True,
        )
        url = node.api_url
        res = self.app.delete_json(
            url, {}, auth=non_admin.auth,
            expect_errors=True,
        ).maybe_follow()
        assert_equal(res.status_code, http.FORBIDDEN)
        assert_false(node.is_deleted)

    def test_watch_and_unwatch(self):
        # The toggle endpoint flips the watch state on each POST.
        url = self.project.api_url_for('togglewatch_post')
        self.app.post_json(url, {}, auth=self.auth)
        res = self.app.get(self.project.api_url, auth=self.auth)
        assert_equal(res.json['node']['watched_count'], 1)
        self.app.post_json(url, {}, auth=self.auth)
        res = self.app.get(self.project.api_url, auth=self.auth)
        assert_equal(res.json['node']['watched_count'], 0)

    def test_view_project_returns_whether_to_show_wiki_widget(self):
        user = AuthUserFactory()
        project = ProjectFactory.build(creator=user, is_public=True)
        project.add_contributor(user)
        project.save()
        url = project.api_url_for('view_project')
        res = self.app.get(url, auth=user.auth)
        assert_equal(res.status_code, http.OK)
        assert_in('show_wiki_widget', res.json['user'])

    def test_fork_count_does_not_include_deleted_forks(self):
        user = AuthUserFactory()
        project = ProjectFactory(creator=user)
        auth = Auth(project.creator)
        fork = project.fork_node(auth)
        project.save()
        fork.remove_node(auth)
        fork.save()
        url = project.api_url_for('view_project')
        res = self.app.get(url, auth=user.auth)
        assert_in('fork_count', res.json['node'])
        assert_equal(0, res.json['node']['fork_count'])

    def test_statistic_page_redirect(self):
        url = self.project.web_url_for('project_statistics_redirect')
        res = self.app.get(url, auth=self.auth)
        assert_equal(res.status_code, 302)
        assert_in(self.project.web_url_for('project_statistics', _guid=True), res.location)
class TestEditableChildrenViews(OsfTestCase):
    """``get_editable_children`` over a four-level nested project chain.

    The fixture alternates public/private down the chain so ordering,
    indentation, parentage, and privacy flags can all be asserted from
    the single response captured in ``setUp``.
    """

    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user, is_public=False)
        self.child = ProjectFactory(parent=self.project, creator=self.user, is_public=True)
        self.grandchild = ProjectFactory(parent=self.child, creator=self.user, is_public=False)
        self.great_grandchild = ProjectFactory(parent=self.grandchild, creator=self.user, is_public=True)
        self.great_great_grandchild = ProjectFactory(parent=self.great_grandchild, creator=self.user, is_public=False)
        url = self.project.api_url_for('get_editable_children')
        # One shared response; the individual tests only inspect it.
        self.project_results = self.app.get(url, auth=self.user.auth).json

    def test_get_editable_children(self):
        assert_equal(len(self.project_results['children']), 4)
        assert_equal(self.project_results['node']['id'], self.project._id)

    def test_editable_children_order(self):
        # Children are returned depth-first, top to bottom.
        assert_equal(self.project_results['children'][0]['id'], self.child._id)
        assert_equal(self.project_results['children'][1]['id'], self.grandchild._id)
        assert_equal(self.project_results['children'][2]['id'], self.great_grandchild._id)
        assert_equal(self.project_results['children'][3]['id'], self.great_great_grandchild._id)

    def test_editable_children_indents(self):
        # Indent level corresponds to nesting depth below the root.
        assert_equal(self.project_results['children'][0]['indent'], 0)
        assert_equal(self.project_results['children'][1]['indent'], 1)
        assert_equal(self.project_results['children'][2]['indent'], 2)
        assert_equal(self.project_results['children'][3]['indent'], 3)

    def test_editable_children_parents(self):
        assert_equal(self.project_results['children'][0]['parent_id'], self.project._id)
        assert_equal(self.project_results['children'][1]['parent_id'], self.child._id)
        assert_equal(self.project_results['children'][2]['parent_id'], self.grandchild._id)
        assert_equal(self.project_results['children'][3]['parent_id'], self.great_grandchild._id)

    def test_editable_children_privacy(self):
        assert_false(self.project_results['node']['is_public'])
        assert_true(self.project_results['children'][0]['is_public'])
        assert_false(self.project_results['children'][1]['is_public'])
        assert_true(self.project_results['children'][2]['is_public'])
        assert_false(self.project_results['children'][3]['is_public'])

    def test_editable_children_titles(self):
        assert_equal(self.project_results['node']['title'], self.project.title)
        assert_equal(self.project_results['children'][0]['title'], self.child.title)
        assert_equal(self.project_results['children'][1]['title'], self.grandchild.title)
        assert_equal(self.project_results['children'][2]['title'], self.great_grandchild.title)
        assert_equal(self.project_results['children'][3]['title'], self.great_great_grandchild.title)
class TestChildrenViews(OsfTestCase):
    """``get_children``: listing, pointer inclusion, permission
    filtering, and render metadata."""

    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()

    def test_get_children(self):
        project = ProjectFactory(creator=self.user)
        child = NodeFactory(parent=project, creator=self.user)
        url = project.api_url_for('get_children')
        res = self.app.get(url, auth=self.user.auth)
        nodes = res.json['nodes']
        assert_equal(len(nodes), 1)
        assert_equal(nodes[0]['id'], child._primary_key)

    def test_get_children_includes_pointers(self):
        project = ProjectFactory(creator=self.user)
        pointed = ProjectFactory()
        project.add_pointer(pointed, Auth(self.user))
        project.save()
        url = project.api_url_for('get_children')
        res = self.app.get(url, auth=self.user.auth)
        nodes = res.json['nodes']
        assert_equal(len(nodes), 1)
        assert_equal(nodes[0]['title'], pointed.title)
        # The serialized id is the Pointer's key, not the pointed node's.
        pointer = Pointer.find_one(Q('node', 'eq', pointed))
        assert_equal(nodes[0]['id'], pointer._primary_key)

    def test_get_children_filter_for_permissions(self):
        # self.user has admin access to this project
        project = ProjectFactory(creator=self.user)
        # self.user only has read access to this project, which project points
        # to
        read_only_pointed = ProjectFactory()
        read_only_creator = read_only_pointed.creator
        read_only_pointed.add_contributor(self.user, auth=Auth(read_only_creator), permissions=['read'])
        read_only_pointed.save()
        # self.user only has read access to this project, which is a subproject
        # of project
        read_only = ProjectFactory()
        # NOTE(review): this repeats the add_contributor on
        # read_only_pointed; presumably `read_only.add_contributor(...)`
        # was intended here — confirm against the permission-filter
        # assertions below before changing.
        read_only_pointed.add_contributor(self.user, auth=Auth(read_only_creator), permissions=['read'])
        project.nodes.append(read_only)
        # self.user adds a pointer to read_only
        project.add_pointer(read_only_pointed, Auth(self.user))
        project.save()
        url = project.api_url_for('get_children')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(len(res.json['nodes']), 2)
        # Filtering for 'write' excludes the read-only children.
        url = project.api_url_for('get_children', permissions='write')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(len(res.json['nodes']), 0)

    def test_get_children_rescale_ratio(self):
        project = ProjectFactory(creator=self.user)
        child = NodeFactory(parent=project, creator=self.user)
        url = project.api_url_for('get_children')
        res = self.app.get(url, auth=self.user.auth)
        rescale_ratio = res.json['rescale_ratio']
        assert_is_instance(rescale_ratio, float)
        assert_equal(rescale_ratio, _rescale_ratio(Auth(self.user), [child]))

    def test_get_children_render_nodes_receives_auth(self):
        # The serializer must receive the request's auth so 'permissions'
        # reflects the viewing user, not an anonymous default.
        project = ProjectFactory(creator=self.user)
        NodeFactory(parent=project, creator=self.user)
        url = project.api_url_for('get_children')
        res = self.app.get(url, auth=self.user.auth)
        perm = res.json['nodes'][0]['permissions']
        assert_equal(perm, 'admin')
class TestUserProfile(OsfTestCase):
def setUp(self):
super(TestUserProfile, self).setUp()
self.user = AuthUserFactory()
def test_sanitization_of_edit_profile(self):
url = api_url_for('edit_profile', uid=self.user._id)
post_data = {'name': 'fullname', 'value': 'new<b> name</b> '}
request = self.app.post(url, post_data, auth=self.user.auth)
assert_equal('new name', request.json['name'])
def test_fmt_date_or_none(self):
with assert_raises(HTTPError) as cm:
#enter a date before 1900
fmt_date_or_none(dt.datetime(1890, 10, 31, 18, 23, 29, 227))
# error should be raised because date is before 1900
assert_equal(cm.exception.code, http.BAD_REQUEST)
def test_unserialize_social(self):
    """PUTting social links stores each provided key on user.social and
    leaves unspecified providers unset (None)."""
    url = api_url_for('unserialize_social')
    payload = {
        'personal': 'http://frozen.pizza.com/reviews',
        'twitter': 'howtopizza',
        'github': 'frozenpizzacode',
    }
    self.app.put_json(
        url,
        payload,
        auth=self.user.auth,
    )
    self.user.reload()
    # dict.items() works identically here on Python 2 and 3;
    # iteritems() is Python-2-only and blocks a future port.
    for key, value in payload.items():
        assert_equal(self.user.social[key], value)
    assert_true(self.user.social['researcherId'] is None)
def test_unserialize_social_validation_failure(self):
url = api_url_for('unserialize_social')
# personal URL is invalid
payload = {
'personal': 'http://invalidurl',
'twitter': 'howtopizza',
'github': 'frozenpizzacode',
}
res = self.app.put_json(
url,
payload,
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Invalid personal URL.')
def test_serialize_social_editable(self):
self.user.social['twitter'] = 'howtopizza'
self.user.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_true(res.json.get('github') is None)
assert_true(res.json['editable'])
def test_serialize_social_not_editable(self):
user2 = AuthUserFactory()
self.user.social['twitter'] = 'howtopizza'
self.user.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_true(res.json.get('github') is None)
assert_false(res.json['editable'])
def test_serialize_social_addons_editable(self):
self.user.add_addon('github')
user_github = self.user.get_addon('github')
oauth_settings = AddonGitHubOauthSettings()
oauth_settings.github_user_id = 'testuser'
oauth_settings.save()
user_github.oauth_settings = oauth_settings
user_github.save()
user_github.github_user_name = 'howtogithub'
oauth_settings.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(
res.json['addons']['github'],
'howtogithub'
)
def test_serialize_social_addons_not_editable(self):
user2 = AuthUserFactory()
self.user.add_addon('github')
user_github = self.user.get_addon('github')
oauth_settings = AddonGitHubOauthSettings()
oauth_settings.github_user_id = 'testuser'
oauth_settings.save()
user_github.oauth_settings = oauth_settings
user_github.save()
user_github.github_user_name = 'howtogithub'
oauth_settings.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_not_in('addons', res.json)
def test_unserialize_and_serialize_jobs(self):
jobs = [{
'institution': 'an institution',
'department': 'a department',
'title': 'a title',
'startMonth': 'January',
'startYear': '2001',
'endMonth': 'March',
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'title': None,
'startMonth': 'May',
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.jobs), 2)
url = api_url_for('serialize_jobs')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(jobs):
assert_equal(job, res.json['contents'][i])
def test_unserialize_and_serialize_schools(self):
schools = [{
'institution': 'an institution',
'department': 'a department',
'degree': 'a degree',
'startMonth': 1,
'startYear': '2001',
'endMonth': 5,
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'degree': None,
'startMonth': 5,
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.schools), 2)
url = api_url_for('serialize_schools')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(schools):
assert_equal(job, res.json['contents'][i])
def test_unserialize_jobs(self):
jobs = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'title': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# jobs field is updated
assert_equal(self.user.jobs, jobs)
def test_unserialize_names(self):
fake_fullname_w_spaces = ' {} '.format(fake.name())
names = {
'full': fake_fullname_w_spaces,
'given': 'Tea',
'middle': 'Gray',
'family': 'Pot',
'suffix': 'Ms.',
}
url = api_url_for('unserialize_names')
res = self.app.put_json(url, names, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# user is updated
assert_equal(self.user.fullname, fake_fullname_w_spaces.strip())
assert_equal(self.user.given_name, names['given'])
assert_equal(self.user.middle_names, names['middle'])
assert_equal(self.user.family_name, names['family'])
assert_equal(self.user.suffix, names['suffix'])
def test_unserialize_schools(self):
schools = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'degree': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# schools field is updated
assert_equal(self.user.schools, schools)
def test_unserialize_jobs_valid(self):
    """A well-formed jobs payload is accepted with HTTP 200."""
    # (removed unused local `jobs_cached`; the field-update behavior is
    # covered by test_unserialize_jobs)
    jobs = [
        {
            'institution': fake.company(),
            'department': fake.catch_phrase(),
            'title': fake.bs(),
            'startMonth': 5,
            'startYear': '2013',
            'endMonth': 3,
            'endYear': '2014',
            'ongoing': False,
        }
    ]
    payload = {'contents': jobs}
    url = api_url_for('unserialize_jobs')
    res = self.app.put_json(url, payload, auth=self.user.auth)
    assert_equal(res.status_code, 200)
def test_get_current_user_gravatar_default_size(self):
url = api_url_for('current_user_gravatar')
res = self.app.get(url, auth=self.user.auth)
current_user_gravatar = res.json['gravatar_url']
assert_true(current_user_gravatar is not None)
url = api_url_for('get_gravatar', uid=self.user._id)
res = self.app.get(url, auth=self.user.auth)
my_user_gravatar = res.json['gravatar_url']
assert_equal(current_user_gravatar, my_user_gravatar)
def test_get_other_user_gravatar_default_size(self):
user2 = AuthUserFactory()
url = api_url_for('current_user_gravatar')
res = self.app.get(url, auth=self.user.auth)
current_user_gravatar = res.json['gravatar_url']
url = api_url_for('get_gravatar', uid=user2._id)
res = self.app.get(url, auth=self.user.auth)
user2_gravatar = res.json['gravatar_url']
assert_true(user2_gravatar is not None)
assert_not_equal(current_user_gravatar, user2_gravatar)
def test_get_current_user_gravatar_specific_size(self):
url = api_url_for('current_user_gravatar')
res = self.app.get(url, auth=self.user.auth)
current_user_default_gravatar = res.json['gravatar_url']
url = api_url_for('current_user_gravatar', size=11)
res = self.app.get(url, auth=self.user.auth)
current_user_small_gravatar = res.json['gravatar_url']
assert_true(current_user_small_gravatar is not None)
assert_not_equal(current_user_default_gravatar, current_user_small_gravatar)
def test_get_other_user_gravatar_specific_size(self):
user2 = AuthUserFactory()
url = api_url_for('get_gravatar', uid=user2._id)
res = self.app.get(url, auth=self.user.auth)
gravatar_default_size = res.json['gravatar_url']
url = api_url_for('get_gravatar', uid=user2._id, size=11)
res = self.app.get(url, auth=self.user.auth)
gravatar_small = res.json['gravatar_url']
assert_true(gravatar_small is not None)
assert_not_equal(gravatar_default_size, gravatar_small)
def test_update_user_timezone(self):
assert_equal(self.user.timezone, 'Etc/UTC')
payload = {'timezone': 'America/New_York', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.timezone, 'America/New_York')
def test_update_user_locale(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': 'de_DE', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'de_DE')
def test_update_user_locale_none(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': None, 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_update_user_locale_empty_string(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': '', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_cannot_update_user_without_user_id(self):
user1 = AuthUserFactory()
url = api_url_for('update_user')
header = {'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], '"id" is required')
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_update_user_mailing_lists(self, mock_get_mailchimp_api, send_mail):
email = fake.email()
self.user.emails.append(email)
list_name = 'foo'
self.user.mailing_lists[list_name] = True
self.user.save()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
url = api_url_for('update_user', uid=self.user._id)
emails = [
{'address': self.user.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}]
payload = {'locale': '', 'id': self.user._id, 'emails': emails}
self.app.put_json(url, payload, auth=self.user.auth)
mock_client.lists.unsubscribe.assert_called_with(
id=list_id,
email={'email': self.user.username}
)
mock_client.lists.subscribe.assert_called_with(
id=list_id,
email={'email': email},
merge_vars={
'fname': self.user.given_name,
'lname': self.user.family_name,
},
double_optin=False,
update_existing=True
)
handlers.celery_teardown_request()
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_unsubscribe_mailchimp_not_called_if_user_not_subscribed(self, mock_get_mailchimp_api, send_mail):
email = fake.email()
self.user.emails.append(email)
list_name = 'foo'
self.user.mailing_lists[list_name] = False
self.user.save()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
url = api_url_for('update_user', uid=self.user._id)
emails = [
{'address': self.user.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}]
payload = {'locale': '', 'id': self.user._id, 'emails': emails}
self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(mock_client.lists.unsubscribe.call_count, 0)
assert_equal(mock_client.lists.subscribe.call_count, 0)
handlers.celery_teardown_request()
# TODO: Uncomment once outstanding issues with this feature are addressed
# def test_twitter_redirect_success(self):
# self.user.social['twitter'] = fake.last_name()
# self.user.save()
# res = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter']))
# assert_equals(res.status_code, http.FOUND)
# assert_in(self.user.url, res.location)
# def test_twitter_redirect_is_case_insensitive(self):
# self.user.social['twitter'] = fake.last_name()
# self.user.save()
# res1 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter']))
# res2 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter'].lower()))
# assert_equal(res1.location, res2.location)
# def test_twitter_redirect_unassociated_twitter_handle_returns_404(self):
# unassociated_handle = fake.last_name()
# expected_error = 'There is no active user associated with the Twitter handle: {0}.'.format(unassociated_handle)
# res = self.app.get(
# web_url_for('redirect_to_twitter', twitter_handle=unassociated_handle),
# expect_errors=True
# )
# assert_equal(res.status_code, http.NOT_FOUND)
# assert_true(expected_error in res.body)
# def test_twitter_redirect_handle_with_multiple_associated_accounts_redirects_to_selection_page(self):
# self.user.social['twitter'] = fake.last_name()
# self.user.save()
# user2 = AuthUserFactory()
# user2.social['twitter'] = self.user.social['twitter']
# user2.save()
# expected_error = 'There are multiple OSF accounts associated with the Twitter handle: <strong>{0}</strong>.'.format(self.user.social['twitter'])
# res = self.app.get(
# web_url_for(
# 'redirect_to_twitter',
# twitter_handle=self.user.social['twitter'],
# expect_error=True
# )
# )
# assert_equal(res.status_code, http.MULTIPLE_CHOICES)
# assert_true(expected_error in res.body)
# assert_true(self.user.url in res.body)
# assert_true(user2.url in res.body)
class TestUserProfileApplicationsPage(OsfTestCase):
    """Access control for the OAuth2 application detail page."""

    def setUp(self):
        super(TestUserProfileApplicationsPage, self).setUp()
        self.user = AuthUserFactory()
        self.user2 = AuthUserFactory()
        # self.user owns the application; self.user2 does not.
        self.platform_app = ApiOAuth2ApplicationFactory(owner=self.user)
        self.detail_url = web_url_for('oauth_application_detail', client_id=self.platform_app.client_id)

    def test_non_owner_cant_access_detail_page(self):
        """A non-owner is rejected with 403."""
        res = self.app.get(self.detail_url, auth=self.user2.auth, expect_errors=True)
        assert_equal(res.status_code, http.FORBIDDEN)

    def test_owner_cant_access_deleted_application(self):
        """A deactivated application returns 410 even for its owner."""
        self.platform_app.active = False
        self.platform_app.save()
        res = self.app.get(self.detail_url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.GONE)

    def test_owner_cant_access_nonexistent_application(self):
        """An unknown client_id returns 404."""
        url = web_url_for('oauth_application_detail', client_id='nonexistent')
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, http.NOT_FOUND)
class TestUserAccount(OsfTestCase):
    """Tests for the account-settings password change flow."""

    def setUp(self):
        super(TestUserAccount, self).setUp()
        self.user = AuthUserFactory()
        self.user.set_password('password')
        self.user.save()

    @mock.patch('website.profile.views.push_status_message')
    def test_password_change_valid(self, mock_push_status_message):
        """A correct old password and matching confirmation updates the
        password and flashes a success message."""
        old_password = 'password'
        new_password = 'Pa$$w0rd'
        confirm_password = new_password
        url = web_url_for('user_account_password')
        post_data = {
            'old_password': old_password,
            'new_password': new_password,
            'confirm_password': confirm_password,
        }
        res = self.app.post(url, post_data, auth=(self.user.username, old_password))
        # BUG FIX: assert_true(302, res.status_code) always passes — the
        # second argument is the failure *message*, not a comparand.
        assert_equal(res.status_code, 302)
        res = res.follow(auth=(self.user.username, new_password))
        assert_equal(res.status_code, 200)
        self.user.reload()
        assert_true(self.user.check_password(new_password))
        assert_true(mock_push_status_message.called)
        assert_in('Password updated successfully', mock_push_status_message.mock_calls[0][1][0])

    @mock.patch('website.profile.views.push_status_message')
    def test_password_change_invalid(self, mock_push_status_message, old_password='', new_password='',
                                     confirm_password='', error_message='Old password is invalid'):
        """Helper/test: an invalid change leaves the password untouched and
        flashes `error_message`."""
        url = web_url_for('user_account_password')
        post_data = {
            'old_password': old_password,
            'new_password': new_password,
            'confirm_password': confirm_password,
        }
        res = self.app.post(url, post_data, auth=self.user.auth)
        # BUG FIX: same always-true assert_true(302, ...) misuse as above.
        assert_equal(res.status_code, 302)
        res = res.follow(auth=self.user.auth)
        assert_equal(res.status_code, 200)
        self.user.reload()
        assert_false(self.user.check_password(new_password))
        assert_true(mock_push_status_message.called)
        assert_in(error_message, mock_push_status_message.mock_calls[0][1][0])

    def test_password_change_invalid_old_password(self):
        self.test_password_change_invalid(
            old_password='invalid old password',
            new_password='new password',
            confirm_password='new password',
            error_message='Old password is invalid',
        )

    def test_password_change_invalid_confirm_password(self):
        self.test_password_change_invalid(
            old_password='password',
            new_password='new password',
            confirm_password='invalid confirm password',
            error_message='Password does not match the confirmation',
        )

    def test_password_change_invalid_new_password_length(self):
        self.test_password_change_invalid(
            old_password='password',
            new_password='12345',
            confirm_password='12345',
            error_message='Password should be at least six characters',
        )

    def test_password_change_invalid_blank_password(self, old_password='', new_password='', confirm_password=''):
        self.test_password_change_invalid(
            old_password=old_password,
            new_password=new_password,
            confirm_password=confirm_password,
            error_message='Passwords cannot be blank',
        )

    def test_password_change_invalid_blank_new_password(self):
        # Both empty and whitespace-only passwords count as blank.
        for password in ('', ' '):
            self.test_password_change_invalid_blank_password('password', password, 'new password')

    def test_password_change_invalid_blank_confirm_password(self):
        for password in ('', ' '):
            self.test_password_change_invalid_blank_password('password', 'new password', password)
class TestAddingContributorViews(OsfTestCase):
def setUp(self):
super(TestAddingContributorViews, self).setUp()
ensure_schemas()
self.creator = AuthUserFactory()
self.project = ProjectFactory(creator=self.creator)
# Authenticate all requests
self.app.authenticate(*self.creator.auth)
contributor_added.connect(notify_added_contributor)
def test_serialize_unregistered_without_record(self):
name, email = fake.name(), fake.email()
res = serialize_unregistered(fullname=name, email=email)
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
assert_equal(res['id'], None)
assert_false(res['registered'])
assert_true(res['gravatar'])
assert_false(res['active'])
def test_deserialize_contributors(self):
contrib = UserFactory()
unreg = UnregUserFactory()
name, email = fake.name(), fake.email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [
add_contributor_json(contrib),
serialize_unregistered(fake.name(), unreg.username),
unreg_no_record
]
contrib_data[0]['permission'] = 'admin'
contrib_data[1]['permission'] = 'write'
contrib_data[2]['permission'] = 'read'
contrib_data[0]['visible'] = True
contrib_data[1]['visible'] = True
contrib_data[2]['visible'] = True
res = deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator))
assert_equal(len(res), len(contrib_data))
assert_true(res[0]['user'].is_registered)
assert_false(res[1]['user'].is_registered)
assert_true(res[1]['user']._id)
assert_false(res[2]['user'].is_registered)
assert_true(res[2]['user']._id)
def test_deserialize_contributors_validates_fullname(self):
name = "<img src=1 onerror=console.log(1)>"
email = fake.email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = 'admin'
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
def test_deserialize_contributors_validates_email(self):
name = fake.name()
email = "!@#$%%^&*"
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = 'admin'
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_deserialize_contributors_sends_unreg_contributor_added_signal(self, _):
unreg = UnregUserFactory()
from website.project.signals import unreg_contributor_added
serialized = [serialize_unregistered(fake.name(), unreg.username)]
serialized[0]['visible'] = True
with capture_signals() as mock_signals:
deserialize_contributors(self.project, serialized,
auth=Auth(self.creator))
assert_equal(mock_signals.signals_sent(), set([unreg_contributor_added]))
def test_serialize_unregistered_with_record(self):
name, email = fake.name(), fake.email()
user = self.project.add_unregistered_contributor(fullname=name,
email=email, auth=Auth(self.project.creator))
self.project.save()
res = serialize_unregistered(
fullname=name,
email=email
)
assert_false(res['active'])
assert_false(res['registered'])
assert_equal(res['id'], user._primary_key)
assert_true(res['gravatar_url'])
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
def test_add_contributor_with_unreg_contribs_and_reg_contribs(self):
n_contributors_pre = len(self.project.contributors)
reg_user = UserFactory()
name, email = fake.name(), fake.email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(len(self.project.contributors),
n_contributors_pre + len(payload['users']))
new_unreg = auth.get_user(email=email)
assert_false(new_unreg.is_registered)
# unclaimed record was added
new_unreg.reload()
assert_in(self.project._primary_key, new_unreg.unclaimed_records)
rec = new_unreg.get_unclaimed_record(self.project._primary_key)
assert_equal(rec['name'], name)
assert_equal(rec['email'], email)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_add_contributors_post_only_sends_one_email_to_unreg_user(
        self, mock_send_claim_email):
    """Adding one unreg user to a project and two of its components sends
    exactly one claim email."""
    # Project has two components
    comp1, comp2 = NodeFactory(
        creator=self.creator), NodeFactory(creator=self.creator)
    self.project.nodes.append(comp1)
    self.project.nodes.append(comp2)
    self.project.save()
    # An unreg user is added to the project AND its components
    unreg_user = {  # dict because user has no previous unreg record
        'id': None,
        'registered': False,
        'fullname': fake.name(),
        'email': fake.email(),
        'permission': 'admin',
        'visible': True,
    }
    payload = {
        'users': [unreg_user],
        'node_ids': [comp1._primary_key, comp2._primary_key]
    }
    # send request
    url = self.project.api_url_for('project_contributors_post')
    assert_true(self.project.can_edit(user=self.creator))
    self.app.post_json(url, payload, auth=self.creator.auth)
    # finalize_invitation should only have been called once
    assert_equal(mock_send_claim_email.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_only_sends_one_email_to_registered_user(self, mock_send_mail):
# Project has components
comp1 = NodeFactory(creator=self.creator, parent=self.project)
comp2 = NodeFactory(creator=self.creator, parent=self.project)
# A registered user is added to the project AND its components
user = UserFactory()
user_dict = {
'id': user._id,
'fullname': user.fullname,
'email': user.username,
'permission': 'write',
'visible': True}
payload = {
'users': [user_dict],
'node_ids': [comp1._primary_key, comp2._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert self.project.can_edit(user=self.creator)
self.app.post_json(url, payload, auth=self.creator.auth)
# send_mail should only have been called once
assert_equal(mock_send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_sends_email_if_user_not_contributor_on_parent_node(self, mock_send_mail):
# Project has a component with a sub-component
component = NodeFactory(creator=self.creator, parent=self.project)
sub_component = NodeFactory(creator=self.creator, parent=component)
# A registered user is added to the project and the sub-component, but NOT the component
user = UserFactory()
user_dict = {
'id': user._id,
'fullname': user.fullname,
'email': user.username,
'permission': 'write',
'visible': True}
payload = {
'users': [user_dict],
'node_ids': [sub_component._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert self.project.can_edit(user=self.creator)
self.app.post_json(url, payload, auth=self.creator.auth)
# send_mail is called for both the project and the sub-component
assert_equal(mock_send_mail.call_count, 2)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_email_sent_when_unreg_user_is_added(self, send_mail):
    """Adding an unregistered contributor triggers a claim email carrying
    the supplied address."""
    name, email = fake.name(), fake.email()
    pseudouser = {
        'id': None,
        'registered': False,
        'fullname': name,
        'email': email,
        'permission': 'admin',
        'visible': True,
    }
    payload = {
        'users': [pseudouser],
        'node_ids': []
    }
    url = self.project.api_url_for('project_contributors_post')
    self.app.post_json(url, payload).maybe_follow()
    assert_true(send_mail.called)
    # BUG FIX: mock.called_with(...) is NOT an assertion — it creates a new
    # child mock and is always truthy. Inspect the recorded call instead.
    args, kwargs = send_mail.call_args
    assert_in(email, list(args) + list(kwargs.values()))
@mock.patch('website.mails.send_mail')
def test_email_sent_when_reg_user_is_added(self, send_mail):
contributor = UserFactory()
contributors = [{
'user': contributor,
'visible': True,
'permissions': ['read', 'write']
}]
project = ProjectFactory()
project.add_contributors(contributors, auth=Auth(self.project.creator))
project.save()
assert_true(send_mail.called)
send_mail.assert_called_with(
contributor.username,
mails.CONTRIBUTOR_ADDED,
user=contributor,
node=project)
assert_equal(contributor.contributor_added_email_records[project._id]['last_sent'], int(time.time()))
@mock.patch('website.mails.send_mail')
def test_contributor_added_email_not_sent_to_unreg_user(self, send_mail):
unreg_user = UnregUserFactory()
contributors = [{
'user': unreg_user,
'visible': True,
'permissions': ['read', 'write']
}]
project = ProjectFactory()
project.add_contributors(contributors, auth=Auth(self.project.creator))
project.save()
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_forking_project_does_not_send_contributor_added_email(self, send_mail):
project = ProjectFactory()
project.fork_node(auth=Auth(project.creator))
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_templating_project_does_not_send_contributor_added_email(self, send_mail):
project = ProjectFactory()
project.use_as_template(auth=Auth(project.creator))
assert_false(send_mail.called)
@mock.patch('website.archiver.tasks.archive')
@mock.patch('website.mails.send_mail')
def test_registering_project_does_not_send_contributor_added_email(self, send_mail, mock_archive):
project = ProjectFactory()
project.register_node(None, Auth(user=project.creator), '', None)
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_does_not_send_before_throttle_expires(self, send_mail):
contributor = UserFactory()
project = ProjectFactory()
notify_added_contributor(project, contributor)
assert_true(send_mail.called)
# 2nd call does not send email because throttle period has not expired
notify_added_contributor(project, contributor)
assert_equal(send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_sends_after_throttle_expires(self, send_mail):
throttle = 0.5
contributor = UserFactory()
project = ProjectFactory()
notify_added_contributor(project, contributor, throttle=throttle)
assert_true(send_mail.called)
time.sleep(1) # throttle period expires
notify_added_contributor(project, contributor, throttle=throttle)
assert_equal(send_mail.call_count, 2)
def test_add_multiple_contributors_only_adds_one_log(self):
n_logs_pre = len(self.project.logs)
reg_user = UserFactory()
name = fake.name()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': fake.email(),
'permission': 'write',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(len(self.project.logs), n_logs_pre + 1)
def test_add_contribs_to_multiple_nodes(self):
child = NodeFactory(parent=self.project, creator=self.creator)
n_contributors_pre = len(child.contributors)
reg_user = UserFactory()
name, email = fake.name(), fake.email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': [self.project._primary_key, child._primary_key]
}
url = "/api/v1/project/{0}/contributors/".format(self.project._id)
self.app.post_json(url, payload).maybe_follow()
child.reload()
assert_equal(len(child.contributors),
n_contributors_pre + len(payload['users']))
def tearDown(self):
super(TestAddingContributorViews, self).tearDown()
contributor_added.disconnect(notify_added_contributor)
class TestUserInviteViews(OsfTestCase):
def setUp(self):
super(TestUserInviteViews, self).setUp()
ensure_schemas()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.invite_url = '/api/v1/project/{0}/invite_contributor/'.format(
self.project._primary_key)
def test_invite_contributor_post_if_not_in_db(self):
name, email = fake.name(), fake.email()
res = self.app.post_json(
self.invite_url,
{'fullname': name, 'email': email},
auth=self.user.auth,
)
contrib = res.json['contributor']
assert_true(contrib['id'] is None)
assert_equal(contrib['fullname'], name)
assert_equal(contrib['email'], email)
def test_invite_contributor_post_if_unreg_already_in_db(self):
    """Inviting an email that already has an unregistered record on another
    project reuses that record."""
    # An unregistered user is added to a different project
    name, email = fake.name(), fake.email()
    project2 = ProjectFactory()
    unreg_user = project2.add_unregistered_contributor(fullname=name, email=email,
                                                       auth=Auth(project2.creator))
    project2.save()
    res = self.app.post_json(self.invite_url,
                             {'fullname': name, 'email': email}, auth=self.user.auth)
    expected = add_contributor_json(unreg_user)
    expected['fullname'] = name
    expected['email'] = email
    assert_equal(res.json['contributor'], expected)
def test_invite_contributor_post_if_emaiL_already_registered(self):
    """Inviting an already-registered email is rejected with 400."""
    # NOTE(review): the method name contains a typo ("emaiL"); left
    # unchanged so selection/discovery by test name keeps working.
    reg_user = UserFactory()
    # Tries to invite user that is already registered
    res = self.app.post_json(self.invite_url,
                             {'fullname': fake.name(), 'email': reg_user.username},
                             auth=self.user.auth, expect_errors=True)
    assert_equal(res.status_code, http.BAD_REQUEST)
def test_invite_contributor_post_if_user_is_already_contributor(self):
unreg_user = self.project.add_unregistered_contributor(
fullname=fake.name(), email=fake.email(),
auth=Auth(self.project.creator)
)
self.project.save()
# Tries to invite unreg user that is already a contributor
res = self.app.post_json(self.invite_url,
{'fullname': fake.name(), 'email': unreg_user.username},
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_invite_contributor_with_no_email(self):
name = fake.name()
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': None}, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
data = res.json
assert_equal(data['status'], 'success')
assert_equal(data['contributor']['fullname'], name)
assert_true(data['contributor']['email'] is None)
assert_false(data['contributor']['registered'])
def test_invite_contributor_requires_fullname(self):
    """An invitation with an empty fullname is rejected with a 400."""
    res = self.app.post_json(self.invite_url,
                             {'email': 'brian@queen.com', 'fullname': ''}, auth=self.user.auth,
                             expect_errors=True)
    assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_given_email(self, send_mail):
    """send_claim_email sends the invite mail to the address the referrer gave."""
    project = ProjectFactory()
    given_email = fake.email()
    unreg_user = project.add_unregistered_contributor(
        fullname=fake.name(),
        email=given_email,
        auth=Auth(project.creator),
    )
    project.save()
    send_claim_email(email=given_email, user=unreg_user, node=project)
    assert_true(send_mail.called)
    # NOTE(review): Mock has no `called_with` method; accessing it creates a
    # child mock and calling it returns a truthy Mock, so this assertion can
    # never fail. Consider `send_mail.assert_called_with(...)` or inspecting
    # `send_mail.call_args` instead.
    assert_true(send_mail.called_with(
        to_addr=given_email,
        mail=mails.INVITE
    ))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_referrer(self, send_mail):
    """When the claimed email differs from the invited one, the claim mail
    is forwarded to the referrer for verification.
    """
    project = ProjectFactory()
    referrer = project.creator
    given_email, real_email = fake.email(), fake.email()
    unreg_user = project.add_unregistered_contributor(fullname=fake.name(),
                                                      email=given_email, auth=Auth(
                                                          referrer)
                                                      )
    project.save()
    send_claim_email(email=real_email, user=unreg_user, node=project)
    assert_true(send_mail.called)
    # email was sent to referrer
    # NOTE(review): Mock has no `called_with` method; this assertion is always
    # truthy. Consider `send_mail.assert_called_with(...)` or inspecting
    # `send_mail.call_args` instead.
    assert_true(send_mail.called_with(
        to_addr=referrer.username,
        mail=mails.FORWARD_INVITE
    ))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_before_throttle_expires(self, send_mail):
    """A second claim email inside the throttle window raises HTTPError and
    sends no additional mail.
    """
    project = ProjectFactory()
    given_email = fake.email()
    unreg_user = project.add_unregistered_contributor(
        fullname=fake.name(),
        email=given_email,
        auth=Auth(project.creator),
    )
    project.save()
    send_claim_email(email=fake.email(), user=unreg_user, node=project)
    # The first (un-throttled) call sends mail; forget it so we can verify
    # the throttled call sends nothing. The original ended with
    # `send_mail.assert_not_called()`, which could never hold after the first
    # call above (and was a silent no-op on old mock versions).
    assert_true(send_mail.called)
    send_mail.reset_mock()
    # 2nd call raises error because throttle hasn't expired
    with assert_raises(HTTPError):
        send_claim_email(email=fake.email(), user=unreg_user, node=project)
    assert_false(send_mail.called)
class TestClaimViews(OsfTestCase):
    """Endpoints for claiming an unregistered-contributor account."""

    def setUp(self):
        super(TestClaimViews, self).setUp()
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)
        self.given_name = fake.name()
        self.given_email = fake.email()
        # Unregistered contributor invited to the project by the referrer.
        self.user = self.project.add_unregistered_contributor(
            fullname=self.given_name,
            email=self.given_email,
            auth=Auth(user=self.referrer)
        )
        self.project.save()

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_claim_user_post_with_registered_user_id(self, send_mail):
        # registered user who is attempting to claim the unclaimed contributor
        reg_user = UserFactory()
        payload = {
            # pk of unreg user record
            'pk': self.user._primary_key,
            'claimerId': reg_user._primary_key
        }
        url = '/api/v1/user/{uid}/{pid}/claim/email/'.format(
            uid=self.user._primary_key,
            pid=self.project._primary_key,
        )
        res = self.app.post_json(url, payload)
        # mail was sent
        assert_true(send_mail.called)
        # ... to the correct address
        # NOTE(review): `called_with` is not a real Mock assertion; this check
        # is always truthy. Inspect `send_mail.call_args` instead.
        assert_true(send_mail.called_with(to_addr=self.given_email))
        # view returns the correct JSON
        assert_equal(res.json, {
            'status': 'success',
            'email': reg_user.username,
            'fullname': self.given_name,
        })

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_send_claim_registered_email(self, mock_send_mail):
        reg_user = UserFactory()
        send_claim_registered_email(
            claimer=reg_user,
            unreg_user=self.user,
            node=self.project
        )
        mock_send_mail.assert_called()
        assert_equal(mock_send_mail.call_count, 2)
        # First mail goes to the referrer, second to the claiming user.
        first_call_args = mock_send_mail.call_args_list[0][0]
        assert_equal(first_call_args[0], self.referrer.username)
        second_call_args = mock_send_mail.call_args_list[1][0]
        assert_equal(second_call_args[0], reg_user.username)

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_send_claim_registered_email_before_throttle_expires(self, mock_send_mail):
        reg_user = UserFactory()
        send_claim_registered_email(
            claimer=reg_user,
            unreg_user=self.user,
            node=self.project,
        )
        n_sent = mock_send_mail.call_count
        # second call raises error because it was called before throttle period
        with assert_raises(HTTPError):
            send_claim_registered_email(
                claimer=reg_user,
                unreg_user=self.user,
                node=self.project,
            )
        # The throttled call must send no additional mail. (The original
        # `mock_send_mail.assert_not_called()` could never hold: the first
        # call above already sends mail; it only "passed" because old mock
        # versions treated unknown assert_* attributes as no-ops.)
        assert_equal(mock_send_mail.call_count, n_sent)

    @mock.patch('website.project.views.contributor.send_claim_registered_email')
    def test_claim_user_post_with_email_already_registered_sends_correct_email(
            self, send_claim_registered_email):
        reg_user = UserFactory()
        payload = {
            'value': reg_user.username,
            'pk': self.user._primary_key
        }
        url = self.project.api_url_for('claim_user_post', uid=self.user._id)
        self.app.post_json(url, payload)
        assert_true(send_claim_registered_email.called)

    def test_user_with_removed_unclaimed_url_claiming(self):
        """ Tests that when an unclaimed user is removed from a project, the
        unregistered user object does not retain the token.
        """
        self.project.remove_contributor(self.user, Auth(user=self.referrer))
        assert_not_in(
            self.project._primary_key,
            self.user.unclaimed_records.keys()
        )

    def test_user_with_claim_url_cannot_claim_twice(self):
        """ Tests that when an unclaimed user is replaced on a project with a
        claimed user, the unregistered user object does not retain the token.
        """
        reg_user = AuthUserFactory()
        self.project.replace_contributor(self.user, reg_user)
        assert_not_in(
            self.project._primary_key,
            self.user.unclaimed_records.keys()
        )

    def test_claim_user_form_redirects_to_password_confirm_page_if_user_is_logged_in(self):
        reg_user = AuthUserFactory()
        url = self.user.get_claim_url(self.project._primary_key)
        res = self.app.get(url, auth=reg_user.auth)
        assert_equal(res.status_code, 302)
        res = res.follow(auth=reg_user.auth)
        token = self.user.get_unclaimed_record(self.project._primary_key)['token']
        expected = self.project.web_url_for(
            'claim_user_registered',
            uid=self.user._id,
            token=token,
        )
        assert_equal(res.request.path, expected)

    def test_get_valid_form(self):
        url = self.user.get_claim_url(self.project._primary_key)
        res = self.app.get(url).maybe_follow()
        assert_equal(res.status_code, 200)

    def test_invalid_claim_form_redirects_to_register_page(self):
        uid = self.user._primary_key
        pid = self.project._primary_key
        url = '/user/{uid}/{pid}/claim/?token=badtoken'.format(**locals())
        res = self.app.get(url, expect_errors=True).maybe_follow()
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, web_url_for('auth_login'))

    def test_posting_to_claim_form_with_valid_data(self):
        url = self.user.get_claim_url(self.project._primary_key)
        res = self.app.post(url, {
            'username': self.user.username,
            'password': 'killerqueen',
            'password2': 'killerqueen'
        }).maybe_follow()
        assert_equal(res.status_code, 200)
        self.user.reload()
        assert_true(self.user.is_registered)
        assert_true(self.user.is_active)
        assert_not_in(self.project._primary_key, self.user.unclaimed_records)

    def test_posting_to_claim_form_removes_all_unclaimed_data(self):
        # user has multiple unclaimed records
        p2 = ProjectFactory(creator=self.referrer)
        self.user.add_unclaimed_record(node=p2, referrer=self.referrer,
                                       given_name=fake.name())
        self.user.save()
        assert_true(len(self.user.unclaimed_records.keys()) > 1)  # sanity check
        url = self.user.get_claim_url(self.project._primary_key)
        self.app.post(url, {
            'username': self.given_email,
            'password': 'bohemianrhap',
            'password2': 'bohemianrhap'
        })
        self.user.reload()
        assert_equal(self.user.unclaimed_records, {})

    def test_posting_to_claim_form_sets_fullname_to_given_name(self):
        # User is created with a full name
        original_name = fake.name()
        unreg = UnregUserFactory(fullname=original_name)
        # User invited with a different name
        different_name = fake.name()
        new_user = self.project.add_unregistered_contributor(
            email=unreg.username,
            fullname=different_name,
            auth=Auth(self.project.creator),
        )
        self.project.save()
        # Goes to claim url
        claim_url = new_user.get_claim_url(self.project._id)
        self.app.post(claim_url, {
            'username': unreg.username,
            'password': 'killerqueen', 'password2': 'killerqueen'
        })
        unreg.reload()
        # Full name was set correctly
        assert_equal(unreg.fullname, different_name)
        # CSL names were set correctly
        parsed_name = impute_names_model(different_name)
        assert_equal(unreg.given_name, parsed_name['given_name'])
        assert_equal(unreg.family_name, parsed_name['family_name'])

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_claim_user_post_returns_fullname(self, send_mail):
        url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
                                                         self.project._primary_key)
        res = self.app.post_json(url,
                                 {'value': self.given_email,
                                  'pk': self.user._primary_key},
                                 auth=self.referrer.auth)
        assert_equal(res.json['fullname'], self.given_name)
        assert_true(send_mail.called)
        # NOTE(review): `called_with` is not a real Mock assertion; this check
        # is always truthy. Inspect `send_mail.call_args` instead.
        assert_true(send_mail.called_with(to_addr=self.given_email))

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_claim_user_post_if_email_is_different_from_given_email(self, send_mail):
        email = fake.email()  # email that is different from the one the referrer gave
        url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
                                                         self.project._primary_key)
        self.app.post_json(url,
                           {'value': email, 'pk': self.user._primary_key}
                           )
        assert_true(send_mail.called)
        assert_equal(send_mail.call_count, 2)
        # NOTE(review): `call.called_with(...)` is always truthy; the two
        # checks below do not actually verify the recipients.
        call_to_invited = send_mail.mock_calls[0]
        assert_true(call_to_invited.called_with(
            to_addr=email
        ))
        call_to_referrer = send_mail.mock_calls[1]
        assert_true(call_to_referrer.called_with(
            to_addr=self.given_email
        ))

    def test_claim_url_with_bad_token_returns_400(self):
        url = self.project.web_url_for(
            'claim_user_registered',
            uid=self.user._id,
            token='badtoken',
        )
        # `expect_errors` is a boolean flag; the original passed `400`, which
        # only worked because any non-zero int is truthy.
        res = self.app.get(url, auth=self.referrer.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_cannot_claim_user_with_user_who_is_already_contributor(self):
        # user who is already a contributor to the project
        contrib = AuthUserFactory()
        self.project.add_contributor(contrib, auth=Auth(self.project.creator))
        self.project.save()
        # Claiming user goes to claim url, but contrib is already logged in
        url = self.user.get_claim_url(self.project._primary_key)
        res = self.app.get(
            url,
            auth=contrib.auth,
        ).follow(
            auth=contrib.auth,
            expect_errors=True,
        )
        # Response is a 400
        assert_equal(res.status_code, 400)
class TestWatchViews(OsfTestCase):
    """Watch/unwatch endpoints and the watched-logs feed."""

    def setUp(self):
        super(TestWatchViews, self).setUp()
        self.user = AuthUserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.auth = self.user.auth  # used for requests auth
        # A public project
        self.project = ProjectFactory(is_public=True)
        self.project.save()
        # Manually reset log date to 100 days ago so it won't show up in feed
        self.project.logs[0].date = dt.datetime.utcnow() - dt.timedelta(days=100)
        self.project.logs[0].save()
        # A log added now
        self.last_log = self.project.add_log(
            NodeLog.TAG_ADDED,
            params={'node': self.project._primary_key},
            auth=self.consolidate_auth,
            log_date=dt.datetime.utcnow(),
            save=True,
        )
        # Clear watched list
        self.user.watched = []
        self.user.save()

    def test_watching_a_project_appends_to_users_watched_list(self):
        n_watched_then = len(self.user.watched)
        url = '/api/v1/project/{0}/watch/'.format(self.project._id)
        res = self.app.post_json(url,
                                 params={"digest": True},
                                 auth=self.auth)
        assert_equal(res.json['watchCount'], 1)
        self.user.reload()
        n_watched_now = len(self.user.watched)
        assert_equal(res.status_code, 200)
        assert_equal(n_watched_now, n_watched_then + 1)
        assert_true(self.user.watched[-1].digest)

    def test_watching_project_twice_returns_400(self):
        url = "/api/v1/project/{0}/watch/".format(self.project._id)
        res = self.app.post_json(url,
                                 params={},
                                 auth=self.auth)
        assert_equal(res.status_code, 200)
        # User tries to watch a node she's already watching
        res2 = self.app.post_json(url,
                                  params={},
                                  auth=self.auth,
                                  expect_errors=True)
        assert_equal(res2.status_code, http.BAD_REQUEST)

    def test_unwatching_a_project_removes_from_watched_list(self):
        # The user has already watched a project
        watch_config = WatchConfigFactory(node=self.project)
        self.user.watch(watch_config)
        self.user.save()
        n_watched_then = len(self.user.watched)
        url = '/api/v1/project/{0}/unwatch/'.format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        self.user.reload()
        n_watched_now = len(self.user.watched)
        assert_equal(res.status_code, 200)
        assert_equal(n_watched_now, n_watched_then - 1)
        assert_false(self.user.is_watching(self.project))

    def test_toggle_watch(self):
        # The user is not watching project
        assert_false(self.user.is_watching(self.project))
        url = "/api/v1/project/{0}/togglewatch/".format(self.project._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        # The response json has a watchcount and watched property
        assert_equal(res.json['watchCount'], 1)
        assert_true(res.json['watched'])
        assert_equal(res.status_code, 200)
        self.user.reload()
        # The user is now watching the project
        assert_true(res.json['watched'])
        assert_true(self.user.is_watching(self.project))

    def test_toggle_watch_node(self):
        # The project has a public sub-node
        node = NodeFactory(creator=self.user, parent=self.project, is_public=True)
        url = "/api/v1/project/{}/node/{}/togglewatch/".format(self.project._id,
                                                               node._id)
        res = self.app.post_json(url, {}, auth=self.auth)
        assert_equal(res.status_code, 200)
        self.user.reload()
        # The user is now watching the sub-node
        assert_true(res.json['watched'])
        assert_true(self.user.is_watching(node))

    # NOTE(review): an earlier, weaker definition of `test_get_watched_logs`
    # was shadowed by this one (duplicate method name in the class body) and
    # therefore never ran; the duplicate has been removed.
    def test_get_watched_logs(self):
        project = ProjectFactory()
        # Add some logs
        for _ in range(12):
            project.logs.append(NodeLogFactory(user=self.user, action="file_added"))
        project.save()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for("watched_logs_get")
        res = self.app.get(url, auth=self.auth)
        assert_equal(len(res.json['logs']), 10)
        # 1 project create log then 12 generated logs
        assert_equal(res.json['total'], 12 + 1)
        assert_equal(res.json['page'], 0)
        assert_equal(res.json['pages'], 2)
        assert_equal(res.json['logs'][0]['action'], 'file_added')

    def test_get_more_watched_logs(self):
        project = ProjectFactory()
        # Add some logs
        for _ in range(12):
            project.logs.append(NodeLogFactory(user=self.user, action="file_added"))
        project.save()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for("watched_logs_get")
        page = 1
        res = self.app.get(url, {'page': page}, auth=self.auth)
        assert_equal(len(res.json['logs']), 3)
        # 1 project create log then 12 generated logs
        assert_equal(res.json['total'], 12 + 1)
        assert_equal(res.json['page'], page)
        assert_equal(res.json['pages'], 2)
        assert_equal(res.json['logs'][0]['action'], 'file_added')

    def test_get_more_watched_logs_invalid_page(self):
        project = ProjectFactory()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for("watched_logs_get")
        invalid_page = 'invalid page'
        res = self.app.get(
            url, {'page': invalid_page}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "page".'
        )

    def test_get_more_watched_logs_invalid_size(self):
        project = ProjectFactory()
        watch_cfg = WatchConfigFactory(node=project)
        self.user.watch(watch_cfg)
        self.user.save()
        url = api_url_for("watched_logs_get")
        invalid_size = 'invalid size'
        res = self.app.get(
            url, {'size': invalid_size}, auth=self.auth, expect_errors=True
        )
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['message_long'],
            'Invalid value for "size".'
        )
class TestPointerViews(OsfTestCase):
    """Endpoints for adding, moving, removing, and forking pointers (links)."""

    def setUp(self):
        super(TestPointerViews, self).setUp()
        self.user = AuthUserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)

    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1109
    def test_get_pointed_excludes_folders(self):
        pointer_project = ProjectFactory(is_public=True)  # project that points to another project
        pointed_project = ProjectFactory(creator=self.user)  # project that other project points to
        pointer_project.add_pointer(pointed_project, Auth(pointer_project.creator), save=True)
        # Project is in a dashboard folder
        folder = FolderFactory(creator=pointed_project.creator)
        folder.add_pointer(pointed_project, Auth(pointed_project.creator), save=True)
        url = pointed_project.api_url_for('get_pointed')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        # pointer_project's id is included in response, but folder's id is not
        pointer_ids = [each['id'] for each in res.json['pointed']]
        assert_in(pointer_project._id, pointer_ids)
        assert_not_in(folder._id, pointer_ids)

    def test_add_pointers(self):
        url = self.project.api_url + 'pointer/'
        node_ids = [
            NodeFactory()._id
            for _ in range(5)
        ]
        self.app.post_json(
            url,
            {'nodeIds': node_ids},
            auth=self.user.auth,
        ).maybe_follow()
        self.project.reload()
        assert_equal(
            len(self.project.nodes),
            5
        )

    def test_add_the_same_pointer_more_than_once(self):
        url = self.project.api_url + 'pointer/'
        double_node = NodeFactory()
        self.app.post_json(
            url,
            {'nodeIds': [double_node._id]},
            auth=self.user.auth,
        )
        res = self.app.post_json(
            url,
            {'nodeIds': [double_node._id]},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    def test_add_pointers_no_user_logged_in(self):
        # Renamed from `test_add_pointers_no_user_logg_in` (typo).
        url = self.project.api_url_for('add_pointers')
        node_ids = [
            NodeFactory()._id
            for _ in range(5)
        ]
        res = self.app.post_json(
            url,
            {'nodeIds': node_ids},
            auth=None,
            expect_errors=True
        )
        assert_equal(res.status_code, 401)

    def test_add_pointers_public_non_contributor(self):
        project2 = ProjectFactory()
        project2.set_privacy('public')
        project2.save()
        url = self.project.api_url_for('add_pointers')
        self.app.post_json(
            url,
            {'nodeIds': [project2._id]},
            auth=self.user.auth,
        ).maybe_follow()
        self.project.reload()
        assert_equal(
            len(self.project.nodes),
            1
        )

    def test_add_pointers_contributor(self):
        user2 = AuthUserFactory()
        self.project.add_contributor(user2)
        self.project.save()
        url = self.project.api_url_for('add_pointers')
        node_ids = [
            NodeFactory()._id
            for _ in range(5)
        ]
        self.app.post_json(
            url,
            {'nodeIds': node_ids},
            auth=user2.auth,
        ).maybe_follow()
        self.project.reload()
        assert_equal(
            len(self.project.nodes),
            5
        )

    def test_add_pointers_not_provided(self):
        url = self.project.api_url + 'pointer/'
        res = self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_move_pointers(self):
        project_two = ProjectFactory(creator=self.user)
        url = api_url_for('move_pointers')
        node = NodeFactory()
        pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
        assert_equal(len(self.project.nodes), 1)
        assert_equal(len(project_two.nodes), 0)
        user_auth = self.user.auth
        move_request = \
            {
                'fromNodeId': self.project._id,
                'toNodeId': project_two._id,
                'pointerIds': [pointer.node._id],
            }
        self.app.post_json(
            url,
            move_request,
            auth=user_auth,
        ).maybe_follow()
        self.project.reload()
        project_two.reload()
        assert_equal(len(self.project.nodes), 0)
        assert_equal(len(project_two.nodes), 1)

    def test_remove_pointer(self):
        url = self.project.api_url + 'pointer/'
        node = NodeFactory()
        pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
        self.app.delete_json(
            url,
            {'pointerId': pointer._id},
            auth=self.user.auth,
        )
        self.project.reload()
        assert_equal(
            len(self.project.nodes),
            0
        )

    def test_remove_pointer_not_provided(self):
        url = self.project.api_url + 'pointer/'
        res = self.app.delete_json(url, {}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_remove_pointer_not_found(self):
        url = self.project.api_url + 'pointer/'
        res = self.app.delete_json(
            url,
            {'pointerId': None},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    def test_remove_pointer_not_in_nodes(self):
        url = self.project.api_url + 'pointer/'
        node = NodeFactory()
        pointer = Pointer(node=node)
        res = self.app.delete_json(
            url,
            {'pointerId': pointer._id},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    def test_fork_pointer(self):
        url = self.project.api_url + 'pointer/fork/'
        node = NodeFactory(creator=self.user)
        pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
        self.app.post_json(
            url,
            {'pointerId': pointer._id},
            auth=self.user.auth
        )

    def test_fork_pointer_not_provided(self):
        url = self.project.api_url + 'pointer/fork/'
        res = self.app.post_json(url, {}, auth=self.user.auth,
                                 expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_fork_pointer_not_found(self):
        url = self.project.api_url + 'pointer/fork/'
        res = self.app.post_json(
            url,
            {'pointerId': None},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    def test_fork_pointer_not_in_nodes(self):
        url = self.project.api_url + 'pointer/fork/'
        node = NodeFactory()
        pointer = Pointer(node=node)
        res = self.app.post_json(
            url,
            {'pointerId': pointer._id},
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)

    # NOTE(review): the four `test_before_*` tests below were renamed — the
    # original names had "fork" and "register" swapped relative to the URLs
    # they hit ('fork/before/' is the before-fork callback, 'beforeregister/'
    # the before-register callback) and the prompts they assert on.
    def test_before_fork_with_pointer(self):
        """Link warning appears in the before-fork callback."""
        node = NodeFactory()
        self.project.add_pointer(node, auth=self.consolidate_auth)
        url = self.project.api_url + 'fork/before/'
        res = self.app.get(url, auth=self.user.auth).maybe_follow()
        prompts = [
            prompt
            for prompt in res.json['prompts']
            if 'Links will be copied into your fork' in prompt
        ]
        assert_equal(len(prompts), 1)

    def test_before_register_with_pointer(self):
        """Link warning appears in the before-register callback."""
        node = NodeFactory()
        self.project.add_pointer(node, auth=self.consolidate_auth)
        url = self.project.api_url + 'beforeregister/'
        res = self.app.get(url, auth=self.user.auth).maybe_follow()
        prompts = [
            prompt
            for prompt in res.json['prompts']
            if 'Links will be copied into your registration' in prompt
        ]
        assert_equal(len(prompts), 1)

    def test_before_fork_no_pointer(self):
        """No link warning in the before-fork callback without pointers."""
        url = self.project.api_url + 'fork/before/'
        res = self.app.get(url, auth=self.user.auth).maybe_follow()
        prompts = [
            prompt
            for prompt in res.json['prompts']
            if 'Links will be copied into your fork' in prompt
        ]
        assert_equal(len(prompts), 0)

    def test_before_register_no_pointer(self):
        """No link warning in the before-register callback without pointers."""
        url = self.project.api_url + 'beforeregister/'
        res = self.app.get(url, auth=self.user.auth).maybe_follow()
        prompts = [
            prompt
            for prompt in res.json['prompts']
            if 'Links will be copied into your registration' in prompt
        ]
        assert_equal(len(prompts), 0)

    def test_get_pointed(self):
        pointing_node = ProjectFactory(creator=self.user)
        pointing_node.add_pointer(self.project, auth=Auth(self.user))
        url = self.project.api_url_for('get_pointed')
        res = self.app.get(url, auth=self.user.auth)
        pointed = res.json['pointed']
        assert_equal(len(pointed), 1)
        assert_equal(pointed[0]['url'], pointing_node.url)
        assert_equal(pointed[0]['title'], pointing_node.title)
        assert_equal(pointed[0]['authorShort'], abbrev_authors(pointing_node))

    def test_get_pointed_private(self):
        secret_user = UserFactory()
        pointing_node = ProjectFactory(creator=secret_user)
        pointing_node.add_pointer(self.project, auth=Auth(secret_user))
        url = self.project.api_url_for('get_pointed')
        res = self.app.get(url, auth=self.user.auth)
        pointed = res.json['pointed']
        assert_equal(len(pointed), 1)
        assert_equal(pointed[0]['url'], None)
        assert_equal(pointed[0]['title'], 'Private Component')
        assert_equal(pointed[0]['authorShort'], 'Private Author(s)')
class TestPublicViews(OsfTestCase):
    """Smoke tests for pages that require no authentication."""

    def test_explore(self):
        response = self.app.get("/explore/").maybe_follow()
        assert_equal(response.status_code, 200)

    def test_forgot_password_get(self):
        response = self.app.get(web_url_for('forgot_password_get'))
        assert_equal(response.status_code, 200)
        assert_in('Forgot Password', response.body)
class TestAuthViews(OsfTestCase):
    """Registration, email-confirmation, and account-merge view tests."""

    def setUp(self):
        super(TestAuthViews, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth

    def test_merge_user(self):
        """Merging a duplicate account marks the duplicate as merged."""
        dupe = UserFactory(
            username="copy@cat.com",
            emails=['copy@cat.com']
        )
        dupe.set_password("copycat")
        dupe.save()
        url = "/api/v1/user/merge/"
        self.app.post_json(
            url,
            {
                "merged_username": "copy@cat.com",
                "merged_password": "copycat"
            },
            auth=self.auth,
        )
        self.user.reload()
        dupe.reload()
        assert_true(dupe.is_merged)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_register_sends_confirm_email(self, send_mail):
        url = '/register/'
        self.app.post(url, {
            'register-fullname': 'Freddie Mercury',
            'register-username': 'fred@queen.com',
            'register-password': 'killerqueen',
            'register-username2': 'fred@queen.com',
            'register-password2': 'killerqueen',
        })
        assert_true(send_mail.called)
        # NOTE(review): Mock has no `called_with` method; this assertion is
        # always truthy. Consider `send_mail.assert_called_with(...)` or
        # inspecting `send_mail.call_args` instead.
        assert_true(send_mail.called_with(
            to_addr='fred@queen.com'
        ))

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_register_ok(self, _):
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': email,
                'password': password,
            }
        )
        user = User.find_one(Q('username', 'eq', email))
        assert_equal(user.fullname, name)

    # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2902
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_register_email_case_insensitive(self, _):
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': str(email).upper(),
                'password': password,
            }
        )
        user = User.find_one(Q('username', 'eq', email))
        assert_equal(user.fullname, name)

    @mock.patch('framework.auth.views.send_confirm_email')
    def test_register_scrubs_username(self, _):
        """HTML/script markup in the submitted full name is stripped."""
        url = api_url_for('register_user')
        name = "<i>Eunice</i> O' \"Cornwallis\"<script type='text/javascript' src='http://www.cornify.com/js/cornify.js'></script><script type='text/javascript'>cornify_add()</script>"
        email, password = fake.email(), 'underpressure'
        res = self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': email,
                'password': password,
            }
        )
        expected_scrub_username = "Eunice O' \"Cornwallis\"cornify_add()"
        user = User.find_one(Q('username', 'eq', email))
        assert_equal(res.status_code, http.OK)
        assert_equal(user.fullname, expected_scrub_username)

    def test_register_email_mismatch(self):
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        res = self.app.post_json(
            url,
            {
                'fullName': name,
                'email1': email,
                'email2': email + 'lol',
                'password': password,
            },
            expect_errors=True,
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
        # No user record was created for the mismatched registration.
        users = User.find(Q('username', 'eq', email))
        assert_equal(users.count(), 0)

    def test_register_after_being_invited_as_unreg_contributor(self):
        # Regression test for:
        # https://github.com/CenterForOpenScience/openscienceframework.org/issues/861
        # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1021
        # https://github.com/CenterForOpenScience/openscienceframework.org/issues/1026
        # A user is invited as an unregistered contributor
        project = ProjectFactory()
        name, email = fake.name(), fake.email()
        project.add_unregistered_contributor(fullname=name, email=email,
                                             auth=Auth(project.creator))
        project.save()
        # The new, unregistered user
        new_user = User.find_one(Q('username', 'eq', email))
        # Instead of following the invitation link, they register at the regular
        # registration page
        # They use a different name when they register, but same email
        real_name = fake.name()
        password = 'myprecious'
        url = api_url_for('register_user')
        payload = {
            'fullName': real_name,
            'email1': email,
            'email2': email,
            'password': password,
        }
        # Send registration request
        self.app.post_json(url, payload)
        new_user.reload()
        # New user confirms by following confirmation link
        confirm_url = new_user.get_confirmation_url(email, external=False)
        self.app.get(confirm_url)
        new_user.reload()
        # Password and fullname should be updated
        assert_true(new_user.is_confirmed)
        assert_true(new_user.check_password(password))
        assert_equal(new_user.fullname, real_name)

    @mock.patch('framework.auth.views.send_confirm_email')
    def test_register_sends_user_registered_signal(self, mock_send_confirm_email):
        url = api_url_for('register_user')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        with capture_signals() as mock_signals:
            self.app.post_json(
                url,
                {
                    'fullName': name,
                    'email1': email,
                    'email2': email,
                    'password': password,
                }
            )
        assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered]))
        mock_send_confirm_email.assert_called()

    @mock.patch('framework.auth.views.send_confirm_email')
    def test_register_post_sends_user_registered_signal(self, mock_send_confirm_email):
        url = web_url_for('auth_register_post')
        name, email, password = fake.name(), fake.email(), 'underpressure'
        with capture_signals() as mock_signals:
            self.app.post(url, {
                'register-fullname': name,
                'register-username': email,
                'register-password': password,
                'register-username2': email,
                'register-password2': password
            })
        assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered]))
        mock_send_confirm_email.assert_called()

    def test_resend_confirmation_get(self):
        res = self.app.get('/resend/')
        assert_equal(res.status_code, 200)

    @mock.patch('framework.auth.views.mails.send_mail')
    def test_resend_confirmation(self, send_mail):
        email = 'test@example.com'
        token = self.user.add_unconfirmed_email(email)
        self.user.save()
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': False}
        self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth)
        assert_true(send_mail.called)
        # NOTE(review): `called_with` is not a real Mock assertion; this check
        # is always truthy. Inspect `send_mail.call_args` instead.
        assert_true(send_mail.called_with(
            to_addr=email
        ))
        self.user.reload()
        # Resending rotates the confirmation token; the old one is revoked.
        assert_not_equal(token, self.user.get_confirmation_token(email))
        with assert_raises(InvalidTokenError):
            self.user._get_unconfirmed_email_for_token(token)

    def test_resend_confirmation_without_user_id(self):
        email = 'test@example.com'
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': False}
        res = self.app.put_json(url, {'email': header}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(res.json['message_long'], '"id" is required')

    def test_resend_confirmation_without_email(self):
        url = api_url_for('resend_confirmation')
        res = self.app.put_json(url, {'id': self.user._id}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_resend_confirmation_not_work_for_primary_email(self):
        email = 'test@example.com'
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': True, 'confirmed': False}
        res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        # Expected text matches the server-side message verbatim, including
        # the "Cannnot" typo — do not "fix" it here without fixing the view.
        assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')

    def test_resend_confirmation_not_work_for_confirmed_email(self):
        email = 'test@example.com'
        url = api_url_for('resend_confirmation')
        header = {'address': email, 'primary': False, 'confirmed': True}
        res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        # See note above about the intentional "Cannnot" typo.
        assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')

    def test_confirm_email_clears_unclaimed_records_and_revokes_token(self):
        unclaimed_user = UnconfirmedUserFactory()
        # unclaimed user has been invited to a project.
        referrer = UserFactory()
        project = ProjectFactory(creator=referrer)
        unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
        unclaimed_user.save()
        # sanity check
        assert_equal(len(unclaimed_user.email_verifications.keys()), 1)
        # user goes to email confirmation link
        token = unclaimed_user.get_confirmation_token(unclaimed_user.username)
        url = web_url_for('confirm_email_get', uid=unclaimed_user._id, token=token)
        res = self.app.get(url)
        assert_equal(res.status_code, 302)
        # unclaimed records and token are cleared
        unclaimed_user.reload()
        assert_equal(unclaimed_user.unclaimed_records, {})
        assert_equal(len(unclaimed_user.email_verifications.keys()), 0)

    def test_confirmation_link_registers_user(self):
        user = User.create_unconfirmed('brian@queen.com', 'bicycle123', 'Brian May')
        assert_false(user.is_registered)  # sanity check
        user.save()
        confirmation_url = user.get_confirmation_url('brian@queen.com', external=False)
        res = self.app.get(confirmation_url)
        assert_equal(res.status_code, 302, 'redirects to settings page')
        res = res.follow()
        user.reload()
        assert_true(user.is_registered)
# TODO: Use mock add-on
class TestAddonUserViews(OsfTestCase):
    """User add-on settings endpoint: enabling and disabling add-ons."""

    def setUp(self):
        super(TestAddonUserViews, self).setUp()
        self.user = AuthUserFactory()

    def _post_addon_settings(self, enabled):
        # Toggle the github add-on via the settings endpoint, following any redirect.
        return self.app.post_json(
            '/api/v1/settings/addons/',
            {'github': enabled},
            auth=self.user.auth,
        ).maybe_follow()

    def test_choose_addons_add(self):
        """Enabling an add-on attaches it to the user."""
        self._post_addon_settings(True)
        self.user.reload()
        assert_true(self.user.get_addon('github'))

    def test_choose_addons_remove(self):
        """Enabling then disabling an add-on detaches it from the user."""
        self._post_addon_settings(True)
        self._post_addon_settings(False)
        self.user.reload()
        assert_false(self.user.get_addon('github'))
class TestConfigureMailingListViews(OsfTestCase):
    """Mailing-list subscription views and mailchimp webhook sync.

    Email subscriptions are force-enabled for the class and restored in
    tearDownClass.
    """
    @classmethod
    def setUpClass(cls):
        super(TestConfigureMailingListViews, cls).setUpClass()
        # Save and override the global flag so subscription code paths run.
        cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = True
    @unittest.skipIf(settings.USE_CELERY, 'Subscription must happen synchronously for this test')
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_user_choose_mailing_lists_updates_user_dict(self, mock_get_mailchimp_api):
        """Choosing a list updates user.mailing_lists and subscribes via the API."""
        user = AuthUserFactory()
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
        list_id = mailchimp_utils.get_list_id_from_name(list_name)
        payload = {settings.MAILCHIMP_GENERAL_LIST: True}
        url = api_url_for('user_choose_mailing_lists')
        res = self.app.post_json(url, payload, auth=user.auth)
        user.reload()
        # check user.mailing_lists is updated
        assert_true(user.mailing_lists[settings.MAILCHIMP_GENERAL_LIST])
        assert_equal(
            user.mailing_lists[settings.MAILCHIMP_GENERAL_LIST],
            payload[settings.MAILCHIMP_GENERAL_LIST]
        )
        # check that user is subscribed
        mock_client.lists.subscribe.assert_called_with(id=list_id,
                                                       email={'email': user.username},
                                                       merge_vars= {'fname': user.given_name,
                                                                    'lname': user.family_name,
                                                                    },
                                                       double_optin=False,
                                                       update_existing=True)
    def test_get_mailchimp_get_endpoint_returns_200(self):
        """The mailchimp GET endpoint is reachable."""
        url = api_url_for('mailchimp_get_endpoint')
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_mailchimp_webhook_subscribe_action_does_not_change_user(self, mock_get_mailchimp_api):
        """ Test that 'subscribe' actions sent to the OSF via mailchimp
        webhooks update the OSF database.
        """
        # NOTE(review): the method name says "does_not_change_user" but the
        # docstring and assertions show the user IS updated — the name looks
        # like a copy/paste leftover; confirm before renaming.
        list_id = '12345'
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
        # user is not subscribed to a list
        user = AuthUserFactory()
        user.mailing_lists = {'OSF General': False}
        user.save()
        # user subscribes and webhook sends request to OSF
        data = {'type': 'subscribe',
                'data[list_id]': list_id,
                'data[email]': user.username
        }
        url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
        res = self.app.post(url,
                            data,
                            content_type="application/x-www-form-urlencoded",
                            auth=user.auth)
        # user field is updated on the OSF
        user.reload()
        assert_true(user.mailing_lists[list_name])
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_mailchimp_webhook_profile_action_does_not_change_user(self, mock_get_mailchimp_api):
        """ Test that 'profile' actions sent to the OSF via mailchimp
        webhooks do not cause any database changes.
        """
        list_id = '12345'
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
        # user is subscribed to a list
        user = AuthUserFactory()
        user.mailing_lists = {'OSF General': True}
        user.save()
        # user hits subscribe again, which will update the user's existing info on mailchimp
        # webhook sends request (when configured to update on changes made through the API)
        data = {'type': 'profile',
                'data[list_id]': list_id,
                'data[email]': user.username
        }
        url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
        res = self.app.post(url,
                            data,
                            content_type="application/x-www-form-urlencoded",
                            auth=user.auth)
        # user field does not change
        user.reload()
        assert_true(user.mailing_lists[list_name])
    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_sync_data_from_mailchimp_unsubscribes_user(self, mock_get_mailchimp_api):
        """An 'unsubscribe' webhook clears the user's mailing-list flag."""
        list_id = '12345'
        list_name = 'OSF General'
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
        # user is subscribed to a list
        user = AuthUserFactory()
        user.mailing_lists = {'OSF General': True}
        user.save()
        # user unsubscribes through mailchimp and webhook sends request
        data = {'type': 'unsubscribe',
                'data[list_id]': list_id,
                'data[email]': user.username
        }
        url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
        res = self.app.post(url,
                            data,
                            content_type="application/x-www-form-urlencoded",
                            auth=user.auth)
        # user field is updated on the OSF
        user.reload()
        assert_false(user.mailing_lists[list_name])
    def test_sync_data_from_mailchimp_fails_without_secret_key(self):
        """Webhook sync without the shared secret key is rejected with 401."""
        user = AuthUserFactory()
        payload = {'values': {'type': 'unsubscribe',
                              'data': {'list_id': '12345',
                                       'email': 'freddie@cos.io'}}}
        url = api_url_for('sync_data_from_mailchimp')
        res = self.app.post_json(url, payload, auth=user.auth, expect_errors=True)
        assert_equal(res.status_code, http.UNAUTHORIZED)
    @classmethod
    def tearDownClass(cls):
        super(TestConfigureMailingListViews, cls).tearDownClass()
        # Restore the global flag saved in setUpClass.
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions
# TODO: Move to OSF Storage
class TestFileViews(OsfTestCase):
    """Endpoints that serialize a project's file trees."""

    def setUp(self):
        super(TestFileViews, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory.build(creator=self.user, is_public=True)
        self.project.add_contributor(self.user)
        self.project.save()

    def test_files_get(self):
        """The file-tree endpoint returns the serialized node plus tree assets."""
        endpoint = self.project.api_url_for('collect_file_trees')
        response = self.app.get(endpoint, auth=self.user.auth)
        assert_equal(response.status_code, http.OK)
        serialized = _view_project(self.project, auth=Auth(user=self.user))
        assert_equal(response.json['node'], serialized['node'])
        assert_in('tree_js', response.json)
        assert_in('tree_css', response.json)

    def test_grid_data(self):
        """Grid data matches the rubeus hgrid serialization of the project."""
        endpoint = self.project.api_url_for('grid_data')
        response = self.app.get(endpoint, auth=self.user.auth).maybe_follow()
        assert_equal(response.status_code, http.OK)
        hgrid = rubeus.to_hgrid(self.project, auth=Auth(self.user))
        assert_equal(len(response.json['data']), len(hgrid))
class TestComments(OsfTestCase):
    """Comment CRUD, permissions, abuse reports, discussion, and unread counts."""
    def setUp(self):
        super(TestComments, self).setUp()
        self.project = ProjectFactory(is_public=True)
        self.consolidated_auth = Auth(user=self.project.creator)
        self.non_contributor = AuthUserFactory()
        self.user = AuthUserFactory()
        self.project.add_contributor(self.user)
        self.project.save()
        self.user.save()
    # Helper: set the project's comment_level ('public', 'private', or None = off).
    def _configure_project(self, project, comment_level):
        project.comment_level = comment_level
        project.save()
    # Helper: POST a comment to the project; extra kwargs pass through to
    # post_json (e.g. auth=..., expect_errors=True).
    def _add_comment(self, project, content=None, **kwargs):
        content = content if content is not None else 'hammer to fall'
        url = project.api_url + 'comment/'
        return self.app.post_json(
            url,
            {
                'content': content,
                'isPublic': 'public',
            },
            **kwargs
        )
    def test_add_comment_public_contributor(self):
        self._configure_project(self.project, 'public')
        res = self._add_comment(
            self.project, auth=self.project.creator.auth,
        )
        self.project.reload()
        res_comment = res.json['comment']
        # Timestamps are compared separately because serialization formats differ.
        date_created = parse_date(str(res_comment.pop('dateCreated')))
        date_modified = parse_date(str(res_comment.pop('dateModified')))
        serialized_comment = serialize_comment(self.project.commented[0], self.consolidated_auth)
        date_created2 = parse_date(serialized_comment.pop('dateCreated'))
        date_modified2 = parse_date(serialized_comment.pop('dateModified'))
        assert_datetime_equal(date_created, date_created2)
        assert_datetime_equal(date_modified, date_modified2)
        assert_equal(len(self.project.commented), 1)
        assert_equal(res_comment, serialized_comment)
    def test_add_comment_public_non_contributor(self):
        self._configure_project(self.project, 'public')
        res = self._add_comment(
            self.project, auth=self.non_contributor.auth,
        )
        self.project.reload()
        res_comment = res.json['comment']
        date_created = parse_date(res_comment.pop('dateCreated'))
        date_modified = parse_date(res_comment.pop('dateModified'))
        serialized_comment = serialize_comment(self.project.commented[0], Auth(user=self.non_contributor))
        date_created2 = parse_date(serialized_comment.pop('dateCreated'))
        date_modified2 = parse_date(serialized_comment.pop('dateModified'))
        assert_datetime_equal(date_created, date_created2)
        assert_datetime_equal(date_modified, date_modified2)
        assert_equal(len(self.project.commented), 1)
        assert_equal(res_comment, serialized_comment)
    def test_add_comment_private_contributor(self):
        self._configure_project(self.project, 'private')
        res = self._add_comment(
            self.project, auth=self.project.creator.auth,
        )
        self.project.reload()
        res_comment = res.json['comment']
        date_created = parse_date(str(res_comment.pop('dateCreated')))
        date_modified = parse_date(str(res_comment.pop('dateModified')))
        serialized_comment = serialize_comment(self.project.commented[0], self.consolidated_auth)
        date_created2 = parse_date(serialized_comment.pop('dateCreated'))
        date_modified2 = parse_date(serialized_comment.pop('dateModified'))
        assert_datetime_equal(date_created, date_created2)
        assert_datetime_equal(date_modified, date_modified2)
        assert_equal(len(self.project.commented), 1)
        assert_equal(res_comment, serialized_comment)
    def test_add_comment_private_non_contributor(self):
        self._configure_project(self.project, 'private')
        res = self._add_comment(
            self.project, auth=self.non_contributor.auth, expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
    def test_add_comment_logged_out(self):
        self._configure_project(self.project, 'public')
        res = self._add_comment(self.project)
        # Anonymous commenters are redirected to login.
        assert_equal(res.status_code, 302)
        assert_in('login', res.headers.get('location'))
    def test_add_comment_off(self):
        # comment_level=None disables commenting entirely.
        self._configure_project(self.project, None)
        res = self._add_comment(
            self.project, auth=self.project.creator.auth, expect_errors=True,
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
    def test_add_comment_empty(self):
        self._configure_project(self.project, 'public')
        res = self._add_comment(
            self.project, content='',
            auth=self.project.creator.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_false(getattr(self.project, 'commented', []))
    def test_add_comment_toolong(self):
        self._configure_project(self.project, 'public')
        res = self._add_comment(
            self.project, content='toolong' * 500,
            auth=self.project.creator.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_false(getattr(self.project, 'commented', []))
    def test_add_comment_whitespace(self):
        self._configure_project(self.project, 'public')
        res = self._add_comment(
            self.project, content='   ',
            auth=self.project.creator.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_false(getattr(self.project, 'commented', []))
    def test_edit_comment(self):
        self._configure_project(self.project, 'public')
        comment = CommentFactory(node=self.project)
        url = self.project.api_url + 'comment/{0}/'.format(comment._id)
        res = self.app.put_json(
            url,
            {
                'content': 'edited',
                'isPublic': 'private',
            },
            auth=self.project.creator.auth,
        )
        comment.reload()
        assert_equal(res.json['content'], 'edited')
        assert_equal(comment.content, 'edited')
    def test_edit_comment_short(self):
        self._configure_project(self.project, 'public')
        comment = CommentFactory(node=self.project, content='short')
        url = self.project.api_url + 'comment/{0}/'.format(comment._id)
        res = self.app.put_json(
            url,
            {
                'content': '',
                'isPublic': 'private',
            },
            auth=self.project.creator.auth,
            expect_errors=True,
        )
        comment.reload()
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_equal(comment.content, 'short')
    def test_edit_comment_toolong(self):
        self._configure_project(self.project, 'public')
        comment = CommentFactory(node=self.project, content='short')
        url = self.project.api_url + 'comment/{0}/'.format(comment._id)
        res = self.app.put_json(
            url,
            {
                'content': 'toolong' * 500,
                'isPublic': 'private',
            },
            auth=self.project.creator.auth,
            expect_errors=True,
        )
        comment.reload()
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_equal(comment.content, 'short')
    def test_edit_comment_non_author(self):
        "Contributors who are not the comment author cannot edit."
        self._configure_project(self.project, 'public')
        comment = CommentFactory(node=self.project)
        non_author = AuthUserFactory()
        self.project.add_contributor(non_author, auth=self.consolidated_auth)
        url = self.project.api_url + 'comment/{0}/'.format(comment._id)
        res = self.app.put_json(
            url,
            {
                'content': 'edited',
                'isPublic': 'private',
            },
            auth=non_author.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
    def test_edit_comment_non_contributor(self):
        "Non-contributors who are not the comment author cannot edit."
        self._configure_project(self.project, 'public')
        comment = CommentFactory(node=self.project)
        url = self.project.api_url + 'comment/{0}/'.format(comment._id)
        res = self.app.put_json(
            url,
            {
                'content': 'edited',
                'isPublic': 'private',
            },
            auth=self.non_contributor.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
    def test_delete_comment_author(self):
        self._configure_project(self.project, 'public')
        comment = CommentFactory(node=self.project)
        url = self.project.api_url + 'comment/{0}/'.format(comment._id)
        self.app.delete_json(
            url,
            auth=self.project.creator.auth,
        )
        comment.reload()
        # Deletion is a soft delete, not a removal.
        assert_true(comment.is_deleted)
    def test_delete_comment_non_author(self):
        self._configure_project(self.project, 'public')
        comment = CommentFactory(node=self.project)
        url = self.project.api_url + 'comment/{0}/'.format(comment._id)
        res = self.app.delete_json(
            url,
            auth=self.non_contributor.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, http.FORBIDDEN)
        comment.reload()
        assert_false(comment.is_deleted)
    def test_report_abuse(self):
        self._configure_project(self.project, 'public')
        comment = CommentFactory(node=self.project)
        reporter = AuthUserFactory()
        url = self.project.api_url + 'comment/{0}/report/'.format(comment._id)
        self.app.post_json(
            url,
            {
                'category': 'spam',
                'text': 'ads',
            },
            auth=reporter.auth,
        )
        comment.reload()
        # Reports are stored keyed by the reporting user's id.
        assert_in(reporter._id, comment.reports)
        assert_equal(
            comment.reports[reporter._id],
            {'category': 'spam', 'text': 'ads'}
        )
    def test_can_view_private_comments_if_contributor(self):
        self._configure_project(self.project, 'public')
        CommentFactory(node=self.project, user=self.project.creator, is_public=False)
        url = self.project.api_url + 'comments/'
        res = self.app.get(url, auth=self.project.creator.auth)
        assert_equal(len(res.json['comments']), 1)
    def test_view_comments_with_anonymous_link(self):
        self.project.save()
        self.project.set_privacy('private')
        self.project.reload()
        user = AuthUserFactory()
        link = PrivateLinkFactory(anonymous=True)
        link.nodes.append(self.project)
        link.save()
        CommentFactory(node=self.project, user=self.project.creator, is_public=False)
        url = self.project.api_url + 'comments/'
        res = self.app.get(url, {"view_only": link.key}, auth=user.auth)
        comment = res.json['comments'][0]
        author = comment['author']
        # Anonymous view-only links must not leak author identity.
        assert_in('A user', author['name'])
        assert_false(author['gravatarUrl'])
        assert_false(author['url'])
        assert_false(author['id'])
    def test_discussion_recursive(self):
        self._configure_project(self.project, 'public')
        comment_l0 = CommentFactory(node=self.project)
        user_l1 = UserFactory()
        user_l2 = UserFactory()
        comment_l1 = CommentFactory(node=self.project, target=comment_l0, user=user_l1)
        CommentFactory(node=self.project, target=comment_l1, user=user_l2)
        url = self.project.api_url + 'comments/discussion/'
        res = self.app.get(url)
        # Three distinct commenters across the reply chain.
        assert_equal(len(res.json['discussion']), 3)
    def test_discussion_no_repeats(self):
        self._configure_project(self.project, 'public')
        comment_l0 = CommentFactory(node=self.project)
        comment_l1 = CommentFactory(node=self.project, target=comment_l0)
        CommentFactory(node=self.project, target=comment_l1)
        url = self.project.api_url + 'comments/discussion/'
        res = self.app.get(url)
        assert_equal(len(res.json['discussion']), 1)
    def test_discussion_sort(self):
        self._configure_project(self.project, 'public')
        user1 = UserFactory()
        user2 = UserFactory()
        CommentFactory(node=self.project)
        for _ in range(3):
            CommentFactory(node=self.project, user=user1)
        for _ in range(2):
            CommentFactory(node=self.project, user=user2)
        url = self.project.api_url + 'comments/discussion/'
        res = self.app.get(url)
        assert_equal(len(res.json['discussion']), 3)
        # Ordered by number of comments, most active first.
        observed = [user['id'] for user in res.json['discussion']]
        expected = [user1._id, user2._id, self.project.creator._id]
        assert_equal(observed, expected)
    def test_view_comments_updates_user_comments_view_timestamp(self):
        CommentFactory(node=self.project)
        url = self.project.api_url_for('update_comments_timestamp')
        res = self.app.put_json(url, auth=self.user.auth)
        self.user.reload()
        user_timestamp = self.user.comments_viewed_timestamp[self.project._id]
        view_timestamp = dt.datetime.utcnow()
        assert_datetime_equal(user_timestamp, view_timestamp)
    def test_confirm_non_contrib_viewers_dont_have_pid_in_comments_view_timestamp(self):
        url = self.project.api_url_for('update_comments_timestamp')
        res = self.app.put_json(url, auth=self.user.auth)
        self.non_contributor.reload()
        assert_not_in(self.project._id, self.non_contributor.comments_viewed_timestamp)
    def test_n_unread_comments_updates_when_comment_is_added(self):
        self._add_comment(self.project, auth=self.project.creator.auth)
        self.project.reload()
        url = self.project.api_url_for('list_comments')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.json.get('nUnread'), 1)
        # Marking comments viewed resets the unread count.
        url = self.project.api_url_for('update_comments_timestamp')
        res = self.app.put_json(url, auth=self.user.auth)
        self.user.reload()
        url = self.project.api_url_for('list_comments')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.json.get('nUnread'), 0)
    def test_n_unread_comments_updates_when_comment_reply(self):
        comment = CommentFactory(node=self.project, user=self.project.creator)
        reply = CommentFactory(node=self.project, user=self.user, target=comment)
        self.project.reload()
        url = self.project.api_url_for('list_comments')
        res = self.app.get(url, auth=self.project.creator.auth)
        assert_equal(res.json.get('nUnread'), 1)
    def test_n_unread_comments_updates_when_comment_is_edited(self):
        # NOTE(review): reuses test_edit_comment() as a fixture step — the
        # creator's edit should leave one unread comment for self.user.
        self.test_edit_comment()
        self.project.reload()
        url = self.project.api_url_for('list_comments')
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.json.get('nUnread'), 1)
    def test_n_unread_comments_is_zero_when_no_comments(self):
        url = self.project.api_url_for('list_comments')
        res = self.app.get(url, auth=self.project.creator.auth)
        assert_equal(res.json.get('nUnread'), 0)
class TestTagViews(OsfTestCase):
    """Project tag page endpoint."""

    def setUp(self):
        super(TestTagViews, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)

    @unittest.skip('Tags endpoint disabled for now.')
    def test_tag_get_returns_200(self):
        """A tag page responds with HTTP 200."""
        response = self.app.get(web_url_for('project_tag', tag='foo'))
        assert_equal(response.status_code, 200)
@requires_search
class TestSearchViews(OsfTestCase):
    """Contributor and project search endpoints, including pagination.

    Requires a live search backend; the index is wiped before and after
    each test.
    """
    def setUp(self):
        super(TestSearchViews, self).setUp()
        import website.search.search as search
        search.delete_all()
        self.project = ProjectFactory(creator=UserFactory(fullname='Robbie Williams'))
        self.contrib = UserFactory(fullname='Brian May')
        # Twelve 'Freddie Mercury<i>' users: with the default page size of 5,
        # a 'fr' query yields 3 pages of 5, 5, and 2 results.
        for i in range(0, 12):
            UserFactory(fullname='Freddie Mercury{}'.format(i))
    def tearDown(self):
        super(TestSearchViews, self).tearDown()
        import website.search.search as search
        search.delete_all()
    def test_search_contributor(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': self.contrib.fullname})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        assert_equal(len(result), 1)
        brian = result[0]
        assert_equal(brian['fullname'], self.contrib.fullname)
        assert_in('gravatar_url', brian)
        assert_equal(brian['registered'], self.contrib.is_registered)
        assert_equal(brian['active'], self.contrib.is_active)
    def test_search_pagination_default(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr'})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        pages = res.json['pages']
        page = res.json['page']
        assert_equal(len(result), 5)
        assert_equal(pages, 3)
        assert_equal(page, 0)
    def test_search_pagination_default_page_1(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr', 'page': 1})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        page = res.json['page']
        assert_equal(len(result), 5)
        assert_equal(page, 1)
    def test_search_pagination_default_page_2(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr', 'page': 2})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        page = res.json['page']
        # Last page holds the remaining 2 of 12 matches.
        assert_equal(len(result), 2)
        assert_equal(page, 2)
    def test_search_pagination_smaller_pages(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr', 'size': 5})
        assert_equal(res.status_code, 200)
        result = res.json['users']
        pages = res.json['pages']
        page = res.json['page']
        assert_equal(len(result), 5)
        assert_equal(page, 0)
        assert_equal(pages, 3)
    def test_search_pagination_smaller_pages_page_2(self):
        url = api_url_for('search_contributor')
        res = self.app.get(url, {'query': 'fr', 'page': 2, 'size': 5, })
        assert_equal(res.status_code, 200)
        result = res.json['users']
        pages = res.json['pages']
        page = res.json['page']
        assert_equal(len(result), 2)
        assert_equal(page, 2)
        assert_equal(pages, 3)
    def test_search_projects(self):
        url = '/search/'
        res = self.app.get(url, {'q': self.project.title})
        assert_equal(res.status_code, 200)
class TestODMTitleSearch(OsfTestCase):
    """ Docs from original method:
    :arg term: The substring of the title.
    :arg category: Category of the node.
    :arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg includePublic: yes or no. Whether the projects listed should include public projects.
    :arg includeContributed: yes or no. Whether the search should include projects the current user has
        contributed to.
    :arg ignoreNode: a list of nodes that should not be included in the search.
    :return: a list of dictionaries of projects
    """
    def setUp(self):
        super(TestODMTitleSearch, self).setUp()
        self.user = AuthUserFactory()
        self.user_two = AuthUserFactory()
        # Fixture titles: foo = own private project, bar = other user's private
        # project, baz = other user's public project, qux = own registration,
        # quux = folder, Dashboard = dashboard folder.
        self.project = ProjectFactory(creator=self.user, title="foo")
        self.project_two = ProjectFactory(creator=self.user_two, title="bar")
        self.public_project = ProjectFactory(creator=self.user_two, is_public=True, title="baz")
        self.registration_project = RegistrationFactory(creator=self.user, title="qux")
        self.folder = FolderFactory(creator=self.user, title="quux")
        self.dashboard = DashboardFactory(creator=self.user, title="Dashboard")
        self.url = api_url_for('search_projects_by_title')
    def test_search_projects_by_title(self):
        # Own contributed project by exact title.
        res = self.app.get(self.url, {'term': self.project.title}, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # Public-only search finds another user's public project.
        res = self.app.get(self.url,
                           {
                               'term': self.public_project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self.app.get(self.url,
                           {
                               'term': self.project.title,
                               'includePublic': 'no',
                               'includeContributed': 'yes'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self.app.get(self.url,
                           {
                               'term': self.project.title,
                               'includePublic': 'no',
                               'includeContributed': 'yes',
                               'isRegistration': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self.app.get(self.url,
                           {
                               'term': self.project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isRegistration': 'either'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self.app.get(self.url,
                           {
                               'term': self.public_project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isRegistration': 'either'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # NOTE(review): 2 results here — presumably 'qux' matches both the
        # registration and the source project it was created from; confirm.
        res = self.app.get(self.url,
                           {
                               'term': self.registration_project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isRegistration': 'either'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 2)
        res = self.app.get(self.url,
                           {
                               'term': self.registration_project.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isRegistration': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        # Folders are only returned when isFolder is 'yes'.
        res = self.app.get(self.url,
                           {
                               'term': self.folder.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isFolder': 'yes'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
        res = self.app.get(self.url,
                           {
                               'term': self.folder.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isFolder': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 0)
        res = self.app.get(self.url,
                           {
                               'term': self.dashboard.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isFolder': 'no'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 0)
        res = self.app.get(self.url,
                           {
                               'term': self.dashboard.title,
                               'includePublic': 'yes',
                               'includeContributed': 'yes',
                               'isFolder': 'yes'
                           }, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), 1)
class TestReorderComponents(OsfTestCase):
    """Component reordering by a non-creator contributor."""

    def setUp(self):
        super(TestReorderComponents, self).setUp()
        self.creator = AuthUserFactory()
        self.contrib = AuthUserFactory()
        # Public project shared with the contributor.
        self.project = ProjectFactory.build(creator=self.creator, public=True)
        self.project.add_contributor(self.contrib, auth=Auth(self.creator))
        # One component the contributor can see, one only the creator can.
        self.public_component = NodeFactory(creator=self.creator, public=True)
        self.private_component = NodeFactory(creator=self.creator, public=False)
        self.project.nodes.extend([self.public_component, self.private_component])
        self.project.save()

    # Regression test for
    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
    def test_reorder_components_with_private_component(self):
        """A contributor can reorder even when a private component is present."""
        new_order = [
            '{0}:node'.format(component._primary_key)
            for component in (self.private_component, self.public_component)
        ]
        endpoint = self.project.api_url_for('project_reorder_components')
        response = self.app.post_json(
            endpoint, {'new_list': new_order}, auth=self.contrib.auth,
        )
        assert_equal(response.status_code, 200)
class TestDashboardViews(OsfTestCase):
    """Dashboard smart folders, dashboard-node listings, and expand/collapse state."""
    def setUp(self):
        super(TestDashboardViews, self).setUp()
        self.creator = AuthUserFactory()
        self.contrib = AuthUserFactory()
        self.dashboard = DashboardFactory(creator=self.creator)
    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/571
    def test_components_with_are_accessible_from_dashboard(self):
        # NOTE(review): 'with_are' in the name looks like a typo for
        # 'with_[only component access]_are'; behavior is unaffected.
        project = ProjectFactory(creator=self.creator, public=False)
        component = NodeFactory(creator=self.creator, parent=project)
        component.add_contributor(self.contrib, auth=Auth(self.creator))
        component.save()
        # Get the All My Projects smart folder from the dashboard
        url = api_url_for('get_dashboard', nid=ALL_MY_PROJECTS_ID)
        res = self.app.get(url, auth=self.contrib.auth)
        assert_equal(len(res.json['data']), 1)
    def test_get_dashboard_nodes(self):
        project = ProjectFactory(creator=self.creator)
        component = NodeFactory(creator=self.creator, parent=project)
        url = api_url_for('get_dashboard_nodes')
        res = self.app.get(url, auth=self.creator.auth)
        assert_equal(res.status_code, 200)
        nodes = res.json['nodes']
        assert_equal(len(nodes), 2)
        project_serialized = nodes[0]
        assert_equal(project_serialized['id'], project._primary_key)
    def test_get_dashboard_nodes_shows_components_if_user_is_not_contrib_on_project(self):
        # User creates a project with a component
        project = ProjectFactory(creator=self.creator)
        component = NodeFactory(creator=self.creator, parent=project)
        # User adds friend as a contributor to the component but not the
        # project
        friend = AuthUserFactory()
        component.add_contributor(friend, auth=Auth(self.creator))
        component.save()
        # friend requests their dashboard nodes
        url = api_url_for('get_dashboard_nodes')
        res = self.app.get(url, auth=friend.auth)
        nodes = res.json['nodes']
        # Response includes component
        assert_equal(len(nodes), 1)
        assert_equal(nodes[0]['id'], component._primary_key)
        # friend requests dashboard nodes, filtering against components
        url = api_url_for('get_dashboard_nodes', no_components=True)
        res = self.app.get(url, auth=friend.auth)
        nodes = res.json['nodes']
        assert_equal(len(nodes), 0)
    def test_get_dashboard_nodes_admin_only(self):
        friend = AuthUserFactory()
        project = ProjectFactory(creator=self.creator)
        # Friend is added as a contributor with read+write (not admin)
        # permissions
        perms = permissions.expand_permissions(permissions.WRITE)
        project.add_contributor(friend, auth=Auth(self.creator), permissions=perms)
        project.save()
        url = api_url_for('get_dashboard_nodes')
        res = self.app.get(url, auth=friend.auth)
        assert_equal(res.json['nodes'][0]['id'], project._primary_key)
        # Can filter project according to permission
        url = api_url_for('get_dashboard_nodes', permissions='admin')
        res = self.app.get(url, auth=friend.auth)
        assert_equal(len(res.json['nodes']), 0)
    def test_get_dashboard_nodes_invalid_permission(self):
        url = api_url_for('get_dashboard_nodes', permissions='not-valid')
        res = self.app.get(url, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
    def test_registered_components_with_are_accessible_from_dashboard(self):
        project = ProjectFactory(creator=self.creator, public=False)
        component = NodeFactory(creator=self.creator, parent=project)
        component.add_contributor(self.contrib, auth=Auth(self.creator))
        component.save()
        project.register_node(
            None, Auth(self.creator), '', '',
        )
        # Get the All My Registrations smart folder from the dashboard
        url = api_url_for('get_dashboard', nid=ALL_MY_REGISTRATIONS_ID)
        res = self.app.get(url, auth=self.contrib.auth)
        assert_equal(len(res.json['data']), 1)
    def test_archiving_nodes_appear_in_all_my_registrations(self):
        project = ProjectFactory(creator=self.creator, public=False)
        reg = RegistrationFactory(project=project, user=self.creator)
        # Get the All My Registrations smart folder from the dashboard
        url = api_url_for('get_dashboard', nid=ALL_MY_REGISTRATIONS_ID)
        res = self.app.get(url, auth=self.creator.auth)
        assert_equal(res.json['data'][0]['node_id'], reg._id)
    def test_untouched_node_is_collapsed(self):
        found_item = False
        folder = FolderFactory(creator=self.creator, public=True)
        self.dashboard.add_pointer(folder, auth=Auth(self.creator))
        url = api_url_for('get_dashboard', nid=self.dashboard._id)
        dashboard_data = self.app.get(url, auth=self.creator.auth)
        dashboard_json = dashboard_data.json[u'data']
        for dashboard_item in dashboard_json:
            if dashboard_item[u'node_id'] == folder._id:
                found_item = True
                assert_false(dashboard_item[u'expand'], "Expand state was not set properly.")
        assert_true(found_item, "Did not find the folder in the dashboard.")
    def test_expand_node_sets_expand_to_true(self):
        found_item = False
        folder = FolderFactory(creator=self.creator, public=True)
        self.dashboard.add_pointer(folder, auth=Auth(self.creator))
        url = api_url_for('expand', pid=folder._id)
        self.app.post(url, auth=self.creator.auth)
        url = api_url_for('get_dashboard', nid=self.dashboard._id)
        dashboard_data = self.app.get(url, auth=self.creator.auth)
        dashboard_json = dashboard_data.json[u'data']
        for dashboard_item in dashboard_json:
            if dashboard_item[u'node_id'] == folder._id:
                found_item = True
                assert_true(dashboard_item[u'expand'], "Expand state was not set properly.")
        assert_true(found_item, "Did not find the folder in the dashboard.")
    def test_collapse_node_sets_expand_to_true(self):
        # NOTE(review): name says "sets expand to true" but the test verifies
        # collapse resets expand to False after an expand; consider renaming.
        found_item = False
        folder = FolderFactory(creator=self.creator, public=True)
        self.dashboard.add_pointer(folder, auth=Auth(self.creator))
        # Expand the folder
        url = api_url_for('expand', pid=folder._id)
        self.app.post(url, auth=self.creator.auth)
        # Serialize the dashboard and test
        url = api_url_for('get_dashboard', nid=self.dashboard._id)
        dashboard_data = self.app.get(url, auth=self.creator.auth)
        dashboard_json = dashboard_data.json[u'data']
        for dashboard_item in dashboard_json:
            if dashboard_item[u'node_id'] == folder._id:
                found_item = True
                assert_true(dashboard_item[u'expand'], "Expand state was not set properly.")
        assert_true(found_item, "Did not find the folder in the dashboard.")
        # Collapse the folder
        found_item = False
        url = api_url_for('collapse', pid=folder._id)
        self.app.post(url, auth=self.creator.auth)
        # Serialize the dashboard and test
        url = api_url_for('get_dashboard', nid=self.dashboard._id)
        dashboard_data = self.app.get(url, auth=self.creator.auth)
        dashboard_json = dashboard_data.json[u'data']
        for dashboard_item in dashboard_json:
            if dashboard_item[u'node_id'] == folder._id:
                found_item = True
                assert_false(dashboard_item[u'expand'], "Expand state was not set properly.")
        assert_true(found_item, "Did not find the folder in the dashboard.")
def test_folder_new_post(self):
url = api_url_for('folder_new_post', nid=self.dashboard._id)
found_item = False
# Make the folder
title = 'New test folder'
payload = {'title': title, }
self.app.post_json(url, payload, auth=self.creator.auth)
# Serialize the dashboard and test
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'name'] == title:
found_item = True
assert_true(found_item, "Did not find the folder in the dashboard.")
class TestWikiWidgetViews(OsfTestCase):
    """Tests for `_should_show_wiki_widget` visibility rules."""

    def setUp(self):
        super(TestWikiWidgetViews, self).setUp()
        # project with no home wiki page
        self.project = ProjectFactory()
        self.read_only_contrib = AuthUserFactory()
        self.project.add_contributor(self.read_only_contrib, permissions='read')
        self.noncontributor = AuthUserFactory()
        # project with a home wiki page but no wiki content
        self.project2 = ProjectFactory(creator=self.project.creator)
        self.project2.add_contributor(self.read_only_contrib, permissions='read')
        self.project2.update_node_wiki(name='home', content='', auth=Auth(self.project.creator))

    def test_show_wiki_for_contributors_when_no_wiki_or_content(self):
        assert_true(_should_show_wiki_widget(self.project, self.project.creator))
        assert_true(_should_show_wiki_widget(self.project2, self.project.creator))

    def test_show_wiki_is_false_for_read_contributors_when_no_wiki_or_content(self):
        assert_false(_should_show_wiki_widget(self.project, self.read_only_contrib))
        assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib))

    def test_show_wiki_is_false_for_noncontributors_when_no_wiki_or_content(self):
        assert_false(_should_show_wiki_widget(self.project, self.noncontributor))
        # BUG FIX: this test is about non-contributors, but the second check
        # previously passed `self.read_only_contrib` (copy-paste from the
        # read-contributor test above). Check the non-contributor instead.
        assert_false(_should_show_wiki_widget(self.project2, self.noncontributor))
class TestForkViews(OsfTestCase):
    """View tests covering forking of projects."""

    def setUp(self):
        super(TestForkViews, self).setUp()
        self.user = AuthUserFactory()
        self.project = ProjectFactory.build(creator=self.user, is_public=True)
        self.consolidated_auth = Auth(user=self.project.creator)
        self.user.save()
        self.project.save()

    def test_fork_private_project_non_contributor(self):
        # Non-contributors may not fork a private project
        self.project.set_privacy("private")
        self.project.save()
        fork_url = self.project.api_url_for('node_fork_page')
        outsider = AuthUserFactory()
        response = self.app.post_json(fork_url,
                                      auth=outsider.auth,
                                      expect_errors=True)
        assert_equal(response.status_code, http.FORBIDDEN)

    def test_fork_public_project_non_contributor(self):
        # Anybody may fork a public project
        fork_url = self.project.api_url_for('node_fork_page')
        outsider = AuthUserFactory()
        response = self.app.post_json(fork_url, auth=outsider.auth)
        assert_equal(response.status_code, 200)

    def test_fork_project_contributor(self):
        # A contributor may fork even a private project
        collaborator = AuthUserFactory()
        self.project.set_privacy("private")
        self.project.add_contributor(collaborator)
        self.project.save()
        fork_url = self.project.api_url_for('node_fork_page')
        response = self.app.post_json(fork_url, auth=collaborator.auth)
        assert_equal(response.status_code, 200)

    def test_registered_forks_dont_show_in_fork_list(self):
        fork = self.project.fork_node(self.consolidated_auth)
        RegistrationFactory(project=fork)
        forks_url = self.project.api_url_for('get_forks')
        response = self.app.get(forks_url, auth=self.user.auth)
        # Only the live fork is listed, not its registration
        assert_equal(len(response.json['nodes']), 1)
        assert_equal(response.json['nodes'][0]['id'], fork._id)
class TestProjectCreation(OsfTestCase):
    """Tests for project_new_post and the templating/creation views."""

    def setUp(self):
        super(TestProjectCreation, self).setUp()
        self.creator = AuthUserFactory()
        self.url = api_url_for('project_new_post')

    def test_needs_title(self):
        res = self.app.post_json(self.url, {}, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_create_component_strips_html(self):
        user = AuthUserFactory()
        project = ProjectFactory(creator=user)
        url = web_url_for('project_new_node', pid=project._id)
        post_data = {'title': '<b>New <blink>Component</blink> Title</b>', 'category': ''}
        # The response itself is unused; only the side effect matters.
        # (Previously bound to an unused `request` variable.)
        self.app.post(url, post_data, auth=user.auth).follow()
        project.reload()
        child = project.nodes[0]
        # HTML has been stripped
        assert_equal(child.title, 'New Component Title')

    def test_strip_html_from_title(self):
        payload = {
            'title': 'no html <b>here</b>'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        node = Node.load(res.json['projectUrl'].replace('/', ''))
        assert_true(node)
        assert_equal('no html here', node.title)

    def test_only_needs_title(self):
        payload = {
            'title': 'Im a real title'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)

    def test_title_must_be_one_long(self):
        payload = {
            'title': ''
        }
        res = self.app.post_json(
            self.url, payload, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_title_must_be_less_than_200(self):
        payload = {
            'title': ''.join([str(x) for x in xrange(0, 250)])
        }
        res = self.app.post_json(
            self.url, payload, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_fails_to_create_project_with_whitespace_title(self):
        payload = {
            'title': '   '
        }
        res = self.app.post_json(
            self.url, payload, auth=self.creator.auth, expect_errors=True)
        assert_equal(res.status_code, 400)

    def test_creates_a_project(self):
        payload = {
            'title': 'Im a real title'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)
        node = Node.load(res.json['projectUrl'].replace('/', ''))
        assert_true(node)
        # BUG FIX: assert_true(a, b) treats b as a failure *message* and
        # never compares the values; use assert_equal to actually check.
        assert_equal(node.title, 'Im a real title')

    def test_new_project_returns_serialized_node_data(self):
        payload = {
            'title': 'Im a real title'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)
        node = res.json['newNode']
        assert_true(node)
        assert_equal(node['title'], 'Im a real title')

    def test_description_works(self):
        payload = {
            'title': 'Im a real title',
            'description': 'I describe things!'
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)
        node = Node.load(res.json['projectUrl'].replace('/', ''))
        assert_true(node)
        # BUG FIX: was assert_true(node.description, ...) which never compared.
        assert_equal(node.description, 'I describe things!')

    def test_can_template(self):
        other_node = ProjectFactory(creator=self.creator)
        payload = {
            'title': 'Im a real title',
            'template': other_node._id
        }
        res = self.app.post_json(self.url, payload, auth=self.creator.auth)
        assert_equal(res.status_code, 201)
        node = Node.load(res.json['projectUrl'].replace('/', ''))
        assert_true(node)
        # BUG FIX: was assert_true(node.template_node, other_node) which
        # never compared the two nodes.
        assert_equal(node.template_node, other_node)

    def test_project_before_template_no_addons(self):
        project = ProjectFactory()
        res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
        assert_equal(res.json['prompts'], [])

    def test_project_before_template_with_addons(self):
        project = ProjectWithAddonFactory(addon='github')
        res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
        assert_in('GitHub', res.json['prompts'])

    def test_project_new_from_template_non_user(self):
        # Anonymous users are redirected to the login page
        project = ProjectFactory()
        url = api_url_for('project_new_from_template', nid=project._id)
        res = self.app.post(url, auth=None)
        assert_equal(res.status_code, 302)
        res2 = res.follow(expect_errors=True)
        assert_equal(res2.status_code, 301)
        assert_equal(res2.request.path, '/login')

    def test_project_new_from_template_public_non_contributor(self):
        non_contributor = AuthUserFactory()
        project = ProjectFactory(is_public=True)
        url = api_url_for('project_new_from_template', nid=project._id)
        res = self.app.post(url, auth=non_contributor.auth)
        assert_equal(res.status_code, 201)

    def test_project_new_from_template_contributor(self):
        contributor = AuthUserFactory()
        project = ProjectFactory(is_public=False)
        project.add_contributor(contributor)
        project.save()
        url = api_url_for('project_new_from_template', nid=project._id)
        res = self.app.post(url, auth=contributor.auth)
        assert_equal(res.status_code, 201)
class TestUnconfirmedUserViews(OsfTestCase):
    """Profile views for users who have not yet confirmed their accounts."""

    def test_can_view_profile(self):
        unconfirmed = UnconfirmedUserFactory()
        profile_url = web_url_for('profile_view_id', uid=unconfirmed._id)
        response = self.app.get(profile_url)
        assert_equal(response.status_code, 200)
class TestProfileNodeList(OsfTestCase):
    """Public project/component lists shown on user profiles."""

    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()
        self.public = ProjectFactory(is_public=True)
        self.public_component = NodeFactory(parent=self.public, is_public=True)
        self.private = ProjectFactory(is_public=False)
        self.deleted = ProjectFactory(is_public=True, is_deleted=True)
        # The user contributes to every node, so visibility rules — not
        # membership — decide what gets listed.
        for node in (self.public, self.public_component, self.private, self.deleted):
            node.add_contributor(self.user, auth=Auth(node.creator))
            node.save()

    def test_get_public_projects(self):
        response = self.app.get(api_url_for('get_public_projects', uid=self.user._id))
        listed = [entry['id'] for entry in response.json['nodes']]
        assert_in(self.public._id, listed)
        assert_not_in(self.private._id, listed)
        assert_not_in(self.deleted._id, listed)
        assert_not_in(self.public_component._id, listed)

    def test_get_public_components(self):
        response = self.app.get(api_url_for('get_public_components', uid=self.user._id))
        listed = [entry['id'] for entry in response.json['nodes']]
        assert_in(self.public_component._id, listed)
        assert_not_in(self.public._id, listed)
        assert_not_in(self.private._id, listed)
        assert_not_in(self.deleted._id, listed)
class TestStaticFileViews(OsfTestCase):
    """Smoke tests for statically served endpoints."""

    def test_robots_dot_txt(self):
        response = self.app.get('/robots.txt')
        assert_equal(response.status_code, 200)
        assert_in('User-agent', response)
        assert_in('text/plain', response.headers['Content-Type'])

    def test_favicon(self):
        response = self.app.get('/favicon.ico')
        assert_equal(response.status_code, 200)
        assert_in('image/vnd.microsoft.icon', response.headers['Content-Type'])

    def test_getting_started_page(self):
        response = self.app.get('/getting-started/')
        assert_equal(response.status_code, 200)
class TestUserConfirmSignal(OsfTestCase):
    """The user_confirmed signal must fire for both the claim-account and
    confirm-email flows."""

    def test_confirm_user_signal_called_when_user_claims_account(self):
        unclaimed_user = UnconfirmedUserFactory()
        # unclaimed user has been invited to a project.
        referrer = UserFactory()
        project = ProjectFactory(creator=referrer)
        unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
        unclaimed_user.save()
        token = unclaimed_user.get_unclaimed_record(project._primary_key)['token']
        with capture_signals() as mock_signals:
            claim_url = web_url_for('claim_user_form', pid=project._id, uid=unclaimed_user._id, token=token)
            form_data = {'username': unclaimed_user.username,
                         'password': 'password',
                         'password2': 'password'}
            response = self.app.post(claim_url, form_data)
            assert_equal(response.status_code, 302)
        assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))

    def test_confirm_user_signal_called_when_user_confirms_email(self):
        unconfirmed_user = UnconfirmedUserFactory()
        unconfirmed_user.save()
        # user follows the email confirmation link
        token = unconfirmed_user.get_confirmation_token(unconfirmed_user.username)
        with capture_signals() as mock_signals:
            confirm_url = web_url_for('confirm_email_get', uid=unconfirmed_user._id, token=token)
            response = self.app.get(confirm_url)
            assert_equal(response.status_code, 302)
        assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
ckc6cz/osf.io
|
tests/test_views.py
|
Python
|
apache-2.0
| 181,349
|
[
"Brian"
] |
1441303e85786233e21827dfb469d96dc6dbf8c3ac0bd79a140effd08d8b6fef
|
"""
Tools for creating graph inputs from molecule data
"""
import itertools
import os
import sys
from collections import deque
from functools import partial
from multiprocessing import Pool
from typing import Dict, List, Union
import numpy as np
from pymatgen.analysis.local_env import NearNeighbors
from pymatgen.core import Element, Molecule
from pymatgen.io.babel import BabelMolAdaptor
from megnet.data.graph import (BaseGraphBatchGenerator, Converter,
GaussianDistance, GraphBatchGenerator,
StructureGraph)
from megnet.utils.general import fast_label_binarize
from .qm9 import ring_to_vector
try:
import pybel # type: ignore
except ImportError:
try:
from openbabel import pybel
except ImportError:
pybel = None
try:
from rdkit import Chem # type: ignore
except ImportError:
Chem = None
# Module authoring date
__date__ = "12/01/2018"
# List of features to use by default for each atom
_ATOM_FEATURES = [
    "element",
    "chirality",
    "formal_charge",
    "ring_sizes",
    "hybridization",
    "donor",
    "acceptor",
    "aromatic",
]
# List of features to use by default for each bond
_BOND_FEATURES = ["bond_type", "same_ring", "spatial_distance", "graph_distance"]
# List of elements in library to use by default; used as the one-hot
# vocabulary when the `element` atom feature is requested
_ELEMENTS = ["H", "C", "N", "O", "F"]
class SimpleMolGraph(StructureGraph):
    """
    Molecule graph that treats every atom pair as a bond. The interatomic
    distance serves as the bond feature; by default it is expanded with a
    Gaussian basis centered at np.linspace(0, 4, 20) with a width of 0.5.
    """

    def __init__(
        self,
        nn_strategy: Union[str, NearNeighbors] = "AllAtomPairs",
        atom_converter: Converter = None,
        bond_converter: Converter = None,
    ):
        """
        Args:
            nn_strategy (str): NearNeighbor strategy
            atom_converter (Converter): atomic features converter object
            bond_converter (Converter): bond features converter object
        """
        # Fall back to the default Gaussian distance expansion only when the
        # caller did not provide an expander of their own.
        expander = GaussianDistance(np.linspace(0, 4, 20), 0.5) if bond_converter is None else bond_converter
        super().__init__(nn_strategy=nn_strategy, atom_converter=atom_converter, bond_converter=expander)
class MolecularGraph(StructureGraph):
    """Class for generating the graph inputs from a molecule
    Computes many different features for the atoms and bonds in a molecule, and prepares them
    in a form compatible with MEGNet models. The :meth:`convert` method takes a OpenBabel molecule
    and, besides computing features, also encodes them in a form compatible with machine learning.
    Namely, the `convert` method one-hot encodes categorical variables and concatenates
    the atomic features
    ## Atomic Features
    This class can compute the following features for each atom
    - `atomic_num`: The atomic number
    - `element`: (categorical) Element identity. (Unlike `atomic_num`, element is one-hot-encoded)
    - `chirality`: (categorical) R, S, or not a Chiral center (one-hot encoded).
    - `formal_charge`: Formal charge of the atom
    - `ring_sizes`: For rings with 9 or fewer atoms, how many unique rings
    of each size include this atom
    - `hybridization`: (categorical) Hybridization of atom: sp, sp2, sp3, sq.
    planer, trig, octahedral, or hydrogen
    - `donor`: (boolean) Whether the atom is a hydrogen bond donor
    - `acceptor`: (boolean) Whether the atom is a hydrogen bond acceptor
    - `aromatic`: (boolean) Whether the atom is part of an aromatic system
    ## Atom Pair Features
    The class also computes features for each pair of atoms
    - `bond_type`: (categorical) Whether the pair are unbonded, or in a single, double, triple, or aromatic bond
    - `same_ring`: (boolean) Whether the atoms are in the same aromatic ring
    - `graph_distance`: Distance of shortest path between atoms on the bonding graph
    - `spatial_distance`: Euclidean distance between the atoms. By default, this distance is expanded into
    a vector of 20 different values computed using the `GaussianDistance` converter
    """
    def __init__(
        self,
        atom_features: List[str] = None,
        bond_features: List[str] = None,
        distance_converter: Converter = None,
        known_elements: List[str] = None,
        max_ring_size: int = 9,
    ):
        """
        Args:
            atom_features ([str]): List of atom features to compute
            bond_features ([str]): List of bond features to compute
            distance_converter (DistanceCovertor): Tool used to expand distances
                from a single scalar vector to an array of values
            known_elements ([str]): List of elements expected to be in dataset. Used only if the
                feature `element` is used to describe each atom
            max_ring_size (int): Maximum number of atom in the ring
        """
        # Check if openbabel and RDKit are installed
        if Chem is None or pybel is None:
            raise RuntimeError("RDKit and openbabel must be installed")
        super().__init__()
        # Fall back to module-level defaults for any argument left as None
        if bond_features is None:
            bond_features = _BOND_FEATURES
        if atom_features is None:
            atom_features = _ATOM_FEATURES
        if distance_converter is None:
            distance_converter = GaussianDistance(np.linspace(0, 4, 20), 0.5)
        if known_elements is None:
            known_elements = _ELEMENTS
        # Check if all feature names are valid
        if any(i not in _ATOM_FEATURES for i in atom_features):
            bad_features = set(atom_features).difference(_ATOM_FEATURES)
            raise ValueError(f"Unrecognized atom features: {', '.join(bad_features)}")
        self.atom_features = atom_features
        if any(i not in _BOND_FEATURES for i in bond_features):
            bad_features = set(bond_features).difference(_BOND_FEATURES)
            raise ValueError(f"Unrecognized bond features: {', '.join(bad_features)}")
        self.bond_features = bond_features
        self.known_elements = known_elements
        self.distance_converter = distance_converter
        self.max_ring_size = max_ring_size
    def convert(self, mol, state_attributes: List = None, full_pair_matrix: bool = True) -> Dict:  # type: ignore
        """
        Compute the representation for a molecule
        Args:
            mol (pybel.Molecule): Molecule to generate features for
            state_attributes (list): State attributes. Uses average mass and number of bonds per atom as default
            full_pair_matrix (bool): Whether to generate info for all atom pairs, not just bonded ones
        Returns:
            (dict): Dictionary of features
        """
        # Get the features for all atoms and bonds
        atom_features = []
        atom_pairs: List[Dict] = []
        # NOTE(review): `idx` is unused in this loop
        for idx, atom in enumerate(mol.atoms):
            f = self.get_atom_feature(mol, atom)
            atom_features.append(f)
        # Order atoms by their coordinate index so feature rows line up with
        # the pair indices generated below
        atom_features = sorted(atom_features, key=lambda x: x["coordid"])
        num_atoms = mol.OBMol.NumAtoms()
        # One entry per unordered atom pair (bonded-only unless full_pair_matrix)
        for i, j in itertools.combinations(range(0, num_atoms), 2):
            bond_feature = self.get_pair_feature(mol, i, j, full_pair_matrix)
            if bond_feature:
                atom_pairs.append(bond_feature)
            else:
                continue
        # Compute the graph distance, if desired
        if "graph_distance" in self.bond_features:
            graph_dist = self._dijkstra_distance(atom_pairs)
            for pair in atom_pairs:
                d: Dict = {"graph_distance": graph_dist[pair["a_idx"], pair["b_idx"]]}
                pair.update(d)
        # Generate the state attributes (that describe the whole network):
        # default is [mean atomic mass, bonds per atom]
        state_attributes = state_attributes or [
            [mol.molwt / num_atoms, len([i for i in atom_pairs if i["bond_type"] > 0]) / num_atoms]
        ]
        # Get the atom features in the order they are requested by the user as a 2D array
        atoms = []
        for atom in atom_features:
            atoms.append(self._create_atom_feature_vector(atom))
        # Get the bond features in the order request by the user
        bonds = []
        index1_temp = []
        index2_temp = []
        for bond in atom_pairs:
            # Store the index of each bond
            index1_temp.append(bond.pop("a_idx"))
            index2_temp.append(bond.pop("b_idx"))
            # Get the desired bond features
            bonds.append(self._create_pair_feature_vector(bond))
        # Given the bonds (i,j), make it so (i,j) == (j, i): duplicate every
        # pair in both directions so the graph is symmetric
        index1 = index1_temp + index2_temp
        index2 = index2_temp + index1_temp
        bonds = bonds + bonds
        # Sort the arrays by the beginning index
        sorted_arg = np.argsort(index1)
        index1 = np.array(index1)[sorted_arg].tolist()
        index2 = np.array(index2)[sorted_arg].tolist()
        bonds = np.array(bonds)[sorted_arg].tolist()
        return {"atom": atoms, "bond": bonds, "state": state_attributes, "index1": index1, "index2": index2}
    def _create_pair_feature_vector(self, bond: Dict) -> List[int]:
        """Generate the feature vector from the bond feature dictionary
        Handles the binarization of categorical variables, and performing the distance conversion
        Args:
            bond (dict): Features for a certain pair of atoms
        Returns:
            ([float]) Values converted to a vector
        """
        bond_temp: List[int] = []
        for i in self.bond_features:
            # Some features require conversion (e.g., binarization)
            if i in bond:
                if i == "bond_type":
                    # Categories: 0 = unbonded, 1-3 = bond order, 4 = aromatic
                    bond_temp.extend(fast_label_binarize(bond[i], [0, 1, 2, 3, 4]))
                elif i == "same_ring":
                    bond_temp.append(int(bond[i]))
                elif i == "spatial_distance":
                    expanded = self.distance_converter.convert([bond[i]])[0]
                    if isinstance(expanded, np.ndarray):
                        # If we use a distance expansion
                        bond_temp.extend(expanded.tolist())
                    else:
                        # If not
                        bond_temp.append(expanded)
                else:
                    bond_temp.append(bond[i])
        return bond_temp
    def _create_atom_feature_vector(self, atom: dict) -> List[int]:
        """Generate the feature vector from the atomic feature dictionary
        Handles the binarization of categorical variables, and transforming the ring_sizes to a list
        Args:
            atom (dict): Dictionary of atomic features
        Returns:
            ([int]): Atomic feature vector
        """
        atom_temp = []
        for i in self.atom_features:
            if i == "chirality":
                # 0 = not chiral, 1 = R, 2 = S
                atom_temp.extend(fast_label_binarize(atom[i], [0, 1, 2]))
            elif i == "element":
                atom_temp.extend(fast_label_binarize(atom[i], self.known_elements))
            elif i in ["aromatic", "donor", "acceptor"]:
                atom_temp.append(int(atom[i]))
            elif i == "hybridization":
                atom_temp.extend(fast_label_binarize(atom[i], [1, 2, 3, 4, 5, 6]))
            elif i == "ring_sizes":
                atom_temp.extend(ring_to_vector(atom[i], self.max_ring_size))
            else:  # It is a scalar
                atom_temp.append(atom[i])
        return atom_temp
    @staticmethod
    def _dijkstra_distance(pairs: List[Dict]) -> np.ndarray:
        """
        Compute the graph distance between each pair of atoms,
        using the network defined by the bonded atoms.
        Args:
            pairs ([dict]): List of bond information
        Returns:
            ([int]) Distance for each pair of bonds
        """
        # Only genuinely bonded pairs (bond_type > 0) define graph edges
        bonds = []
        for p in pairs:
            if p["bond_type"] > 0:
                bonds.append([p["a_idx"], p["b_idx"]])
        return dijkstra_distance(bonds)
    def get_atom_feature(
        self, mol, atom  # type: ignore
    ) -> Dict:  # type: ignore
        """
        Generate all features of a particular atom
        Args:
            mol (pybel.Molecule): Molecule being evaluated
            atom (pybel.Atom): Specific atom being evaluated
        Return:
            (dict): All features for that atom
        """
        # Get the link to the OpenBabel representation of the atom
        obatom = atom.OBAtom
        atom_idx = atom.idx - 1  # (pybel atoms indices start from 1)
        # Get the element
        element = Element.from_Z(obatom.GetAtomicNum()).symbol
        # Get the fast-to-compute properties
        output = {
            "element": element,
            "atomic_num": obatom.GetAtomicNum(),
            "formal_charge": obatom.GetFormalCharge(),
            # Hydrogen is given its own hybridization category (6)
            "hybridization": 6 if element == "H" else obatom.GetHyb(),
            "acceptor": obatom.IsHbondAcceptor(),
            # For hydrogen atoms, donor status comes from IsHbondDonorH
            "donor": obatom.IsHbondDonorH() if atom.type == "H" else obatom.IsHbondDonor(),
            "aromatic": obatom.IsAromatic(),
            "coordid": atom.coordidx,
        }
        # Get the chirality, if desired
        if "chirality" in self.atom_features:
            # Determine whether the molecule has chiral centers
            chiral_cc = self._get_chiral_centers(mol)
            if atom_idx not in chiral_cc:
                output["chirality"] = 0
            else:
                # 1 --> 'R', 2 --> 'S'
                output["chirality"] = 1 if chiral_cc[atom_idx] == "R" else 2
        # Find the rings, if desired
        if "ring_sizes" in self.atom_features:
            rings = mol.OBMol.GetSSSR()  # OpenBabel caches ring computation internally, no need to cache ourselves
            output["ring_sizes"] = [r.Size() for r in rings if r.IsInRing(atom.idx)]
        return output
    @staticmethod
    def create_bond_feature(mol, bid: int, eid: int) -> Dict:
        """
        Create information for a bond for a pair of atoms that are not actually bonded
        Args:
            mol (pybel.Molecule): Molecule being featurized
            bid (int): Index of atom beginning of the bond
            eid (int): Index of atom at the end of the bond
        """
        # OpenBabel atom indices are 1-based, hence the +1 offsets
        a1 = mol.OBMol.GetAtom(bid + 1)
        a2 = mol.OBMol.GetAtom(eid + 1)
        same_ring = mol.OBMol.AreInSameRing(a1, a2)
        return {
            "a_idx": bid,
            "b_idx": eid,
            "bond_type": 0,  # 0 marks an unbonded pair
            "same_ring": bool(same_ring),
            "spatial_distance": a1.GetDistance(a2),
        }
    def get_pair_feature(self, mol, bid: int, eid: int, full_pair_matrix: bool) -> Union[Dict, None]:
        """
        Get the features for a certain bond
        Args:
            mol (pybel.Molecule): Molecule being featurized
            bid (int): Index of atom beginning of the bond
            eid (int): Index of atom at the end of the bond
            full_pair_matrix (bool): Whether to compute the matrix for every atom - even those that
                are not actually bonded
        """
        # Find the bonded pair of atoms
        bond = mol.OBMol.GetBond(bid + 1, eid + 1)
        if not bond:  # If the bond is ordered in the other direction
            bond = mol.OBMol.GetBond(eid + 1, bid + 1)
        # If the atoms are not bonded
        if not bond:
            if full_pair_matrix:
                return self.create_bond_feature(mol, bid, eid)
            return None
        # Compute bond features
        a1 = mol.OBMol.GetAtom(bid + 1)
        a2 = mol.OBMol.GetAtom(eid + 1)
        same_ring = mol.OBMol.AreInSameRing(a1, a2)
        return {
            "a_idx": bid,
            "b_idx": eid,
            # Aromatic bonds get category 4; otherwise the bond order (1-3)
            "bond_type": 4 if bond.IsAromatic() else bond.GetBondOrder(),
            "same_ring": bool(same_ring),
            "spatial_distance": a1.GetDistance(a2),
        }
    @staticmethod
    def _get_rdk_mol(mol, format: str = "smiles"):
        """
        Convert a pybel molecule to an RDKit Mol via a text round-trip.
        Return: RDKit Mol (w/o H)
        """
        if format == "pdb":
            return Chem.rdmolfiles.MolFromPDBBlock(mol.write("pdb"))
        if format == "smiles":
            return Chem.rdmolfiles.MolFromSmiles(mol.write("smiles"))
        return None
    def _get_chiral_centers(self, mol):
        """
        Use RDKit to find the chiral centers with CIP(R/S) label
        This provides the absolute stereochemistry. The chiral label obtained
        from pybabel and rdkit.mol.getchiraltag is relative positions of the bonds as provided
        Args:
            mol (Molecule): Molecule to asses
        Return:
            (dict): Keys are the atom index and values are the CIP label
        """
        mol_rdk = self._get_rdk_mol(mol, "smiles")
        if mol_rdk is None:
            # Conversion to RDKit has failed
            return {}
        chiral_cc = Chem.FindMolChiralCenters(mol_rdk)
        return dict(chiral_cc)
def dijkstra_distance(bonds: List[List[int]]) -> np.ndarray:
    """
    Compute the all-pairs graph distance over the bond network.

    All bonds have unit weight, so the shortest-path problem reduces to a
    breadth-first search from each atom. (The previous implementation popped
    from the RIGHT of the deque - a depth-first traversal - which, combined
    with the visited set, could permanently record a non-minimal distance,
    e.g. on the 5-cycle 0-1-2-3-4-0 it reported d(0, 2) = 3 instead of 2.
    FIFO `popleft` guarantees minimal distances.)

    Args:
        bonds: (list of list), for example [[0, 1], [1, 2]] means two bonds
            formed by atom 0, 1 and atom 1, 2

    Returns:
        full graph distance matrix; unreachable pairs remain np.inf
    """
    nb_atom = max(itertools.chain(*bonds)) + 1
    # Use np.inf (np.infty was removed in NumPy 2.0). The original requested
    # dtype=np.int32 but multiplying by infinity promoted the array to
    # float64 anyway, so a float matrix preserves the prior behavior.
    graph_dist = np.full((nb_atom, nb_atom), np.inf)
    for start, end in bonds:
        graph_dist[start, end] = 1
        graph_dist[end, start] = 1
    queue: deque = deque()  # Queue used in all loops
    visited: set = set()  # Used in all loops
    for i in range(nb_atom):
        graph_dist[i, i] = 0
        visited.clear()
        queue.append(i)
        while queue:
            s = queue.popleft()  # FIFO pop -> breadth-first order
            visited.add(s)
            for k in np.where(graph_dist[s, :] == 1)[0]:
                if k not in visited:
                    queue.append(k)
                    graph_dist[i, k] = min(graph_dist[i, k], graph_dist[i, s] + 1)
                    graph_dist[k, i] = graph_dist[i, k]
    return graph_dist
def mol_from_smiles(smiles: str):
    """
    Build an openbabel molecule from a SMILES string, generating 3D
    coordinates in the process.

    Args:
        smiles (string): smiles string

    Returns:
        openbabel molecule
    """
    molecule = pybel.readstring(format="smi", string=smiles)
    molecule.make3D()
    return molecule
def mol_from_pymatgen(mol: Molecule):
    """
    Convert a pymatgen Molecule into an openbabel (pybel) molecule with
    generated 3D coordinates.

    Args:
        mol(Molecule)
    """
    converted = pybel.Molecule(BabelMolAdaptor(mol).openbabel_mol)
    converted.make3D()
    return converted
def mol_from_file(file_path: str, file_format: str = "xyz"):
    """
    Read the first molecule from a file.

    Args:
        file_path(str)
        file_format(str): allow formats that open babel supports

    Returns:
        the first pybel molecule in the file

    Raises:
        StopIteration: if the file contains no molecules
    """
    # pybel.readfile returns a generator; take just the first entry instead
    # of materializing every molecule in the file only to index [0].
    return next(pybel.readfile(format=file_format, filename=file_path))
def _convert_mol(mol: str, molecule_format: str, converter: MolecularGraph) -> Dict:
    """Convert a molecule from string form to its graph features

    Utility used by the graph generator. Parsing and conversion both live in
    this one function because Pybel objects are not serializable: molecules
    passed around as plain strings can be pickled to multiprocessing workers,
    whereas Pybel objects cannot.

    Args:
        mol (str): String representation of a molecule
        molecule_format (str): Format of the string representation
        converter (MolecularGraph): Tool used to generate graph representation

    Returns:
        (dict): Graph representation of the molecule
    """
    if molecule_format == "smiles":
        # SMILES input needs 3D coordinates / H atoms generated
        parsed = mol_from_smiles(mol)
    else:
        parsed = pybel.readstring(molecule_format, mol)
    return converter.convert(parsed)
class MolecularGraphBatchGenerator(BaseGraphBatchGenerator):
"""Generator that creates batches of molecular data by computing graph properties on demand
If your dataset is small enough that the descriptions of the whole dataset fit in memory,
we recommend using :class:`megnet.data.graph.GraphBatchGenerator` instead to avoid
the computational cost of dynamically computing graphs."""
def __init__(
self,
mols: List[str],
targets: List[np.ndarray] = None,
converter: MolecularGraph = None,
molecule_format: str = "xyz",
batch_size: int = 128,
shuffle: bool = True,
n_jobs: int = 1,
):
"""
Args:
mols ([str]): List of the string reprensetations of each molecule
targets ([ndarray]): Properties of each molecule to be predicted
converter (MolecularGraph): Converter used to generate graph features
molecule_format (str): Format of each of the string representations in `mols`
batch_size (int): Target size for each batch
shuffle (bool): Whether to shuffle the training data after each epoch
n_jobs (int): Number of worker threads (None to use all threads).
"""
super().__init__(len(mols), targets, batch_size, shuffle)
self.mols = np.array(mols)
if converter is None:
converter = MolecularGraph()
self.converter = converter
self.molecule_format = molecule_format
self.n_jobs = n_jobs
def mute():
with open(os.devnull, "w") as f:
sys.stdout = f
sys.stderr = f
self.pool = Pool(self.n_jobs, initializer=mute) if self.n_jobs != 1 else None
def __del__(self):
if self.pool is not None:
self.pool.close() # Kill thread pool if generator is deleted
def _generate_inputs(self, batch_index: list) -> np.ndarray:
# Get the molecules for this batch
mols = self.mols[batch_index]
# Generate the graphs
graphs = self._generate_graphs(mols)
# Return them as flattened into array format
return self.converter.get_flat_data(graphs)
def _generate_graphs(self, mols: List[str]) -> List[Dict]:
"""Generate graphs for a certain collection of molecules
Args:
mols ([string]): Molecules to process
Returns:
([dict]): Graphs for all of the molecules
"""
if self.pool is None:
graphs = [_convert_mol(m, self.molecule_format, self.converter) for m in mols]
else:
func = partial(_convert_mol, molecule_format=self.molecule_format, converter=self.converter)
graphs = self.pool.map(func, mols)
return graphs
    def create_cached_generator(self) -> GraphBatchGenerator:
        """Generates features for all of the molecules and stores them in memory

        Precomputing every graph up front trades memory for speed: the
        returned generator no longer pays the per-batch graph-conversion cost.

        Returns:
            (GraphBatchGenerator) Graph generator that relies on having the graphs in memory
        """
        # Make all the graphs
        graphs = self._generate_graphs(self.mols)

        # Turn them into a fat array
        atom_features, bond_features, state_features, index1_list, index2_list, targets = self.converter.get_flat_data(
            graphs, self.targets
        )  # type: ignore

        return GraphBatchGenerator(
            atom_features=atom_features,
            bond_features=bond_features,
            state_features=state_features,
            index1_list=index1_list,
            index2_list=index2_list,
            targets=targets,
            is_shuffle=self.is_shuffle,
            batch_size=self.batch_size,
        )
|
materialsvirtuallab/megnet
|
megnet/data/molecule.py
|
Python
|
bsd-3-clause
| 23,634
|
[
"Gaussian",
"Open Babel",
"Pybel",
"RDKit",
"pymatgen"
] |
bde3adf4635d761d2723bb629cb3f2a8f1b4a398bf533ad34ee5aaa0eda6c50a
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for git_patrol."""
import asyncio
import datetime
import logging
import json
import os
import re
import shutil
import tempfile
import unittest
import unittest.mock
import uuid
import git_patrol
import yaml
class _FakeProcess:
  """Minimal stand-in for the asyncio.subprocess.Process class.

  Implements only the surface Git Patrol touches: wait() and communicate().
  Both must be genuine coroutines ('async def'), which is why a plain
  MagicMock cannot simply be swapped in.
  """

  def __init__(self, returncode, stdout, stderr):
    self._exit_status = returncode
    self._output = (stdout, stderr)

  async def wait(self):
    return self._exit_status

  async def communicate(self):
    return self._output
def _MakeFakeCommand(returncode_fn=None, stdout_fn=None, stderr_fn=None):
  """Construct a coroutine to return a FakeProcess.

  Parameters are provided as lookup functions that are called with the args
  provided to the subprocess' command line. This allows mock commands to behave
  differently depending on the subcommand issued. Very useful for mocking
  commands such as 'git' where behavior is determined by the second command
  line argument (ex: 'git describe', 'git ls-remote', 'git clone').

  Args:
    returncode_fn: Lookup function that provides a return code based on the
      subprocess args. If not provided, the return code defaults to zero.
    stdout_fn: Lookup function that can provide a byte array for stdout based on
      the subprocess args. If not provided, stdout defaults to an empty byte
      array.
    stderr_fn: Lookup function that can provide a byte array for stderr based on
      the subprocess args. If not provided, stderr defaults to an empty byte
      array.
  Returns:
    A coroutine that creates a FakeProcess instance.
  """

  class FakeCommand:
    """Stateful fake command execution.

    Keeps track of the number of times a specific command (with arguments) has
    been run to permit responsive fake behavior. Useful for generating
    different exit code/stdout/stderr based on command arguments and the number
    of times a command has been run.

    TODO(brian): Perhaps replace this design with layered AsyncioMock objects.
    """

    def __init__(self, returncode_fn, stdout_fn, stderr_fn):
      self._call_counts = {}
      self._returncode_fn = returncode_fn
      self._stdout_fn = stdout_fn
      self._stderr_fn = stderr_fn

    def __call__(self, *args):
      call_str = ' '.join(['{!r}'.format(arg) for arg in args])
      call_count = self._call_counts.get(call_str, 0)
      self._call_counts[call_str] = call_count + 1
      returncode = 0
      stdout = ''.encode()
      stderr = ''.encode()
      # Consistently use the attributes captured in __init__ rather than the
      # closed-over outer arguments; the values are identical, but mixing the
      # two made the instance state look unused.
      if self._returncode_fn:
        returncode = self._returncode_fn(*args, count=call_count)
      if self._stdout_fn:
        stdout = self._stdout_fn(*args, count=call_count)
      if self._stderr_fn:
        stderr = self._stderr_fn(*args, count=call_count)
      return _FakeProcess(returncode, stdout, stderr)

  fake_command = FakeCommand(returncode_fn, stdout_fn, stderr_fn)

  async def _GetFakeProcess(*args):
    return fake_command(*args)

  return _GetFakeProcess
def AsyncioMock(*args, **kwargs):
  """Create a mock object to replace an 'async def' function.

  Args:
    *args: Positional arguments to the mock function.
    **kwargs: Keyword arguments to the mock function.
  Returns:
    A coroutine function that forwards to a MagicMock; the mock itself is
    reachable via the 'inner_mock' attribute for call assertions.
  """
  mock = unittest.mock.MagicMock(*args, **kwargs)

  async def _invoke(*call_args, **call_kwargs):
    return mock(*call_args, **call_kwargs)

  _invoke.inner_mock = mock
  return _invoke
class MockGitPatrolDb:
  """Trivial stand-in for the Git Patrol database interface.

  Simply exposes the supplied coroutine mocks under the attribute names the
  code under test expects.
  """

  def __init__(self, record_git_poll=None, record_cloud_build=None):
    self.record_git_poll, self.record_cloud_build = (
        record_git_poll, record_cloud_build)
class GitPatrolTest(unittest.TestCase):
  """Tests for git_patrol, run against a throwaway local git repository."""

  async def _init_git_repo(self, git_dir):
    """Populate git_dir with two empty commits and two annotated tags.

    Args:
      git_dir: Existing directory in which to initialize the repository.
    Returns:
      Dict mapping each refname to its commit hash (HEAD plus two tags).
    """
    proc = await asyncio.create_subprocess_exec(
        'git', 'init', '--quiet', cwd=git_dir)
    returncode = await proc.wait()
    self.assertEqual(returncode, 0)
    proc = await asyncio.create_subprocess_exec(
        'git', 'config', 'user.name', '"The Author"', cwd=git_dir)
    returncode = await proc.wait()
    self.assertEqual(returncode, 0)
    proc = await asyncio.create_subprocess_exec(
        'git', 'config', 'user.email', 'the@author.com', cwd=git_dir)
    returncode = await proc.wait()
    self.assertEqual(returncode, 0)
    proc = await asyncio.create_subprocess_exec(
        'git', 'commit', '--quiet', '--allow-empty', '--message="First"',
        cwd=git_dir)
    returncode = await proc.wait()
    self.assertEqual(returncode, 0)
    proc = await asyncio.create_subprocess_exec(
        'git', 'tag', '-a', 'r0001', '-m', 'Tag r0001', cwd=git_dir)
    returncode = await proc.wait()
    self.assertEqual(returncode, 0)
    proc = await asyncio.create_subprocess_exec(
        'git', 'commit', '--quiet', '--allow-empty', '--message="Second"',
        cwd=git_dir)
    returncode = await proc.wait()
    self.assertEqual(returncode, 0)
    proc = await asyncio.create_subprocess_exec(
        'git', 'tag', '-a', 'r0002', '-m', 'Tag r0002', cwd=git_dir)
    returncode = await proc.wait()
    self.assertEqual(returncode, 0)
    # Collect the refs we just created so tests can compare against them.
    proc = await asyncio.create_subprocess_exec(
        'git', 'show-ref', stdout=asyncio.subprocess.PIPE, cwd=git_dir)
    stdout, _ = await proc.communicate()
    returncode = await proc.wait()
    self.assertEqual(returncode, 0)
    raw_refs = stdout.decode('utf-8', 'ignore')
    refs = re.findall(git_patrol.GIT_HASH_REFNAME_REGEX, raw_refs, re.MULTILINE)
    self.assertEqual(len(refs), 3)
    return {refname: commit for (commit, refname) in refs}

  def setUp(self):
    """Create a scratch upstream repository and remember its refs."""
    super(GitPatrolTest, self).setUp()
    logging.disable(logging.CRITICAL)
    self._temp_dir = tempfile.mkdtemp()
    self._upstream_dir = os.path.join(self._temp_dir, 'upstream')
    os.makedirs(self._upstream_dir)
    self._refs = asyncio.get_event_loop().run_until_complete(
        self._init_git_repo(self._upstream_dir))

  def tearDown(self):
    """Remove the scratch repository."""
    shutil.rmtree(self._temp_dir, ignore_errors=True)
    super(GitPatrolTest, self).tearDown()

  def testFetchGitRefsSuccess(self):
    """fetch_git_refs with no filters returns every ref in the repo."""
    commands = git_patrol.GitPatrolCommands()
    upstream_url = 'file://' + self._upstream_dir
    ref_filters = []
    refs = asyncio.get_event_loop().run_until_complete(
        git_patrol.fetch_git_refs(commands, upstream_url, ref_filters))
    self.assertDictEqual(refs, self._refs)

  def testFetchGitRefsFilteredSuccess(self):
    """fetch_git_refs honors ref filter patterns (tags only here)."""
    commands = git_patrol.GitPatrolCommands()
    upstream_url = 'file://' + self._upstream_dir
    ref_filters = ['refs/tags/*']
    refs = asyncio.get_event_loop().run_until_complete(
        git_patrol.fetch_git_refs(commands, upstream_url, ref_filters))
    self.assertDictEqual(
        refs,
        {k: v for k, v in self._refs.items() if k.startswith('refs/tags/')})

  def testWorkflowNotTriggered(self):
    """No workflow runs when the upstream refs have not changed."""
    commands = git_patrol.GitPatrolCommands()
    previous_uuid = uuid.uuid4()
    current_uuid = uuid.uuid4()
    mock_record_git_poll = AsyncioMock(return_value=current_uuid)
    mock_db = MockGitPatrolDb(record_git_poll=mock_record_git_poll)

    loop = asyncio.get_event_loop()
    upstream_url = 'file://' + self._upstream_dir
    ref_filters = []
    utc_datetime = datetime.datetime.utcnow()
    current_uuid, current_refs, new_refs = loop.run_until_complete(
        git_patrol.run_workflow_triggers(
            commands, mock_db, 'upstream', upstream_url, ref_filters,
            utc_datetime, previous_uuid, self._refs))

    # Ensure previous UUID is None since there is no change in the repository's
    # git refs.
    mock_record_git_poll.inner_mock.assert_called_with(
        utc_datetime, upstream_url, 'upstream', None, self._refs, ref_filters)

    # The git commit hashes are always unique across test runs, thus the
    # acrobatics here to extract the HEAD and tag names only.
    record_git_poll_args, _ = mock_record_git_poll.inner_mock.call_args
    self.assertCountEqual(
        ['refs/heads/master', 'refs/tags/r0001', 'refs/tags/r0002'],
        list(record_git_poll_args[4].keys()))

    self.assertEqual(current_refs, self._refs)
    self.assertFalse(new_refs)

  def testWorkflowIsTriggered(self):
    """A changed ref set reports all refs as new and chains the poll UUIDs."""
    commands = git_patrol.GitPatrolCommands()
    previous_uuid = uuid.uuid4()
    current_uuid = uuid.uuid4()
    mock_record_git_poll = AsyncioMock(return_value=current_uuid)
    mock_db = MockGitPatrolDb(record_git_poll=mock_record_git_poll)

    loop = asyncio.get_event_loop()
    upstream_url = 'file://' + self._upstream_dir
    ref_filters = []
    utc_datetime = datetime.datetime.utcnow()
    current_uuid, current_refs, new_refs = loop.run_until_complete(
        git_patrol.run_workflow_triggers(
            commands, mock_db, 'upstream', upstream_url, ref_filters,
            utc_datetime, previous_uuid, {'refs/heads/master': 'none'}))

    mock_record_git_poll.inner_mock.assert_called_with(
        utc_datetime, upstream_url, 'upstream', previous_uuid,
        self._refs, ref_filters)

    # The git commit hashes are always unique across test runs, thus the
    # acrobatics here to extract the HEADs and tag names only.
    record_git_poll_args, _ = mock_record_git_poll.inner_mock.call_args
    self.assertCountEqual(
        ['refs/heads/master', 'refs/tags/r0001', 'refs/tags/r0002'],
        list(record_git_poll_args[4].keys()))

    self.assertDictEqual(current_refs, self._refs)
    self.assertDictEqual(new_refs, self._refs)

  def testRunOneWorkflowSuccess(self):
    """A full workflow run submits, streams, and records the Cloud Build."""
    cloud_build_uuid = '7d1bb5a7-545f-4c30-b640-f5461036e2e7'
    cloud_build_json = [
        ('{ "createTime": "2018-11-01T20:49:31.802340417Z", '
         '"id": "7d1bb5a7-545f-4c30-b640-f5461036e2e7", '
         '"startTime": "2018-11-01T20:50:24.132599935Z", '
         '"status": "QUEUED" }').encode(),
        ('{ "createTime": "2018-11-01T20:49:31.802340417Z", '
         '"finishTime": "2018-11-01T22:44:36.303015Z", '
         '"id": "7d1bb5a7-545f-4c30-b640-f5461036e2e7", '
         '"startTime": "2018-11-01T20:50:24.132599935Z", '
         '"status": "SUCCESS" }').encode()]

    # Queue up three different stdout strings for the gcloud mock to return,
    # one for each of the different commands we expect the client to call.
    def gcloud_builds_stdout(*args, count):
      if args[1] == 'submit':
        return (
            '7d1bb5a7-545f-4c30-b640-f5461036e2e7 '
            '2018-11-01T20:49:31+00:00 '
            '1H54M12S '
            '- '
            '- '
            'QUEUED').encode()
      if args[1] == 'log':
        return ''.encode()
      if args[1] == 'describe':
        return cloud_build_json[count]
      raise ValueError('Unexpected gcloud command: {}'.format(args[1]))

    commands = git_patrol.GitPatrolCommands()
    commands.gcloud = unittest.mock.MagicMock()
    commands.gcloud.side_effect = _MakeFakeCommand(
        stdout_fn=gcloud_builds_stdout)

    # The "record_cloud_build()" method returns the journal_id of the created
    # entry. This must be the value of parent_id for the next entry.
    journal_ids = [1, 2]
    mock_record_cloud_build = AsyncioMock(side_effect=journal_ids)
    mock_db = MockGitPatrolDb(record_cloud_build=mock_record_cloud_build)

    target_config = yaml.safe_load(
        """
        alias: upstream
        workflows:
        - alias: first
          config: first.yaml
          sources: first.tar.gz
          substitutions:
            _VAR0: val0
            _VAR1: val1
        """)
    workflow = target_config['workflows'][0]
    substitutions = workflow['substitutions']
    substitution_list = (
        ','.join('{!s}={!s}'.format(k, v) for (k, v) in substitutions.items()))

    config_path = '/some/path'
    git_poll_uuid = uuid.uuid4()
    git_ref = ('refs/tags/r0002', 'deadbeef')
    workflow_success = asyncio.get_event_loop().run_until_complete(
        git_patrol.run_workflow_body(
            commands, mock_db, config_path, target_config, git_poll_uuid,
            git_ref))
    self.assertTrue(workflow_success)

    commands.gcloud.assert_any_call(
        'builds', 'submit', '--async',
        '--config={}'.format(os.path.join(config_path, workflow['config'])),
        '--substitutions=TAG_NAME={},{}'.format(
            git_ref[0].replace('refs/tags/', ''), substitution_list),
        os.path.join(config_path, workflow['sources']))
    commands.gcloud.assert_any_call(
        'builds', 'log', '--stream', '--no-user-output-enabled',
        cloud_build_uuid)
    commands.gcloud.assert_any_call(
        'builds', 'describe', '--format=json', cloud_build_uuid)

    # We know the method will be called with only positional arguments so we
    # can unpack call_args_list to discard the unused kwargs.
    record_cloud_build_args = [
        args for (args, _) in mock_record_cloud_build.inner_mock.call_args_list]

    # There should be two calls to "record_cloud_build()".
    self.assertEqual(len(record_cloud_build_args), 2)

    # The first call should have parent_id set to "0", indicating this is the
    # first entry. The second call should have parent_id set to "1", indicating
    # this entry has a parent.
    self.assertEqual(record_cloud_build_args[0][0], 0)
    self.assertEqual(record_cloud_build_args[1][0], 1)

    # The recorded Cloud Build JSON status should reflect what we passed via the
    # fake gcloud commands.
    self.assertEqual(
        record_cloud_build_args[0][5].items(),
        json.loads(cloud_build_json[0].decode('utf-8', 'ignore')).items())
    self.assertEqual(
        record_cloud_build_args[1][5].items(),
        json.loads(cloud_build_json[1].decode('utf-8', 'ignore')).items())
# Run the test suite when executed directly.
if __name__ == '__main__':
  unittest.main()
|
google/git-patrol
|
git_patrol_test.py
|
Python
|
apache-2.0
| 14,447
|
[
"Brian"
] |
5e426d0437e53facdcb467f45df84f6152b1fe01cab1202f13d5b44c747e134d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Interface with command line GULP.
http://projects.ivec.org
WARNING: you need to have GULP installed on your system.
"""
__author__ = "Bharat Medasani, Wenhao Sun"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Bharat Medasani"
__email__ = "bkmedasani@lbl.gov,wenhao@mit.edu"
__status__ = "Production"
__date__ = "$Jun 22, 2013M$"
import os
import re
import subprocess
from monty.tempfile import ScratchDir
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# Species GULP treats as polarizable anions (given a "shel" line in
# shell-model input, see GulpIO.structure_lines).
_anions = set(map(Element, ["O", "S", "F", "Cl", "Br", "N", "P"]))

# Species treated as cations, grouped roughly by periodic-table block.
_cations = set(
    map(
        Element,
        [
            "Li", "Na", "K",  # alkali metals
            "Be", "Mg", "Ca",  # alkaline metals
            "Al", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu",
            "Zn", "Ge", "As", "Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh",
            "Pd", "Ag", "Cd", "In", "Sn", "Sb", "Hf", "Ta", "W", "Re",
            "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi",
            "La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy",
            "Ho", "Er", "Tm", "Yb", "Lu",
        ],
    )
)

# Keywords accepted on the first line of a GULP input file. Currently unused
# by keyword_line (its validation is commented out) but kept as reference.
# Note: "full", "hexagonal" and "nofirst_point" appear in more than one
# section of the GULP manual; the set simply de-duplicates them.
_gulp_kw = {
    # Control of calculation type
    "angle", "bond", "cosmo", "cosmic", "cost", "defect", "distance",
    "eem", "efg", "fit", "free_energy", "gasteiger", "genetic",
    "gradients", "md", "montecarlo", "noautobond", "noenergy", "optimise",
    "pot", "predict", "preserve_Q", "property", "phonon", "qeq", "qbond",
    "single", "sm", "static_first", "torsion", "transition_state",
    # Geometric variable specification
    "breathe", "bulk_noopt", "cellonly", "conp", "conv", "isotropic",
    "orthorhombic", "nobreathe", "noflgs", "shell", "unfix",
    # Algorithm
    "c6", "dipole", "fbfgs", "fix_molecule", "full", "hill", "kfull",
    "marvinSE", "madelung", "minimum_image", "molecule", "molmec", "molq",
    "newda", "noanisotropic_2b", "nod2sym", "nodsymmetry",
    "noelectrostatics", "noexclude", "nofcentral", "nofirst_point",
    "noksymmetry", "nolist_md", "nomcediff", "nonanal", "noquicksearch",
    "noreal", "norecip", "norepulsive", "nosasinitevery", "nosderv",
    "nozeropt", "numerical", "qiter", "qok", "spatial", "storevectors",
    "nomolecularinternalke", "voight", "zsisa",
    # Optimisation method
    "conjugate", "dfp", "lbfgs", "numdiag", "positive", "rfo", "unit",
    # Output control
    "average", "broaden_dos", "cartesian", "compare", "conserved",
    "dcharge", "dynamical_matrix", "eigenvectors", "global", "hessian",
    "hexagonal", "intensity", "linmin", "meanke", "nodensity_out",
    "nodpsym", "nofirst_point", "nofrequency", "nokpoints", "operators",
    "outcon", "prt_eam", "prt_two", "prt_regi_before", "qsas", "restore",
    "save", "terse",
    # Structure control
    "full", "hexagonal", "lower_symmetry", "nosymmetry",
    # PDF control
    "PDF", "PDFcut", "PDFbelow", "PDFkeep", "coreinfo", "nowidth",
    "nopartial",
    # Miscellaneous
    "nomodcoord", "oldunits", "zero_potential",
}
class GulpIO:
    """
    To generate GULP input and process output
    """

    @staticmethod
    def keyword_line(*args):
        r"""
        Checks if the input args are proper gulp keywords and
        generates the 1st line of gulp input. Full keywords are expected.

        Args:
            \\*args: 1st line keywords
        """
        # Keyword validation is currently disabled; the args are joined onto
        # the first line as-is.
        # if len(list(filter(lambda x: x in _gulp_kw, args))) != len(args):
        #     raise GulpError("Wrong keywords given")
        gin = " ".join(args)
        gin += "\n"
        return gin

    @staticmethod
    def structure_lines(
        structure,
        cell_flg=True,
        frac_flg=True,
        anion_shell_flg=True,
        cation_shell_flg=False,
        symm_flg=True,
    ):
        """
        Generates GULP input string corresponding to pymatgen structure.

        Args:
            structure: pymatgen Structure object
            cell_flg (default = True): Option to use lattice parameters.
            fractional_flg (default = True): If True, fractional coordinates
                are used. Else, cartesian coodinates in Angstroms are used.
                ******
                GULP convention is to use fractional coordinates for periodic
                structures and cartesian coordinates for non-periodic
                structures.
                ******
            anion_shell_flg (default = True): If True, anions are considered
                polarizable.
            cation_shell_flg (default = False): If True, cations are
                considered polarizable.
            symm_flg (default = True): If True, symmetry information is also
                written.

        Returns:
            string containing structure for GULP input
        """
        gin = ""
        if cell_flg:
            gin += "cell\n"
            l = structure.lattice
            lat_str = "{0:6f} {1:6f} {2:6f} {3:6f} {4:6f} {5:6f}".format(l.a, l.b, l.c, l.alpha, l.beta, l.gamma)
            gin += lat_str + "\n"
        if frac_flg:
            gin += "frac\n"
            coord_attr = "frac_coords"
        else:
            gin += "cart\n"
            coord_attr = "coords"
        for site in structure.sites:
            coord = [str(i) for i in getattr(site, coord_attr)]
            specie = site.specie
            # Every site gets a "core" line; polarizable species additionally
            # get a "shel" (shell) line at the same coordinates.
            core_site_desc = specie.symbol + " core " + " ".join(coord) + "\n"
            gin += core_site_desc
            if (specie in _anions and anion_shell_flg) or (specie in _cations and cation_shell_flg):
                shel_site_desc = specie.symbol + " shel " + " ".join(coord) + "\n"
                gin += shel_site_desc
            else:
                pass
        if symm_flg:
            gin += "space\n"
            gin += str(SpacegroupAnalyzer(structure).get_space_group_number()) + "\n"
        return gin

    @staticmethod
    def specie_potential_lines(structure, potential, **kwargs):
        r"""
        Generates GULP input specie and potential string for pymatgen
        structure.

        Args:
            structure: pymatgen.core.structure.Structure object
            potential: String specifying the type of potential used
            \\*\\*kwargs: Additional parameters related to potential. For
                potential == "buckingham",
                anion_shell_flg (default = False):
                    If True, anions are considered polarizable.
                anion_core_chrg=float
                anion_shell_chrg=float
                cation_shell_flg (default = False):
                    If True, cations are considered polarizable.
                cation_core_chrg=float
                cation_shell_chrg=float

        Returns:
            string containing specie and potential specification for gulp
            input.

        Raises:
            NotImplementedError: always; use library_line instead.
        """
        raise NotImplementedError("gulp_specie_potential not yet implemented." "\nUse library_line instead")

    @staticmethod
    def library_line(file_name):
        """
        Specifies GULP library file to read species and potential parameters.
        If using library don't specify species and potential
        in the input file and vice versa. Make sure the elements of
        structure are in the library file.

        Search order: absolute/relative path as given, then the current
        working directory, then the GULP_LIB environment directory.

        Args:
            file_name: Name of GULP library file

        Returns:
            GULP input string specifying library option

        Raises:
            GulpError: if the file is not found in any search location.
        """
        gulplib_set = "GULP_LIB" in os.environ.keys()

        def readable(f):
            return os.path.isfile(f) and os.access(f, os.R_OK)

        gin = ""
        dirpath, fname = os.path.split(file_name)
        if dirpath and readable(file_name):  # Full path specified
            gin = "library " + file_name
        else:
            fpath = os.path.join(os.getcwd(), file_name)  # Check current dir
            if readable(fpath):
                gin = "library " + fpath
            elif gulplib_set:  # Check the GULP_LIB path
                fpath = os.path.join(os.environ["GULP_LIB"], file_name)
                if readable(fpath):
                    # NOTE(review): the bare file name (not fpath) is written
                    # here -- presumably GULP resolves it against GULP_LIB
                    # itself; confirm before changing.
                    gin = "library " + file_name
        if gin:
            return gin + "\n"
        raise GulpError("GULP Library not found")

    def buckingham_input(self, structure, keywords, library=None, uc=True, valence_dict=None):
        """
        Gets a GULP input for an oxide structure and buckingham potential
        from library.

        Args:
            structure: pymatgen.core.structure.Structure
            keywords: GULP first line keywords.
            library (Default=None): File containing the species and potential.
            uc (Default=True): Unit Cell Flag.
            valence_dict: {El: valence}
        """
        gin = self.keyword_line(*keywords)
        gin += self.structure_lines(structure, symm_flg=not uc)
        if not library:
            gin += self.buckingham_potential(structure, valence_dict)
        else:
            gin += self.library_line(library)
        return gin

    @staticmethod
    def buckingham_potential(structure, val_dict=None):
        """
        Generate species, buckingham, and spring options for an oxide structure
        using the parameters in default libraries.

        Ref:
            1. G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys.,
               18, 1149-1161 (1985)
            2. T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle,
               J. Mater Chem., 4, 831-837 (1994)

        Args:
            structure: pymatgen.core.structure.Structure
            val_dict (Needed if structure is not charge neutral): {El:valence}
                dict, where El is element.
        """
        if not val_dict:
            try:
                # If structure is oxidation state decorated, use that first.
                el = [site.specie.symbol for site in structure]
                valences = [site.specie.oxi_state for site in structure]
                val_dict = dict(zip(el, valences))
            except AttributeError:
                # Fall back to bond-valence analysis when sites carry plain
                # Elements (no oxi_state attribute).
                bv = BVAnalyzer()
                el = [site.specie.symbol for site in structure]
                valences = bv.get_valences(structure)
                val_dict = dict(zip(el, valences))

        # Try bush library first
        bpb = BuckinghamPotential("bush")
        bpl = BuckinghamPotential("lewis")
        gin = ""
        for key in val_dict.keys():
            use_bush = True
            # Strip charge decorations (digits, +, -) to get the bare element.
            el = re.sub(r"[1-9,+,\-]", "", key)
            if el not in bpb.species_dict.keys():
                use_bush = False
            elif val_dict[key] != bpb.species_dict[el]["oxi"]:
                use_bush = False
            if use_bush:
                gin += "species \n"
                gin += bpb.species_dict[el]["inp_str"]
                gin += "buckingham \n"
                gin += bpb.pot_dict[el]
                gin += "spring \n"
                gin += bpb.spring_dict[el]
                continue

            # Try lewis library next if element is not in bush
            # use_lewis = True
            if el != "O":  # For metals the key is "Metal_OxiState+"
                k = el + "_" + str(int(val_dict[key])) + "+"
                if k not in bpl.species_dict.keys():
                    # use_lewis = False
                    raise GulpError("Element {} not in library".format(k))
                gin += "species\n"
                gin += bpl.species_dict[k]
                gin += "buckingham\n"
                gin += bpl.pot_dict[k]
            else:
                # Oxygen always gets both core and shell species plus a
                # spring constant in the Lewis library.
                gin += "species\n"
                k = "O_core"
                gin += bpl.species_dict[k]
                k = "O_shel"
                gin += bpl.species_dict[k]
                gin += "buckingham\n"
                gin += bpl.pot_dict[key]
                gin += "spring\n"
                gin += bpl.spring_dict[key]
        return gin

    def tersoff_input(self, structure, periodic=False, uc=True, *keywords):
        """
        Gets a GULP input with Tersoff potential for an oxide structure

        Args:
            structure: pymatgen.core.structure.Structure
            periodic (Default=False): Flag denoting whether periodic
                boundary conditions are used
            library (Default=None): File containing the species and potential.
            uc (Default=True): Unit Cell Flag.
            keywords: GULP first line keywords.
        """
        # gin="static noelectrostatics \n "
        gin = self.keyword_line(*keywords)
        gin += self.structure_lines(
            structure,
            cell_flg=periodic,
            frac_flg=periodic,
            anion_shell_flg=False,
            cation_shell_flg=False,
            symm_flg=not uc,
        )
        gin += self.tersoff_potential(structure)
        return gin

    @staticmethod
    def tersoff_potential(structure):
        """
        Generate the species, tersoff potential lines for an oxide structure

        Args:
            structure: pymatgen.core.structure.Structure

        Raises:
            SystemError: if a metal site carries a non-integer valence
                (mixed valence is not supported by the table lookup).
        """
        bv = BVAnalyzer()
        el = [site.specie.symbol for site in structure]
        valences = bv.get_valences(structure)
        el_val_dict = dict(zip(el, valences))

        gin = "species \n"
        qerfstring = "qerfc\n"

        for key, value in el_val_dict.items():
            if key != "O" and value % 1 != 0:
                raise SystemError("Oxide has mixed valence on metal")
            specie_string = key + " core " + str(value) + "\n"
            gin += specie_string
            qerfstring += key + " " + key + " 0.6000 10.0000 \n"

        gin += "# noelectrostatics \n Morse \n"
        # Tersoff parameters are keyed as "Metal(oxidation_state)".
        met_oxi_ters = TersoffPotential().data
        for key, value in el_val_dict.items():
            if key != "O":
                metal = key + "(" + str(int(value)) + ")"
                ters_pot_str = met_oxi_ters[metal]
                gin += ters_pot_str

        gin += qerfstring
        return gin

    @staticmethod
    def get_energy(gout):
        """
        Args:
            gout (str): GULP output string.

        Returns:
            Energy (float), taken from the last matching energy line.

        Raises:
            GulpError: if no energy line is found in the output.
        """
        energy = None
        for line in gout.split("\n"):
            if "Total lattice energy" in line and "eV" in line:
                energy = line.split()
            elif "Non-primitive unit cell" in line and "eV" in line:
                energy = line.split()
        if energy:
            # `energy` holds the whitespace-split tokens of the matched line;
            # the value in eV is the fifth token.
            return float(energy[4])
        raise GulpError("Energy not found in Gulp output")

    @staticmethod
    def get_relaxed_structure(gout):
        """
        Args:
            gout (str): GULP output string.

        Returns:
            (Structure) relaxed structure.

        Raises:
            IOError: if no fractional-coordinate block is found in the output.
        """
        # Find the structure lines
        structure_lines = []
        cell_param_lines = []
        output_lines = gout.split("\n")
        no_lines = len(output_lines)
        i = 0
        # Compute the input lattice parameters
        # NOTE(review): a/b/c/alpha/beta/gamma remain unbound if neither
        # "Full cell parameters" nor "Cell parameters" appears AND no
        # "Final cell parameters" block is found later -- confirm GULP output
        # always contains one of these before relying on this parser.
        while i < no_lines:
            line = output_lines[i]
            if "Full cell parameters" in line:
                i += 2
                line = output_lines[i]
                a = float(line.split()[8])
                alpha = float(line.split()[11])
                line = output_lines[i + 1]
                b = float(line.split()[8])
                beta = float(line.split()[11])
                line = output_lines[i + 2]
                c = float(line.split()[8])
                gamma = float(line.split()[11])
                i += 3
                break
            if "Cell parameters" in line:
                i += 2
                line = output_lines[i]
                a = float(line.split()[2])
                alpha = float(line.split()[5])
                line = output_lines[i + 1]
                b = float(line.split()[2])
                beta = float(line.split()[5])
                line = output_lines[i + 2]
                c = float(line.split()[2])
                gamma = float(line.split()[5])
                i += 3
                break
            i += 1

        while i < no_lines:
            line = output_lines[i]
            if "Final fractional coordinates of atoms" in line:
                # read the site coordinates in the following lines
                i += 6
                line = output_lines[i]
                # Site rows continue until the "----" separator line.
                while line[0:2] != "--":
                    structure_lines.append(line)
                    i += 1
                    line = output_lines[i]
                # read the cell parameters
                i += 9
                line = output_lines[i]
                if "Final cell parameters" in line:
                    i += 3
                    for del_i in range(6):
                        line = output_lines[i + del_i]
                        cell_param_lines.append(line)
                break
            i += 1

        # Process the structure lines
        if structure_lines:
            sp = []
            coords = []
            for line in structure_lines:
                fields = line.split()
                # Only "core" rows describe sites; shell rows are skipped.
                if fields[2] == "c":
                    sp.append(fields[1])
                    coords.append(list(float(x) for x in fields[3:6]))
        else:
            raise IOError("No structure found")

        # Relaxed cell parameters override the input ones when present.
        if cell_param_lines:
            a = float(cell_param_lines[0].split()[1])
            b = float(cell_param_lines[1].split()[1])
            c = float(cell_param_lines[2].split()[1])
            alpha = float(cell_param_lines[3].split()[1])
            beta = float(cell_param_lines[4].split()[1])
            gamma = float(cell_param_lines[5].split()[1])
        latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)

        return Structure(latt, sp, coords)
class GulpCaller:
    """
    Class to run gulp from commandline
    """

    def __init__(self, cmd="gulp"):
        """
        Initialize with the executable if not in the standard path

        Args:
            cmd: Command. Defaults to gulp.

        Raises:
            GulpError: if the executable cannot be located (neither as a
                direct path nor anywhere on PATH).
        """

        def is_exe(f):
            return os.path.isfile(f) and os.access(f, os.X_OK)

        fpath, fname = os.path.split(cmd)
        if fpath:
            # Explicit path given: use it only if it is executable.
            if is_exe(cmd):
                self._gulp_cmd = cmd
                return
        else:
            # Bare command name: search each PATH entry.
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                file = os.path.join(path, cmd)
                if is_exe(file):
                    self._gulp_cmd = file
                    return
        raise GulpError("Executable not found")

    def run(self, gin):
        """
        Run GULP using the gin as input

        Args:
            gin: GULP input string

        Returns:
            gout: GULP output string

        Raises:
            GulpError: if GULP writes an error to stderr or "ERROR" to stdout.
            GulpConvergenceError: if the optimisation did not converge.
        """
        # Run inside a scratch directory so GULP's temp files are discarded.
        with ScratchDir("."):
            with subprocess.Popen(
                self._gulp_cmd,
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE,
                stderr=subprocess.PIPE,
            ) as p:
                out, err = p.communicate(bytearray(gin, "utf-8"))
            out = out.decode("utf-8")
            err = err.decode("utf-8")

            if "Error" in err or "error" in err:
                print(gin)
                print("----output_0---------")
                print(out)
                print("----End of output_0------\n\n\n")
                print("----output_1--------")
                # NOTE(review): this second block prints `out` again, not
                # `err` -- looks like it was meant to show stderr; confirm
                # before changing the diagnostic output.
                print(out)
                print("----End of output_1------")
                raise GulpError(err)

            # We may not need this
            if "ERROR" in out:
                raise GulpError(out)

            # Sometimes optimisation may fail to reach convergence
            conv_err_string = "Conditions for a minimum have not been satisfied"
            if conv_err_string in out:
                raise GulpConvergenceError()

            gout = ""
            for line in out.split("\n"):
                gout = gout + line + "\n"
            return gout
def get_energy_tersoff(structure, gulp_cmd="gulp"):
    """
    Compute the energy of a structure using Tersoff potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
    """
    io_handler = GulpIO()
    caller = GulpCaller(gulp_cmd)
    output = caller.run(io_handler.tersoff_input(structure))
    return io_handler.get_energy(output)
def get_energy_buckingham(structure, gulp_cmd="gulp", keywords=("optimise", "conp", "qok"), valence_dict=None):
    """
    Compute the energy of a structure using Buckingham potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
        keywords: GULP first line keywords
        valence_dict: {El: valence}. Needed if the structure is not charge
            neutral.
    """
    io_handler = GulpIO()
    caller = GulpCaller(gulp_cmd)
    gulp_input = io_handler.buckingham_input(
        structure, keywords, valence_dict=valence_dict)
    return io_handler.get_energy(caller.run(gulp_input))
def get_energy_relax_structure_buckingham(structure, gulp_cmd="gulp", keywords=("optimise", "conp"), valence_dict=None):
    """
    Relax a structure and compute the energy using Buckingham potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
        keywords: GULP first line keywords
        valence_dict: {El: valence}. Needed if the structure is not charge
            neutral.

    Returns:
        (energy, relaxed_structure) tuple.
    """
    io_handler = GulpIO()
    caller = GulpCaller(gulp_cmd)
    gulp_input = io_handler.buckingham_input(
        structure, keywords, valence_dict=valence_dict)
    output = caller.run(gulp_input)
    return io_handler.get_energy(output), io_handler.get_relaxed_structure(output)
class GulpError(Exception):
    """
    Exception class for GULP.
    Raised when the GULP gives an error
    """

    def __init__(self, msg):
        """
        Args:
            msg (str): Message
        """
        self.msg = msg

    def __str__(self):
        return f"GulpError : {self.msg}"
class GulpConvergenceError(Exception):
    """
    Exception class for GULP.
    Raised when proper convergence is not reached in Mott-Littleton
    defect energy optimisation procedure in GULP
    """

    def __init__(self, msg=""):
        """
        Args:
            msg (str): Message
        """
        self.msg = msg

    def __str__(self):
        return str(self.msg)
class BuckinghamPotential:
    """
    Generate the Buckingham Potential Table from the bush.lib and lewis.lib.

    Ref:
        T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle, J. Mater Chem.,
        4, 831-837 (1994).
        G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys., 18,
        1149-1161 (1985)
    """

    def __init__(self, bush_lewis_flag):
        """
        Args:
            bush_lewis_flag (str): Flag for using Bush or Lewis potential.
                Must be "bush" or "lewis"; the matching library file is read
                from the directory named by the GULP_LIB environment variable.
        """
        assert bush_lewis_flag in {"bush", "lewis"}
        pot_file = "bush.lib" if bush_lewis_flag == "bush" else "lewis.lib"
        with open(os.path.join(os.environ["GULP_LIB"], pot_file), "rt") as f:
            # In lewis.lib there is no shell for cation
            species_dict, pot_dict, spring_dict = {}, {}, {}
            # The library file is section-based: a "species"/"buckingham"/
            # "spring" header line switches which dict subsequent rows go to.
            sp_flg, pot_flg, spring_flg = False, False, False
            for row in f:
                if row[0] == "#":
                    continue
                if row.split()[0] == "species":
                    sp_flg, pot_flg, spring_flg = True, False, False
                    continue
                if row.split()[0] == "buckingham":
                    sp_flg, pot_flg, spring_flg = False, True, False
                    continue
                if row.split()[0] == "spring":
                    sp_flg, pot_flg, spring_flg = False, False, True
                    continue
                elmnt = row.split()[0]

                if sp_flg:
                    if bush_lewis_flag == "bush":
                        # Bush: accumulate the raw rows and total oxidation
                        # state (core + shell charges) per element.
                        if elmnt not in species_dict.keys():
                            species_dict[elmnt] = {"inp_str": "", "oxi": 0}
                        species_dict[elmnt]["inp_str"] += row
                        species_dict[elmnt]["oxi"] += float(row.split()[2])
                    elif bush_lewis_flag == "lewis":
                        if elmnt == "O":
                            # Oxygen is the only species with both core and
                            # shell entries in lewis.lib.
                            if row.split()[1] == "core":
                                species_dict["O_core"] = row
                            if row.split()[1] == "shel":
                                species_dict["O_shel"] = row
                        else:
                            # Metal keys look like "Metal_OxiState+".
                            metal = elmnt.split("_")[0]
                            # oxi_state = metaloxi.split('_')[1][0]
                            species_dict[elmnt] = metal + " core " + row.split()[2] + "\n"
                    continue

                if pot_flg:
                    if bush_lewis_flag == "bush":
                        pot_dict[elmnt] = row
                    elif bush_lewis_flag == "lewis":
                        if elmnt == "O":
                            pot_dict["O"] = row
                        else:
                            metal = elmnt.split("_")[0]
                            # oxi_state = metaloxi.split('_')[1][0]
                            pot_dict[elmnt] = metal + " " + " ".join(row.split()[1:]) + "\n"
                    continue

                if spring_flg:
                    spring_dict[elmnt] = row

            if bush_lewis_flag == "bush":
                # Fill the null keys in spring dict with empty strings
                for key in pot_dict.keys():
                    if key not in spring_dict.keys():
                        spring_dict[key] = ""

            self.species_dict = species_dict
            self.pot_dict = pot_dict
            self.spring_dict = spring_dict
class TersoffPotential:
    """
    Generate Tersoff Potential Table from "OxideTersoffPotentials" file
    """

    def __init__(self):
        """
        Read the "OxideTersoffPotentials" file that sits next to this
        module and build {metal-oxide key: parameter string}.
        """
        table = {}
        module_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(module_dir, "OxideTersoffPotentials"), "r") as f:
            for row in f:
                # Key is the first whitespace token; the parameters are
                # everything after the first closing parenthesis.
                metaloxi = row.split()[0]
                table[metaloxi] = row.split(")")[1]
        self.data = table
|
gmatteo/pymatgen
|
pymatgen/command_line/gulp_caller.py
|
Python
|
mit
| 27,488
|
[
"GULP",
"pymatgen"
] |
6bf1b637ceee5eb42e829fb653a25f04f92ec572828df260f63fa684912b8c33
|
import numpy as np
from scipy import integrate
import math
from scipy.integrate import odeint, ode
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from scipy import interpolate
from matplotlib.path import Path
import matplotlib.patches as patches
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
#from pync import Notifier
# data play around with these
dt = 0.01 #timestep
maxtime = 1# end time
timesteps = int(maxtime/dt) # number of timesteps
length_x = 0.1 # length in x direction
length_y = 11 # length in y direction
num_x = 2 # number of nodes in x direction
num_y = 4 # number of nodes in y direction
m = 0.01 # mass
k = 10000 # stiffness k10 b0.8 m1 force1
b = 0.98 # damping coeff
g = 9.81 # gravitational acceleration
#do not change these
mu_stat = 0.2 # static friction coefficient
mu_dyn = 0.1 # dynamic friction coefficient
k_ground = 800 # ground contact stiffness
b_ground = 10 # ground contact damping
D = 0.1 # depth scale used to blend in ground damping (see f()) -- TODO confirm
E = 1.6 # exponent of the nonlinear ground spring
ksma = 0 # SMA spring stiffness (0 disables SMA forces)
timestep_real = 0 # solver-time bookkeeping, mutated inside vectorfield()
# coordinates to plot
x_coords_init = np.zeros((num_x, num_y)) # coordinates
y_coords_init = np.zeros((num_x, num_y)) # coordinates
x_coords = np.zeros((num_x, num_y)) # coordinates
y_coords = np.zeros((num_x, num_y)) # coordinates
xct = np.zeros((num_x, num_y)) # coordinates
yct = np.zeros((num_x, num_y)) # coordinates
# SMA
nodes =[]
nodes.append([[0,0],[0,3]]) # one SMA wire connecting grid nodes (0,0) and (0,3)
for i in range(num_x):
    for j in range(num_y):
        x_coords_init[i,j] = i*length_x/num_x
        y_coords_init[i,j] = j*length_y/num_y
# mesh
for i in range(num_x-2):
    for j in range(num_y-2):
        x_coords_init[i+1,j+1] = x_coords_init[i+1,j+1] #+ np.random.rand()/10
        y_coords_init[i+1,j+1] = y_coords_init[i+1,j+1] #+ np.random.rand()/10
'''
# boundaries
for i in range(num_x):
x_coords_init[i,0] = i*length_x/(num_x-1)
y_coords_init[i,0] = 0
x_coords_init[i,num_y-1] = i*length_x/(num_x-1)
y_coords_init[i,num_y-1] = length_y
for j in range(num_y):
x_coords_init[0,j] = 0
y_coords_init[0,j] = j*length_y/(num_y-1)
x_coords_init[num_x-1,j] = length_x
y_coords_init[num_x-1,j] = j*length_y/(num_y-1)
'''
# simulation parameters
abserr = 1.0e-8
relerr = 1.0e-6
# initialisation
p = np.zeros((num_x, num_y)) # displacement in x direction
q = np.zeros((num_x, num_y)) # vel in x direction
r = np.zeros((num_x, num_y)) # displacement in y direction
s = np.zeros((num_x, num_y)) # vel in y direction
q_before = np.zeros((num_x, num_y)) # vel in x direction
contactforce = np.zeros((num_x, num_y)) # contact force
smaforce_x = np.zeros((num_x, num_y)) # SMA force
smaforce_y = np.zeros((num_x, num_y)) # SMA force
bodyforce = np.zeros((num_x, num_y)) # body force
# Parameter bundle handed to the ODE right-hand side (vectorfield).
params = [m, k, b, smaforce_x, smaforce_y, ksma]
def octopus(angle, lx, ly, nx, ny, x0, y0):
    """Build initial coordinates for an (nx, ny) node grid anchored at (x0, y0).

    Column tilt angles are spread linearly from `angle` to `pi - angle`;
    when `angle == pi/2` the grid is a plain rectangle (no shear applied).
    Returns (xmatrix, ymatrix) of shape (nx, ny).
    """
    tilt = [angle + a * (math.pi - 2 * angle) / (nx - 1) for a in range(nx)]
    print(tilt)
    xmatrix = np.zeros([nx, ny])
    ymatrix = np.zeros([nx, ny])
    for i in range(nx):
        for j in range(ny):
            if angle != math.pi / 2:
                shear = ly * j / (math.tan(tilt[i]) * (ny - 1))
            else:
                shear = 0
            xmatrix[i, j] = x0 + i * lx / (nx - 1) - j * shear
            ymatrix[i, j] = y0 + j * ly / (ny - 1)
    return xmatrix, ymatrix
# Rectangular initial grid (angle = pi/2 means no shear), lifted 2 units up.
x_coords_init, y_coords_init = octopus(math.pi/2, length_x, length_y, num_x, num_y, 0, 2)
# mesh
for i in range(num_x-2):
    for j in range(num_y-2):
        x_coords_init[i+1,j+1] = x_coords_init[i+1,j+1] #+ np.random.rand()/10
        y_coords_init[i+1,j+1] = y_coords_init[i+1,j+1] #+ np.random.rand()/10
def matrix_to_vector(matrix):
    """Flatten a (num_x, num_y) matrix into a 1-D numpy array, row-major."""
    return np.array([matrix[i, j]
                     for i in range(num_x)
                     for j in range(num_y)])
def vector_to_matrix(vector):
    """Reshape a flat row-major vector back into a (num_x, num_y) matrix."""
    matrix = np.zeros((num_x, num_y))
    for i in range(num_x):
        row_offset = num_y * i
        for j in range(num_y):
            matrix[i, j] = vector[row_offset + j]
    return matrix
def matrix_to_w(p, q, r, s):
    """Pack the four state matrices into the interleaved flat state list
    [p0, q0, r0, s0, p1, q1, r1, s1, ...] expected by the ODE solver."""
    flattened = [matrix_to_vector(mat) for mat in (p, q, r, s)]
    w = []
    for node_state in zip(*flattened):
        w.extend(node_state)
    return w
def f(x, _tck=interpolate.splrep([-0.03, -0.02, -0.01, 0, 1, 1.01, 1.02, 1.03],
                                 [0, 0, 0, 0, 1, 1, 1, 1])):
    """Smoothed step: 0 for x < 0, 1 for x > 1, cubic-spline blend between.

    The spline knots are computed once at import time via the `_tck`
    default argument; the original rebuilt the spline on every call,
    including calls that only hit the constant branches.

    Args:
        x: scalar input.
        _tck: precomputed spline representation; do not override.

    Returns:
        1 for x > 1, 0 for x < 0, otherwise the spline value (a 0-d array).
    """
    if x > 1:
        return 1
    if x < 0:
        return 0
    return interpolate.splev(x, _tck)
def normalforce(yabs, yvel):
    """Ground reaction force: nonlinear spring minus blended damping,
    clamped so it never pulls the node down (>= 0)."""
    spring = -k_ground * np.sign(yabs) * abs(yabs) ** E
    damping = b_ground * yvel * f(-yabs / D)
    return max(0, spring - damping)
def frictionforce(fhorizontal, fnormal, vhorizontal, v_before, dtreal):
    """Coulomb stick/slip friction.

    A node that is (nearly) stopped or has just reversed direction sticks
    unless the applied horizontal force exceeds static friction; otherwise
    dynamic friction opposes the motion.
    """
    sliding = fhorizontal - np.sign(vhorizontal) * mu_dyn * fnormal
    nearly_stopped = abs(vhorizontal) < abs(fnormal) * mu_dyn * dtreal / m
    reversed_dir = np.sign(vhorizontal) != np.sign(v_before)
    if nearly_stopped or reversed_dir:
        if abs(fhorizontal) > mu_stat * abs(fnormal):
            return sliding
        return 0
    return sliding
def smaforce(t, transient, nodes, smaforce_x, smaforce_y, x_coords_init, y_coords_init, p, r, ksma):
    """Shape-memory-alloy (SMA) wire forces between node pairs.

    Each entry of `nodes` is [[i0, j0], [i1, j1]], a pair of grid indices
    connected by an SMA wire whose resting length contracts over time from
    100% to 40% of the initial separation.

    Args:
        t: current time.
        transient: contraction rate of the resting length.
        nodes: list of connected index pairs.
        smaforce_x, smaforce_y: force arrays, overwritten in place at the
            end-node entries and also returned.
        x_coords_init, y_coords_init: undeformed node coordinates.
        p, r: displacement matrices in x and y.
        ksma: SMA spring stiffness.

    Returns:
        (smaforce_x, smaforce_y).
    """
    for (i0, j0), (i1, j1) in nodes:
        # Current (deformed) end-node positions.
        x0abs = x_coords_init[i0, j0] + p[i0, j0]
        y0abs = y_coords_init[i0, j0] + r[i0, j0]
        x1abs = x_coords_init[i1, j1] + p[i1, j1]
        y1abs = y_coords_init[i1, j1] + r[i1, j1]
        initiallength = math.sqrt((x_coords_init[i0, j0] - x_coords_init[i1, j1]) ** 2
                                  + (y_coords_init[i0, j0] - y_coords_init[i1, j1]) ** 2)
        restinglength = initiallength * (0.6 * math.exp(-transient * t) + 0.4)
        dx = x0abs - x1abs
        dy = y0abs - y1abs
        length = math.sqrt(dx ** 2 + dy ** 2)
        # Hooke force along the wire. The original dotted the force vector
        # with the [1,0]/[0,1] basis vectors, which just selects components.
        scale = ksma * (length - restinglength) / length
        smaforce_x[i0, j0] = -scale * dx
        smaforce_y[i0, j0] = -scale * dy
        smaforce_x[i1, j1] = scale * dx
        smaforce_y[i1, j1] = scale * dy
    return smaforce_x, smaforce_y
def f_x(x0, x1, y0, y1, x0abs, x1abs, y0abs, y1abs):
    """x component of the unit spring force between two connected nodes.

    (x0, y0)/(x1, y1) are the nodes' displacements and the *abs values their
    current absolute positions; the resting length is the distance between
    the undeformed positions (absolute minus displacement). The original
    built a 2-vector and dotted it with [1, 0], which is simply the x
    component -- this computes it directly.
    """
    dx = x0abs - x1abs
    dy = y0abs - y1abs
    length = math.sqrt(dx ** 2 + dy ** 2)
    rest = math.sqrt((dx - (x0 - x1)) ** 2 + (dy - (y0 - y1)) ** 2)
    return (length - rest) * dx / length
def f_y(x0, x1, y0, y1, x0abs, x1abs, y0abs, y1abs):
    """y component of the unit spring force between two connected nodes.

    Same contract as f_x: the resting length is recovered from the
    undeformed positions (absolute minus displacement), and the dot product
    with [0, 1] in the original reduces to the y component.
    """
    dx = x0abs - x1abs
    dy = y0abs - y1abs
    length = math.sqrt(dx ** 2 + dy ** 2)
    rest = math.sqrt((dx - (x0 - x1)) ** 2 + (dy - (y0 - y1)) ** 2)
    return (length - rest) * dy / length
def w_to_matrix(w):
    """Unpack the interleaved flat state vector into (p, q, r, s) matrices
    (displacement x, velocity x, displacement y, velocity y)."""
    p = np.zeros((num_x, num_y)) # disp in x direction
    q = np.zeros((num_x, num_y)) # vel in x direction
    r = np.zeros((num_x, num_y)) # disp in y direction
    s = np.zeros((num_x, num_y)) # vel in y direction
    for i in range(num_x):
        for j in range(num_y):
            base = (i * num_y + j) * 4
            p[i, j] = w[base]
            q[i, j] = w[base + 1]
            r[i, j] = w[base + 2]
            s[i, j] = w[base + 3]
    return p, q, r, s
def progressbar(tact, tmax):
    """Return percent-complete as a string ('0' exactly when tact == 0)."""
    return str(0) if tact == 0 else str(100 * tact / tmax)
def vectorfield(t, w, params):
    """ODE right-hand side: time derivative of the interleaved state vector.

    Unpacks w into (p, q, r, s), applies the prescribed top-row motion,
    sums spring/damper forces from the 8 neighbours plus ground contact,
    friction, gravity and SMA forces, and returns the packed derivatives.
    Mutates module globals (xct, yct, contactforce, q_before,
    timestep_real) as a side effect.
    """
    global timestep_real
    # Approximate real step size since the previous solver call.
    timestep_real = t - timestep_real
    global q_before
    # boundary conditions
    print(progressbar(t,maxtime),'%')
    p, q, r, s = w_to_matrix(w)
    '''
    p=boundaries(p)
    q=boundaries(q)
    r=boundaries(r)
    s=boundaries(s)
    print(p)
    '''
    # prescribed displacement and velocity
    for i in range(num_x):
        p[i, num_y-1] = math.sin(20*t)*0.08 # displacement in x direction
        q[i, num_y-1] = math.cos(20*t)*0.08*20 # velocity in x direction
        r[i, num_y-1] = 0
        s[i, num_y-1] = 0
    fp = np.zeros((num_x, num_y)) # function of dp/dt
    fq = np.zeros((num_x, num_y)) # function of dq/dt
    fr = np.zeros((num_x, num_y)) # function of dr/dt
    fs = np.zeros((num_x, num_y)) # function of ds/dt
    m, k, b, smaforce_x, smaforce_y, ksma = params
    # Current absolute node positions (globals reused as scratch space).
    for i in range(num_x):
        for j in range(num_y):
            xct[i,j] = x_coords_init[i,j] + p[i,j]
            yct[i,j] = y_coords_init[i,j] + r[i,j]
    for i in range(num_x):
        for j in range(num_y):
            #contactforce[i,j] = 0
            contactforce[i,j] = normalforce(yct[i,j], s[i,j])
    smaforce_x, smaforce_y = smaforce(t, 0.2, nodes, smaforce_x, smaforce_y, x_coords_init, y_coords_init, p, r, ksma)
    # Per-node force balance: spring + damper contributions from the 4
    # axis neighbours and 4 diagonal neighbours (zero at the boundary).
    for i in range(num_x):
        for j in range(num_y):
            if (i == num_x-1):
                force_x_right = 0
            else:
                force_x_right = k*f_x(p[i+1,j],p[i,j],r[i+1,j],r[i,j],xct[i+1,j],xct[i,j],yct[i+1,j],yct[i,j]) + b*f_x(q[i+1,j],q[i,j],s[i+1,j],s[i,j],xct[i+1,j],xct[i,j],yct[i+1,j],yct[i,j])
            if (j == num_y-1):
                force_x_up = 0
            else:
                force_x_up = k*f_x(p[i,j+1],p[i,j],r[i,j+1],r[i,j],xct[i,j+1],xct[i,j],yct[i,j+1],yct[i,j]) + b*f_x(q[i,j+1],q[i,j],s[i,j+1],s[i,j],xct[i,j+1],xct[i,j],yct[i,j+1],yct[i,j])
            if (i == 0):
                force_x_left = 0
            else:
                force_x_left = k*f_x(p[i,j],p[i-1,j],r[i,j],r[i-1,j],xct[i,j],xct[i-1,j],yct[i,j],yct[i-1,j]) + b*f_x(q[i,j],q[i-1,j],s[i,j],s[i-1,j],xct[i,j],xct[i-1,j],yct[i,j],yct[i-1,j])
            if (j == 0):
                force_x_down = 0
            else:
                force_x_down = k*f_x(p[i,j],p[i,j-1],r[i,j],r[i,j-1],xct[i,j],xct[i,j-1],yct[i,j],yct[i,j-1]) + b*f_x(q[i,j],q[i,j-1],s[i,j],s[i,j-1],xct[i,j],xct[i,j-1],yct[i,j],yct[i,j-1])
            if (i == (num_x - 1) or j == (num_y - 1) ):
                force_x_upright = 0
            else:
                force_x_upright = k*f_x(p[i+1,j+1],p[i,j],r[i+1,j+1],r[i,j],xct[i+1,j+1],xct[i,j],yct[i+1,j+1],yct[i,j]) + b*f_x(q[i+1,j+1],q[i,j],s[i+1,j+1],s[i,j],xct[i+1,j+1],xct[i,j],yct[i+1,j+1],yct[i,j])
            if (j == 0 or i == (num_x - 1) ):
                force_x_downright = 0
            else:
                force_x_downright = k*f_x(p[i+1,j-1],p[i,j],r[i+1,j-1],r[i,j],xct[i+1,j-1],xct[i,j],yct[i+1,j-1],yct[i,j]) + b*f_x(q[i+1,j-1],q[i,j],s[i+1,j-1],s[i,j],xct[i+1,j-1],xct[i,j],yct[i+1,j-1],yct[i,j])
            if (i == 0 or j == (num_y - 1) ):
                force_x_upleft = 0
            else:
                force_x_upleft = k*f_x(p[i,j],p[i-1,j+1],r[i,j],r[i-1,j+1],xct[i,j],xct[i-1,j+1],yct[i,j],yct[i-1,j+1]) + b*f_x(q[i,j],q[i-1,j+1],s[i,j],s[i-1,j+1],xct[i,j],xct[i-1,j+1],yct[i,j],yct[i-1,j+1])
            if (i == 0 or j == 0 ):
                force_x_downleft = 0
            else:
                force_x_downleft = k*f_x(p[i,j],p[i-1,j-1],r[i,j],r[i-1,j-1],xct[i,j],xct[i-1,j-1],yct[i,j],yct[i-1,j-1]) + b*f_x(q[i,j],q[i-1,j-1],s[i,j],s[i-1,j-1],xct[i,j],xct[i-1,j-1],yct[i,j],yct[i-1,j-1])
            f_horizontal = force_x_right + force_x_up - force_x_left - force_x_down + force_x_upright + force_x_downright - force_x_upleft - force_x_downleft + smaforce_x[i,j]
            fr[i, j] = s[i,j]
            if (i == num_x-1):
                force_y_right = 0
            else:
                force_y_right = k*f_y(p[i+1,j],p[i,j],r[i+1,j],r[i,j],xct[i+1,j],xct[i,j],yct[i+1,j],yct[i,j]) + b*f_y(q[i+1,j],q[i,j],s[i+1,j],s[i,j],xct[i+1,j],xct[i,j],yct[i+1,j],yct[i,j])
            if (j == num_y-1):
                force_y_up = 0
            else:
                force_y_up = k*f_y(p[i,j+1],p[i,j],r[i,j+1],r[i,j],xct[i,j+1],xct[i,j],yct[i,j+1],yct[i,j]) + b*f_y(q[i,j+1],q[i,j],s[i,j+1],s[i,j],xct[i,j+1],xct[i,j],yct[i,j+1],yct[i,j])
            if (i == 0):
                force_y_left = 0
            else:
                force_y_left = k*f_y(p[i,j],p[i-1,j],r[i,j],r[i-1,j],xct[i,j],xct[i-1,j],yct[i,j],yct[i-1,j]) + b*f_y(q[i,j],q[i-1,j],s[i,j],s[i-1,j],xct[i,j],xct[i-1,j],yct[i,j],yct[i-1,j])
            if (j == 0):
                force_y_down = 0
            else:
                force_y_down = k*f_y(p[i,j],p[i,j-1],r[i,j],r[i,j-1],xct[i,j],xct[i,j-1],yct[i,j],yct[i,j-1]) + b*f_y(q[i,j],q[i,j-1],s[i,j],s[i,j-1],xct[i,j],xct[i,j-1],yct[i,j],yct[i,j-1])
            if (i == (num_x - 1) or j == (num_y - 1) ):
                force_y_upright = 0
            else:
                force_y_upright = k*f_y(p[i+1,j+1],p[i,j],r[i+1,j+1],r[i,j],xct[i+1,j+1],xct[i,j],yct[i+1,j+1],yct[i,j]) + b*f_y(q[i+1,j+1],q[i,j],s[i+1,j+1],s[i,j],xct[i+1,j+1],xct[i,j],yct[i+1,j+1],yct[i,j])
            if (j == 0 or i == (num_x - 1) ):
                force_y_downright = 0
            else:
                force_y_downright = k*f_y(p[i+1,j-1],p[i,j],r[i+1,j-1],r[i,j],xct[i+1,j-1],xct[i,j],yct[i+1,j-1],yct[i,j]) + b*f_y(q[i+1,j-1],q[i,j],s[i+1,j-1],s[i,j],xct[i+1,j-1],xct[i,j],yct[i+1,j-1],yct[i,j])
            if (i == 0 or j == (num_y - 1) ):
                force_y_upleft = 0
            else:
                force_y_upleft = k*f_y(p[i,j],p[i-1,j+1],r[i,j],r[i-1,j+1],xct[i,j],xct[i-1,j+1],yct[i,j],yct[i-1,j+1]) + b*f_y(q[i,j],q[i-1,j+1],s[i,j],s[i-1,j+1],xct[i,j],xct[i-1,j+1],yct[i,j],yct[i-1,j+1])
            if (i == 0 or j == 0 ):
                force_y_downleft = 0
            else:
                force_y_downleft = k*f_y(p[i,j],p[i-1,j-1],r[i,j],r[i-1,j-1],xct[i,j],xct[i-1,j-1],yct[i,j],yct[i-1,j-1]) + b*f_y(q[i,j],q[i-1,j-1],s[i,j],s[i-1,j-1],xct[i,j],xct[i-1,j-1],yct[i,j],yct[i-1,j-1])
            # Vertical acceleration: neighbour forces + contact - gravity + SMA.
            fs[i, j] = 1/m*( +force_y_right + force_y_up - force_y_left - force_y_down + force_y_upright + force_y_downright - force_y_upleft - force_y_downleft + contactforce[i,j] - m * g + smaforce_y[i,j])
            fp[i, j] = q[i,j]
            # Friction only applies while the node touches the ground.
            if (yct[i,j] <= 0):
                fforce = frictionforce(f_horizontal, contactforce[i,j], q[i,j], q_before[i,j], timestep_real)
            else:
                fforce = f_horizontal
            fq[i, j] = 1/m*(fforce)
            q_before[i,j] = q[i,j]
    f = matrix_to_w(fp, fq, fr, fs)
    return f
# time
t = [maxtime * float(i) / (timesteps - 1) for i in range(timesteps)]
# SMA force definition
'''
for i in range(int(num_y/2)):
smaforce_x[0,i] = 3.5
smaforce_x[num_x-1,i] = -3.5
smaforce_x[0+1,i] = 3.5
smaforce_x[num_x-1-1,i] = -3.5
smaforce_x[0+2,i] = 3.5
smaforce_x[num_x-1-2,i] = -3.5
smaforce_x[0+3,i] = 3.5
smaforce_x[num_x-1-3,i] = -3.5
'''
# initial conditions
w0 = matrix_to_w(p, q, r, s)
# Explicit Runge-Kutta (dopri5) time integration of the vector field.
solver = ode(vectorfield)
solver.set_integrator('dopri5')
solver.set_f_params(params)
solver.set_initial_value(w0, 0)
sol = np.empty((timesteps, len(w0)))
sol[0] = w0
# NOTE(review): this reuses the global stiffness name `k` as the step
# counter; the stiffness value was already captured in `params` above,
# so vectorfield is unaffected, but the global is clobbered from here on.
k = 1
while solver.successful() and solver.t < maxtime:
    solver.integrate(t[k])
    sol[k] = solver.y
    k += 1
# ODE solver
'''
wsol = odeint(vectorfield, w0, t, args=(params,),atol=abserr, rtol=relerr)
solution=[]
# unpack solution
for t1,w1 in zip(t, wsol):
solution.append(w1)
solution=np.array(solution)
'''
# STRAIN CALCULATION
import matplotlib.animation as animation
# First set up the figure, the axis, and the plot element we want to animate
vertices = np.random.rand(6, 2)
fig = plt.figure()
ax = plt.axes(xlim=(-2, 10), ylim=(-2, 14))
scat, = ax.plot([],[], linestyle='', marker='o', color='b')
line, = ax.plot([], [], lw=2, color='b')
poly = ax.fill([100,100,100], [100,100,100], "b")
def init():
    """Animation init callback for FuncAnimation; returns the artists."""
    #line.set_data([], [])
    #poly.set_data([], [])
    return [poly]
# animation function. This is called sequentially
def animate(t):
    """Draw frame t: rebuild deformed node coordinates from sol[t] and
    fill the outline polygon of the mesh boundary."""
    fig.clear()
    ax = plt.axes(xlim=(-2, 10), ylim=(-2, 14))
    #ax = plt.axes(xlim=(-2, 10), ylim=(-2, 14))
    p, q, r, s = w_to_matrix(sol[t])
    # Deformed node positions for this frame.
    for i in range(num_x):
        for j in range(num_y):
            x_coords[i,j] = x_coords_init[i,j] + p[i,j]
            y_coords[i,j] = y_coords_init[i,j] + r[i,j]
    x = []
    y = []
    # Zig-zag traversal of the grid (boustrophedon order) so a single
    # polyline would pass through every node without jumps.
    for j in range(num_y):
        for i in range(num_x):
            if (j % 2 == 0):
                x.append(x_coords[num_x-i-1,j])
                y.append(y_coords[num_x-i-1,j])
            else:
                x.append(x_coords[i,j])
                y.append(y_coords[i,j])
    for i in range(num_x):
        for j in range(num_y):
            if (num_y % 2 == 0):
                if (i % 2 == 0):
                    x.append(x_coords[num_x-i-1,j])
                    y.append(y_coords[num_x-i-1,j])
                else:
                    x.append(x_coords[num_x-i-1,num_y-j-1])
                    y.append(y_coords[num_x-i-1,num_y-j-1])
            else:
                if (i % 2 == 0):
                    x.append(x_coords[i,num_y-j-1])
                    y.append(y_coords[i,num_y-j-1])
                else:
                    x.append(x_coords[i,j])
                    y.append(y_coords[i,j])
    #x = np.reshape(x_coords, num_x*num_y)
    #y = np.reshape(y_coords, num_x*num_y)
    xline = x
    yline = y
    # Boundary edges of the grid, walked counter-clockwise:
    # bottom -> right -> top (reversed) -> left (reversed).
    down = np.zeros([num_x, 2])
    right = np.zeros([num_y, 2])
    up = np.zeros([num_x, 2])
    left = np.zeros([num_y, 2])
    for i in range(num_x):
        down[i,0] = x_coords[i,0]
        down[i,1] = y_coords[i,0]
        up[i,0] = x_coords[num_x-1-i,num_y-1]
        up[i,1] = y_coords[num_x-1-i,num_y-1]
    for j in range(num_y):
        right[j,0] = x_coords[num_x-1,j]
        right[j,1] = y_coords[num_x-1,j]
        left[j,0] = x_coords[0,num_y-1-j]
        left[j,1] = y_coords[0,num_y-1-j]
    outline = []
    outline_x = []
    outline_y = []
    for k in range (num_x):
        outline_x.append(down[k,0])
        outline_y.append(down[k,1])
        outline.append([down[k,0],down[k,1]])
    for k in range (num_y):
        outline.append([right[k,0],right[k,1]])
        outline_x.append(right[k,0])
        outline_y.append(right[k,1])
    for k in range (num_x):
        outline.append([up[k,0],up[k,1]])
        outline_x.append(up[k,0])
        outline_y.append(up[k,1])
    for k in range (num_y):
        outline.append([left[k,0],left[k,1]])
        outline_x.append(left[k,0])
        outline_y.append(left[k,1])
    #poly.set_xy(outline)
    #patch = PatchCollection(poly, alpha=0.4)
    #ax.add_collection(patch)
    poly = ax.fill(outline_x, outline_y, "b")
    #line.set_data(xline, yline)
    #scat.set_data(x, y)
    #return [scat, line, poly, ]
    return [poly, ]
# Render all frames to an mp4 and show the final figure interactively.
anim = animation.FuncAnimation(fig, animate,init_func=init, frames=range(timesteps), interval=30, blit=False)
anim.save('cartpole.mp4')
plt.grid(True)
plt.show()
#t = [maxtime * float(i) / (timesteps - 1) for i in range(timesteps)]
#plt.plot(t,strainA)
#plt.show()
|
GitYiheng/reinforcement_learning_test
|
test00_previous_files/cartpole.py
|
Python
|
mit
| 19,516
|
[
"Octopus"
] |
2a212d34fc5edc8a06898dcaa61b03e125ccac979ada1b5f4356c7138c8206d0
|
import os
from pyjade import Parser, Compiler as _Compiler
from pyjade.runtime import attrs
from pyjade.utils import process
# Names of the pyjade runtime helpers injected into every compiled
# Mako template (see Compiler.compile_top below).
ATTRS_FUNC = '__pyjade_attrs'
ITER_FUNC = '__pyjade_iter'
class Compiler(_Compiler):
    """Compile a pyjade AST into Mako template source.

    Each visit* method emits Mako markup for one node kind; the emitted
    strings are the template output, so their exact text is significant.
    """
    # Emit runtime-helper calls (attrs/iteration) instead of inlining.
    useRuntime = True
    def compile_top(self):
        # Mako module-level block importing the runtime helpers under the
        # reserved ATTRS_FUNC / ITER_FUNC names.
        return '# -*- coding: utf-8 -*-\n<%%! from pyjade.runtime import attrs as %s, iteration as %s %%>'%(ATTRS_FUNC,ITER_FUNC)
    def interpolate(self,text):
        # #{expr} interpolation becomes Mako ${expr}.
        return self._interpolate(text,lambda x:'${%s}'%x)
    def visitCodeBlock(self,block):
        # Inside a mixin, a block placeholder renders the caller's body;
        # otherwise it becomes a named Mako <%block> with optional
        # append/prepend inheritance of the parent block.
        if self.mixing > 0:
            self.buffer('${caller.body() if caller else ""}')
        else:
            self.buffer('<%%block name="%s">'%block.name)
            if block.mode=='append': self.buffer('${parent.%s()}'%block.name)
            self.visitBlock(block)
            if block.mode=='prepend': self.buffer('${parent.%s()}'%block.name)
            self.buffer('</%block>')
    def visitMixin(self,mixin):
        # Mixin definition -> <%def>; invocation with a body -> <%call>;
        # plain invocation -> expression substitution.
        self.mixing += 1
        if not mixin.call:
            self.buffer('<%%def name="%s(%s)">'%(mixin.name,mixin.args))
            self.visitBlock(mixin.block)
            self.buffer('</%def>')
        elif mixin.block:
            self.buffer('<%%call expr="%s(%s)">'%(mixin.name,mixin.args))
            self.visitBlock(mixin.block)
            self.buffer('</%call>')
        else:
            self.buffer('${%s(%s)}'%(mixin.name,mixin.args))
        self.mixing -= 1
    def visitAssignment(self,assignment):
        self.buffer('<%% %s = %s %%>'%(assignment.name,assignment.val))
    def visitExtends(self,node):
        path = self.format_path(node.path)
        self.buffer('<%%inherit file="%s"/>'%(path))
    def visitInclude(self,node):
        path = self.format_path(node.path)
        self.buffer('<%%include file="%s"/>'%(path))
    def visitConditional(self,conditional):
        # Map pyjade conditional kinds onto Mako % control lines;
        # "unless" is compiled as a negated if.
        TYPE_CODE = {
            'if': lambda x: 'if %s'%x,
            'unless': lambda x: 'if not %s'%x,
            'elif': lambda x: 'elif %s'%x,
            'else': lambda x: 'else'
        }
        self.buf.append('\\\n%% %s:\n'%TYPE_CODE[conditional.type](conditional.sentence))
        if conditional.block:
            self.visit(conditional.block)
            for next in conditional.next:
                self.visitConditional(next)
        # Only the chain head closes the % if block.
        if conditional.type in ['if','unless']: self.buf.append('\\\n% endif\n')
    def visitVar(self,var,escape=False):
        # Mako filter: h = HTML-escape, n = no filtering.
        return '${%s%s}'%(var,'| h' if escape else '| n')
    def visitCode(self,code):
        # Buffered code renders its value; unbuffered code is a raw <% %>
        # statement, auto-closed when it opens a known block tag.
        if code.buffer:
            val = code.val.lstrip()
            self.buf.append(self.visitVar(val, code.escape))
        else:
            self.buf.append('<%% %s %%>'%code.val)
        if code.block:
            # if not code.buffer: self.buf.append('{')
            self.visit(code.block)
            # if not code.buffer: self.buf.append('}')
            if not code.buffer:
                codeTag = code.val.strip().split(' ',1)[0]
                if codeTag in self.autocloseCode:
                    self.buf.append('</%%%s>'%codeTag)
    def visitEach(self,each):
        # Iteration goes through the ITER_FUNC runtime helper so pyjade's
        # unpacking semantics are preserved.
        self.buf.append('\\\n%% for %s in %s(%s,%d):\n'%(','.join(each.keys),ITER_FUNC,each.obj,len(each.keys)))
        self.visit(each.block)
        self.buf.append('\\\n% endfor\n')
    def attributes(self,attrs):
        # Attribute rendering is delegated to the ATTRS_FUNC runtime helper.
        return "${%s(%s)}"%(ATTRS_FUNC,attrs)
def preprocessor(source):
    """Compile pyjade *source* into Mako template markup."""
    return process(source,compiler=Compiler)
|
glennyonemitsu/MarkupHiveServer
|
src/pyjade/ext/mako.py
|
Python
|
mit
| 3,388
|
[
"VisIt"
] |
709ac275c4016b2b94a6ec5a165b7dec8700b1e7fbf580c676eea445df975a4e
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# TODO - Don't use "from XXX import *"
from __future__ import print_function
try:
from numpy import *
from numpy import dot # missing in old PyPy's micronumpy
from numpy.linalg import svd, det # Missing in PyPy 2.0 numpypy
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy if you want to use Bio.SVDSuperimposer.")
from Bio.SVDSuperimposer import SVDSuperimposer
# start with two coordinate sets (Nx3 arrays - Float0)
x=array([[51.65, -1.90, 50.07],
         [50.40, -1.23, 50.65],
         [50.68, -0.04, 51.54],
         [50.22, -0.02, 52.85]], 'f')
y=array([[51.30, -2.99, 46.54],
         [51.09, -1.88, 47.58],
         [52.36, -1.20, 48.03],
         [52.71, -1.18, 49.38]], 'f')
sup=SVDSuperimposer()
# set the coords
# y will be rotated and translated on x
sup.set(x, y)
# do the lsq fit
sup.run()
# get the rmsd
rms=sup.get_rms()
# get rotation (right multiplying!) and the translation
rot, tran=sup.get_rotran()
# rotate y on x manually
y_on_x1=dot(y, rot)+tran
# same thing
y_on_x2=sup.get_transformed()
def simple_matrix_print(matrix):
    """Simple string to display a floating point matrix

    This should give the same output on multiple systems. This is
    needed because a simple "print matrix" uses scientific notation
    which varies between platforms.

    Only 4 decimal places are used to avoid false test failures due
    to slight differences in the calculation (e.g. due to different
    versions of the underlying libraries or the compilation options
    they used).
    """
    formatted_rows = []
    for row in matrix:
        cells = " ".join("% 1.4f" % value for value in row)
        formatted_rows.append("[%s]" % cells)
    return "[%s]" % "\n ".join(formatted_rows)
# output results
# Both manual and library transforms should print identically, then the RMSD.
print(simple_matrix_print(y_on_x1))
print("")
print(simple_matrix_print(y_on_x2))
print("")
print("%.2f" % rms)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_SVDSuperimposer.py
|
Python
|
gpl-2.0
| 2,041
|
[
"Biopython"
] |
4ef7d3b5eb7f748bb40fd3f256b18219792a089b2de005e9ec2e0de3133dd66d
|
from __future__ import print_function
import sys
import os
import regreg.api as rr
import numpy as np
from selection.reduced_optimization.generative_model import generate_data, generate_data_random
from selection.reduced_optimization.initial_soln import instance
from selection.tests.instance import logistic_instance, gaussian_instance
def selection_nonrandomized(X, y, sigma=None, method="theoretical"):
    """Fit a group-lasso (one group per coordinate) to (X, y) via regreg.

    Args:
        X: (n, p) design matrix.
        y: response vector.
        sigma: noise level; defaults to 1.
        method: only "theoretical" sets lam here.
            NOTE(review): any other value leaves `lam` unbound and raises
            UnboundLocalError below -- confirm intended usage.

    Returns:
        (lam, epsilon, active, betaE, cube, initial_soln), or None when no
        variable is selected.
    """
    n, p = X.shape
    loss = rr.glm.gaussian(X,y)
    epsilon = 1. / np.sqrt(n)
    lam_frac = 1.
    if sigma is None:
        sigma = 1.
    if method == "theoretical":
        # Theoretical lambda: expected max absolute gradient under noise,
        # estimated by Monte Carlo with 10000 standard-normal draws.
        lam = 1. * sigma * lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.standard_normal((n, 10000)))).max(0))
    W = np.ones(p)*lam
    penalty = rr.group_lasso(np.arange(p), weights = dict(zip(np.arange(p), W)), lagrange=1.)
    # initial solution
    problem = rr.simple_problem(loss, penalty)
    random_term = rr.identity_quadratic(epsilon, 0, 0, 0)
    solve_args = {'tol': 1.e-10, 'min_its': 100, 'max_its': 500}
    initial_soln = problem.solve(random_term, **solve_args)
    active = (initial_soln != 0)
    if np.sum(active) == 0:
        return None
    initial_grad = loss.smooth_objective(initial_soln, mode='grad')
    betaE = initial_soln[active]
    # Subgradient of the penalty at the solution; its inactive part,
    # scaled by lam, lies in the unit cube.
    subgradient = -(initial_grad+epsilon*initial_soln)
    cube = subgradient[~active]/lam
    return lam, epsilon, active, betaE, cube, initial_soln
def lasso_selection(X,
                    y,
                    beta,
                    sigma):
    """Run the lasso selection, then compute unadjusted (naive) Bayesian
    credible intervals for the selected coefficients.

    Args:
        X: (n, p) design matrix.
        y: response vector.
        beta: true coefficient vector (for coverage evaluation).
        sigma: noise standard deviation.

    Returns:
        A (3, 1) array [naive coverage, mean interval length, Bayes risk],
        or None if the lasso selects nothing.
    """
    n,p = X.shape
    sel = selection_nonrandomized(X, y)
    if sel is not None:
        lam, epsilon, active, betaE, cube, initial_soln = sel
        lagrange = lam * np.ones(p)
        active_sign = np.sign(betaE)
        nactive = active.sum()
        print("number of selected variables by Lasso", nactive)
        print("initial soln", betaE)
        prior_variance = 1000.
        noise_variance = sigma**2
        # Projection onto the selected columns: X_A (X_A^T X_A)^{-1}.
        projection_active = X[:, active].dot(np.linalg.inv(X[:, active].T.dot(X[:, active])))
        # Posterior mean/covariance under a Gaussian prior on coefficients.
        M_1 = prior_variance * (X.dot(X.T)) + noise_variance * np.identity(n)
        M_2 = prior_variance * ((X.dot(X.T)).dot(projection_active))
        M_3 = prior_variance * (projection_active.T.dot(X.dot(X.T)).dot(projection_active))
        post_mean = M_2.T.dot(np.linalg.inv(M_1)).dot(y)
        print("observed data", post_mean)
        post_var = M_3 - M_2.T.dot(np.linalg.inv(M_1)).dot(M_2)
        # 1.65 sigma on each side -> nominal 90% intervals.
        unadjusted_intervals = np.vstack([post_mean - 1.65 * (np.sqrt(post_var.diagonal())),
                                          post_mean + 1.65 * (np.sqrt(post_var.diagonal()))])
        print("unadjusted intervals", unadjusted_intervals)
        coverage_unad = np.zeros(nactive)
        unad_length = np.zeros(nactive)
        # Target of inference: projection of the true signal onto the
        # selected model.
        true_val = projection_active.T.dot(X.dot(beta))
        print("true value", true_val)
        for l in range(nactive):
            if (unadjusted_intervals[0, l] <= true_val[l]) and (true_val[l] <= unadjusted_intervals[1, l]):
                coverage_unad[l] += 1
            unad_length[l] = unadjusted_intervals[1, l] - unadjusted_intervals[0, l]
        naive_cov = coverage_unad.sum() / nactive
        unad_len = unad_length.sum() / nactive
        bayes_risk_unad = np.power(post_mean - true_val, 2.).sum() / nactive
        return np.vstack([naive_cov, unad_len, bayes_risk_unad])
    else:
        return None
if __name__ == "__main__":
    ### set parameters
    n = 200
    p = 1000
    ### GENERATE X
    niter = 50
    unad_cov = 0.
    unad_len = 0.
    unad_risk = 0.
    for i in range(niter):
        # Seed 0 before building the instance: the design X is identical
        # across iterations; only the response y varies (seeded by i).
        np.random.seed(0)
        sample = instance(n=n, p=p, s=0, sigma=1., rho=0, snr=7.)
        ### GENERATE Y BASED ON SEED
        np.random.seed(i)  # ensures different y
        #X, y, beta, nonzero, sigma = gaussian_instance()
        X, y, beta, nonzero, sigma = sample.generate_response()
        ### RUN LASSO AND TEST
        lasso = lasso_selection(X,
                                y,
                                beta,
                                sigma)
        # Accumulate coverage / length / risk over iterations where the
        # lasso selected at least one variable.
        if lasso is not None:
            unad_cov += lasso[0,0]
            unad_len += lasso[1, 0]
            unad_risk += lasso[2,0]
        print("\n")
        print("cov", unad_cov)
        print("risk", unad_risk)
        print("iteration completed", i)
        print("\n")
    print("unadjusted coverage, lengths and risk", unad_cov/niter, unad_len/niter, unad_risk/niter)
|
selective-inference/selective-inference
|
sandbox/bayesian/lasso_selection.py
|
Python
|
bsd-3-clause
| 4,485
|
[
"Gaussian"
] |
61576dc62233b6b64047ac49b649b42edc9f18aa5f43865730658ed25fe31795
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.