text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# proxy module
from __future__ import absolute_import
from mayavi.core.ui.engine_rich_view import *
| enthought/etsproxy | enthought/mayavi/core/ui/engine_rich_view.py | Python | bsd-3-clause | 100 | [
"Mayavi"
] | efb173c4ae009c1820ee0b277a229a9ef2202c3a6d4e17208c4a5eae638afe14 |
'''test methods related to Red Hat Enterprise Linux, e.g. entitlement'''
from behave import *
import ConfigParser
import re
from distutils.version import LooseVersion
# Test-suite configuration: credentials and endpoints are read from
# config/uat.cfg, one section per environment (e.g. "redhat-prod").
config = ConfigParser.ConfigParser()
config.read('config/uat.cfg')
# Red Hat release key 2 public GPG fingerprint, full and short forms,
# used below to verify OSTree commit signatures.
rh_gpg_fingerprint = '567E 347A D004 4ADE 55BA 8A5F 199E 2F91 FD43 1D51'
rh_gpg_fingerprint_short = '199E2F91FD431D51'
@when(u'"{host}" host is auto-subscribed to "{env}"')
@when(u'"{host}" is auto-subscribed')
def step_impl(context, host, env="prod"):
    '''Register the remote host with subscription-manager and auto-attach.

    Credentials and endpoints come from the "redhat-<env>" section of
    config/uat.cfg; when a non-empty hostname/baseurl is configured, the
    step first points /etc/rhsm/rhsm.conf at that (non-prod) server.
    '''
    env_section = "redhat-%s" % env
    user = config.get(env_section, 'user')
    passwd = config.get(env_section, 'pass')
    hostname = config.get(env_section, 'hostname')
    baseurl = config.get(env_section, 'baseurl')
    if hostname:
        # we are registering against non-prod, update rhsm.conf
        assert context.remote_cmd("ini_file",
                                  host,
                                  module_args="dest=/etc/rhsm/rhsm.conf section=server option=hostname value=%s backup=yes" % hostname)
    if baseurl:
        # we are registering against non-prod, update rhsm.conf
        assert context.remote_cmd("ini_file",
                                  host,
                                  module_args="dest=/etc/rhsm/rhsm.conf section=rhsm option=baseurl value=%s backup=yes" % baseurl)
    # NOTE(review): the password is interpolated into the remote command line
    # and so may be visible in the process list / logs on the target host --
    # consider an activation key if that is a concern.
    assert context.remote_cmd("command",
                              host,
                              module_args="subscription-manager register --username %s --password %s --auto-attach" % (user, passwd))
@then('"{host}" host is unsubscribed and unregistered')
def step_impl(context, host):
    '''Run `subscription-manager unregister` on the remote host and
    assert the command succeeded.'''
    result = context.remote_cmd(
        "command", host, module_args="subscription-manager unregister")
    assert result
@then(u'subscription status is ok on "{host}"')
def step_impl(context, host):
    '''Assert `subscription-manager status` reports a current
    subscription on every targeted host.'''
    results = context.remote_cmd(
        "command", host, module_args="subscription-manager status")
    assert results
    assert all('Status: Current' in item['stdout'] for item in results)
@then(u'"{total}" entitlement is consumed on "{host}"')
def step_impl(context, total, host):
    '''Count "Serial" entries in `subscription-manager list --consumed`
    output and verify the expected total on every host.'''
    expected = int(total)
    results = context.remote_cmd(
        "command", host, module_args='subscription-manager list --consumed')
    assert results
    for item in results:
        assert item['stdout'].count('Serial') == expected
@then(u'subscription status is unknown on "{host}"')
def step_impl(context, host):
    '''Assert `subscription-manager status` reports "Unknown" on every
    targeted host (the expected state for an unregistered system).'''
    # ignore_rc: presumably `status` exits non-zero when not current --
    # TODO confirm remote_cmd's return-code handling.
    r = context.remote_cmd("command",
                           host,
                           ignore_rc=True,
                           module_args="subscription-manager status")
    assert r
    for i in r:
        assert 'Status: Unknown' in i['stdout']
@given(u'cloud-init on "{host}" host is running')
def step_impl(context, host):
    '''Assert the cloud-init systemd unit is active on the remote host.

    Note: originally named ``step_imp`` (typo). behave registers steps via
    the decorator text, not the function name, so renaming to the
    file-wide ``step_impl`` convention does not affect step matching.
    '''
    cloudinit_is_active = context.remote_cmd(cmd='command',
                                             host=host,
                                             module_args='systemctl is-active cloud-init')
    assert cloudinit_is_active, "The cloud-init service is not running"
@then(u'wait for rh_subscription_manager plugin to finish')
def step_impl(context):
    '''Block until /var/log/cloud-init.log contains "complete", i.e. the
    cloud-init run (including the rh_subscription module) has finished.'''
    cloudinit_completed = context.remote_cmd(cmd='wait_for',
                                             module_args='path=/var/log/cloud-init.log search_regex=complete')
    # dict.has_key() is Python-2-only and removed in Python 3; the `in`
    # operator is equivalent on both.
    assert 'failed' not in cloudinit_completed[0], "The cloud-init service did not complete"
@then(u'check if the rh_subscription_manager completed successfully')
def step_impl(context):
    '''Grep cloud-init.log for the last cc_rh_subscription.py line and
    assert it reports successful completion.'''
    cloudinit_result = context.remote_cmd(cmd='shell',
                                          module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | tail -n1 | cut -d ":" -f4 | sed "s/^ //"')[0]['stdout']
    assert cloudinit_result == 'rh_subscription plugin completed successfully', 'rh_subscription plugin failed'
@then(u'check if the subscription-manager successfully registered')
def step_impl(context):
    '''Assert cloud-init.log recorded a successful subscription-manager
    registration (the sed strips the trailing hex ID before comparing).'''
    register_result = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep Regist | cut -d ":" -f4 | sed -e "s/^ //" -e "s/ [-a-f0-9]\+//" -e "s/ $//"')[0]['stdout']
    assert register_result == 'Registered successfully with ID', "subscription-manager did not register successfully"
@then(u'check if subscription-manager successfully attached existing pools')
def step_impl(context):
    '''Assert the log shows the fixed test pool ID was attached.'''
    pools_attached = context.remote_cmd(cmd='shell',
                                        module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep pools | cut -d ":" -f5 | sed "s/^ //"')[0]['stdout']
    # Hard-coded pool ID of the test subscription used by these scenarios.
    assert pools_attached == '8a85f9823e3d5e43013e3ddd4e9509c4', "Configured pools weren't attached"
@then(u'check if the existing listed repoids were enabled')
def step_impl(context):
    '''Assert the log lists the two configured repoids as enabled.'''
    repoids_enabled = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep "Enabled the following repos" | cut -d ":" -f5 | sed "s/^ //"')[0]['stdout']
    assert repoids_enabled == 'rhel-7-server-optional-beta-rpms, rhel-7-server-beta-debug-rpms', "Configured repoids weren't enabled"
@then(u'check if the rh_subscription_manager failed to complete')
def step_impl(context):
    '''Negative check: the last plugin log line must report failure.'''
    cloudinit_result = context.remote_cmd(cmd='shell',
                                          module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | tail -n1 | cut -d ":" -f4 | sed "s/^ //"')[0]['stdout']
    assert cloudinit_result == 'rh_subscription plugin did not complete successfully', 'rh_subscription plugin should have failed'
@then(u'check if the subscription-manager failed to register with bad username')
def step_impl(context):
    '''Assert registration failed with the invalid-credentials message.
    Note: intentionally identical to the bad-password step below -- the
    server emits the same message for either bad credential.'''
    register_result = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep Invalid | cut -d ":" -f4 | sed -e "s/^ //" | tail -n1')[0]['stdout']
    assert register_result == 'Invalid username or password. To create a login, please visit https', "subscription-manager didn't fail to register"
@then(u'check if the subscription-manager failed to register with bad password')
def step_impl(context):
    '''Assert registration failed with the invalid-credentials message
    (same log check as the bad-username step above).'''
    register_result = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep Invalid | cut -d ":" -f4 | sed -e "s/^ //" | tail -n1')[0]['stdout']
    assert register_result == 'Invalid username or password. To create a login, please visit https', "subscription-manager didn't fail to register"
@then(u'check if the subscription-manager failed to attach non-existent pool-id')
def step_impl(context):
    '''Assert the log shows the bogus pool ID was reported unavailable.'''
    register_result = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep Pool | cut -d ":" -f4 | sed -e "s/^ //"')[0]['stdout']
    assert register_result == 'Pool 8a85f9823e3d5e43013e3ddd4e95ffff is not available', "Pool 8a85f9823e3d5e43013e3ddd4e95ffff shouldn't be available"
@then(u'check if the subscription-manager failed to attach pool-id defined as a scalar')
def step_impl(context):
    '''Assert the plugin rejected a scalar (non-list) pools value.'''
    register_result = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep Pool | cut -d ":" -f4 | sed -e "s/^ //"')[0]['stdout']
    assert register_result == 'Pools must in the format of a list.', "Pools in scalar form shouldn't be accepted"
@then(u'check if an error message is shown in the log when trying to add non-existent repo')
def step_impl(context):
    '''Assert the log contains the "repo does not exist" error.'''
    register_result = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep Repo | grep exist | cut -d ":" -f4 | sed -e "s/^ //"')[0]['stdout']
    assert register_result == 'Repo rhel-7-server-beta-debug-rpm does not appear to exist', "Error message not found"
@then(u'check the Repo "{reponame}" is already enabled message appearance')
def step_impl(context, reponame):
    '''Assert the log contains the informational "Repo <name> is already
    enabled" message for the given repo.'''
    register_result = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep Repo | grep already | cut -d ":" -f4 | sed -e "s/^ //"')[0]['stdout']
    repo_message = 'Repo ' + reponame + ' is already enabled'
    assert register_result == repo_message, "Informational error message not found"
@then(u'check the Repo "{reponame}" not disabled because it is not enabled message appearance')
def step_impl(context, reponame):
    '''Assert the log contains the "not disabled because it is not
    enabled" message for the given repo.'''
    register_result = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep Repo | grep disabled | cut -d ":" -f4 | sed -e "s/^ //"')[0]['stdout']
    repo_message = 'Repo ' + reponame + ' not disabled because it is not enabled'
    assert register_result == repo_message, "Informational error message not found"
@then(u'check if the subscription-manager issued error message when incorrect subscription keys are provided')
def step_impl(context):
    '''Assert the plugin logged the "not a valid key" error for an
    unknown rh_subscription config key.'''
    register_result = context.remote_cmd(cmd='shell',
                                         module_args='grep cc_rh_subscription.py /var/log/cloud-init.log | grep "not a valid key" | cut -d ":" -f4 | sed -e "s/^ //"')[0]['stdout']
    assert register_result == 'list is not a valid key for rh_subscription. Valid keys are', "Error message not found"
@when(u"import Red Hat's release key 2 to the superuser's keyring succeeds")
def step_impl(context):
    '''Import the Red Hat release key into the superuser's GPG keyring
    and assert the import succeeded.'''
    imported = context.remote_cmd(
        "command",
        module_args="gpg --import /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release")
    assert imported, "import of Red Hat's release key 2 failed"
@then(u"verify if Red Hat's release key 2 matches public fingerprint")
def step_impl(context):
    '''Check that the keyring contains a key whose fingerprint matches
    the module constant ``rh_gpg_fingerprint``.'''
    gpg_out = context.remote_cmd("command", module_args="gpg --fingerprint")[0]['stdout']
    m = re.search(rh_gpg_fingerprint, gpg_out)
    # Fix: the original dereferenced m.group(0) unconditionally, so a
    # missing fingerprint raised AttributeError instead of a clear assert.
    # (The pattern has no regex metacharacters, so a match already implies
    # equality with the fingerprint.)
    assert m is not None, "the imported Red Hat's release key 2 does not match its public fingerprint"
@when(u'download "{file}" script with sha256sum "{sha256sum}" finishes')
def step_impl(context, file, sha256sum):
    '''Download the gpgverify.py helper to /tmp on the target host and
    verify its sha256 checksum via the get_url module.'''
    download_status = context.remote_cmd("get_url", module_args="url=" + file + " dest=/tmp/gpgverify.py force=yes sha256sum=" + sha256sum)
    # dict.has_key() is Python-2-only; `in` is equivalent and portable.
    assert 'failed' not in download_status[0], "the gpgverify.py either failed to download or its checksum was invalid"
@when(u'OSTree version is lower than "{version}"')
def step_impl(context, version):
    '''Skip the scenario unless the booted OSTree version is strictly
    lower than *version* (LooseVersion comparison).'''
    atomic_host_status = context.remote_cmd(cmd='shell',
                                            module_args='atomic host status | grep "^*"')[0]['stdout']
    # The booted deployment is the line marked "*"; assumes field index 3
    # of the whitespace-split line is the version string -- TODO confirm
    # against the `atomic host status` output format in use.
    running_version = " ".join(atomic_host_status.split()).split()[3]
    if not LooseVersion(running_version) < LooseVersion(version):
        context.scenario.skip(reason='OSTree version is greater or equal than ' + version + '. Skipping this scenario')
@then(u'use the gpgverify.py script to verify gpg signatures')
def step_impl(context):
    '''Verify the booted OSTree commit's GPG signature end to end:
    run /tmp/gpgverify.py against the repo, locate the .commit object it
    reports, `gpg --verify` its detached signature, and compare the
    primary key fingerprint with Red Hat's release key 2.'''
    atomic_host_status = context.remote_cmd(cmd='shell',
                                            module_args='atomic host status | grep "^*"')[0]['stdout']
    # Assumes field index 4 of the booted ("*") status line is the tree
    # checksum -- TODO confirm against the `atomic host status` format.
    treeid = " ".join(atomic_host_status.split()).split()[4]
    chmod_status = context.remote_cmd(cmd='file',
                                      module_args='path=/tmp/gpgverify.py mode=0755')
    # dict.has_key() is Python-2-only; `in` is equivalent and portable.
    assert 'failed' not in chmod_status[0], "attempt to chmod 0755 /tmp/gpgverify.py failed"
    gpgverify_out = context.remote_cmd(cmd='command',
                                       module_args='/tmp/gpgverify.py /sysroot/ostree/repo ' + treeid)
    assert gpgverify_out, "the gpgverify.py script crashed, the OSTree isn't signed"
    # Raw strings avoid invalid-escape warnings; guard each search so a
    # non-matching output fails the assert instead of raising AttributeError.
    m = re.search(r'(?<==> )(\w+)', gpgverify_out[0]['stdout'])
    assert m is not None, "gpgverify.py output did not contain a commit checksum"
    commit_path = '/sysroot/ostree/repo/objects/' + m.group(0)[:2] + '/' + m.group(0)[2:] + '.commit'
    gpg_out = context.remote_cmd(cmd='command',
                                 module_args='gpg --verify sig.0 ' + commit_path)
    assert gpg_out, "verification of the gpg signature has failed"
    m = re.search(r'(?<=Primary key fingerprint: )(.*)', gpg_out[0]['stderr'])
    assert m is not None, "gpg output did not contain a primary key fingerprint"
    primary_key = m.group(0)
    assert primary_key == rh_gpg_fingerprint, "the OSTree signature does not match Red Hat's release key 2"
@then(u'use ostree show command to verify gpg signatures')
def step_impl(context):
    '''Assert `ostree show <treeid>` output mentions the short form of
    Red Hat's release key 2 fingerprint.'''
    atomic_host_status = context.remote_cmd(cmd='shell',
                                            module_args='atomic host status | grep "^*"')[0]['stdout']
    # Assumes fields 3 and 4 of the booted ("*") status line are the
    # version and tree checksum -- TODO confirm output format.
    tree_version = " ".join(atomic_host_status.split()).split()[3]
    treeid = " ".join(atomic_host_status.split()).split()[4]
    signature_status = context.remote_cmd(cmd='shell',
                                          module_args='ostree show ' + treeid + ' | grep ' + rh_gpg_fingerprint_short)
    assert signature_status, "OSTree version " + tree_version + " isn't signed by Red Hat's release key 2"
@then(u'check whether there are no references to the "{pattern}"')
def step_impl(context, pattern):
    '''Scan the whole filesystem (excluding pseudo/user paths) for
    *pattern* and assert nothing was found.'''
    pattern_occurence = context.remote_cmd(cmd='command',
                                           module_args='sudo find / \( -path "/proc" -o -path "/sys" -o -path "/dev" -o -path "/sysroot" -o -path "/var/home" \) -prune -o -type f -exec grep -nHI "' + pattern + '" {} \;')
    # NOTE(review): elsewhere in this file remote_cmd returns a list of
    # result dicts, in which case comparing it to "" is always False and
    # this assert would fail unconditionally -- confirm remote_cmd's
    # return type for the 'command' module here.
    assert pattern_occurence == "", "Fail, the " + pattern + " is present on the system."
| mdshuai/UATFramework | steps/redhat.py | Python | gpl-2.0 | 12,806 | [
"VisIt"
] | bcdd3d5e5b3f77edc2f6d40f690b506836862b531124639c17bf720c303ec4bb |
#!/usr/bin/env python
"""
Created on Fri 7 March 2014
For backing up SI constants before converting them to CGS units in main
program.
@author Kristoffer Braekken
"""
"""PHYSICAL CONSTANTS"""
_L_SUN = 3.846e26 # [W]
_R_SUN = 6.96e8 # [m]
_M_SUN = 1.989e30 # [kg]
_G = 6.67384e-11 # [m^3 kg^-1 s^-2]
_C = 3.e8 # [m s^-1]
_SIGMA = 5.67e-8 # [W m^-2 K^-4]  Stefan-Boltzmann constant
_K_B = 1.382e-23 # [m^2 kg s^-2 K^-1]  NOTE(review): CODATA value is 1.3806e-23 -- confirm intended precision
_N_A = 6.0221413e23 # Avogadro's constant
_H_MASS = 1.6738e-27 # [kg]
_HE3_MASS = 5.0081e-27 # [kg]
_HE4_MASS = 6.6464e-27 # [kg]
_LI7_MASS = 1.16503486e-26 # [kg]
_BE7_MASS = 1.16518851e-26 # [kg]
_E_MASS = 9.10938291e-31 # [kg]
"""NUCLEAR ENERGY VALUES"""
# Q-values (energy released) per reaction of the proton-proton chains.
# PP I
_Q_H_H = 1.177 # [MeV]
_Q_D_HE = 5.494 # [MeV]
_Q_HE3_HE3 = 12.860 # [MeV]
# PP II
_Q_HE3_ALPHA = 1.586 # [MeV]
_Q_BE7_E = 0.049 # [MeV]
_Q_LI7_H = 17.346 # [MeV]
# PP III
_Q_BE7_H = 0.137 # [MeV]
_Q_B8 = 8.367 # [MeV]
_Q_BE8 = 2.995 # [MeV]
"""INITIAL PARAMETERS"""
# Initial conditions for the stellar model (solar units above).
_L0 = _L_SUN
_R0 = 0.5*_R_SUN
_M0 = 0.7*_M_SUN
_RHO0 = 1.e3 # [kg m^-3]  (was mislabeled [kg m^-1]; this is a mass density)
_T0 = 1.e5 # [K]
_P0 = 1.e11 # [Pa]
# Initial mass fractions: X hydrogen, Y helium (Y3 = He-3 part), Z metals.
_X0 = 0.7
_Y3_0 = 1.e-10
_Y0 = 0.29
_Z0 = 0.01
_Z0_7LI = 1.e-5
_Z0_7BE = 1.e-5
"""IONIZATION"""
# Mean molecular weight (neutral) and fully-ionized correction.
_MU0 = 1. / ( _X0 + _Y0 / 4. + _Z0 / 2. )
_E = _MU0 * ( _X0 + (1 + 2) * _Y0 / 4. )
_MU = _MU0 / (1 + _E)
| PaulMag/AST3310-Prj01 | python/SI_constants.py | Python | mit | 1,247 | [
"Avogadro"
] | f27d0ee1938fe0f654f9a948eb49e64e39bfde32dac6dee5a2adbb0eb5ca479c |
import numpy as np
import cv2
import imutils
import sys
from scipy.misc import imread
from scipy import signal
# Edge-detection pipeline: load image, smooth, apply a Laplacian kernel,
# invert, and use the result as a mask over the original image.
# Each stage is displayed via cv2.imshow and waits for a keypress.
image2 = cv2.imread(sys.argv[1],)
image2 = imutils.resize(image2, height=500)
cv2.imshow('image', image2)
cv2.waitKey(0)
# Convert to grayscale for filtering.
image1 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
image = imutils.resize(image1, height=500)
cv2.imshow('gdh', image)
cv2.waitKey(0)
# "gaussian" is actually a 5x5 averaging (box) kernel, not a true Gaussian.
gaussian = np.ones((5, 5), np.float32) / 25
# Standard 8-neighbor Laplacian kernel for edge detection.
laplacian = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
# Smooth first to suppress noise before the Laplacian.
dst = cv2.filter2D(image, -1, gaussian)
cv2.imshow('dsd', dst)
cv2.waitKey(0)
dst1 = cv2.filter2D(dst, -1, laplacian)
cv2.imshow('jh', dst1)
cv2.waitKey(0)
# (_,cnts, _) = cv2.findContours(dst1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# cv2.drawContours(imag,e cnts, -1, 255, 2)
# cv2.imshow('hello',image)
# cv2.waitKey(0)
# print(dst1)
# Invert so edges become dark on a light background.
dst1 = (255 - dst1)
cv2.imshow('dhgf', dst1)
cv2.waitKey(0)
# Use the inverted edge map as a mask on the original color image.
res = cv2.bitwise_and(image2, image2, mask=dst1)
cv2.imshow('win', res)
cv2.waitKey(0)
print(dst1.shape)
# Smooth the edge map again, then adaptively threshold it to a binary mask.
th2 = cv2.filter2D(dst1, -1, gaussian)
th3 = cv2.adaptiveThreshold(
    th2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
cv2.imshow('wind', th3)
cv2.waitKey(0)
# kernal = cv2.getStructuringElement(cv2.MORPH_RECT,(7,7))
# closed = cv2.morphologyEx(th3,cv2.MORPH_CLOSE,kernal)
# cv2.imshow('win3',closed)
# cv2.waitKey(0)
# Final result: original image masked by the thresholded edge map.
res1 = cv2.bitwise_and(image2, image2, mask=th3)
cv2.imshow('final', res1)
cv2.waitKey(0)
# a = 0
# b = 0
# count = 0
# for i in th3:
#     for j in i:
#         if j == 0:
#             count = count+1
#             image2[i][j] = [0,255,0]
#     a = a+1
# print(count)
# cv2.imshow('final',image2)
# cv2.waitKey(0)
| ITCoders/Surveillance-System | scripts/LOG_image.py | Python | gpl-3.0 | 1,613 | [
"Gaussian"
] | aca7904cf7d4076ff35243fa8d514cdd7d621cf1e5690e81e77cd6cc99c46351 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module containing classes to generate grain boundaries.
"""
import itertools
import logging
import warnings
from fractions import Fraction
from functools import reduce
from math import cos, floor, gcd
import numpy as np
from monty.fractions import lcm
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.sites import PeriodicSite
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# This module implements representations of grain boundaries, as well as
# algorithms for generating them.
__author__ = "Xiang-Guo Li"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Xiang-Guo Li"
__email__ = "xil110@ucsd.edu"
__date__ = "7/30/18"
logger = logging.getLogger(__name__)
class GrainBoundary(Structure):
"""
Subclass of Structure representing a GrainBoundary (gb) object.
Implements additional attributes pertaining to gbs, but the
init method does not actually implement any algorithm that
creates a gb. This is a DUMMY class who's init method only holds
information about the gb. Also has additional methods that returns
other information about a gb such as sigma value.
Note that all gbs have the gb surface normal oriented in the c-direction.
This means the lattice vectors a and b are in the gb surface plane (at
least for one grain) and the c vector is out of the surface plane
(though not necessary perpendicular to the surface.)
"""
def __init__(
self,
lattice,
species,
coords,
rotation_axis,
rotation_angle,
gb_plane,
join_plane,
init_cell,
vacuum_thickness,
ab_shift,
site_properties,
oriented_unit_cell,
validate_proximity=False,
coords_are_cartesian=False,
):
"""
Makes a gb structure, a structure object with additional information
and methods pertaining to gbs.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
rotation_axis (list): Rotation axis of GB in the form of a list of
integers, e.g. [1, 1, 0].
rotation_angle (float, in unit of degree): rotation angle of GB.
gb_plane (list): Grain boundary plane in the form of a list of integers
e.g.: [1, 2, 3].
join_plane (list): Joining plane of the second grain in the form of a list of
integers. e.g.: [1, 2, 3].
init_cell (Structure): initial bulk structure to form the GB.
site_properties (dict): Properties associated with the sites as a
dict of sequences, The sequences have to be the same length as
the atomic species and fractional_coords. For gb, you should
have the 'grain_label' properties to classify the sites as 'top',
'bottom', 'top_incident', or 'bottom_incident'.
vacuum_thickness (float in angstrom): The thickness of vacuum inserted
between two grains of the GB.
ab_shift (list of float, in unit of crystal vector a, b): The relative
shift along a, b vectors.
oriented_unit_cell (Structure): oriented unit cell of the bulk init_cell.
Help to accurate calculate the bulk properties that are consistent
with gb calculations.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
"""
self.oriented_unit_cell = oriented_unit_cell
self.rotation_axis = rotation_axis
self.rotation_angle = rotation_angle
self.gb_plane = gb_plane
self.join_plane = join_plane
self.init_cell = init_cell
self.vacuum_thickness = vacuum_thickness
self.ab_shift = ab_shift
super().__init__(
lattice,
species,
coords,
validate_proximity=validate_proximity,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties,
)
def copy(self):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
return GrainBoundary(
self.lattice,
self.species_and_occu,
self.frac_coords,
self.rotation_axis,
self.rotation_angle,
self.gb_plane,
self.join_plane,
self.init_cell,
self.vacuum_thickness,
self.ab_shift,
self.site_properties,
self.oriented_unit_cell,
)
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species. Note that Slab has to override this
because of the different __init__ args.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
s = Structure.from_sites(sites)
return GrainBoundary(
s.lattice,
s.species_and_occu,
s.frac_coords,
self.rotation_axis,
self.rotation_angle,
self.gb_plane,
self.join_plane,
self.init_cell,
self.vacuum_thickness,
self.ab_shift,
self.site_properties,
self.oriented_unit_cell,
)
@property
def sigma(self):
"""
This method returns the sigma value of the gb.
If using 'quick_gen' to generate GB, this value is not valid.
"""
return int(round(self.oriented_unit_cell.volume / self.init_cell.volume))
@property
def sigma_from_site_prop(self):
"""
This method returns the sigma value of the gb from site properties.
If the GB structure merge some atoms due to the atoms too closer with
each other, this property will not work.
"""
num_coi = 0
if None in self.site_properties["grain_label"]:
raise RuntimeError("Site were merged, this property do not work")
for tag in self.site_properties["grain_label"]:
if "incident" in tag:
num_coi += 1
return int(round(self.num_sites / num_coi))
@property
def top_grain(self):
"""
return the top grain (Structure) of the GB.
"""
top_sites = []
for i, tag in enumerate(self.site_properties["grain_label"]):
if "top" in tag:
top_sites.append(self.sites[i])
return Structure.from_sites(top_sites)
@property
def bottom_grain(self):
"""
return the bottom grain (Structure) of the GB.
"""
bottom_sites = []
for i, tag in enumerate(self.site_properties["grain_label"]):
if "bottom" in tag:
bottom_sites.append(self.sites[i])
return Structure.from_sites(bottom_sites)
@property
def coincidents(self):
"""
return the a list of coincident sites.
"""
coincident_sites = []
for i, tag in enumerate(self.site_properties["grain_label"]):
if "incident" in tag:
coincident_sites.append(self.sites[i])
return coincident_sites
def __str__(self):
comp = self.composition
outs = [
"Gb Summary (%s)" % comp.formula,
"Reduced Formula: %s" % comp.reduced_formula,
"Rotation axis: %s" % (self.rotation_axis,),
"Rotation angle: %s" % (self.rotation_angle,),
"GB plane: %s" % (self.gb_plane,),
"Join plane: %s" % (self.join_plane,),
"vacuum thickness: %s" % (self.vacuum_thickness,),
"ab_shift: %s" % (self.ab_shift,),
]
def to_s(x, rjust=10):
return ("%0.6f" % x).rjust(rjust)
outs.append("abc : " + " ".join([to_s(i) for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i) for i in self.lattice.angles]))
outs.append("Sites ({i})".format(i=len(self)))
for i, site in enumerate(self):
outs.append(
" ".join(
[
str(i + 1),
site.species_string,
" ".join([to_s(j, 12) for j in site.frac_coords]),
]
)
)
return "\n".join(outs)
def as_dict(self):
"""
Returns:
Dictionary representation of GrainBoundary object
"""
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["init_cell"] = self.init_cell.as_dict()
d["rotation_axis"] = self.rotation_axis
d["rotation_angle"] = self.rotation_angle
d["gb_plane"] = self.gb_plane
d["join_plane"] = self.join_plane
d["vacuum_thickness"] = self.vacuum_thickness
d["ab_shift"] = self.ab_shift
d["oriented_unit_cell"] = self.oriented_unit_cell.as_dict()
return d
@classmethod
def from_dict(cls, d):
"""
Generates a GrainBoundary object from a dictionary created by as_dict().
Args:
d: dict
Returns:
GrainBoundary object
"""
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return GrainBoundary(
lattice=lattice,
species=s.species_and_occu,
coords=s.frac_coords,
rotation_axis=d["rotation_axis"],
rotation_angle=d["rotation_angle"],
gb_plane=d["gb_plane"],
join_plane=d["join_plane"],
init_cell=Structure.from_dict(d["init_cell"]),
vacuum_thickness=d["vacuum_thickness"],
ab_shift=d["ab_shift"],
oriented_unit_cell=Structure.from_dict(d["oriented_unit_cell"]),
site_properties=s.site_properties,
)
class GrainBoundaryGenerator:
"""
This class is to generate grain boundaries (GBs) from bulk
conventional cell (fcc, bcc can from the primitive cell), and works for Cubic,
Tetragonal, Orthorhombic, Rhombohedral, and Hexagonal systems.
It generate GBs from given parameters, which includes
GB plane, rotation axis, rotation angle.
This class works for any general GB, including twist, tilt and mixed GBs.
The three parameters, rotation axis, GB plane and rotation angle, are
sufficient to identify one unique GB. While sometimes, users may not be able
to tell what exactly rotation angle is but prefer to use sigma as an parameter,
this class also provides the function that is able to return all possible
rotation angles for a specific sigma value.
The same sigma value (with rotation axis fixed) can correspond to
multiple rotation angles.
Users can use structure matcher in pymatgen to get rid of the redundant structures.
"""
def __init__(self, initial_structure, symprec=0.1, angle_tolerance=1):
"""
initial_structure (Structure): Initial input structure. It can
be conventional or primitive cell (primitive cell works for bcc and fcc).
For fcc and bcc, using conventional cell can lead to a non-primitive
grain boundary structure.
This code supplies Cubic, Tetragonal, Orthorhombic, Rhombohedral, and
Hexagonal systems.
symprec (float): Tolerance for symmetry finding. Defaults to 0.1 (the value used
in Materials Project), which is for structures with slight deviations
from their proper atomic positions (e.g., structures relaxed with
electronic structure codes).
A smaller value of 0.01 is often used for properly refined
structures with atoms in the proper symmetry coordinates.
User should make sure the symmetry is what you want.
angle_tolerance (float): Angle tolerance for symmetry finding.
"""
analyzer = SpacegroupAnalyzer(initial_structure, symprec, angle_tolerance)
self.lat_type = analyzer.get_lattice_type()[0]
if self.lat_type == "t":
# need to use the conventional cell for tetragonal
initial_structure = analyzer.get_conventional_standard_structure()
a, b, c = initial_structure.lattice.abc
# c axis of tetragonal structure not in the third direction
if abs(a - b) > symprec:
# a == c, rotate b to the third direction
if abs(a - c) < symprec:
initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
# b == c, rotate a to the third direction
else:
initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
elif self.lat_type == "h":
alpha, beta, gamma = initial_structure.lattice.angles
# c axis is not in the third direction
if abs(gamma - 90) < angle_tolerance:
# alpha = 120 or 60, rotate b, c to a, b vectors
if abs(alpha - 90) > angle_tolerance:
initial_structure.make_supercell([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
# beta = 120 or 60, rotate c, a to a, b vectors
elif abs(beta - 90) > angle_tolerance:
initial_structure.make_supercell([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
elif self.lat_type == "r":
# need to use primitive cell for rhombohedra
initial_structure = analyzer.get_primitive_standard_structure()
elif self.lat_type == "o":
# need to use the conventional cell for orthorombic
initial_structure = analyzer.get_conventional_standard_structure()
self.initial_structure = initial_structure
    def gb_from_parameters(
        self,
        rotation_axis,
        rotation_angle,
        expand_times=4,
        vacuum_thickness=0.0,
        ab_shift=[0, 0],
        normal=False,
        ratio=None,
        plane=None,
        max_search=20,
        tol_coi=1.0e-8,
        rm_ratio=0.7,
        quick_gen=False,
    ):
        """
        Build a grain boundary structure from a rotation axis/angle and GB plane.

        Args:
            rotation_axis (list): Rotation axis of GB in the form of a list of integer
                e.g.: [1, 1, 0]
            rotation_angle (float, in unit of degree): rotation angle used to generate GB.
                Make sure the angle is accurate enough. You can use the enum* functions
                in this class to extract the accurate angle.
                e.g.: The rotation angle of sigma 3 twist GB with the rotation axis
                [1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degree.
                If you do not know the rotation angle, but know the sigma value, we have
                provide the function get_rotation_angle_from_sigma which is able to return
                all the rotation angles of sigma value you provided.
            expand_times (int): The multiple times used to expand one unit grain to larger grain.
                This is used to tune the grain length of GB to warrant that the two GBs in one
                cell do not interact with each other. Default set to 4.
            vacuum_thickness (float, in angstrom): The thickness of vacuum that you want to insert
                between two grains of the GB. Default to 0.
            ab_shift (list of float, in unit of a, b vectors of Gb): in plane shift of two grains
            normal (logic):
                determine if need to require the c axis of top grain (first transformation matrix)
                perperdicular to the surface or not.
                default to false.
            ratio (list of integers):
                lattice axial ratio.
                For cubic system, ratio is not needed.
                For tetragonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to none.
                For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
                that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
                e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
                For rhombohedral system, ratio = [mu, mv], list of two integers,
                that is, mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha).
                If irrational, set it to None.
                For hexagonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to none.
                This code also supplies a class method to generate the ratio from the
                structure (get_ratio). User can also make their own approximation and
                input the ratio directly.
            plane (list): Grain boundary plane in the form of a list of integers
                e.g.: [1, 2, 3]. If none, we set it as twist GB. The plane will be perpendicular
                to the rotation axis.
            max_search (int): max search for the GB lattice vectors that give the smallest GB
                lattice. If normal is true, also max search the GB c vector that perpendicular
                to the plane. For complex GB, if you want to speed up, you can reduce this value.
                But too small of this value may lead to error.
            tol_coi (float): tolerance to find the coincidence sites. When making approximations to
                the ratio needed to generate the GB, you probably need to increase this tolerance to
                obtain the correct number of coincidence sites. To check the number of coincidence
                sites are correct or not, you can compare the generated Gb object's sigma_from_site_prop
                with enum* sigma values (what user expected by input).
            rm_ratio (float): the criteria to remove the atoms which are too close with each other.
                rm_ratio*bond_length of bulk system is the criteria of bond length, below which the atom
                will be removed. Default to 0.7.
            quick_gen (bool): whether to quickly generate a supercell, if set to true, no need to
                find the smallest cell.

        Returns:
            Grain boundary structure (gb object).
        """
        # NOTE(review): the mutable default ab_shift=[0, 0] is only read, never
        # mutated, in this method, so sharing the default list across calls is safe.
        lat_type = self.lat_type
        # if the initial structure is primitive cell in cubic system,
        # calculate the transformation matrix from its conventional cell
        # to primitive cell, basically for bcc and fcc systems.
        trans_cry = np.eye(3)
        if lat_type == "c":
            analyzer = SpacegroupAnalyzer(self.initial_structure)
            convention_cell = analyzer.get_conventional_standard_structure()
            vol_ratio = self.initial_structure.volume / convention_cell.volume
            # bcc primitive cell, belong to cubic system
            if abs(vol_ratio - 0.5) < 1.0e-3:
                trans_cry = np.array([[0.5, 0.5, -0.5], [-0.5, 0.5, 0.5], [0.5, -0.5, 0.5]])
                logger.info("Make sure this is for cubic with bcc primitive cell")
            # fcc primitive cell, belong to cubic system
            elif abs(vol_ratio - 0.25) < 1.0e-3:
                trans_cry = np.array([[0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]])
                logger.info("Make sure this is for cubic with fcc primitive cell")
            else:
                logger.info("Make sure this is for cubic with conventional cell")
        elif lat_type == "t":
            logger.info("Make sure this is for tetragonal system")
            if ratio is None:
                logger.info("Make sure this is for irrational c2/a2")
            elif len(ratio) != 2:
                raise RuntimeError("Tetragonal system needs correct c2/a2 ratio")
        elif lat_type == "o":
            logger.info("Make sure this is for orthorhombic system")
            if ratio is None:
                raise RuntimeError(
                    "CSL does not exist if all axial ratios are irrational " "for an orthorhombic system"
                )
            if len(ratio) != 3:
                raise RuntimeError("Orthorhombic system needs correct c2:b2:a2 ratio")
        elif lat_type == "h":
            logger.info("Make sure this is for hexagonal system")
            if ratio is None:
                logger.info("Make sure this is for irrational c2/a2")
            elif len(ratio) != 2:
                raise RuntimeError("Hexagonal system needs correct c2/a2 ratio")
        elif lat_type == "r":
            logger.info("Make sure this is for rhombohedral system")
            if ratio is None:
                logger.info("Make sure this is for irrational (1+2*cos(alpha)/cos(alpha) ratio")
            elif len(ratio) != 2:
                raise RuntimeError("Rhombohedral system needs correct " "(1+2*cos(alpha)/cos(alpha) ratio")
        else:
            raise RuntimeError(
                "Lattice type not implemented. This code works for cubic, "
                "tetragonal, orthorhombic, rhombehedral, hexagonal systems"
            )
        # transform four index notation to three index notation for hexagonal and rhombohedral
        if len(rotation_axis) == 4:
            u1 = rotation_axis[0]
            v1 = rotation_axis[1]
            w1 = rotation_axis[3]
            if lat_type.lower() == "h":
                u = 2 * u1 + v1
                v = 2 * v1 + u1
                w = w1
                rotation_axis = [u, v, w]
            elif lat_type.lower() == "r":
                u = 2 * u1 + v1 + w1
                v = v1 + w1 - u1
                w = w1 - 2 * v1 - u1
                rotation_axis = [u, v, w]
        # make sure gcd(rotation_axis)==1
        if reduce(gcd, rotation_axis) != 1:
            rotation_axis = [int(round(x / reduce(gcd, rotation_axis))) for x in rotation_axis]
        # transform four index notation to three index notation for plane
        if plane is not None:
            if len(plane) == 4:
                u1 = plane[0]
                v1 = plane[1]
                w1 = plane[3]
                plane = [u1, v1, w1]
        # set the plane for grain boundary when plane is None.
        if plane is None:
            if lat_type.lower() == "c":
                plane = rotation_axis
            else:
                if lat_type.lower() == "h":
                    if ratio is None:
                        c2_a2_ratio = 1
                    else:
                        c2_a2_ratio = ratio[0] / ratio[1]
                    metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
                elif lat_type.lower() == "r":
                    if ratio is None:
                        cos_alpha = 0.5
                    else:
                        cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
                    metric = np.array(
                        [
                            [1, cos_alpha, cos_alpha],
                            [cos_alpha, 1, cos_alpha],
                            [cos_alpha, cos_alpha, 1],
                        ]
                    )
                elif lat_type.lower() == "t":
                    if ratio is None:
                        c2_a2_ratio = 1
                    else:
                        c2_a2_ratio = ratio[0] / ratio[1]
                    metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
                elif lat_type.lower() == "o":
                    for i in range(3):
                        if ratio[i] is None:
                            ratio[i] = 1
                    metric = np.array(
                        [
                            [1, 0, 0],
                            [0, ratio[1] / ratio[2], 0],
                            [0, 0, ratio[0] / ratio[2]],
                        ]
                    )
                else:
                    raise RuntimeError("Lattice type has not implemented.")
                # twist GB: plane perpendicular to the rotation axis, obtained
                # by applying the lattice metric to the axis
                plane = np.matmul(rotation_axis, metric)
                # rationalize and reduce the plane indices to coprime integers
                fractions = [Fraction(x).limit_denominator() for x in plane]
                least_mul = reduce(lcm, [f.denominator for f in fractions])
                plane = [int(round(x * least_mul)) for x in plane]
                if reduce(gcd, plane) != 1:
                    index = reduce(gcd, plane)
                    plane = [int(round(x / index)) for x in plane]
        # transformation matrices for the top (t1) and bottom (t2) grains
        t1, t2 = self.get_trans_mat(
            r_axis=rotation_axis,
            angle=rotation_angle,
            normal=normal,
            trans_cry=trans_cry,
            lat_type=lat_type,
            ratio=ratio,
            surface=plane,
            max_search=max_search,
            quick_gen=quick_gen,
        )
        # find the join_plane
        if lat_type.lower() != "c":
            if lat_type.lower() == "h":
                if ratio is None:
                    mu, mv = [1, 1]
                else:
                    mu, mv = ratio
                trans_cry1 = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0], [0, 0, np.sqrt(mu / mv)]])
            elif lat_type.lower() == "r":
                if ratio is None:
                    c2_a2_ratio = 1
                else:
                    mu, mv = ratio
                    c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
                trans_cry1 = np.array(
                    [
                        [0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
                        [-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
                        [0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
                    ]
                )
            else:
                if lat_type.lower() == "t":
                    if ratio is None:
                        mu, mv = [1, 1]
                    else:
                        mu, mv = ratio
                    lam = mv
                elif lat_type.lower() == "o":
                    new_ratio = [1 if v is None else v for v in ratio]
                    mu, lam, mv = new_ratio
                trans_cry1 = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
        else:
            trans_cry1 = trans_cry
        grain_matrix = np.dot(t2, trans_cry1)
        plane_init = np.cross(grain_matrix[0], grain_matrix[1])
        if lat_type.lower() != "c":
            plane_init = np.dot(plane_init, trans_cry1.T)
        join_plane = self.vec_to_surface(plane_init)
        parent_structure = self.initial_structure.copy()
        # calculate the bond_length in bulk system.
        # a single-atom cell has an all-zero distance matrix, so double the cell
        # along c first to obtain a finite nearest-neighbor distance.
        if len(parent_structure) == 1:
            temp_str = parent_structure.copy()
            temp_str.make_supercell([1, 1, 2])
            distance = temp_str.distance_matrix
        else:
            distance = parent_structure.distance_matrix
        bond_length = np.min(distance[np.nonzero(distance)])
        # top grain
        top_grain = fix_pbc(parent_structure * t1)
        # obtain the smallest oriended cell
        if normal and not quick_gen:
            t_temp = self.get_trans_mat(
                r_axis=rotation_axis,
                angle=rotation_angle,
                normal=False,
                trans_cry=trans_cry,
                lat_type=lat_type,
                ratio=ratio,
                surface=plane,
                max_search=max_search,
            )
            oriended_unit_cell = fix_pbc(parent_structure * t_temp[0])
            t_matrix = oriended_unit_cell.lattice.matrix
            normal_v_plane = np.cross(t_matrix[0], t_matrix[1])
            unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
            # in-plane component of the c vector per unit of its out-of-plane
            # component; used below to re-align the top grain when 'normal' is set
            unit_ab_adjust = (t_matrix[2] - np.dot(unit_normal_v, t_matrix[2]) * unit_normal_v) / np.dot(
                unit_normal_v, t_matrix[2]
            )
        else:
            oriended_unit_cell = top_grain.copy()
            unit_ab_adjust = 0.0
        # bottom grain, using top grain's lattice matrix
        bottom_grain = fix_pbc(parent_structure * t2, top_grain.lattice.matrix)
        # label both grains with 'top','bottom','top_incident','bottom_incident'
        n_sites = top_grain.num_sites
        t_and_b = Structure(
            top_grain.lattice,
            top_grain.species + bottom_grain.species,
            list(top_grain.frac_coords) + list(bottom_grain.frac_coords),
        )
        # pairwise distances between every top site and every bottom site
        t_and_b_dis = t_and_b.lattice.get_all_distances(
            t_and_b.frac_coords[0:n_sites], t_and_b.frac_coords[n_sites : n_sites * 2]
        )
        # coincident pairs are those within tol_coi of the minimal separation
        index_incident = np.nonzero(t_and_b_dis < np.min(t_and_b_dis) + tol_coi)
        top_labels = []
        for i in range(n_sites):
            if i in index_incident[0]:
                top_labels.append("top_incident")
            else:
                top_labels.append("top")
        bottom_labels = []
        for i in range(n_sites):
            if i in index_incident[1]:
                bottom_labels.append("bottom_incident")
            else:
                bottom_labels.append("bottom")
        top_grain = Structure(
            Lattice(top_grain.lattice.matrix),
            top_grain.species,
            top_grain.frac_coords,
            site_properties={"grain_label": top_labels},
        )
        bottom_grain = Structure(
            Lattice(bottom_grain.lattice.matrix),
            bottom_grain.species,
            bottom_grain.frac_coords,
            site_properties={"grain_label": bottom_labels},
        )
        # expand both grains
        top_grain.make_supercell([1, 1, expand_times])
        bottom_grain.make_supercell([1, 1, expand_times])
        top_grain = fix_pbc(top_grain)
        bottom_grain = fix_pbc(bottom_grain)
        # determine the top-grain location.
        edge_b = 1.0 - max(bottom_grain.frac_coords[:, 2])
        edge_t = 1.0 - max(top_grain.frac_coords[:, 2])
        c_adjust = (edge_t - edge_b) / 2.0
        # construct all species
        all_species = []
        all_species.extend([site.specie for site in bottom_grain])
        all_species.extend([site.specie for site in top_grain])
        half_lattice = top_grain.lattice
        # calculate translation vector, perpendicular to the plane
        normal_v_plane = np.cross(half_lattice.matrix[0], half_lattice.matrix[1])
        unit_normal_v = normal_v_plane / np.linalg.norm(normal_v_plane)
        translation_v = unit_normal_v * vacuum_thickness
        # construct the final lattice: c doubled to hold both grains, plus vacuum
        whole_matrix_no_vac = np.array(half_lattice.matrix)
        whole_matrix_no_vac[2] = half_lattice.matrix[2] * 2
        whole_matrix_with_vac = whole_matrix_no_vac.copy()
        whole_matrix_with_vac[2] = whole_matrix_no_vac[2] + translation_v * 2
        whole_lat = Lattice(whole_matrix_with_vac)
        # construct the coords, move top grain with translation_v
        all_coords = []
        grain_labels = bottom_grain.site_properties["grain_label"] + top_grain.site_properties["grain_label"]
        for site in bottom_grain:
            all_coords.append(site.coords)
        for site in top_grain:
            all_coords.append(
                site.coords
                + half_lattice.matrix[2] * (1 + c_adjust)
                + unit_ab_adjust * np.linalg.norm(half_lattice.matrix[2] * (1 + c_adjust))
                + translation_v
                + ab_shift[0] * whole_matrix_with_vac[0]
                + ab_shift[1] * whole_matrix_with_vac[1]
            )
        gb_with_vac = Structure(
            whole_lat,
            all_species,
            all_coords,
            coords_are_cartesian=True,
            site_properties={"grain_label": grain_labels},
        )
        # merge closer atoms. extract near gb atoms.
        # sites within range_c_len (fractional) of either GB plane (z = 0 or 0.5)
        # are candidates for merging
        cos_c_norm_plane = np.dot(unit_normal_v, whole_matrix_with_vac[2]) / whole_lat.c
        range_c_len = abs(bond_length / cos_c_norm_plane / whole_lat.c)
        sites_near_gb = []
        sites_away_gb = []
        for site in gb_with_vac.sites:
            if (
                site.frac_coords[2] < range_c_len
                or site.frac_coords[2] > 1 - range_c_len
                or (site.frac_coords[2] > 0.5 - range_c_len and site.frac_coords[2] < 0.5 + range_c_len)
            ):
                sites_near_gb.append(site)
            else:
                sites_away_gb.append(site)
        if len(sites_near_gb) >= 1:
            s_near_gb = Structure.from_sites(sites_near_gb)
            s_near_gb.merge_sites(tol=bond_length * rm_ratio, mode="d")
            all_sites = sites_away_gb + s_near_gb.sites
            gb_with_vac = Structure.from_sites(all_sites)
        # move coordinates into the periodic cell.
        gb_with_vac = fix_pbc(gb_with_vac, whole_lat.matrix)
        return GrainBoundary(
            whole_lat,
            gb_with_vac.species,
            gb_with_vac.cart_coords,
            rotation_axis,
            rotation_angle,
            plane,
            join_plane,
            self.initial_structure,
            vacuum_thickness,
            ab_shift,
            site_properties=gb_with_vac.site_properties,
            oriented_unit_cell=oriended_unit_cell,
            coords_are_cartesian=True,
        )
def get_ratio(self, max_denominator=5, index_none=None):
"""
find the axial ratio needed for GB generator input.
Args:
max_denominator (int): the maximum denominator for
the computed ratio, default to be 5.
index_none (int): specify the irrational axis.
0-a, 1-b, 2-c. Only may be needed for orthorhombic system.
Returns:
axial ratio needed for GB generator (list of integers).
"""
structure = self.initial_structure
lat_type = self.lat_type
if lat_type in ("t", "h"):
# For tetragonal and hexagonal system, ratio = c2 / a2.
a, c = (structure.lattice.a, structure.lattice.c)
if c > a:
frac = Fraction(c ** 2 / a ** 2).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
else:
frac = Fraction(a ** 2 / c ** 2).limit_denominator(max_denominator)
ratio = [frac.denominator, frac.numerator]
elif lat_type == "r":
# For rhombohedral system, ratio = (1 + 2 * cos(alpha)) / cos(alpha).
cos_alpha = cos(structure.lattice.alpha / 180 * np.pi)
frac = Fraction((1 + 2 * cos_alpha) / cos_alpha).limit_denominator(max_denominator)
ratio = [frac.numerator, frac.denominator]
elif lat_type == "o":
# For orthorhombic system, ratio = c2:b2:a2.If irrational for one axis, set it to None.
ratio = [None] * 3
lat = (structure.lattice.c, structure.lattice.b, structure.lattice.a)
index = [0, 1, 2]
if index_none is None:
min_index = np.argmin(lat)
index.pop(min_index)
frac1 = Fraction(lat[index[0]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
frac2 = Fraction(lat[index[1]] ** 2 / lat[min_index] ** 2).limit_denominator(max_denominator)
com_lcm = lcm(frac1.denominator, frac2.denominator)
ratio[min_index] = com_lcm
ratio[index[0]] = frac1.numerator * int(round((com_lcm / frac1.denominator)))
ratio[index[1]] = frac2.numerator * int(round((com_lcm / frac2.denominator)))
else:
index.pop(index_none)
if lat[index[0]] > lat[index[1]]:
frac = Fraction(lat[index[0]] ** 2 / lat[index[1]] ** 2).limit_denominator(max_denominator)
ratio[index[0]] = frac.numerator
ratio[index[1]] = frac.denominator
else:
frac = Fraction(lat[index[1]] ** 2 / lat[index[0]] ** 2).limit_denominator(max_denominator)
ratio[index[1]] = frac.numerator
ratio[index[0]] = frac.denominator
elif lat_type == "c":
# Cubic system does not need axial ratio.
return None
else:
raise RuntimeError("Lattice type not implemented.")
return ratio
    @staticmethod
    def get_trans_mat(
        r_axis,
        angle,
        normal=False,
        trans_cry=np.eye(3),
        lat_type="c",
        ratio=None,
        surface=None,
        max_search=20,
        quick_gen=False,
    ):
        """
        Find the two transformation matrix for each grain from given rotation axis,
        GB plane, rotation angle and corresponding ratio (see explanation for ratio
        below).
        The structure of each grain can be obtained by applying the corresponding
        transformation matrix to the conventional cell.
        The algorithm for this code is from reference, Acta Cryst, A32,783(1976).

        Args:
            r_axis (list of three integers, e.g. u, v, w
                or four integers, e.g. u, v, t, w for hex/rho system only):
                the rotation axis of the grain boundary.
            angle (float, in unit of degree) :
                the rotation angle of the grain boundary
            normal (logic):
                determine if need to require the c axis of one grain associated with
                the first transformation matrix perperdicular to the surface or not.
                default to false.
            trans_cry (3 by 3 array):
                if the structure given are primitive cell in cubic system, e.g.
                bcc or fcc system, trans_cry is the transformation matrix from its
                conventional cell to the primitive cell.
            lat_type ( one character):
                'c' or 'C': cubic system
                't' or 'T': tetragonal system
                'o' or 'O': orthorhombic system
                'h' or 'H': hexagonal system
                'r' or 'R': rhombohedral system
                default to cubic system
            ratio (list of integers):
                lattice axial ratio.
                For cubic system, ratio is not needed.
                For tetragonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to none.
                For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
                that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
                e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
                For rhombohedral system, ratio = [mu, mv], list of two integers,
                that is, mu/mv is the ratio of (1+2*cos(alpha)/cos(alpha).
                If irrational, set it to None.
                For hexagonal system, ratio = [mu, mv], list of two integers,
                that is, mu/mv = c2/a2. If it is irrational, set it to none.
            surface (list of three integers, e.g. h, k, l
                or four integers, e.g. h, k, i, l for hex/rho system only):
                the miller index of grain boundary plane, with the format of [h,k,l]
                if surface is not given, the default is perpendicular to r_axis, which is
                a twist grain boundary.
            max_search (int): max search for the GB lattice vectors that give the smallest GB
                lattice. If normal is true, also max search the GB c vector that perpendicular
                to the plane.
            quick_gen (bool): whether to quickly generate a supercell, if set to true, no need to
                find the smallest cell.

        Returns:
            t1 (3 by 3 integer array):
                The transformation array for one grain.
            t2 (3 by 3 integer array):
                The transformation array for the other grain
        """
        # NOTE(review): the shared default array trans_cry=np.eye(3) is only
        # re-bound below (never mutated in place), so the shared default is safe.
        # transform four index notation to three index notation
        if len(r_axis) == 4:
            u1 = r_axis[0]
            v1 = r_axis[1]
            w1 = r_axis[3]
            if lat_type.lower() == "h":
                u = 2 * u1 + v1
                v = 2 * v1 + u1
                w = w1
                r_axis = [u, v, w]
            elif lat_type.lower() == "r":
                u = 2 * u1 + v1 + w1
                v = v1 + w1 - u1
                w = w1 - 2 * v1 - u1
                r_axis = [u, v, w]
        # make sure gcd(r_axis)==1
        if reduce(gcd, r_axis) != 1:
            r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
        if surface is not None:
            if len(surface) == 4:
                u1 = surface[0]
                v1 = surface[1]
                w1 = surface[3]
                surface = [u1, v1, w1]
        # set the surface for grain boundary.
        if surface is None:
            if lat_type.lower() == "c":
                surface = r_axis
            else:
                if lat_type.lower() == "h":
                    if ratio is None:
                        c2_a2_ratio = 1
                    else:
                        c2_a2_ratio = ratio[0] / ratio[1]
                    metric = np.array([[1, -0.5, 0], [-0.5, 1, 0], [0, 0, c2_a2_ratio]])
                elif lat_type.lower() == "r":
                    if ratio is None:
                        cos_alpha = 0.5
                    else:
                        cos_alpha = 1.0 / (ratio[0] / ratio[1] - 2)
                    metric = np.array(
                        [
                            [1, cos_alpha, cos_alpha],
                            [cos_alpha, 1, cos_alpha],
                            [cos_alpha, cos_alpha, 1],
                        ]
                    )
                elif lat_type.lower() == "t":
                    if ratio is None:
                        c2_a2_ratio = 1
                    else:
                        c2_a2_ratio = ratio[0] / ratio[1]
                    metric = np.array([[1, 0, 0], [0, 1, 0], [0, 0, c2_a2_ratio]])
                elif lat_type.lower() == "o":
                    for i in range(3):
                        if ratio[i] is None:
                            ratio[i] = 1
                    metric = np.array(
                        [
                            [1, 0, 0],
                            [0, ratio[1] / ratio[2], 0],
                            [0, 0, ratio[0] / ratio[2]],
                        ]
                    )
                else:
                    raise RuntimeError("Lattice type has not implemented.")
                # twist GB default: surface from the axis via the lattice metric,
                # then rationalized and reduced to coprime integers
                surface = np.matmul(r_axis, metric)
                fractions = [Fraction(x).limit_denominator() for x in surface]
                least_mul = reduce(lcm, [f.denominator for f in fractions])
                surface = [int(round(x * least_mul)) for x in surface]
                if reduce(gcd, surface) != 1:
                    index = reduce(gcd, surface)
                    surface = [int(round(x / index)) for x in surface]
        if lat_type.lower() == "h":
            # set the value for u,v,w,mu,mv,m,n,d,x
            # check the reference for the meaning of these parameters
            u, v, w = r_axis
            # make sure mu, mv are coprime integers.
            if ratio is None:
                mu, mv = [1, 1]
                if w != 0:
                    if u != 0 or (v != 0):
                        raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] " "or [u,v,0] and m = 0")
            else:
                mu, mv = ratio
                if gcd(mu, mv) != 1:
                    temp = gcd(mu, mv)
                    mu = int(round(mu / temp))
                    mv = int(round(mv / temp))
            d = (u ** 2 + v ** 2 - u * v) * mv + w ** 2 * mu
            # a 180 degree rotation is the special case m = 0, n = 1
            if abs(angle - 180.0) < 1.0e0:
                m = 0
                n = 1
            else:
                fraction = Fraction(
                    np.tan(angle / 2 / 180.0 * np.pi) / np.sqrt(float(d) / 3.0 / mu)
                ).limit_denominator()
                m = fraction.denominator
                n = fraction.numerator
            # construct the rotation matrix, check reference for details
            r_list = [
                (u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + 2 * w * mu * m * n + 3 * mu * m ** 2,
                (2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
                2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
                (2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
                (v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 - 2 * w * mu * m * n + 3 * mu * m ** 2,
                2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
                (2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
                (2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
                (w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) * n ** 2 + 3 * mu * m ** 2,
            ]
            # the inverse rotation uses -m; sign is restored afterwards
            m = -1 * m
            r_list_inv = [
                (u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + 2 * w * mu * m * n + 3 * mu * m ** 2,
                (2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
                2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
                (2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
                (v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 - 2 * w * mu * m * n + 3 * mu * m ** 2,
                2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
                (2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
                (2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
                (w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) * n ** 2 + 3 * mu * m ** 2,
            ]
            m = -1 * m
            F = 3 * mu * m ** 2 + d * n ** 2
            all_list = r_list + r_list_inv + [F]
            # divide out the common factor of all entries to obtain sigma
            com_fac = reduce(gcd, all_list)
            sigma = F / com_fac
            r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
        elif lat_type.lower() == "r":
            # set the value for u,v,w,mu,mv,m,n,d
            # check the reference for the meaning of these parameters
            u, v, w = r_axis
            # make sure mu, mv are coprime integers.
            if ratio is None:
                mu, mv = [1, 1]
                if u + v + w != 0:
                    if u != v or u != w:
                        raise RuntimeError(
                            "For irrational ratio_alpha, CSL only exist for [1,1,1]" "or [u, v, -(u+v)] and m =0"
                        )
            else:
                mu, mv = ratio
                if gcd(mu, mv) != 1:
                    temp = gcd(mu, mv)
                    mu = int(round(mu / temp))
                    mv = int(round(mv / temp))
            d = (u ** 2 + v ** 2 + w ** 2) * (mu - 2 * mv) + 2 * mv * (v * w + w * u + u * v)
            # a 180 degree rotation is the special case m = 0, n = 1
            if abs(angle - 180.0) < 1.0e0:
                m = 0
                n = 1
            else:
                fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) / np.sqrt(float(d) / mu)).limit_denominator()
                m = fraction.denominator
                n = fraction.numerator
            # construct the rotation matrix, check reference for details
            r_list = [
                (mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2
                + 2 * mv * (v - w) * m * n
                - 2 * mv * v * w * n ** 2
                + mu * m ** 2,
                2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
                2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
                2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
                (mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2
                + 2 * mv * (w - u) * m * n
                - 2 * mv * u * w * n ** 2
                + mu * m ** 2,
                2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
                2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
                2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
                (mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2
                + 2 * mv * (u - v) * m * n
                - 2 * mv * u * v * n ** 2
                + mu * m ** 2,
            ]
            # the inverse rotation uses -m; sign is restored afterwards
            m = -1 * m
            r_list_inv = [
                (mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2
                + 2 * mv * (v - w) * m * n
                - 2 * mv * v * w * n ** 2
                + mu * m ** 2,
                2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
                2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
                2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
                (mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2
                + 2 * mv * (w - u) * m * n
                - 2 * mv * u * w * n ** 2
                + mu * m ** 2,
                2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
                2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
                2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
                (mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2
                + 2 * mv * (u - v) * m * n
                - 2 * mv * u * v * n ** 2
                + mu * m ** 2,
            ]
            m = -1 * m
            F = mu * m ** 2 + d * n ** 2
            all_list = r_list_inv + r_list + [F]
            # divide out the common factor of all entries to obtain sigma
            com_fac = reduce(gcd, all_list)
            sigma = F / com_fac
            r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
        else:
            # cubic, tetragonal and orthorhombic share one formulation with
            # (mu, lam, mv); cubic degenerates to mu = lam = mv = 1
            u, v, w = r_axis
            if lat_type.lower() == "c":
                mu = 1
                lam = 1
                mv = 1
            elif lat_type.lower() == "t":
                if ratio is None:
                    mu, mv = [1, 1]
                    if w != 0:
                        if u != 0 or (v != 0):
                            raise RuntimeError(
                                "For irrational c2/a2, CSL only exist for [0,0,1] " "or [u,v,0] and m = 0"
                            )
                else:
                    mu, mv = ratio
                lam = mv
            elif lat_type.lower() == "o":
                if None in ratio:
                    mu, lam, mv = ratio
                    non_none = [i for i in ratio if i is not None]
                    if len(non_none) < 2:
                        raise RuntimeError("No CSL exist for two irrational numbers")
                    non1, non2 = non_none
                    if mu is None:
                        lam = non1
                        mv = non2
                        mu = 1
                        if w != 0:
                            if u != 0 or (v != 0):
                                raise RuntimeError(
                                    "For irrational c2, CSL only exist for [0,0,1] " "or [u,v,0] and m = 0"
                                )
                    elif lam is None:
                        mu = non1
                        mv = non2
                        lam = 1
                        if v != 0:
                            if u != 0 or (w != 0):
                                raise RuntimeError(
                                    "For irrational b2, CSL only exist for [0,1,0] " "or [u,0,w] and m = 0"
                                )
                    elif mv is None:
                        mu = non1
                        lam = non2
                        mv = 1
                        if u != 0:
                            if w != 0 or (v != 0):
                                raise RuntimeError(
                                    "For irrational a2, CSL only exist for [1,0,0] " "or [0,v,w] and m = 0"
                                )
                else:
                    mu, lam, mv = ratio
                    if u == 0 and v == 0:
                        mu = 1
                    if u == 0 and w == 0:
                        lam = 1
                    if v == 0 and w == 0:
                        mv = 1
            # make sure mu, lambda, mv are coprime integers.
            if reduce(gcd, [mu, lam, mv]) != 1:
                temp = reduce(gcd, [mu, lam, mv])
                mu = int(round(mu / temp))
                mv = int(round(mv / temp))
                lam = int(round(lam / temp))
            d = (mv * u ** 2 + lam * v ** 2) * mv + w ** 2 * mu * mv
            # a 180 degree rotation is the special case m = 0, n = 1
            if abs(angle - 180.0) < 1.0e0:
                m = 0
                n = 1
            else:
                fraction = Fraction(np.tan(angle / 2 / 180.0 * np.pi) / np.sqrt(d / mu / lam)).limit_denominator()
                m = fraction.denominator
                n = fraction.numerator
            # construct the rotation matrix, check reference for details
            r_list = [
                (u ** 2 * mv * mv - lam * v ** 2 * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
                2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
                2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
                2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
                (v ** 2 * mv * lam - u ** 2 * mv * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
                2 * mv * mu * (v * w * n ** 2 - u * m * n),
                2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
                2 * lam * mv * (v * w * n ** 2 + u * m * n),
                (w ** 2 * mu * mv - u ** 2 * mv * mv - v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2,
            ]
            # the inverse rotation uses -m; sign is restored afterwards
            m = -1 * m
            r_list_inv = [
                (u ** 2 * mv * mv - lam * v ** 2 * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
                2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
                2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
                2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
                (v ** 2 * mv * lam - u ** 2 * mv * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
                2 * mv * mu * (v * w * n ** 2 - u * m * n),
                2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
                2 * lam * mv * (v * w * n ** 2 + u * m * n),
                (w ** 2 * mu * mv - u ** 2 * mv * mv - v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2,
            ]
            m = -1 * m
            F = mu * lam * m ** 2 + d * n ** 2
            all_list = r_list + r_list_inv + [F]
            # divide out the common factor of all entries to obtain sigma
            com_fac = reduce(gcd, all_list)
            sigma = F / com_fac
            r_matrix = (np.array(r_list) / com_fac / sigma).reshape(3, 3)
        if sigma > 1000:
            raise RuntimeError(
                "Sigma >1000 too large. Are you sure what you are doing, " "Please check the GB if exist"
            )
        # transform surface, r_axis, r_matrix in terms of primitive lattice
        # (trans_cry is the identity unless a cubic primitive cell was given)
        surface = np.matmul(surface, np.transpose(trans_cry))
        fractions = [Fraction(x).limit_denominator() for x in surface]
        least_mul = reduce(lcm, [f.denominator for f in fractions])
        surface = [int(round(x * least_mul)) for x in surface]
        if reduce(gcd, surface) != 1:
            index = reduce(gcd, surface)
            surface = [int(round(x / index)) for x in surface]
        r_axis = np.rint(np.matmul(r_axis, np.linalg.inv(trans_cry))).astype(int)
        if reduce(gcd, r_axis) != 1:
            r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
        r_matrix = np.dot(np.dot(np.linalg.inv(trans_cry.T), r_matrix), trans_cry.T)
        # set one vector of the basis to the rotation axis direction, and
        # obtain the corresponding transform matrix
        eye = np.eye(3, dtype=np.int_)
        for h in range(3):
            if abs(r_axis[h]) != 0:
                eye[h] = np.array(r_axis)
                # k, l are the other two basis directions (cyclic w.r.t. h)
                k = h + 1 if h + 1 < 3 else abs(2 - h)
                l = h + 2 if h + 2 < 3 else abs(1 - h)
                break
        trans = eye.T
        new_rot = np.array(r_matrix)
        # with the rotation matrix to construct the CSL lattice, check reference for details
        fractions = [Fraction(x).limit_denominator() for x in new_rot[:, k]]
        least_mul = reduce(lcm, [f.denominator for f in fractions])
        scale = np.zeros((3, 3))
        scale[h, h] = 1
        scale[k, k] = least_mul
        scale[l, l] = sigma / least_mul
        for i in range(least_mul):
            check_int = i * new_rot[:, k] + (sigma / least_mul) * new_rot[:, l]
            if all([np.round(x, 5).is_integer() for x in list(check_int)]):
                n_final = i
                break
        # n_final is only bound when an integer combination was found above
        try:
            n_final
        except NameError:
            raise RuntimeError("Something is wrong. Check if this GB exists or not")
        scale[k, l] = n_final
        # each row of mat_csl is the CSL lattice vector
        csl_init = np.rint(np.dot(np.dot(r_matrix, trans), scale)).astype(int).T
        if abs(r_axis[h]) > 1:
            csl_init = GrainBoundaryGenerator.reduce_mat(np.array(csl_init), r_axis[h], r_matrix)
        csl = np.rint(Lattice(csl_init).get_niggli_reduced_lattice().matrix).astype(int)
        # find the best slab supercell in terms of the conventional cell from the csl lattice,
        # which is the transformation matrix
        # now trans_cry is the transformation matrix from crystal to cartesian coordinates.
        # for cubic, do not need to change.
        if lat_type.lower() != "c":
            if lat_type.lower() == "h":
                trans_cry = np.array([[1, 0, 0], [-0.5, np.sqrt(3.0) / 2.0, 0], [0, 0, np.sqrt(mu / mv)]])
            elif lat_type.lower() == "r":
                if ratio is None:
                    c2_a2_ratio = 1
                else:
                    c2_a2_ratio = 3.0 / (2 - 6 * mv / mu)
                trans_cry = np.array(
                    [
                        [0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
                        [-0.5, np.sqrt(3.0) / 6.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
                        [0, -1 * np.sqrt(3.0) / 3.0, 1.0 / 3 * np.sqrt(c2_a2_ratio)],
                    ]
                )
            else:
                trans_cry = np.array([[1, 0, 0], [0, np.sqrt(lam / mv), 0], [0, 0, np.sqrt(mu / mv)]])
        t1_final = GrainBoundaryGenerator.slab_from_csl(
            csl, surface, normal, trans_cry, max_search=max_search, quick_gen=quick_gen
        )
        t2_final = np.array(np.rint(np.dot(t1_final, np.linalg.inv(r_matrix.T)))).astype(int)
        return t1_final, t2_final
@staticmethod
def enum_sigma_cubic(cutoff, r_axis):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
The algorithm for this code is from reference, Acta Cryst, A40,108(1984)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain respect to
the other grain.
When generate the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# count the number of odds in r_axis
odd_r = len(list(filter(lambda x: x % 2 == 1, r_axis)))
# Compute the max n we need to enumerate.
if odd_r == 3:
a_max = 4
elif odd_r == 0:
a_max = 1
else:
a_max = 2
n_max = int(np.sqrt(cutoff * a_max / sum(np.array(r_axis) ** 2)))
# enumerate all possible n, m to give possible sigmas within the cutoff.
for n_loop in range(1, n_max + 1):
n = n_loop
m_max = int(np.sqrt(cutoff * a_max - n ** 2 * sum(np.array(r_axis) ** 2)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
if m == 0:
n = 1
else:
n = n_loop
# construct the quadruple [m, U,V,W], count the number of odds in
# quadruple to determine the parameter a, refer to the reference
quadruple = [m] + [x * n for x in r_axis]
odd_qua = len(list(filter(lambda x: x % 2 == 1, quadruple)))
if odd_qua == 4:
a = 4
elif odd_qua == 2:
a = 2
else:
a = 1
sigma = int(round((m ** 2 + n ** 2 * sum(np.array(r_axis) ** 2)) / a))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
return sigmas
@staticmethod
def enum_sigma_hex(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in hexagonal system.
The algorithm for this code is from reference, Acta Cryst, A38,550(1982)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary.
c2_a2_ratio (list of two integers, e.g. mu, mv):
mu/mv is the square of the hexagonal axial ratio, which is rational
number. If irrational, set c2_a2_ratio = None
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain respect to the
other grain.
When generate the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1
v = 2 * v1 + u1
w = w1
else:
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] " "or [u,v,0] and m = 0")
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2 - u * v) * mv + w ** 2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 12 * mu * mv) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if (c2_a2_ratio is None) and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 12 * mu * mv - n ** 2 * d) / (3 * mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + 2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 - 2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) * n ** 2 + 3 * mu * m ** 2,
]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + 2 * w * mu * m * n + 3 * mu * m ** 2,
(2 * v - u) * u * mv * n ** 2 - 4 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * (2 * v - u) * mu * m * n,
(2 * u - v) * v * mv * n ** 2 + 4 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 - 2 * w * mu * m * n + 3 * mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * (2 * u - v) * mu * m * n,
(2 * u - v) * w * mv * n ** 2 - 3 * v * mv * m * n,
(2 * v - u) * w * mv * n ** 2 + 3 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv + u * v * mv) * n ** 2 + 3 * mu * m ** 2,
]
m = -1 * m
F = 3 * mu * m ** 2 + d * n ** 2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((3 * mu * m ** 2 + d * n ** 2) / com_fac))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / 3.0 / mu)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_rho(cutoff, r_axis, ratio_alpha):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in rhombohedral system.
The algorithm for this code is from reference, Acta Cryst, A45,505(1989).
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w):
the rotation axis of the grain boundary, with the format of [u,v,w]
or Weber indices [u, v, t, w].
ratio_alpha (list of two integers, e.g. mu, mv):
mu/mv is the ratio of (1+2*cos(alpha))/cos(alpha) with rational number.
If irrational, set ratio_alpha = None.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain respect to the
other grain.
When generate the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# transform four index notation to three index notation
if len(r_axis) == 4:
u1 = r_axis[0]
v1 = r_axis[1]
w1 = r_axis[3]
u = 2 * u1 + v1 + w1
v = v1 + w1 - u1
w = w1 - 2 * v1 - u1
r_axis = [u, v, w]
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if ratio_alpha is None:
mu, mv = [1, 1]
if u + v + w != 0:
if u != v or u != w:
raise RuntimeError(
"For irrational ratio_alpha, CSL only exist for [1,1,1]" "or [u, v, -(u+v)] and m =0"
)
else:
mu, mv = ratio_alpha
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2 + w ** 2) * (mu - 2 * mv) + 2 * mv * (v * w + w * u + u * v)
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv))) / abs(d)))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if ratio_alpha is None and u + v + w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * abs(4 * mu * (mu - 3 * mv)) - n ** 2 * d) / (mu)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n ** 2
+ mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n ** 2
+ mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n ** 2
+ mu * m ** 2,
]
m = -1 * m
# inverse of the rotation matrix
R_list_inv = [
(mu - 2 * mv) * (u ** 2 - v ** 2 - w ** 2) * n ** 2
+ 2 * mv * (v - w) * m * n
- 2 * mv * v * w * n ** 2
+ mu * m ** 2,
2 * (mv * u * n * (w * n + u * n - m) - (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
2 * (mv * u * n * (v * n + u * n + m) + (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * v * n * (w * n + v * n + m) + (mu - mv) * m * w * n + (mu - 2 * mv) * u * v * n ** 2),
(mu - 2 * mv) * (v ** 2 - w ** 2 - u ** 2) * n ** 2
+ 2 * mv * (w - u) * m * n
- 2 * mv * u * w * n ** 2
+ mu * m ** 2,
2 * (mv * v * n * (v * n + u * n - m) - (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
2 * (mv * w * n * (w * n + v * n - m) - (mu - mv) * m * v * n + (mu - 2 * mv) * w * u * n ** 2),
2 * (mv * w * n * (w * n + u * n + m) + (mu - mv) * m * u * n + (mu - 2 * mv) * w * v * n ** 2),
(mu - 2 * mv) * (w ** 2 - u ** 2 - v ** 2) * n ** 2
+ 2 * mv * (u - v) * m * n
- 2 * mv * u * v * n ** 2
+ mu * m ** 2,
]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = R_list_inv + R_list + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round(abs(F / com_fac)))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180.0
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_tet(cutoff, r_axis, c2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in tetragonal system.
The algorithm for this code is from reference, Acta Cryst, B46,117(1990)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
c2_a2_ratio (list of two integers, e.g. mu, mv):
mu/mv is the square of the tetragonal axial ratio with rational number.
if irrational, set c2_a2_ratio = None
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain respect to the
other grain.
When generate the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, mv are coprime integers.
if c2_a2_ratio is None:
mu, mv = [1, 1]
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2/a2, CSL only exist for [0,0,1] " "or [u,v,0] and m = 0")
else:
mu, mv = c2_a2_ratio
if gcd(mu, mv) != 1:
temp = gcd(mu, mv)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
# refer to the meaning of d in reference
d = (u ** 2 + v ** 2) * mv + w ** 2 * mu
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 4 * mu * mv) / d))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
if c2_a2_ratio is None and w == 0:
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 4 * mu * mv - n ** 2 * d) / mu))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + mu * m ** 2,
2 * v * u * mv * n ** 2 - 2 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * v * mu * m * n,
2 * u * v * mv * n ** 2 + 2 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 + mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * u * mu * m * n,
2 * u * w * mv * n ** 2 - 2 * v * mv * m * n,
2 * v * w * mv * n ** 2 + 2 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv) * n ** 2 + mu * m ** 2,
]
m = -1 * m
# inverse of rotation matrix
R_list_inv = [
(u ** 2 * mv - v ** 2 * mv - w ** 2 * mu) * n ** 2 + mu * m ** 2,
2 * v * u * mv * n ** 2 - 2 * w * mu * m * n,
2 * u * w * mu * n ** 2 + 2 * v * mu * m * n,
2 * u * v * mv * n ** 2 + 2 * w * mu * m * n,
(v ** 2 * mv - u ** 2 * mv - w ** 2 * mu) * n ** 2 + mu * m ** 2,
2 * v * w * mu * n ** 2 - 2 * u * mu * m * n,
2 * u * w * mv * n ** 2 - 2 * v * mv * m * n,
2 * v * w * mv * n ** 2 + 2 * u * mv * m * n,
(w ** 2 * mu - u ** 2 * mv - v ** 2 * mv) * n ** 2 + mu * m ** 2,
]
m = -1 * m
F = mu * m ** 2 + d * n ** 2
all_list = R_list + R_list_inv + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((mu * m ** 2 + d * n ** 2) / com_fac))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
@staticmethod
def enum_sigma_ort(cutoff, r_axis, c2_b2_a2_ratio):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in orthorhombic system.
The algorithm for this code is from reference, Scipta Metallurgica 27, 291(1992)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
c2_b2_a2_ratio (list of three integers, e.g. mu,lamda, mv):
mu:lam:mv is the square of the orthorhombic axial ratio with rational
numbers. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angle of one grain respect to the
other grain.
When generate the microstructure of the grain boundary using these
angles, you need to analyze the symmetry of the structure. Different
angles may result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
u, v, w = r_axis
# make sure mu, lambda, mv are coprime integers.
if None in c2_b2_a2_ratio:
mu, lam, mv = c2_b2_a2_ratio
non_none = [i for i in c2_b2_a2_ratio if i is not None]
if len(non_none) < 2:
raise RuntimeError("No CSL exist for two irrational numbers")
non1, non2 = non_none
if reduce(gcd, non_none) != 1:
temp = reduce(gcd, non_none)
non1 = int(round(non1 / temp))
non2 = int(round(non2 / temp))
if mu is None:
lam = non1
mv = non2
mu = 1
if w != 0:
if u != 0 or (v != 0):
raise RuntimeError("For irrational c2, CSL only exist for [0,0,1] " "or [u,v,0] and m = 0")
elif lam is None:
mu = non1
mv = non2
lam = 1
if v != 0:
if u != 0 or (w != 0):
raise RuntimeError("For irrational b2, CSL only exist for [0,1,0] " "or [u,0,w] and m = 0")
elif mv is None:
mu = non1
lam = non2
mv = 1
if u != 0:
if w != 0 or (v != 0):
raise RuntimeError("For irrational a2, CSL only exist for [1,0,0] " "or [0,v,w] and m = 0")
else:
mu, lam, mv = c2_b2_a2_ratio
if reduce(gcd, c2_b2_a2_ratio) != 1:
temp = reduce(gcd, c2_b2_a2_ratio)
mu = int(round(mu / temp))
mv = int(round(mv / temp))
lam = int(round(lam / temp))
if u == 0 and v == 0:
mu = 1
if u == 0 and w == 0:
lam = 1
if v == 0 and w == 0:
mv = 1
# refer to the meaning of d in reference
d = (mv * u ** 2 + lam * v ** 2) * mv + w ** 2 * mu * mv
# Compute the max n we need to enumerate.
n_max = int(np.sqrt((cutoff * 4 * mu * mv * mv * lam) / d))
# Enumerate all possible n, m to give possible sigmas within the cutoff.
for n in range(1, n_max + 1):
mu_temp, lam_temp, mv_temp = c2_b2_a2_ratio
if (mu_temp is None and w == 0) or (lam_temp is None and v == 0) or (mv_temp is None and u == 0):
m_max = 0
else:
m_max = int(np.sqrt((cutoff * 4 * mu * mv * lam * mv - n ** 2 * d) / mu / lam))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
# construct the rotation matrix, refer to the reference
R_list = [
(u ** 2 * mv * mv - lam * v ** 2 * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv - v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2,
]
m = -1 * m
# inverse of rotation matrix
R_list_inv = [
(u ** 2 * mv * mv - lam * v ** 2 * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * lam * (v * u * mv * n ** 2 - w * mu * m * n),
2 * mu * (u * w * mv * n ** 2 + v * lam * m * n),
2 * mv * (u * v * mv * n ** 2 + w * mu * m * n),
(v ** 2 * mv * lam - u ** 2 * mv * mv - w ** 2 * mu * mv) * n ** 2 + lam * mu * m ** 2,
2 * mv * mu * (v * w * n ** 2 - u * m * n),
2 * mv * (u * w * mv * n ** 2 - v * lam * m * n),
2 * lam * mv * (v * w * n ** 2 + u * m * n),
(w ** 2 * mu * mv - u ** 2 * mv * mv - v ** 2 * mv * lam) * n ** 2 + lam * mu * m ** 2,
]
m = -1 * m
F = mu * lam * m ** 2 + d * n ** 2
all_list = R_list + R_list_inv + [F]
# Compute the max common factors for the elements of the rotation matrix
# and its inverse.
com_fac = reduce(gcd, all_list)
sigma = int(round((mu * lam * m ** 2 + d * n ** 2) / com_fac))
if 1 < sigma <= cutoff:
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) / np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n / m * np.sqrt(d / mu / lam)) / np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
if m_max == 0:
break
return sigmas
    @staticmethod
    def enum_possible_plane_cubic(plane_cutoff, r_axis, r_angle):
        """
        Find all possible plane combinations for GBs given a rotation axis and angle for
        cubic system, and classify them to different categories, including 'Twist',
        'Symmetric tilt', 'Normal tilt', 'Mixed' GBs.

        Args:
            plane_cutoff (integer): the cutoff of plane miller index.
            r_axis (list of three integers, e.g. u, v, w):
                the rotation axis of the grain boundary, with the format of [u,v,w].
            r_angle (float): rotation angle of the GBs.

        Returns:
            all_combinations (dict):
                dictionary with keys as GB type, e.g. 'Twist','Symmetric tilt',etc.
                and values as the combination of the two plane miller index
                (GB plane and joining plane).
        """
        all_combinations = {}
        all_combinations["Symmetric tilt"] = []
        all_combinations["Twist"] = []
        all_combinations["Normal tilt"] = []
        all_combinations["Mixed"] = []
        # symmetry-equivalent planes of the {100} and {110} families, used below
        # to test whether the two GB planes are mirror images (presumably these
        # are the cubic mirror planes - confirm against symm_group_cubic)
        sym_plane = symm_group_cubic([[1, 0, 0], [1, 1, 0]])
        j = np.arange(0, plane_cutoff + 1)
        combination = []
        # enumerate every integer plane within the cutoff; the sign flips below
        # add the orientations that the all-non-negative grid misses (a plane and
        # its full inversion are equivalent, so only partial flips are needed)
        for i in itertools.product(j, repeat=3):
            if sum(abs(np.array(i))) != 0:
                combination.append(list(i))
            if len(np.nonzero(i)[0]) == 3:
                for i1 in range(3):
                    new_i = list(i).copy()
                    new_i[i1] = -1 * new_i[i1]
                    combination.append(new_i)
            elif len(np.nonzero(i)[0]) == 2:
                new_i = list(i).copy()
                new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
                combination.append(new_i)
        # sort candidate GB planes by norm so low-index planes are processed first
        miller = np.array(combination)
        miller = miller[np.argsort(np.linalg.norm(miller, axis=1))]
        for i, val in enumerate(miller):
            # only keep planes whose miller indices are coprime
            if reduce(gcd, val) == 1:
                matrix = GrainBoundaryGenerator.get_trans_mat(r_axis, r_angle, surface=val, quick_gen=True)
                # matrix is (t1_final, t2_final); the joining plane of the second
                # grain is normal to the cross product of t2's in-plane vectors
                vec = np.cross(matrix[1][0], matrix[1][1])
                miller2 = GrainBoundaryGenerator.vec_to_surface(vec)
                if np.all(np.abs(np.array(miller2)) <= plane_cutoff):
                    # cosine of the angle between the GB plane normal and the rotation axis
                    cos_1 = abs(np.dot(val, r_axis) / np.linalg.norm(val) / np.linalg.norm(r_axis))
                    if 1 - cos_1 < 1.0e-5:
                        # plane normal parallel to the rotation axis -> twist GB
                        all_combinations["Twist"].append([list(val), miller2])
                    elif cos_1 < 1.0e-8:
                        # plane normal perpendicular to the rotation axis -> tilt GB;
                        # decide whether it is a symmetric tilt below
                        sym_tilt = False
                        if np.sum(np.abs(val)) == np.sum(np.abs(miller2)):
                            # the two planes are mirror-related if their sum or
                            # difference lies along one of the sym_plane normals
                            ave = (np.array(val) + np.array(miller2)) / 2
                            ave1 = (np.array(val) - np.array(miller2)) / 2
                            for plane in sym_plane:
                                cos_2 = abs(np.dot(ave, plane) / np.linalg.norm(ave) / np.linalg.norm(plane))
                                cos_3 = abs(np.dot(ave1, plane) / np.linalg.norm(ave1) / np.linalg.norm(plane))
                                if 1 - cos_2 < 1.0e-5 or 1 - cos_3 < 1.0e-5:
                                    all_combinations["Symmetric tilt"].append([list(val), miller2])
                                    sym_tilt = True
                                    break
                        if not sym_tilt:
                            all_combinations["Normal tilt"].append([list(val), miller2])
                    else:
                        # neither parallel nor perpendicular -> mixed GB
                        all_combinations["Mixed"].append([list(val), miller2])
        return all_combinations
@staticmethod
def get_rotation_angle_from_sigma(sigma, r_axis, lat_type="C", ratio=None):
"""
Find all possible rotation angle for the given sigma value.
Args:
sigma (integer):
sigma value provided
r_axis (list of three integers, e.g. u, v, w
or four integers, e.g. u, v, t, w for hex/rho system only):
the rotation axis of the grain boundary.
lat_type ( one character):
'c' or 'C': cubic system
't' or 'T': tetragonal system
'o' or 'O': orthorhombic system
'h' or 'H': hexagonal system
'r' or 'R': rhombohedral system
default to cubic system
ratio (list of integers):
lattice axial ratio.
For cubic system, ratio is not needed.
For tetragonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
For orthorhombic system, ratio = [mu, lam, mv], list of three integers,
that is, mu:lam:mv = c2:b2:a2. If irrational for one axis, set it to None.
e.g. mu:lam:mv = c2,None,a2, means b2 is irrational.
For rhombohedral system, ratio = [mu, mv], list of two integers,
that is, mu/mv is the ratio of (1+2*cos(alpha)/cos(alpha).
If irrational, set it to None.
For hexagonal system, ratio = [mu, mv], list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to none.
Returns:
rotation_angles corresponding to the provided sigma value.
If the sigma value is not correct, return the rotation angle corresponding
to the correct possible sigma value right smaller than the wrong sigma value provided.
"""
if lat_type.lower() == "c":
logger.info("Make sure this is for cubic system")
sigma_dict = GrainBoundaryGenerator.enum_sigma_cubic(cutoff=sigma, r_axis=r_axis)
elif lat_type.lower() == "t":
logger.info("Make sure this is for tetragonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2 ratio")
elif len(ratio) != 2:
raise RuntimeError("Tetragonal system needs correct c2/a2 ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_tet(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == "o":
logger.info("Make sure this is for orthorhombic system")
if len(ratio) != 3:
raise RuntimeError("Orthorhombic system needs correct c2:b2:a2 ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_ort(cutoff=sigma, r_axis=r_axis, c2_b2_a2_ratio=ratio)
elif lat_type.lower() == "h":
logger.info("Make sure this is for hexagonal system")
if ratio is None:
logger.info("Make sure this is for irrational c2/a2 ratio")
elif len(ratio) != 2:
raise RuntimeError("Hexagonal system needs correct c2/a2 ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_hex(cutoff=sigma, r_axis=r_axis, c2_a2_ratio=ratio)
elif lat_type.lower() == "r":
logger.info("Make sure this is for rhombohedral system")
if ratio is None:
logger.info("Make sure this is for irrational (1+2*cos(alpha)/cos(alpha) ratio")
elif len(ratio) != 2:
raise RuntimeError("Rhombohedral system needs correct " "(1+2*cos(alpha)/cos(alpha) ratio")
sigma_dict = GrainBoundaryGenerator.enum_sigma_rho(cutoff=sigma, r_axis=r_axis, ratio_alpha=ratio)
else:
raise RuntimeError("Lattice type not implemented")
sigmas = list(sigma_dict.keys())
if not sigmas:
raise RuntimeError("This is a wriong sigma value, and no sigma exists smaller than this value.")
if sigma in sigmas:
rotation_angles = sigma_dict[sigma]
else:
sigmas.sort()
warnings.warn(
"This is not the possible sigma value according to the rotation axis!"
"The nearest neighbor sigma and its corresponding angle are returned"
)
rotation_angles = sigma_dict[sigmas[-1]]
rotation_angles.sort()
return rotation_angles
    @staticmethod
    def slab_from_csl(csl, surface, normal, trans_cry, max_search=20, quick_gen=False):
        """
        By linear operation of csl lattice vectors to get the best corresponding
        slab lattice. That is the area of a,b vectors (within the surface plane)
        is the smallest, the c vector first, has shortest length perpendicular
        to surface [h,k,l], second, has shortest length itself.

        Args:
            csl (3 by 3 integer array):
                input csl lattice.
            surface (list of three integers, e.g. h, k, l):
                the miller index of the surface, with the format of [h,k,l]
            normal (bool):
                determine if the c vector needs to perpendicular to surface
            trans_cry (3 by 3 array):
                transform matrix from crystal system to orthogonal system
            max_search (int): max search for the GB lattice vectors that give the smallest GB
                lattice. If normal is true, also max search the GB c vector that perpendicular
                to the plane.
            quick_gen (bool): whether to quickly generate a supercell, no need to find the smallest
                cell if set to true.

        Returns:
            t_matrix: a slab lattice (3 by 3 integer array), rows are the a, b, c
                slab vectors expressed in the csl basis.
        """
        # set the transform matrix in real space
        trans = trans_cry
        # transform matrix in reciprocal space
        ctrans = np.linalg.inv(trans.T)
        t_matrix = csl.copy()
        # in-plane candidate vectors (csl combinations perpendicular to surface)
        ab_vector = []
        # obtain the miller index of surface in terms of csl.
        miller = np.matmul(surface, csl.T)
        if reduce(gcd, miller) != 1:
            miller = [int(round(x / reduce(gcd, miller))) for x in miller]
        miller_nonzero = []
        # quick path: build any valid supercell without searching for the
        # smallest one (the `normal` option does not work in this mode)
        if quick_gen:
            scale_factor = []
            eye = np.eye(3, dtype=np.int_)
            for i, j in enumerate(miller):
                if j == 0:
                    # a csl vector with zero miller component already lies in-plane
                    scale_factor.append(eye[i])
                else:
                    miller_nonzero.append(i)
            if len(scale_factor) < 2:
                # combine pairs of out-of-plane csl vectors into in-plane ones
                index_len = len(miller_nonzero)
                for i in range(index_len):
                    for j in range(i + 1, index_len):
                        lcm_miller = lcm(miller[miller_nonzero[i]], miller[miller_nonzero[j]])
                        l = [0, 0, 0]
                        l[miller_nonzero[i]] = -int(round(lcm_miller / miller[miller_nonzero[i]]))
                        l[miller_nonzero[j]] = int(round(lcm_miller / miller[miller_nonzero[j]]))
                        scale_factor.append(l)
                        if len(scale_factor) == 2:
                            break
            t_matrix[0] = np.array(np.dot(scale_factor[0], csl))
            t_matrix[1] = np.array(np.dot(scale_factor[1], csl))
            t_matrix[2] = csl[miller_nonzero[0]]
            if abs(np.linalg.det(t_matrix)) > 1000:
                warnings.warn("Too large matrix. Suggest to use quick_gen=False")
            return t_matrix
        # split csl vectors into in-plane candidates and out-of-plane components
        for i, j in enumerate(miller):
            if j == 0:
                ab_vector.append(csl[i])
            else:
                c_index = i
                miller_nonzero.append(j)
        if len(miller_nonzero) > 1:
            t_matrix[2] = csl[c_index]
            index_len = len(miller_nonzero)
            lcm_miller = []
            for i in range(index_len):
                for j in range(i + 1, index_len):
                    com_gcd = gcd(miller_nonzero[i], miller_nonzero[j])
                    mil1 = int(round(miller_nonzero[i] / com_gcd))
                    mil2 = int(round(miller_nonzero[j] / com_gcd))
                    lcm_miller.append(max(abs(mil1), abs(mil2)))
            lcm_sorted = sorted(lcm_miller)
            if index_len == 2:
                max_j = lcm_sorted[0]
            else:
                max_j = lcm_sorted[1]
        else:
            if not normal:
                # only one out-of-plane component: the slab is already determined
                t_matrix[0] = ab_vector[0]
                t_matrix[1] = ab_vector[1]
                t_matrix[2] = csl[c_index]
                return t_matrix
            max_j = abs(miller_nonzero[0])
        if max_j > max_search:
            max_j = max_search
        # area of a, b vectors
        area = None
        # length of c vector
        c_norm = np.linalg.norm(np.matmul(t_matrix[2], trans))
        # c vector length along the direction perpendicular to surface
        c_length = np.abs(np.dot(t_matrix[2], surface))
        # check if the initial c vector is perpendicular to the surface
        if normal:
            c_cross = np.cross(np.matmul(t_matrix[2], trans), np.matmul(surface, ctrans))
            normal_init = np.linalg.norm(c_cross) < 1e-8
        # enumerate integer combinations of csl vectors up to max_j; sign flips
        # below add the orientations missed by the all-non-negative grid
        j = np.arange(0, max_j + 1)
        combination = []
        for i in itertools.product(j, repeat=3):
            if sum(abs(np.array(i))) != 0:
                combination.append(list(i))
            if len(np.nonzero(i)[0]) == 3:
                for i1 in range(3):
                    new_i = list(i).copy()
                    new_i[i1] = -1 * new_i[i1]
                    combination.append(new_i)
            elif len(np.nonzero(i)[0]) == 2:
                new_i = list(i).copy()
                new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
                combination.append(new_i)
        for i in combination:
            if reduce(gcd, i) == 1:
                temp = np.dot(np.array(i), csl)
                if abs(np.dot(temp, surface) - 0) < 1.0e-8:
                    # the combination lies in the surface plane -> a/b candidate
                    ab_vector.append(temp)
                else:
                    # c vector length along the direction perpendicular to surface
                    c_len_temp = np.abs(np.dot(temp, surface))
                    # c vector length itself
                    c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
                    if normal:
                        # keep the shortest c vector that is perpendicular to the surface
                        c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
                        if np.linalg.norm(c_cross) < 1.0e-8:
                            if normal_init:
                                if c_norm_temp < c_norm:
                                    t_matrix[2] = temp
                                    c_norm = c_norm_temp
                            else:
                                c_norm = c_norm_temp
                                normal_init = True
                                t_matrix[2] = temp
                    else:
                        # prefer the shortest perpendicular extent; break ties by length
                        if c_len_temp < c_length or (abs(c_len_temp - c_length) < 1.0e-8 and c_norm_temp < c_norm):
                            t_matrix[2] = temp
                            c_norm = c_norm_temp
                            c_length = c_len_temp
        if normal and (not normal_init):
            logger.info("Did not find the perpendicular c vector, increase max_j")
            # retry with a progressively larger search range, capped at max_search
            while not normal_init:
                if max_j == max_search:
                    warnings.warn("Cannot find the perpendicular c vector, please increase max_search")
                    break
                max_j = 3 * max_j
                if max_j > max_search:
                    max_j = max_search
                j = np.arange(0, max_j + 1)
                combination = []
                for i in itertools.product(j, repeat=3):
                    if sum(abs(np.array(i))) != 0:
                        combination.append(list(i))
                    if len(np.nonzero(i)[0]) == 3:
                        for i1 in range(3):
                            new_i = list(i).copy()
                            new_i[i1] = -1 * new_i[i1]
                            combination.append(new_i)
                    elif len(np.nonzero(i)[0]) == 2:
                        new_i = list(i).copy()
                        new_i[np.nonzero(i)[0][0]] = -1 * new_i[np.nonzero(i)[0][0]]
                        combination.append(new_i)
                for i in combination:
                    if reduce(gcd, i) == 1:
                        temp = np.dot(np.array(i), csl)
                        if abs(np.dot(temp, surface) - 0) > 1.0e-8:
                            c_cross = np.cross(np.matmul(temp, trans), np.matmul(surface, ctrans))
                            if np.linalg.norm(c_cross) < 1.0e-8:
                                # c vector length itself
                                c_norm_temp = np.linalg.norm(np.matmul(temp, trans))
                                if normal_init:
                                    if c_norm_temp < c_norm:
                                        t_matrix[2] = temp
                                        c_norm = c_norm_temp
                                else:
                                    c_norm = c_norm_temp
                                    normal_init = True
                                    t_matrix[2] = temp
                if normal_init:
                    logger.info("Found perpendicular c vector")
        # find the best a, b vectors with their formed area smallest and average norm of a,b smallest.
        for i in itertools.combinations(ab_vector, 2):
            area_temp = np.linalg.norm(np.cross(np.matmul(i[0], trans), np.matmul(i[1], trans)))
            if abs(area_temp - 0) > 1.0e-8:
                ab_norm_temp = np.linalg.norm(np.matmul(i[0], trans)) + np.linalg.norm(np.matmul(i[1], trans))
                if area is None:
                    area = area_temp
                    ab_norm = ab_norm_temp
                    t_matrix[0] = i[0]
                    t_matrix[1] = i[1]
                elif area_temp < area:
                    t_matrix[0] = i[0]
                    t_matrix[1] = i[1]
                    area = area_temp
                    ab_norm = ab_norm_temp
                elif abs(area - area_temp) < 1.0e-8 and ab_norm_temp < ab_norm:
                    t_matrix[0] = i[0]
                    t_matrix[1] = i[1]
                    area = area_temp
                    ab_norm = ab_norm_temp
        # flip all vectors if the determinant is negative, so the three slab
        # vectors form a right-handed set (det > 0)
        if np.linalg.det(np.matmul(t_matrix, trans)) < 0:
            t_matrix *= -1
        if normal and abs(np.linalg.det(t_matrix)) > 1000:
            warnings.warn("Too large matrix. Suggest to use Normal=False")
        return t_matrix
@staticmethod
def reduce_mat(mat, mag, r_matrix):
    """
    Reduce integer array mat's determinant mag times by linear combination
    of its row vectors, so that the new array after rotation (r_matrix) is
    still an integer array.

    Note: ``mat`` is modified in place (one row is replaced) and then
    returned.

    Args:
        mat (3 by 3 array): input matrix
        mag (integer): reduce times for the determinant
        r_matrix (3 by 3 array): rotation matrix

    Return:
        the reduced integer array
    """
    # search bound for the integer combination coefficients
    max_j = abs(int(round(np.linalg.det(mat) / mag)))
    reduced = False
    for h in range(3):
        # k and l are the two row indices other than h
        k = h + 1 if h + 1 < 3 else abs(2 - h)
        l = h + 2 if h + 2 < 3 else abs(1 - h)
        j = np.arange(-max_j, max_j + 1)
        for j1, j2 in itertools.product(j, repeat=2):
            # candidate replacement for row h
            temp = mat[h] + j1 * mat[k] + j2 * mat[l]
            # the candidate row must stay integer after dividing by mag ...
            if all([np.round(x, 5).is_integer() for x in list(temp / mag)]):
                mat_copy = mat.copy()
                mat_copy[h] = np.array([int(round(ele / mag)) for ele in temp])
                new_mat = np.dot(mat_copy, np.linalg.inv(r_matrix.T))
                # ... and the back-rotated matrix must be integer as well
                if all([np.round(x, 5).is_integer() for x in list(np.ravel(new_mat))]):
                    reduced = True
                    # commit the reduced row to the input matrix
                    mat[h] = np.array([int(round(ele / mag)) for ele in temp])
                    break
        if reduced:
            break
    if not reduced:
        warnings.warn("Matrix reduction not performed, may lead to non-primitive gb cell.")
    return mat
@staticmethod
def vec_to_surface(vec):
"""
Transform a float vector to a surface miller index with integers.
Args:
vec (1 by 3 array float vector): input float vector
Return:
the surface miller index of the input vector.
"""
miller = [None] * 3
index = []
for i, value in enumerate(vec):
if abs(value) < 1.0e-8:
miller[i] = 0
else:
index.append(i)
if len(index) == 1:
miller[index[0]] = 1
else:
min_index = np.argmin([i for i in vec if i != 0])
true_index = index[min_index]
index.pop(min_index)
frac = []
for i, value in enumerate(index):
frac.append(Fraction(vec[value] / vec[true_index]).limit_denominator(100))
if len(index) == 1:
miller[true_index] = frac[0].denominator
miller[index[0]] = frac[0].numerator
else:
com_lcm = lcm(frac[0].denominator, frac[1].denominator)
miller[true_index] = com_lcm
miller[index[0]] = frac[0].numerator * int(round((com_lcm / frac[0].denominator)))
miller[index[1]] = frac[1].numerator * int(round((com_lcm / frac[1].denominator)))
return miller
def factors(n):
    """
    Compute the factors of an integer.

    Args:
        n: the input integer (must be a positive integer)

    Returns:
        a set of integers that are the factors of the input integer.
    """
    # math.isqrt is exact for arbitrarily large ints; the previous
    # int(np.sqrt(n)) could be off by one due to float rounding
    from math import isqrt

    result = set()
    for i in range(1, isqrt(n) + 1):
        if n % i == 0:
            # add the divisor and its cofactor in one pass
            result.add(i)
            result.add(n // i)
    return result
def fix_pbc(structure, matrix=None):
    """
    Set all frac_coords of the input structure within [0, 1).

    Args:
        structure (pymatgen structure object):
            input structure
        matrix (lattice matrix, 3 by 3 array/matrix)
            new structure's lattice matrix, if none, use
            input structure's matrix

    Return:
        new structure with fixed frac_coords and lattice matrix
    """
    lattice = Lattice(structure.lattice.matrix if matrix is None else matrix)
    species = []
    frac_coords = []
    for site in structure:
        species.append(site.specie)
        wrapped = np.array(site.frac_coords)
        for axis in range(3):
            # wrap into [0, 1)
            wrapped[axis] -= floor(wrapped[axis])
            if np.allclose(wrapped[axis], 1) or np.allclose(wrapped[axis], 0):
                # snap values numerically indistinguishable from 0 or 1 to 0
                wrapped[axis] = 0
            else:
                wrapped[axis] = round(wrapped[axis], 7)
        frac_coords.append(wrapped)
    return Structure(lattice, species, frac_coords, site_properties=structure.site_properties)
def symm_group_cubic(mat):
    """
    Obtain cubic symmetric equivalents of the list of vectors.

    Args:
        mat (lattice matrix, n by 3 array/matrix): input vectors

    Return:
        cubic symmetric equivalents of the list of vectors.
    """
    # the 24 rotations of the cubic point group, as one float array
    rotations = np.array(
        [
            [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
            [[1, 0, 0], [0, -1, 0], [0, 0, -1]],
            [[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
            [[-1, 0, 0], [0, -1, 0], [0, 0, 1]],
            [[0, -1, 0], [-1, 0, 0], [0, 0, -1]],
            [[0, -1, 0], [1, 0, 0], [0, 0, 1]],
            [[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
            [[0, 1, 0], [1, 0, 0], [0, 0, -1]],
            [[-1, 0, 0], [0, 0, -1], [0, -1, 0]],
            [[-1, 0, 0], [0, 0, 1], [0, 1, 0]],
            [[1, 0, 0], [0, 0, -1], [0, 1, 0]],
            [[1, 0, 0], [0, 0, 1], [0, -1, 0]],
            [[0, 1, 0], [0, 0, 1], [1, 0, 0]],
            [[0, 1, 0], [0, 0, -1], [-1, 0, 0]],
            [[0, -1, 0], [0, 0, 1], [-1, 0, 0]],
            [[0, -1, 0], [0, 0, -1], [1, 0, 0]],
            [[0, 0, 1], [1, 0, 0], [0, 1, 0]],
            [[0, 0, 1], [-1, 0, 0], [0, -1, 0]],
            [[0, 0, -1], [1, 0, 0], [0, -1, 0]],
            [[0, 0, -1], [-1, 0, 0], [0, 1, 0]],
            [[0, 0, -1], [0, -1, 0], [-1, 0, 0]],
            [[0, 0, -1], [0, 1, 0], [1, 0, 0]],
            [[0, 0, 1], [0, -1, 0], [1, 0, 0]],
            [[0, 0, 1], [0, 1, 0], [-1, 0, 0]],
        ],
        dtype=float,
    )
    vectors = np.atleast_2d(mat)
    # apply every rotation to every input vector
    equivalents = [np.dot(rot, vec) for rot in rotations for vec in vectors]
    # np.unique sorts the rows and removes duplicates
    return np.unique(np.array(equivalents), axis=0)
| davidwaroquiers/pymatgen | pymatgen/analysis/gb/grain.py | Python | mit | 116,292 | [
"CRYSTAL",
"pymatgen"
] | 553e8cb6342d9f30ff7396efcbed2a10ba0ce3b0669440501a9a9da7f1f8460d |
#!/usr/bin/env python
from __future__ import division, print_function
import argparse
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import json
import os
import os.path
import re
import sys
import requests
DATA_TABLE_NAME = "primer_scheme_bedfiles"
def write_artic_style_bed(input_file, bed_output_filename):
    """Copy a BED file, rewriting numeric score columns into ARTIC style.

    The ARTIC pipeline (against the BED specification) requires a
    non-numeric value in column 5, so numeric scores get an underscore
    prefix; non-numeric values pass through unchanged.

    input_file: iterable of tab-separated BED lines.
    bed_output_filename: path the converted BED file is written to.
    """
    with open(bed_output_filename, "w") as out_handle:
        for raw_line in input_file:
            columns = raw_line.split("\t")
            if len(columns) < 6:
                # too short to encode the strand format
                exit("invalid format in BED file: {}".format(raw_line.rstrip()))
            try:
                # try and parse column 5 as a number
                numeric_score = float(columns[4])
            except ValueError:
                # already ARTIC-style (non-numeric score): keep as-is.
                # This is against the BED specs but required by the
                # ARTIC pipeline.
                pass
            else:
                # regular BED with a numeric score: "fix" it for ARTIC
                columns[4] = '_{0}'.format(numeric_score)
            out_handle.write("\t".join(columns))
def fetch_artic_primers(output_directory, primers):
    """Download the requested ARTIC primer schemes as ARTIC-style BED files.

    output_directory: directory the converted BED files are written into.
    primers: collection of primer-set names to fetch; unknown/unrequested
        names are skipped.

    Returns a list of data-table rows (dicts with value/path/description).
    Exits with the HTTP status code if a download fails.
    """
    primer_sets = {
        "SARS-CoV-2-ARTICv1": "https://raw.githubusercontent.com/artic-network/artic-ncov2019/master/primer_schemes/nCoV-2019/V1/nCoV-2019.bed",
        "SARS-CoV-2-ARTICv2": "https://raw.githubusercontent.com/artic-network/artic-ncov2019/master/primer_schemes/nCoV-2019/V2/nCoV-2019.bed",
        "SARS-CoV-2-ARTICv3": "https://raw.githubusercontent.com/artic-network/artic-ncov2019/master/primer_schemes/nCoV-2019/V3/nCoV-2019.bed",
    }
    rows = []
    for scheme_name, scheme_url in primer_sets.items():
        if scheme_name not in primers:
            continue
        response = requests.get(scheme_url)
        if response.status_code != 200:
            print(
                "Error: download of",
                scheme_url,
                "failed with code",
                response.status_code,
                file=sys.stderr,
            )
            exit(response.status_code)
        bed_path = os.path.join(output_directory, scheme_name + ".bed")
        write_artic_style_bed(StringIO(response.text), bed_path)
        # e.g. "SARS-CoV-2-ARTIC v3 primer set"
        scheme_description = scheme_name[:-2] + " " + scheme_name[-2:] + " primer set"
        rows.append(dict(value=scheme_name, path=bed_path, description=scheme_description))
    return rows
def install_primer_file(
    output_directory, input_filename, primer_name, primer_description
):
    """Install a user-supplied primer scheme BED file under a sanitised name.

    output_directory: directory the converted BED file is written into.
    input_filename: path of the BED file to install.
    primer_name: display name; spaces become underscores and any other
        non-word characters are stripped for the on-disk name.
    primer_description: free-text description stored in the data table.

    Returns a single-row data-table list describing the installed file.
    """
    safe_name = re.sub(r"\W", "", str(primer_name).replace(" ", "_"))
    installed_path = os.path.join(output_directory, safe_name + ".bed")
    with open(input_filename) as bed_input:
        write_artic_style_bed(bed_input, installed_path)
    return [dict(value=safe_name, description=primer_description, path=installed_path)]
class SplitArgs(argparse.Action):
    """argparse action that stores a comma-separated value as a list."""

    def __call__(self, parser, namespace, values, option_string=None):
        # e.g. "a,b,c" -> ["a", "b", "c"]
        setattr(namespace, self.dest, values.split(","))
if __name__ == "__main__":
    # CLI entry point: parse options, validate the two mutually exclusive
    # modes, install the primer scheme(s) and record them in the Galaxy
    # data-manager JSON file.
    parser = argparse.ArgumentParser(
        description="Fetch ARTIC SARS-CoV-2 primer files for Galaxy/IRIDA use"
    )
    parser.add_argument(
        "--output_directory", default="tmp", help="Directory to write output to"
    )
    # mode 1: install a local BED file (needs name + description as well)
    primer_file = parser.add_argument_group()
    primer_file.add_argument(
        "--primer_file", help="BED format file containing primer scheme"
    )
    primer_file.add_argument(
        "--primer_name",
        help="Name of primer scheme (one word). Required if --primer_file is used",
    )
    primer_file.add_argument(
        "--primer_description",
        help="Description of primer scheme. Required if --primer_file is used",
    )
    # mode 2: fetch one or more of the known ARTIC schemes
    artic = parser.add_argument_group()
    artic.add_argument(
        "--artic_primers",
        action=SplitArgs,
        help="Comma separated list of primers to fetch",
    )
    parser.add_argument(
        "galaxy_datamanager_filename",
        help="Galaxy JSON format file describing data manager inputs",
    )
    args = parser.parse_args()
    # exactly one of the two modes must be (fully) specified
    if args.artic_primers is None and args.primer_file is None:
        print(
            "One of --artic_primers or --primer_file + --primer_name + --primer_description is required.",
            file=sys.stderr,
        )
        exit(1)
    elif args.primer_file is not None and (
        args.primer_name is None or args.primer_description is None
    ):
        print(
            "If --primer_file is used --primer_name and --primer_description is also required",
            file=sys.stderr,
        )
        exit(1)
    elif args.primer_file is not None and args.artic_primers is not None:
        print(
            "Only one of --artic_primers or --primer_file + --primer_name + --primer_description can be chosen"
        )
        exit(1)
    # the data-manager JSON may name the directory the files must end up in
    config = json.load(open(args.galaxy_datamanager_filename))
    output_directory = config.get("output_data", [{}])[0].get("extra_files_path", None)
    if output_directory is None:
        output_directory = args.output_directory
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
    # merge new rows into any pre-existing data-table content
    # NOTE(review): the data-manager file is parsed a second time here even
    # though `config` above already holds the same content
    data_manager_dict = {}
    data_manager_dict["data_tables"] = json.load(
        open(args.galaxy_datamanager_filename)
    ).get("data_tables", {})
    data_manager_dict["data_tables"] = data_manager_dict.get("data_tables", {})
    data_manager_dict["data_tables"][DATA_TABLE_NAME] = data_manager_dict[
        "data_tables"
    ].get(DATA_TABLE_NAME, [])
    if args.artic_primers:
        data = fetch_artic_primers(output_directory, args.artic_primers)
    else:
        data = install_primer_file(
            output_directory,
            args.primer_file,
            args.primer_name,
            args.primer_description,
        )
    data_manager_dict["data_tables"][DATA_TABLE_NAME].extend(data)
    # NOTE(review): debug print to stdout — presumably harmless for the data
    # manager protocol, but consider removing
    print(data_manager_dict)
    json.dump(data_manager_dict, open(args.galaxy_datamanager_filename, "w"))
| mblue9/tools-iuc | data_managers/data_manager_primer_scheme_bedfiles/data_manager/install_primer_scheme_bedfiles.py | Python | mit | 6,053 | [
"Galaxy"
] | 67e5a076409682ed5984eaf500f89927b1978fc5527cb25e4296ab2d8736e01b |
# coding: utf-8
# !/usr/bin/env python
#
# This code is part of the binding affinity prediction tools distribution
# and governed by its license. Please see the LICENSE file that should
# have been included as part of this package.
#
"""
Binding affinity predictor based on Intermolecular Contacts (ICs).
Anna Vangone and Alexandre M.J.J. Bonvin,
Contacts-based prediction of binding affinity in protein-protein complexes.
eLife (2015)
"""
from __future__ import print_function, division
__version__ = '2.0'
__author__ = ["Anna Vangone", "Joao Rodrigues", "Joerg Schaarschmidt"]
import sys
import logging
try:
from Bio.PDB import NeighborSearch
except ImportError as e:
print('[!] The binding affinity prediction tools require Biopython', file=sys.stderr)
raise ImportError(e)
from lib.freesasa import execute_freesasa
from lib.models import IC_NIS
from lib.utils import _check_path, dg_to_kd
from lib.parsers import parse_structure
from lib import aa_properties
def calculate_ic(struct, d_cutoff=5.5, selection=None):
    """
    Calculates intermolecular contacts in a parsed struct object.

    struct: parsed structure object (Biopython-style).
    d_cutoff: residue-residue distance cutoff.
    selection: optional mapping of chain id -> group index; when given,
        only contacts between selected chains in *different* groups count.

    Raises ValueError if no contacts survive the filtering.
    """
    neighbor_search = NeighborSearch(list(struct.get_atoms()))
    residue_pairs = neighbor_search.search_all(radius=d_cutoff, level='R')

    contacts = []
    for res_a, res_b in residue_pairs:
        chain_a = res_a.parent.id
        chain_b = res_b.parent.id
        if selection:
            # both chains selected, and belonging to different groups
            if (chain_a in selection and chain_b in selection
                    and selection[chain_a] != selection[chain_b]):
                contacts.append((res_a, res_b))
        elif chain_a != chain_b:
            # no selection: any inter-chain pair counts
            contacts.append((res_a, res_b))

    if not contacts:
        raise ValueError('No contacts found for selection')
    return contacts
def analyse_contacts(contact_list):
    """
    Enumerates and classifies contacts based on the chemical characteristics
    of the participating amino acids.

    contact_list: iterable of (residue_i, residue_j) pairs.

    Returns a dict keyed by the alphabetically sorted character pair,
    e.g. 'AP' for an apolar-polar contact.
    """
    character = aa_properties.aa_character_ic
    bins = {'AA': 0, 'PP': 0, 'CC': 0, 'AP': 0, 'CP': 0, 'AC': 0}

    for res_i, res_j in contact_list:
        # sort so e.g. ('P', 'A') and ('A', 'P') land in the same bin
        pair_key = ''.join(sorted((character.get(res_i.resname),
                                   character.get(res_j.resname))))
        bins[pair_key] += 1

    return bins
def analyse_nis(sasa_dict, acc_threshold=0.05):
    """
    Returns the percentages of apolar, charged, and polar residues on the
    non-interacting surface, according to an accessibility criterion.

    sasa_dict: mapping of (chain, resname, resid) -> relative accessibility.
    acc_threshold: minimum relative accessibility for a residue to count.

    Returns [apolar %, charged %, polar %] — this order matches callers
    that unpack ``nis_a, nis_c, _``.
    """
    _data = aa_properties.aa_character_protorp
    # index 0: apolar (A), 1: charged (C), 2: polar (P)
    index_of = {'A': 0, 'C': 1, 'P': 2}

    count = [0, 0, 0]
    # .iteritems() was Python-2 only and crashes on Python 3; .items()
    # works on both
    for (chain, resn, resi), rsa in sasa_dict.items():
        if rsa >= acc_threshold:
            count[index_of[_data[resn]]] += 1

    total = sum(count)
    # return an eagerly evaluated list (the old lazy map object broke
    # repeated iteration and recomputed sum(count) per element)
    percentages = [100 * n / total for n in count]
    # print('[+] No. of buried interface residues: {0}'.format(sum(count)))
    return percentages
class Prodigy:
    """Contacts-based binding-affinity predictor for a protein complex.

    Bundles the intermolecular-contact (IC) analysis, the NIS surface
    analysis and the IC_NIS model, plus reporting helpers (text summary,
    contact list, PyMOL script).
    """

    # init parameters
    def __init__(self, struct_obj, selection=None, temp=25.0):
        """
        struct_obj: parsed structure object.
        selection: list of chain groups (e.g. ['A,B', 'C']); defaults to
            one group per chain of the structure.
        temp: temperature in Celsius, used to convert dG into Kd.
        """
        self.temp = float(temp)
        if selection is None:
            # BUGFIX: previously read the global `structure` (only defined
            # when run as a script) instead of the constructor argument,
            # raising NameError when used as a library.
            self.selection = [chain.id for chain in struct_obj.get_chains()]
        else:
            self.selection = selection
        self.structure = struct_obj
        self.ic_network = {}
        self.bins = {}
        self.nis_a = 0
        self.nis_c = 0
        self.ba_val = 0
        self.kd_val = 0

    def predict(self, temp=None, distance_cutoff=5.5, acc_threshold=0.05):
        """Run the full IC/NIS analysis and store the predicted dG and Kd."""
        if temp is not None:
            self.temp = temp
        # Make selection dict from user option or PDB chains
        selection_dict = {}
        for igroup, group in enumerate(self.selection):
            chains = group.split(',')
            for chain in chains:
                if chain in selection_dict:
                    errmsg = 'Selections must be disjoint sets: {0} is repeated'.format(chain)
                    raise ValueError(errmsg)
                selection_dict[chain] = igroup

        # Contacts
        self.ic_network = calculate_ic(self.structure, d_cutoff=distance_cutoff, selection=selection_dict)
        self.bins = analyse_contacts(self.ic_network)

        # SASA
        _, cmplx_sasa = execute_freesasa(self.structure, selection=selection_dict)
        self.nis_a, self.nis_c, _ = analyse_nis(cmplx_sasa, acc_threshold=acc_threshold)

        # Affinity Calculation
        self.ba_val = IC_NIS(self.bins['CC'], self.bins['AC'], self.bins['PP'], self.bins['AP'], self.nis_a, self.nis_c)
        self.kd_val = dg_to_kd(self.ba_val, self.temp)

    def as_dict(self):
        """Return inputs and results as one plain (JSON-friendly) dict."""
        return_dict = {
            'structure': self.structure.id,
            'selection': self.selection,
            'temp': self.temp,
            'ICs': len(self.ic_network),
            'nis_a': self.nis_a,
            'nis_c': self.nis_c,
            'ba_val': self.ba_val,
            'kd_val': self.kd_val,
        }
        return_dict.update(self.bins)
        return return_dict

    def print_prediction(self, outfile='', quiet=False):
        """Write the prediction report to outfile, or stdout if empty.

        quiet: write only "<structure id> <TAB> <dG>" instead of the full
        report.
        """
        if outfile:
            handle = open(outfile, 'w')
        else:
            handle = sys.stdout

        if quiet:
            handle.write('{0}\t{1:8.3f}\n'.format(self.structure.id, self.ba_val))
        else:
            handle.write('[+] No. of intermolecular contacts: {0}\n'.format(len(self.ic_network)))
            handle.write('[+] No. of charged-charged contacts: {0}\n'.format(self.bins['CC']))
            handle.write('[+] No. of charged-polar contacts: {0}\n'.format(self.bins['CP']))
            handle.write('[+] No. of charged-apolar contacts: {0}\n'.format(self.bins['AC']))
            handle.write('[+] No. of polar-polar contacts: {0}\n'.format(self.bins['PP']))
            handle.write('[+] No. of apolar-polar contacts: {0}\n'.format(self.bins['AP']))
            handle.write('[+] No. of apolar-apolar contacts: {0}\n'.format(self.bins['AA']))
            handle.write('[+] Percentage of apolar NIS residues: {0:3.2f}\n'.format(self.nis_a))
            handle.write('[+] Percentage of charged NIS residues: {0:3.2f}\n'.format(self.nis_c))
            handle.write('[++] Predicted binding affinity (kcal.mol-1): {0:8.1f}\n'.format(self.ba_val))
            handle.write(
                '[++] Predicted dissociation constant (M) at {:.1f}˚C: {:8.1e}\n'.format(self.temp, self.kd_val))

        if handle is not sys.stdout:
            handle.close()

    def print_contacts(self, outfile=''):
        """Write the residue-residue contact list to outfile or stdout."""
        if outfile:
            handle = open(outfile, 'w')
        else:
            handle = sys.stdout

        for res1, res2 in self.ic_network:
            _fmt_str = "{0.resname:>5s} {0.id[1]:5} {0.parent.id:>3s} {1.resname:>5s} {1.id[1]:5} {1.parent.id:>3s}\n"
            # always print the residue of the first selection group first
            if res1.parent.id not in self.selection[0]:
                res1, res2 = res2, res1
            handle.write(_fmt_str.format(res1, res2))

        if handle is not sys.stdout:
            handle.close()

    def print_pymol_script(self, outfile=''):
        """Write a PyMOL .pml script that colours and shows the interface."""
        # Writing output PYMOL: pml script
        # initialize array with chains and save chain selection string
        selection_strings = []
        chains = {}
        for s in self.selection:
            selection_strings.append(s.replace(",", '+'))
            for c in s.split(","):
                chains[c] = set()
        # loop over pairs and add interface residues to respective chains
        for pair in self.ic_network:
            for r in pair:
                chains[r.parent.id].add(str(r.id[1]))
        # set output stream
        handle = open(outfile, 'w') if outfile else sys.stdout
        # write default setup strings
        handle.writelines(["color silver\n", "as cartoon\n", "bg_color white\n", "center\n",
                           "color lightblue, chain {}\n".format(selection_strings[0]),
                           "color lightpink, chain {}\n".format(selection_strings[1])])
        # loop over interfaces construct selection strings and write interface related commands
        for color, iface in [('blue', 1), ('hotpink', 2)]:
            p_sel_string = " or ".join(["chain {} and resi {}".format(c, "+".join(chains[c]))
                                        for c in selection_strings[iface-1].split('+')])
            handle.write("select iface{}, {}\n".format(iface, p_sel_string))
            handle.write("color {}, iface{}\n".format(color, iface))
            handle.write("show sticks, iface{}\n".format(iface))
        # close file handle if applicable
        if handle is not sys.stdout:
            handle.close()
if __name__ == "__main__":
    # CLI entry point: parse arguments, read the structure, run the
    # prediction and optionally emit the contact list / PyMOL script.
    try:
        import argparse
        from argparse import RawTextHelpFormatter
    except ImportError as e:
        print('[!] The binding affinity prediction tools require Python 2.7+', file=sys.stderr)
        raise ImportError(e)

    ap = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
    ap.add_argument('structf', help='Structure to analyse in PDB or mmCIF format')
    ap.add_argument('--distance-cutoff', type=float, default=5.5, help='Distance cutoff to calculate ICs')
    ap.add_argument('--acc-threshold', type=float, default=0.05, help='Accessibility threshold for BSA analysis')
    ap.add_argument('--temperature', type=float, default=25.0, help='Temperature (C) for Kd prediction')
    ap.add_argument('--contact_list', action='store_true', help='Output a list of contacts')
    ap.add_argument('--pymol_selection', action='store_true', help='Output a script to highlight the interface (pymol)')
    ap.add_argument('-q', '--quiet', action='store_true', help='Outputs only the predicted affinity value')
    ap.add_argument('-V', '--version', action='version', version='%(prog)s {}'.format(__version__),
                    help='Print the version and exit.')
    # long-form help for the chain-selection syntax, shown in --help output
    _co_help = """
By default, all intermolecular contacts are taken into consideration,
a molecule being defined as an isolated group of amino acids sharing
a common chain identifier. In specific cases, for example
antibody-antigen complexes, some chains should be considered as a
single molecule.
Use the --selection option to provide collections of chains that should
be considered for the calculation. Separate by a space the chains that
are to be considered _different_ molecules. Use commas to include multiple
chains as part of a single group:
--selection A B => Contacts calculated (only) between chains A and B.
--selection A,B C => Contacts calculated (only) between chains A and C; and B and C.
--selection A B C => Contacts calculated (only) between chains A and B; B and C; and A and C.
"""
    sel_opt = ap.add_argument_group('Selection Options', description=_co_help)
    sel_opt.add_argument('--selection', nargs='+', metavar=('A B', 'A,B C'))
    cmd = ap.parse_args()

    # setup logging (quiet mode only reports errors)
    log_level = logging.ERROR if cmd.quiet else logging.INFO
    logging.basicConfig(level=log_level, stream=sys.stdout, format="%(message)s")
    logger = logging.getLogger('Prodigy')

    struct_path = _check_path(cmd.structf)

    # Parse structure
    structure, n_chains, n_res = parse_structure(struct_path)
    logger.info('[+] Parsed structure file {0} ({1} chains, {2} residues)'.format(structure.id, n_chains, n_res))

    prodigy = Prodigy(structure, cmd.selection, cmd.temperature)
    prodigy.predict(distance_cutoff=cmd.distance_cutoff, acc_threshold=cmd.acc_threshold)
    prodigy.print_prediction(quiet=cmd.quiet)

    # Print out interaction network
    if cmd.contact_list:
        fname = struct_path[:-4] + '.ic'
        prodigy.print_contacts(fname)

    # Write the PyMOL visualisation script
    if cmd.pymol_selection:
        fname = struct_path[:-4] + '.pml'
        prodigy.print_pymol_script(fname)
| haddocking/binding_affinity | predict_IC.py | Python | apache-2.0 | 11,803 | [
"Biopython",
"PyMOL"
] | e05e3c1e2a17e70fc2be51bb67d0fd4d17532b235fd6caef112842e9f251c9b4 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
from io import StringIO
import os
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import TreeNode
from skbio.util import get_data_path
from skbio.tree import DuplicateNodeError, MissingNodeError
from skbio.diversity.alpha import (
berger_parker_d, brillouin_d, dominance, doubles, enspie, equitability,
esty_ci, faith_pd, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q,
margalef, mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit,
observed_otus, osd, robbins, shannon, simpson, simpson_e, singles, strong)
class BaseTests(TestCase):
def setUp(self):
    # Shared fixtures: a counts vector, a 4-sample x 5-OTU table with
    # matching sample/OTU ids, and two trees (the second adds tips that
    # never appear in the table).
    self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
    self.b1 = np.array(
        [[1, 3, 0, 1, 0],
         [0, 2, 0, 4, 4],
         [0, 0, 6, 2, 1],
         [0, 0, 1, 1, 1]])
    self.sids1 = list('ABCD')
    self.oids1 = ['OTU%d' % i for i in range(1, 6)]
    self.t1 = TreeNode.read(StringIO(
        u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
        u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
    self.t1_w_extra_tips = TreeNode.read(
        StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                 u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
                 u')root;'))
def test_berger_parker_d(self):
    # Berger-Parker dominance: max count over total count.
    self.assertEqual(berger_parker_d(np.array([5])), 1)
    self.assertEqual(berger_parker_d(np.array([5, 5])), 0.5)
    self.assertEqual(berger_parker_d(np.array([1, 1, 1, 1, 0])), 0.25)
    self.assertEqual(berger_parker_d(self.counts), 5 / 22)
def test_brillouin_d(self):
    # Hand-computed reference value.
    self.assertAlmostEqual(brillouin_d(np.array([1, 2, 0, 0, 3, 1])),
                           0.86289353018248782)
def test_dominance(self):
    # Sum of squared relative abundances: (.1^2 + .2^2 + .5^2 + .2^2) = 0.34.
    self.assertEqual(dominance(np.array([5])), 1)
    self.assertAlmostEqual(dominance(np.array([1, 0, 2, 5, 2])), 0.34)
def test_doubles(self):
    # doubles counts the OTUs observed exactly twice.
    self.assertEqual(doubles(self.counts), 3)
    self.assertEqual(doubles(np.array([0, 3, 4])), 0)
    self.assertEqual(doubles(np.array([2])), 1)
    self.assertEqual(doubles(np.array([0, 0])), 0)
def test_enspie(self):
    # ENS_pie is the inverse of Simpson dominance.
    # Totally even community should have ENS_pie = number of OTUs.
    self.assertAlmostEqual(enspie(np.array([1, 1, 1, 1, 1, 1])), 6)
    self.assertAlmostEqual(enspie(np.array([13, 13, 13, 13])), 4)

    # Hand calculated.
    arr = np.array([1, 41, 0, 0, 12, 13])
    exp = 1 / ((arr / arr.sum()) ** 2).sum()
    self.assertAlmostEqual(enspie(arr), exp)

    # Using dominance.
    exp = 1 / dominance(arr)
    self.assertAlmostEqual(enspie(arr), exp)

    arr = np.array([1, 0, 2, 5, 2])
    exp = 1 / dominance(arr)
    self.assertAlmostEqual(enspie(arr), exp)
def test_equitability(self):
    # Perfectly even communities have equitability 1.
    self.assertAlmostEqual(equitability(np.array([5, 5])), 1)
    self.assertAlmostEqual(equitability(np.array([1, 1, 1, 1, 0])), 1)
def test_esty_ci(self):
    # Feed the observation sequence one index at a time and compare the
    # resulting lower/upper CI series against reference values.
    def _diversity(indices, f):
        """Calculate diversity index for each window of size 1.

        indices: vector of indices of OTUs
        f: f(counts) -> diversity measure
        """
        result = []
        max_size = max(indices) + 1
        freqs = np.zeros(max_size, dtype=int)
        for i in range(len(indices)):
            freqs += np.bincount(indices[i:i + 1], minlength=max_size)
            try:
                curr = f(freqs)
            except (ZeroDivisionError, FloatingPointError):
                # undefined at this window size; report 0
                curr = 0
            result.append(curr)
        return np.array(result)

    data = [1, 1, 2, 1, 1, 3, 2, 1, 3, 4]
    observed_lower, observed_upper = zip(*_diversity(data, esty_ci))

    expected_lower = np.array([1, -1.38590382, -0.73353593, -0.17434465,
                               -0.15060902, -0.04386191, -0.33042054,
                               -0.29041008, -0.43554755, -0.33385652])
    expected_upper = np.array([1, 1.38590382, 1.40020259, 0.67434465,
                               0.55060902, 0.71052858, 0.61613483,
                               0.54041008, 0.43554755, 0.53385652])
    npt.assert_array_almost_equal(observed_lower, expected_lower)
    npt.assert_array_almost_equal(observed_upper, expected_upper)
def test_faith_pd_none_observed(self):
    # With no observed OTUs, Faith's PD is zero.
    actual = faith_pd(np.array([], dtype=int), np.array([], dtype=int),
                      self.t1)
    expected = 0.0
    self.assertAlmostEqual(actual, expected)

    actual = faith_pd([0, 0, 0, 0, 0], self.oids1, self.t1)
    expected = 0.0
    self.assertAlmostEqual(actual, expected)
def test_faith_pd_all_observed(self):
    # With every OTU observed, PD equals the tree's total branch length,
    # regardless of the actual (non-zero) counts.
    actual = faith_pd([1, 1, 1, 1, 1], self.oids1, self.t1)
    expected = sum(n.length for n in self.t1.traverse()
                   if n.length is not None)
    self.assertAlmostEqual(actual, expected)

    actual = faith_pd([1, 2, 3, 4, 5], self.oids1, self.t1)
    expected = sum(n.length for n in self.t1.traverse()
                   if n.length is not None)
    self.assertAlmostEqual(actual, expected)
def test_faith_pd(self):
    # expected results derived from QIIME 1.9.1, which
    # is a completely different implementation than skbio's initial
    # phylogenetic diversity implementation
    actual = faith_pd(self.b1[0], self.oids1, self.t1)
    expected = 4.5
    self.assertAlmostEqual(actual, expected)

    actual = faith_pd(self.b1[1], self.oids1, self.t1)
    expected = 4.75
    self.assertAlmostEqual(actual, expected)

    actual = faith_pd(self.b1[2], self.oids1, self.t1)
    expected = 4.75
    self.assertAlmostEqual(actual, expected)

    actual = faith_pd(self.b1[3], self.oids1, self.t1)
    expected = 4.75
    self.assertAlmostEqual(actual, expected)
def test_faith_pd_extra_tips(self):
    # results are the same despite presence of unobserved tips in the tree
    actual = faith_pd(self.b1[0], self.oids1, self.t1_w_extra_tips)
    expected = faith_pd(self.b1[0], self.oids1, self.t1)
    self.assertAlmostEqual(actual, expected)

    actual = faith_pd(self.b1[1], self.oids1, self.t1_w_extra_tips)
    expected = faith_pd(self.b1[1], self.oids1, self.t1)
    self.assertAlmostEqual(actual, expected)

    actual = faith_pd(self.b1[2], self.oids1, self.t1_w_extra_tips)
    expected = faith_pd(self.b1[2], self.oids1, self.t1)
    self.assertAlmostEqual(actual, expected)

    actual = faith_pd(self.b1[3], self.oids1, self.t1_w_extra_tips)
    expected = faith_pd(self.b1[3], self.oids1, self.t1)
    self.assertAlmostEqual(actual, expected)
def test_faith_pd_minimal_trees(self):
    # expected values computed by hand

    # zero tips
    tree = TreeNode.read(StringIO(u'root;'))
    actual = faith_pd(np.array([], dtype=int), [], tree)
    expected = 0.0
    self.assertEqual(actual, expected)

    # two tips, one observed: only that tip's branch counts
    tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
    actual = faith_pd([1, 0], ['OTU1', 'OTU2'], tree)
    expected = 0.25
    self.assertEqual(actual, expected)
def test_faith_pd_qiime_tiny_test(self):
    # the following table and tree are derived from the QIIME 1.9.1
    # "tiny-test" data; compare per-sample PD against QIIME's output
    tt_table_fp = get_data_path(
        os.path.join('qiime-191-tt', 'otu-table.tsv'), 'data')
    tt_tree_fp = get_data_path(
        os.path.join('qiime-191-tt', 'tree.nwk'), 'data')

    self.q_table = pd.read_csv(tt_table_fp, sep='\t', skiprows=1,
                               index_col=0)
    self.q_tree = TreeNode.read(tt_tree_fp)

    expected_fp = get_data_path(
        os.path.join('qiime-191-tt', 'faith-pd.txt'), 'data')
    expected = pd.read_csv(expected_fp, sep='\t', index_col=0)

    for sid in self.q_table.columns:
        actual = faith_pd(self.q_table[sid], otu_ids=self.q_table.index,
                          tree=self.q_tree)
        self.assertAlmostEqual(actual, expected['PD_whole_tree'][sid])
def test_faith_pd_root_not_observed(self):
    # expected values computed by hand
    tree = TreeNode.read(
        StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
                 u'root;'))
    otu_ids = ['OTU%d' % i for i in range(1, 5)]

    # root node not observed, but branch between (OTU1, OTU2) and root
    # is considered observed: 0.1 + 0.2 + 0.3
    actual = faith_pd([1, 1, 0, 0], otu_ids, tree)
    expected = 0.6
    self.assertAlmostEqual(actual, expected)

    # root node not observed, but branch between (OTU3, OTU4) and root
    # is considered observed: 0.5 + 0.7 + 1.1
    actual = faith_pd([0, 0, 1, 1], otu_ids, tree)
    expected = 2.3
    self.assertAlmostEqual(actual, expected)
def test_faith_pd_invalid_input(self):
    # Each malformed input must raise the documented exception.
    # Many of these tests are duplicated from
    # skbio.diversity.tests.test_base, but I think it's important to
    # confirm that they are being run when faith_pd is called.

    # tree has duplicated tip ids
    t = TreeNode.read(
        StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                 u'0.75,OTU2:0.75):1.25):0.0)root;'))
    counts = [1, 2, 3]
    otu_ids = ['OTU1', 'OTU2', 'OTU3']
    self.assertRaises(DuplicateNodeError, faith_pd, counts, otu_ids, t)

    # unrooted tree as input
    t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
                               u'OTU4:0.7);'))
    counts = [1, 2, 3]
    otu_ids = ['OTU1', 'OTU2', 'OTU3']
    self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

    # otu_ids has duplicated ids
    t = TreeNode.read(
        StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                 u'0.75,OTU5:0.75):1.25):0.0)root;'))
    counts = [1, 2, 3]
    otu_ids = ['OTU1', 'OTU2', 'OTU2']
    self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

    # len of vectors not equal
    t = TreeNode.read(
        StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                 u'0.75,OTU5:0.75):1.25):0.0)root;'))
    counts = [1, 2]
    otu_ids = ['OTU1', 'OTU2', 'OTU3']
    self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
    counts = [1, 2, 3]
    otu_ids = ['OTU1', 'OTU2']
    self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

    # negative counts
    t = TreeNode.read(
        StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                 u'0.75,OTU5:0.75):1.25):0.0)root;'))
    counts = [1, 2, -3]
    otu_ids = ['OTU1', 'OTU2', 'OTU3']
    self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

    # tree with no branch lengths
    t = TreeNode.read(
        StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
    counts = [1, 2, 3]
    otu_ids = ['OTU1', 'OTU2', 'OTU3']
    self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

    # tree missing some branch lengths
    t = TreeNode.read(
        StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                 u'0.75,OTU5:0.75):1.25):0.0)root;'))
    counts = [1, 2, 3]
    otu_ids = ['OTU1', 'OTU2', 'OTU3']
    self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

    # otu_ids not present in tree
    t = TreeNode.read(
        StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                 u'0.75,OTU5:0.75):1.25):0.0)root;'))
    counts = [1, 2, 3]
    otu_ids = ['OTU1', 'OTU2', 'OTU42']
    self.assertRaises(MissingNodeError, faith_pd, counts, otu_ids, t)
def test_fisher_alpha(self):
    exp = 2.7823795367398798
    arr = np.array([4, 3, 4, 0, 1, 0, 2])
    obs = fisher_alpha(arr)
    self.assertAlmostEqual(obs, exp)

    # Should depend only on S and N (number of OTUs, number of
    # individuals / seqs), so we should obtain the same output as above.
    obs = fisher_alpha([1, 6, 1, 0, 1, 0, 5])
    self.assertAlmostEqual(obs, exp)

    # Should match another by hand:
    # 2 OTUs, 62 seqs, alpha is 0.39509
    obs = fisher_alpha([61, 0, 0, 1])
    self.assertAlmostEqual(obs, 0.39509, delta=0.0001)

    # Test case where we have >1000 individuals (SDR-IV makes note of this
    # case). Verified against R's vegan::fisher.alpha.
    obs = fisher_alpha([999, 0, 10])
    self.assertAlmostEqual(obs, 0.2396492)
def test_goods_coverage(self):
    # 75 singletons out of 98 individuals: coverage = 1 - 75/98.
    counts = [1] * 75 + [2, 2, 2, 2, 2, 2, 3, 4, 4]
    obs = goods_coverage(counts)
    self.assertAlmostEqual(obs, 0.23469387755)
def test_heip_e(self):
    # Calculate "by hand": (e^H - 1) / (S - 1) with natural-log Shannon.
    arr = np.array([1, 2, 3, 1])
    h = shannon(arr, base=np.e)
    expected = (np.exp(h) - 1) / 3
    self.assertEqual(heip_e(arr), expected)

    # From Statistical Ecology: A Primer in Methods and Computing, page 94,
    # table 8.1.
    self.assertAlmostEqual(heip_e([500, 300, 200]), 0.90, places=2)
    self.assertAlmostEqual(heip_e([500, 299, 200, 1]), 0.61, places=2)
def test_kempton_taylor_q(self):
    """kempton_taylor_q: approximate literature value; result is order-independent."""
    # Approximate Magurran 1998 calculation p143.
    arr = np.array([2, 3, 3, 3, 3, 3, 4, 4, 4, 6, 6, 7, 7, 9, 9, 11, 14,
                    15, 15, 20, 29, 33, 34, 36, 37, 53, 57, 138, 146, 170])
    exp = 14 / np.log(34 / 4)
    self.assertAlmostEqual(kempton_taylor_q(arr), exp)
    # Should get same answer regardless of input order.
    np.random.shuffle(arr)
    self.assertAlmostEqual(kempton_taylor_q(arr), exp)
def test_margalef(self):
    """margalef on self.counts: (S - 1) / ln(N) with S=9 OTUs, N=22 individuals."""
    expected = 8 / np.log(22)
    self.assertEqual(margalef(self.counts), expected)
def test_mcintosh_d(self):
    """mcintosh_d regression value for a small three-OTU community."""
    self.assertAlmostEqual(mcintosh_d(np.array([1, 2, 3])),
                           0.636061424871458)
def test_mcintosh_e(self):
    """mcintosh_e for [1, 2, 3, 1]: by-hand value sqrt(15) / sqrt(19)."""
    expected = np.sqrt(15) / np.sqrt(19)
    self.assertEqual(mcintosh_e(np.array([1, 2, 3, 1])), expected)
def test_menhinick(self):
    """menhinick = S / sqrt(N); self.counts has 9 observed OTUs, 22 individuals."""
    expected = 9 / np.sqrt(22)
    self.assertEqual(menhinick(self.counts), expected)
def test_michaelis_menten_fit(self):
    """michaelis_menten_fit: asymptotic richness estimates for simple communities."""
    # A single OTU saturates immediately: estimate equals observed richness.
    obs = michaelis_menten_fit([22])
    self.assertAlmostEqual(obs, 1.0)
    obs = michaelis_menten_fit([42])
    self.assertAlmostEqual(obs, 1.0)
    obs = michaelis_menten_fit([34], num_repeats=3, params_guess=(13, 13))
    self.assertAlmostEqual(obs, 1.0)
    obs = michaelis_menten_fit([70, 70], num_repeats=5)
    self.assertAlmostEqual(obs, 2.0, places=1)
    obs_few = michaelis_menten_fit(np.arange(4) * 2, num_repeats=10)
    obs_many = michaelis_menten_fit(np.arange(4) * 100, num_repeats=10)
    # [0,100,200,300] looks like only 3 OTUs.
    self.assertAlmostEqual(obs_many, 3.0, places=1)
    # [0,2,4,6] looks like 3 OTUs with maybe more to be found.
    self.assertTrue(obs_few > obs_many)
def test_observed_otus(self):
    """observed_otus counts nonzero entries; zero for an all-zero vector."""
    obs = observed_otus(np.array([4, 3, 4, 0, 1, 0, 2]))
    self.assertEqual(obs, 5)
    obs = observed_otus(np.array([0, 0, 0]))
    self.assertEqual(obs, 0)
    obs = observed_otus(self.counts)
    self.assertEqual(obs, 9)
def test_osd(self):
    """osd returns the (observed, singles, doubles) triple for self.counts."""
    triple = osd(self.counts)
    self.assertEqual(triple, (9, 3, 3))
def test_robbins(self):
    """robbins regression value (2/7) for a small community with two singletons."""
    community = np.array([1, 2, 3, 0, 1])
    self.assertEqual(robbins(community), 2 / 7)
def test_shannon(self):
    """shannon (base 2 by default): 0 for one OTU, 1 for two even, 2 for four even."""
    self.assertEqual(shannon(np.array([5])), 0)
    self.assertEqual(shannon(np.array([5, 5])), 1)
    self.assertEqual(shannon(np.array([1, 1, 1, 1, 0])), 2)
def test_simpson(self):
    """simpson index: regression value, and 0 for a single-OTU community."""
    self.assertAlmostEqual(simpson(np.array([1, 0, 2, 5, 2])), 0.66)
    self.assertAlmostEqual(simpson(np.array([5])), 0)
def test_simpson_e(self):
    """simpson_e: 1 for a perfectly even community; matches 1/(D*S) by hand."""
    # A totally even community should have simpson_e = 1.
    self.assertEqual(simpson_e(np.array([1, 1, 1, 1, 1, 1, 1])), 1)
    arr = np.array([0, 30, 25, 40, 0, 0, 5])
    freq_arr = arr / arr.sum()
    D = (freq_arr ** 2).sum()
    # S == 4 OTUs with nonzero counts.
    exp = 1 / (D * 4)
    obs = simpson_e(arr)
    self.assertEqual(obs, exp)
    # From:
    # https://groups.nceas.ucsb.edu/sun/meetings/calculating-evenness-
    # of-habitat-distributions
    arr = np.array([500, 400, 600, 500])
    D = 0.0625 + 0.04 + 0.09 + 0.0625
    exp = 1 / (D * 4)
    self.assertEqual(simpson_e(arr), exp)
def test_singles(self):
    """singles counts OTUs observed exactly once."""
    self.assertEqual(singles(self.counts), 3)
    self.assertEqual(singles(np.array([0, 3, 4])), 0)
    self.assertEqual(singles(np.array([1])), 1)
    self.assertEqual(singles(np.array([0, 0])), 0)
def test_strong(self):
    """strong dominance index regression value for a small community."""
    self.assertAlmostEqual(strong(np.array([1, 2, 3, 1])), 0.214285714)
# Allow running this test module directly (delegates to unittest's main()).
if __name__ == '__main__':
    main()
| SamStudio8/scikit-bio | skbio/diversity/alpha/tests/test_base.py | Python | bsd-3-clause | 17,647 | [
"scikit-bio"
] | 5d15e8d9e302748ca8439b6500ca7e50ed523e45108cfc920c067909e715ee72 |
import atexit
import contextlib
import fnmatch
import importlib.util
import itertools
import os
import shutil
import sys
import uuid
import warnings
from enum import Enum
from errno import EBADF
from errno import ELOOP
from errno import ENOENT
from errno import ENOTDIR
from functools import partial
from os.path import expanduser
from os.path import expandvars
from os.path import isabs
from os.path import sep
from pathlib import Path
from pathlib import PurePath
from posixpath import sep as posix_sep
from types import ModuleType
from typing import Callable
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import Set
from typing import TypeVar
from typing import Union
import py
from _pytest.compat import assert_never
from _pytest.outcomes import skip
from _pytest.warning_types import PytestWarning
LOCK_TIMEOUT = 60 * 60 * 24 * 3
_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath)
# The following function, variables and comments were
# copied from cpython 3.9 Lib/pathlib.py file.
# EBADF - guard against macOS `stat` throwing EBADF
_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP)
_IGNORED_WINERRORS = (
21, # ERROR_NOT_READY - drive exists but is not accessible
1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself
)
def _ignore_error(exception):
    """Return True when *exception* is one of the harmless filesystem errors
    (copied from CPython 3.9 pathlib) that directory scanning should skip."""
    errno_val = getattr(exception, "errno", None)
    winerror_val = getattr(exception, "winerror", None)
    return errno_val in _IGNORED_ERRORS or winerror_val in _IGNORED_WINERRORS
def get_lock_path(path: _AnyPurePath) -> _AnyPurePath:
    """Return the ``.lock`` file path used to guard *path* against cleanup."""
    return path / ".lock"
def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool:
    """Handle known read-only errors during rmtree.

    Used as a ``shutil.rmtree`` onerror callback: *func* is the failing os
    function, *path* the offending entry, *exc* the ``sys.exc_info()`` triple.
    The returned value is used only by our own tests.
    """
    exctype, excvalue = exc[:2]

    # Another process removed the file in the middle of the "rm_rf" (xdist for example).
    # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
    if isinstance(excvalue, FileNotFoundError):
        return False

    # Anything other than a permission problem is unexpected: warn, don't retry.
    if not isinstance(excvalue, PermissionError):
        warnings.warn(
            PytestWarning(f"(rm_rf) error removing {path}\n{exctype}: {excvalue}")
        )
        return False

    # Only retry for the removal functions; os.open failures are silently ignored.
    if func not in (os.rmdir, os.remove, os.unlink):
        if func not in (os.open,):
            warnings.warn(
                PytestWarning(
                    "(rm_rf) unknown function {} when removing {}:\n{}: {}".format(
                        func, path, exctype, excvalue
                    )
                )
            )
        return False

    # Chmod + retry.
    import stat

    def chmod_rw(p: str) -> None:
        # Grant the owner read+write on top of the existing mode bits.
        mode = os.stat(p).st_mode
        os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR)

    # For files, we need to recursively go upwards in the directories to
    # ensure they all are also writable.
    p = Path(path)
    if p.is_file():
        for parent in p.parents:
            chmod_rw(str(parent))
            # Stop when we reach the original path passed to rm_rf.
            if parent == start_path:
                break
    chmod_rw(str(path))

    func(path)
    return True
def ensure_extended_length_path(path: Path) -> Path:
    """Get the extended-length version of a path (Windows).

    On Windows, by default, the maximum length of a path (MAX_PATH) is 260
    characters, and operations on paths longer than that fail. But it is possible
    to overcome this by converting the path to "extended-length" form before
    performing the operation:
    https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation

    On Windows, this function returns the extended-length absolute version of path.
    On other platforms it returns path unchanged.
    """
    if sys.platform.startswith("win32"):
        # Extended-length paths must be absolute, hence the resolve() first.
        path = path.resolve()
        path = Path(get_extended_length_path_str(str(path)))
    return path
def get_extended_length_path_str(path: str) -> str:
    """Return *path* in Windows extended-length (``\\\\?\\``) form.

    Paths already carrying an extended-length prefix are returned unchanged;
    plain UNC paths (``\\\\server\\share``) receive the ``\\\\?\\UNC\\``
    prefix instead of the plain one.
    """
    ext_prefix = "\\\\?\\"
    ext_unc_prefix = "\\\\?\\UNC\\"
    if path.startswith(ext_prefix) or path.startswith(ext_unc_prefix):
        return path
    # Plain UNC share: drop the leading backslashes, keep the server part.
    if path[:2] == "\\\\":
        return ext_unc_prefix + path[2:]
    return ext_prefix + path
def rm_rf(path: Path) -> None:
    """Remove the path contents recursively, even if some elements
    are read-only.

    Read-only entries are chmod'ed writable and retried via on_rm_rf_error.
    """
    path = ensure_extended_length_path(path)
    onerror = partial(on_rm_rf_error, start_path=path)
    shutil.rmtree(str(path), onerror=onerror)
def find_prefixed(root: Path, prefix: str) -> Iterator[Path]:
    """Yield entries of *root* whose names start with *prefix*, case-insensitively."""
    wanted = prefix.lower()
    for entry in root.iterdir():
        if entry.name.lower().startswith(wanted):
            yield entry
def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]:
    """Yield, for each path, the part of its name that follows *prefix*.

    :param iter: Iterator over path names.
    :param prefix: Expected prefix of the path names.
    """
    cut = len(prefix)
    for entry in iter:
        yield entry.name[cut:]
def find_suffixes(root: Path, prefix: str) -> Iterator[str]:
    """Combine find_prefixes and extract_suffixes."""
    # Yields the suffix (typically a number) of every entry in `root`
    # whose name starts with `prefix` (case-insensitive match).
    return extract_suffixes(find_prefixed(root, prefix), prefix)
def parse_num(maybe_num) -> int:
    """Parse a numbered-directory suffix into an int; return -1 on error.

    -1 sorts below every real directory number, so unparsable suffixes are
    effectively ignored by callers that take the max. Also tolerates
    non-string inputs (e.g. None), which raise TypeError rather than
    ValueError from int().
    """
    try:
        return int(maybe_num)
    except (ValueError, TypeError):
        return -1
def _force_symlink(
    root: Path, target: Union[str, PurePath], link_to: Union[str, Path]
) -> None:
    """Helper to create the current symlink.

    It's full of race conditions that are reasonably OK to ignore
    for the context of best effort linking to the latest test run.

    The presumption being that in case of much parallelism
    the inaccuracy is going to be acceptable.
    """
    current_symlink = root.joinpath(target)
    # Remove any stale link first; ignore failure (it may not exist,
    # or another process may have removed it already).
    try:
        current_symlink.unlink()
    except OSError:
        pass
    # Best effort: symlink creation may fail (e.g. unsupported filesystem
    # or insufficient privileges on Windows) and that is fine.
    try:
        current_symlink.symlink_to(link_to)
    except Exception:
        pass
def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path:
    """Create a directory with an increased number as suffix for the given prefix.

    Retries on failure because concurrent processes may race for the same
    number; also maintains a best-effort ``<prefix>current`` symlink to the
    newly created directory.
    """
    for i in range(10):
        # try up to 10 times to create the folder
        max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
        new_number = max_existing + 1
        new_path = root.joinpath(f"{prefix}{new_number}")
        try:
            new_path.mkdir(mode=mode)
        except Exception:
            # Likely lost a race with another process; rescan and retry.
            pass
        else:
            _force_symlink(root, prefix + "current", new_path)
            return new_path
    else:
        raise OSError(
            "could not create numbered dir with prefix "
            "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
        )
def create_cleanup_lock(p: Path) -> Path:
    """Create a lock to prevent premature folder cleanup.

    The lock is created atomically (O_EXCL) and contains the owning PID;
    raises OSError if the lock already exists or disappears right after
    creation.
    """
    lock_path = get_lock_path(p)
    try:
        # O_EXCL makes creation atomic: fails if another process holds the lock.
        fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
    except FileExistsError as e:
        raise OSError(f"cannot create lockfile in {p}") from e
    else:
        pid = os.getpid()
        spid = str(pid).encode()
        os.write(fd, spid)
        os.close(fd)
        if not lock_path.is_file():
            raise OSError("lock path got renamed after successful creation")
        return lock_path
def register_cleanup_lock_removal(lock_path: Path, register=atexit.register):
    """Register a cleanup function for removing a lock, by default on atexit.

    The removal is skipped in forked children (PID mismatch) so only the
    process that created the lock removes it.
    """
    pid = os.getpid()

    def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None:
        current_pid = os.getpid()
        if current_pid != original_pid:
            # fork
            return
        try:
            lock_path.unlink()
        except OSError:
            pass

    return register(cleanup_on_exit)
def maybe_delete_a_numbered_dir(path: Path) -> None:
    """Remove a numbered directory if its lock can be obtained and it does
    not seem to be in use.

    The directory is first renamed to a unique ``garbage-*`` name so a
    concurrent crash mid-removal leaves an identifiable leftover instead of
    a half-deleted numbered dir.
    """
    path = ensure_extended_length_path(path)
    lock_path = None
    try:
        lock_path = create_cleanup_lock(path)
        parent = path.parent

        garbage = parent.joinpath(f"garbage-{uuid.uuid4()}")
        path.rename(garbage)
        rm_rf(garbage)
    except OSError:
        # known races:
        # * other process did a cleanup at the same time
        # * deletable folder was found
        # * process cwd (Windows)
        return
    finally:
        # If we created the lock, ensure we remove it even if we failed
        # to properly remove the numbered dir.
        if lock_path is not None:
            try:
                lock_path.unlink()
            except OSError:
                pass
def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool:
    """Check if `path` is deletable based on whether the lock file is expired.

    Returns True when there is no (readable) lock, or the lock is older than
    the given mtime threshold — in which case the stale lock is removed
    best-effort. Symlinks are never considered deletable.
    """
    if path.is_symlink():
        return False
    lock = get_lock_path(path)
    try:
        if not lock.is_file():
            return True
    except OSError:
        # we might not have access to the lock file at all, in this case assume
        # we don't have access to the entire directory (#7491).
        return False
    try:
        lock_time = lock.stat().st_mtime
    except Exception:
        return False
    else:
        if lock_time < consider_lock_dead_if_created_before:
            # We want to ignore any errors while trying to remove the lock such as:
            # - PermissionDenied, like the file permissions have changed since the lock creation;
            # - FileNotFoundError, in case another pytest process got here first;
            # and any other cause of failure.
            with contextlib.suppress(OSError):
                lock.unlink()
                return True
        return False
def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None:
    """Try to cleanup a folder if we can ensure it's deletable."""
    # ensure_deletable handles the lock protocol; deletion itself is best-effort.
    if ensure_deletable(path, consider_lock_dead_if_created_before):
        maybe_delete_a_numbered_dir(path)
def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]:
    """List candidates for numbered directories to be removed - follows py.path.

    Yields every prefixed directory whose number is at least `keep` behind
    the current maximum.
    """
    max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
    max_delete = max_existing - keep
    paths = find_prefixed(root, prefix)
    # tee() lets us pair each path with its parsed number without rescanning.
    paths, paths2 = itertools.tee(paths)
    numbers = map(parse_num, extract_suffixes(paths2, prefix))
    for path, number in zip(paths, numbers):
        if number <= max_delete:
            yield path
def cleanup_numbered_dir(
    root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float
) -> None:
    """Cleanup for lock driven numbered directories.

    Removes old numbered dirs beyond `keep`, plus any leftover ``garbage-*``
    dirs from interrupted removals.
    """
    for path in cleanup_candidates(root, prefix, keep):
        try_cleanup(path, consider_lock_dead_if_created_before)
    for path in root.glob("garbage-*"):
        try_cleanup(path, consider_lock_dead_if_created_before)
def make_numbered_dir_with_cleanup(
    root: Path, prefix: str, keep: int, lock_timeout: float, mode: int,
) -> Path:
    """Create a numbered dir with a cleanup lock and remove old ones.

    On success, registers an atexit hook that removes numbered dirs older
    than `keep` whose locks are past `lock_timeout`. Retries creation up to
    10 times and re-raises the last failure.
    """
    e = None
    for i in range(10):
        try:
            p = make_numbered_dir(root, prefix, mode)
            lock_path = create_cleanup_lock(p)
            register_cleanup_lock_removal(lock_path)
        except Exception as exc:
            e = exc
        else:
            consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout
            # Register a cleanup for program exit
            atexit.register(
                cleanup_numbered_dir,
                root,
                prefix,
                keep,
                consider_lock_dead_if_created_before,
            )
            return p
    assert e is not None
    raise e
def resolve_from_str(input: str, rootpath: Path) -> Path:
    """Resolve *input* (with ``~`` and env vars expanded) against *rootpath*.

    Absolute inputs are returned as-is; relative ones are joined onto
    *rootpath*.
    """
    expanded = expandvars(expanduser(input))
    if isabs(expanded):
        return Path(expanded)
    return rootpath.joinpath(expanded)
def fnmatch_ex(pattern: str, path) -> bool:
    """A port of FNMatcher from py.path.common which works with PurePath() instances.

    Unlike ``PurePath.match()``, which matches "**" per path part, a "**"
    here spans the whole path: "tests/**/doc/test*.py" matches
    "tests/foo/bar/doc/test_foo.py". Kept for backward compatibility with
    existing settings that assume this behaviour.

    References:
    * https://bugs.python.org/issue29249
    * https://bugs.python.org/issue34731
    """
    pure = PurePath(path)
    on_windows = sys.platform.startswith("win")
    if on_windows and sep not in pattern and posix_sep in pattern:
        # Running on Windows, the pattern has no Windows path separators,
        # and the pattern has one or more Posix path separators. Replace
        # the Posix path separators with the Windows path separator.
        pattern = pattern.replace(posix_sep, sep)

    if sep in pattern:
        subject = str(pure)
        if pure.is_absolute() and not os.path.isabs(pattern):
            # Let a relative pattern match anywhere inside an absolute path.
            pattern = f"*{os.sep}{pattern}"
    else:
        # A bare pattern is matched against the basename only.
        subject = pure.name
    return fnmatch.fnmatch(subject, pattern)
def parts(s: str) -> Set[str]:
    """Return the set of ancestor paths of *s*, including *s* itself.

    E.g. ``"a/b/c"`` -> ``{"a", "a/b", "a/b/c"}``. For an absolute path the
    empty first component maps to the bare separator (the root).
    """
    pieces = s.split(sep)
    ancestors = set()
    for end in range(1, len(pieces) + 1):
        ancestors.add(sep.join(pieces[:end]) or sep)
    return ancestors
def symlink_or_skip(src, dst, **kwargs):
    """Make a symlink, or skip the test in case symlinks are not supported."""
    # OSError covers both unsupported platforms and insufficient privileges
    # (e.g. non-admin on Windows); either way the test is skipped, not failed.
    try:
        os.symlink(str(src), str(dst), **kwargs)
    except OSError as e:
        skip(f"symlinks not supported: {e}")
class ImportMode(Enum):
    """Possible values for `mode` parameter of `import_path`."""

    prepend = "prepend"  # insert the module's root dir at the front of sys.path
    append = "append"  # append the root dir to sys.path if not already there
    importlib = "importlib"  # import via importlib without touching sys.path
class ImportPathMismatchError(ImportError):
    """Raised on import_path() if there is a mismatch of __file__'s.

    This can happen when `import_path` is called multiple times with different filenames that has
    the same basename but reside in packages
    (for example "/tests1/test_foo.py" and "/tests2/test_foo.py").
    """

    # Raised with args (module_name, module_file, path); no extra behavior.
def import_path(
    p: Union[str, py.path.local, Path],
    *,
    mode: Union[str, ImportMode] = ImportMode.prepend,
) -> ModuleType:
    """Import and return a module from the given path, which can be a file (a module) or
    a directory (a package).

    The import mechanism used is controlled by the `mode` parameter:

    * `mode == ImportMode.prepend`: the directory containing the module (or package, taking
      `__init__.py` files into account) will be put at the *start* of `sys.path` before
      being imported with `__import__.

    * `mode == ImportMode.append`: same as `prepend`, but the directory will be appended
      to the end of `sys.path`, if not already in `sys.path`.

    * `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib`
      to import the module, which avoids having to use `__import__` and muck with `sys.path`
      at all. It effectively allows having same-named test modules in different places.

    :raises ImportPathMismatchError:
        If after importing the given `path` and the module `__file__`
        are different. Only raised in `prepend` and `append` modes.
    """
    mode = ImportMode(mode)

    path = Path(str(p))

    if not path.exists():
        raise ImportError(path)

    if mode is ImportMode.importlib:
        module_name = path.stem

        # Give registered meta importers first shot at resolving the module.
        for meta_importer in sys.meta_path:
            spec = meta_importer.find_spec(module_name, [str(path.parent)])
            if spec is not None:
                break
        else:
            spec = importlib.util.spec_from_file_location(module_name, str(path))

        if spec is None:
            raise ImportError(
                "Can't find module {} at location {}".format(module_name, str(path))
            )
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)  # type: ignore[union-attr]
        return mod

    # Derive the dotted module name from the package structure on disk.
    pkg_path = resolve_package_path(path)
    if pkg_path is not None:
        pkg_root = pkg_path.parent
        names = list(path.with_suffix("").relative_to(pkg_root).parts)
        if names[-1] == "__init__":
            names.pop()
        module_name = ".".join(names)
    else:
        pkg_root = path.parent
        module_name = path.stem

    # Change sys.path permanently: restoring it at the end of this function would cause surprising
    # problems because of delayed imports: for example, a conftest.py file imported by this function
    # might have local imports, which would fail at runtime if we restored sys.path.
    if mode is ImportMode.append:
        if str(pkg_root) not in sys.path:
            sys.path.append(str(pkg_root))
    elif mode is ImportMode.prepend:
        if str(pkg_root) != sys.path[0]:
            sys.path.insert(0, str(pkg_root))
    else:
        assert_never(mode)

    importlib.import_module(module_name)

    mod = sys.modules[module_name]
    if path.name == "__init__.py":
        return mod

    # Sanity check: the imported module's __file__ should correspond to `path`,
    # otherwise a same-named module from elsewhere shadowed it.
    ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "")
    if ignore != "1":
        module_file = mod.__file__
        if module_file.endswith((".pyc", ".pyo")):
            module_file = module_file[:-1]
        if module_file.endswith(os.path.sep + "__init__.py"):
            module_file = module_file[: -(len(os.path.sep + "__init__.py"))]

        try:
            is_same = _is_same(str(path), module_file)
        except FileNotFoundError:
            is_same = False

        if not is_same:
            raise ImportPathMismatchError(module_name, module_file, path)

    return mod
# Implement a special _is_same function on Windows which returns True if the two filenames
# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678).
if sys.platform.startswith("win"):

    def _is_same(f1: str, f2: str) -> bool:
        # Path() equality normalizes case/separators before the samefile fallback.
        return Path(f1) == Path(f2) or os.path.samefile(f1, f2)


else:

    def _is_same(f1: str, f2: str) -> bool:
        return os.path.samefile(f1, f2)
def resolve_package_path(path: Path) -> Optional[Path]:
    """Return the Python package path by looking for the last
    directory upwards which still contains an __init__.py.

    Returns None if it can not be determined.
    """
    result = None
    # Walk from `path` itself up through its ancestors; stop at the first
    # directory that is not a valid package (no __init__.py or bad name).
    for parent in itertools.chain((path,), path.parents):
        if parent.is_dir():
            if not parent.joinpath("__init__.py").is_file():
                break
            if not parent.name.isidentifier():
                break
            result = parent
    return result
def visit(
    path: str, recurse: Callable[["os.DirEntry[str]"], bool]
) -> Iterator["os.DirEntry[str]"]:
    """Walk a directory recursively, in breadth-first order.

    `recurse` decides, per directory entry, whether to descend into it.
    Entries at each directory level are sorted.
    """

    # Skip entries with symlink loops and other brokenness, so the caller doesn't
    # have to deal with it.
    entries = []
    for entry in os.scandir(path):
        try:
            entry.is_file()
        except OSError as err:
            if _ignore_error(err):
                continue
            raise
        entries.append(entry)

    entries.sort(key=lambda entry: entry.name)

    yield from entries

    for entry in entries:
        if entry.is_dir() and recurse(entry):
            yield from visit(entry.path, recurse)
def absolutepath(path: Union[Path, str]) -> Path:
    """Convert a path to an absolute path using os.path.abspath.

    Prefer this over Path.resolve() (see #6523).
    Prefer this over Path.absolute() (not public, doesn't normalize).
    """
    as_text = str(path)
    return Path(os.path.abspath(as_text))
def commonpath(path1: Path, path2: Path) -> Optional[Path]:
    """Return the common part shared with the other path, or None if there is
    no common part.

    If one path is relative and one is absolute, returns None.
    """
    try:
        shared = os.path.commonpath((str(path1), str(path2)))
    except ValueError:
        # os.path.commonpath raises ValueError when the paths share nothing
        # (mixed absolute/relative, or different Windows drives).
        return None
    return Path(shared)
def bestrelpath(directory: Path, dest: Path) -> str:
    """Return a string which is a relative path from directory to dest such
    that directory/bestrelpath == dest.

    The paths must be either both absolute or both relative.

    If no such path can be determined, returns dest.
    """
    if dest == directory:
        return os.curdir
    # Find the longest common directory.
    base = commonpath(directory, dest)
    # Can be the case on Windows for two absolute paths on different drives.
    # Can be the case for two relative paths without common prefix.
    # Can be the case for a relative path and an absolute path.
    if not base:
        return str(dest)
    reldirectory = directory.relative_to(base)
    reldest = dest.relative_to(base)
    return os.path.join(
        # Back from directory to base.
        *([os.pardir] * len(reldirectory.parts)),
        # Forward from base to dest.
        *reldest.parts,
    )
| pexip/os-pytest | src/_pytest/pathlib.py | Python | mit | 21,411 | [
"VisIt"
] | 626d6ae6534a563c591e59725a772556f95b3581c2466e7c6be2f6801a4c14a4 |
"""
Miscellaneous utility functions.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
import cPickle
import gzip
import numpy as np
import os
import pandas as pd
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from vs_utils.utils.rdkit_utils import PicklableMol, serial
def write_dataframe(df, filename):
    """
    Serialize DataFrame.

    Output format is chosen from the filename suffix: csv, csv.gz, .pkl,
    or .pkl.gz.

    Parameters
    ----------
    df : DataFrame
        DataFrame to serialize.
    filename : str
        Output filename (file format is determined by suffix).

    Raises
    ------
    NotImplementedError
        If the filename extension is not recognized.
    """
    if filename.endswith('csv'):
        df.to_csv(filename, index=False)
    elif filename.endswith('csv.gz'):
        with gzip.open(filename, 'wb') as f:
            df.to_csv(f, index=False)
    elif filename.endswith('.pkl') or filename.endswith('.pkl.gz'):
        write_pickle(df, filename)
    else:
        raise NotImplementedError(
            'Unrecognized extension for "{}"'.format(filename))
def read_csv(filename):
    """
    Read CSV data into a DataFrame.

    Handles plain ``.csv`` and gzipped ``.csv.gz`` files.

    Parameters
    ----------
    filename : str
        Filename containing serialized data.

    Raises
    ------
    ValueError
        If the filename does not end in .csv or .csv.gz.
    """
    if filename.endswith('.csv'):
        return pd.read_csv(filename)
    elif filename.endswith('.csv.gz'):
        return pd.read_csv(filename, compression='gzip')
    else:
        raise ValueError('{} is not a csv file!'.format(filename))
def read_csv_features(filename):
    """
    Read features that were written to csv by featurize.py.

    The 'features' column is stored as space-separated numbers and is
    parsed back into one numpy array per row.

    Parameters
    ----------
    filename : str
        CSV filename containing features.

    Returns
    -------
    DataFrame with 'features' column containing numpy arrays.
    """
    df = read_csv(filename)
    features = []
    for _, row in df.iterrows():
        features.append(np.fromstring(row.features, sep=' '))
    del df['features']  # need to replace completely
    df.loc[:, 'features'] = pd.Series(features, index=df.index)
    return df
def read_pickle(filename):
    """
    Read pickled data from (possibly gzipped) files.

    Gzip compression is detected from the '.gz' filename suffix.

    Parameters
    ----------
    filename : str
        Filename.
    """
    if filename.endswith('.gz'):
        with gzip.open(filename) as f:
            data = cPickle.load(f)
    else:
        with open(filename) as f:
            data = cPickle.load(f)
    return data
def write_pickle(data, filename, protocol=cPickle.HIGHEST_PROTOCOL):
    """
    Write data to a (possibly gzipped) pickle.

    Gzip compression is selected by a '.gz' filename suffix. The output
    handle is closed even if pickling fails (the previous version leaked
    the file handle on error).

    Parameters
    ----------
    data : object
        Object to pickle.
    filename : str
        Filename.
    protocol : int, optional (default cPickle.HIGHEST_PROTOCOL)
        Pickle protocol.
    """
    if filename.endswith('.gz'):
        f = gzip.open(filename, 'wb')
    else:
        f = open(filename, 'wb')
    try:
        cPickle.dump(data, f, protocol)
    finally:
        f.close()
class DatasetSharder(object):
    """
    Split a dataset into chunks.

    Parameters
    ----------
    filename : str, optional
        Input filename. One of filename or mols must be provided.
    mols : iterable, optional
        Molecules to shard. One of filename or mols must be provided.
    shard_size : int, optional (default 1000)
        Number of molecules per shard.
    write_shards : bool, optional (default True)
        Write shards to disk.
    prefix : str, optional
        Prefix for output files.
    flavor : str, optional (default 'pkl.gz')
        Output molecule format used as the extension for shard filenames.
    start_index : int, optional (default 0)
        Starting index for shard filenames.
    """
    def __init__(self, filename=None, mols=None, shard_size=1000,
                 write_shards=True, prefix=None, flavor='pkl.gz',
                 start_index=0):
        if filename is None and mols is None:
            raise ValueError('One of filename or mols must be provided.')
        self.filename = filename
        self.mols = mols
        self.shard_size = shard_size
        self.write_shards = write_shards
        # If reading from a file and no prefix given, derive one from the
        # input filename.
        if self.filename is not None and prefix is None:
            prefix = self._guess_prefix()
        if write_shards and prefix is None:
            raise ValueError('One of filename or prefix must be provided ' +
                             'when writing shards.')
        self.prefix = prefix
        self.flavor = flavor
        self.index = start_index  # index used for the next shard filename
        self.writer = serial.MolWriter()

    def _guess_prefix(self):
        """
        Get the prefix from a filename.

        Takes everything in the basename before the first period. For example,
        the prefix for '../foo.bar.gz' is 'foo'.
        """
        return os.path.basename(self.filename).split('.')[0]

    def _next_filename(self):
        """
        Generate the next shard filename.
        """
        if self.prefix is None:
            raise ValueError('Prefix must be provided when writing shards.')
        filename = '{}-{}.{}'.format(self.prefix, self.index, self.flavor)
        self.index += 1
        return filename

    def read_mols_from_file(self):
        """
        Read molecules from a file.
        """
        with serial.MolReader().open(self.filename) as reader:
            for mol in reader.get_mols():
                yield mol

    def shard(self):
        """
        Split a dataset into chunks.

        If self.write_shards is False, a shard generator is returned. Each
        shard is an ndarray with dtype=object, which gives convenient access
        to ndarray operations (like fancy indexing) for downstream
        applications.
        """
        if self.write_shards:
            for shard in self._shard():
                self.write_shard(shard)
        else:
            return self._shard()

    def _shard(self):
        """
        Split a dataset into chunks.
        """
        if self.mols is None:
            self.mols = self.read_mols_from_file()
        shard = []
        for mol in self.mols:
            shard.append(mol)
            if len(shard) >= self.shard_size:
                yield np.asarray(shard)  # ndarray with dtype=object
                shard = []
        # Emit the final, possibly smaller, shard.
        if len(shard):
            yield np.asarray(shard)

    def __iter__(self):
        """
        Iterate through shards.
        """
        return self._shard()

    def write_shard(self, mols):
        """
        Write molecules to the next shard file.

        Molecules are converted to PicklableMols prior to writing to preserve
        properties such as molecule names.

        Parameters
        ----------
        mols : array_like
            Molecules.
        """
        mols = [PicklableMol(mol) for mol in mols]  # preserve properties
        filename = self._next_filename()
        with self.writer.open(filename) as f:
            f.write(mols)
def pad_array(x, shape, fill=0, both=False):
    """
    Pad an array with a fill value.

    Uses `range` instead of the Python-2-only `xrange` so the function also
    works under Python 3; behavior is otherwise unchanged.

    Parameters
    ----------
    x : ndarray
        Matrix.
    shape : tuple or int
        Desired shape. If int, all dimensions are padded to that size.
    fill : object, optional (default 0)
        Fill value.
    both : bool, optional (default False)
        If True, split the padding on both sides of each axis. If False,
        padding is applied to the end of each axis.
    """
    x = np.asarray(x)
    if not isinstance(shape, tuple):
        shape = tuple(shape for _ in range(x.ndim))
    pad = []
    for i in range(x.ndim):
        diff = shape[i] - x.shape[i]
        assert diff >= 0  # target shape must not be smaller than x
        if both:
            # Put the extra element (for odd diff) at the end of the axis.
            a, b = divmod(diff, 2)
            b += a
            pad.append((a, b))
        else:
            pad.append((0, diff))
    pad = tuple(pad)
    x = np.pad(x, pad, mode='constant', constant_values=fill)
    return x
class SmilesGenerator(object):
    """
    Generate SMILES strings for molecules.

    Parameters
    ----------
    remove_hydrogens : bool, optional (default True)
        Remove hydrogens prior to generating SMILES.
    assign_stereo_from_3d : bool, optional (default False)
        Assign stereochemistry from 3D coordinates. This will overwrite any
        existing stereochemistry information on molecules.
    """
    def __init__(self, remove_hydrogens=True, assign_stereo_from_3d=False):
        self.remove_hydrogens = remove_hydrogens
        self.assign_stereo_from_3d = assign_stereo_from_3d

    def get_smiles(self, mol):
        """
        Map a molecule name to its corresponding SMILES string.

        Returns a canonical, isomeric SMILES string.

        Parameters
        ----------
        mol : RDKit Mol
            Molecule.
        """
        if self.assign_stereo_from_3d:  # do this before removing hydrogens
            Chem.AssignAtomChiralTagsFromStructure(mol)
        if self.remove_hydrogens:
            mol = Chem.RemoveHs(mol)  # creates a copy
        return Chem.MolToSmiles(mol, isomericSmiles=True, canonical=True)

    def get_unique_smiles(self, mols):
        """
        Get unique SMILES for a set of molecules.

        Parameters
        ----------
        mols : iterable
            Molecules.
        """
        return np.unique([self.get_smiles(mol) for mol in mols])
class SmilesMap(object):
    """
    Map compound names to SMILES.

    Parameters
    ----------
    prefix : str, optional
        Prefix to prepend to IDs.
    allow_duplicates : bool, optional (default True)
        Allow duplicate SMILES.
    kwargs : dict, optional
        Keyword arguments for SmilesGenerator.
    """
    def __init__(self, prefix=None, allow_duplicates=True, **kwargs):
        self.prefix = prefix
        self.allow_duplicates = allow_duplicates
        self.engine = SmilesGenerator(**kwargs)
        self.map = {}  # name -> SMILES

    def add_mol(self, mol):
        """
        Map a molecule name to its corresponding SMILES string and store in the
        SMILES map.

        Raises ValueError on an ID collision (same name, different SMILES) or,
        when allow_duplicates is False, on a SMILES collision (same SMILES,
        different name). Bare numeric IDs require a prefix.

        Parameters
        ----------
        mol : RDKit Mol
            Molecule.
        """
        name = mol.GetProp('_Name')
        try:
            int(name)  # check if this is a bare ID
            if self.prefix is None:
                raise TypeError('Bare IDs are not allowed.')
        except ValueError:
            # Non-numeric names are fine as-is.
            pass
        if self.prefix is not None:
            name = '{}{}'.format(self.prefix, name)
        smiles = self.engine.get_smiles(mol)

        # Failures:
        # * Name is already mapped to a different SMILES
        # * SMILES is already used for a different name
        if name in self.map:  # catch all cases where name is already used
            if self.map[name] != smiles:
                raise ValueError('ID collision for "{}".'.format(name))
        elif not self.allow_duplicates and smiles in self.map.values():
            other = None
            for key, val in self.map.items():
                if val == smiles:
                    other = key
                    break
            raise ValueError(
                'SMILES collision between "{}" and "{}":\n\t{}'.format(
                    name, other, smiles))
        else:
            self.map[name] = smiles

    def get_map(self):
        """
        Get the map.
        """
        return self.map
class ScaffoldGenerator(object):
    """
    Generate molecular scaffolds.

    Parameters
    ----------
    include_chirality : : bool, optional (default False)
        Include chirality in scaffolds.
    """
    def __init__(self, include_chirality=False):
        self.include_chirality = include_chirality

    def get_scaffold(self, mol):
        """
        Get Murcko scaffolds for molecules.

        Murcko scaffolds are described in DOI: 10.1021/jm9602928. They are
        essentially that part of the molecule consisting of rings and the
        linker atoms between them. Returned as a SMILES string.

        Parameters
        ----------
        mols : array_like
            Molecules.
        """
        return MurckoScaffold.MurckoScaffoldSmiles(
            mol=mol, includeChirality=self.include_chirality)
| rbharath/vs-utils | vs_utils/utils/__init__.py | Python | gpl-3.0 | 11,924 | [
"RDKit"
] | 3baa248ef6af61dee68f0a10e225f74659da0618e451eb2b3a3b0dafe4796564 |
# run with: python manage.py test hs_core.tests.serialization.test_resourcemeta_sax_parsing
import unittest
import xml.sax
from hs_core.serialization import GenericResourceSAXHandler
from hs_geo_raster_resource.serialization import RasterResourceSAXHandler
from hs_app_netCDF.serialization import NetcdfResourceSAXHandler
class TestGenericResourceMetaSax(unittest.TestCase):
    """Exercise GenericResourceSAXHandler against a sample Dublin Core RDF document."""
    def setUp(self):
        self.parse_sample = """<?xml version="1.0"?>
<!DOCTYPE rdf:RDF PUBLIC "-//DUBLIN CORE//DCMES DTD 2002/07/31//EN"
"http://dublincore.org/documents/2002/07/31/dcmes-xml/dcmes-xml-dtd.dtd">
<rdf:RDF xmlns:dcterms="http://purl.org/dc/terms/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:hsterms="http://hydroshare.org/terms/">
<rdf:Description rdf:about="http://localhost:8000/resource/dc52e6aa93154521af08522de27ec276">
<dc:contributor>
<rdf:Description rdf:about="http://localhost:8000/user/1/">
<hsterms:name>Brian Miles</hsterms:name>
<hsterms:organization>Someplace</hsterms:organization>
<hsterms:email>foo@gmail.com</hsterms:email>
<hsterms:address>123 Main Street</hsterms:address>
<hsterms:phone rdf:resource="tel:412-555-1212"/>
<hsterms:homepage rdf:resource="http://www.ie.unc.edu/"/>
</rdf:Description>
</dc:contributor>
<dc:contributor>
<rdf:Description rdf:about="http://localhost:8000/user/2/">
<hsterms:name>Miles Brian</hsterms:name>
<hsterms:organization>Elsewhere</hsterms:organization>
<hsterms:email>bar@icloud.com</hsterms:email>
<hsterms:address>123 Wall Street</hsterms:address>
<hsterms:phone rdf:resource="tel:412-555-2121"/>
<hsterms:homepage rdf:resource="http://www.cmu.edu/"/>
</rdf:Description>
</dc:contributor>
<dc:subject>xDCIShare</dc:subject>
<dc:subject>cuahsi</dc:subject>
<dc:subject>Presentation</dc:subject>
<dc:subject>Hydroinformatics</dc:subject>
</rdf:Description>
</rdf:RDF>
"""
    def tearDown(self):
        pass
    def test_sax_parsing(self):
        """Parse the sample document and verify subjects and contributors."""
        handler = GenericResourceSAXHandler()
        # Encode so the sample also parses under Python 3, where
        # xml.sax.parseString() requires bytes (no-op change under Python 2).
        xml.sax.parseString(self.parse_sample.encode('utf-8'), handler)

        # assertEqual (rather than assertTrue(len(...) == N)) so a failure
        # reports the actual parsed values.
        self.assertEqual(list(handler.subjects),
                         ['xDCIShare', 'cuahsi', 'Presentation',
                          'Hydroinformatics'])

        expected_contributors = [
            ('http://localhost:8000/user/1/', 'Brian Miles', 'Someplace',
             'foo@gmail.com', '123 Main Street', '412-555-1212'),
            ('http://localhost:8000/user/2/', 'Miles Brian', 'Elsewhere',
             'bar@icloud.com', '123 Wall Street', '412-555-2121'),
        ]
        self.assertEqual(len(handler.contributors), len(expected_contributors))
        for contributor, (uri, name, organization, email, address, phone) in \
                zip(handler.contributors, expected_contributors):
            self.assertEqual(contributor.uri, uri)
            self.assertEqual(contributor.name, name)
            self.assertEqual(contributor.organization, organization)
            self.assertEqual(contributor.email, email)
            self.assertEqual(contributor.address, address)
            self.assertEqual(contributor.phone, phone)
class TestRasterResourceMetaSax(unittest.TestCase):
    """Exercise RasterResourceSAXHandler against a sample band-information document."""
    def setUp(self):
        self.parse_sample = """<?xml version="1.0"?>
<!DOCTYPE rdf:RDF PUBLIC "-//DUBLIN CORE//DCMES DTD 2002/07/31//EN"
"http://dublincore.org/documents/2002/07/31/dcmes-xml/dcmes-xml-dtd.dtd">
<rdf:RDF xmlns:dcterms="http://purl.org/dc/terms/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:hsterms="http://hydroshare.org/terms/">
<rdf:Description rdf:about="http://localhost:8000/resource/dc52e6aa93154521af08522de27ec276">
<hsterms:BandInformation>
<rdf:Description>
<hsterms:name>Band_1</hsterms:name>
<hsterms:variableName>red</hsterms:variableName>
<hsterms:variableUnit>DN</hsterms:variableUnit>
<hsterms:method>measured</hsterms:method>
<hsterms:comment>real good.</hsterms:comment>
</rdf:Description>
</hsterms:BandInformation>
<hsterms:BandInformation>
<rdf:Description>
<hsterms:name>Band_2</hsterms:name>
<hsterms:variableName>green</hsterms:variableName>
<hsterms:variableUnit>DN</hsterms:variableUnit>
<hsterms:method>guessed</hsterms:method>
<hsterms:comment>not so good.</hsterms:comment>
</rdf:Description>
</hsterms:BandInformation>
<hsterms:BandInformation>
<rdf:Description>
<hsterms:name>Band_3</hsterms:name>
<hsterms:variableName>blue</hsterms:variableName>
<hsterms:variableUnit>DN</hsterms:variableUnit>
<hsterms:method>random</hsterms:method>
<hsterms:comment>random like.</hsterms:comment>
</rdf:Description>
</hsterms:BandInformation>
</rdf:Description>
</rdf:RDF>
"""
    def tearDown(self):
        pass
    def test_sax_parsing(self):
        """Parse the sample document and verify each parsed band record."""
        handler = RasterResourceSAXHandler()
        # Encode so the sample also parses under Python 3, where
        # xml.sax.parseString() requires bytes (no-op change under Python 2).
        xml.sax.parseString(self.parse_sample.encode('utf-8'), handler)

        expected_bands = [
            ('Band_1', 'red', 'DN', 'measured', 'real good.'),
            ('Band_2', 'green', 'DN', 'guessed', 'not so good.'),
            ('Band_3', 'blue', 'DN', 'random', 'random like.'),
        ]
        # assertEqual (rather than assertTrue(len(...) == N)) so a failure
        # reports the actual parsed values.
        self.assertEqual(len(handler.band_info), len(expected_bands))
        for band, (name, variable_name, variable_unit, method, comment) in \
                zip(handler.band_info, expected_bands):
            self.assertEqual(band.name, name)
            self.assertEqual(band.variableName, variable_name)
            self.assertEqual(band.variableUnit, variable_unit)
            self.assertEqual(band.method, method)
            self.assertEqual(band.comment, comment)
class TestNetcdfResourceMetaSax(unittest.TestCase):
    """Exercise NetcdfResourceSAXHandler against a sample variable-description document."""
    def setUp(self):
        self.parse_sample = """<?xml version="1.0"?>
<!DOCTYPE rdf:RDF PUBLIC "-//DUBLIN CORE//DCMES DTD 2002/07/31//EN"
"http://dublincore.org/documents/2002/07/31/dcmes-xml/dcmes-xml-dtd.dtd">
<rdf:RDF xmlns:dcterms="http://purl.org/dc/terms/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:hsterms="http://hydroshare.org/terms/">
<rdf:Description rdf:about="http://localhost:8000/resource/dc52e6aa93154521af08522de27ec276">
<hsterms:netcdfVariable>
<rdf:Description>
<hsterms:shape>Time,south_north,west_east</hsterms:shape>
<hsterms:name>ACLWDNB</hsterms:name>
<hsterms:longName>Long ACLWDNB</hsterms:longName>
<hsterms:missingValue>NA</hsterms:missingValue>
<hsterms:type>Float</hsterms:type>
<hsterms:comment>Something flippant. </hsterms:comment>
<hsterms:unit>J m-2</hsterms:unit>
</rdf:Description>
</hsterms:netcdfVariable>
<hsterms:netcdfVariable>
<rdf:Description>
<hsterms:shape>Time,force_soil_layers</hsterms:shape>
<hsterms:name>T_SOIL_FORCING_TEND</hsterms:name>
<hsterms:longName>Long T_SOIL_FORCING_TEND</hsterms:longName>
<hsterms:missingValue>-999</hsterms:missingValue>
<hsterms:type>Float</hsterms:type>
<hsterms:comment>Something better.</hsterms:comment>
<hsterms:unit>K s-1</hsterms:unit>
</rdf:Description>
</hsterms:netcdfVariable>
<hsterms:netcdfVariable>
<rdf:Description>
<hsterms:shape>Time,south_north,west_east</hsterms:shape>
<hsterms:name>LWUPT</hsterms:name>
<hsterms:longName>Long LWUPT</hsterms:longName>
<hsterms:missingValue>-42424242</hsterms:missingValue>
<hsterms:type>Float</hsterms:type>
<hsterms:comment>Not helpful.</hsterms:comment>
<hsterms:unit>W m-2</hsterms:unit>
</rdf:Description>
</hsterms:netcdfVariable>
</rdf:Description>
</rdf:RDF>
"""
    def tearDown(self):
        pass
    def test_sax_parsing(self):
        """Parse the sample document and verify each parsed netCDF variable."""
        handler = NetcdfResourceSAXHandler()
        # Encode so the sample also parses under Python 3, where
        # xml.sax.parseString() requires bytes (no-op change under Python 2).
        xml.sax.parseString(self.parse_sample.encode('utf-8'), handler)

        expected_variables = [
            ('ACLWDNB', 'Time,south_north,west_east', 'Long ACLWDNB',
             'NA', 'Float', 'Something flippant. ', 'J m-2'),
            ('T_SOIL_FORCING_TEND', 'Time,force_soil_layers',
             'Long T_SOIL_FORCING_TEND', '-999', 'Float',
             'Something better.', 'K s-1'),
            ('LWUPT', 'Time,south_north,west_east', 'Long LWUPT',
             '-42424242', 'Float', 'Not helpful.', 'W m-2'),
        ]
        # assertEqual (rather than assertTrue(len(...) == N)) so a failure
        # reports the actual parsed values.
        self.assertEqual(len(handler.variables), len(expected_variables))
        for variable, (name, shape, long_name, missing_value, var_type,
                       comment, unit) in \
                zip(handler.variables, expected_variables):
            self.assertEqual(variable.name, name)
            self.assertEqual(variable.shape, shape)
            self.assertEqual(variable.longName, long_name)
            self.assertEqual(variable.missingValue, missing_value)
            self.assertEqual(variable.type, var_type)
            self.assertEqual(variable.comment, comment)
            self.assertEqual(variable.unit, unit)
| RENCI/xDCIShare | hs_core/tests/serialization/test_resourcemeta_sax_parsing.py | Python | bsd-3-clause | 11,024 | [
"Brian"
] | 994cb43c71b438e89735d1815e86ac0155161a3ce5a00f03a02ff6ef36b1dad1 |
#!/usr/bin/env python
'''
Project: Geothon (https://github.com/MBoustani/Geothon)
File: Vector/netcdf_to_shp.py
Description: This code converts netCDF file to Shapefile.
Author: Maziyar Boustani (github.com/MBoustani)
'''
import os
from netCDF4 import Dataset
try:
import ogr
except ImportError:
from osgeo import ogr
try:
import osr
except ImportError:
from osgeo import osr
# an example netCDF file bundled with the repository
nc_file = "../static_files/netcdf/airs_h2o_128x256_miroc5_sep04.nc"
# open the netCDF file read-only
nc_dataset = Dataset(nc_file, 'r')
# names of the netCDF dimensions/variables of interest
latitude = 'lat'
longitude = 'lon'
time = 'time'
value = 'H2OMMRLevStd_average'
# number of records along the time dimension
num_time = len(nc_dataset.dimensions[time])
# netCDF variable objects
latitudes = nc_dataset.variables[latitude]
longitudes = nc_dataset.variables[longitude]
values = nc_dataset.variables[value]
# read the variable data into memory
lats = latitudes[:]
lons = longitudes[:]
vals = values[:, :, :, :]
# integer lists of latitudes and longitudes (rebinds the names that held the
# netCDF variable objects; those objects are no longer needed past this point)
latitudes = [int(i) for i in lats]
longitudes = [int(i) for i in lons]
# an output shapefile name and layer name
shapefile = 'multipoints.shp'
layer_name = 'multipoint_layer'
# create the ESRI Shapefile driver and (re)create the data source
driver = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(shapefile):
    driver.DeleteDataSource(shapefile)
data_source = driver.CreateDataSource(shapefile)
# spatial reference, in this case WGS84
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
# point layer: one feature per (lon, lat) grid node
layer = data_source.CreateLayer(layer_name, srs, ogr.wkbPoint)
# one string attribute column per time step; the loop variable is named 't'
# (not 'time') so it does not clobber the dimension-name variable above
for t in range(num_time):
    field_name = ogr.FieldDefn("time_{0}".format(t), ogr.OFTString)
    field_name.SetWidth(50)
    layer.CreateField(field_name)
for lat_idx, lat_val in enumerate(latitudes):
    for lon_idx, lon_val in enumerate(longitudes):
        # define a point geometry at this grid node
        point = ogr.Geometry(ogr.wkbPoint)
        point.AddPoint(lon_val, lat_val)
        # create a feature and attach the point geometry
        feature = ogr.Feature(layer.GetLayerDefn())
        feature.SetGeometry(point)
        for t in range(num_time):
            # fill the attribute table with netCDF values for each time;
            # index 0 picks the first level of the altitude dimension.
            # NOTE(review): assumes vals is ordered (lon, lat, alt, time) --
            # confirm against the file's actual dimension ordering.
            feature.SetField("time_{0}".format(t),
                             str(vals[lon_idx, lat_idx, 0, t]))
        layer.CreateFeature(feature)
        feature.Destroy()
# explicitly destroy the OGR data source so the shapefile is flushed to disk
# (the original relied on garbage collection), and close the netCDF file
data_source.Destroy()
nc_dataset.close()
"NetCDF"
] | 8280d951fafaf70bbf74c88781324c3ec543d774b9861caaf162f3aecdca80df |
import gen_utils
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
class opening(ScriptedConfigModuleMixin, ModuleBase):
    """Greyscale morphological opening: a 3D erosion piped into a 3D dilation."""

    def __init__(self, module_manager):
        # initialise our base class first
        ModuleBase.__init__(self, module_manager)

        # build the erode -> dilate VTK mini-pipeline
        self._imageDilate = vtk.vtkImageContinuousDilate3D()
        self._imageErode = vtk.vtkImageContinuousErode3D()
        self._imageDilate.SetInput(self._imageErode.GetOutput())

        module_utils.setup_vtk_object_progress(
            self, self._imageDilate, 'Performing greyscale 3D dilation')
        module_utils.setup_vtk_object_progress(
            self, self._imageErode, 'Performing greyscale 3D erosion')

        # default structuring-element size
        self._config.kernelSize = (3, 3, 3)

        config_list = [
            ('Kernel size:', 'kernelSize', 'tuple:int,3', 'text',
             'Size of the kernel in x,y,z dimensions.')]

        ScriptedConfigModuleMixin.__init__(
            self, config_list,
            {'Module (self)' : self,
             'vtkImageContinuousDilate3D' : self._imageDilate,
             'vtkImageContinuousErode3D' : self._imageErode})

        self.sync_module_logic_with_config()

    def close(self):
        # defensively disconnect every input (the graph editor / module
        # manager should already have done this for us)
        for idx in range(len(self.get_input_descriptions())):
            self.set_input(idx, None)
        # the mixin takes care of all display thingies
        ScriptedConfigModuleMixin.close(self)
        ModuleBase.close(self)
        # drop our references to the VTK filters
        del self._imageDilate
        del self._imageErode

    def get_input_descriptions(self):
        return ('vtkImageData',)

    def set_input(self, idx, inputStream):
        # erosion is the first stage of the pipeline
        self._imageErode.SetInput(inputStream)

    def get_output_descriptions(self):
        return ('Opened image (vtkImageData)',)

    def get_output(self, idx):
        # dilation is the last stage, so its output is the module's output
        return self._imageDilate.GetOutput()

    def logic_to_config(self):
        # the dilate/erode pair are assumed to be in sync, so reading
        # either one's kernel size is sufficient
        self._config.kernelSize = self._imageErode.GetKernelSize()

    def config_to_logic(self):
        size3 = self._config.kernelSize
        self._imageDilate.SetKernelSize(size3[0], size3[1], size3[2])
        self._imageErode.SetKernelSize(size3[0], size3[1], size3[2])

    def execute_module(self):
        # run the pipeline in data-flow order: erode, then dilate
        self._imageErode.Update()
        self._imageDilate.Update()
"VTK"
] | 5ac59fee091dc8985c8d5bb0307015426807e53988d935d8cde6d45b0387bd01 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.linalg
'''
Extension to scipy.linalg module developed for PBC branch.
'''
def davidson_nosymm(matvec, size, nroots, Adiag=None):
    """Davidson diagonalization method to solve A c = E c
    when A is not Hermitian.
    """
    def _matvec_drop_args(vec, args):
        # Arnoldi expects a two-argument mat-vec callback; the extra
        # args slot is unused here.
        return matvec(vec)

    howmany = size if nroots > size else nroots
    # Adiag is accepted for interface compatibility but currently unused.
    seed = np.ones((size, 1))
    precond = np.ones((size, 1))
    solver = Arnoldi(_matvec_drop_args, seed, precond, nroots=howmany)
    return solver.solve()
VERBOSE = False  # set True to print per-root convergence progress from Arnoldi
class Arnoldi:
def __init__(self,matr_multiply,xStart,inPreCon,nroots=1,tol=1e-6):
self.matrMultiply = matr_multiply
self.size = xStart.shape[0]
self.nEigen = min(nroots, self.size)
self.maxM = min(30, self.size)
self.maxOuterLoop = 10
self.tol = tol
#
# Creating initial guess and preconditioner
#
self.x0 = xStart.real.copy()
self.iteration = 0
self.totalIter = 0
self.converged = False
self.preCon = inPreCon.copy()
#
# Allocating other vectors
#
self.allocateVecs()
def solve(self):
while self.converged == 0:
if self.totalIter == 0:
self.guessInitial()
for i in xrange(self.maxM):
if self.deflated == 1:
self.currentSize = self.nEigen
if self.deflated == 0 and self.totalIter > 0:
self.hMult()
self.push_Av()
self.constructSubspace()
self.solveSubspace()
self.constructSol()
self.computeResidual()
self.checkConvergence()
self.deflated = 0
if self.converged:
break
self.updateVecs()
self.checkDeflate()
self.constructDeflatedSub()
self.totalIter += 1
self.currentSize += 1
print("")
print("Converged in %3d cycles" % self.totalIter)
self.constructAllSolV()
return self.outeigs, self.outevecs
def allocateVecs(self):
self.subH = np.zeros( shape=(self.maxM,self.maxM), dtype=complex )
self.sol = np.zeros( shape=(self.maxM), dtype=complex )
self.dgks = np.zeros( shape=(self.maxM), dtype=complex )
self.nConv = np.zeros( shape=(self.maxM), dtype=int )
self.eigs = np.zeros( shape=(self.maxM), dtype=complex )
self.evecs = np.zeros( shape=(self.maxM,self.maxM), dtype=complex )
self.oldeigs = np.zeros( shape=(self.maxM), dtype=complex )
self.deigs = np.zeros( shape=(self.maxM), dtype=complex )
self.outeigs = np.zeros( shape=(self.nEigen), dtype=complex )
self.outevecs = np.zeros( shape=(self.size,self.nEigen), dtype=complex)
self.currentSize = 0
self.Ax = np.zeros( shape=(self.size), dtype=complex )
self.res = np.zeros( shape=(self.size), dtype=complex )
self.vlist = np.zeros( shape=(self.maxM,self.size), dtype=complex )
self.cv = np.zeros( shape = (self.size), dtype = complex )
self.cAv = np.zeros( shape = (self.size), dtype = complex )
self.Avlist = np.zeros( shape=(self.maxM,self.size), dtype=complex )
self.dres = 999.9
self.resnorm = 999.9
self.cvEig = 0.1
self.ciEig = 0
self.deflated = 0
def guessInitial(self):
nrm = np.linalg.norm(self.x0)
self.x0 *= 1./nrm
self.currentSize = self.nEigen
for i in xrange(self.currentSize):
self.vlist[i] *= 0.0
self.vlist[i,i] = 1.0 + 0.0*1j
self.vlist[i] /= np.linalg.norm(self.vlist[i])
for i in xrange(self.currentSize):
self.cv = self.vlist[i].copy()
self.hMult()
self.Avlist[i] = self.cAv.copy()
self.constructSubspace()
def hMult(self):
args = 0
self.cAv = self.matrMultiply(self.cv.reshape(self.size),args)
def push_Av(self):
self.Avlist[self.currentSize-1] = self.cAv.reshape(self.size)
def constructSubspace(self):
if self.totalIter == 0 or self.deflated == 1: # construct the full block of v^*Av
for i in xrange(self.currentSize):
for j in xrange(self.currentSize):
val = np.vdot(self.vlist[i],self.Avlist[j])
self.subH[i,j] = val
else:
for j in xrange(self.currentSize):
if j <= (self.currentSize-1):
val = np.vdot(self.vlist[j],self.Avlist[self.currentSize-1])
self.subH[j,self.currentSize-1] = val
if j < (self.currentSize-1):
val = np.vdot(self.vlist[self.currentSize-1],self.Avlist[j])
self.subH[self.currentSize-1,j] = val
def solveSubspace(self):
w, v = scipy.linalg.eig(self.subH[:self.currentSize,:self.currentSize])
idx = w.real.argsort()
#imag_norm = np.linalg.norm(w.imag)
#if imag_norm > 1e-12:
# print " *************************************************** "
# print " WARNING IMAGINARY EIGENVALUE OF NORM %.15g " % (imag_norm)
# print " *************************************************** "
#print "Imaginary norm eigenvectors = ", np.linalg.norm(v.imag)
#print "Imaginary norm eigenvalue = ", np.linalg.norm(w.imag)
v = v[:,idx]
w = w[idx].real
self.sol[:self.currentSize] = v[:,self.ciEig]
self.evecs[:self.currentSize,:self.currentSize] = v
self.eigs[:self.currentSize] = w[:self.currentSize]
self.outeigs[:self.nEigen] = w[:self.nEigen]
self.cvEig = self.eigs[self.ciEig]
def constructAllSolV(self):
for i in range(self.nEigen):
self.sol[:] = self.evecs[:,i]
self.cv = np.dot(self.vlist[:self.currentSize].transpose(),self.sol[:self.currentSize])
self.outevecs[:,i] = self.cv
def constructSol(self):
self.constructSolV()
self.constructSolAv()
def constructSolV(self):
self.cv = np.dot(self.vlist[:self.currentSize].transpose(),self.sol[:self.currentSize])
def constructSolAv(self):
self.cAv = np.dot(self.Avlist[:self.currentSize].transpose(),self.sol[:self.currentSize])
def computeResidual(self):
self.res = self.cAv - self.cvEig * self.cv
self.dres = np.vdot(self.res,self.res)**0.5
#
# gram-schmidt for residual vector
#
for i in xrange(self.currentSize):
self.dgks[i] = np.vdot( self.vlist[i], self.res )
self.res -= self.dgks[i]*self.vlist[i]
#
# second gram-schmidt to make them really orthogonal
#
for i in xrange(self.currentSize):
self.dgks[i] = np.vdot( self.vlist[i], self.res )
self.res -= self.dgks[i]*self.vlist[i]
self.resnorm = np.linalg.norm(self.res)
self.res /= self.resnorm
orthog = 0.0
for i in xrange(self.currentSize):
orthog += np.vdot(self.res,self.vlist[i])**2.0
orthog = orthog ** 0.5
if not self.deflated:
if VERBOSE:
print("%3d %20.14f %20.14f %10.4g" % (self.ciEig, self.cvEig.real, self.resnorm.real, orthog.real))
#else:
# print "%3d %20.14f %20.14f %20.14f (deflated)" % (self.ciEig, self.cvEig,
# self.resnorm, orthog)
self.iteration += 1
def updateVecs(self):
self.vlist[self.currentSize] = self.res.copy()
self.cv = self.vlist[self.currentSize]
def checkConvergence(self):
if self.resnorm < self.tol:
if VERBOSE:
print("Eigenvalue %3d converged! (res = %.15g)" % (self.ciEig, self.resnorm))
self.ciEig += 1
if self.ciEig == self.nEigen:
self.converged = True
if self.resnorm < self.tol and not self.converged:
if VERBOSE:
print("")
print("")
print("%-3s %-20s %-20s %-8s" % ("#", " Eigenvalue", " Res. Norm.", " Ortho. (should be ~0)"))
def gramSchmidtCurrentVec(self,northo):
for i in xrange(northo):
self.dgks[i] = np.vdot( self.vlist[i], self.cv )
self.cv -= self.dgks[i]*self.vlist[i] #/ np.vdot(self.vlist[i],self.vlist[i])
self.cv /= np.linalg.norm(self.cv)
def checkDeflate(self):
if self.currentSize == self.maxM-1:
self.deflated = 1
#print "deflating..."
for i in xrange(self.nEigen):
self.sol[:self.currentSize] = self.evecs[:self.currentSize,i]
self.constructSolV() # Finds the "best" eigenvector for this eigenvalue
self.Avlist[i] = self.cv.copy() # Puts this guess in self.Avlist rather than self.vlist for now...
# since this would mess up self.constructSolV()'s solution
for i in xrange(self.nEigen):
self.cv = self.Avlist[i].copy() # This is actually the "best" eigenvector v, not A*v (see above)
self.gramSchmidtCurrentVec(i)
self.vlist[i] = self.cv.copy()
for i in xrange(self.nEigen):
self.cv = self.vlist[i].copy() # This is actually the "best" eigenvector v, not A*v (see above)
self.hMult() # Use current vector cv to create cAv
self.Avlist[i] = self.cAv.copy()
def constructDeflatedSub(self):
if self.deflated == 1:
self.currentSize = self.nEigen
self.constructSubspace()
| gkc1000/pyscf | pyscf/pbc/lib/arnoldi.py | Python | apache-2.0 | 10,542 | [
"PySCF"
] | 0c34e3033c38805173627289884ca3bf29f2846d5f4a858c23472297943efa01 |
# Copyright 2012 Patrick Varilly, Stefano Angioletti-Uberti
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
==========================================================================
Physical constants in :mod:`correct units <units>` (:mod:`dnacc.physics``)
==========================================================================
Fundamental constants
+++++++++++++++++++++
.. autodata:: c
.. autodata:: mu_0
.. autodata:: eps_0
.. autodata:: N_A
.. autodata:: kB
.. autodata:: R
Atomic and Nuclear Physics
++++++++++++++++++++++++++
.. autodata:: e
.. autodata:: eV
.. autodata:: amu
.. autodata:: m_e
.. autodata:: a_0
.. autodata:: D
"""
from . import units
# Raw numeric values taken from the GNU Scientific Library (GSL) MKSA/NUM
# constant tables; the trailing comment on each line gives its plain-SI unit.
_GSL_CONST_MKSA_SPEED_OF_LIGHT = 2.99792458e8  # m / s
_GSL_CONST_MKSA_VACUUM_PERMITTIVITY = 8.854187817e-12  # A^2 s^4 / kg m^3
_GSL_CONST_MKSA_VACUUM_PERMEABILITY = 1.25663706144e-6  # kg m / A^2 s^2
_GSL_CONST_NUM_AVOGADRO = 6.02214199e23  # 1 / mol
_GSL_CONST_MKSA_BOLTZMANN = 1.3806504e-23  # kg m^2 / K s^2
_GSL_CONST_MKSA_MOLAR_GAS = 8.314472e0  # kg m^2 / K mol s^2
_GSL_CONST_MKSA_ELECTRON_CHARGE = 1.602176487e-19  # A s
_GSL_CONST_MKSA_ELECTRON_VOLT = 1.602176487e-19  # kg m^2 / s^2
_GSL_CONST_MKSA_UNIFIED_ATOMIC_MASS = 1.660538782e-27  # kg
_GSL_CONST_MKSA_MASS_ELECTRON = 9.10938188e-31  # kg
_GSL_CONST_MKSA_BOHR_RADIUS = 5.291772083e-11  # m
_GSL_CONST_MKSA_DEBYE = 3.33564095198e-30  # A s^2 / m^2
# Fundamental constants, expressed in the package's own unit system
# (the ``units`` module rescales the raw MKSA numbers above)
# =================================================================
#: Speed of light in vacuum
c = _GSL_CONST_MKSA_SPEED_OF_LIGHT * units.m / units.s
#: Permeability of free space, :math:`\mu_0`
mu_0 = _GSL_CONST_MKSA_VACUUM_PERMEABILITY * units.N / units.Ampere ** 2
#: Permittivity of free space, :math:`\epsilon_0`
eps_0 = _GSL_CONST_MKSA_VACUUM_PERMITTIVITY * units.F / units.m
#: Avogadro's number (dimensionless count per mole)
N_A = _GSL_CONST_NUM_AVOGADRO
#: Boltzmann's constant
kB = _GSL_CONST_MKSA_BOLTZMANN * units.J / units.K
#: Gas constant (numerically identical to kB in base units!)
R = _GSL_CONST_MKSA_MOLAR_GAS * units.J / (units.K * units.mol)
# Atomic and Nuclear Physics
# ==========================
#: Electron charge (elementary charge)
e = _GSL_CONST_MKSA_ELECTRON_CHARGE * units.C
#: Electron volt (energy unit)
eV = _GSL_CONST_MKSA_ELECTRON_VOLT * units.J
#: Atomic mass unit
amu = _GSL_CONST_MKSA_UNIFIED_ATOMIC_MASS * units.kg
#: Mass of electron
m_e = _GSL_CONST_MKSA_MASS_ELECTRON * units.kg
#: Bohr radius
a_0 = _GSL_CONST_MKSA_BOHR_RADIUS * units.m
#: Debye (dipole-moment unit)
D = _GSL_CONST_MKSA_DEBYE * units.C * units.m
| patvarilly/DNACC | dnacc/physics.py | Python | gpl-3.0 | 3,077 | [
"Avogadro"
] | 4d498f1d18c355c9d159dcb053491f9e39a6787df7b8a3a17a07e180797aaf13 |
import webbrowser
import tempfile
import shutil
import requests
import logging
import platform
import os
import configparser
from collections import Counter
from PyQt5 import QtCore, QtWidgets, Qt, QtGui
from ui.Ui_mainwindow import Ui_MainWindow
from ui.Ui_aboutwindow import Ui_aboutWindow
from ui.Ui_aboutstandard import Ui_aboutStandard
from ui.Ui_logwindow import Ui_Changelog
from ui.Ui_presavewindow import Ui_presaveWindow
from ui.Ui_addsite import Ui_Addsite
from ui.Ui_addUrl import Ui_AddURL
from ui.Ui_apiwindow import Ui_apiWindow
from ui._version import _asmm_version
from ui._version import _xml_version
from ui._version import _py_version
from ui._version import _report_version
from ui._version import _qt_version
from ui._version import _eclipse_version
from functions.asmm_xml import create_asmm_xml
from functions.asmm_xml import read_asmm_xml
from functions.asmm_pdf import create_asmm_pdf
from functions.netcdf_lite import NetCdf
from functions.button_functions import add_clicked
from functions.button_functions import button_clicked
from functions.button_functions import add_image
from functions.button_functions import delete_image
from functions.button_functions import display_image
from functions.sql_functions import objectsInit
from functions.check_functions import fill_all_fields
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, path, parent=None):
self.asmm_path = path
QtWidgets.QMainWindow.__init__(self, parent)
logging.info('mainwindow.py - UI initialization ...')
self.setupUi(self)
objectsInit(self)
self.aircraft_db = {}
self.operator_db = {}
self.fill_operator_rolebox()
self.dirpath = tempfile.mkdtemp()
all_check_boxes = self.findChildren(QtWidgets.QCheckBox)
for check_box in all_check_boxes:
check_box.stateChanged.connect(lambda: self.set_modified())
all_text_edits = self.findChildren(QtWidgets.QTextEdit)
for widget in all_text_edits:
widget.textChanged.connect(lambda: self.set_modified())
all_line_edits = self.findChildren(QtWidgets.QLineEdit)
for widget in all_line_edits:
widget.textChanged.connect(lambda: self.set_modified())
self.date_dt.dateChanged.connect(lambda: self.set_modified())
all_tool_buttons = self.findChildren(QtWidgets.QToolButton)
for widget in all_tool_buttons:
widget.clicked.connect(lambda: self.toolButton_clicked())
all_rolbox_edits = self.findChildren(QtWidgets.QComboBox)
for widget in all_rolbox_edits:
widget.activated.connect(lambda: self.set_modified())
self.operator_cb.activated.connect(lambda: self.operator_changed())
self.location_cb.addItems(self.locations)
self.location_cb.activated.connect(lambda: self.location_changed())
self.newOperator_ln.hide()
self.newAircraft_ln.hide()
self.label_38.hide()
self.label_39.hide()
self.newCountry_lb.hide()
self.newCountry_cb.hide()
self.newRegistration_lb.hide()
self.newRegistration_ln.hide()
self.newManufacturer_lb.hide()
self.newManufacturer_ln.hide()
"patch for combobox stylesheet"
itemDelegate = QtWidgets.QStyledItemDelegate()
self.location_cb.setItemDelegate(itemDelegate)
self.detailList.setItemDelegate(itemDelegate)
self.operator_cb.setItemDelegate(itemDelegate)
self.aircraft_cb.setItemDelegate(itemDelegate)
self.newCountry_cb.setItemDelegate(itemDelegate)
self.contact_cb.setItemDelegate(itemDelegate)
"-----------------------------"
self.menubar.setStyleSheet("QMenuBar {\n"
" background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, \n"
" stop: 0 #f0f0f0, stop: 1 #e5e5e5);\n"
"}\n"
"\n"
"QMenuBar::item {\n"
" spacing: 3px;\n"
" padding: 5px 5px 5px 5px;\n"
" background: transparent;\n"
"}\n"
"\n"
"QMenuBar::item:selected {\n"
" border: 0px solid #7eb4ea;\n"
" border-radius: 1px;\n"
" background-color: rgb(200,200,200);\n"
"}\n"
"\n"
"QMenu {\n"
" background-color: #f0f0f0;\n"
" border: 0px solid #f0f0f0;\n"
"}\n"
"\n"
"QMenu::item:selected {\n"
" background-color: rgb(200,200,200);\n"
" color: black;\n"
"}\n"
"\n"
"QMenu::icon {\n"
" margin-left: 20px;\n"
" background-color: red;\n"
" border: none;\n"
"}")
self.make_window_title()
self.api_eufar_acronym_completer()
self.api_eufar_database_parsing()
config_dict = configparser.ConfigParser()
config_dict.read(os.path.join(path, 'asmm_creator.ini'))
show_api_info = config_dict.get('OPTIONS', 'api_info')
result = False
if show_api_info == 'True':
result = self.api_eufar_information()
if show_api_info != str(result):
config_dict.set('OPTIONS', 'api_info', str(result))
with open(os.path.join(path, 'asmm_creator.ini'), 'w') as configfile:
config_dict.write(configfile)
logging.info('mainwindow.py - UI initialized !')
logging.info('**************************************************')
@QtCore.pyqtSlot()
def on_actionNew_triggered(self):
logging.debug('mainwindow.py - on_actionNew_triggered - self.modified ' + str(self.modified))
if self.modified:
result = self.make_onsave_msg_box("Clear")
if result == "iw_saveButton":
self.save_document()
self.reset_all_fields()
elif result == "iw_nosaveButton":
self.reset_all_fields()
else:
self.reset_all_fields()
@QtCore.pyqtSlot()
def on_actionSave_triggered(self):
logging.debug('mainwindow.py - on_actionSave_triggered')
self.save_document()
@QtCore.pyqtSlot()
def on_actionSave_As_triggered(self):
logging.debug('mainwindow.py - on_actionSave_As_triggered')
self.save_document(save_as=True)
@QtCore.pyqtSlot()
def on_actionPrint_triggered(self):
logging.debug('mainwindow.py - on_actionPrint_triggered')
self.out_file_name_pdf = self.get_file_name_pdf()
if not self.out_file_name_pdf:
return
if '.pdf' not in self.out_file_name_pdf:
self.out_file_name_pdf = self.out_file_name_pdf + '.pdf'
create_asmm_pdf(self, self.out_file_name_pdf)
@QtCore.pyqtSlot()
def on_actionOpen_triggered(self):
logging.debug('mainwindow.py - on_actionOpen_triggered - self.modified ' + str(self.modified))
if self.modified:
result = self.make_onsave_msg_box("Open")
if result == "iw_saveButton":
self.save_document()
self.open_file()
elif result == "iw_nosaveButton":
self.open_file()
else:
self.open_file()
@QtCore.pyqtSlot()
def on_actionExit_triggered(self):
logging.debug('mainwindow.py - on_actionExit_triggered')
self.close()
@QtCore.pyqtSlot()
def on_actionEUFAR_N7SP_triggered(self):
logging.debug('mainwindow.py - on_actionEUFAR_N7SP_triggered')
webbrowser.open('http://www.eufar.net/cms/standards-and-protocols/')
@QtCore.pyqtSlot()
def on_actionHelp_triggered(self):
logging.debug('mainwindow.py - on_actionHelp_triggered')
webbrowser.open('http://www.eufar.net/cms/airborne-science-mission-metadata-help/')
    @QtCore.pyqtSlot()
    def on_actionASMM_CreatorAbout_triggered(self):
        """Handle Help > About: show the About dialog centred on the main window."""
        logging.debug('mainwindow.py - on_actionASMM_CreatorAbout_triggered')
        # Rich-text blurb; the %-placeholders are filled with the version constants below.
        aboutText = ("The Airborne Science Mission Metadata (ASMM) Creator v%s offline version, was "
                     + "developed by EUFAR using Eclipse %s, Python %s and PyQt %s. XML files generated by this "
                     + "version conform to v%s of the ASMM XML standard. The opensource reporting library (v%s) "
                     + "used for PDF report generation is provided and owned by <a href=http://www.reportlab.com"
                     + "/opensource><span style=\" text-decoration: underline; color:#0000ff;\">Reportlab</a>.<b"
                     + "r><br>For more information, or to report a bug, please contact <a href='mailto:"
                     + "bureau.at.eufar.net'><span style=\" text-decoration: underline; color:#0000ff;\">"
                     + "bureau.at.eufar.net</a>.<br><br>The latest offline version and source code of the ASMM Creat"
                     + "or can be found at <a href=https://github.com/EUFAR/asmm-eufar><span style=\" text-d"
                     + "ecoration: underline; color:#0000ff;\">https://github.com/EUFAR/asmm-eufar</a>.") % (_asmm_version,
                                                                                                            _eclipse_version,
                                                                                                            _py_version,
                                                                                                            _qt_version,
                                                                                                            _xml_version,
                                                                                                            _report_version)
        self.aboutWindow = MyAbout(aboutText)
        # Centre the dialog on the main window.
        x1, y1, w1, h1 = self.geometry().getRect()
        _, _, w2, h2 = self.aboutWindow.geometry().getRect()
        # NOTE(review): '/' yields floats under Python 3; PyQt geometry setters
        # expect ints — confirm this works and consider '//'.
        x2 = x1 + w1/2 - w2/2
        y2 = y1 + h1/2 - h2/2
        self.aboutWindow.setGeometry(x2, y2, w2, h2)
        self.aboutWindow.setMinimumSize(QtCore.QSize(480, self.aboutWindow.sizeHint().height()))
        self.aboutWindow.setMaximumSize(QtCore.QSize(480, self.aboutWindow.sizeHint().height()))
        self.aboutWindow.exec_()
    @QtCore.pyqtSlot()
    def on_actionASMM_XML_Standard_triggered(self):
        """Handle Help > ASMM XML Standard: show the standard-description dialog."""
        logging.debug('mainwindow.py - on_actionASMM_XML_Standard_triggered')
        aboutText = ("<html><head/><body><p align=justify>The Airborne Science Mission Metadata (ASM"
                     + "M) standard aims to harmonise descriptive information of science research flights. This "
                     + "common description will allow users of the airborne science data to search past datasets"
                     + " for specific meteorological conditions, geographical regions, cloud-types encountered, "
                     + "particles sampled, and other parameters not evident from the data itself.<br> <br> For m"
                     + "ore information, please read the following document: <a href=https://github.com/EUFAR"
                     + "/asmm-eufar/blob/master/Documentation/ASMM%20-%20XML%20Implementation%20Rules.pdf >AS"
                     + "MM - XML Implementation Rules.pdf</a></p></body></html>")
        self.aboutWindow = MyStandard(aboutText)
        # Centre the dialog on the main window.
        x1, y1, w1, h1 = self.geometry().getRect()
        _, _, w2, h2 = self.aboutWindow.geometry().getRect()
        # NOTE(review): '/' yields floats under Python 3; PyQt geometry setters
        # expect ints — confirm this works and consider '//'.
        x2 = x1 + w1/2 - w2/2
        y2 = y1 + h1/2 - h2/2
        self.aboutWindow.setGeometry(x2, y2, w2, h2)
        self.aboutWindow.setMinimumSize(QtCore.QSize(460, self.aboutWindow.sizeHint().height()))
        self.aboutWindow.setMaximumSize(QtCore.QSize(460, self.aboutWindow.sizeHint().height()))
        self.aboutWindow.exec_()
@QtCore.pyqtSlot()
def on_actionChangelog_triggered(self):
logging.debug('mainwindow.py - on_actionChangelog_triggered')
self.logWindow = MyLog()
x1, y1, w1, h1 = self.geometry().getRect()
_, _, w2, h2 = self.logWindow.geometry().getRect()
x2 = x1 + w1/2 - w2/2
y2 = y1 + h1/2 - h2/2
self.logWindow.setGeometry(x2, y2, w2, h2)
self.logWindow.exec_()
@QtCore.pyqtSlot()
def on_readBoundingBoxButton_clicked(self):
logging.debug('mainwindow.py - on_readBoundingBoxButton_clicked')
lat_min, lat_max, lon_min, lon_max, alt_min, alt_max = None, None, None, None, None, None
filename, _ = QtWidgets.QFileDialog.getOpenFileName(self,'Open associated NetCDF','',
'NetCDF files (*.nc *.cdf);;All Files (*.*)')
if not filename:
return
f = NetCdf(str(filename))
var_list = f.get_variable_list()
try:
lat_min = round(f.get_attribute_value("geospatial_lat_min"), 2)
lat_max = round(f.get_attribute_value("geospatial_lat_max"), 2)
except KeyError:
logging.error('mainwindow.py - on_readBoundingBoxButton_clicked - KeyError, lat_min or '
+ 'lat_max not found.')
attr_found = False
for var_name in var_list:
try:
attr_val = f.get_attribute_value('standard_name', var_name)
if attr_val == 'latitude':
attr_found = True
break
except KeyError:
pass
if attr_found:
lat_values = f.read_variable(str(var_name))
lat_min = round(min(lat_values[lat_values != 0]), 2)
lat_max = round(max(lat_values[lat_values != 0]), 2)
else:
[var_name, ok] = QtWidgets.QInputDialog.getItem(self, "Latitude Variable Name", "ERROR: Latitude "
"values not found. Please select the latitude variable in the"
" following list.", var_list, current=0, editable=False)
if var_name and ok:
lat_values = f.read_variable(str(var_name))
lat_min = round(min(lat_values[lat_values != 0]), 2)
lat_max = round(max(lat_values[lat_values != 0]), 2)
try:
lon_min = round(f.get_attribute_value("geospatial_lon_min"), 2)
lon_max = round(f.get_attribute_value("geospatial_lon_max"), 2)
except KeyError:
logging.error('mainwindow.py - on_readBoundingBoxButton_clicked - KeyError, lon_min or '
+ 'lon_max not found.')
attr_found = False
for var_name in var_list:
try:
attr_val = f.get_attribute_value('standard_name', var_name)
if attr_val == 'longitude':
attr_found = True
break
except KeyError:
pass
if attr_found:
lon_values = f.read_variable(str(var_name))
lon_min = round(min(lon_values[lon_values != 0]), 2)
lon_max = round(max(lon_values[lon_values != 0]), 2)
else:
[var_name, ok] = QtWidgets.QInputDialog.getItem(self, "Longitude Variable Name", "ERROR: Longitud"
"e values not found. Please select the longitude vari"
"able in the following list.", var_list, current=0, editable=False)
if var_name and ok:
lon_values = f.read_variable(str(var_name))
lon_min = round(min(lon_values[lon_values != 0]), 2)
lon_max = round(max(lon_values[lon_values != 0]), 2)
try:
alt_min = round(f.get_attribute_value("geospatial_vertical_min"), 2)
alt_max = round(f.get_attribute_value("geospatial_vertical_max"), 2)
except KeyError:
logging.error('mainwindow.py - on_readBoundingBoxButton_clicked - KeyError, alt_min or '
+ 'alt_max not found.')
attr_found = False
for var_name in var_list:
try:
attr_val = f.get_attribute_value('standard_name', var_name)
if attr_val == 'altitude':
attr_found = True
break
except KeyError:
pass
if attr_found:
alt_values = f.read_variable(str(var_name))
alt_min = round(min(alt_values[alt_values != 0]), 2)
alt_max = round(max(alt_values[alt_values != 0]), 2)
else:
[var_name, ok] = QtWidgets.QInputDialog.getItem(self, "Altitude Variable Name", "ERROR: Altitude "
"values not found. Please select the altitude variable in"
" the following list.", var_list, current=0, editable=False)
if var_name and ok:
alt_values = f.read_variable(str(var_name))
alt_min = round(min(alt_values[alt_values != 0]), 2)
alt_max = round(max(alt_values[alt_values != 0]), 2)
self.westBoundLongitudeLine.setText(str(lon_min))
self.eastBoundLongitudeLine.setText(str(lon_max))
self.northBoundLatitudeLine.setText(str(lat_max))
self.southBoundLatitudeLine.setText(str(lat_min))
self.minAltitudeLine.setText(str(alt_min))
self.maxAltitudeLine.setText(str(alt_max))
self.westBoundLongitudeLine.setCursorPosition(0)
self.eastBoundLongitudeLine.setCursorPosition(0)
self.northBoundLatitudeLine.setCursorPosition(0)
self.southBoundLatitudeLine.setCursorPosition(0)
self.minAltitudeLine.setCursorPosition(0)
self.maxAltitudeLine.setCursorPosition(0)
@QtCore.pyqtSlot()
def on_imageAddButton_clicked(self):
logging.debug('mainwindow.py - on_imageAddButton_clicked - self.verticalLayout_52.count() ' +
str(self.verticalLayout_52.count()))
if self.verticalLayout_52.count() < 10:
filename, fileext = QtWidgets.QFileDialog.getOpenFileName(self,'Open an image','','All Files (*.*);;Images' # @UnusedVariable
' (*.jpg *.jpeg *.bmp *.png *.gif *.tiff)')
if filename:
add_image(self, filename)
self.im_del[-1].clicked.connect(lambda: self.del_image())
self.im_label[-1].clicked.connect(lambda: self.show_image())
else:
alertBox = QtWidgets.QMessageBox()
alertBox.about(self, "Warning", "You can't add more than 10 images.")
@QtCore.pyqtSlot()
def on_urlAddButton_clicked(self):
logging.debug('mainwindow.py - on_urlAddButton_clicked - self.verticalLayout_52.count() ' +
str(self.verticalLayout_52.count()))
if self.verticalLayout_52.count() < 10:
x = QtGui.QCursor.pos().x()
y = QtGui.QCursor.pos().y()
x = x - 150
y = y + 50
self.urlWindow = MyURL()
self.urlWindow.setMinimumSize(QtCore.QSize(420, self.urlWindow.sizeHint().height()))
self.urlWindow.setMaximumSize(QtCore.QSize(420, self.urlWindow.sizeHint().height()))
self.urlWindow.setGeometry(x, y, 420, self.urlWindow.sizeHint().height())
if self.urlWindow.exec_():
add_image(self, self.urlWindow.ck_inputLine.text())
self.im_del[-1].clicked.connect(lambda: self.del_image())
self.im_label[-1].clicked.connect(lambda: self.show_image())
else:
alertBox = QtWidgets.QMessageBox()
alertBox.about(self, "Warning", "You can't add more than 10 images.")
    def del_image(self):
        """Remove the image whose delete button was clicked (delegates to delete_image)."""
        delete_image(self)
    def show_image(self):
        """Show the clicked image (delegates to the display_image helper)."""
        display_image(self)
def closeEvent(self, event):
logging.debug('mainwindow.py - closeEvent - self.modified ' + str(self.modified))
if self.modified:
result = self.make_onsave_msg_box("Close")
if result == "iw_saveButton":
self.save_document()
shutil.rmtree(self.dirpath)
logging.info('ASMM ' + _asmm_version + ' is closing ...')
event.accept()
elif result == "iw_nosaveButton":
shutil.rmtree(self.dirpath)
logging.info('ASMM ' + _asmm_version + ' is closing ...')
event.accept()
else:
event.ignore()
else:
shutil.rmtree(self.dirpath)
logging.info('ASMM ' + _asmm_version + ' is closing ...')
self.close()
def make_window_title(self):
logging.debug('mainwindow.py - make_window_title - self.modified ' + str(self.modified) +
' ; self.saved ' + str(self.saved))
title_string = 'ASMM Creator v' + _asmm_version
file_string = ''
saved_string = ''
modified_string = ''
if self.out_file_name:
file_string = ' - ' + self.out_file_name
if not self.saved:
saved_string = ' - unsaved'
if self.modified:
modified_string = ' - modified'
title_string = title_string + file_string + saved_string + modified_string
self.setWindowTitle(title_string)
def set_modified(self):
if not self.modified:
self.modified = True
self.saved = False
self.make_window_title()
def save_document(self, save_as=False):
logging.debug('mainwindow.py - save_document - save_as ' + str(save_as))
cancel = fill_all_fields(self)
if cancel == True:
return
if not self.out_file_name or save_as:
self.out_file_name = self.get_file_name()
if not self.out_file_name:
return
if '.xml' not in self.out_file_name:
self.out_file_name = self.out_file_name + '.xml'
create_asmm_xml(self, self.out_file_name)
self.make_window_title()
def get_file_name(self):
logging.debug('mainwindow.py - get_file_name')
file_dialog = QtWidgets.QFileDialog()
file_dialog.setDefaultSuffix('xml')
out_file_name, _ = file_dialog.getSaveFileName(self, "Save XML File","!!!Flight identifier!!!_xxxxxxxxxx.xml"
, filter='XML Files (*.xml)')
logging.debug('mainwindow.py - get_file_name - out_file_name ' + out_file_name)
return out_file_name
def get_file_name_pdf(self):
logging.debug('mainwindow.py - get_file_name_pdf')
file_dialog = QtWidgets.QFileDialog()
file_dialog.setDefaultSuffix('pdf')
out_file_name_pdf, _ = file_dialog.getSaveFileName(self, "Save PDF File", filter='PDF Files (*.pdf)')
logging.debug('mainwindow.py - get_file_name_pdf - out_file_name_pdf ' + out_file_name_pdf)
return out_file_name_pdf
def reset_all_fields(self):
logging.debug('mainwindow.py - reset_all_fields - starting ...')
all_check_boxes = self.findChildren(QtWidgets.QCheckBox)
for check_box in all_check_boxes:
check_box.setCheckState(False)
all_text_edits = self.findChildren(QtWidgets.QTextEdit)
for widget in all_text_edits:
widget.clear()
all_line_edits = self.findChildren(QtWidgets.QLineEdit)
for widget in all_line_edits:
widget.clear()
all_list_widgets = self.findChildren(QtWidgets.QListWidget)
for widget in all_list_widgets:
widget.clear()
for i in reversed(range(self.gridLayout_5.count())):
self.gridLayout_5.itemAt(i).widget().deleteLater()
for i in reversed(range(self.gridLayout_8.count())):
self.gridLayout_8.itemAt(i).widget().deleteLater()
for i in reversed(range(self.gridLayout_9.count())):
self.gridLayout_9.itemAt(i).widget().deleteLater()
for i in reversed(range(self.gridLayout_10.count())):
self.gridLayout_10.itemAt(i).widget().deleteLater()
for i in reversed(range(self.gridLayout_11.count())):
self.gridLayout_11.itemAt(i).widget().deleteLater()
for i in reversed(range(self.gridLayout_16.count())):
self.gridLayout_16.itemAt(i).widget().deleteLater()
for i in reversed(range(self.gridLayout_14.count())):
self.gridLayout_14.itemAt(i).widget().deleteLater()
for i in reversed(range(self.gridLayout_15.count())):
self.gridLayout_15.itemAt(i).widget().deleteLater()
for i in reversed(range(self.gridLayout_25.count())):
self.gridLayout_25.itemAt(i).widget().deleteLater()
self.operator_cb.setCurrentIndex(0)
self.newAircraft_ln.hide()
self.newAircraft_ln.setText('')
self.newOperator_ln.hide()
self.newOperator_ln.setText('')
self.label_38.hide()
self.label_39.hide()
self.newCountry_lb.hide()
self.newCountry_cb.hide()
self.newCountry_cb.clear()
self.newRegistration_lb.hide()
self.newRegistration_ln.hide()
self.newRegistration_ln.setText('')
self.newManufacturer_lb.hide()
self.newManufacturer_ln.hide()
self.newManufacturer_ln.setText('')
self.aircraft_cb.clear()
self.aircraft_cb.setEnabled(False)
self.location_cb.setCurrentIndex(0)
self.detailList.clear()
self.detailList.setEnabled(False)
for i in reversed(range(0, len(self.images_pdf_path))):
delete_image(self, i)
objectsInit(self)
self.make_window_title()
logging.debug('mainwindow.py - reset_all_fields - finished')
def make_onsave_msg_box(self, string):
logging.debug('mainwindow.py - make_onsave_msg_box')
self.presaveWindow = MyWarning(string)
x1, y1, w1, h1 = self.geometry().getRect()
_, _, w2, h2 = self.presaveWindow.geometry().getRect()
x2 = x1 + w1/2 - w2/2
y2 = y1 + h1/2 - h2/2
self.presaveWindow.setGeometry(x2, y2, w2, h2)
self.presaveWindow.setMinimumSize(QtCore.QSize(450, self.presaveWindow.sizeHint().height()))
self.presaveWindow.setMaximumSize(QtCore.QSize(452, self.presaveWindow.sizeHint().height()))
self.presaveWindow.exec_()
return self.presaveWindow.buttonName
def open_file(self):
logging.debug('mainwindow.py - open_file')
out_file_name, _ = QtWidgets.QFileDialog.getOpenFileName(self,'Open XML File','','XML Files (*.xml)')
if out_file_name:
read_asmm_xml(self, out_file_name)
self.saved = True
self.modified = False
self.out_file_name = out_file_name
self.make_window_title()
logging.debug('mainwindow.py - open_file - self.saved ' + str(self.saved) + ' ; self.modified '
+ str(self.modified) + ' ; self.out_file_name ' + str(self.out_file_name))
def addListItem(self, title, label, listWidget, item_list):
logging.debug('mainwindow.py - addListItem - title ' + str(title) + ' ; label ' + str(label))
x = QtGui.QCursor.pos().x()
y = QtGui.QCursor.pos().y()
x = x - 150
y = y + 50
self.siteWindow = MySite()
self.siteWindow.setMinimumSize(QtCore.QSize(340, self.siteWindow.sizeHint().height()))
self.siteWindow.setMaximumSize(QtCore.QSize(340, self.siteWindow.sizeHint().height()))
self.siteWindow.setGeometry(x, y, 340, self.siteWindow.sizeHint().height())
self.siteWindow.label.setText(label)
self.siteWindow.setWindowTitle(title)
if self.siteWindow.exec_():
self.modified = True
self.make_window_title()
item_list.append(self.siteWindow.ck_inputLine.text())
listWidget.addItem(self.siteWindow.ck_inputLine.text())
logging.debug('mainwindow.py - addListItem - text ' + self.siteWindow.ck_inputLine.text())
def removeListItem(self, listWidget, item_list):
logging.debug('mainwindow.py - removeListItem - item_list ' + str(item_list))
selected_line = listWidget.currentRow()
if selected_line >= 0:
selected_item = listWidget.currentItem()
item_list.remove(selected_item.text())
listWidget.takeItem(selected_line)
self.modified = True
self.make_window_title()
def toolButton_clicked(self):
if self.sender().objectName() != '':
logging.debug('mainwindow.py - toolButton_clicked - self.sender().objectName() ' + self.sender().objectName())
if "infoButton" in self.sender().objectName():
button_clicked(self)
elif "groundAddButton" in self.sender().objectName():
self.addListItem("Add a Ground Site", "Please, enter a name for the new Ground Site.",
self.groundListWidget, self.ground_site_list)
elif "armAddButton" in self.sender().objectName():
self.addListItem("Add an ARM Site", "Please, enter a name for the new ARM Site.",
self.armListWidget, self.arm_site_list)
elif "armMobileAddButton" in self.sender().objectName():
self.addListItem("Add an ARM Mobile Site", "Please, enter a name for the new ARM Mobile Site.",
self.armMobileListWidget, self.arm_mobile_list)
elif "vesselAddButton" in self.sender().objectName():
self.addListItem("Add a Research Vessel", "Please, enter a name for the new Research Vessel.",
self.vesselListWidget, self.research_vessel_list)
elif "groundRemoveButton" in self.sender().objectName():
self.removeListItem(self.groundListWidget, self.ground_site_list)
elif "armRemoveButton" in self.sender().objectName():
self.removeListItem(self.armListWidget, self.arm_site_list)
elif "armMobileRemoveButton" in self.sender().objectName():
self.removeListItem(self.armMobileListWidget, self.arm_mobile_list)
elif "vesselRemoveButton" in self.sender().objectName():
self.removeListItem(self.vesselListWidget, self.research_vessel_list)
elif "addButton" in self.sender().objectName():
if len(self.ck_list_dict.get(str(self.sender().objectName()[:2]))) < 12:
add_clicked(self)
else:
alertBox = QtWidgets.QMessageBox()
alertBox.about(self, "Warning", "You can't add more than 12 checkboxes.")
return
def location_changed(self):
logging.debug('mainwindow.py - location_changed - self.location_cb.currentText() ' + self.location_cb.currentText())
if self.location_cb.currentText() == "Make a choice...":
self.detailList.clear()
self.detailList.setEnabled(False)
elif self.location_cb.currentText() == "Continents":
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.continents)
elif self.location_cb.currentText() == "Countries":
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItem('Make a choice...')
country_list = []
for key, _ in self.new_country_code.items():
country_list.append(key)
self.detailList.addItems(sorted(country_list))
elif self.location_cb.currentText() == "Oceans":
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.oceans)
elif self.location_cb.currentText() == "Regions":
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.regions)
def fill_operator_rolebox(self):
logging.debug('mainwindow.py - fill_operator_rolebox')
unique_list = []
for item in self.new_operators_aircraft:
if item[0] not in unique_list:
unique_list.append(item[0])
self.operator_cb.clear()
self.operator_cb.addItem('Make a choice...')
self.operator_cb.addItem('Other...')
self.operator_cb.addItems(sorted(unique_list, key=str.lower))
def operator_changed(self):
logging.debug('mainwindow.py - operator_changed - self.operator_cb.currentText() ' + self.operator_cb.currentText())
if self.operator_cb.currentText() == "Make a choice...":
self.newAircraft_ln.hide()
self.newAircraft_ln.setText('')
self.newOperator_ln.hide()
self.newOperator_ln.setText('')
self.label_38.hide()
self.label_39.hide()
self.newCountry_lb.hide()
self.newCountry_cb.hide()
self.newCountry_cb.clear()
self.newRegistration_lb.hide()
self.newRegistration_ln.hide()
self.newRegistration_ln.setText('')
self.newManufacturer_lb.hide()
self.newManufacturer_ln.hide()
self.newManufacturer_ln.setText('')
self.aircraft_cb.clear()
self.aircraft_cb.setEnabled(False)
elif self.operator_cb.currentText() == "Other...":
self.newOperator_ln.show()
self.newAircraft_ln.show()
self.label_38.show()
self.label_39.show()
self.aircraft_cb.clear()
self.aircraft_cb.addItem("Other...")
self.aircraft_cb.setEnabled(True)
self.newCountry_lb.show()
self.newCountry_cb.show()
self.newRegistration_lb.show()
self.newRegistration_ln.show()
self.newManufacturer_lb.show()
self.newManufacturer_ln.show()
self.newCountry_cb.addItem('Make a choice...')
country_list = []
for key, _ in self.new_country_code.items():
country_list.append(key)
self.newCountry_cb.addItems(sorted(country_list))
else:
self.newAircraft_ln.hide()
self.newAircraft_ln.setText('')
self.newOperator_ln.hide()
self.newOperator_ln.setText('')
self.label_38.hide()
self.label_39.hide()
self.newCountry_lb.hide()
self.newCountry_cb.hide()
self.newCountry_cb.clear()
self.newRegistration_lb.hide()
self.newRegistration_ln.hide()
self.newRegistration_ln.setText('')
self.newManufacturer_lb.hide()
self.newManufacturer_ln.hide()
self.newManufacturer_ln.setText('')
self.aircraft_cb.clear()
self.aircraft_cb.setEnabled(True)
aircraft_list = []
type_list = []
for i in range(len(self.new_operators_aircraft)):
if self.operator_cb.currentText() == self.new_operators_aircraft[i][0]:
aircraft_list.append(self.new_operators_aircraft[i])
index = self.new_operators_aircraft[i][1].find(', ')
type_list.append(self.new_operators_aircraft[i][1][index + 2:])
if len(aircraft_list) > 1:
self.aircraft_cb.addItem("Make a choice...")
counter_result = dict(Counter(type_list))
for key, value in counter_result.items():
if value > 1:
for i in range(len(aircraft_list)):
if type_list[i] == key:
type_list[i] = type_list[i] + ' - ' + aircraft_list[i][2]
self.aircraft_cb.addItems(sorted(type_list))
def api_eufar_database_parsing(self):
logging.debug('mainwindow.py - api_eufar_database_parsing')
self.download_and_parse_objects = DownloadAndParseJSON()
self.download_and_parse_objects.start()
self.download_and_parse_objects.finished.connect(self.api_eufar_asmm_db_updating)
    def api_eufar_asmm_db_updating(self, val):
        """Slot fed by DownloadAndParseJSON.finished: stores the downloaded
        databases, rebuilds the operator combo box (preserving the current
        selection when possible) and refreshes the project-acronym completer.

        Args:
            val: two-element list [aircraft_db, project_db] of dicts keyed by acronym.
        """
        logging.debug('mainwindow.py - api_eufar_asmm_db_updating')
        self.aircraft_db = val[0]
        self.project_db = val[1]
        # Flatten the aircraft db into [operator, type, registration, country] rows.
        self.new_operators_aircraft = []
        for _, value in self.aircraft_db.items():
            self.new_operators_aircraft.append([value['operator'], value['manufacturer_and_aircraft_type'],
                                                value['registration_number'], value['country']])
        # Remember the user's current selection before clearing the combo boxes.
        current_operator = self.operator_cb.currentText()
        current_aircraft = self.aircraft_cb.currentText()
        operator_list = []
        for item in self.new_operators_aircraft:
            if item[0] not in operator_list:
                operator_list.append(item[0])
        self.operator_cb.clear()
        self.operator_cb.addItem('Make a choice...')
        self.operator_cb.addItem('Other...')
        self.operator_cb.addItems(sorted(operator_list))
        if current_operator != 'Make a choice...':
            operator_index = self.operator_cb.findText(current_operator)
            if operator_index == -1:
                # Previous operator no longer listed: switch to 'Other...'
                # (index 1) and carry the old values into the manual fields.
                self.operator_cb.setCurrentIndex(1)
                self.operator_changed()
                self.newOperator_ln.setText(current_operator)
                self.newAircraft_ln.setText(current_aircraft)
            else:
                self.operator_cb.setCurrentIndex(operator_index)
                self.operator_changed()
                aircraft_index = self.aircraft_cb.findText(current_aircraft)
                if aircraft_index == -1:
                    # Operator still exists but the aircraft does not: fall
                    # back to manual entry as above.
                    self.operator_cb.setCurrentIndex(1)
                    self.operator_changed()
                    self.newOperator_ln.setText(current_operator)
                    self.newAircraft_ln.setText(current_aircraft)
                else:
                    self.aircraft_cb.setCurrentIndex(aircraft_index)
        # Refresh the acronym auto-completion with the project keys.
        completer_list = []
        for key, value in self.project_db.items():
            completer_list.append(key)
        self.completer_model.setStringList(completer_list)
    def api_eufar_acronym_completer(self):
        """Create the project-acronym QCompleter, style its popup, attach it to
        the acronym line edit and wire its activation to the auto-fill slot.

        The string-list model is populated later by api_eufar_asmm_db_updating.
        """
        logging.debug('mainwindow.py - api_eufar_acronym_completer')
        self.completer = QtWidgets.QCompleter()
        # Inline stylesheet for the completer popup (list view + vertical scrollbar).
        self.completer.popup().setStyleSheet("QListView {\n"
                                             "    selection-background-color: rgb(200,200,200);\n"
                                             "    selection-color: black;\n"
                                             "    background-color: #f0f0f0;\n"
                                             "    border: 0px solid #f0f0f0;\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar:vertical {\n"
                                             "    border: 1px solid white;\n"
                                             "    background-color: rgb(240, 240, 240);\n"
                                             "    width: 20px;\n"
                                             "    margin: 21px 0px 21px 0px;\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar::handle:vertical {\n"
                                             "    background-color: rgb(205, 205, 205);\n"
                                             "    min-height: 25px;\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar:handle:vertical:hover {\n"
                                             "    background-color: rgb(167, 167, 167);\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar::add-line:vertical {\n"
                                             "    border-top: 1px solid rgb(240,240,240);\n"
                                             "    border-left: 1px solid white;\n"
                                             "    border-right: 1px solid white;\n"
                                             "    border-bottom: 1px solid white;\n"
                                             "    background-color: rgb(240, 240, 240);\n"
                                             "    height: 20px;\n"
                                             "    subcontrol-position: bottom;\n"
                                             "    subcontrol-origin: margin;\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar::add-line:vertical:hover {\n"
                                             "    background-color: rgb(219, 219, 219);\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar::sub-line:vertical {\n"
                                             "    border-top: 1px solid white;\n"
                                             "    border-left: 1px solid white;\n"
                                             "    border-right: 1px solid white;\n"
                                             "    border-bottom: 1px solid rgb(240,240,240);\n"
                                             "    background-color: rgb(240, 240, 240);\n"
                                             "    height: 20px;\n"
                                             "    subcontrol-position: top;\n"
                                             "    subcontrol-origin: margin;\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar::sub-line:vertical:hover {\n"
                                             "    background-color: rgb(219, 219, 219);\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar::up-arrow:vertical {\n"
                                             "    image: url(icons/up_arrow_icon.svg); \n"
                                             "    width: 16px;\n"
                                             "    height: 16px;\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar::up-arrow:vertical:pressed {\n"
                                             "    right: -1px;\n"
                                             "    bottom: -1px;\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar::down-arrow:vertical {\n"
                                             "    image: url(icons/down_arrow_icon.svg); \n"
                                             "    width: 16px;\n"
                                             "    height: 16px;\n"
                                             "}\n"
                                             "\n"
                                             "QScrollBar::down-arrow:vertical:pressed {\n"
                                             "    right: -1px;\n"
                                             "    bottom: -1px;\n"
                                             "}\n")
        self.projectAcronym_ln.setCompleter(self.completer)
        # The model starts empty; api_eufar_asmm_db_updating fills it with acronyms.
        self.completer_model = QtCore.QStringListModel()
        self.completer.setModel(self.completer_model)
        self.completer.activated.connect(self.api_eufar_completer_function)
def api_eufar_completer_function(self,val):
logging.debug('mainwindow.py - api_eufar_completer_function - val ' + str(val))
project = self.project_db[val]
self.missionSci_ln.setText(project['leader'])
try:
platform = self.aircraft_db[project['aircraft']]
except KeyError:
platform = {}
try:
operator = platform['operator']
except KeyError:
operator = ''
try:
aircraft = platform['manufacturer_and_aircraft_type']
index = aircraft.find(', ')
manufacturer = aircraft[: index]
aircraft = aircraft[index + 2:]
except KeyError:
aircraft = ''
manufacturer = ''
try:
registration = platform['registration_number']
except KeyError:
registration = ''
try:
country = platform['country']
for key, value in self.new_country_code.items():
if value == country:
country = key
break
except KeyError:
country = ''
if operator or aircraft or manufacturer or registration or country:
index = self.operator_cb.findText(operator)
if index != -1:
self.operator_cb.setCurrentIndex(index)
self.operator_changed()
index = self.aircraft_cb.findText(aircraft)
if index != -1:
self.aircraft_cb.setCurrentIndex(index)
else:
self.operator_cb.setCurrentIndex(1)
self.operator_changed()
self.newOperator_ln.setText(operator)
self.newAircraft_ln.setText(aircraft)
self.newRegistration_ln.setText(registration)
self.newManufacturer_ln.setText(manufacturer)
index = self.newCountry_cb.findText(country)
if index!= -1:
self.newCountry_cb.setCurrentIndex(index)
else:
self.operator_cb.setCurrentIndex(1)
self.operator_changed()
self.newOperator_ln.setText(operator)
self.newAircraft_ln.setText(aircraft)
self.newRegistration_ln.setText(registration)
self.newManufacturer_ln.setText(manufacturer)
index = self.newCountry_cb.findText(country)
if index!= -1:
self.newCountry_cb.setCurrentIndex(index)
def api_eufar_information(self):
logging.debug('mainwindow.py - api_eufar_information')
self.apiWindow = MyApi()
x1, y1, w1, h1 = self.geometry().getRect()
_, _, w2, h2 = self.apiWindow.geometry().getRect()
x2 = x1 + w1/2 - w2/2
y2 = y1 + h1/2 - h2/2
self.apiWindow.setGeometry(x2, y2, w2, h2)
if platform.system() == 'Linux':
x = 500
y = self.apiWindow.sizeHint().height()
elif platform.system() == 'Windows':
x = 700
y = 400
else:
x = 500
y = self.apiWindow.sizeHint().height()
self.apiWindow.setMinimumSize(QtCore.QSize(x, y))
self.apiWindow.setMaximumSize(QtCore.QSize(x, y))
self.apiWindow.exec_()
return self.apiWindow.checkboxStatus
class MyAbout(QtWidgets.QDialog, Ui_aboutWindow):
    """Modal 'About' dialog displaying the given rich-text blurb."""
    def __init__(self, aboutText):
        # NOTE(review): initialises via QWidget.__init__ rather than super();
        # this matches the other dialog classes in this file — confirm before changing.
        QtWidgets.QWidget.__init__(self)
        logging.debug('mainwindow.py - MyAbout')
        self.setupUi(self)
        self.aw_label_1.setText(aboutText)
        self.aw_okButton.clicked.connect(self.closeWindow)
    def closeWindow(self):
        """Close the dialog when OK is clicked."""
        self.close()
class MyLog(QtWidgets.QDialog, Ui_Changelog):
    """Changelog dialog: shows Documentation/changelog.txt in a text browser."""
    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        logging.debug('mainwindow.py - MyLog')
        self.setupUi(self)
        # Use a context manager so the file handle is closed deterministically
        # (the original relied on garbage collection to close it).
        with open("Documentation/changelog.txt") as changelog:
            self.log_txBrower.setPlainText(changelog.read())
        self.lg_okButton.clicked.connect(self.closeWindow)
    def closeWindow(self):
        """Close the dialog when OK is clicked."""
        self.close()
class MyStandard(QtWidgets.QDialog, Ui_aboutStandard):
    """Dialog describing the ASMM XML standard (rich-text blurb + OK button)."""
    def __init__(self, aboutText):
        QtWidgets.QWidget.__init__(self)
        logging.debug('mainwindow.py - MyStandard')
        self.setupUi(self)
        self.aw_label_1.setText(aboutText)
        self.aw_okButton.clicked.connect(self.closeWindow)
    def closeWindow(self):
        """Close the dialog when OK is clicked."""
        self.close()
class MyWarning(QtWidgets.QDialog, Ui_presaveWindow):
    """'Unsaved changes' dialog; records the clicked button in self.buttonName."""
    def __init__(self, string):
        # string: action verb shown on the 'without saving' button (e.g. 'Close').
        QtWidgets.QWidget.__init__(self)
        logging.debug('mainwindow.py - MyWarning - string ' + string)
        self.setupUi(self)
        self.iw_cancelButton.setFocus(True)
        # Every tool button closes the window; closeWindow remembers which one.
        all_buttons = self.findChildren(QtWidgets.QToolButton)
        for widget in all_buttons:
            widget.clicked.connect(self.closeWindow)
        self.iw_nosaveButton.setText(string + " without saving")
    def closeWindow(self):
        """Remember the clicked button's object name, then close the dialog."""
        self.buttonName = self.sender().objectName()
        self.close()
class MySite(QtWidgets.QDialog, Ui_Addsite):
    """Small input dialog used by addListItem to enter a new site/vessel name."""
    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        logging.debug('mainwindow.py - MySite')
        self.setupUi(self)
        self.ck_cancelButton.clicked.connect(self.closeWindow)
        self.ck_submitButton.clicked.connect(self.submitBox)
    def closeWindow(self):
        """Dismiss the dialog without accepting."""
        self.close()
    def submitBox(self):
        """Accept the dialog only when the input line is not empty."""
        if self.ck_inputLine.text():
            self.accept()
class MyURL(QtWidgets.QDialog, Ui_AddURL):
    """Small input dialog asking for an image URL."""
    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        # Bug fix: the debug message wrongly said 'MyAbout' (copy-paste).
        logging.debug('mainwindow.py - MyURL')
        self.setupUi(self)
        self.ck_cancelButton.clicked.connect(self.closeWindow)
        self.ck_submitButton.clicked.connect(self.submitBox)
    def closeWindow(self):
        """Dismiss the dialog without accepting."""
        self.close()
    def submitBox(self):
        """Accept the dialog (no validation — an empty URL is allowed)."""
        self.accept()
class MyApi(QtWidgets.QDialog, Ui_apiWindow):
    """EUFAR API information dialog; exposes its checkbox state as
    self.checkboxStatus once closed."""
    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        # Bug fix: the debug message wrongly said 'button_functions.py - MyInfo'
        # (copy-paste from another module).
        logging.debug('mainwindow.py - MyApi')
        self.setupUi(self)
        self.iw_okButton.clicked.connect(self.closeWindow)
    def closeWindow(self):
        """Store the checkbox state, then close the dialog."""
        self.checkboxStatus = self.checkBox.isChecked()
        self.close()
class DownloadAndParseJSON(Qt.QThread):
    """Background thread downloading the EUFAR aircraft and project JSON tables.

    Emits ``finished`` with a two-element list of dicts keyed by 'acronym';
    an entry is empty (or partial) when its download or parsing failed.
    """
    finished = QtCore.pyqtSignal(list)
    def __init__(self):
        Qt.QThread.__init__(self)
        logging.debug('mainwindow.py - DownloadAndParseJSON - starting ...')
        self.url_list = ['http://eufar.net/api/json/ta/open/aircraft/',
                         'http://eufar.net/api/sad89712hhdsa89yp1/json/projects/']
        self.db_list = []
    def run(self):
        """Fetch each URL, index the JSON records by acronym, emit the result."""
        for url in self.url_list:
            indexed = {}
            try:
                logging.debug('mainwindow.py - DownloadAndParseJSON - ' + url + ' running ...')
                response = requests.get(url=url)
                for record in response.json():
                    indexed[record['acronym']] = dict(record)
                self.db_list.append(indexed)
            except Exception:
                # Best effort: keep whatever was parsed before the failure.
                self.db_list.append(indexed)
                logging.error('mainwindow.py - DownloadAndParseJSON - ' + url + ' error in connexion or json object')
        self.finished.emit(self.db_list)
    def stop(self):
        """Hard-stop the worker thread."""
        self.terminate()
| eufarn7sp/asmm-eufar | ui/mainwindow.py | Python | bsd-3-clause | 49,065 | [
"NetCDF"
] | b0f017e80cd07f19e0d75542120a10c8e999a143caec4e5721820f6a03a8f62c |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import sys
import re
import datetime
from collections import namedtuple
import json
from io import StringIO
from monty.json import MontyDecoder, MontyEncoder
from monty.string import remove_non_ascii
from pymatgen.core.structure import Structure, Molecule
from pybtex.database.input import bibtex
from pybtex import errors
"""
Classes and methods related to the Structure Notation Language (SNL)
"""
__author__ = 'Anubhav Jain, Shyue Ping Ong'
__credits__ = 'Dan Gunter'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = 'ajain@lbl.gov'
__date__ = 'Feb 11, 2013'
# Validation limits applied to SNL objects (sizes in bytes unless stated otherwise).
MAX_HNODE_SIZE = 64000  # maximum size (bytes) of SNL HistoryNode
MAX_DATA_SIZE = 256000  # maximum size (bytes) of SNL data field
MAX_HNODES = 100  # maximum number of HistoryNodes in SNL file
MAX_BIBTEX_CHARS = 20000  # maximum number of characters for BibTeX reference
def is_valid_bibtex(reference):
    """Check with pybtex that *reference* is parseable BibTeX.

    Args:
        reference: A String reference in BibTeX format.

    Returns:
        True when at least one BibTeX entry could be parsed.
    """
    # pybtex seems to have an issue with unicode, so every non-ASCII
    # character is stripped before parsing.
    stream = StringIO(remove_non_ascii(reference))
    errors.set_strict_mode(False)
    parsed = bibtex.Parser().parse_stream(stream)
    return len(parsed.entries) > 0
class HistoryNode(namedtuple('HistoryNode', ['name', 'url', 'description'])):
    """One breadcrumb in the provenance chain of a Structure.

    Each node records a code or resource the structure passed through,
    e.g. being pulled from an external database such as the ICSD or CSD,
    or having a transformation applied with pymatgen.

    .. attribute:: name

        Name of the code/resource this Structure encountered (String).

    .. attribute:: url

        URL of that code/resource (String).

    .. attribute:: description

        Free-form description of how the code/resource relates to the
        Structure (dict).
    """

    def as_dict(self):
        """Return a plain-dict representation of this node."""
        return {"name": self.name,
                "url": self.url,
                "description": self.description}

    @staticmethod
    def from_dict(h_node):
        """Rebuild a HistoryNode from its as_dict() form."""
        return HistoryNode(h_node['name'], h_node['url'],
                           h_node['description'])

    @staticmethod
    def parse_history_node(h_node):
        """Coerce a dict or a 3-element sequence into a HistoryNode.

        Args:
            h_node: A dict with name/url/description fields or a
                3-element tuple.

        Returns:
            History node.

        Raises:
            ValueError: if a sequence of the wrong length is supplied.
        """
        if isinstance(h_node, dict):
            return HistoryNode.from_dict(h_node)
        if len(h_node) != 3:
            raise ValueError("Invalid History node, "
                             "should be dict or (name, version, "
                             "description) tuple: {}".format(h_node))
        return HistoryNode(*h_node)
class Author(namedtuple('Author', ['name', 'email'])):
    """A structure author.

    .. attribute:: name

        Name of author (String).

    .. attribute:: email

        Email of author (String).
    """

    def __str__(self):
        """Render as 'Name <email>'."""
        return '%s <%s>' % (self.name, self.email)

    def as_dict(self):
        """Return a plain-dict representation."""
        return dict(name=self.name, email=self.email)

    @staticmethod
    def from_dict(d):
        """Rebuild an Author from its as_dict() form."""
        return Author(name=d['name'], email=d['email'])

    @staticmethod
    def parse_author(author):
        """Coerce a String, dict, or 2-element sequence into an Author.

        Args:
            author: A String formatted as "NAME <email@domain.com>",
                a (name, email) tuple, or a dict with name and email keys.

        Returns:
            An Author object.

        Raises:
            ValueError: for malformed strings or wrong-length sequences.
        """
        if isinstance(author, str):
            # Whitespace, (any name), whitespace, <, (email), >, whitespace
            match = re.match(r'\s*(.*?)\s*<(.*?@.*?)>\s*', author)
            # The match must cover the entire input string.
            if not match or match.start() != 0 or match.end() != len(author):
                raise ValueError("Invalid author format! {}".format(author))
            name, email = match.groups()
            return Author(name, email)
        if isinstance(author, dict):
            return Author.from_dict(author)
        if len(author) != 2:
            raise ValueError("Invalid author, should be String or (name, "
                             "email) tuple: {}".format(author))
        return Author(*author)
class StructureNL:
    """
    The Structure Notation Language (SNL, pronounced 'snail') is container
    for a pymatgen Structure/Molecule object with some additional fields for
    enhanced provenance. It is meant to be imported/exported in a JSON file
    format with the following structure:
    - about
        - created_at
        - authors
        - projects
        - references
        - remarks
        - data
        - history
    - lattice (optional)
    - sites
    Args:
        struct_or_mol: A pymatgen.core.structure Structure/Molecule object
        authors: *List* of {"name":'', "email":''} dicts,
            *list* of Strings as 'John Doe <johndoe@gmail.com>',
            or a single String with commas separating authors
        projects: List of Strings ['Project A', 'Project B']
        references: A String in BibTeX format
        remarks: List of Strings ['Remark A', 'Remark B']
        data: A free form dict. Namespaced at the root level with an
            underscore, e.g. {"_materialsproject": <custom data>}
        history: List of dicts - [{'name':'', 'url':'', 'description':{}}]
        created_at: A datetime object
    """
    def __init__(self, struct_or_mol, authors, projects=None, references='',
                 remarks=None, data=None, history=None, created_at=None):
        # initialize root-level structure keys
        self.structure = struct_or_mol
        # turn authors into list of Author objects
        authors = authors.split(',')\
            if isinstance(authors, str) else authors
        self.authors = [Author.parse_author(a) for a in authors]
        # turn projects into list of Strings
        projects = projects if projects else []
        self.projects = [projects] if isinstance(projects, str) else projects
        # check that references are valid BibTeX
        if not isinstance(references, str):
            raise ValueError("Invalid format for SNL reference! Should be "
                             "empty string or BibTeX string.")
        if references and not is_valid_bibtex(references):
            raise ValueError("Invalid format for SNL reference! Should be "
                             "BibTeX string.")
        if len(references) > MAX_BIBTEX_CHARS:
            raise ValueError("The BibTeX string must be fewer than {} chars "
                             ", you have {}"
                             .format(MAX_BIBTEX_CHARS, len(references)))
        self.references = references
        # turn remarks into list of Strings
        remarks = remarks if remarks else []
        self.remarks = [remarks] if isinstance(remarks, str) else remarks
        # check remarks limit
        for r in self.remarks:
            if len(r) > 140:
                raise ValueError("The remark exceeds the maximum size of"
                                 "140 characters: {}".format(r))
        # check data limit
        self.data = data if data else {}
        if not sys.getsizeof(self.data) < MAX_DATA_SIZE:
            raise ValueError("The data dict exceeds the maximum size limit of"
                             " {} bytes (you have {})"
                             .format(MAX_DATA_SIZE, sys.getsizeof(data)))
        for k, v in self.data.items():
            if not k.startswith("_"):
                # BUGFIX: this message was previously built as
                # ValueError("...{}...", format(k)) -- two arguments, with the
                # builtin format() instead of str.format -- so the offending
                # key was never interpolated into the message.
                raise ValueError("data must contain properly namespaced data "
                                 "with keys starting with an underscore. The "
                                 "key {} does not start with an underscore."
                                 .format(k))
        # check for valid history nodes
        history = history if history else []  # initialize null fields
        if len(history) > MAX_HNODES:
            raise ValueError("A maximum of {} History nodes are supported, "
                             "you have {}!".format(MAX_HNODES, len(history)))
        self.history = [HistoryNode.parse_history_node(h) for h in history]
        if not all([sys.getsizeof(h) < MAX_HNODE_SIZE for h in history]):
            raise ValueError("One or more history nodes exceeds the maximum "
                             "size limit of {} bytes".format(MAX_HNODE_SIZE))
        self.created_at = created_at if created_at \
            else datetime.datetime.utcnow()
    def as_dict(self):
        """Return a JSON-serializable dict representation of this SNL,
        merging the structure's own dict with the 'about' metadata."""
        d = self.structure.as_dict()
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        d["about"] = {"authors": [a.as_dict() for a in self.authors],
                      "projects": self.projects,
                      "references": self.references,
                      "remarks": self.remarks,
                      "history": [h.as_dict() for h in self.history],
                      "created_at": json.loads(json.dumps(self.created_at,
                                                          cls=MontyEncoder))}
        d["about"].update(json.loads(json.dumps(self.data,
                                                cls=MontyEncoder)))
        return d
    @classmethod
    def from_dict(cls, d):
        """Rebuild a StructureNL from its as_dict() form. A 'lattice' key
        selects Structure over Molecule for the embedded object."""
        a = d["about"]
        dec = MontyDecoder()
        created_at = dec.process_decoded(a.get("created_at"))
        # Only underscore-prefixed keys of 'about' are user data.
        data = {k: v for k, v in d["about"].items()
                if k.startswith("_")}
        data = dec.process_decoded(data)
        structure = Structure.from_dict(d) if "lattice" in d \
            else Molecule.from_dict(d)
        return cls(structure, a["authors"], projects=a.get("projects", None),
                   references=a.get("references", ""),
                   remarks=a.get("remarks", None), data=data,
                   history=a.get("history", None), created_at=created_at)
    @classmethod
    def from_structures(cls, structures, authors, projects=None,
                        references='', remarks=None, data=None,
                        histories=None, created_at=None):
        """
        A convenience method for getting a list of StructureNL objects by
        specifying structures and metadata separately. Some of the metadata
        is applied to all of the structures for ease of use.
        Args:
            structures: A list of Structure objects
            authors: *List* of {"name":'', "email":''} dicts,
                *list* of Strings as 'John Doe <johndoe@gmail.com>',
                or a single String with commas separating authors
            projects: List of Strings ['Project A', 'Project B']. This
                applies to all structures.
            references: A String in BibTeX format. Again, this applies to all
                structures.
            remarks: List of Strings ['Remark A', 'Remark B']
            data: A list of free form dict. Namespaced at the root level
                with an underscore, e.g. {"_materialsproject":<custom data>}
                . The length of data should be the same as the list of
                structures if not None.
            histories: List of list of dicts - [[{'name':'', 'url':'',
                'description':{}}], ...] The length of histories should be the
                same as the list of structures if not None.
            created_at: A datetime object
        """
        data = [{}] * len(structures) if data is None else data
        histories = [[]] * len(structures) if histories is None else \
            histories
        snl_list = []
        for i, struct in enumerate(structures):
            snl = StructureNL(struct, authors, projects=projects,
                              references=references,
                              remarks=remarks, data=data[i],
                              history=histories[i],
                              created_at=created_at)
            snl_list.append(snl)
        return snl_list
    def __str__(self):
        return "\n".join(["{}\n{}".format(k, getattr(self, k))
                          for k in ("structure", "authors", "projects",
                                    "references", "remarks", "data", "history",
                                    "created_at")])
    def __eq__(self, other):
        # Equality compares every provenance field, not just the structure.
        return all(map(lambda n: getattr(self, n) == getattr(other, n),
                       ("structure", "authors", "projects", "references",
                        "remarks", "data", "history", "created_at")))
    def __ne__(self, other):
        return not self.__eq__(other)
| dongsenfo/pymatgen | pymatgen/util/provenance.py | Python | mit | 13,521 | [
"pymatgen"
] | 9b25235ee7984d6a8e27ac5d25edcfcbfababf586890ea9dc442bb8f8273732e |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Distribution configuration for neurom
"""
# pylint: disable=R0801
import os
from setuptools import setup
from setuptools import find_packages
import pip
from pip.req import parse_requirements
from optparse import Option
VERSION = "1.4.2"
def parse_reqs(reqs_file):
    ''' Parse a pip requirements file into a list of requirement strings.

    Args:
        reqs_file: path to a requirements.txt-style file.

    Returns:
        List of requirement specifier strings, e.g. "numpy>=1.8".

    NOTE(review): this relies on pip internals (pip.req.parse_requirements,
    pip.download.PipSession) which were removed in pip 10 -- it only works
    with old pip releases.
    '''
    options = Option('--workaround')
    options.skip_requirements_regex = None
    # Hack for old pip versions
    # Versions greater than 1.x have a required parameter "session" in
    # parse_requirements
    if pip.__version__.startswith('1.'):
        install_reqs = parse_requirements(reqs_file, options=options)
    else:
        from pip.download import PipSession # pylint:disable=E0611
        options.isolated_mode = False
        # NOTE(review): the PipSession *class* (not an instance) is passed
        # as the session -- confirm this is intentional.
        install_reqs = parse_requirements(reqs_file, # pylint:disable=E1123
                                          options=options,
                                          session=PipSession)
    return [str(ir.req) for ir in install_reqs]
# Absolute path of the directory containing this setup.py.
BASEDIR = os.path.dirname(os.path.abspath(__file__))
# Hack to avoid installation of modules with C extensions
# in readthedocs documentation building environment.
if os.environ.get('READTHEDOCS') == 'True':
    REQS = []
else:
    REQS = parse_reqs(os.path.join(BASEDIR, 'requirements.txt'))
# Every requirements_<extra>.txt file in this directory becomes a
# setuptools "extra" named <extra> (installable as neurom[<extra>]).
EXTRA_REQS_PREFIX = 'requirements_'
EXTRA_REQS = {}
for file_name in os.listdir(BASEDIR):
    if not file_name.startswith(EXTRA_REQS_PREFIX):
        continue
    base_name = os.path.basename(file_name)
    (extra, _) = os.path.splitext(base_name)
    # Strip the "requirements_" prefix to obtain the extra's name.
    extra = extra[len(EXTRA_REQS_PREFIX):]
    EXTRA_REQS[extra] = parse_reqs(file_name)
# Package metadata handed straight to setuptools.setup().
config = {
    'description': 'NeuroM: a light-weight neuron morphology analysis package',
    'author': 'BBP Algorithm Development Team',
    'url': 'http://https://github.com/BlueBrain/NeuroM',
    'author_email': 'juan.palacios@epfl.ch, lida.kanari@epfl.ch',
    'version': VERSION,
    'install_requires': REQS,
    'extras_require': EXTRA_REQS,
    'packages': find_packages(),
    'license': 'BSD',
    'scripts': ['apps/raw_data_check',
                'apps/morph_check',
                'apps/morph_stats',
                ],
    'name': 'neurom',
    'include_package_data': True,
}
setup(**config)
| liesbethvanherpe/NeuroM | setup.py | Python | bsd-3-clause | 3,932 | [
"NEURON"
] | 9b1c6235157b7cb696abb3e88ddda71aed28385523a64db212c8bdedd6710b0e |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - LocalSiteMap action
The LocalSiteMap action gives you a page that shows
nearby links. This is an example of what appears on the
page (names are linkable on the real page):
MoinMoin
GarthKidd
OrphanedPages
WantedPages
JoeDoe
CategoryHomepage
CategoryCategory
WikiHomePage
JoeWishes
WikiWiki
OriginalWiki
@copyright: 2001 Steve Howell <showell@zipcon.com>,
2001-2004 Juergen Hermann <jh@web.de>
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin import wikiutil
from MoinMoin.Page import Page
class MaxNodesReachedException(Exception):
    """Raised by PageTreeBuilder.new_node once the node cap (maxnodes)
    is hit, aborting further tree construction."""
    pass
def execute(pagename, request):
    """Action entry point: render the full LocalSiteMap page for `pagename`.

    Writes the themed HTML (title, content div, map, footer) directly to
    the request; returns nothing.
    """
    _ = request.getText
    # This action generates data using the user's language
    request.setContentLanguage(request.lang)
    request.theme.send_title(_('Local Site Map for "%s"') % (pagename), pagename=pagename)
    # Start content - IMPORTANT - without a content div, there is no
    # direction support!
    request.write(request.formatter.startContent("content"))
    request.write(LocalSiteMap(pagename).output(request))
    request.write(request.formatter.endContent()) # end content div
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
class LocalSiteMap:
    """Accumulates the HTML for a page's local site map.

    An instance acts as its own tree visitor: output() builds the link
    tree, then walks it depth-first while visit() renders each node as
    an indented page link into self.result.
    """

    def __init__(self, name):
        self.name = name
        self.result = []

    def output(self, request):
        """Build the tree rooted at self.name and return it as HTML."""
        tree = PageTreeBuilder(request).build_tree(self.name)
        tree.depth_first_visit(request, self)
        return """
<p>
%s
</p>
""" % ''.join(self.result)

    def visit(self, request, name, depth):
        """ Visit a page, i.e. create a link.
        """
        if not name:
            return
        _ = request.getText
        page = Page(request, name)
        action = __name__.split('.')[-1]
        self.result.extend([
            ' ' * (5 * depth + 1),
            page.link_to(request, querystr={'action': action}),
            " <small>[",
            page.link_to(request, _('view')),
            "</small>]<br>",
        ])

    def append(self, text):
        self.result.append(text)
class PageTreeBuilder:
    """Builds a Tree of linked, readable, existing pages level by level,
    capped at maxnodes nodes in total."""

    def __init__(self, request):
        self.request = request
        self.children = {}  # pages already placed in the tree
        self.numnodes = 0
        self.maxnodes = 35

    def mark_child(self, name):
        self.children[name] = 1

    def child_marked(self, name):
        return name in self.children

    def is_ok(self, child):
        """Accept a page exactly once: unseen, readable and existing."""
        if self.child_marked(child):
            return 0
        if not self.request.user.may.read(child):
            return 0
        if not Page(self.request, child).exists():
            return 0
        self.mark_child(child)
        return 1

    def new_kids(self, name):
        """Return acceptable pages linked from `name` (does not recurse)."""
        links = Page(self.request, name).getPageLinks(self.request)
        return [child for child in links if self.is_ok(child)]

    def new_node(self):
        """Count one node; abort the build once the cap is reached."""
        self.numnodes = self.numnodes + 1
        if self.numnodes == self.maxnodes:
            raise MaxNodesReachedException

    def build_tree(self, name):
        """Return the link tree rooted at `name`."""
        self.mark_child(name)
        tree = Tree(name)
        try:
            self.recurse_build([tree], 1)
        except MaxNodesReachedException:
            pass
        return tree

    def recurse_build(self, trees, depth):
        # Expand one level of the tree, then recurse into the next level.
        next_level = []
        for tree in trees:
            for kid in self.new_kids(tree.node):
                subtree = Tree(kid)
                tree.append(subtree)
                self.new_node()
                next_level.append(subtree)
        if len(next_level):
            self.recurse_build(next_level, depth + 1)
class Tree:
    """Minimal n-ary tree of page names with a depth-first visitor."""

    def __init__(self, node):
        self.node = node
        self.children = []

    def append(self, node):
        """Add a child subtree."""
        self.children.append(node)

    def depth_first_visit(self, request, visitor, depth=0):
        """Call visitor.visit(request, name, depth) for this node, then
        recurse pre-order into each child with depth + 1."""
        visitor.visit(request, self.node, depth)
        for subtree in self.children:
            subtree.depth_first_visit(request, visitor, depth + 1)
| Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/action/LocalSiteMap.py | Python | mit | 4,416 | [
"VisIt"
] | 8fc85d89ec0130abaabf118a67b3696d039f989180524dfceef08bf7b0722074 |
''' Command
Base class for all commands.
'''
from DIRAC import gLogger, S_OK
__RCSID__ = '$Id: $'
class Command( object ):
  '''
    Base class for the commands interacting with the clients: wraps the
    cache / fresh-query / master workflows behind a single doCommand
    entry point.
  '''

  def __init__( self, args = None, clients = None ):
    # Client APIs injected by the caller; empty dict when not supplied.
    self.apis = clients or {}
    self.masterMode = False
    self.onlyCache = False
    self.metrics = { 'failed' : [] }
    self.args = { 'onlyCache' : False }
    self.args.update( args or {} )
    self.log = gLogger.getSubLogger( self.__class__.__name__ )

  def doNew( self, masterParams = None ):
    ''' Default no-op; real commands override this to fetch fresh values.
    '''
    return S_OK( ( self.args, masterParams ) )

  def doCache( self ):
    ''' Default no-op; real commands override this to read cached values.
    '''
    return S_OK( self.args )

  def doMaster( self ):
    ''' Default no-op; real commands override this for master-mode runs.
    '''
    return S_OK( self.metrics )

  def doCommand( self ):
    ''' Dispatch to doMaster / doCache / doNew depending on the mode.
    '''
    if self.masterMode:
      self.log.verbose( 'doMaster' )
      return self.returnSObj( self.doMaster() )

    self.log.verbose( 'doCache' )
    cached = self.doCache()
    if not cached[ 'OK' ]:
      return self.returnERROR( cached )
    # Stop here when the cache had data, or when we were asked not to
    # request new values.
    if cached[ 'Value' ] or self.args[ 'onlyCache' ]:
      return cached

    self.log.verbose( 'doNew' )
    return self.returnSObj( self.doNew() )

  def returnERROR( self, s_obj ):
    ''' Prefix the S_ERROR message with the command name, much easier to debug.
    '''
    s_obj[ 'Message' ] = '{0} {1}'.format( self.__class__.__name__, s_obj[ 'Message' ] )
    return s_obj

  def returnSObj( self, s_obj ):
    ''' Prefix the message with the command name only when s_obj is an error.
    '''
    if s_obj[ 'OK' ]:
      return s_obj
    return self.returnERROR( s_obj )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| hgiemza/DIRAC | ResourceStatusSystem/Command/Command.py | Python | gpl-3.0 | 2,134 | [
"DIRAC"
] | 8ef1c0b0632d04315d3ec5b8babc20051b85c06a106703c43225b74def4def96 |
from exceptions import ValidationError
from flask import request, current_app, url_for
from flask.ext.mail import Message
import requests
def get_post_data(request):
    """Return the request payload: the parsed JSON body when the
    content-type is application/json (or absent), otherwise form data."""
    content_type = request.headers.get('content-type', 'application/json')
    if content_type == 'application/json':
        return request.get_json()
    return request.form
def send_password_reset_email(email,url=None,msg=None):
    """Email a signed password-reset link to `email`.

    The token is produced by the app's serializer under the 'reset-email'
    salt. When `msg` is None, a default message is built from `url` and
    the token; pass a prebuilt Message to override it entirely (in which
    case `url` is unused).

    Returns:
        (msg, token): the Message that was sent and the serialized token.
    """
    token = current_app.ts.dumps(email,salt='reset-email')
    if msg is None:
        # The reset link is simply <url>/<token>.
        endpoint = '{url}/{token}'.format(url=url,token=token)
        msg = Message(
            subject="[ADS] Password reset",
            recipients=[email],
            html='''
Hi,
Someone (probably you) has requested a password reset on the account associated with this email address.
To reset your password, please visit
<a href="{endpoint}">{endpoint}</a> with your browser.
This link will be valid for the next 10 minutes.
If this is a mistake, then just ignore this email.
-The ADS team'''.format(endpoint=endpoint))
    current_app.extensions['mail'].send(msg)
    return msg, token
def send_verification_email(email, url=None, msg=None):
    """Email a signed address-verification link to `email`.

    The token is produced by the app's serializer under the
    'verification-email' salt. When `msg` is None, a default message is
    built from `url` and the token; pass a prebuilt Message to override
    it entirely (in which case `url` is unused).

    Returns:
        (msg, token): the Message that was sent and the serialized token.
    """
    token = current_app.ts.dumps(email,salt='verification-email')
    if msg is None:
        # The verification link is simply <url>/<token>.
        endpoint = '{url}/{token}'.format(url=url,token=token)
        msg = Message(
            subject="[ADS] Please verify your email address",
            recipients=[email],
            html='''
Hi,
Someone (probably you) has registered this email address with the NASA-ADS (http://adslabs.org).
To confirm this action, please visit
<a href="{endpoint}">{endpoint}</a> with your browser.
If this is a mistake, then just ignore this email.
-The ADS team'''.format(endpoint=endpoint))
    current_app.extensions['mail'].send(msg)
    return msg, token
def scope_func():
    """Rate-limit scope key: the OAuth client_id when the request comes
    from a known client, otherwise the caller's IP address."""
    has_client = hasattr(request, 'oauth') and request.oauth.client
    return request.oauth.client.client_id if has_client else request.remote_addr
def verify_recaptcha(request,ep=None):
    """Ask Google's reCAPTCHA service whether the response carried by
    `request` is valid.

    Args:
        request: incoming flask request holding a 'g-recaptcha-response'
            field (JSON body or form data).
        ep: verification endpoint; defaults to the app's
            GOOGLE_RECAPTCHA_ENDPOINT config value.

    Returns:
        True when Google confirms the response, False otherwise.

    Raises:
        requests.HTTPError: when the endpoint returns an HTTP error status.
    """
    if ep is None:
        ep = current_app.config['GOOGLE_RECAPTCHA_ENDPOINT']
    # Reuse get_post_data so JSON and form submissions are dispatched the
    # same way everywhere in this module.
    data = get_post_data(request)
    payload = {
        'secret': current_app.config['GOOGLE_RECAPTCHA_PRIVATE_KEY'],
        'remoteip': request.remote_addr,
        'response': data['g-recaptcha-response'],
    }
    r = requests.post(ep,data=payload)
    r.raise_for_status()
    # The comparison already yields a bool, so the former
    # "True if ... == True else False" ternary was redundant.
    return r.json()['success'] == True
def validate_email(email):
    """Check that `email` contains an '@'; raise ValidationError otherwise.

    Minimal validation is OK here, since the address is verified with an
    emailed link anyways.
    """
    if email.count('@') == 0:
        raise ValidationError('Not a valid email')
    return True
def validate_password(password):
    """ Password must have one lowercase letter, one uppercase letter and
    one digit, and be at least 6 characters long.
    Inspired/reused from lingthio/Flask-User
    """
    has_lower = any(ch.islower() for ch in password)
    has_upper = any(ch.isupper() for ch in password)
    has_digit = any(ch.isdigit() for ch in password)
    if not (len(password) >= 6 and has_lower and has_upper and has_digit):
        raise ValidationError('Password must have at least 6 characters with one lowercase letter, one uppercase letter and one number')
    return True
| ehenneken/adsws | adsws/accounts/utils.py | Python | gpl-2.0 | 3,290 | [
"VisIt"
] | c9079974e8431b9af87b8d0205be4747aa19ce228b03be64de73d00b7fa90faa |
# -*- coding: utf8 -*-
"""Lookup tables used in the Researcher Format transformation."""
# Import required modules
# These should all be contained in the standard library
from collections import OrderedDict
__author__ = 'Victoria Morris'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'
# ====================
# Lookup tables
# ====================
# Record/description-type labels keyed by 3-digit tag ('032'-'049').
# Built from a list of pairs rather than a dict literal: passing a plain
# dict literal into OrderedDict does not guarantee insertion order on
# Python < 3.7, which defeats the purpose of using an OrderedDict.
TYPES = OrderedDict([
    ('032', 'Fonds'),
    ('033', 'SubFonds'),
    ('034', 'SubSubFonds'),
    ('035', 'SubSubSubFonds'),
    ('036', 'Series'),
    ('037', 'SubSeries'),
    ('038', 'SubSubSeries'),
    ('039', 'SubSubSubSeries'),
    ('040', 'File'),
    ('041', 'Item'),
    ('042', 'SubItem'),
    ('043', 'SubSubItem'),
    ('044', 'SubSubSubItem'),
    ('045', 'Corporation'),
    ('046', 'Family'),
    ('047', 'Person'),
    ('048', 'Place'),
    ('049', 'Subject'),
])
# Catalogue-source codes mapped to source names.  Built from pairs so
# that insertion order is guaranteed on every Python version (an
# OrderedDict fed a dict literal is unordered on Python < 3.7).
SOURCES = OrderedDict([
    ('B', 'BNB'),
    ('E', 'ESTC'),
    ('M', 'MainCat'),
])
# Place-name lists, grouped by country, used to recognise places of
# publication.  PLACES (below) is the deduplicated union of all of them.
PLACES_ENGLAND = ['Accrington', 'Aldermaston', 'Alfreton', 'Alnwick', 'Alton', 'Ambleside', 'Andover', 'Arundel',
                  'Aylesbury', 'Aylesford', 'Bacup', 'Bakewell', 'Banbury', 'Barnoldswick', 'Barnsley', 'Barnstaple',
                  'Barrow-in-Furness', 'Basildon', 'Basingstoke', 'Bath', 'Batley', 'Benfleet', 'Berkhamsted',
                  'Berwick', 'Bexhill', 'Biddulph', 'Biggleswade', 'Bingley', 'Birkenhead', 'Bishop Auckland',
                  'Bishop\'s Stortford', 'Blackburn', 'Blackpool', 'Bournemouth', 'Bowness-On-Windermere', 'Bracknell',
                  'Bradford', 'Braintree', 'Bridlington', 'Brighton', 'Bristol', 'Burnham-on-Sea', 'Burnley', 'Byfleet',
                  'Cannock', 'Canterbury', 'Carlisle', 'Chatham', 'Chelmsford', 'Chelsea', 'Chepstow', 'Chertsey',
                  'Chester', 'Chesterfield', 'Chichester', 'Chippenham', 'Chorley', 'Clacton-on-Sea', 'Cleethorpes',
                  'Clitheroe', 'Cockermouth', 'Colchester', 'Colne', 'Congleton', 'Consett', 'Coventry', 'Crediton',
                  'Dalton-in-Furness', 'Derby', 'Devizes', 'Dewsbury', 'Douglas', 'Driffield', 'Dudley', 'Dunstable',
                  'Egham', 'Ellesmere', 'Enfield', 'Epping', 'Epsom', 'Epworth', 'Evesham', 'Exeter', 'Exmouth',
                  'Fakenham', 'Falmouth', 'Farnborough', 'Farnham', 'Faversham', 'Felixstowe', 'Fleetwood', 'Frodsham',
                  'Frome', 'Gainsborough', 'Garstang', 'Gateshead', 'Gloucester', 'Godalming', 'Goole', 'Gosport',
                  'Grantham', 'Gravesend', 'Grimsby', 'Hammersmith', 'Harlow', 'Harpenden', 'Harrogate', 'Hartford',
                  'Hartlepool', 'Harwich', 'Haslemere', 'Haslingden', 'Haverhill', 'Heckmondwike', 'Hemel Hempstead',
                  'Hereford', 'Hexham', 'Hitchin', 'Hoddesdon', 'Honiton', 'Horsforth', 'Huddersfield', 'Hull',
                  'Hunstanton', 'Ilfracombe', 'Ilkley', 'Ipswich', 'Keighley', 'Kendal', 'Knaresborough', 'Knutsford',
                  'Launceston', 'Leamington', 'Leeds', 'Letchworth', 'Lewes', 'Lichfield', 'Liskeard', 'Littlehampton',
                  'Liverpool', 'London', 'Loughborough', 'Lowestoft', 'Ludlow', 'Luton', 'Lymington', 'Macclesfield',
                  'Maidenhead', 'Maidstone', 'Marlborough', 'Maryport', 'Melton Mowbray', 'Middlesbrough', 'Morecambe',
                  'Morpeth', 'Newark', 'Newcastle upon Tyne', 'Newcastle-Upon-Tyne', 'Newington-Causey', 'Newmarket',
                  'Newquay', 'Newry', 'Newton-le-Willows', 'Normanton', 'Northallerton', 'Northampton', 'Northwich',
                  'Norwich', 'Nottingham', 'Nuneaton', 'Ormskirk', 'Otley', 'Paignton', 'Penistone', 'Penrith',
                  'Penryn', 'Penzance', 'Peterborough', 'Petersfield', 'Pontefract', 'Portsmouth', 'Preston',
                  'Ramsgate', 'Rawtenstall', 'Reading', 'Redruth', 'Reigate', 'Richmond', 'Rickmansworth', 'Ripley',
                  'Ripon', 'Risley', 'Rotherham', 'Royston', 'Rugby', 'Rugeley', 'Runcorn', 'Saffron Walden',
                  'Saint Albans', 'Saltburn-by-the-Sea', 'Scarborough', 'Scunthorpe', 'Seaham', 'Selby', 'Sevenoaks',
                  'Shaftesbury', 'Sheerness', 'Sheffield', 'Shoreham-by-Sea', 'Shrewsbury', 'Sidmouth', 'Sittingbourne',
                  'Skegness', 'Skelmersdale', 'Skipton', 'Sleaford', 'Slough', 'Solihull', 'Southampton',
                  'Southend-on-Sea', 'Southport', 'Spalding', 'Spenborough', 'Stafford', 'Staines', 'Stamford',
                  'Stevenage', 'Stockport', 'Stockton-on-Tees', 'Stoke-on-Trent', 'Stourbridge', 'Stourport',
                  'Stowmarket', 'Sudbury', 'Sunderland', 'Sutton Coldfield', 'Swaffham', 'Swanage', 'Swanley',
                  'Swindon', 'Tadcaster', 'Tamworth', 'Taunton', 'Teignmouth', 'Tenby', 'Thetford', 'Thirsk', 'Tipton',
                  'Tiverton', 'Todmorden', 'Tonbridge', 'Torquay', 'Totnes', 'Trowbridge', 'Truro', 'Tunbridge Wells',
                  'Tynemouth', 'Ulverston', 'Uttoxeter', 'Wakefield', 'Wallasey', 'Wallsend', 'Walsall',
                  'Waltham Abbey', 'Walton on the Naze', 'Walton-on-Thames', 'Warrington', 'Warwick', 'Wednesbury',
                  'West Bromwich', 'Westminster', 'Weston-super-Mare', 'Wetherby', 'Weymouth', 'Whitehaven',
                  'Whitstable', 'Widnes', 'Wigton', 'Wilmslow', 'Wimborne', 'Wincanton', 'Winchester', 'Windermere',
                  'Wisbeach', 'Wishaw', 'Withernsea', 'Woking', 'Wokingham', 'Wolverhampton', 'Wolverton', 'Workington',
                  'Worthing', 'Wymondham', 'Yarmouth', 'Yeovil']
PLACES_IRELAND = ['Ballyshannon', 'Carlow', 'Carrick-on-Shannon', 'Clonmel', 'Drogheda', 'Dublin', 'Dundalk',
                  'Enniscorthy', 'Limerick', 'Sligo', 'Tralee', 'Waterford', 'Wicklow']
PLACES_N_IRELAND = ['Armagh', 'Ballymena', 'Ballymoney', 'Ballynahinch', 'Carrickfergus', 'Belfast', 'Derry',
                    'Enniskillen', 'Omagh']
PLACES_SCOTLAND = ['Aberdeen', 'Airdrie', 'Alloa', 'Ayr', 'Broxbourn', 'Campbeltown', 'Clydebank', 'Cowdenbeath',
                   'Cumbernauld', 'Cupar', 'Dingwall', 'Dumbarton', 'Dumfries', 'Dundee', 'Dunfermline', 'Edinburgh',
                   'Elgin', 'Falkirk', 'Forfar', 'Galashiels', 'Glasgow', 'Glencoe', 'Greenock', 'Hawick',
                   'Helensburgh', 'Inverness', 'Kelso', 'Kilmarnock', 'Kirkcaldy', 'Kirkintilloch', 'Kirkwall',
                   'Kirriemuir', 'Leith', 'Linlithgow', 'Montrose', 'Motherwell', 'Musselburgh', 'Nairn', 'Oban',
                   'Paisley', 'Peebles', 'Selkirk', 'Stirling', 'Stranraer']
# NB: a duplicate 'Pontypridd' entry has been removed from this list.
PLACES_WALES = ['Aberavon', 'Aberdare', 'Abergavenny', 'Abergele', 'Abersychan', 'Abertillery', 'Aberystwyth', 'Bala',
                'Blaenau Ffestiniog', 'Blaenavon', 'Bangor', 'Brecon', 'Caernarvon', 'Caerphilly', 'Cardiff',
                'Carmarthen', 'Colwyn Bay', 'Denbigh', 'Haverfordwest', 'Fishguard', 'Llandudno', 'Llanelli',
                'Llangollen', 'Llanidloes', 'Llantrisant', 'Merthyr Tydfil', 'Monmouth', 'Oswestry', 'Pembroke',
                'Pontypool', 'Pontypridd', 'Prestatyn', 'Pwllheli', 'Rhondda', 'Rhyl', 'Swansea',
                'Tredegar', 'Welshpool', 'Wrexham']
PLACES_US = ['Albany', 'Baltimore', 'Buffalo', 'Charlestown', 'Chicago', 'Cincinnati', 'Dallas', 'Des Moines',
             'Fort Madison', 'Fort Scott', 'Fort Smith', 'Fort Worth', 'Grand Forks', 'Grand Haven', 'Grand Rapids',
             'Granite City', 'Grass Valley', 'Guntersville', 'Hoboken', 'Hollidaysburg', 'Holly Springs', 'Houston',
             'Hudson', 'Ithaca', 'Jackson', 'Jacksonville', 'Janesville', 'Jefferson', 'Jersey City', 'Kansas City',
             'Knoxville', 'Little Rock', 'Los Angeles', 'Louisville', 'Lynchburg', 'Mackinaw City', 'Madison',
             'Marysville', 'Memphis', 'Miami', 'Milwaukee', 'Minneapolis', 'Nashville', 'New Bedford', 'New Haven',
             'New Orleans', 'New York', 'Omaha', 'Oregon City', 'Parkersburg', 'Paterson', 'Perrysburg', 'Pittsburgh',
             'Pottsville', 'Red Bluff', 'Rockville', 'Salt Lake City', 'San Diego', 'Savannah', 'Southern Pines',
             'Tallahassee', 'Union Springs', 'Urbana', 'Vicksburg', 'Virginia City', 'Warrensburg', 'Washington',
             'Waynesville', 'Whitewater', 'Williamsburg', 'Woodstock']
PLACES_OTHER = ['Aabenraa', 'Aachen', 'Aalborg', 'Aalen', 'Aalten', 'Aarau', 'Aarhus', 'Abidjan', 'Accra', 'Adelaide',
                'Aldershot', 'Alexandria', 'Algiers', 'Allahabad', 'Allentown', 'Amersham', 'Ammanford', 'Ampthill',
                'Amsterdam', 'Antwerp', 'Apeldoorn', 'Argostolion', 'Armidale', 'Arnhem', 'Ashford', 'Ashington',
                'Ashkhabad', 'Ashtabula', 'Atchison', 'Athens', 'Athlone', 'Atlanta', 'Auckland', 'Augsburg', 'Augusta',
                'Axminster', 'Baghdad', 'Ballarat', 'Ballina', 'Ballinasloe', 'Ballinrobe', 'Bangalore', 'Bangkok',
                'Banjul', 'Barberton', 'Barcelona', 'Bari', 'Basseterre', 'Bebington', 'Beith', 'Bendigo', 'Bendorf',
                'Berlin', 'Bethesda', 'Bethlehem', 'Beverley', 'Bideford', 'Bilbao', 'Billericay', 'Billingham',
                'Bilston', 'Birkenfeld', 'Birmingham', 'Blackwood', 'Blandford', 'Blantyre', 'Blaydon', 'Bletchley',
                'Bloemfontein', 'Blyth', 'Bodmin', 'Bologna', 'Bombay', 'Bonnyrigg', 'Bordeaux', 'Borehamwood',
                'Boulogne', 'Brakpan', 'Brandon', 'Bratislava', 'Breda', 'Bregenz', 'Bremen', 'Brentwood', 'Bridgend',
                'Bridgeport', 'Bridgetown', 'Bridgnorth', 'Bridgwater', 'Brigg', 'Brighouse', 'Brisbane', 'Brixham',
                'Brno', 'Broadstairs', 'Brunswick', 'Brussels', 'Buckingham', 'Budapest', 'Bulawayo', 'Bungay',
                'Burlington', 'Cairo', 'Calcutta', 'Calgary', 'Camberley', 'Camborne', 'Cambridge', 'Caracas',
                'Cardigan', 'Casablanca', 'Castlebar', 'Castleford', 'Castries', 'Catania', 'Catanzaro', 'Caterham',
                'Charleroi', 'Charleston', 'Charlottetown', 'Chemnitz', 'Cherbourg', 'Chernovtsy', 'Chesham',
                'Christchurch', 'Clermont-Ferrand', 'Clevedon', 'Cleveland', 'Clinton', 'Coalville', 'Coatbridge',
                'Cologne', 'Colombo', 'Coolgardie', 'Copenhagen', 'Cork', 'Corwen', 'Cosenza', 'Cracow', 'Cradock',
                'Crawley', 'Crewe', 'Crowborough', 'Damascus', 'Danville', 'Dar-es-Salaam', 'Darlington', 'Darmstadt',
                'Dartford', 'Dartmouth', 'Delft', 'Delhi', 'Detmold', 'Detroit', 'Deventer', 'Dinnington', 'Doncaster',
                'Dorchester', 'Dordrecht', 'Dorking', 'Dortmund', 'Douglas', 'Dresden', 'Dromore', 'Dunedin',
                'Dungannon', 'Durban', 'Durham', 'Eastbourne', 'Eastleigh', 'Edenbridge', 'Edmonton', 'Eindhoven',
                'Enschede', 'Essen', 'Evansville', 'Failsworth', 'Fleet', 'Flemington', 'Flensburg', 'Florence',
                'Flushing', 'Folkestone', 'Freehold', 'Freeport', 'Freetown', 'Fremantle', 'Geneva', 'Georgetown',
                'Gillingham', 'Gisborne', 'Grahamstown', 'Groningen', 'Guildford', 'Haarlem', 'Haddington', 'Hadleigh',
                'Halesworth', 'Halifax', 'Hamburg', 'Hamilton', 'Hannover', 'Hanover', 'Harare', 'Hastings', 'Havana',
                'Havant', 'Heidelberg', 'Heidenheim', 'Helsinki', 'Hermoupolis', 'Hertford', 'Hertogenbosch',
                'Hildesheim', 'Hinckley', 'Hobart', 'Holyhead', 'Holywell', 'Honiara', 'Honolulu', 'Horncastle',
                'Hornsea', 'Horsetown', 'Horsham', 'Hove', 'Hoylake', 'Huntingdon', 'Huntington', 'Hythe', 'Ibadan',
                'Indianapolis', 'Innsbruck', 'Invercargill', 'Istanbul', 'Jakarta', 'Jamestown', 'Jeddah',
                'Johannesburg', 'Johnstone', 'Karachi', 'Karlsruhe', 'Kartuzy', 'Kassel', 'Katowice', 'Keene', 'Kells',
                'Kiel', 'Kiev', 'Kingsbridge', 'Kingston', 'Kingstown', 'Koblenz', 'Kota Kinabalu', 'Krugersdorp',
                'Kuala Lumpur', 'Lagos', 'Lahore', 'Lancaster', 'Leatherhead', 'Leeuwarden', 'Leicester', 'Leiden',
                'Leipzig', 'Lexington', 'Leyland', 'Lille', 'Lima', 'Limoges', 'Lincoln', 'Lisbon', 'Lisburn',
                'Loughton', 'Lyons', 'Maastricht', 'Mablethorpe', 'Madras', 'Madrid', 'Mafikeng', 'Magdeburg',
                'Maghull', 'Malmesbury', 'Manchester', 'Manila', 'Mannheim', 'Margate', 'Marseilles', 'Maryborough',
                'Mbabane', 'Melbourne', 'Mexborough', 'Mexico City', 'Milan', 'Minsk', 'Monrovia', 'Montevideo',
                'Montpellier', 'Montreal', 'Moscow', 'Mullingar', 'Munich', 'Nagasaki', 'Nairobi', 'Naples', 'Nassau',
                'Nemaha', 'Nenagh', 'Newbury', 'Newcastle', 'Newtown', 'Nicosia', 'Nijmegen', 'Nuremberg', 'Offenbach',
                'Offenburg', 'Oldenburg', 'Olsztyn', 'Oslo', 'Oxford', 'Painswick', 'Palermo', 'Pamplona',
                'Panama City', 'Paris', 'Penarth', 'Perth', 'Philadelphia', 'Pietermaritzburg', 'Plymouth', 'Portland',
                'Potchefstroom', 'Prague', 'Pretoria', 'Quebec', 'Rangoon', 'Rawmarsh', 'Rayleigh', 'Regina', 'Riga',
                'Ringwood', 'Rochester', 'Rockhampton', 'Rome', 'Romsey', 'Roscommon', 'Roseau', 'Rothesay', 'Rothwell',
                'Rotterdam', 'Sacramento', 'Saint Petersburg', 'Salerno', 'Salisbury', 'Salonica', 'San Salvador',
                'Sandakan', 'Sandbach', 'Sandbank', 'Sandgate', 'Sandhurst', 'Santiago', 'Santo Domingo', 'Shanghai',
                'Shanklin', 'Sherborne', 'Skibbereen', 'St Helen\'s', 'Stockholm', 'Strabane', 'Stuttgart', 'Sydney',
                'Szczecin', 'Taipei', 'Tallinn', 'Tangier', 'Tbilisi', 'Tegucigalpa', 'The Hague', 'Tokyo', 'Toronto',
                'Toulon', 'Toulouse', 'Trenton', 'Trieste', 'Tullamore', 'Turin', 'Turku', 'Tzaneen', 'Uckfield',
                'Utrecht', 'Valdivia', 'Valencia', 'Valletta', 'Valparaiso', 'Vancouver', 'Venice', 'Ventnor', 'Vienna',
                'Vilnius', 'Wanganui', 'Warminster', 'Warsaw', 'Watford', 'Wellington', 'Wexford', 'Whitchurch',
                'Whiteabbey', 'Wickford', 'Wiesbaden', 'Wilhelmshaven', 'Williamsport', 'Windhoek', 'Windsor',
                'Winnipeg', 'Worcester', 'Wuppertal', 'Yokohama', 'Zagreb', 'Zevenbergen', 'Zurich', 'Zutphen',
                'Zwolle', ]
# Deduplicated union of all the lists above.  Sorted so the ordering is
# deterministic across runs: a bare list(set(...)) varies from run to run
# under hash randomisation.
PLACES = sorted(set().union(PLACES_ENGLAND, PLACES_IRELAND, PLACES_N_IRELAND, PLACES_SCOTLAND, PLACES_WALES, PLACES_US,
                            PLACES_OTHER))
# Lookup table for MARC fields
# Maps 3-character field tag -> human-readable field name.  Alongside
# standard MARC 21 bibliographic tags this includes local tags (09x,
# 59x, 9xx and the alphabetic tags at the end; several values mention
# "Document Supply" / "BL").  The commented-out entries are left out of
# the mapping - presumably excluded on purpose; confirm before
# re-enabling any of them.
marc_fields = {
    '001': 'Control Number',
    '003': 'Control Number Identifier',
    '005': 'Date and Time of Latest Transaction',
    '006': 'Fixed-Length Data Elements - Additional Material Characteristics',
    '007': 'Physical Description Fixed Field',
    '008': 'Fixed Length Data Elements',
    '010': 'Library of Congress Control Number',
    '013': 'Patent Control Information',
    '015': 'National Bibliography Number',
    '016': 'National Bibliographic Agency Control Number',
    '017': 'Copyright or Legal Deposit Number',
    '018': 'Copyright Article-Fee Code',
    '019': 'Legacy Control Number',
    '020': 'International Standard Book Number',
    '022': 'International Standard Serial Number',
    '024': 'Other Standard Identifier',
    '025': 'Overseas Acquisition Number',
    '027': 'Standard Technical Report Number',
    '028': 'Publisher Number',
    '030': 'CODEN Designation',
    '031': 'Musical Incipits Information',
    '032': 'Postal Registration Number',
    '033': 'Date/Time and Place of an Event',
    '034': 'Coded Cartographic Mathematical Data',
    '035': 'System Control Number',
    '036': 'Original Study Number for Computer Data files',
    '037': 'Source of Acquisition',
    '038': 'Record Content Licensor',
    '039': 'National Bibliography Issue Number',
    '040': 'Cataloging Source',
    '041': 'Language Code',
    '042': 'Authentication Code',
    '043': 'Geographic Area Code',
    '044': 'Country of Publishing/Producing Entity Code',
    '045': 'Time Period of Content',
    '046': 'Special Coded Dates',
    '047': 'Form of Musical Composition Code',
    '048': 'Number of Musical Instruments or Voices Code',
    '050': 'Library of Congress Call Number',
    '051': 'Library of Congress Copy, Issue, Offprint Statement',
    '052': 'Geographic Classification',
    '055': 'Classification Numbers Assigned in Canada',
    '060': 'National Library of Medicine Call Number',
    '061': 'National Library of Medicine Copy Statement',
    '066': 'Character Sets Present',
    '070': 'National Agricultural Library Call Number',
    '071': 'National Agricultural Library Copy Statement',
    '072': 'Subject Category Code',
    '074': 'GPO Item Number',
    '080': 'Universal Decimal Classification Number',
    '082': 'Dewey Decimal Classification Number',
    '083': 'Additional Dewey Decimal Classification Number',
    '084': 'Other Classification Number',
    '085': 'Synthesized Classification Number Components',
    '086': 'Government Document Classification Number',
    '088': 'Report Number',
    '091': 'Previous Control Number (Document Supply Conference)',
    '100': 'Main Entry - Personal Name',
    '110': 'Main Entry - Corporate Name',
    '111': 'Main Entry - Meeting Name',
    '130': 'Main Entry - Uniform Title',
    '210': 'Abbreviated Title',
    '222': 'Key Title',
    '240': 'Uniform Title',
    '242': 'Translation of Title by Cataloging Agency',
    '243': 'Collective Uniform Title',
    '245': 'Title Statement',
    '246': 'Varying Form of Title',
    '247': 'Former Title',
    '250': 'Edition Statement',
    '254': 'Musical Presentation Statement',
    '255': 'Cartographic Mathematical Data',
    '256': 'Computer File Characteristics',
    '257': 'Country of Producing Entity',
    '258': 'Philatelic Issue Data',
    '260': 'Publication, Distribution, etc. (Imprint)',
    '263': 'Projected Publication Date',
    '264': 'Production, Publication, Distribution, Manufacture, and Copyright Notice',
    '270': 'Address',
    '300': 'Physical Description',
    '306': 'Playing Time',
    '307': 'Hours, Etc.',
    '310': 'Current Publication Frequency',
    '321': 'Former Publication Frequency',
    '336': 'Content Type',
    '337': 'Media Type',
    '338': 'Carrier Type',
    '340': 'Physical Medium',
    '342': 'Geospatial Reference Data',
    '343': 'Planar Coordinate Data',
    '344': 'Sound Characteristics',
    '345': 'Projection Characteristics of Moving Image',
    '346': 'Video Characteristics',
    '347': 'Digital File Characteristics',
    '348': 'Format of Notated Music',
    '351': 'Organization and Arrangement of Materials',
    '352': 'Digital Graphic Representation',
    '355': 'Security Classification Control',
    '357': 'Originator Dissemination Control',
    '362': 'Dates of Publication and/or Sequential Designation',
    '363': 'Normalized Date and Sequential Designation',
    '365': 'Trade Price',
    '366': 'Trade Availability Information',
    '370': 'Associated Place',
    '377': 'Associated Language',
    '380': 'Form of Work',
    '381': 'Other Distinguishing Characteristics of Work or Expression',
    '382': 'Medium of Performance',
    '383': 'Numeric Designation of Musical Work',
    '384': 'Key',
    '385': 'Audience Characteristics',
    '386': 'Creator/Contributor Characteristics',
    '388': 'Time Period of Creation',
    '490': 'Series Statement',
    '500': 'General Note',
    '501': 'With Note',
    '502': 'Dissertation Note',
    '504': 'Bibliography, Etc. Note',
    '505': 'Formatted Contents Note',
    '506': 'Restrictions on Access Note',
    '507': 'Scale Note for Graphic Material',
    '508': 'Creation/Production Credits Note',
    '509': 'Informal Notes',
    '510': 'Citation/References Note',
    '511': 'Participant or Performer Note',
    '513': 'Type of Report and Period Covered Note',
    '514': 'Data Quality Note',
    '515': 'Numbering Peculiarities Note',
    '516': 'Type of Computer File or Data Note',
    '518': 'Date/Time and Place of an Event Note',
    '520': 'Summary, Etc.',
    '521': 'Target Audience Note',
    '522': 'Geographic Coverage Note',
    '524': 'Preferred Citation of Described Materials Note',
    '525': 'Supplement Note',
    '526': 'Study Program Information Note',
    '530': 'Additional Physical Form Available Note',
    '533': 'Reproduction Note',
    '534': 'Original Version Note',
    '535': 'Location of Originals/Duplicates Note',
    '536': 'Funding Information Note',
    '538': 'System Details Note',
    '539': 'Location of Filmed Copy',
    '540': 'Terms Governing Use and Reproduction Note',
    '541': 'Immediate Source of Acquisition Note',
    '542': 'Information Relating to Copyright Status',
    '544': 'Location of Other Archival Materials Note',
    '545': 'Biographical or Historical Data',
    '546': 'Language Note',
    '547': 'Former Title Complexity Note',
    '550': 'Issuing Body Note',
    '552': 'Entity and Attribute Information Note',
    '555': 'Cumulative Index/Finding Aids Note',
    '556': 'Information about Documentation Note',
    '561': 'Ownership and Custodial History',
    '562': 'Copy and Version Identification Note',
    '563': 'Binding Information',
    '565': 'Case File Characteristics Note',
    '567': 'Methodology Note',
    '580': 'Linking Entry Complexity Note',
    '581': 'Publications About Described Materials Note',
    '583': 'Action Note',
    '584': 'Accumulation and Frequency of Use Note',
    '585': 'Exhibitions Note',
    '586': 'Awards Note',
    '588': 'Source of Description Note',
    '590': 'Document Supply General Note',
    '591': 'Document Supply Conference Note',
    '592': 'Collaboration Note',
    '594': 'Reference to Items in Printed Catalogues',
    '595': 'Document Supply Bibliographic History Note',
    '597': 'Editing or Error Message',
    '598': 'Document Supply Selection / Ordering Information',
    '599': 'Notes Relating to an Original',
    '600': 'Subject Added Entry - Personal Name',
    '610': 'Subject Added Entry - Corporate Name',
    '611': 'Subject Added Entry - Meeting Name',
    '630': 'Subject Added Entry - Uniform Title',
    '648': 'Subject Added Entry - Chronological Term',
    '650': 'Subject Added Entry - Topical Term',
    '651': 'Subject Added Entry - Geographic Name',
    '653': 'Index Term-Uncontrolled',
    '654': 'Subject Added Entry-Faceted Topical Terms',
    '655': 'Index Term - Genre/Form',
    '656': 'Index Term - Occupation',
    '657': 'Index Term - Function',
    '658': 'Index Term - Curriculum Objective',
    '662': 'Subject Added Entry-Hierarchical Place Name',
    '690': 'Collection Subset',
    '692': 'Nineteenth Century Subject Series Field',
    '700': 'Added Entry - Personal Name',
    '710': 'Added Entry - Corporate Name',
    '711': 'Added Entry - Meeting Name',
    '720': 'Added Entry - Uncontrolled Name',
    '730': 'Added Entry - Uniform Title',
    '740': 'Added Entry - Uncontrolled Related/Analytical Title',
    '751': 'Added Entry - Geographic Name',
    '752': 'Added Entry - Hierarchical Place Name',
    '753': 'System Details Access to Computer Files',
    '754': 'Added Entry - Taxonomic Identification',
    '760': 'Main Series Entry',
    '762': 'Subseries Entry',
    '765': 'Original Language Entry',
    '767': 'Translation Entry',
    '770': 'Supplement/Special Issue Entry',
    '772': 'Supplement Parent Entry',
    '773': 'Host Item Entry',
    '774': 'Constituent Unit Entry',
    '775': 'Other Edition Entry',
    '776': 'Additional Physical Form Entry',
    '777': 'Issued With Entry',
    '780': 'Preceding Entry',
    '785': 'Succeeding Entry',
    '786': 'Data Source Entry',
    '787': 'Other Relationship Entry',
    '800': 'Series Added Entry - Personal Name',
    '810': 'Series Added Entry - Corporate Name',
    '811': 'Series Added Entry - Meeting Name',
    '830': 'Series Added Entry - Uniform Title',
    '850': 'Holding Institution',
    '852': 'Location',
    '856': 'Electronic Location and Access',
    '859': 'Digital Resource Flag',
    '880': 'Alternate Graphic Representation',
    '882': 'Replacement Record Information',
    '883': 'Machine-generated Metadata Provenance',
    '884': 'Description Conversion Information',
    '886': 'Foreign MARC Information Field',
    '887': 'Non-MARC Information Field',
    '916': 'Authority Control Information',
    '917': 'Production Category',
    '945': 'BL Local Title',
    '950': 'Library of Congress Subject (Cross-Reference)',
    '954': 'Transliteration Statement',
    '955': 'Shelving Location',
    '957': 'Acquisitions Data',
    '958': 'Superseded Shelfmark',
    '959': 'Document Supply Status Flag',
    '960': 'Normalized Place of Publication',
    '961': 'Sheet Index Note',
    '962': 'Colindale Location Flag',
    '963': 'Cambridge University Library Location',
    '964': 'Science Museum Library Location',
    '966': 'Document Supply Acquisitions Indicator',
    '968': 'Record Status Field',
    '970': 'Collection Code',
    '975': 'Insufficient Record Statement',
    '976': 'Non-monographic Conference Indicator',
    '979': 'Negative Shelfmark',
    '980': 'Card Production Indicator',
    '985': 'Cataloguer\'s Note',
    '990': 'Product Information Code',
    '992': 'Stored Search Flag',
    '996': 'Z39.50 SFX Enabler',
    '997': 'Shared Library Message Field',
    # Alphabetic tags below are system-local (non-numeric) fields.
    'A02': 'Serial Acquisitions System Number',
    # 'ACF': 'Copyright Fee',
    'AQN': 'Acquisitions Notes Field',
    'BGT': 'BGLT (British Grey Literature Team) Report Flag',
    'BUF': 'Batch Upgrade Flag',
    # 'CAT': 'Cataloguer',
    # 'CFI': 'Copyright Fee Information',
    'CNF': 'Document Supply Conference Heading',
    'COR': 'Original Preferred Term',
    'DEL': 'Deleted',
    'DGM': 'Digitised Record Match',
    'DRT': 'Digital Record Type',
    'EST': 'Document Supply ESTAR (Electronic Storage and Retrieval System)',
    'EXP': 'Block Export',
    'FFP': 'Flag For Publication',
    'FIN': 'Finished (Cataloguing)',
    'FMT': 'Format',
    # 'LAS': 'Last CAT Field',
    'LCS': 'Library of Congress Series Statement',
    'LDO': 'LDO (Legal Deposit Office) Information',
    'LDR': 'Leader',
    'LEO': 'LEO (Library Export Operations) Identifier',
    'LET': 'Serials claim letter title',
    'LKR': 'Link',
    'MIS': 'Monograph in Series Flag',
    'MNI': 'Medium Neutral ISSN',
    'MPX': 'Map Leader Data Element',
    'NEG': 'LDO (Legal Deposit Office) Signoff',
    'NID': 'Newspaper Identifier',
    'OBJ': 'Digital Object Field',
    'OHC': 'Original Holding Count',
    'ONS': 'ONIX Subjects',
    'ONX': 'ONIX Un-Mapped Data',
    # 'OWN': 'Access Permission',
    'PLR': 'PRIMO Large Record',
    'RSC': 'Remote Supply Collection',
    'SID': 'Source ID',
    'SRC': 'Source',
    'SSD': 'STI Serials Designation',
    'STA': 'Status',
    # 'SYS': 'Aleph System Number',
    'TOC': 'Document Supply ETOC (Electronic Table of Contents) Flag',
    # 'TCO': 'Unrecognised field',
    'UNO': 'Unencrypted Download ID',
    'UPD': 'Update',
    'VIT': 'Virtual Item',
    }
# Lookup table for RDA content types
# Maps RDA content-type code to its display term (codes presumably as
# recorded in MARC field 336 - confirm against the cataloguing spec).
content_types = {
    'crd': 'Cartographic dataset',
    'cri': 'Cartographic image',
    'crm': 'Cartographic tactile image',
    'crt': 'Cartographic tactile three-dimensional form',
    'crf': 'Cartographic three-dimensional form',
    'cod': 'Computer dataset',
    'cop': 'Computer program',
    'ntv': 'Notated movement',
    'ntm': 'Notated music',
    'prm': 'Performed music',
    'snd': 'Sounds',
    'spw': 'Spoken word',
    'sti': 'Still image',
    'tci': 'Tactile image',
    'tcm': 'Tactile notated music',
    'tcn': 'Tactile notated movement',
    'tct': 'Tactile text',
    'tcf': 'Tactile three-dimensional form',
    'txt': 'Text',
    'tdf': 'Three-dimensional form',
    'tdm': 'Three-dimensional moving image',
    'tdi': 'Two-dimensional moving image',
    }
# Lookup table for content types from LDR/06 codes
# Maps MARC leader position 06 ('type of record') to a display name.
content_types_ldr = {
    'a': 'Language material',
    'c': 'Notated music',
    'd': 'Manuscript notated music',
    'e': 'Cartographic material',
    'f': 'Manuscript cartographic material',
    'g': 'Projected medium',
    'i': 'Nonmusical sound recording',
    'j': 'Musical sound recording',
    'k': 'Two-dimensional nonprojectable graphic',
    'm': 'Computer file',
    'o': 'Kit',
    'p': 'Mixed materials',
    'r': 'Three-dimensional artifact or naturally occurring object',
    't': 'Manuscript language material',
    }
# Lookup table for RDA carrier (material) types
material_types = {
'sg': 'Audio cartridge',
'se': 'Audio cylinder',
'sd': 'Audio disc',
'si': 'Sound track reel',
'sq': 'Audio roll',
'ss': 'Audiocassette',
'st': 'Audiotape reel',
'sz': 'Unspecified audio resource',
'ck': 'Computer card',
'cb': 'Computer chip cartridge',
'cd': 'Computer disc',
'ce': 'Computer disc cartridge',
'ca': 'Computer tape cartridge',
'cf': 'Computer tape cassette',
'ch': 'Computer tape reel',
'cr': 'Online resource',
'cz': 'Unspecified computer resource',
'ha': 'Aperture card',
'he': 'Microfiche',
'hf': 'Microfiche cassette',
'hb': 'Microfilm cartridge',
'hc': 'Microfilm cassette',
'hd': 'Microfilm reel',
'hj': 'Microfilm roll',
'hh': 'Microfilm slip',
'hg': 'Microopaque',
'hz': 'Unspecified microform resource',
'pp': 'Microscope slide',
'pz': 'Unspecified microscopic resource',
'mc': 'Film cartridge',
'mf': 'Film cassette',
'mr': 'Film reel',
'mo': 'Film roll',
'gd': 'Filmslip',
'gf': 'Filmstrip',
'gc': 'Filmstrip cartridge',
'gt': 'Overhead transparency',
'gs': 'Slide',
'mz': 'Unspecified projected image resource',
'eh': 'Stereograph card',
'es': 'Stereograph disc',
'ez': 'Unspecified stereographic resource',
'no': 'Card',
'nn': 'Flipchart',
'na': 'Roll',
'nb': 'Sheet',
'nc': 'Volume',
'nr': 'Object',
'nz': 'Unspecified unmediated resource',
'vc': 'Video cartridge',
'vf': 'Videocassette ',
'vd': 'Videodisc',
'vr': 'Videotape reel',
'vz': 'Unspecified video resource',
'zu': 'Unspecified resource',
}
# Lookup table for resource types from LDR/07 codes
# Maps MARC leader position 07 ('bibliographic level') to a display name.
resource_types = {
    'a': 'Monographic component part',
    'b': 'Serial component part',
    'c': 'Collection',
    'd': 'Subunit',
    'i': 'Integrating resource',
    'm': 'Monograph',
    's': 'Serial',
    }
# Lookup table for encoding levels from LDR/17 codes
# Maps MARC leader position 17 to a display string.  Note the first key
# is a single space (rendered as '#' in the display value, per MARC
# convention for blank positions).
encoding_levels = {
    ' ': '# - Full level',
    '1': '1 - Full level, material not examined',
    '2': '2 - Less-than-full level, material not examined',
    '3': '3 - Abbreviated level',
    '4': '4 - Core level',
    '5': '5 - Partial (preliminary) level',
    '7': '7 - Minimal level',
    '8': '8 - Prepublication level',
    'u': 'u - Unknown',
    'z': 'z - Not applicable',
    }
# Lookup table for target audience from 008/22 codes
# Maps MARC 008 position 22 (target audience) to a display name.
audiences = {
    'a': 'Preschool',
    'b': 'Primary',
    'c': 'Pre-adolescent',
    'd': 'Adolescent',
    'e': 'Adult',
    'f': 'Specialized',
    'g': 'General',
    'j': 'Juvenile',
    }
# Lookup table for literary form from 008/33 codes
# Maps MARC 008 position 33 (literary form) to a display name.
literary_forms = {
    '0': 'Not fiction',
    '1': 'Fiction',
    'd': 'Dramas',
    'e': 'Essays',
    'f': 'Novels',
    'h': 'Humor, satires, etc.',
    'i': 'Letters',
    'j': 'Short stories',
    'm': 'Mixed forms',
    'p': 'Poetry',
    's': 'Speeches',
    }
# Lookup table for MARC relator codes
# Maps relator code -> role term (lower-cased for display).  Contains
# one local addition ('eds', flagged inline as not a MARC code);
# presumably the full list is retained to cope with legacy records -
# confirm before pruning any entries.
relators = {
    'abr': 'abridger',
    'acp': 'art copyist',
    'act': 'actor',
    'adi': 'art director',
    'adp': 'adapter',
    'aft': 'author of afterword or colophon',
    'anl': 'analyst',
    'anm': 'animator',
    'ann': 'annotator',
    'ant': 'bibliographic antecedent',
    'ape': 'appellee',
    'apl': 'appellant',
    'app': 'applicant',
    'aqt': 'author in quotations or text abstracts',
    'arc': 'architect',
    'ard': 'artistic director',
    'arr': 'arranger',
    'art': 'artist',
    'asg': 'assignee',
    'asn': 'associated name',
    'ato': 'autographer',
    'att': 'attributed name',
    'auc': 'auctioneer',
    'aud': 'author of dialog',
    'aui': 'author of introduction',
    'aus': 'screenwriter',
    'aut': 'author',
    'bdd': 'binding designer',
    'bjd': 'bookjacket designer',
    'bkd': 'book designer',
    'bkp': 'book producer',
    'blw': 'blurb writer',
    'bnd': 'binder',
    'bpd': 'bookplate designer',
    'brd': 'broadcaster',
    'brl': 'braille embosser',
    'bsl': 'bookseller',
    'cas': 'caster',
    'ccp': 'conceptor',
    'chr': 'choreographer',
    'clb': 'collaborator',
    'cli': 'client',
    'cll': 'calligrapher',
    'clr': 'colorist',
    'clt': 'collotyper',
    'cmm': 'commentator',
    'cmp': 'composer',
    'cmt': 'compositor',
    'cnd': 'conductor',
    'cng': 'cinematographer',
    'cns': 'censor',
    'coe': 'contestant-appellee',
    'col': 'collector',
    'com': 'compiler',
    'con': 'conservator',
    'cor': 'collection registrar',
    'cos': 'contestant',
    'cot': 'contestant-appellant',
    'cou': 'court governed',
    'cov': 'cover designer',
    'cpc': 'copyright claimant',
    'cpe': 'complainant-appellee',
    'cph': 'copyright holder',
    'cpl': 'complainant',
    'cpt': 'complainant-appellant',
    'cre': 'creator',
    'crp': 'correspondent',
    'crr': 'corrector',
    'crt': 'court reporter',
    'csl': 'consultant',
    'csp': 'consultant to a project',
    'cst': 'costume designer',
    'ctb': 'contributor',
    'cte': 'contestee-appellee',
    'ctg': 'cartographer',
    'ctr': 'contractor',
    'cts': 'contestee',
    'ctt': 'contestee-appellant',
    'cur': 'curator',
    'cwt': 'commentator for written text',
    'dbp': 'distribution place',
    'dfd': 'defendant',
    'dfe': 'defendant-appellee',
    'dft': 'defendant-appellant',
    'dgg': 'degree granting institution',
    'dgs': 'degree supervisor',
    'dis': 'dissertant',
    'dln': 'delineator',
    'dnc': 'dancer',
    'dnr': 'donor',
    'dpc': 'depicted',
    'dpt': 'depositor',
    'drm': 'draftsman',
    'drt': 'director',
    'dsr': 'designer',
    'dst': 'distributor',
    'dtc': 'data contributor',
    'dte': 'dedicatee',
    'dtm': 'data manager',
    'dto': 'dedicator',
    'dub': 'dubious author',
    'edc': 'editor of compilation',
    'edm': 'editor of moving image work',
    'eds': 'editor', # Not a MARC code
    'edt': 'editor',
    'egr': 'engraver',
    'elg': 'electrician',
    'elt': 'electrotyper',
    'eng': 'engineer',
    'enj': 'enacting jurisdiction',
    'etr': 'etcher',
    'evp': 'event place',
    'exp': 'expert',
    'fac': 'facsimilist',
    'fds': 'film distributor',
    'fld': 'field director',
    'flm': 'film editor',
    'fmd': 'film director',
    'fmk': 'filmmaker',
    'fmo': 'former owner',
    'fmp': 'film producer',
    'fnd': 'funder',
    'fpy': 'first party',
    'frg': 'forger',
    'gis': 'geographic information specialist',
    'grt': 'graphic technician',
    'his': 'host institution',
    'hnr': 'honoree',
    'hst': 'host',
    'ill': 'illustrator',
    'ilu': 'illuminator',
    'ins': 'inscriber',
    'inv': 'inventor',
    'isb': 'issuing body',
    'itr': 'instrumentalist',
    'ive': 'interviewee',
    'ivr': 'interviewer',
    'jud': 'judge',
    'jug': 'jurisdiction governed',
    'lbr': 'laboratory',
    'lbt': 'librettist',
    'ldr': 'laboratory director',
    'led': 'lead',
    'lee': 'libelee-appellee',
    'lel': 'libelee',
    'len': 'lender',
    'let': 'libelee-appellant',
    'lgd': 'lighting designer',
    'lie': 'libelant-appellee',
    'lil': 'libelant',
    'lit': 'libelant-appellant',
    'lsa': 'landscape architect',
    'lse': 'licensee',
    'lso': 'licensor',
    'ltg': 'lithographer',
    'lyr': 'lyricist',
    'mcp': 'music copyist',
    'mdc': 'metadata contact',
    'med': 'medium',
    'mfp': 'manufacture place',
    'mfr': 'manufacturer',
    'mod': 'moderator',
    'mon': 'monitor',
    'mrb': 'marbler',
    'mrk': 'markup editor',
    'msd': 'musical director',
    'mte': 'metal-engraver',
    'mtk': 'minute taker',
    'mus': 'musician',
    'nrt': 'narrator',
    'opn': 'opponent',
    'org': 'originator',
    'orm': 'organizer',
    'osp': 'onscreen presenter',
    'oth': 'other',
    'own': 'owner',
    'pan': 'panelist',
    'pat': 'patron',
    'pbd': 'publishing director',
    'pbl': 'publisher',
    'pdr': 'project director',
    'pfr': 'proofreader',
    'pht': 'photographer',
    'plt': 'platemaker',
    'pma': 'permitting agency',
    'pmn': 'production manager',
    'pop': 'printer of plates',
    'ppm': 'papermaker',
    'ppt': 'puppeteer',
    'pra': 'praeses',
    'prc': 'process contact',
    'prd': 'production personnel',
    'pre': 'presenter',
    'prf': 'performer',
    'prg': 'programmer',
    'prm': 'printmaker',
    'prn': 'production company',
    'pro': 'producer',
    'prp': 'production place',
    'prs': 'production designer',
    'prt': 'printer',
    'prv': 'provider',
    'pta': 'patent applicant',
    'pte': 'plaintiff-appellee',
    'ptf': 'plaintiff',
    'pth': 'patent holder',
    'ptt': 'plaintiff-appellant',
    'pup': 'publication place',
    'rbr': 'rubricator',
    'rcd': 'recordist',
    'rce': 'recording engineer',
    'rcp': 'addressee',
    'rdd': 'radio director',
    'red': 'redaktor',
    'ren': 'renderer',
    'res': 'researcher',
    'rev': 'reviewer',
    'rpc': 'radio producer',
    'rps': 'repository',
    'rpt': 'reporter',
    'rpy': 'responsible party',
    'rse': 'respondent-appellee',
    'rsg': 'restager',
    'rsp': 'respondent',
    'rsr': 'restorationist',
    'rst': 'respondent-appellant',
    'rth': 'research team head',
    'rtm': 'research team member',
    'sad': 'scientific advisor',
    'sce': 'scenarist',
    'scl': 'sculptor',
    'scr': 'scribe',
    'sds': 'sound designer',
    'sec': 'secretary',
    'sgd': 'stage director',
    'sgn': 'signer',
    'sht': 'supporting host',
    'sll': 'seller',
    'sng': 'singer',
    'spk': 'speaker',
    'spn': 'sponsor',
    'spy': 'second party',
    'srv': 'surveyor',
    'std': 'set designer',
    'stg': 'setting',
    'stl': 'storyteller',
    'stm': 'stage manager',
    'stn': 'standards body',
    'str': 'stereotyper',
    'tcd': 'technical director',
    'tch': 'teacher',
    'ths': 'thesis advisor',
    'tld': 'television director',
    'tlp': 'television producer',
    'trc': 'transcriber',
    'trl': 'translator',
    'tyd': 'type designer',
    'tyg': 'typographer',
    'uvp': 'university place',
    'vac': 'voice actor',
    'vdg': 'videographer',
    'voc': 'vocalist',
    'wac': 'writer of added commentary',
    'wal': 'writer of added lyrics',
    'wam': 'writer of accompanying material',
    'wat': 'writer of added text',
    'wdc': 'woodcutter',
    'wde': 'wood engraver',
    'win': 'writer of introduction',
    'wit': 'witness',
    'wpr': 'writer of preface',
    'wst': 'writer of supplementary textual content',
    }
# Lookup table mapping place codes (apparently MARC country codes — confirm
# against the Library of Congress code list) to English country names.
# Codes for sub-national regions (US states, Canadian provinces, Australian
# states/territories) map to the containing country, noted in a trailing
# comment; codes retired by the maintaining authority are marked "# Discontinued".
countries = {
    'aa': 'Albania',
    'abc': 'Canada', # Alberta
    'ac': 'Ashmore and Cartier Islands', # Discontinued
    'aca': 'Australia', # Australian Capital Territory
    'ae': 'Algeria',
    'af': 'Afghanistan',
    'ag': 'Argentina',
    'ai': 'Armenia (Republic)',
    'air': 'Armenian S.S.R.', # Discontinued
    'aj': 'Azerbaijan',
    'ajr': 'Azerbaijan S.S.R.', # Discontinued
    'aku': 'United States of America', # Alaska
    'alu': 'United States of America', # Alabama
    'am': 'Anguilla',
    'an': 'Andorra',
    'ao': 'Angola',
    'aq': 'Antigua and Barbuda',
    'aru': 'United States of America', # Arkansas
    'as': 'American Samoa',
    'at': 'Australia',
    'au': 'Austria',
    'aw': 'Aruba',
    'ay': 'Antarctica',
    'azu': 'United States of America', # Arizona
    'ba': 'Bahrain',
    'bb': 'Barbados',
    'bcc': 'Canada', # British Columbia
    'bd': 'Burundi',
    'be': 'Belgium',
    'bf': 'Bahamas',
    'bg': 'Bangladesh',
    'bh': 'Belize',
    'bi': 'British Indian Ocean Territory',
    'bl': 'Brazil',
    'bm': 'Bermuda Islands',
    'bn': 'Bosnia and Hercegovina',
    'bo': 'Bolivia',
    'bp': 'Solomon Islands',
    'br': 'Burma',
    'bs': 'Botswana',
    'bt': 'Bhutan',
    'bu': 'Bulgaria',
    'bv': 'Bouvet Island',
    'bw': 'Belarus',
    'bwr': 'Byelorussian S.S.R.', # Discontinued
    'bx': 'Brunei',
    'ca': 'Caribbean Netherlands',
    'cau': 'United States of America', # California
    'cb': 'Cambodia',
    'cc': 'China',
    'cd': 'Chad',
    'ce': 'Sri Lanka',
    'cf': 'Congo (Brazzaville)',
    'cg': 'Congo (Democratic Republic)',
    'ch': 'China (Republic : 1949-)',
    'ci': 'Croatia',
    'cj': 'Cayman Islands',
    'ck': 'Colombia',
    'cl': 'Chile',
    'cm': 'Cameroon',
    'cn': 'Canada', # Discontinued
    'co': 'Curaçao',
    'cou': 'United States of America', # Colorado
    'cp': 'Canton and Enderbury Islands', # Discontinued
    'cq': 'Comoros',
    'cr': 'Costa Rica',
    'cs': 'Czechoslovakia', # Discontinued
    'ctu': 'United States of America', # Connecticut
    'cu': 'Cuba',
    'cv': 'Cabo Verde',
    'cw': 'Cook Islands',
    'cx': 'Central African Republic',
    'cy': 'Cyprus',
    'cz': 'Canal Zone', # Discontinued
    'dcu': 'United States of America', # District of Columbia
    'deu': 'United States of America', # Delaware
    'dk': 'Denmark',
    'dm': 'Benin',
    'dq': 'Dominica',
    'dr': 'Dominican Republic',
    'ea': 'Eritrea',
    'ec': 'Ecuador',
    'eg': 'Equatorial Guinea',
    'em': 'Timor-Leste',
    'enk': 'England',
    'er': 'Estonia',
    'err': 'Estonia', # Discontinued
    'es': 'El Salvador',
    'et': 'Ethiopia',
    'fa': 'Faroe Islands',
    'fg': 'French Guiana',
    'fi': 'Finland',
    'fj': 'Fiji',
    'fk': 'Falkland Islands',
    'flu': 'United States of America', # Florida
    'fm': 'Micronesia (Federated States)',
    'fp': 'French Polynesia',
    'fr': 'France',
    'fs': 'Terres australes et antarctiques françaises',
    'ft': 'Djibouti',
    'gau': 'United States of America', # Georgia
    'gb': 'Kiribati',
    'gd': 'Grenada',
    'ge': 'Germany (East)', # Discontinued
    'gh': 'Ghana',
    'gi': 'Gibraltar',
    'gl': 'Greenland',
    'gm': 'Gambia',
    'gn': 'Gilbert and Ellice Islands', # Discontinued
    'go': 'Gabon',
    'gp': 'Guadeloupe',
    'gr': 'Greece',
    'gs': 'Georgia (Republic)',
    'gsr': 'Georgian S.S.R.', # Discontinued
    'gt': 'Guatemala',
    'gu': 'Guam',
    'gv': 'Guinea',
    'gw': 'Germany',
    'gy': 'Guyana',
    'gz': 'Gaza Strip',
    'hiu': 'United States of America', # Hawaii
    'hk': 'Hong Kong', # Discontinued
    'hm': 'Heard and McDonald Islands',
    'ho': 'Honduras',
    'ht': 'Haiti',
    'hu': 'Hungary',
    'iau': 'United States of America', # Iowa
    'ic': 'Iceland',
    'idu': 'United States of America', # Idaho
    'ie': 'Ireland',
    'ii': 'India',
    'ilu': 'United States of America', # Illinois
    'inu': 'United States of America', # Indiana
    'io': 'Indonesia',
    'iq': 'Iraq',
    'ir': 'Iran',
    'is': 'Israel',
    'it': 'Italy',
    'iu': 'Israel-Syria Demilitarized Zones', # Discontinued
    'iv': 'Côte d\'Ivoire',
    'iw': 'Israel-Jordan Demilitarized Zones', # Discontinued
    'iy': 'Iraq-Saudi Arabia Neutral Zone',
    'ja': 'Japan',
    'ji': 'Johnston Atoll',
    'jm': 'Jamaica',
    'jn': 'Jan Mayen', # Discontinued
    'jo': 'Jordan',
    'ke': 'Kenya',
    'kg': 'Kyrgyzstan',
    'kgr': 'Kirghiz S.S.R.', # Discontinued
    'kn': 'Korea (North)',
    'ko': 'Korea (South)',
    'ksu': 'United States of America', # Kansas
    'ku': 'Kuwait',
    'kv': 'Kosovo',
    'kyu': 'United States of America', # Kentucky
    'kz': 'Kazakhstan',
    'kzr': 'Kazakh S.S.R.', # Discontinued
    'lau': 'United States of America', # Louisiana
    'lb': 'Liberia',
    'le': 'Lebanon',
    'lh': 'Liechtenstein',
    'li': 'Lithuania',
    'lir': 'Lithuania', # Discontinued
    'ln': 'Central and Southern Line Islands', # Discontinued
    'lo': 'Lesotho',
    'ls': 'Laos',
    'lu': 'Luxembourg',
    'lv': 'Latvia',
    'lvr': 'Latvia', # Discontinued
    'ly': 'Libya',
    'mau': 'United States of America', # Massachusetts
    'mbc': 'Canada', # Manitoba
    'mc': 'Monaco',
    'mdu': 'United States of America', # Maryland
    'meu': 'United States of America', # Maine
    'mf': 'Mauritius',
    'mg': 'Madagascar',
    'mh': 'Macao', # Discontinued
    'miu': 'United States of America', # Michigan
    'mj': 'Montserrat',
    'mk': 'Oman',
    'ml': 'Mali',
    'mm': 'Malta',
    'mnu': 'United States of America', # Minnesota
    'mo': 'Montenegro',
    'mou': 'United States of America', # Missouri
    'mp': 'Mongolia',
    'mq': 'Martinique',
    'mr': 'Morocco',
    'msu': 'United States of America', # Mississippi
    'mtu': 'United States of America', # Montana
    'mu': 'Mauritania',
    'mv': 'Moldova',
    'mvr': 'Moldavian S.S.R.', # Discontinued
    'mw': 'Malawi',
    'mx': 'Mexico',
    'my': 'Malaysia',
    'mz': 'Mozambique',
    'na': 'Netherlands Antilles', # Discontinued
    'nbu': 'United States of America', # Nebraska
    'ncu': 'United States of America', # North Carolina
    'ndu': 'United States of America', # North Dakota
    'ne': 'Netherlands',
    'nfc': 'Canada', # Newfoundland and Labrador
    'ng': 'Niger',
    'nhu': 'United States of America', # New Hampshire
    'nik': 'Northern Ireland',
    'nju': 'United States of America', # New Jersey
    'nkc': 'Canada', # New Brunswick
    'nl': 'New Caledonia',
    'nm': 'Northern Mariana Islands', # Discontinued
    'nmu': 'United States of America', # New Mexico
    'nn': 'Vanuatu',
    'no': 'Norway',
    'np': 'Nepal',
    'nq': 'Nicaragua',
    'nr': 'Nigeria',
    'nsc': 'Canada', # Nova Scotia
    'ntc': 'Canada', # Northwest Territories
    'nu': 'Nauru',
    'nuc': 'Canada', # Nunavut
    'nvu': 'United States of America', # Nevada
    'nw': 'Northern Mariana Islands',
    'nx': 'Norfolk Island',
    'nyu': 'United States of America', # New York (State)
    'nz': 'New Zealand',
    'ohu': 'United States of America', # Ohio
    'oku': 'United States of America', # Oklahoma
    'onc': 'Canada', # Ontario
    'oru': 'United States of America', # Oregon
    'ot': 'Mayotte',
    'pau': 'United States of America', # Pennsylvania
    'pc': 'Pitcairn Island',
    'pe': 'Peru',
    'pf': 'Paracel Islands',
    'pg': 'Guinea-Bissau',
    'ph': 'Philippines',
    'pic': 'Canada', # Prince Edward Island
    'pk': 'Pakistan',
    'pl': 'Poland',
    'pn': 'Panama',
    'po': 'Portugal',
    'pp': 'Papua New Guinea',
    'pr': 'Puerto Rico',
    'pt': 'Portuguese Timor', # Discontinued
    'pw': 'Palau',
    'py': 'Paraguay',
    'qa': 'Qatar',
    'qea': 'Australia', # Queensland
    'quc': 'Canada', # Québec (Province)
    'rb': 'Serbia',
    're': 'Réunion',
    'rh': 'Zimbabwe',
    'riu': 'United States of America', # Rhode Island
    'rm': 'Romania',
    'ru': 'Russia',
    'rur': 'Russian S.F.S.R.', # Discontinued
    'rw': 'Rwanda',
    'ry': 'Ryukyu Islands, Southern', # Discontinued
    'sa': 'South Africa',
    'sb': 'Svalbard', # Discontinued
    'sc': 'Saint-Barthélemy',
    'scu': 'United States of America', # South Carolina
    'sd': 'South Sudan',
    'sdu': 'United States of America', # South Dakota
    'se': 'Seychelles',
    'sf': 'Sao Tome and Principe',
    'sg': 'Senegal',
    'sh': 'Spanish North Africa',
    'si': 'Singapore',
    'sj': 'Sudan',
    'sk': 'Sikkim', # Discontinued
    'sl': 'Sierra Leone',
    'sm': 'San Marino',
    'sn': 'Sint Maarten',
    'snc': 'Canada', # Saskatchewan
    'so': 'Somalia',
    'sp': 'Spain',
    'sq': 'Swaziland',
    'sr': 'Surinam',
    'ss': 'Western Sahara',
    'st': 'Saint-Martin',
    'stk': 'Scotland',
    'su': 'Saudi Arabia',
    'sv': 'Swan Islands', # Discontinued
    'sw': 'Sweden',
    'sx': 'Namibia',
    'sy': 'Syria',
    'sz': 'Switzerland',
    'ta': 'Tajikistan',
    'tar': 'Tajik S.S.R.', # Discontinued
    'tc': 'Turks and Caicos Islands',
    'tg': 'Togo',
    'th': 'Thailand',
    'ti': 'Tunisia',
    'tk': 'Turkmenistan',
    'tkr': 'Turkmen S.S.R.', # Discontinued
    'tl': 'Tokelau',
    'tma': 'Australia', # Tasmania
    'tnu': 'United States of America', # Tennessee
    'to': 'Tonga',
    'tr': 'Trinidad and Tobago',
    'ts': 'United Arab Emirates',
    'tt': 'Trust Territory of the Pacific Islands', # Discontinued
    'tu': 'Turkey',
    'tv': 'Tuvalu',
    'txu': 'United States of America', # Texas
    'tz': 'Tanzania',
    'ua': 'Egypt',
    'uc': 'United States Miscellaneous Caribbean Islands',
    'ug': 'Uganda',
    'ui': 'United Kingdom Miscellaneous Islands', # Discontinued
    'uik': 'United Kingdom Miscellaneous Islands',
    'uk': 'United Kingdom', # Discontinued
    'un': 'Ukraine',
    'unr': 'Ukraine', # Discontinued
    'up': 'United States Miscellaneous Pacific Islands',
    'ur': 'Soviet Union', # Discontinued
    'us': 'United States of America', # Discontinued
    'utu': 'United States of America', # Utah
    'uv': 'Burkina Faso',
    'uy': 'Uruguay',
    'uz': 'Uzbekistan',
    'uzr': 'Uzbek S.S.R.', # Discontinued
    'vau': 'United States of America', # Virginia
    'vb': 'British Virgin Islands',
    'vc': 'Vatican City',
    've': 'Venezuela',
    'vi': 'Virgin Islands of the United States',
    'vm': 'Vietnam',
    'vn': 'Vietnam, North', # Discontinued
    # 'vp': 'Various places',
    'vra': 'Australia', # Victoria
    'vs': 'Vietnam, South', # Discontinued
    'vtu': 'United States of America', # Vermont
    'wau': 'United States of America', # Washington (State)
    'wb': 'West Berlin', # Discontinued
    'wea': 'Australia', # Western Australia
    'wf': 'Wallis and Futuna',
    'wiu': 'United States of America', # Wisconsin
    'wj': 'West Bank of the Jordan River',
    'wk': 'Wake Island',
    'wlk': 'Wales',
    'ws': 'Samoa',
    'wvu': 'United States of America', # West Virginia
    'wyu': 'United States of America', # Wyoming
    'xa': 'Christmas Island (Indian Ocean)',
    'xb': 'Cocos (Keeling) Islands',
    'xc': 'Maldives',
    'xd': 'Saint Kitts-Nevis',
    'xe': 'Marshall Islands',
    'xf': 'Midway Islands',
    'xga': 'Australia', # Coral Sea Islands Territory
    'xh': 'Niue',
    'xi': 'Saint Kitts-Nevis-Anguilla', # Discontinued
    'xj': 'Saint Helena',
    'xk': 'Saint Lucia',
    'xl': 'Saint Pierre and Miquelon',
    'xm': 'Saint Vincent and the Grenadines',
    'xn': 'Macedonia',
    'xna': 'Australia', # New South Wales
    'xo': 'Slovakia',
    'xoa': 'Australia', # Northern Territory
    'xp': 'Spratly Island',
    'xr': 'Czech Republic',
    'xra': 'Australia', # South Australia
    'xs': 'South Georgia and the South Sandwich Islands',
    'xv': 'Slovenia',
    'xxc': 'Canada',
    'xxk': 'United Kingdom',
    'xxr': 'Soviet Union', # Discontinued
    'xxu': 'United States of America',
    'ye': 'Yemen',
    'ykc': 'Canada', # Yukon Territory
    'ys': 'Yemen (People\'s Democratic Republic)', # Discontinued
    'yu': 'Serbia and Montenegro', # Discontinued
    'za': 'Zambia',
    }
# Lookup table mapping language codes (apparently MARC language codes —
# confirm against the Library of Congress code list) to English names.
# Codes retired by the maintaining authority are marked "# Discontinued".
# NOTE(review): several values had lost non-ASCII characters (apostrophes
# and diacritics, e.g. 'Bokml', 'Volapk', 'Provenal') and one had a typo
# ('Scottish Gaelix'); these have been restored per the LC code list.
languages = {
    'aar': 'Afar',
    'abk': 'Abkhazian',
    'ace': 'Achinese',
    'ach': 'Acoli',
    'ada': 'Adangme',
    'ady': 'Adyghe',
    'afa': 'Afro-Asiatic languages',
    'afh': 'Afrihili',
    'afr': 'Afrikaans',
    'ain': 'Ainu',
    'ajm': 'Aljamia', # Discontinued
    'aka': 'Akan',
    'akk': 'Akkadian',
    'alb': 'Albanian',
    'ale': 'Aleut',
    'alg': 'Algonquian languages',
    'alt': 'Altai',
    'amh': 'Amharic',
    'ang': 'English, Old (ca. 450-1100)',
    'anp': 'Angika',
    'apa': 'Apache languages',
    'ara': 'Arabic',
    'arc': 'Official Aramaic (700-300 BCE)',
    'arg': 'Aragonese',
    'arm': 'Armenian',
    'arn': 'Mapuche',
    'arp': 'Arapaho',
    'art': 'Artificial languages',
    'arw': 'Arawak',
    'asm': 'Assamese',
    'ast': 'Asturian',
    'ath': 'Athapascan languages',
    'aus': 'Australian languages',
    'ava': 'Avaric',
    'ave': 'Avestan',
    'awa': 'Awadhi',
    'aym': 'Aymara',
    'aze': 'Azerbaijani',
    'bad': 'Banda languages',
    'bai': 'Bamileke languages',
    'bak': 'Bashkir',
    'bal': 'Baluchi',
    'bam': 'Bambara',
    'ban': 'Balinese',
    'baq': 'Basque',
    'bas': 'Basa',
    'bat': 'Baltic languages',
    'bej': 'Beja',
    'bel': 'Belarusian',
    'bem': 'Bemba',
    'ben': 'Bengali',
    'ber': 'Berber languages',
    'bho': 'Bhojpuri',
    'bih': 'Bihari languages',
    'bik': 'Bikol',
    'bin': 'Bini',
    'bis': 'Bislama',
    'bla': 'Siksika',
    'bnt': 'Bantu languages',
    'bos': 'Bosnian',
    'bra': 'Braj',
    'bre': 'Breton',
    'btk': 'Batak languages',
    'bua': 'Buriat',
    'bug': 'Buginese',
    'bul': 'Bulgarian',
    'bur': 'Burmese',
    'byn': 'Blin',
    'cad': 'Caddo',
    'cai': 'Central American Indian languages',
    'cam': 'Khmer', # Discontinued
    'car': 'Galibi Carib',
    'cat': 'Catalan',
    'cau': 'Caucasian languages',
    'ceb': 'Cebuano',
    'cel': 'Celtic languages',
    'cha': 'Chamorro',
    'chb': 'Chibcha',
    'che': 'Chechen',
    'chg': 'Chagatai',
    'chi': 'Chinese',
    'chk': 'Chuukese',
    'chm': 'Mari',
    'chn': 'Chinook jargon',
    'cho': 'Choctaw',
    'chp': 'Chipewyan',
    'chr': 'Cherokee',
    'chu': 'Church Slavic',
    'chv': 'Chuvash',
    'chy': 'Cheyenne',
    'cmc': 'Chamic languages',
    'cop': 'Coptic',
    'cor': 'Cornish',
    'cos': 'Corsican',
    'cpe': 'Creoles and pidgins, English based',
    'cpf': 'Creoles and pidgins, French-based',
    'cpp': 'Creoles and pidgins, Portuguese-based',
    'cre': 'Cree',
    'crh': 'Crimean Tatar',
    'crp': 'Creoles and Pidgins',
    'csb': 'Kashubian',
    'cus': 'Cushitic languages',
    'cze': 'Czech',
    'dak': 'Dakota',
    'dan': 'Danish',
    'dar': 'Dargwa',
    'day': 'Land Dayak languages',
    'del': 'Delaware',
    'den': 'Slave (Athapascan)',
    'dgr': 'Dogrib',
    'din': 'Dinka',
    'div': 'Divehi',
    'doi': 'Dogri',
    'dra': 'Dravidian languages',
    'dsb': 'Lower Sorbian',
    'dua': 'Duala',
    'dum': 'Dutch, Middle (ca. 1050-1350)',
    'dut': 'Dutch',
    'dyu': 'Dyula',
    'dzo': 'Dzongkha',
    'efi': 'Efik',
    'egy': 'Egyptian (Ancient)',
    'eka': 'Ekajuk',
    'elx': 'Elamite',
    'eng': 'English',
    'enm': 'English, Middle (1100-1500)',
    'epo': 'Esperanto',
    'esk': 'Eskimo', # Discontinued
    'esp': 'Esperanto', # Discontinued
    'est': 'Estonian',
    'eth': 'Ethiopic', # Discontinued
    'ewe': 'Ewe',
    'ewo': 'Ewondo',
    'fan': 'Fang',
    'fao': 'Faroese', # Discontinued
    'far': 'Faroese',
    'fat': 'Fanti',
    'fij': 'Fijian',
    'fil': 'Filipino',
    'fin': 'Finnish',
    'fiu': 'Finno-Ugrian languages',
    'fon': 'Fon',
    'fre': 'French',
    'fri': 'Frisian', # Discontinued
    'frm': 'French, Middle (ca. 1300-1600)',
    'fro': 'French, Old (ca. 842-1300)',
    'frr': 'Northern Frisian',
    'frs': 'Eastern Frisian',
    'fry': 'Western Frisian',
    'ful': 'Fulah',
    'fur': 'Friulian',
    'gaa': 'Ga',
    'gae': 'Scottish Gaelic', # Discontinued
    'gag': 'Galician', # Discontinued
    'gal': 'Oromo', # Discontinued
    'gay': 'Gayo',
    'gba': 'Gbaya',
    'gem': 'Germanic languages',
    'geo': 'Georgian',
    'ger': 'German',
    'gez': 'Geez',
    'gil': 'Gilbertese',
    'gla': 'Gaelic',
    'gle': 'Irish',
    'glg': 'Galician',
    'glv': 'Manx',
    'gmh': 'German, Middle High (ca.1050-1500)',
    'goh': 'German, Old High (ca.750-1050)',
    'gon': 'Gondi',
    'gor': 'Gorontalo',
    'got': 'Gothic',
    'grb': 'Grebo',
    'grc': 'Greek, Ancient (to 1453)',
    'gre': 'Greek, Modern (1453-)',
    'grn': 'Guarani',
    'gsw': 'Swiss German',
    'gua': 'Guarani', # Discontinued
    'guj': 'Gujarati',
    'gwi': 'Gwich\'in',
    'hai': 'Haida',
    'hat': 'Haitian',
    'hau': 'Hausa',
    'haw': 'Hawaiian',
    'heb': 'Hebrew',
    'her': 'Herero',
    'hil': 'Hiligaynon',
    'him': 'Western Pahari languages',
    'hin': 'Hindi',
    'hit': 'Hittite',
    'hmn': 'Hmong',
    'hmo': 'Hiri Motu',
    'hrv': 'Croatian',
    'hsb': 'Upper Sorbian',
    'hun': 'Hungarian',
    'hup': 'Hupa',
    'iba': 'Iban',
    'ibo': 'Igbo',
    'ice': 'Icelandic',
    'ido': 'Ido',
    'iii': 'Sichuan Yi',
    'ijo': 'Ijo languages',
    'iku': 'Inuktitut',
    'ile': 'Interlingue',
    'ilo': 'Iloko',
    'ina': 'Interlingua (International Auxiliary Language Association)',
    'inc': 'Indic languages',
    'ind': 'Indonesian',
    'ine': 'Indo-European languages',
    'inh': 'Ingush',
    'int': 'Interlingua (International Auxiliary Language Association)', # Discontinued
    'ipk': 'Inupiaq',
    'ira': 'Iranian languages',
    'iri': 'Irish', # Discontinued
    'iro': 'Iroquoian languages',
    'ita': 'Italian',
    'jav': 'Javanese',
    'jbo': 'Lojban',
    'jpn': 'Japanese',
    'jpr': 'Judeo-Persian',
    'jrb': 'Judeo-Arabic',
    'kaa': 'Kara-Kalpak',
    'kab': 'Kabyle',
    'kac': 'Kachin',
    'kal': 'Kalâtdlisut',
    'kam': 'Kamba',
    'kan': 'Kannada',
    'kar': 'Karen languages',
    'kas': 'Kashmiri',
    'kau': 'Kanuri',
    'kaw': 'Kawi',
    'kaz': 'Kazakh',
    'kbd': 'Kabardian',
    'kha': 'Khasi',
    'khi': 'Khoisan languages',
    'khm': 'Central Khmer',
    'kho': 'Khotanese',
    'kik': 'Kikuyu',
    'kin': 'Kinyarwanda',
    'kir': 'Kirghiz',
    'kmb': 'Kimbundu',
    'kok': 'Konkani',
    'kom': 'Komi',
    'kon': 'Kongo',
    'kor': 'Korean',
    'kos': 'Kosraean',
    'kpe': 'Kpelle',
    'krc': 'Karachay-Balkar',
    'krl': 'Karelian',
    'kro': 'Kru languages',
    'kru': 'Kurukh',
    'kua': 'Kuanyama',
    'kum': 'Kumyk',
    'kur': 'Kurdish',
    'kus': 'Kusaie', # Discontinued
    'kut': 'Kutenai',
    'lad': 'Ladino',
    'lah': 'Lahnda',
    'lam': 'Lamba',
    'lan': 'Occitan (post 1500)', # Discontinued
    'lao': 'Lao',
    'lap': 'Sami', # Discontinued
    'lat': 'Latin',
    'lav': 'Latvian',
    'lez': 'Lezghian',
    'lim': 'Limburgan',
    'lin': 'Lingala',
    'lit': 'Lithuanian',
    'lol': 'Mongo',
    'loz': 'Lozi',
    'ltz': 'Luxembourgish',
    'lua': 'Luba-Lulua',
    'lub': 'Luba-Katanga',
    'lug': 'Ganda',
    'lui': 'Luiseno',
    'lun': 'Lunda',
    'luo': 'Luo (Kenya and Tanzania)',
    'lus': 'Lushai',
    'mac': 'Macedonian',
    'mad': 'Madurese',
    'mag': 'Magahi',
    'mah': 'Marshallese',
    'mai': 'Maithili',
    'mak': 'Makasar',
    'mal': 'Malayalam',
    'man': 'Mandingo',
    'mao': 'Maori',
    'map': 'Austronesian languages',
    'mar': 'Marathi',
    'mas': 'Masai',
    'may': 'Malay',
    'max': 'Manx', # Discontinued
    'mdf': 'Moksha',
    'mdr': 'Mandar',
    'men': 'Mende',
    'mga': 'Irish, Middle (900-1200)',
    'mic': 'Mi\'kmaq',
    'min': 'Minangkabau',
    'mkh': 'Mon-Khmer languages',
    'mla': 'Malagasy', # Discontinued
    'mlg': 'Malagasy',
    'mlt': 'Maltese',
    'mnc': 'Manchu',
    'mni': 'Manipuri',
    'mno': 'Manobo languages',
    'moh': 'Mohawk',
    'mol': 'Moldavian', # Discontinued
    'mon': 'Mongolian',
    'mos': 'Mossi',
    'mun': 'Munda languages',
    'mus': 'Creek',
    'mwl': 'Mirandese',
    'mwr': 'Marwari',
    'myn': 'Mayan languages',
    'myv': 'Erzya',
    'nah': 'Nahuatl languages',
    'nai': 'North American Indian languages',
    'nap': 'Neapolitan',
    'nau': 'Nauru',
    'nav': 'Navajo',
    'nbl': 'South Ndebele',
    'nde': 'North Ndebele',
    'ndo': 'Ndonga',
    'nds': 'Low German',
    'nep': 'Nepali',
    'new': 'Nepal Bhasa',
    'nia': 'Nias',
    'nic': 'Niger-Kordofanian languages',
    'niu': 'Niuean',
    'nno': 'Norwegian Nynorsk',
    'nob': 'Norwegian Bokmål',
    'nog': 'Nogai',
    'non': 'Norse, Old',
    'nor': 'Norwegian',
    'nqo': 'N\'Ko',
    'nso': 'Pedi',
    'nub': 'Nubian languages',
    'nwc': 'Classical Newari',
    'nya': 'Chichewa',
    'nym': 'Nyamwezi',
    'nyn': 'Nyankole',
    'nyo': 'Nyoro',
    'nzi': 'Nzima',
    'oci': 'Occitan (post 1500)',
    'oji': 'Ojibwa',
    'ori': 'Oriya',
    'orm': 'Oromo',
    'osa': 'Osage',
    'oss': 'Ossetian',
    'ota': 'Turkish, Ottoman (1500-1928)',
    'oto': 'Otomian languages',
    'paa': 'Papuan languages',
    'pag': 'Pangasinan',
    'pal': 'Pahlavi',
    'pam': 'Pampanga',
    'pan': 'Punjabi',
    'pap': 'Papiamento',
    'pau': 'Palauan',
    'peo': 'Persian, Old (ca. 600-400 B.C.)',
    'per': 'Persian',
    'phi': 'Philippine languages',
    'phn': 'Phoenician',
    'pli': 'Pali',
    'pol': 'Polish',
    'pon': 'Pohnpeian',
    'por': 'Portuguese',
    'pra': 'Prakrit languages',
    'pro': 'Provençal, Old (to 1500)',
    'pus': 'Pushto',
    'que': 'Quechua',
    'raj': 'Rajasthani',
    'rap': 'Rapanui',
    'rar': 'Rarotongan',
    'roa': 'Romance languages',
    'roh': 'Romansh',
    'rom': 'Romany',
    'rum': 'Romanian',
    'run': 'Rundi',
    'rup': 'Aromanian',
    'rus': 'Russian',
    'sad': 'Sandawe',
    'sag': 'Sango',
    'sah': 'Yakut',
    'sai': 'South American Indian languages',
    'sal': 'Salishan languages',
    'sam': 'Samaritan Aramaic',
    'san': 'Sanskrit',
    'sao': 'Samoan', # Discontinued
    'sas': 'Sasak',
    'sat': 'Santali',
    'scn': 'Sicilian',
    'scc': 'Serbian', # Discontinued
    'sco': 'Scots',
    'scr': 'Croatian', # Discontinued
    'sel': 'Selkup',
    'sem': 'Semitic languages',
    'sga': 'Irish, Old (to 900)',
    'sgn': 'Sign Languages',
    'shn': 'Shan',
    'sho': 'Shona', # Discontinued
    'sid': 'Sidamo',
    'sin': 'Sinhala',
    'sio': 'Siouan languages',
    'sit': 'Sino-Tibetan languages',
    'sla': 'Slavic languages',
    'slo': 'Slovak',
    'slv': 'Slovenian',
    'sma': 'Southern Sami',
    'sme': 'Northern Sami',
    'smi': 'Sami languages',
    'smj': 'Lule Sami',
    'smn': 'Inari Sami',
    'smo': 'Samoan',
    'sms': 'Skolt Sami',
    'sna': 'Shona',
    'snd': 'Sindhi',
    'snh': 'Sinhalese', # Discontinued
    'snk': 'Soninke',
    'sog': 'Sogdian',
    'som': 'Somali',
    'son': 'Songhai languages',
    'sot': 'Sotho, Southern',
    'spa': 'Spanish',
    'srd': 'Sardinian',
    'srn': 'Sranan Tongo',
    'srp': 'Serbian',
    'srr': 'Serer',
    'ssa': 'Nilo-Saharan languages',
    'sso': 'Sotho', # Discontinued
    'ssw': 'Swati',
    'suk': 'Sukuma',
    'sun': 'Sundanese',
    'sus': 'Susu',
    'sux': 'Sumerian',
    'swa': 'Swahili',
    'swe': 'Swedish',
    'swz': 'Swazi', # Discontinued
    'syc': 'Classical Syriac',
    'syr': 'Syriac',
    'tag': 'Tagalog', # Discontinued
    'tah': 'Tahitian',
    'tai': 'Tai languages',
    'taj': 'Tajik', # Discontinued
    'tam': 'Tamil',
    'tar': 'Tatar', # Discontinued
    'tat': 'Tatar',
    'tel': 'Telugu',
    'tem': 'Timne',
    'ter': 'Tereno',
    'tet': 'Tetum',
    'tgk': 'Tajik',
    'tgl': 'Tagalog',
    'tha': 'Thai',
    'tib': 'Tibetan',
    'tig': 'Tigre',
    'tir': 'Tigrinya',
    'tiv': 'Tiv',
    'tkl': 'Tokelau',
    'tlh': 'Klingon',
    'tli': 'Tlingit',
    'tmh': 'Tamashek',
    'tog': 'Tonga (Nyasa)',
    'ton': 'Tongan',
    'tpi': 'Tok Pisin',
    'tru': 'Truk', # Discontinued
    'tsi': 'Tsimshian',
    'tsn': 'Tswana',
    'tso': 'Tsonga',
    'tsw': 'Tswana', # Discontinued
    'tuk': 'Turkmen',
    'tum': 'Tumbuka',
    'tup': 'Tupi languages',
    'tur': 'Turkish',
    'tut': 'Altaic languages',
    'tvl': 'Tuvalu',
    'twi': 'Twi',
    'tyv': 'Tuvinian',
    'udm': 'Udmurt',
    'uga': 'Ugaritic',
    'uig': 'Uighur',
    'ukr': 'Ukrainian',
    'umb': 'Umbundu',
    'urd': 'Urdu',
    'uzb': 'Uzbek',
    'vai': 'Vai',
    'ven': 'Venda',
    'vie': 'Vietnamese',
    'vol': 'Volapük',
    'vot': 'Votic',
    'wak': 'Wakashan languages',
    'wal': 'Wolaitta',
    'war': 'Waray',
    'was': 'Washo',
    'wel': 'Welsh',
    'wen': 'Sorbian languages',
    'wln': 'Walloon',
    'wol': 'Wolof',
    'xal': 'Kalmyk',
    'xho': 'Xhosa',
    'yao': 'Yao',
    'yap': 'Yapese',
    'yid': 'Yiddish',
    'yor': 'Yoruba',
    'ypk': 'Yupik languages',
    'zap': 'Zapotec',
    'zbl': 'Bliss',
    'zen': 'Zenaga',
    'zgh': 'Standard Moroccan Tamazight',
    'zha': 'Zhuang',
    'znd': 'Zande languages',
    'zul': 'Zulu',
    'zun': 'Zuni',
    'zza': 'Zaza',
    }
# Lookup table mapping publication-frequency codes to human-readable labels.
# Codes that carry no determinable frequency ('#', 'u', 'z', '|') are kept
# commented out so lookups for them deliberately fail.
frequencies = {
    # '#': 'No determinable frequency',
    'a': 'Annual',
    'b': 'Bi-monthly',
    'c': 'Semi-weekly',
    'd': 'Daily',
    'e': 'Bi-weekly',
    'f': 'Semi-annual',
    'g': 'Biennial',
    'h': 'Triennial',
    'i': 'Tri-weekly', # 3 times a week
    'j': 'Tri-monthly', # 3 times a month
    'k': 'Continuously updated',
    'm': 'Monthly',
    'q': 'Quarterly',
    's': 'Semi-monthly',
    't': 'Tri-annual', # 3 times a year
    # 'u': 'Unknown',
    'w': 'Weekly',
    # 'z': 'Other',
    # '|': 'No attempt to code',
    }
# Lookup table mapping two-letter musical composition form codes to
# lower-case genre/form labels.
musical_forms = {
    'an': 'anthems',
    'bd': 'ballads',
    'bg': 'bluegrass music',
    'bl': 'blues',
    'bt': 'ballets',
    'ca': 'chaconnes',
    'cb': 'chants (religions other than Christianity)',
    'cc': 'Christian chants',
    'cg': 'concerti grossi',
    'ch': 'chorales',
    'cl': 'chorale preludes',
    'cn': 'canons and rounds',
    'co': 'concertos',
    'cp': 'polyphonic chansons',
    'cr': 'carols',
    'cs': 'chance compositions',
    'ct': 'cantatas',
    'cy': 'country music',
    'cz': 'canzonas',
    'df': 'dance forms',
    'dv': 'divertimentos, serenades, cassations, divertissements, and notturni',
    'fg': 'fugues',
    'fl': 'flamenco',
    'fm': 'folk music',
    'ft': 'fantasias',
    'gm': 'gospel music',
    'hy': 'hymns',
    'jz': 'jazz',
    'mc': 'musical revues and comedies',
    'md': 'madrigals',
    'mi': 'minuets',
    'mo': 'motets',
    'mp': 'motion picture music',
    'mr': 'marches',
    'ms': 'masses',
    'mz': 'mazurkas',
    'nc': 'nocturnes',
    'op': 'operas',
    'or': 'oratorios',
    'ov': 'overtures',
    'pg': 'program music',
    'pm': 'passion music',
    'po': 'polonaises',
    'pp': 'popular music',
    'pr': 'preludes',
    'ps': 'passacaglias',
    'pt': 'part-songs',
    'pv': 'pavans',
    'rc': 'rock music',
    'rd': 'rondos',
    'rg': 'ragtime music',
    'ri': 'ricercars',
    'rp': 'rhapsodies',
    'rq': 'requiems',
    'sd': 'square dance music',
    'sg': 'songs',
    'sn': 'sonatas',
    'sp': 'symphonic poems',
    'st': 'studies and exercises',
    'su': 'suites',
    'sy': 'symphonies',
    'tc': 'toccatas',
    'tl': 'teatro lirico',
    'ts': 'trio-sonatas',
    'vi': 'villancicos',
    'vr': 'variations',
    'wz': 'waltzes',
    'za': 'zarzuelas',
    }
| victoriamorris/iams2rf | marc2rf/lookup.py | Python | mit | 68,546 | [
"Dalton"
] | 2c3b5f680e2473f128335504965933e7336ac74c7217d0f6a855cfd1afad3a44 |
#!/usr/bin/env python
#
# $File: reichDemo.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
import math
def demo_model(model, N0=1000, N1=100000, G0=500, G1=500):
    '''Build and return a demographic function gen -> population size.

    model: 'instant' or 'exponential' growth after the burn-in stage
        (any other value returns None, matching the original behaviour)
    N0: initial population size
    N1: final population size
    G0: length of the burn-in stage (generations at size N0)
    G1: length of the population expansion stage
    '''
    # Per-generation log-growth rate used by the exponential model.
    growth_rate = (math.log(N1) - math.log(N0)) / G1

    def _instant(gen):
        # Constant N0 during burn-in, then an immediate jump to N1.
        return N0 if gen < G0 else N1

    def _exponential(gen):
        # Constant N0 during burn-in, then smooth exponential expansion.
        if gen < G0:
            return N0
        return int(N0 * math.exp((gen - G0) * growth_rate))

    if model == 'instant':
        return _instant
    if model == 'exponential':
        return _exponential
# Example usage: build an exponential-expansion demographic function
demo_func = demo_model('exponential', 1000, 100000, 500, 500)
# Print the population size at generation 700 (during the expansion stage)
print(demo_func(700))
| BoPeng/simuPOP | docs/reichDemo.py | Python | gpl-2.0 | 2,024 | [
"VisIt"
] | 1329d1d9aba472cf9fef4d08eb91103d4324ae394a901d5e1fb9064f3efeeca1 |
#Cauchy root finding
from __future__ import division
import numpy as np
import pylab as pl
import numpy.linalg as la
from numpy import pi, exp, arange, zeros, ones, real, imag, dot, roots, absolute, inf, roll, log, unwrap, linalg
from numpy import array, angle
_CROOT_PLOTRESULTS=0
_CROOT_DEBUG=0
def findzero_delves(f, fprime, z0=0, R=1, N=None, alpha=1, trange=None, tol=1e-6):
    '''
    Cauchy integral method for finding the zeros of an analytic function.

    f: function of a single complex variable, returns the function value
    fprime: derivative of f
    z0: center of the search region in the complex plane
    R: radius of the circular search contour
    N: initial number of boundary integral points (doubled each refinement
       pass); chosen automatically when None
    alpha: divisor applied to the logarithmic derivative (for functions
       whose phase winds a known multiple of the zero count)
    trange: (start, end) angular range of the contour, default (0, 2*pi)
    tol: maximum acceptable residue max|f(root)| for the returned roots

    Returns an array of the located zeros (empty when none are found).
    Algorithm from Delves and Lyness.
    '''
    # NOTE(review): the module is Python 2 but imports division from
    # __future__; the print statements below were the only py2-only
    # syntax, so they are written as single-argument calls that behave
    # identically under Python 2 and 3.
    Nt = 128 if N is None else N
    trange = (0,2*pi) if trange is None else trange
    residue = 1.0; niter = 0; maxiter = 5
    while (residue>tol) and (niter<maxiter):
        #Evaluate function on circle radius R
        dt = 2*pi/Nt
        thetas = np.arange(trange[0], trange[1], dt)
        #Calculate Phi'(t)/Phi(t), the logarithmic derivative on the contour
        zs = R*np.exp(1j*thetas)
        phi = fprime(zs+z0)/f(zs+z0)*1j*zs/alpha
        if _CROOT_PLOTRESULTS>1:
            pl.plot(thetas, np.unwrap(np.angle(phi)), 'r--')
        #Estimate the number of enclosed zeros (winding number integral)
        I0 = np.real(1/(2*pi*1j)*np.sum(phi)*dt)
        K = int(round(I0))
        #Reject K too large or too small
        if (I0<0.999) or (I0>50):
            print("Warning, no roots found %s" % I0)
            return array([])
        #Evaluate the moment integrals by the trapezoidal rule
        I = np.zeros(K, np.complex_)
        for k in range(K):
            I[k] = 1/(2*pi*1j)*np.sum(zs**(k+1)*(phi))*dt
        #Solve Newton's identities for the monic polynomial coefficients
        ac = np.zeros(K+1, np.complex_)
        ac[0] = 1.0
        for k in range(K):
            ac[k+1] = - np.dot(ac[k::-1], I[:k+1])/(k+1)
        calc_roots = np.roots(ac)
        if _CROOT_PLOTRESULTS>0:
            pl.plot(calc_roots.real, calc_roots.imag, 'b.')
        #Check error
        residue = np.absolute(f(calc_roots+z0)).max()
        #Increase resolution
        Nt = 2*Nt
        niter += 1
    print("Calculated %d roots in %d iterations to a residue %.2g" % (K, niter, residue))
    return calc_roots+z0
def findzero_adr(f, z0=0, R=1, N=None, nroot=None, tol=1e-6, alpha=1, quiet=False, trange=None, maxiter=10):
    '''
    Cauchy integral method for finding the zeros of an analytic function
    without requiring the derivative of the function.

    f: function of a single complex variable, returns the function value
    z0: center of the search region in the complex plane
    R: radius of the circular search contour
    N: initial number of boundary integral points (doubled each refinement
       pass); chosen automatically when None
    nroot: force the number of enclosed roots instead of estimating it
       from the phase winding
    tol: maximum acceptable residue max|f(root)| for the returned roots
    alpha: phase-unwrapping scale factor (unused here; kept for interface
       compatibility with the other finders)
    quiet: suppress diagnostic printing
    trange: (start, end) angular range of the contour, default (0, 2*pi)
    maxiter: maximum number of refinement passes

    Returns an array of the located zeros (empty when none are found).
    Algorithm from:
    "A numerical method for locating the zeros and poles of a meromorphic function"
    LF Abd-Ellal, LM Delves, JK Reid - Numerical Methods for Nonlinear Algebraic Equations, 1970
    '''
    # NOTE(review): print statements were py2-only; rewritten as
    # single-argument calls that behave identically under py2 and py3.
    trange = (0,2*pi) if trange is None else trange
    Nt = 32 if N is None else N
    T = trange[1]-trange[0]
    residue = inf; niter = 0
    while (residue>tol) and (niter<maxiter):
        #Evaluate function on circle radius R
        dt = T/Nt
        thetas = np.arange(trange[0],trange[1],dt)
        #Careful to 'unwrap' the phase
        zs = R*np.exp(1j*thetas)
        fz = f(zs+z0)
        #fz = abs(fz)*exp(1j*unwrap(angle(fz)/alpha)*alpha)
        if _CROOT_PLOTRESULTS>1:
            pl.plot(thetas, np.unwrap(np.angle(1/fz)), 'r--')
        #Check for zeros close to the boundary
        #Number of roots enclosed, from the total phase winding of f
        if nroot is None:
            I0 = (np.unwrap(np.angle(fz))[-1]-np.unwrap(np.angle(fz))[0])/(2*pi)
            K = int(round(I0))
        else:
            I0 = K = nroot
        #Reject K too large or too small
        if (I0<0.99) or (I0>50):
            if not quiet: print("Warning, no roots found %s" % I0)
            return array([])
        #Construct companion matrix for the polynomial equation
        #and the truncated matrix of Newton's equations.
        Ic = np.zeros(K, complex)
        XC = np.zeros((K,K), complex)
        for k in range(K):
            Ic[k] = R**(k+K+1)*np.sum(np.exp(1j*(k+K+1)*thetas)/fz)*dt
            for m in range(K):
                XC[k,m] = R**(k+m+1)*np.sum(np.exp(1j*(k+m+1)*thetas)/fz)*dt
        #Solve for the coefficients of the monic root polynomial
        ac = np.ones(K+1, complex)
        ac[1:] = la.solve(XC,-Ic)[::-1]
        calc_roots = np.roots(ac)
        if _CROOT_PLOTRESULTS>0:
            pl.plot(calc_roots.real, calc_roots.imag, 'kx')
        #Check error
        residue = absolute(f(calc_roots+z0)).max()
        #Increase resolution
        Nt = 2*Nt
        niter += 1
    if not quiet: print("Calculated %d roots in %d iterations with approximate error %.2g" % (K, niter, residue))
    return calc_roots+z0
def findzero_carpentier(f, z0=0, R=1, N=None, tol=1e-6, alpha=1, trange=None, quiet=False, force=False, maxiter=10):
    '''
    Cauchy integral method for finding the zeros of an analytic function;
    doesn't require the derivative of the function.

    f: function of a single variable, returns the function value
    z0: center location in the complex plane
    R: radius of region in which to bound the search
    N: number of boundary integral points, otherwise automatic (32)
    tol: maximum allowed residual max|f(root)| before refinement stops
    alpha: scaling applied to the winding-number/root-count estimate
    trange: angular interval of the contour; default is the full circle (0, 2*pi)
    quiet: suppress diagnostic printout
    force: proceed even when the estimated root count looks unreasonable
    maxiter: maximum number of refinement passes (Nt doubles each pass)

    Returns an array of root locations (empty array when none are found).

    Algorithm from Carpentier and dos Santos.
    '''
    trange = (0,2*pi) if trange is None else trange
    Nt = 32 if N is None else N
    T = trange[1]-trange[0]
    # residue starts at infinity so at least one refinement pass runs.
    residue = inf; niter = 0
    while (residue>tol) and (niter<maxiter):
        # Evaluate function on circle of radius R around z0, sampled at Nt points.
        dt = T/Nt
        thetas = np.arange(trange[0],trange[1],dt)
        zs = R*np.exp(1j*thetas)
        zshift = R*np.exp(1j*(thetas-dt))
        # Careful to 'unwrap' the phase of the root.
        fz = f(zs+z0)
        # fz_dt is f at the previous sample point; np.roll avoids a second
        # function evaluation over the whole contour.
        #fz_dt = f(zshift+z0)
        fz_dt = np.roll(fz,1)
        # Take the correct branch of the log: unwrap the imaginary part so the
        # ratio's phase varies continuously along the contour.
        g = np.log(fz/fz_dt)
        g = np.real(g) + np.unwrap(np.imag(g))*1j
        # Optional diagnostic plots, gated by the module-level flag.
        if _CROOT_PLOTRESULTS==2:
            pl.plot(np.real(zs+z0)/f.k0, np.abs(fz), 'b--')
        if _CROOT_PLOTRESULTS==3:
            pl.plot(thetas, np.unwrap(np.angle(g)), 'b--')
            pl.plot(thetas, np.angle(fz), 'g-')
        # Check for zeros close to the boundary.
        # Number of roots enclosed (winding number of f around the contour).
        I0 = np.real(np.sum(g)/(T*1j))/alpha
        if not quiet: print "Roots found:", I0
        # Bail out on non-finite or implausible root counts unless forced.
        if not np.isfinite(I0) or (not force and (I0<0.999 or I0>50)):
            if not quiet: print "No roots were found."
            return array([])
        K = int(round(I0))
        # Calculate the contour integrals (moments of the zeros).
        Ic = np.zeros(K+1, complex)
        for k in range(1,K+1):
            Ic[k] = (R**k)*np.sum(g*np.exp(1j*k*thetas))*k \
                /(np.exp(1j*k*T/Nt)-1)/Nt/alpha
        # Construct companion matrix for the polynomial equation
        # and the truncated matrix of Newton's equations.
        XC = np.zeros((K,K), np.complex_)
        X = np.zeros((K,K), np.complex_)
        for k in range(0,K):
            X[k,k] = K-k
            XC[k,k:] = Ic[1:K+1-k]
            if k>0:
                X[k-1,k:] = Ic[1:K-k+1]
                XC[k,k-1] = K-k
        # Find eigenvalues - the roots of the equation.
        calc_roots = la.eigvals(dot(XC,linalg.inv(X)))
        #calc_roots = linalg.eigvals(dot(linalg.inv(X),XC))
        if _CROOT_PLOTRESULTS>0:
            pl.plot(calc_roots.real, calc_roots.imag, 'kx')
        # Check error: largest residual of f at the candidate roots.
        residue = np.absolute(f(calc_roots+z0)).max()
        if not quiet: print "Roots:", calc_roots+z0
        if not quiet: print "Res:", residue, "at N=", Nt
        # Increase resolution and refine if the residual is still too large.
        Nt = 2*Nt
        niter += 1
    if not quiet:
        print "Calculated %d roots in %d iterations with approximate error %.2g" % (K, niter, residue)
    return calc_roots+z0
def findzero_matrix(A, z0=0, R=1, N=None, tol=1e-6, maxiter=10):
'''
Cauchy integral method for finding the zeros of an analytic matrix function
doesn't require the derivative of the function
A: matrix function of a single variable
z0: center location in the complex plane
R: radius of region in which to bound the search
N: Number of boundary integral points, otherwise automatic
Algorithim from "Foundations of Photonic Crystal Fibers", Zolla et al
'''
Nt = 64 if N is None else N
residue = 1.0
niter = 0
maxiter = 5
while (residue>tol) and (niter<maxiter):
#Evaluate function on circle radius R
dt = 2*pi/Nt
thetas = arange(0,2*pi,dt)
zs = R*exp(1j*thetas)
fz = zeros(A(0).shape+zs.shape, complex_)
for ii in range(Nt):
try:
fz[...,ii] = inv(A(zs[ii]+z0))
except:
break
#Matrix Cauchy integrals
I1 = 1/(2*pi*1j)*sum(fz*1j*zs, axis=-1)*dt
I2 = 1/(2*pi*1j)*sum(zs*fz*1j*zs, axis=-1)*dt
#Diagonalize
v,w = linalg.eig(I1)
calc_roots = dot(linalg.inv(w),dot(I2,w)).diagonal()/v
residue = absolute([linalg.det(A(z+z0)) for z in calc_roots]).max()
#Increase resolution
Nt = 2*Nt
niter += 1
print "Calculated x roots in %d iterations with approximate error %.2g" % (niter, residue)
return calc_roots+z0
| morris254/polymode | Polymode/mathlink/cauchy_findzero.py | Python | gpl-3.0 | 9,376 | [
"CRYSTAL"
] | 9a4d74f855c2e0e7020ab50af9f44839273bba90ad5a056507850dd98a816c4f |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Macsio(CMakePackage):
    """A Multi-purpose, Application-Centric, Scalable I/O Proxy Application."""

    tags = ['proxy-app', 'ecp-proxy-app']

    homepage = "http://llnl.github.io/MACSio"
    url = "https://github.com/LLNL/MACSio/archive/1.0.tar.gz"
    git = "https://github.com/LLNL/MACSio.git"

    version('develop', branch='master')
    version('1.0', '90e8e00ea84af2a47bee387ad331dbde')

    variant('mpi', default=True, description="Build MPI plugin")
    variant('silo', default=True, description="Build with SILO plugin")
    # TODO: multi-level variants for hdf5
    variant('hdf5', default=False, description="Build HDF5 plugin")
    variant('zfp', default=False, description="Build HDF5 with ZFP compression")
    variant('szip', default=False, description="Build HDF5 with SZIP compression")
    variant('zlib', default=False, description="Build HDF5 with ZLIB compression")
    variant('pdb', default=False, description="Build PDB plugin")
    variant('exodus', default=False, description="Build EXODUS plugin")
    variant('scr', default=False, description="Build with SCR support")
    variant('typhonio', default=False, description="Build TYPHONIO plugin")

    depends_on('json-cwx')
    depends_on('mpi', when="+mpi")
    depends_on('silo', when="+silo")
    depends_on('hdf5+hl', when="+hdf5")
    # depends_on('hdf5+szip', when="+szip")
    depends_on('exodusii', when="+exodus")
    # pdb is packaged with silo
    depends_on('silo', when="+pdb")
    depends_on('typhonio', when="+typhonio")
    depends_on('scr', when="+scr")

    def cmake_args(self):
        """Translate the active spec variants into CMake cache options."""
        spec = self.spec
        args = []

        def with_prefix(option_name, dep_name):
            # Build a -DWITH_<X>_PREFIX option pointing at a dependency's
            # install prefix.
            return '-DWITH_{0}_PREFIX={1}'.format(option_name,
                                                  spec[dep_name].prefix)

        if '~mpi' in spec:
            args.append('-DENABLE_MPI=OFF')

        if '~silo' in spec:
            args.append('-DENABLE_SILO_PLUGIN=OFF')
        if '+silo' in spec:
            args.append(with_prefix('SILO', 'silo'))

        if '+pdb' in spec:
            # The PDB plugin is provided by silo, so it needs the silo
            # prefix as well.
            args.append('-DENABLE_PDB_PLUGIN=ON')
            args.append(with_prefix('SILO', 'silo'))

        if '+hdf5' in spec:
            args.append('-DENABLE_HDF5_PLUGIN=ON')
            args.append(with_prefix('HDF5', 'hdf5'))
            # TODO: the zfp/szip/zlib compression variants are declared
            # above but not yet wired into CMake (multi-level variants;
            # zlib is on by default as of hdf5@1.1.2).

        if '+typhonio' in spec:
            args.append('-DENABLE_TYPHONIO_PLUGIN=ON')
            args.append(with_prefix('TYPHONIO', 'typhonio'))

        if '+exodus' in spec:
            args.append('-DENABLE_EXODUS_PLUGIN=ON')
            args.append(with_prefix('EXODUS', 'exodusii'))
            # exodus requires netcdf
            args.append(with_prefix('NETCDF', 'netcdf'))

        return args
| mfherbst/spack | var/spack/repos/builtin/packages/macsio/package.py | Python | lgpl-2.1 | 5,044 | [
"NetCDF"
] | c732f934705e26e58093db41a7c58b9261e982877464802d1a2ddc9c0b6fbb24 |
import unittest
import datetime
from conjure.documents import Document, EmbeddedDocument
from conjure.fields import StringField, IntegerField, ReferenceField, DateTimeField, EmailField, ListField, EmbeddedDocumentField
from conjure.exceptions import ValidationError
from conjure.utils import Alias
import bson
class DocumentTest(unittest.TestCase):
    """Integration tests for conjure's Document/EmbeddedDocument models.

    These tests exercise the ORM against a live MongoDB backend: field
    registration, inheritance, custom ids, db_field mapping, saving,
    reloading, deleting, and embedded/reference fields. Collections are
    created and dropped as part of the tests.
    """

    def setUp(self):
        """Create a minimal User document class reused by most tests."""
        class User(Document):
            name = StringField()
            age = IntegerField()

        self.User = User

    def test_definition(self):
        """Declared fields are registered in _fields; other attrs are not."""
        name_field = StringField()
        age_field = IntegerField()

        class User(Document):
            name = name_field
            age = age_field
            non_field = True

        self.assertEqual(User._fields['name'], name_field)
        self.assertEqual(User._fields['age'], age_field)
        self.assertFalse('non_field' in User._fields)
        # Documents get an implicit 'id' field.
        self.assertTrue('id' in User._fields)
        # Iterating a document instance yields its field names.
        fields = list(User())
        self.assertTrue('name' in fields and 'age' in fields)
        # The Document base class itself carries no field registry.
        self.assertFalse(hasattr(Document, '_fields'))

    def test_get_superclasses(self):
        """_superclasses maps dotted class paths to ancestor classes."""
        class Animal(Document): pass
        class Fish(Animal): pass
        class Mammal(Animal): pass
        class Human(Mammal): pass
        class Dog(Mammal): pass

        self.assertEqual(Mammal._superclasses, {'Animal': Animal})
        self.assertEqual(Dog._superclasses, {
            'Animal': Animal,
            'Animal.Mammal': Mammal,
        })

    def test_get_subclasses(self):
        """_get_subclasses() returns all descendants keyed by dotted path."""
        class Animal(Document): pass
        class Fish(Animal): pass
        class Mammal(Animal): pass
        class Human(Mammal): pass
        class Dog(Mammal): pass

        self.assertEqual(Mammal._get_subclasses(), {
            'Animal.Mammal.Dog': Dog,
            'Animal.Mammal.Human': Human
        })
        self.assertEqual(Animal._get_subclasses(), {
            'Animal.Fish': Fish,
            'Animal.Mammal': Mammal,
            'Animal.Mammal.Dog': Dog,
            'Animal.Mammal.Human': Human
        })

    def test_polymorphic_queries(self):
        """Querying a class yields instances of it and its subclasses."""
        class Animal(Document): pass
        class Fish(Animal): pass
        class Mammal(Animal): pass
        class Human(Mammal): pass
        class Dog(Mammal): pass

        Animal.drop_collection()
        Animal().save()
        Fish().save()
        Mammal().save()
        Human().save()
        Dog().save()

        classes = [obj.__class__ for obj in Animal.objects]
        self.assertEqual(classes, [Animal, Fish, Mammal, Human, Dog])
        classes = [obj.__class__ for obj in Mammal.objects]
        self.assertEqual(classes, [Mammal, Human, Dog])
        classes = [obj.__class__ for obj in Human.objects]
        self.assertEqual(classes, [Human])
        Animal.drop_collection()

    def test_inheritance(self):
        """Subclasses inherit fields and share the parent's collection."""
        class Employee(self.User):
            salary = IntegerField()

        self.assertTrue('name' in Employee._fields)
        self.assertTrue('salary' in Employee._fields)
        self.assertEqual(Employee._meta['collection'], self.User._meta['collection'])

        # Deeper inheritance chains must also be constructible.
        class A(Document): pass
        class B(A): pass
        class C(B): pass

    def test_inherited_collections(self):
        """A subclass may override its collection via the meta dict."""
        class Drink(Document):
            name = StringField()

        class AlcoholicDrink(Drink):
            meta = {'collection': 'booze'}

        class Drinker(Document):
            drink = ReferenceField(Drink)

        Drink.drop_collection()
        AlcoholicDrink.drop_collection()
        Drinker.drop_collection()

        red_bull = Drink(name='Red Bull')
        red_bull.save()
        programmer = Drinker(drink=red_bull)
        programmer.save()

        beer = AlcoholicDrink(name='Beer')
        beer.save()
        real_person = Drinker(drink=beer)
        real_person.save()

        # References resolve regardless of which collection holds the target.
        self.assertEqual(Drinker.objects[0].drink.name, red_bull.name)
        self.assertEqual(Drinker.objects[1].drink.name, beer.name)

    def test_custom_id_field(self):
        """A user-declared 'id' field replaces the automatic ObjectId id."""
        class User(Document):
            id = StringField()
            name = StringField()
            username = Alias('id')

        User.drop_collection()

        def create_invalid_user():
            # Saving without the custom id must fail validation.
            User(name='test').save()

        self.assertRaises(ValidationError, create_invalid_user)

        # Subclassing a document with a custom id must also work.
        class EmailUser(User):
            email = StringField()

        user = User(id='test', name='test user')
        user.save()
        user_obj = User.objects.first()
        self.assertEqual(user_obj.id, 'test')
        # The custom id is stored as Mongo's _id; the alias is not stored.
        user_son = User.objects._collection.find_one()
        self.assertEqual(user_son['_id'], 'test')
        self.assertTrue('username' not in user_son)
        User.drop_collection()

        user = User(id='mongo', name='mongo user')
        user.save()
        user_obj = User.objects.first()
        self.assertEqual(user_obj.id, 'mongo')
        user_son = User.objects._collection.find_one()
        self.assertEqual(user_son['_id'], 'mongo')
        self.assertTrue('username' not in user_son)
        User.drop_collection()

    def test_db_field(self):
        """db_field maps a Python field name onto a different Mongo key."""
        class Date(EmbeddedDocument):
            year = IntegerField(db_field='yr')

        class BlogPost(Document):
            title = StringField()
            author = ReferenceField(self.User, db_field='user_id')
            date = EmbeddedDocumentField(Date)
            slug = StringField()

        BlogPost.drop_collection()
        # NOTE(review): self.User declares no 'username' field -- presumably
        # unknown constructor kwargs are tolerated; verify against conjure.
        author = self.User(username='stanislav')
        author.save()
        post1 = BlogPost(title='test1', date=Date(year=2009), slug='test', author=author)
        post1.save()
        # Queries are written against the renamed fields transparently.
        self.assertEqual(BlogPost.objects.filter(Date.year == 2009).first().date.year, 2009)
        self.assertEqual(BlogPost.objects.filter(Date.year == 2009).first().author, author)
        BlogPost.drop_collection()

    def test_creation(self):
        """Constructor keyword arguments populate the document's fields."""
        user = self.User(name="Test User", age=30)
        self.assertEqual(user.name, "Test User")
        self.assertEqual(user.age, 30)

    def test_reload(self):
        """reload() refreshes an instance with the stored field values."""
        user = self.User(name="Test User", age=20)
        user.save()

        user_obj = self.User.objects.first()
        user_obj.name = "Mr Test User"
        user_obj.age = 21
        user_obj.save()

        # The original instance is stale until explicitly reloaded.
        self.assertEqual(user.name, "Test User")
        self.assertEqual(user.age, 20)
        user.reload()
        self.assertEqual(user.name, "Mr Test User")
        self.assertEqual(user.age, 21)

    def test_dictionary_access(self):
        """Documents support dict-style item access for their fields."""
        user = self.User(name='Test User', age=30)
        self.assertEquals(user['name'], 'Test User')

        # Unknown keys raise KeyError on read and write.
        self.assertRaises(KeyError, user.__getitem__, 'salary')
        self.assertRaises(KeyError, user.__setitem__, 'salary', 50)

        user['name'] = 'Another User'
        self.assertEquals(user['name'], 'Another User')

        # Length = length(assigned fields + id)
        self.assertEquals(len(user), 3)

        # Containment reflects whether the field currently has a value.
        self.assertTrue('age' in user)
        user.age = None
        self.assertFalse('age' in user)
        self.assertFalse('nationality' in user)

    def test_embedded_document(self):
        """Embedded documents have fields but no id and no collection."""
        class Comment(EmbeddedDocument):
            content = StringField()

        self.assertTrue('content' in Comment._fields)
        self.assertFalse('id' in Comment._fields)
        self.assertFalse('collection' in Comment._meta)

    def test_embedded_document_validation(self):
        """validate() enforces required fields and field types."""
        class Comment(EmbeddedDocument):
            date = DateTimeField()
            content = StringField(required=True)

        # Missing required content -> invalid.
        comment = Comment()
        self.assertRaises(ValidationError, comment.validate)

        comment.content = 'test'
        comment.validate()

        # Wrong type for a DateTimeField -> invalid.
        comment.date = 4
        self.assertRaises(ValidationError, comment.validate)

        comment.date = datetime.datetime.now()
        comment.validate()

    def test_save(self):
        """save() persists field values and the id; validation still runs."""
        user = self.User(name='Test User', age=30)
        user.save()
        person_obj = self.User.objects.find_one(self.User.name == 'Test User')
        self.assertEqual(person_obj['name'], 'Test User')
        self.assertEqual(person_obj['age'], 30)
        self.assertEqual(person_obj['_id'], user.id)

        class Recipient(Document):
            email = EmailField(required=True)

        # 'root@localhost' fails EmailField validation, so save() raises.
        recipient = Recipient(email='root@localhost')
        self.assertRaises(ValidationError, recipient.save)

    def test_delete(self):
        """delete() removes the document from the collection."""
        user = self.User(name="Test User", age=30)
        user.save()
        self.assertEqual(len(self.User.objects), 1)
        user.delete()
        self.assertEqual(len(self.User.objects), 0)

    def test_save_custom_id(self):
        """An explicitly provided id is used as the stored _id."""
        user = self.User(name='Test User', age=30, id='497ce96f395f2f052a494fd4')
        user.save()
        user_obj = self.User.objects.find_one(self.User.name == 'Test User')
        self.assertEqual(str(user_obj['_id']), '497ce96f395f2f052a494fd4')

    def test_save_list(self):
        """Lists of scalars and of embedded documents round-trip on save."""
        class Comment(EmbeddedDocument):
            content = StringField()

        class BlogPost(Document):
            content = StringField()
            comments = ListField(EmbeddedDocumentField(Comment))
            tags = ListField(StringField())

        BlogPost.drop_collection()

        post = BlogPost(content='Went for a walk today...')
        post.tags = tags = ['fun', 'leisure']
        comments = [Comment(content='Good for you'), Comment(content='Yay.')]
        post.comments = comments
        post.save()

        post_obj = BlogPost.objects.find_one()
        self.assertEqual(post_obj['tags'], tags)
        for comment_obj, comment in zip(post_obj['comments'], comments):
            self.assertEqual(comment_obj['content'], comment['content'])

        BlogPost.drop_collection()

    def test_save_embedded_document(self):
        """Embedded documents are stored as nested sub-documents."""
        class EmployeeDetails(EmbeddedDocument):
            position = StringField()

        class Employee(self.User):
            salary = IntegerField()
            details = EmbeddedDocumentField(EmployeeDetails)

        employee = Employee(name='Test Employee', age=50, salary=20000)
        employee.details = EmployeeDetails(position='Developer')
        employee.save()

        employee_obj = Employee.objects.find_one({'name': 'Test Employee'})
        self.assertEqual(employee_obj['name'], 'Test Employee')
        self.assertEqual(employee_obj['age'], 50)
        self.assertEqual(employee_obj['details']['position'], 'Developer')

    def test_save_reference(self):
        """References are stored as ObjectIds and dereferenced on access."""
        class BlogPost(Document):
            meta = {'collection': 'blogpost_1'}
            content = StringField()
            author = ReferenceField(self.User)

        BlogPost.drop_collection()

        author = self.User(name='Test User')
        author.save()

        post = BlogPost(content='Watched some TV today... how exciting.')
        post.author = author
        post.save()

        post_obj = BlogPost.objects.first()
        # Raw data holds the ObjectId; attribute access dereferences it.
        self.assertTrue(isinstance(post_obj._data['author'], bson.objectid.ObjectId))
        self.assertTrue(isinstance(post_obj.author, self.User))
        self.assertEqual(post_obj.author.name, 'Test User')

        # Changes made through the dereferenced object can be saved.
        post_obj.author.age = 25
        post_obj.author.save()
        author = list(self.User.objects.filter_by(name='Test User'))[-1]
        self.assertEqual(author.age, 25)
        BlogPost.drop_collection()

    def test_meta_cls(self):
        """Only subclasses embed a _cls marker in their mongo documents."""
        class Test(EmbeddedDocument):
            name = IntegerField()

        class Test2(Test):
            name = IntegerField()

        self.assertFalse('_cls' in Test().to_mongo())
        self.assertTrue('_cls' in Test2().to_mongo())

    def tearDown(self):
        """Drop the shared User collection created in setUp."""
        self.User.drop_collection()
if __name__ == '__main__':
unittest.main() | vishnevskiy/conjure | tests/test_documents.py | Python | mit | 11,711 | [
"exciting"
] | da2575ad8aa8afa8039d6f901a94628e15307ba93fb493a232c2ff70f4141c41 |
import random
import pycurl
import urllib
import cStringIO
import json
def _u(i):
try:
return unicode(i, errors='ignore')
except:
return i
class HerpesNetPanel:
    """Client that extracts the bot database from a Herpes Net 3.0 panel.

    The panel's gateway script (run.php) is vulnerable to UNION-based SQL
    injection through the 'hwid' POST parameter; this class uses that to
    read individual cells of the panel's 'clients' table.
    """

    def __init__(self, gateway_url):
        # Full URL of the panel's run.php gateway script.
        self.gateway_url = gateway_url

    @staticmethod
    def _get_field(gateway, table, column, row):
        """Read one table cell (table/column at offset `row`) via SQLi.

        Returns the cell value as a string, or None when the row does not
        exist or the response lacks the injected marker.
        """
        # Random 6-digit marker (digits 1-9 only) injected alongside the
        # value so it can be recognized in the panel's response.
        prefix = ""
        while len(prefix) < 6:
            prefix += random.choice(['1', '2', '3', '4', '5', '6', '7', '8', '9'])
        # Two nested UNION injections: the inner SELECT (hex-encoded so its
        # quotes survive the outer query) pulls the requested cell plus the
        # marker; the outer UNION feeds that string back as the response.
        bot_id = "' AND 1=2 UNION ALL SELECT 0x" + ("' AND 1=2 UNION ALL SELECT 1,2," + column + ",'" +
                                                    prefix + "',5 FROM " + table + " LIMIT 1 OFFSET " +
                                                    str(row) + " -- --").encode("hex") + ",2,3,4,5,6,7,8,9 -- --"
        buf = cStringIO.StringIO()
        c = pycurl.Curl()
        params = urllib.urlencode({'hwid': bot_id})
        # NOTE(review): presumably the user agent the panel expects from a
        # real bot check-in -- unverified.
        c.setopt(pycurl.USERAGENT, "74978b6ecc6c19836a17a3c2cd0840b0")
        c.setopt(c.POSTFIELDS, params)
        c.setopt(c.URL, gateway)
        c.setopt(c.WRITEFUNCTION, buf.write)
        c.setopt(pycurl.CONNECTTIMEOUT, 10)
        c.setopt(pycurl.TIMEOUT, 10)
        c.perform()
        command = buf.getvalue()
        try:
            # A successful extraction ends with "|<marker>"; strip it off.
            if command[-(len(prefix) + 1):] == "|" + prefix:
                return command[:-(len(prefix) + 1)]
        except:
            return None
        return None

    def get_all_bot_details(self):
        """Dump every row of the panel's 'clients' table.

        Iterates row offsets until a missing 'hwid' signals the end of the
        table; each bot is returned as a dict of its column values.
        """
        count = 0
        bots = []
        while True:
            user = _u(self._get_field(self.gateway_url, 'clients', 'hwid', count))
            if user is None:
                break
            bots.append({'hwid': _u(user),
                         'ip': _u(self._get_field(self.gateway_url, 'clients', 'ip', count)),
                         'cc': _u(self._get_field(self.gateway_url, 'clients', 'cc', count)),
                         'time': _u(self._get_field(self.gateway_url, 'clients', 'time', count)),
                         'userandpc': _u(self._get_field(self.gateway_url, 'clients', 'userandpc', count)),
                         'admin': _u(self._get_field(self.gateway_url, 'clients', 'admin', count)),
                         'os': _u(self._get_field(self.gateway_url, 'clients', 'os', count)),
                         'status': _u(self._get_field(self.gateway_url, 'clients', 'status', count)),
                         'id': _u(self._get_field(self.gateway_url, 'clients', 'id', count))})
            count += 1
        return bots
def print_help():
    """Write the usage/credits banner for this tool to stdout."""
    banner = [
        "usage: herpesnet.class.py [-h] url-of-run.php",
        "",
        "Herpes Net 3.0 Database Extraction",
        "Gathering information via SQLi from Herpes Net 3.0 botnets",
        "By Brian Wallace (@botnet_hunter)",
        "",
        " url-of-run.php URL of run.php in the Herpes Net panel",
        " -h --help Print this message",
        "",
    ]
    for banner_line in banner:
        print(banner_line)
if __name__ == "__main__":
    from argparse import ArgumentParser
    # add_help=False: the built-in -h handler is replaced with print_help().
    parser = ArgumentParser(add_help=False)
    # Positional gateway URL is optional so that bare invocation shows help.
    parser.add_argument('run', metavar='run', type=str, nargs='?', default=None)
    parser.add_argument('-h', '--help', default=False, required=False, action='store_true')
    parser.add_argument('-v', '--verbose', default=False, required=False, action='store_true')
    args = parser.parse_args()
    if args.help or args.run is None:
        print_help()
        exit()
    h = HerpesNetPanel(args.run)
    # Dump all extracted bot records as pretty-printed JSON (Python 2 print).
    print json.dumps(h.get_all_bot_details(), sort_keys=True, indent=4, separators=(',', ': '))
"Brian"
] | ecb401fdb71b861bcb8b3fdda8f00a40a0ee8010dac1631bf15229cc91128136 |
"""Database models.
Note on filesystem directory structure: (IN PROGRESS)
Since we are storing data output from various bioinformatics programs, the
models below result in the creation and maintenance of a directory
structure for data location. In general, the strategy is to have a
directory corresponding to a model instance when possible. Hierarchy
is used when models can be properly nested.
An example layout for a user's data might look like:
../projects/1324abcd/
../projects/1324abcd/alignments/
../projects/1324abcd/samples/
../projects/1324abcd/samples/1234abcd
../projects/1324abcd/samples/5678jklm
../projects/1324abcd/ref_genomes/
../projects/1324abcd/variant_calls/
Implementation Notes:
* get_field_order() for each model/table is used by the Adapter class
in adapters.py to know WHICH FIELDS are to be displayed and WHAT ORDER.
If you don't return a field in get_field_order, it won't be sent to
datatables.js for display.
Each field consists of a dict with a 'field' key, which is the name of
the field, and an optional 'verbose' key, which is the display name of
the field in the datatable. If 'verbose' is absent, then the underscores
are converted to spaces and each word is Title Cased.
"""
from contextlib import contextmanager
from datetime import datetime
import json
import os
import re
import shutil
import subprocess
from custom_fields import PostgresJsonField
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Model
# from genome_finish.insertion_placement_read_trkg import Junction
from genome_finish.contig_display_utils import create_contig_junction_links
from model_utils import assert_unique_types
from model_utils import ensure_exists_0775_dir
from model_utils import get_dataset_with_type
from model_utils import make_choices_tuple
from model_utils import UniqueUidModelMixin
from model_utils import VisibleFieldMixin
from utils import uppercase_underscore
from variants.filter_key_map_constants import MAP_KEY__ALTERNATE
from variants.filter_key_map_constants import MAP_KEY__COMMON_DATA
from variants.filter_key_map_constants import MAP_KEY__EVIDENCE
from variants.filter_key_map_constants import MAP_KEY__EXPERIMENT_SAMPLE
from variants.materialized_view_manager import MeltedVariantMaterializedViewManager
###############################################################################
# User-related models
###############################################################################
class UserProfile(UniqueUidModelMixin):
"""A UserProfile which is separate from the django auth User.
This references the auth.User and opens up the possibility of
adding additional fields.
"""
# A one-to-one mapping to the django User model.
user = models.OneToOneField(User)
def __unicode__(self):
return self.user.username
###############################################################################
# Data wrappers
###############################################################################
class Dataset(UniqueUidModelMixin):
"""A specific data file with a location on the filesystem.
Basically a wrapper for a file on the file system.
This is similar to the Galaxy notion of a dataset.
"""
# The type of data this represents (e.g. Dataset.Type.BWA_ALIGN).
# This is a semantic identifier for the kinds of operations
# that can be performed with this Dataset.
class TYPE:
"""The type of this dataset.
Limit to 40-chars as per Dataset.type field def.
For internal strings, we will convert to ALL_CAPS_W_UNDERSCORES.
"""
REFERENCE_GENOME_FASTA = 'Reference Genome FASTA' # fasta
REFERENCE_GENOME_GENBANK = 'Reference Genome Genbank' #genbank
REFERENCE_GENOME_GFF = 'Reference Genome GFF' #gff
FASTQ1 = 'FASTQ Forward'
FASTQ2 = 'FASTQ Reverse'
BWA_ALIGN = 'BWA BAM'
BWA_DISCORDANT = 'BWA BAM Discordant Paired Reads'
BWA_SPLIT = 'BWA BAM Split Reads'
BWA_UNMAPPED = 'BWA Unmapped Reads'
BWA_CLIPPED = 'BWA Clipped Reads'
BWA_PILED = 'BWA Piled Reads'
BWA_SV_INDICANTS = 'BWA Structural Variant Indicating Reads'
BWA_FOR_DE_NOVO_ASSEMBLY = 'BWA for De Novo Assembly'
BWA_ALIGN_ERROR = 'BWA Alignment Error'
VCF_FREEBAYES = 'Freebayes VCF'
VCF_PINDEL = 'Pindel VCF'
VCF_DELLY = 'Delly VCF'
VCF_LUMPY = 'Lumpy VCF'
VCF_USERINPUT = 'User VCF'
VCF_FREEBAYES_SNPEFF = 'SNPEff VCF'
VCF_LUMPY_SNPEFF = 'Lumpy SNPEff VCF'
VCF_PINDEL_SNPEFF = 'Pindel SNPEff VCF'
VCF_DE_NOVO_ASSEMBLED_CONTIGS = 'De Novo Assembled Contigs VCF'
VCF_DE_NOVO_ASSEMBLY_GRAPH_WALK = 'De Novo Assembly Graph Walk VCF'
BED_CALLABLE_LOCI = 'Flagged Regions BED'
LUMPY_INSERT_METRICS_HISTOGRAM = 'Lumpy Insert Metrics Histogram'
LUMPY_INSERT_METRICS_MEAN_STDEV = 'Lumpy Insert Metrics Mean Stdev'
FASTQC1_HTML = 'FASTQC Forward HTML Output'
FASTQC2_HTML = 'FASTQC Reverse HTML Output'
TYPE_CHOICES = make_choices_tuple(TYPE)
type = models.CharField(max_length=40, choices=TYPE_CHOICES)
# This relationship lets us know where the dataset points. This
# is important in case we want to duplicate this dataset in order
# to make a compressed/uncompressed version - we need to hook it
# up to the correct related models after copying.
TYPE_TO_RELATED_MODEL = {
TYPE.REFERENCE_GENOME_FASTA : 'referencegenome_set',
TYPE.REFERENCE_GENOME_GENBANK : 'referencegenome_set',
TYPE.FASTQ1 : 'experimentsample_set',
TYPE.FASTQ2 : 'experimentsample_set',
TYPE.BWA_ALIGN : 'experimentsampletoalignment_set',
TYPE.BWA_DISCORDANT : 'experimentsampletoalignment_set',
TYPE.BWA_SPLIT : 'experimentsampletoalignment_set',
TYPE.BWA_CLIPPED : 'experimentsampletoalignment_set',
TYPE.BWA_PILED : 'experimentsampletoalignment_set',
TYPE.BWA_SV_INDICANTS : 'experimentsampletoalignment_set',
TYPE.BWA_ALIGN_ERROR : 'alignmentgroup_set',
TYPE.VCF_FREEBAYES : 'alignmentgroup_set',
TYPE.VCF_PINDEL : 'alignmentgroup_set',
TYPE.VCF_LUMPY : 'alignmentgroup_set',
TYPE.VCF_DELLY : 'alignmentgroup_set',
TYPE.VCF_LUMPY : 'alignmentgroup_set',
TYPE.VCF_LUMPY_SNPEFF: 'alignmentgroup_set',
TYPE.VCF_USERINPUT : 'variantset_set',
TYPE.VCF_FREEBAYES_SNPEFF : 'alignmentgroup_set',
TYPE.VCF_DE_NOVO_ASSEMBLED_CONTIGS : 'experimentsampletoalignment_set',
TYPE.VCF_DE_NOVO_ASSEMBLY_GRAPH_WALK : (
'experimentsampletoalignment_set'),
TYPE.FASTQC1_HTML: 'experimentsample_set',
TYPE.FASTQC2_HTML: 'experimentsample_set',
}
# Human-readable identifier. Also used for JBrowse.
label = models.CharField(max_length=256)
# Location on the filesystem relative to settings.MEDIA_ROOT.
filesystem_location = models.CharField(max_length=512, blank=True)
# Associated with a separate index? (e.g. for vcf/tabix and bam files)
filesystem_idx_location = models.CharField(max_length=512, blank=True)
# When the dataset is a result of a computation, we'll set a status on it.
# NOTE: The reliability of the present implementation of this model feature
# is questionable.
class STATUS:
"""
The status of running this Dataset.
Limit to 40-chars as per Dataset.status field def.
"""
UNKNOWN = 'UNKNOWN'
NOT_STARTED = 'NOT_STARTED'
COMPUTING = 'COMPUTING'
ALIGNING = 'ALIGNING'
VARIANT_CALLING = 'VARIANT_CALLING'
READY = 'READY'
FAILED = 'FAILED'
COPYING = 'COPYING'
QUEUED_TO_COPY = 'QUEUED_TO_COPY'
VERIFYING = 'VERIFYING'
QC = 'RUNNING_QC'
AWAITING_UPLOAD = 'AWAITING_UPLOAD'
STATUS_CHOICES = make_choices_tuple(STATUS)
status = models.CharField(max_length=40, choices=STATUS_CHOICES,
default=STATUS.READY)
# Dictionary of compression suffixes and programs to use to perform
# various actions on a pipe
COMPRESSION_TYPES = {
'.gz': {
'cat': ('gzip', '-dc'),
'zip': ('gzip', '-c')
},
'.bz2': {
'cat': ('bzcat',),
'zip': ('bzip2', '-c')
},
'.zip': {
'cat': ('unzip', '-p'),
'zip': ('zip', '-')
},
'.bgz': {
'cat': (settings.BGZIP_BINARY, '-dc'),
'zip': (settings.BGZIP_BINARY, '-c')
},
}
def __unicode__(self):
return self.label
def get_absolute_location(self):
"""Returns the full path to the file on the filesystem.
"""
return os.path.join(settings.PWD, settings.MEDIA_ROOT,
self.filesystem_location)
def get_absolute_idx_location(self):
return os.path.join(settings.PWD, settings.MEDIA_ROOT,
self.filesystem_idx_location)
def delete_underlying_data(self):
"""Deletes data from filesystem.
"""
full_fs_location = self.get_absolute_location()
if os.path.exists(full_fs_location):
os.remove(full_fs_location)
full_fs_index_location = self.get_absolute_idx_location()
if os.path.exists(full_fs_index_location):
os.remove(full_fs_index_location)
def is_compressed(self):
"""
Checks file path for .bz2 or .gz ending, and if so, returns true.
"""
return self.filesystem_location.endswith(
tuple(self.COMPRESSION_TYPES.keys()))
def is_indexed(self):
"""
Checks if dataset has idx location.
"""
return not (self.filesystem_idx_location == '')
def wrap_if_compressed(self):
""" This helper function returns a process substitution string
to be used by /bin/bash if the fastq read file is compressed, otherwise
it just returns get_absolute_location().
"""
absolute_location = self.get_absolute_location()
if self.is_compressed():
extension = os.path.splitext(self.filesystem_location)[1]
program = ' '.join(self.COMPRESSION_TYPES[extension]['cat'])
return '<({:s} {:s})'.format(
program, absolute_location)
else:
return absolute_location
def internal_string(self, parent_entity):
"""
A string used internally to describe a dataset for an entity.
Our convention is
parent_entity.uid + '_' + dataset.type
(uppercased, whitespace as underscores)
"""
return str(parent_entity.uid) + '_' + uppercase_underscore(self.type)
def external_string(self, parent_entity):
"""
A string used externally to describe a dataset for an entity.
Our convention is
parent_entity.label + ' ' + dataset.type
"""
return str(parent_entity.label) + ' ' + self.type
@contextmanager
def stream(self):
"""
If dataset is compressed, return a named pipe that decompressed the
file, else just return the absolute location.
"""
raise NotImplementedError
# Currently the below isn't working; the mkfifo blocks itself so I can't
# seem to read and write to it at the same time. For now, we've decided
# to go for process substitution in Bash (see wrap_if_compressed(),
# although this requires the use of Shell=True.
# dirname = tempfile.mkdtemp()
# p = None
# try:
# if not self.is_compressed():
# return self.get_absolute_location()
# path = os.path.join(dirname, 'named_pipe')
# os.mkfifo(path)
# extension = os.path.splitext(self.filesystem_location)[1]
# program = self.COMPRESSION_TYPES[extension]
# with open(path, 'w') as wpipe:
# p = Popen(program.append(path)) # write to path
# return path
# finally:
# shutil.rmtree(dirname)
# if p: p.close()
def get_related_model_set(self):
return getattr(self, Dataset.TYPE_TO_RELATED_MODEL[self.type])
def make_compressed(self, compression_type):
"""
Generate a new compressed version of this dataset.
For some cases (like generating a compressed TABIX-indexed VCF),
we want to take a dataset and generate a compressed version of
the file (as a separate model instance) with the same associations
to other related model instances.
TODO: We could just replace the uncompressed version with the
compressed version with the compressed version, but right now that's
too much work, since we'd need to check every time to see if the file
was compressed, and depending on the tool, decide if we'd need to
decompress it via pipe, or write the decompressed version as a new
file, etc etc. So, currently the replace option is unimplemented.
"""
# Check that compression_type is valid
assert compression_type in Dataset.COMPRESSION_TYPES, (
'compression_type is invalid, {:s} is not one of: {:s}'.format(
compression_type, Dataset.COMPRESSION_TYPES.keys()))
# Check that this dataset isn't itself already compressed
assert self.is_compressed() is False, (
'This dataset is already compressed.')
# Check that a compressed dataset isn't already associated with a
# related model (probably just one related model).
related_models = self.get_related_model_set().all()
for obj in related_models:
old_compressed_dataset = get_dataset_with_type(obj, self.type,
compressed=True)
# TODO: In this case, do we want to just return this?
# Maybe with a warning?
assert old_compressed_dataset is None, (
'A related model already compressed' +
'this dataset: {:s}'.format(
compressed_dataset.filesystem_location))
# Generate the new compressed dataset file
# by adding the compression_type suffix
orig_file = self.get_absolute_location()
new_compressed_file = orig_file + compression_type
compression_command = Dataset.COMPRESSION_TYPES[
compression_type]['zip']
with open(orig_file, 'rb') as fh_in:
with open(new_compressed_file, 'wb') as fh_out:
subprocess.check_call(compression_command,
stdin=fh_in, stdout=fh_out)
# Generate the new dataset model object
# need relative path, not absolute
new_compressed_file_rel = self.filesystem_location + compression_type
new_compressed_dataset = Dataset.objects.create(
label= self.label + ' (compressed)',
type= self.type,
filesystem_location= new_compressed_file_rel)
# Finally, add this new compressed dataset to the dataset_set
# field in all the related model objects that point to the
# uncompressed version
[obj.dataset_set.add(new_compressed_dataset) for obj in related_models]
return new_compressed_dataset
# Make sure the Dataset types are unique. This runs once at module import
# time and fails fast if two Dataset.TYPE constants share the same value.
assert_unique_types(Dataset.TYPE)
###############################################################################
# Project models
###############################################################################
class Project(UniqueUidModelMixin):
    """A single project belonging to a user.

    A project groups together ReferenceGenomes, ExperimentSamples, and other
    data generated by tools during analysis.
    """

    # The project owner.
    # TODO: Implement permissions system so that projects can be shared.
    owner = models.ForeignKey('UserProfile')

    # The human-readable title of the project.
    title = models.CharField(max_length=256)

    s3_backed = models.BooleanField(default=settings.S3_ENABLED)

    def __unicode__(self):
        return '-'.join([self.title, str(self.owner)])

    def is_s3_backed(self):
        """Whether this project's data lives in S3."""
        return self.s3_backed

    def get_s3_model_data_dir(self):
        """S3 key prefix under which this project's data is stored."""
        return os.path.join("projects", str(self.uid))

    def get_model_data_root(self):
        """Absolute location where all project data is stored."""
        return os.path.join(settings.PWD, settings.MEDIA_ROOT, 'projects')

    def get_model_data_dir(self):
        """Full path to this project's data: media root + project uid."""
        return os.path.join(self.get_model_data_root(), str(self.uid))

    def ensure_model_data_dir_exists(self):
        """Create the data directory for this project if it is missing.

        The directory is named after the project uid.
        """
        # The root of all projects must exist before the per-project dir.
        ensure_exists_0775_dir(self.get_model_data_root())
        return ensure_exists_0775_dir(self.get_model_data_dir())

    def delete_model_data_dir(self):
        """Remove all on-disk data associated with this project.

        WARNING: Be careful with this method!
        """
        target_dir = self.get_model_data_dir()
        if os.path.exists(target_dir):
            shutil.rmtree(target_dir)

    @classmethod
    def get_field_order(clazz, **kwargs):
        """Field display order for the front-end; called by the adapter."""
        return [
            {'field': 'uid'},
            {'field': 'title'},
        ]
class Chromosome(UniqueUidModelMixin):
    """A locus belonging to a reference genome which Variants
    hold foreign keys to. May be a literal chromosome,
    bacterial genome, or plasmid.
    """

    # A chromosome belongs to a single ReferenceGenome
    reference_genome = models.ForeignKey('ReferenceGenome')

    # Chromosome label (human-readable description shown in the UI)
    label = models.CharField(verbose_name="Name", max_length=256)

    # The unique id of the SeqRecord object corresponding to this Chromosome.
    # In a genbank/multi-fasta file, the sequence belonging to each chromosome
    # carries a unique identifier which is parsed by BioPython's SeqIO module
    # as the .id attribute of the SeqRecord object. This field ties our
    # Chromosome model to a specific chromosome in a reference genome
    # fasta/genbank dataset. The seqrecord_id field does not necesarilly
    # carry any comprehensible information about the Chromosome, it is only an
    # identifier, and the description of the Chromosome is given by the
    # label field.
    # Ex: A reporter plasmid Chromosome:
    #     seqrecord_id: pl1839
    #     label: Reporter plasmid carrying RFP on a lac promoter
    seqrecord_id = models.CharField(
            verbose_name="SeqRecord ID", max_length=256, default="chrom_1")

    # Number of bases on the Chromosome
    num_bases = models.BigIntegerField()

    @classmethod
    def get_field_order(clazz, **kwargs):
        """Get the order of the models for displaying on the front-end.
        Called by the adapter.
        """
        return [
            {'field': 'label', 'verbose': 'Chromosome Name'},
            {'field': 'num_bases', 'verbose':'Bases'},
            {'field': 'uid'}
        ]
class ReferenceGenome(UniqueUidModelMixin):
    """A reference genome relative to which alignments and analysis are
    performed.
    """

    # A ReferenceGenome belongs to a single Project.
    project = models.ForeignKey('Project')

    # A human-readable label for this genome.
    label = models.CharField(verbose_name="Name", max_length=256)

    # Datasets pointing to files on the system (e.g. .fasta files, etc.)
    dataset_set = models.ManyToManyField('Dataset', blank=True, null=True,
            verbose_name="Datasets")

    # a key/value list of all possible VCF and sample metadata fields, stored
    # as a JsonField and dynamically updated by dynamic_snp_filter_key_map.py
    variant_key_map = PostgresJsonField()

    # reference genome metadata field for storing key-value pairs of reference
    # genome related information e.g. metadata['is_from_de_novo_assembly']=True
    metadata = PostgresJsonField()

    # Bit that indicates whether the materialized view is up to date.
    # This design decision puts a lot on the developer to remember to set this
    # false whenever any data changes that would require a refresh of the
    # materialized view.
    is_materialized_variant_view_valid = models.BooleanField(default=False)

    def __unicode__(self):
        return self.label

    @property
    def num_chromosomes(self):
        """Number of Chromosomes belonging to the ReferenceGenome.
        """
        # Use count() so the database does the counting; len(queryset)
        # materialized every Chromosome row just to count them.
        return Chromosome.objects.filter(reference_genome=self).count()

    @property
    def num_bases(self):
        """Total number of bases of all Chromosomes belonging to
        the ReferenceGenome.
        """
        # sum() over an empty sequence correctly yields 0 (a database
        # Sum aggregate would yield None), so add up in Python.
        return sum(chrom.num_bases for chrom in
                Chromosome.objects.filter(reference_genome=self))

    @property
    def href(self):
        """Link to url view for this model.
        """
        return reverse(
                'main.views.reference_genome_view',
                args=(self.project.uid, self.uid))

    def get_model_data_root(self):
        """Get the root location for all data of this type in the project.
        """
        return os.path.join(self.project.get_model_data_dir(), 'ref_genomes')

    def get_model_data_dir(self):
        """Get the full path to the location of this model's data.
        """
        return os.path.join(self.get_model_data_root(), str(self.uid))

    def ensure_model_data_dir_exists(self):
        """Ensure that a data directory exists for this model.
        """
        # Make sure the root exists.
        ensure_exists_0775_dir(self.get_model_data_root())

        # Check whether the data dir exists, and create it if not.
        return ensure_exists_0775_dir(self.get_model_data_dir())

    def get_jbrowse_directory_path(self):
        """Returns the full path to the root of JBrowse data for this
        ReferenceGenome.
        """
        return os.path.join(self.get_model_data_dir(), 'jbrowse')

    def ensure_jbrowse_dir(self):
        """Ensures that the jbrowse data dir exists."""
        return ensure_exists_0775_dir(self.get_jbrowse_directory_path())

    def get_snpeff_directory_path(self):
        """Returns the full path to the root of snpeff data for this
        ReferenceGenome.
        """
        return os.path.join(self.get_model_data_dir(), 'snpeff',
                self.uid)

    def ensure_snpeff_dir(self):
        """Ensures that the snpeff data dir exists."""
        return ensure_exists_0775_dir(self.get_snpeff_directory_path())

    def get_client_jbrowse_data_path(self):
        """Relative data path handed to the client-side JBrowse instance.

        NOTE(review): the S3 branch is deliberately disabled via
        assert False; the return statement below it only executes when
        asserts are stripped (python -O), and the resulting url is known
        to be incorrect.
        """
        if self.project.is_s3_backed():
            assert False, "url is incorrect."
            return os.path.join(
                    'http://%s.s3.amazonaws.com/' % settings.S3_BUCKET,
                    'projects',
                    str(self.project.uid),
                    'ref_genomes',
                    str(self.uid),
                    'jbrowse')
        else:
            return os.path.join(
                    '/jbrowse/gd_data/',
                    'projects',
                    str(self.project.uid),
                    'ref_genomes',
                    str(self.uid),
                    'jbrowse')

    def get_client_jbrowse_link(self):
        """Returns the link to jbrowse redirect for this ReferenceGenome.

        Example url for user with uid 'abc', and project id 'xyz', and
        refgenome id 456:
            '/redirect_jbrowse?data=gd_data/abc/projects/xyz/ref_genomes/456/jbrowse/'
        """
        return '/redirect_jbrowse?data=' + self.get_client_jbrowse_data_path()

    def is_annotated(self):
        """For several steps (notably snpEff), we want to check that this
        ReferenceGenome is annotated (i.e. it has a genbank file associated
        with it). This function returns True if a genbank file is available.
        """
        return self.dataset_set.filter(
                type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).exists()

    def get_variant_caller_common_map(self):
        """Key map for fields shared across samples at a variant."""
        return self.variant_key_map[MAP_KEY__COMMON_DATA]

    def get_variant_alternate_map(self):
        """Key map for per-alternate-allele fields."""
        return self.variant_key_map[MAP_KEY__ALTERNATE]

    def get_variant_evidence_map(self):
        """Key map for per-sample evidence fields."""
        return self.variant_key_map[MAP_KEY__EVIDENCE]

    def get_experiment_sample_map(self):
        """Key map for experiment sample metadata fields."""
        return self.variant_key_map[MAP_KEY__EXPERIMENT_SAMPLE]

    def delete_model_data_dir(self):
        """Removes all data associated with this model.

        WARNING: Be careful with this method!
        """
        data_dir = self.get_model_data_dir()
        if os.path.exists(data_dir):
            shutil.rmtree(data_dir)

    @classmethod
    def get_field_order(clazz, **kwargs):
        """Get the order of the models for displaying on the front-end.
        Called by the adapter.
        """
        return [
            {'field': 'label'},
            {'field': 'num_chromosomes', 'verbose': '# Chromosomes'},
            {'field': 'num_bases', 'verbose': 'Total Size'}
        ]

    def invalidate_materialized_view(self):
        """Mark the materialized variant view stale; callers must refresh."""
        self.is_materialized_variant_view_valid = False
        self.save(update_fields=['is_materialized_variant_view_valid'])

    def drop_materialized_view(self):
        """Deletes associated materialized view.
        """
        mvm = MeltedVariantMaterializedViewManager(self)
        mvm.drop()
class Contig(UniqueUidModelMixin):
    """An assembled contig tied to a parent ReferenceGenome and the sample
    alignment that provides evidence for it, used as structural-variation
    evidence (see the field comments below).
    """

    # A human-readable label for this genome.
    label = models.CharField(verbose_name="Name", max_length=256)

    # Number of bases in the Contig
    num_bases = models.BigIntegerField(default=0)

    # Datasets pointing to files on the system (e.g. .fasta files, etc.)
    dataset_set = models.ManyToManyField('Dataset', blank=True, null=True,
            verbose_name="Datasets")

    # Reference genome which the insertion belongs to
    parent_reference_genome = models.ForeignKey('ReferenceGenome',
            related_name='+')

    # The sample alignment that provides evidence for the insertion
    experiment_sample_to_alignment = models.ForeignKey(
            'ExperimentSampleToAlignment')

    # The variant caller common data object associated
    variant_caller_common_data = models.ForeignKey('VariantCallerCommonData',
            blank=True, null=True)

    # Contig metadata field for storing key-value pairs of contig
    # related information e.g. metadata['is_from_de_novo_assembly']=True
    metadata = PostgresJsonField()

    def __getattr__(self, name):
        """Fall back to the ``metadata`` dict for unknown attributes.

        Automatically called if an attribute is not found in the typical
        place. Raises AttributeError if the key is missing, so hasattr()
        and friends behave correctly.
        """
        try:
            return self.metadata[name]
        except Exception:
            # Catch Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            raise AttributeError(name)

    def get_model_data_root(self):
        """Get the root location for all data of this type in the project.
        """
        return os.path.join(
                self.parent_reference_genome.project.get_model_data_dir(),
                'contigs')

    def get_model_data_dir(self):
        """Get the full path to the location of this model's data.
        """
        return os.path.join(self.get_model_data_root(), str(self.uid))

    def ensure_model_data_dir_exists(self):
        """Ensure that a data directory exists for this model.
        """
        # Make sure the root exists.
        ensure_exists_0775_dir(self.get_model_data_root())

        # Check whether the data dir exists, and create it if not.
        return ensure_exists_0775_dir(self.get_model_data_dir())

    def get_jbrowse_directory_path(self):
        """Returns the full path to the root of JBrowse data for this
        Contig.
        """
        return os.path.join(self.get_model_data_dir(), 'jbrowse')

    def ensure_jbrowse_dir(self):
        """Ensures that the jbrowse data dir exists."""
        return ensure_exists_0775_dir(self.get_jbrowse_directory_path())

    def get_client_jbrowse_data_path(self):
        """Relative data path handed to the client-side JBrowse instance.

        NOTE(review): the S3 branch is deliberately disabled via
        assert False (implicitly returns None under python -O).
        """
        if self.parent_reference_genome.project.is_s3_backed():
            assert False, "url is incorrect."
        else:
            return os.path.join(
                    '/jbrowse/gd_data/',
                    'projects',
                    str(self.parent_reference_genome.project.uid),
                    'contigs',
                    str(self.uid),
                    'jbrowse')

    def get_client_jbrowse_link(self):
        """Returns the link to jbrowse redirect for this Contig, with the
        default tracks plus this contig's alignment and coverage tracks.
        """
        bam_dataset = self.dataset_set.get(type=Dataset.TYPE.BWA_ALIGN)
        bam_label = bam_dataset.internal_string(self)
        coverage_label = bam_dataset.internal_string(self) + '_COVERAGE'
        track_labels = (settings.JBROWSE_DEFAULT_TRACKS +
                [bam_label, coverage_label])
        link = '/redirect_jbrowse?data=' + self.get_client_jbrowse_data_path()
        link += '&tracks=' + ','.join(track_labels)
        return link

    @property
    def href(self):
        """Link to url view for this model.
        """
        return reverse(
                'main.views.contig_view',
                args=(self.parent_reference_genome.project.uid, self.uid))

    @property
    def coverage(self):
        """Average coverage recorded in metadata, or '' if absent."""
        return self.metadata.get('coverage', '')

    @property
    def chromosome(self):
        """Chromosome name recorded in metadata, or '' if absent."""
        return self.metadata.get('chromosome', '')

    def get_contig_reads_track(self):
        """Internal track label for the SV-indicant reads BAM dataset."""
        bam_dataset = get_dataset_with_type(
                self,
                Dataset.TYPE.BWA_SV_INDICANTS)
        return str(bam_dataset.internal_string(self))

    @property
    def left_junctions_html(self):
        """HTML links for junctions on the left end of the contig."""
        junctions = self.metadata.get('left_junctions', '')
        return create_contig_junction_links(self, junctions)

    @property
    def right_junctions_html(self):
        """HTML links for junctions on the right end of the contig."""
        junctions = self.metadata.get('right_junctions', '')
        return create_contig_junction_links(self, junctions)

    @property
    def experiment_sample(self):
        """Label of the sample whose alignment produced this contig."""
        return self.experiment_sample_to_alignment.experiment_sample.label

    @classmethod
    def get_field_order(clazz, **kwargs):
        """Get the order of the models for displaying on the front-end.
        Called by the adapter.
        """
        return [
            {'field': 'label'},
            {'field': 'experiment_sample'},
            {'field': 'num_bases', 'verbose': 'Contig Length'},
            {'field': 'coverage', 'verbose': 'Average Coverage'},
            {'field': 'chromosome'},
            {'field': 'left_junctions_html', 'verbose':
                    'Left Junctions<br>(Reference &rarr; Contig)'},
            {'field': 'right_junctions_html', 'verbose':
                    'Right Junctions<br>(Reference &rarr; Contig)'}
        ]
class ExperimentSample(UniqueUidModelMixin):
    """Model representing data for a particular experiment sample.

    Usually this corresponds to a pair of fastq reads for a particular
    bacterial clone or colony, after barcode removal/de-multiplexing.
    """

    # A Sample belongs to a single Project.
    project = models.ForeignKey('Project')

    # Human-readable identifier.
    label = models.CharField('Sample Name', max_length=256)

    # The datasets associated with this sample. The semantic sense of the
    # dataset can be determined from the Dataset.type field.
    dataset_set = models.ManyToManyField('Dataset', blank=True, null=True,
            verbose_name="Datasets")

    # User specified data fields corresponding to the sample.
    # Examples: Growth rate, GFP amount, phenotype, # of mage cycles, etc.
    data = PostgresJsonField()

    # parent/child relations to other samples
    children = models.ManyToManyField('self',
            through='ExperimentSampleRelation',
            symmetrical=False,
            related_name='parents')

    def __getattr__(self, name):
        """Fall back to the key/value ``data`` store for unknown attributes.

        Automatically called if an attribute is not found in the typical
        place. Raises AttributeError if the key is missing, so hasattr()
        and friends behave correctly. (The old docstring claimed the string
        'undefined' was returned; the code has always raised instead.)

        See: http://docs.python.org/2/reference/datamodel.html#object.__getattr__
        """
        try:
            return self.data[name]
        except Exception:
            # Catch Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            raise AttributeError(name)

    def add_child(self, sample):
        """Create a relationship with another sample as a child.

        TODO: For now, don't complain if this is a parent sample as well,
        since we aren't doing anything fancy.

        Returns:
            Tuple (relation, created) from get_or_create, where relation is
            the ExperimentSampleRelation and created is a boolean.
        """
        return ExperimentSampleRelation.objects.get_or_create(
                parent=self,
                child=sample)

    def remove_child(self, sample):
        """Remove a parent/child relationship with another sample.

        Returns:
            True if the relationship was present and removed, else False.
        """
        child_relation = ExperimentSampleRelation.objects.filter(
                parent=self,
                child=sample)
        if child_relation:
            child_relation.delete()
            return True
        else:
            return False

    def get_children(self):
        """Use relationship table to get all children.
        """
        return self.children.all()

    def get_parents(self):
        """Use relationship table to get all parents.
        """
        return self.parents.all()

    @property
    def status(self):
        """The status of the data underlying this sample.

        Combines the statuses of the FASTQ1 (forward) and, if present,
        FASTQ2 (reverse) read datasets.
        """
        status_string = 'NO_DATA'

        fastq1_dataset_queryset = self.dataset_set.filter(
                type=Dataset.TYPE.FASTQ1)
        if len(fastq1_dataset_queryset) > 1:
            return 'ERROR: More than one forward reads source'
        if len(fastq1_dataset_queryset) == 1:
            status_string = 'FASTQ1: %s' % fastq1_dataset_queryset[0].status

        # Maybe add reverse reads.
        fastq2_dataset_queryset = self.dataset_set.filter(
                type=Dataset.TYPE.FASTQ2)
        if len(fastq2_dataset_queryset) > 1:
            return 'ERROR: More than one reverse reads source'
        if len(fastq2_dataset_queryset) == 1:
            status_string += (
                    ' | FASTQ2: %s' % fastq2_dataset_queryset[0].status)

        return status_string

    def __unicode__(self):
        return self.label

    @property
    def fastqc_links(self):
        """Links to the FASTQC output files.

        First checks if datasets are present, skips if missing.
        """
        links = []
        fqc_dataset_types = enumerate([
                Dataset.TYPE.FASTQC1_HTML,
                Dataset.TYPE.FASTQC2_HTML], start=1)
        for read_num, fqc_dataset_type in fqc_dataset_types:
            fastqc_dataset = get_dataset_with_type(self, fqc_dataset_type)
            if not fastqc_dataset:
                continue
            links.append(
                    '<a href="{url}" target="_blank">'
                    'Read {read_num}</a>'.format(
                            url=reverse(
                                    'main.views.fastqc_view',
                                    args=(self.project.uid, self.uid,
                                            read_num)),
                            read_num=read_num))
        return ', '.join(links)

    def get_model_data_root(self):
        """Get the root location for all data of this type in the project.
        """
        return os.path.join(self.project.get_model_data_dir(), 'samples')

    def get_model_data_dir(self):
        """Get the full path to the location of this model's data.
        """
        return os.path.join(self.get_model_data_root(), str(self.uid))

    def ensure_model_data_dir_exists(self):
        """Ensure that a data directory exists for this model.
        """
        # Make sure the root of projects exists
        ensure_exists_0775_dir(self.get_model_data_root())

        # Check whether the data dir exists, and create it if not.
        return ensure_exists_0775_dir(self.get_model_data_dir())

    def delete_model_data_dir(self):
        """Removes all data associated with this model.

        WARNING: Be careful with this method!
        """
        data_dir = self.get_model_data_dir()
        if os.path.exists(data_dir):
            shutil.rmtree(data_dir)

    @classmethod
    def get_field_order(clazz, **kwargs):
        """Get the order of the models for displaying on the front-end.
        Called by the adapter.
        """
        return [
            {'field': 'label'},
            {'field': 'status'},
            {'field': 'uid', 'verbose': 'Internal ID'},
            {'field': 'fastqc_links', 'verbose': 'FastQC'},
        ]
class ExperimentSampleRelation(UniqueUidModelMixin):
    """
    Explicit table linking parent and child samples.

    This is the 'through' model for ExperimentSample.children.
    """
    # The parent sample in the relationship.
    parent = models.ForeignKey(ExperimentSample, related_name='parent_relationships')

    # The child sample in the relationship.
    child = models.ForeignKey(ExperimentSample, related_name='child_relationships')
class AlignmentGroup(UniqueUidModelMixin):
    """Collection of alignments of several related ExperimentSamples to the
    same ReferenceGenome.

    The reason for grouping alignments together is that our variant operations
    are generally relative to a single reference genome, and further, variant
    calling tools often take multiple alignments as input, thus it makes sense
    to group these in the database.

    For a one-to-one mapping of Alignment to Sample, see
    ExperimentSampleToAlignment.
    """

    # Human-readable identifier.
    label = models.CharField(max_length=256, blank=True)

    # All alignments in this set are relative to this genome.
    reference_genome = models.ForeignKey('ReferenceGenome')

    # The aligner tool used for this alignment.
    class ALIGNER:
        """Constants for representing the aligner type.
        """
        BWA = 'BWA'
    ALIGNER_CHOICES = make_choices_tuple(ALIGNER)
    aligner = models.CharField(max_length=10, choices=ALIGNER_CHOICES)

    # Times for the alignment run.
    start_time = models.DateTimeField(blank=True, null=True)
    end_time = models.DateTimeField(blank=True, null=True)

    # Datasets pointing to files on the system (e.g. .fasta files, etc.)
    dataset_set = models.ManyToManyField('Dataset', blank=True, null=True,
            verbose_name="Datasets")

    def default_alignment_options():
        """
        Return the default alignment options.

        Includes currently:
            call_as_haploid : haploid calling mode (defaults to diploid)
            skip_het_only : remove het-only calls in diploid mode (default false)

        To do at some point:
            * custom arguments to bwa, gatk, freebayes, etc
            * enabling/changing of processing steps (DEFAULT_PROCESSING_MASK)
        """
        return json.dumps({
            'call_as_haploid': False,
            'skip_het_only': False
        })

    # see default_alignment_options()
    alignment_options = PostgresJsonField(default=default_alignment_options)

    class STATUS:
        """
        The status of running this Dataset.

        Limit to 40-chars as per Dataset.status field def.
        """
        NOT_STARTED = 'NOT_STARTED'
        ALIGNING = 'ALIGNING'
        VARIANT_CALLING = 'VARIANT_CALLING'
        COMPLETED = 'COMPLETED'
        FAILED = 'FAILED'
        UNKNOWN = 'UNKNOWN'
    STATUS_CHOICES = make_choices_tuple(STATUS)
    status = models.CharField('Alignment Status',
            max_length=40, choices=STATUS_CHOICES, default=STATUS.NOT_STARTED)

    # Statuses that indicate the alignment pipeline is running.
    PIPELINE_IS_RUNNING_STATUSES = [
        STATUS.ALIGNING,
        STATUS.VARIANT_CALLING
    ]

    def __unicode__(self):
        return self.label

    def get_model_data_root(self):
        """Get the root location for all data of this type in the project.
        """
        return os.path.join(self.reference_genome.project.get_model_data_dir(),
                'alignment_groups')

    def get_model_data_dir(self):
        """Get the full path to the location of this model's data.
        """
        return os.path.join(self.get_model_data_root(), str(self.uid))

    def ensure_model_data_dir_exists(self):
        """Ensure that a data directory exists for this model.
        """
        # Make sure the root exists.
        ensure_exists_0775_dir(self.get_model_data_root())

        # Check whether the data dir exists, and create it if not.
        return ensure_exists_0775_dir(self.get_model_data_dir())

    def delete_model_data_dir(self):
        """Removes all data associated with this model.

        WARNING: Be careful with this method!
        """
        data_dir = self.get_model_data_dir()
        if os.path.exists(data_dir):
            shutil.rmtree(data_dir)

    @property
    def href(self):
        """Link to url view for this model.
        """
        return reverse(
                'main.views.alignment_view',
                args=(self.reference_genome.project.uid, self.uid))

    @property
    def run_time(self):
        """Time elapsed since alignment start, formatted 'H:MM:SS'.

        NOTE: This might be complicated by the not-so-clean implementation of
        the pipeline runner.
        """
        # Cases where alignment has not been run before.
        if (self.start_time is None or
                self.status == AlignmentGroup.STATUS.NOT_STARTED or
                self.status == AlignmentGroup.STATUS.UNKNOWN):
            return 'NOT RUNNING'

        # Determine effective end time to use for calculating running time,
        # depending on whether pipeline completed or not.
        if self.end_time is None:
            # Start time but no end time which typically should mean that
            # the pipeline is still running.
            # However, we still check for weird states because the pipeline
            # occasionally has issues.
            if self.status in [
                    AlignmentGroup.STATUS.FAILED,
                    AlignmentGroup.STATUS.COMPLETED]:
                return 'ERROR'
            effective_end_time = datetime.now()
        else:
            # End time exists so pipeline ran to completion or controlled
            # failure.
            effective_end_time = self.end_time

        # str(timedelta) is 'H:MM:SS' or 'H:MM:SS.ffffff'; strip the
        # fractional part when present. The old code assumed '.' was always
        # there and crashed on None.group() when microseconds == 0.
        delta_str = str(effective_end_time - self.start_time)
        match = re.match(r'(.*:.*:.*)\.', delta_str)
        return match.group(1) if match else delta_str

    @classmethod
    def get_field_order(clazz, **kwargs):
        """Get the order of the models for displaying on the front-end.
        Called by the adapter.
        """
        return [{'field':'label'},
                {'field':'reference_genome'},
                {'field':'aligner'},
                {'field':'status', 'verbose':'Job Status'},
                {'field':'start_time'},
                {'field':'end_time'},
                {'field':'run_time'}]

    def get_samples(self):
        """Many different tasks require getting the samples (or their UIDs)
        that are in this alignment group.
        """
        return ExperimentSample.objects.filter(
                experimentsampletoalignment__alignment_group=self)

    def get_or_create_vcf_output_dir(self):
        """Returns path to vcf root dir, creating it if necessary.
        """
        vcf_dir = os.path.join(self.get_model_data_dir(), 'vcf')
        ensure_exists_0775_dir(vcf_dir)
        return vcf_dir

    def get_combined_error_log_data(self):
        """Returns raw string representing entire error log for alignment.
        """
        vcf_dir = self.get_or_create_vcf_output_dir()

        # TODO(gleb): Support other error files.
        error_file = os.path.join(vcf_dir, 'merge_variant_data.error')
        if os.path.exists(error_file):
            with open(error_file) as fh:
                raw_data = fh.read()
        else:
            raw_data = 'None'
        return raw_data
class ExperimentSampleToAlignment(UniqueUidModelMixin):
    """Model that describes the alignment of a single ExperimentSample
    to an AlignmentGroup.
    """

    alignment_group = models.ForeignKey('AlignmentGroup')

    experiment_sample = models.ForeignKey('ExperimentSample')

    dataset_set = models.ManyToManyField('Dataset', blank=True, null=True)

    data = PostgresJsonField()

    class ASSEMBLY_STATUS:
        """
        The status of an Assembly
        """
        QUEUED = 'QUEUED TO ASSEMBLE'
        ASSEMBLING = 'ASSEMBLING'
        COMPLETED = 'COMPLETED'
        FAILED = 'FAILED'

    @property
    def status(self):
        """Status of the running alignment job for this sample.

        Derived from the single BWA_ALIGN dataset, if one exists.
        """
        bwa_datasets = self.dataset_set.filter(
                type=Dataset.TYPE.BWA_ALIGN)
        assert len(bwa_datasets) <= 1, (
                "Expected only one alignment dataset.")
        if len(bwa_datasets) == 1:
            return bwa_datasets[0].status
        return 'UNDEFINED'

    @property
    def error_link(self):
        """HTML anchor linking to this sample alignment's log view."""
        log_url = reverse(
                'main.views.sample_alignment_error_view',
                args=(self.alignment_group.reference_genome.project.uid,
                        self.alignment_group.uid,
                        self.uid))
        return '<a href="' + log_url + '">log output</a>'

    @classmethod
    def get_field_order(clazz, **kwargs):
        """Field display order for the front-end; called by the adapter."""
        return [
            {'field': 'experiment_sample'},
            {'field': 'status', 'verbose': 'Job Status'},
            {'field': 'error_link', 'verbose': 'Sample Alignment Log', 'is_href': True},
        ]

    def get_model_data_root(self):
        """Root directory for all sample alignments in the parent group."""
        return os.path.join(self.alignment_group.get_model_data_dir(),
                'sample_alignments')

    def get_model_data_dir(self):
        """Directory holding this specific sample alignment's data."""
        return os.path.join(self.get_model_data_root(), str(self.uid))

    def ensure_model_data_dir_exists(self):
        """Create this model's data directory (and its root) if missing."""
        ensure_exists_0775_dir(self.get_model_data_root())
        return ensure_exists_0775_dir(self.get_model_data_dir())

    def delete_model_data_dir(self):
        """Remove all on-disk data for this sample alignment.

        WARNING: Be careful with this method!
        """
        target_dir = self.get_model_data_dir()
        if os.path.exists(target_dir):
            shutil.rmtree(target_dir)
###############################################################################
# Variants (SNVs and SVs)
###############################################################################
class Variant(UniqueUidModelMixin):
    """An instance of a variation relative to a reference genome.

    This might be, for example, a SNV (single nucleotide variation) or a bigger
    SV (structural variation). We are intentionally using a unified model for
    these two classes of variations as the fundamental unit of genome analysis
    is really a diff.

    TODO: See code from Gemini paper (PLOS ONE 7/18/13) for ideas.

    A variant need not necessarily be associated with a specific sample; the
    VariantToExperimentSample model handles this association.
    """

    class TYPE:
        # Closed set of variant-type constants, turned into Django field
        # choices via make_choices_tuple() below.
        DELETION = 'DELETION'
        INSERTION = 'INSERTION'
        TRANSITION = 'TRANSITION'
        TRANSVERSION = 'TRANSVERSION'
        DUPLICATION = 'DUPLICATION'
        INVERSION = 'INVERSION'
        COMPLEX = 'COMPLEX' # Multi-base in different genomes
    TYPE_CHOICES = make_choices_tuple(TYPE)
    type = models.CharField('Type', max_length=40, choices=TYPE_CHOICES)

    reference_genome = models.ForeignKey('ReferenceGenome',
        verbose_name='Reference Genome')

    chromosome = models.ForeignKey('Chromosome')

    position = models.BigIntegerField('Position')
    ref_value = models.TextField('Ref')

    # User specified data fields corresponding to the variant
    data = PostgresJsonField()

    def __init__(self, *args, **kwargs):
        """If we are passed an alt_value field, we need to get_or_create
        VariantAlternate objects corresponding to them, and link them up to
        this new variant. We're ignoring the handling the rare situation when a
        Variant has no alt_values, which we don't really want to happen. It is
        difficult to handle because sometimes the VariantAlternate objects are
        declared separately and added to the Variant after __init__()."""
        alts = kwargs.get('alt_value', None)

        # Here I'm mutating kwargs to get rid of alt_value, but I can't think
        # of a reason why this would be a problem, since we've already saved it.
        kwargs.pop('alt_value',None)

        # call super's __init__ without the alt_value field if present
        super(Variant, self).__init__(*args, **kwargs)

        if alts is None: return

        #handle case of one or multiple alt_values
        if not isinstance(alts, basestring):
            # alt_value is a list of alts
            alt_values = alts
        else:
            # alt value is one alt
            alt_values = [alts]

        # NOTE(review): VariantAlternate.objects.create(variant=self)
        # presumably requires this Variant to already have a primary key;
        # confirm that callers construct Variants via a path that saves
        # first (e.g. Variant.objects.create).
        for alt_value in alt_values:
            self.variantalternate_set.add(
                    VariantAlternate.objects.create(
                            variant=self,
                            alt_value=alt_value
                    )
            )

    @property
    def label(self):
        """Synthetic display label: '<position>_<ref>_<alt1,alt2,...>'."""
        # NOTE: If adding a new VCCD object to a variant, this could change by
        # the addition of new variants. Is that an issue?
        return (
                str(self.position) +
                '_' + self.ref_value +
                '_' + ','.join(self.get_alternates()))

    def get_alternates(self):
        """ Return a base string for each alternate for this variant. """
        return [alt.alt_value for alt in self.variantalternate_set.all()]

    @property
    def variant_specific_tracks(self):
        """Per-variant JBrowse track names stored in data, if any."""
        return self.data.get(
                'variant_specific_tracks',
                {'alignment': [], 'coverage': []})

    @property
    def jbrowse_link(self):
        """HTML anchor opening JBrowse at this variant's position."""
        ref_genome_jbrowse = self.reference_genome.get_client_jbrowse_link()
        location_param = '&loc=' + str(self.position)
        full_href = ref_genome_jbrowse + location_param
        return '<a href="' + full_href + '">jbrowse</a>'

    @classmethod
    def get_field_order(clazz, **kwargs):
        raise NotImplementedError(
                "Currently, Variants are displayed via model_views.py")
class VariantCallerCommonData(Model, VisibleFieldMixin):
    """Model that describes data provided by a specific caller about a
    particular Variant.

    The reason for this model is that most variant callers are run for multiple
    ExperientSamples at the same time, generating some common data for each
    variant found, as well as data unique to each ExperimentSample. This model
    represents the common shared data.

    To be even more specific, the VCF format typically gives a row for each
    variant, where the first several columns describe the variant in general.
    This common data is stored in this model. There are additional columns in
    the vcf, one per ExperimentSample, which provides data about the
    relationship between the Variant and the ExperimentSample for that column.
    This data is stored in VariantEvidence instances, one per column.
    """
    # Variant this object refers to. It's possible for multiple callers report
    # the same Variant so this is a many-to-one relationship.
    variant = models.ForeignKey('Variant')

    # Source dataset for this data.
    source_dataset = models.ForeignKey('Dataset')

    # Catch-all key-value data store.
    data = PostgresJsonField()

    # Alignment group whose VCF produced this common data.
    alignment_group = models.ForeignKey('AlignmentGroup')

    def __getattr__(self, name):
        """Automatically called if an attribute is not found in the typical
        place.

        Our implementation checks the data dict and raises AttributeError if
        the value is not found, so getattr()/hasattr() behave normally.

        See: http://docs.python.org/2/reference/datamodel.html#object.__getattr__
        """
        try:
            return self.data[name]
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. Still intentionally broad so that
            # any lookup failure (missing key, data not yet populated) is
            # reported as an ordinary missing attribute.
            raise AttributeError(name)

    @classmethod
    def default_view_fields(clazz):
        # No fields shown by default on the front-end.
        return []
class VariantAlternate(UniqueUidModelMixin, VisibleFieldMixin):
    """A model listing alternate alleles for each variant."""
    # Null is true here because we are adding this relationship during Variant's
    # overloaded __init__() so it hasn't been saved() yet. Otherwise it throws
    # an django.db.utils.IntegrityError:
    #     main_variantalternate.variant_id may not be NULL
    variant = models.ForeignKey('Variant', null=True)

    # The alternate allele base string (e.g. 'AT').
    alt_value = models.TextField('Alt')

    # BUG FIX: the default used to be the *string* 'False', which is truthy,
    # so `if obj.is_primary:` on a fresh (unsaved) instance was wrongly True;
    # Django only coerces the string to a bool at the DB layer. Use the real
    # boolean so in-memory and persisted semantics agree.
    is_primary = models.BooleanField(default=False)

    # this json fields holds all PER ALT data (INFO data with num -1)
    data = PostgresJsonField()

    def __unicode__(self):
        # Truncate long alt strings for readable reprs.
        alt_value = self.alt_value
        if len(self.alt_value) > 10:
            alt_value = alt_value[:10] + '...'
        return 'var: ' + str(self.variant) + ', alt:' + alt_value

    # TODO: Do we want to explicitly link each VariantAlternate to
    # it's variant index in each VCCD object or VE object?
    # Currently it's done implicitly through the VCCD's data['ALT']
    # field and VE's data['gt_bases'] and data['GT'] fields, but these
    # are not checked for consistency.

    @classmethod
    def default_view_fields(clazz, **kwargs):
        """Get the order of the models for displaying on the front-end.
        Called by the adapter.
        """
        return [{'field':'alt_value', 'verbose':'Alt(s)'}]
class VariantEvidence(UniqueUidModelMixin, VisibleFieldMixin):
    """
    Evidence for a particular variant occurring in a particular
    ExperimentSample.
    """
    # The specific ExperimentSample that this object provides evidence
    # of the respective variant occurring in.
    # NOTE: This implies the ReferenceGenome.
    experiment_sample = models.ForeignKey('ExperimentSample')

    # The location of the common data for this call.
    variant_caller_common_data = models.ForeignKey('VariantCallerCommonData')

    # One or more alternate alleles for this variant -
    # Multiple are possible if the allele is called for multiple alts
    variantalternate_set = models.ManyToManyField('VariantAlternate')

    # Catch-all key-value set of data.
    # TODO: Extract interesting keys (e.g. gt_type) into their own SQL fields.
    data = PostgresJsonField()

    def __init__(self, *args, **kwargs):
        # HACK: Manually cache data to avoid expensive lookups.
        # (Fixed a stale copy-pasted comment: unlike Variant.__init__, no
        # alt_value kwarg handling happens here.)
        self.manually_cached_data = {}
        super(VariantEvidence, self).__init__(*args, **kwargs)

    def __getattr__(self, name):
        """Automatically called if an attribute is not found in the typical
        place.

        Our implementation checks the data dict and raises AttributeError if
        the value is not found, so getattr()/hasattr() behave normally.

        See:
            http://docs.python.org/2/reference/datamodel.html#object.__getattr__
        """
        try:
            return self.data[name]
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. Still intentionally broad so that
            # any lookup failure is reported as a missing attribute.
            raise AttributeError(name)

    def create_variant_alternate_association(self):
        """Link this evidence to the VariantAlternate rows its genotype calls.

        Reads data['GT_BASES'] (e.g. 'A/AT') and attaches the matching
        VariantAlternate for every called base that differs from the ref.
        """
        gt_bases = self.data['GT_BASES']

        # If this variant evidence is a non-call, no need to add alt alleles.
        if gt_bases is None:
            return

        assert ('|' not in gt_bases), (
                'GT bases string is phased;' +
                'this is not handled and should never happen...')

        # The gt_bases string looks like, e.g. 'A/AT'. Loop over alts.
        for gt_base in gt_bases.split('/'):
            try:
                variant = self.variant_caller_common_data.variant

                # Skip if this is not an alternate allele
                if variant.ref_value == gt_base:
                    continue

                self.variantalternate_set.add(
                        VariantAlternate.objects.get(
                                variant=variant,
                                alt_value=gt_base))
            except VariantAlternate.DoesNotExist:
                # Should not happen.
                print ('Attempt to add a SampleEvidence with an alternate ' +
                       'allele that is not present for this variant!')
                raise

    @property
    def sample_uid(self):
        """UID of the associated sample, via the manual cache when possible."""
        if 'sample_uid' in self.manually_cached_data:
            return self.manually_cached_data['sample_uid']
        # Otherwise, probably do DB lookup. Guarantee correctness.
        return self.experiment_sample.uid

    @classmethod
    def default_view_fields(clazz):
        return [
            {'field':'gt_type'},
            {'field':'sample_uid', 'verbose':'Samples'},
        ]
###############################################################################
# Analysis
###############################################################################
class VariantToVariantSet(Model):
    """Relationship between variants and variant sets.

    In addition to linking a variant to a set, this model also allows
    strengthening the information content of the relationship by indicating
    which specific ExperimentSamples this relationship is valid for.
    """
    # The Variant side of the association.
    variant = models.ForeignKey('Variant')

    # The VariantSet side of the association.
    variant_set = models.ForeignKey('VariantSet')

    # Optional: samples for which this variant/set link is meaningful.
    sample_variant_set_association = models.ManyToManyField('ExperimentSample',
            blank=True, null=True)

    @classmethod
    def get_field_order(clazz, **kwargs):
        """Get the order of the models for displaying on the front-end.
        Called by the adapter.
        """
        return [{'field':'variant'},
                {'field':'sample_variant_set_association'}]
class VariantSet(UniqueUidModelMixin):
    """Model for grouping together variants for analysis.

    This object can also be thought of a 'tag' for a set of variants. For
    example, we might create a VariantSet called 'c321D Designed Changes'
    to represent the set of Variants that were intended for mutation.
    Variants hold a list of Variant objects, and each can, but do not have to,
    point to one or more VariantToExperimentSample objects.

    Each variant set can contain variants from multiple alignments or samples,
    but all variants must belong to a single reference genome.

    TODO: In the future, we might come up with a framework for transferring
    variants or variant sets to new reference genomes via LiftOver or something
    similar.
    """
    # Human-readable name of this set.
    label = models.CharField(max_length=256)

    # Single reference genome that all member variants belong to.
    reference_genome = models.ForeignKey('ReferenceGenome')

    # Member variants, linked through VariantToVariantSet so the relationship
    # can carry per-sample associations.
    variants = models.ManyToManyField('Variant', blank=True, null=True,
        # TODO: find correct syntax for limit_choices_to here
        #limit_choices_to = {'reference_genome' : self.reference_genome},
        through = 'VariantToVariantSet')

    # Datasets pointing to files on the system
    # Primarily for VCF files uploaded by the user to describe putative vars
    dataset_set = models.ManyToManyField('Dataset', blank=True, null=True,
            verbose_name="Datasets")

    def __unicode__(self):
        return self.label

    @classmethod
    def get_field_order(clazz, **kwargs):
        """Get the order of the models for displaying on the front-end.
        Called by the adapter.
        """
        return [{'field':'label'},
                {'field':'reference_genome'}]

    @property
    def href(self):
        """Link to url view for this model.
        """
        return reverse(
                'main.views.variant_set_view',
                args=(self.reference_genome.project.uid, self.uid))

    def get_model_data_root(self):
        """Get the root location for all data of this type in the project.
        """
        return os.path.join(
                self.reference_genome.project.get_model_data_dir(),
                'variant_sets')

    def get_model_data_dir(self):
        """Get the full path to the location of this model's data.

        The directory is named by this set's uid under the project root.
        """
        return os.path.join(self.get_model_data_root(), str(self.uid))

    def ensure_model_data_dir_exists(self):
        """Ensure that a data directory exists for this model.

        Returns the result of creating/checking the model data dir.
        """
        # Make sure the root exists.
        ensure_exists_0775_dir(self.get_model_data_root())

        # Check whether the data dir exists, and create it if not.
        return ensure_exists_0775_dir(self.get_model_data_dir())
class Region(UniqueUidModelMixin):
    """Semantic annotation for a disjoint set of intervals in a
    ReferenceGenome.

    This allows the user to ask semantically deeper questions.
    """
    # Genome this region annotates.
    reference_genome = models.ForeignKey('ReferenceGenome')

    # Human-readable identifier.
    # Depending on the type and how disciplined we are with development,
    # this could further be semantically meaningful (e.g. gene name).
    label = models.CharField('Region Name', max_length=256)

    class TYPE:
        """The type of this region.
        Limit to 40-chars as per Dataset.type field def.
        """
        POOR_MAPPING_QUALITY = 'POOR_MAPPING_QUALITY'
        GENE = 'GENE'
    # Django choices tuple derived from the TYPE constants above.
    TYPE_CHOICES = make_choices_tuple(TYPE)
    # NOTE: attribute name shadows the `type` builtin inside this class body.
    type = models.CharField(max_length=40, choices=TYPE_CHOICES)
class RegionInterval(Model):
    """One of possibly several intervals that describe a single region.
    """
    # Parent Region this interval belongs to.
    region = models.ForeignKey('Region')

    # One-indexed.
    start = models.BigIntegerField()

    # One-indexed.
    # NOTE(review): whether `end` is inclusive is not established here —
    # confirm against the code that populates these intervals.
    end = models.BigIntegerField()
class SavedVariantFilterQuery(UniqueUidModelMixin):
    """Saved query belonging to the user.
    """
    # Profile of the user who saved the query.
    owner = models.ForeignKey('UserProfile')

    # Raw filter-query text as entered by the user.
    text = models.TextField()
class S3File(Model):
    """Model for keeping track of all files in S3 bucket.
    """
    bucket = models.CharField(max_length=200)

    # key is the actually name of the file stored in S3 bucket.
    key = models.CharField(max_length=200)

    # name is the original name of the file on uploader's machine
    name = models.CharField(max_length=200, null=True)

    created_at = models.DateTimeField(auto_now_add = True)

    def url(self):
        """Return the s3:// URI of this file."""
        return "s3://{0}/{1}".format(self.bucket, self.key)

    def __unicode__(self):
        return unicode(self.url())
def get_or_create_derived_bam_dataset(sample_alignment, dataset_type,
        derivation_fn, force_rerun=False):
    """Gets or creates a new bam Dataset derived according to a provided function.

    The purpose of this function is to abstract the boilerplate that goes into
    creating a derived bam Dataset.

    Args:
        sample_alignment: ExperimentSampleToAlignment that is in a READY state.
        dataset_type: Dataset.TYPE of the dataset to get.
        derivation_fn: Function(sample_alignment, new_dataset).
            Mutates new_dataset. Should raise CalledProcessError if there is a
            problem during computing
        force_rerun: If True, recompute even when a READY Dataset exists.

    Returns:
        New Dataset. Its status is READY on success, FAILED if the
        derivation raised CalledProcessError.
    """
    # First, ensure the Dataset exists.
    new_dataset = get_dataset_with_type(
            sample_alignment, dataset_type)
    if new_dataset is None:
        new_dataset = Dataset.objects.create(
                label=dataset_type,
                type=dataset_type,
                status=Dataset.STATUS.NOT_STARTED)
        sample_alignment.dataset_set.add(new_dataset)

    # Next, check if the Dataset is already computed and can just be returned.
    if (not force_rerun and new_dataset.status == Dataset.STATUS.READY and
            os.path.exists(new_dataset.get_absolute_location())):
        return new_dataset

    # If here, we are going to run or re-run the Dataset so we reset the status
    # to indicate incomplete state.
    new_dataset.status = Dataset.STATUS.NOT_STARTED
    new_dataset.save(update_fields=['status'])

    try:
        # Start computing.
        new_dataset.status = Dataset.STATUS.COMPUTING
        new_dataset.save(update_fields=['status'])

        derivation_fn(sample_alignment, new_dataset)

        # Mark success.
        new_dataset.status = Dataset.STATUS.READY
        # BUG FIX: READY was previously never persisted, so the cached-result
        # check above could never pass and the derivation always re-ran.
        new_dataset.save(update_fields=['status'])
    except subprocess.CalledProcessError:
        new_dataset.filesystem_location = ''
        new_dataset.status = Dataset.STATUS.FAILED
        new_dataset.save()

    return new_dataset
| woodymit/millstone | genome_designer/main/models.py | Python | mit | 65,925 | [
"BWA",
"Biopython",
"Galaxy"
] | 1303a353b6d6a6b46112fd771a7fad1a3ce516ca1d6537e3ae1e8b40f6d95e39 |
from __future__ import absolute_import
import json
import datetime
import pytz
import logging
from django.core.mail import send_mail
from django.contrib.auth import authenticate, login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group, User
from django.contrib.sites.models import Site
from django.contrib import messages
from django.contrib.messages import get_messages
from django.utils.decorators import method_decorator
from django.core.exceptions import ValidationError, PermissionDenied, ObjectDoesNotExist
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse, \
HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import get_object_or_404, render_to_response, render, redirect
from django.template import RequestContext
from django.core import signing
from django.db import Error, IntegrityError
from django import forms
from django.views.generic import TemplateView
from django.core.urlresolvers import reverse
from rest_framework.decorators import api_view
from mezzanine.conf import settings
from mezzanine.pages.page_processors import processor_for
from mezzanine.utils.email import subject_template, send_mail_template
import autocomplete_light
from inplaceeditform.commons import get_dict_from_obj, apply_filters
from inplaceeditform.views import _get_http_response, _get_adaptor
from django_irods.storage import IrodsStorage
from django_irods.icommands import SessionException
from hs_core import hydroshare
from hs_core.hydroshare.utils import get_resource_by_shortkey, resource_modified, resolve_request
from .utils import authorize, upload_from_irods, ACTION_TO_AUTHORIZE, run_script_to_update_hyrax_input_files, \
get_my_resources_list, send_action_to_take_email, get_coverage_data_dict
from hs_core.models import GenericResource, resource_processor, CoreMetaData, Subject
from hs_core.hydroshare.resource import METADATA_STATUS_SUFFICIENT, METADATA_STATUS_INSUFFICIENT
from . import resource_rest_api
from . import resource_metadata_rest_api
from . import user_rest_api
from . import resource_folder_hierarchy
from . import resource_access_api
from . import resource_folder_rest_api
from hs_core.hydroshare import utils
from hs_core.signals import *
from hs_access_control.models import PrivilegeCodes, GroupMembershipRequest, GroupResourcePrivilege
from hs_collection_resource.models import CollectionDeletedResource
logger = logging.getLogger(__name__)
def short_url(request, *args, **kwargs):
    """Redirect a short resource URL to the resource's canonical page."""
    if 'shortkey' not in kwargs:
        raise TypeError('shortkey must be specified...')
    resource = get_resource_by_shortkey(kwargs['shortkey'])
    return HttpResponseRedirect(resource.get_absolute_url())
def verify(request, *args, **kwargs):
    """Activate and log in a user from a signed email-verification token.

    kwargs['token'] decodes (via django signing) to a colon-separated triple
    whose last two parts are the user pk and email; the first part is unused
    here. Activation only proceeds if the embedded email still matches the
    user's current email.
    """
    _, pk, email = signing.loads(kwargs['token']).split(':')
    u = User.objects.get(pk=pk)
    if u.email == email:
        if not u.is_active:
            # First successful verification: activate and grant author group.
            u.is_active=True
            u.save()
            u.groups.add(Group.objects.get(name="Resource Author"))
        from django.contrib.auth import login
        # login() requires a backend attribute; use the first configured one.
        u.backend = settings.AUTHENTICATION_BACKENDS[0]
        login(request, u)
        return HttpResponseRedirect('/account/update/')
    else:
        from django.contrib import messages
        messages.error(request, "Your verification token was invalid.")
        return HttpResponseRedirect('/')
def change_quota_holder(request, shortkey):
    """Reassign a resource's quota holder to another user.

    POST['new_holder_username'] names the new holder. Returns 400 for a
    missing or unknown username, 403 if the requesting user is not allowed
    to change the holder, otherwise emails the new holder and redirects to
    the resource page.
    """
    new_holder_uname = request.POST.get('new_holder_username', '')
    if not new_holder_uname:
        # No username supplied in the form.
        return HttpResponseBadRequest()
    new_holder_u = User.objects.filter(username=new_holder_uname).first()
    if not new_holder_u:
        # No such user.
        return HttpResponseBadRequest()

    res = utils.get_resource_by_shortkey(shortkey)
    try:
        # Raises PermissionDenied if request.user may not reassign.
        res.raccess.set_quota_holder(request.user, new_holder_u)

        # send notification to the new quota holder
        context = {
            "request": request,
            "user": request.user,
            "new_quota_holder": new_holder_u,
            "resource_uuid": res.short_id,
        }
        subject_template_name = "email/quota_holder_change_subject.txt"
        subject = subject_template(subject_template_name, context)
        send_mail_template(subject, "email/quota_holder_change",
                           settings.DEFAULT_FROM_EMAIL, new_holder_u.email,
                           context=context)
    except PermissionDenied:
        return HttpResponseForbidden()

    return HttpResponseRedirect(res.get_absolute_url())
def add_files_to_resource(request, shortkey, *args, **kwargs):
    """
    This view function is called by AJAX in the folder implementation
    :param request: AJAX request
    :param shortkey: resource uuid
    :param args:
    :param kwargs:
    :return: HTTP response with status code indicating success or failure
    """
    resource, _, _ = authorize(request, shortkey,
                               needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    res_files = request.FILES.values()
    # NOTE(review): request.REQUEST is deprecated in newer Django — confirm
    # the framework version before upgrading.
    extract_metadata = \
        request.REQUEST.get('extract-metadata', 'No').lower() == 'yes'

    # Normalize the target folder: strip the "data/contents" prefix; the
    # bare contents root means "no folder".
    file_folder = request.POST.get('file_folder', None)
    if file_folder is not None:
        if file_folder == "data/contents":
            file_folder = None
        elif file_folder.startswith("data/contents/"):
            file_folder = file_folder[len("data/contents/"):]

    try:
        utils.resource_file_add_pre_process(resource=resource, files=res_files,
                                            user=request.user,
                                            extract_metadata=extract_metadata,
                                            folder=file_folder)
    except hydroshare.utils.ResourceFileSizeException as ex:
        return HttpResponse('file_size_error: ' + ex.message, status=500)
    except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:
        return HttpResponse('validation_error: ' + ex.message, status=500)

    try:
        hydroshare.utils.resource_file_add_process(resource=resource,
                                                   files=res_files,
                                                   user=request.user,
                                                   extract_metadata=extract_metadata,
                                                   folder=file_folder)
    except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:
        return HttpResponse('validation_error: ' + ex.message, status=500)

    return HttpResponse(status=200)
def _get_resource_sender(element_name, resource):
    """Return the signal-sender class for a metadata element.

    Core metadata elements use the generic resource class; everything else
    uses the concrete class of the given resource.
    """
    core_names = [name.lower()
                  for name in CoreMetaData.get_supported_element_names()]
    if element_name in core_names:
        return GenericResource().__class__
    return resource.__class__
def get_supported_file_types_for_resource_type(request, resource_type, *args, **kwargs):
    """AJAX view: report the upload file types supported by a resource type.

    Returns JSON {'file_types': <json-encoded list>} for AJAX requests,
    otherwise redirects back to the referring page.
    """
    resource_cls = hydroshare.check_resource_type(resource_type)
    # BUG FIX: is_ajax is a method; `if request.is_ajax:` tested the bound
    # method object, which is always truthy (cf. correct calls elsewhere
    # in this module).
    if request.is_ajax():
        # TODO: use try catch
        ajax_response_data = {'file_types': json.dumps(resource_cls.get_supported_upload_file_types())}
        return HttpResponse(json.dumps(ajax_response_data))
    else:
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
def is_multiple_file_upload_allowed(request, resource_type, *args, **kwargs):
    """AJAX view: report whether a resource type allows multi-file upload.

    Returns JSON {'allow_multiple_file': <bool>} for AJAX requests,
    otherwise redirects back to the referring page.
    """
    resource_cls = hydroshare.check_resource_type(resource_type)
    # BUG FIX: is_ajax is a method; `if request.is_ajax:` tested the bound
    # method object, which is always truthy.
    if request.is_ajax():
        # TODO: use try catch
        ajax_response_data = {'allow_multiple_file': resource_cls.allow_multiple_file_upload()}
        return HttpResponse(json.dumps(ajax_response_data))
    else:
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
def update_key_value_metadata(request, shortkey, *args, **kwargs):
    """
    This one view function is for CRUD operation for resource key/value arbitrary metadata.
    key/value data in request.POST is assigned to the resource.extra_metadata field

    Responds with JSON for AJAX callers; otherwise flashes a message and
    redirects back to the referrer.
    """
    res, _, _ = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    post_data = request.POST.copy()
    # 'resource-mode' is UI state, not metadata — strip it before assigning.
    resource_mode = post_data.pop('resource-mode', None)
    res.extra_metadata = post_data.dict()
    is_update_success = True
    err_message = ""
    try:
        res.save()
    except Error as ex:
        # Database error: report failure to the caller below.
        is_update_success = False
        err_message = ex.message

    if is_update_success:
        # Mark the bag stale and the science metadata dirty.
        resource_modified(res, request.user, overwrite_bag=False)
        res_metadata = res.metadata
        res_metadata.set_dirty(True)

    if request.is_ajax():
        if is_update_success:
            ajax_response_data = {'status': 'success',
                                  'is_dirty': res.metadata.is_dirty if
                                  hasattr(res.metadata, 'is_dirty') else False}
        else:
            ajax_response_data = {'status': 'error', 'message': err_message}
        return HttpResponse(json.dumps(ajax_response_data))

    # Non-AJAX path: keep the page in edit mode and flash a status message.
    if resource_mode is not None:
        request.session['resource-mode'] = 'edit'

    if is_update_success:
        messages.success(request, "Metadata update successful")
    else:
        messages.error(request, err_message)

    return HttpResponseRedirect(request.META['HTTP_REFERER'])
@api_view(['POST'])
def update_key_value_metadata_public(request, pk):
    """REST endpoint that replaces a resource's extra (key/value) metadata.

    Returns 200 on success, 400 if the save hits a database error.
    """
    res, _, _ = authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    res.extra_metadata = request.data.copy()
    try:
        res.save()
    except Error:
        # Database error: report failure without touching the bag.
        return HttpResponse(status=400)
    # Persisted successfully; mark the bag as needing regeneration.
    resource_modified(res, request.user, overwrite_bag=False)
    return HttpResponse(status=200)
def add_metadata_element(request, shortkey, element_name, *args, **kwargs):
    """This function is normally for adding/creating new resource level metadata elements.
    However, for the metadata element 'subject' (keyword) this function allows for creating,
    updating and deleting metadata elements.

    Flow: an empty 'subject' value deletes all keywords; otherwise a
    pre-create signal lets the resource type validate/massage the element
    data before creation. Responds with JSON for AJAX callers, else
    redirects back to the referrer.
    """
    res, _, _ = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    is_add_success = False
    err_msg = "Failed to create metadata element '{}'. {}."
    element = None
    sender_resource = _get_resource_sender(element_name, res)
    if element_name.lower() == 'subject' and len(request.POST['value']) == 0:
        # seems the user wants to delete all keywords - no need for pre-check in signal handler
        res.metadata.subjects.all().delete()
        is_add_success = True
        if not res.can_be_public_or_discoverable:
            # Without keywords the metadata may no longer be sufficient.
            res.raccess.public = False
            res.raccess.discoverable = False
            res.raccess.save()
        elif not res.raccess.discoverable:
            # NOTE(review): this turns discoverable ON right after deleting
            # all keywords — confirm this is the intended behavior.
            res.raccess.discoverable = True
            res.raccess.save()
        resource_modified(res, request.user, overwrite_bag=False)
    else:
        # Let the resource type's receiver validate the posted element data.
        handler_response = pre_metadata_element_create.send(sender=sender_resource,
                                                            element_name=element_name,
                                                            request=request)
        for receiver, response in handler_response:
            if 'is_valid' in response:
                if response['is_valid']:
                    element_data_dict = response['element_data_dict']
                    if element_name == 'subject':
                        # using set() to remove any duplicate keywords
                        keywords = set([k.strip() for k in element_data_dict['value'].split(',')])
                        keyword_maxlength = Subject._meta.get_field('value').max_length
                        keywords_to_add = []
                        for kw in keywords:
                            # Truncate keywords that exceed the DB field length.
                            if len(kw) > keyword_maxlength:
                                kw = kw[:keyword_maxlength]
                            # skip any duplicate keywords (case insensitive)
                            if kw not in keywords_to_add and kw.lower() not in keywords_to_add:
                                keywords_to_add.append(kw)
                        if len(keywords_to_add) > 0:
                            # Replace the full keyword list atomically.
                            res.metadata.subjects.all().delete()
                            for kw in keywords_to_add:
                                res.metadata.create_element(element_name, value=kw)
                        is_add_success = True
                    else:
                        try:
                            element = res.metadata.create_element(element_name, **element_data_dict)
                            is_add_success = True
                        except ValidationError as exp:
                            err_msg = err_msg.format(element_name, exp.message)
                            request.session['validation_error'] = err_msg
                        except Error as exp:
                            # some database error occurred
                            err_msg = err_msg.format(element_name, exp.message)
                            request.session['validation_error'] = err_msg
                        except Exception as exp:
                            # some other error occurred
                            err_msg = err_msg.format(element_name, exp.message)
                            request.session['validation_error'] = err_msg
                    if is_add_success:
                        resource_modified(res, request.user, overwrite_bag=False)
            elif "errors" in response:
                err_msg = err_msg.format(element_name, response['errors'])

    if request.is_ajax():
        if is_add_success:
            # Report the resource's (possibly changed) visibility status
            # alongside the new element so the UI can refresh.
            res_public_status = 'public' if res.raccess.public else 'not public'
            res_discoverable_status = 'discoverable' if res.raccess.discoverable \
                else 'not discoverable'
            if res.can_be_public_or_discoverable:
                metadata_status = METADATA_STATUS_SUFFICIENT
            else:
                metadata_status = METADATA_STATUS_INSUFFICIENT
            if element_name == 'subject':
                ajax_response_data = {'status': 'success', 'element_name': element_name,
                                      'metadata_status': metadata_status,
                                      'res_public_status': res_public_status,
                                      'res_discoverable_status': res_discoverable_status}
            elif element_name.lower() == 'site' and res.resource_type == 'TimeSeriesResource':
                # get the spatial coverage element
                spatial_coverage_dict = get_coverage_data_dict(res)
                ajax_response_data = {'status': 'success',
                                      'element_name': element_name,
                                      'spatial_coverage': spatial_coverage_dict,
                                      'metadata_status': metadata_status,
                                      'res_public_status': res_public_status,
                                      'res_discoverable_status': res_discoverable_status
                                      }
                if element is not None:
                    ajax_response_data['element_id'] = element.id
            else:
                ajax_response_data = {'status': 'success',
                                      'element_name': element_name,
                                      'metadata_status': metadata_status,
                                      'res_public_status': res_public_status,
                                      'res_discoverable_status': res_discoverable_status
                                      }
                if element is not None:
                    ajax_response_data['element_id'] = element.id
            ajax_response_data['is_dirty'] = res.metadata.is_dirty if \
                hasattr(res.metadata, 'is_dirty') else False
            return JsonResponse(ajax_response_data)
        else:
            ajax_response_data = {'status': 'error', 'message': err_msg}
            return JsonResponse(ajax_response_data)

    if 'resource-mode' in request.POST:
        request.session['resource-mode'] = 'edit'

    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def update_metadata_element(request, shortkey, element_name, element_id, *args, **kwargs):
    """Update one resource-level metadata element.

    A pre-update signal lets the resource type validate the posted data; a
    post-update signal lets it report whether the element still exists
    (some types delete the element as part of the update). Responds with
    JSON for AJAX callers, else redirects back to the referrer.
    """
    res, _, _ = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    sender_resource = _get_resource_sender(element_name, res)
    handler_response = pre_metadata_element_update.send(sender=sender_resource,
                                                        element_name=element_name,
                                                        element_id=element_id, request=request)
    is_update_success = False
    err_msg = "Failed to update metadata element '{}'. {}."
    for receiver, response in handler_response:
        if 'is_valid' in response:
            if response['is_valid']:
                element_data_dict = response['element_data_dict']
                try:
                    res.metadata.update_element(element_name, element_id, **element_data_dict)
                    post_handler_response = post_metadata_element_update.send(
                        sender=sender_resource, element_name=element_name, element_id=element_id)
                    is_update_success = True
                    # this is how we handle if a post_metadata_element_update receiver
                    # is not implemented in the resource type's receivers.py
                    element_exists = True
                    for receiver, response in post_handler_response:
                        if 'element_exists' in response:
                            element_exists = response['element_exists']
                except ValidationError as exp:
                    err_msg = err_msg.format(element_name, exp.message)
                    request.session['validation_error'] = err_msg
                except Error as exp:
                    # some database error occurred
                    err_msg = err_msg.format(element_name, exp.message)
                    request.session['validation_error'] = err_msg
                if element_name == 'title':
                    # A changed title can make the metadata insufficient for
                    # public status; demote the resource if so.
                    if res.raccess.public:
                        if not res.can_be_public_or_discoverable:
                            res.raccess.public = False
                            res.raccess.save()
                if is_update_success:
                    resource_modified(res, request.user, overwrite_bag=False)
        elif "errors" in response:
            err_msg = err_msg.format(element_name, response['errors'])

    if request.is_ajax():
        if is_update_success:
            # NOTE: element_exists is always bound here because
            # is_update_success is only set True after it is assigned.
            res_public_status = 'public' if res.raccess.public else 'not public'
            res_discoverable_status = 'discoverable' if res.raccess.discoverable \
                else 'not discoverable'
            if res.can_be_public_or_discoverable:
                metadata_status = METADATA_STATUS_SUFFICIENT
            else:
                metadata_status = METADATA_STATUS_INSUFFICIENT
            if element_name.lower() == 'site' and res.resource_type == 'TimeSeriesResource':
                # get the spatial coverage element
                spatial_coverage_dict = get_coverage_data_dict(res)
                ajax_response_data = {'status': 'success',
                                      'element_name': element_name,
                                      'spatial_coverage': spatial_coverage_dict,
                                      'metadata_status': metadata_status,
                                      'res_public_status': res_public_status,
                                      'res_discoverable_status': res_discoverable_status,
                                      'element_exists': element_exists}
            else:
                ajax_response_data = {'status': 'success',
                                      'element_name': element_name,
                                      'metadata_status': metadata_status,
                                      'res_public_status': res_public_status,
                                      'res_discoverable_status': res_discoverable_status,
                                      'element_exists': element_exists}
            ajax_response_data['is_dirty'] = res.metadata.is_dirty if \
                hasattr(res.metadata, 'is_dirty') else False
            return JsonResponse(ajax_response_data)
        else:
            ajax_response_data = {'status': 'error', 'message': err_msg}
            return JsonResponse(ajax_response_data)

    if 'resource-mode' in request.POST:
        request.session['resource-mode'] = 'edit'

    return HttpResponseRedirect(request.META['HTTP_REFERER'])
@api_view(['GET'])
def file_download_url_mapper(request, shortkey):
    """ maps the file URIs in resourcemap document to django_irods download view function"""
    resource, _, _ = authorize(request, shortkey,
                               needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
    # Drop the leading two path components and the trailing one to recover
    # the iRODS file path embedded in the request URL.
    irods_file_path = '/'.join(request.path.split('/')[2:-1])
    download_url = resource.get_irods_storage().url(irods_file_path)
    return HttpResponseRedirect(download_url)
def delete_metadata_element(request, shortkey, element_name, element_id, *args, **kwargs):
    """Delete one metadata element and return to the resource editor."""
    resource, _, _ = authorize(
            request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    resource.metadata.delete_element(element_name, element_id)
    resource_modified(resource, request.user, overwrite_bag=False)
    request.session['resource-mode'] = 'edit'
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def delete_file(request, shortkey, f, *args, **kwargs):
    """Delete a single resource file and return to the resource editor."""
    resource, _, user = authorize(
            request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    hydroshare.delete_resource_file(shortkey, f, user)
    request.session['resource-mode'] = 'edit'
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def delete_multiple_files(request, shortkey, *args, **kwargs):
    """Delete several resource files in one request.

    POST['file_ids'] is a comma-separated list of file ids. Files already
    removed as a side effect of deleting a sibling file are skipped.
    """
    res, _, user = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
    # file_ids is a string of file ids separated by comma
    f_ids = request.POST['file_ids']
    f_id_list = f_ids.split(',')
    for f_id in f_id_list:
        f_id = f_id.strip()
        if not f_id:
            # ROBUSTNESS FIX: a trailing/stray comma yields an empty id;
            # skip it instead of passing '' to delete_resource_file().
            continue
        try:
            hydroshare.delete_resource_file(shortkey, f_id, user)
        except ObjectDoesNotExist as ex:
            # Since some specific resource types such as feature resource type delete all other
            # dependent content files together when one file is deleted, we treat this specific
            # ObjectDoesNotExist exception as legitimate in delete_multiple_files() without
            # re-raising it.
            logger.debug(ex.message)
            continue
    request.session['resource-mode'] = 'edit'
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def delete_resource(request, shortkey, *args, **kwargs):
    """Delete a resource (DELETE permission required).

    Before deletion, remembers the resource's title, type, collections and
    owners so a CollectionDeletedResource record can be written for each
    containing collection afterwards. Responds with JSON for AJAX callers,
    otherwise redirects.
    """
    res, _, user = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.DELETE_RESOURCE)

    # Capture data needed after the resource row is gone.
    res_title = res.metadata.title
    res_id = shortkey
    res_type = res.resource_type
    resource_related_collections = [col for col in res.collections.all()]
    owners_list = [owner for owner in res.raccess.owners.all()]

    ajax_response_data = {'status': 'success'}
    try:
        hydroshare.delete_resource(shortkey)
    except ValidationError as ex:
        if request.is_ajax():
            ajax_response_data['status'] = 'error'
            ajax_response_data['message'] = ex.message
            return JsonResponse(ajax_response_data)
        else:
            request.session['validation_error'] = ex.message
            return HttpResponseRedirect(request.META['HTTP_REFERER'])

    # if the deleted resource is part of any collection resource, then for each of those collection
    # create a CollectionDeletedResource object which can then be used to list collection deleted
    # resources on collection resource landing page
    for collection_res in resource_related_collections:
        o=CollectionDeletedResource.objects.create(
             resource_title=res_title,
             deleted_by=user,
             resource_id=res_id,
             resource_type=res_type,
             collection=collection_res
             )
        o.resource_owners.add(*owners_list)

    # Notify listeners (e.g. per-type cleanup handlers) that the resource
    # has been deleted.
    post_delete_resource.send(sender=type(res), request=request, user=user,
                              resource_shortkey=shortkey, resource=res,
                              resource_title=res_title, resource_type=res_type, **kwargs)

    if request.is_ajax():
        return JsonResponse(ajax_response_data)
    else:
        return HttpResponseRedirect('/my-resources/')
def rep_res_bag_to_irods_user_zone(request, shortkey, *args, **kwargs):
    """AJAX view: copy this resource's bag zip into the requesting user's iRODS
    space in the xDCIShare user zone (federated with the hydroshare zone).

    Should only be exposed when the user has a corresponding iRODS account in
    the user zone, so that users can run analyses/computations on the bag.

    Args:
        request: an AJAX request
        shortkey: UUID of the resource to be copied to the login user's iRODS user space

    Returns:
        an application/json HttpResponse indicating success or error
    """
    def _json_response(payload):
        # all three exit paths use the same JSON envelope
        return HttpResponse(json.dumps(payload), content_type="application/json")

    res, authorized, user = authorize(request, shortkey,
                                      needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE,
                                      raises_exception=False)
    if not authorized:
        return _json_response({"error": "You are not authorized to replicate this resource."})
    try:
        utils.replicate_resource_bag_to_user_zone(user, shortkey)
    except SessionException as ex:
        return _json_response({"error": ex.stderr})
    return _json_response(
        {"success": "This resource bag zip file has been successfully copied to your iRODS user zone."})
def copy_resource(request, shortkey, *args, **kwargs):
    """Make a copy of a resource the user can at least view, then redirect to
    the new copy's landing page (or back to the original on failure)."""
    res, authorized, user = authorize(request, shortkey,
                                      needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
    copied_res = None
    try:
        copied_res = hydroshare.create_empty_resource(shortkey, user, action='copy')
        copied_res = hydroshare.copy_resource(res, copied_res)
    except Exception as ex:
        # roll back the partially-created copy before reporting the failure
        if copied_res:
            copied_res.delete()
        request.session['resource_creation_error'] = 'Failed to copy this resource: ' + ex.message
        return HttpResponseRedirect(res.get_absolute_url())
    # flag the session so the landing page can show first-visit/copied notices
    request.session['just_created'] = True
    request.session['just_copied'] = True
    return HttpResponseRedirect(copied_res.get_absolute_url())
@api_view(['POST'])
def copy_resource_public(request, pk):
    """REST endpoint wrapping copy_resource; returns the new resource id with 202."""
    redirect_response = copy_resource(request, pk)
    # redirect url looks like /resource/<id>/ - the id is the third segment
    new_res_id = redirect_response.url.split('/')[2]
    return HttpResponse(new_res_id, status=202)
def create_new_version_resource(request, shortkey, *args, **kwargs):
    """Create a new version of an existing resource.

    Uses ``res.locked_time`` as a coarse lock so only one user at a time can
    version a resource; a stale lock (older than
    ``settings.RESOURCE_LOCK_TIMEOUT_SECONDS``) is cleared automatically.
    Redirects to the new version's landing page on success, or back to the
    original resource with an error message in the session on failure.
    """
    res, authorized, user = authorize(request, shortkey,
                                      needed_permission=ACTION_TO_AUTHORIZE.CREATE_RESOURCE_VERSION)
    if res.locked_time:
        elapsed_time = datetime.datetime.now(pytz.utc) - res.locked_time
        # Bug fix: the lock is stale only once the elapsed time exceeds the
        # timeout threshold. The previous test used `days >= 0`, which is true
        # for any positive timedelta and made every lock look expired,
        # defeating the concurrency guard below.
        if elapsed_time.days > 0 or elapsed_time.seconds > settings.RESOURCE_LOCK_TIMEOUT_SECONDS:
            # clear the lock since the elapsed time is greater than timeout threshold
            res.locked_time = None
            res.save()
        else:
            # cannot create new version for this resource since the resource is locked by another user
            request.session['resource_creation_error'] = 'Failed to create a new version for ' \
                                                         'this resource since another user is ' \
                                                         'creating a new version for this ' \
                                                         'resource synchronously.'
            return HttpResponseRedirect(res.get_absolute_url())
    new_resource = None
    try:
        # lock the resource to prevent concurrent new version creation since only one new version for an
        # obsoleted resource is allowed
        res.locked_time = datetime.datetime.now(pytz.utc)
        res.save()
        new_resource = hydroshare.create_empty_resource(shortkey, user)
        new_resource = hydroshare.create_new_version_resource(res, new_resource, user)
    except Exception as ex:
        if new_resource:
            new_resource.delete()
        # release the lock if new version of the resource failed to create
        res.locked_time = None
        res.save()
        request.session['resource_creation_error'] = 'Failed to create a new version of ' \
                                                     'this resource: ' + ex.message
        return HttpResponseRedirect(res.get_absolute_url())
    # release the lock if new version of the resource is created successfully
    res.locked_time = None
    res.save()
    # go to resource landing page
    request.session['just_created'] = True
    return HttpResponseRedirect(new_resource.get_absolute_url())
@api_view(['POST'])
def create_new_version_resource_public(request, pk):
    """REST endpoint wrapping create_new_version_resource; returns the new resource id with 202."""
    redirect_response = create_new_version_resource(request, pk)
    # redirect url looks like /resource/<id>/ - the id is the third segment
    new_res_id = redirect_response.url.split('/')[2]
    return HttpResponse(new_res_id, status=202)
def publish(request, shortkey, *args, **kwargs):
    """Formally publish a resource, then redirect back to the referring page.

    Only resource owners may change resource flags such as 'published'; a
    ValidationError from the publish step is surfaced via the session.
    """
    res, _, _ = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.SET_RESOURCE_FLAG)
    try:
        hydroshare.publish_resource(request.user, shortkey)
    except ValidationError as err:
        request.session['validation_error'] = err.message
    else:
        # success: let the landing page show the just-published notice
        request.session['just_published'] = True
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def set_resource_flag(request, shortkey, *args, **kwargs):
    """Toggle a resource access flag (public/discoverable/shareable).

    Only resource owners are allowed to change resource flags. Redirects back
    to the referer when one exists, otherwise returns HTTP 202.
    """
    res, _, user = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.SET_RESOURCE_FLAG)
    t = resolve_request(request).get('t', None)
    # map each action keyword to the (flag, value) pair it sets
    flag_actions = {
        'make_public': ('public', True),
        'make_private': ('public', False),
        'make_not_discoverable': ('public', False),
        'make_discoverable': ('discoverable', True),
        'make_not_shareable': ('shareable', False),
        'make_shareable': ('shareable', True),
    }
    if t in flag_actions:
        flag_to_set, flag_value = flag_actions[t]
        _set_resource_sharing_status(request, user, res, flag_to_set=flag_to_set,
                                     flag_value=flag_value)
    if request.META.get('HTTP_REFERER', None):
        request.session['resource-mode'] = request.POST.get('resource-mode', 'view')
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', None))
    return HttpResponse(status=202)
@api_view(['POST'])
def set_resource_flag_public(request, pk):
    """REST endpoint wrapping set_resource_flag.

    Returns 400 with the first error message queued by the underlying view,
    otherwise passes its response through unchanged.
    """
    http_request = request._request
    http_request.data = request.data.copy()
    response = set_resource_flag(http_request, pk)
    for message in get_messages(request):
        if message.tags == "error":
            return HttpResponse(message, status=400)
    return response
def share_resource_with_user(request, shortkey, privilege, user_id, *args, **kwargs):
    """AJAX endpoint: grant *privilege* on a resource to a single user."""
    return _share_resource(request, shortkey, privilege, user_id, 'user')
def share_resource_with_group(request, shortkey, privilege, group_id, *args, **kwargs):
    """AJAX endpoint: grant *privilege* on a resource to a group."""
    return _share_resource(request, shortkey, privilege, group_id, 'group')
def _share_resource(request, shortkey, privilege, user_or_group_id, user_or_group):
    """
    share resource with a user or group

    :param request: http request (requesting user must at least be able to view the resource)
    :param shortkey: id of the resource to share with
    :param privilege: access privilege need for the resource ('view', 'edit' or 'owner')
    :param user_or_group_id: id of the user or group with whom the resource to be shared
    :param user_or_group: indicates if the resource to be shared with a user or group. A value of 'user' will share
                          the resource with a user whose id is provided with the parameter 'user_or_group_id'.
                          Any other value for this parameter assumes resource to be shared with a group.
    :return: HttpResponse with a JSON body describing the share outcome (status 200 or 400)
    """
    res, _, user = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
    user_to_share_with = None
    group_to_share_with = None
    status_code = 200
    if user_or_group == 'user':
        user_to_share_with = utils.user_from_id(user_or_group_id)
    else:
        group_to_share_with = utils.group_from_id(user_or_group_id)
    status = 'success'
    err_message = ''
    # translate the requested privilege keyword into a PrivilegeCodes value;
    # invalid combinations fall through with NONE and a 400 status code
    if privilege == 'view':
        access_privilege = PrivilegeCodes.VIEW
    elif privilege == 'edit':
        access_privilege = PrivilegeCodes.CHANGE
    elif privilege == 'owner':
        if user_or_group != 'user':
            # groups may never own resources
            status_code = 400
            err_message = "Group can't have owner privilege over a resource"
            access_privilege = PrivilegeCodes.NONE
        else:
            access_privilege = PrivilegeCodes.OWNER
    else:
        status_code = 400
        err_message = "Not a valid privilege"
        access_privilege = PrivilegeCodes.NONE
    if access_privilege != PrivilegeCodes.NONE:
        try:
            if user_or_group == 'user':
                user.uaccess.share_resource_with_user(res, user_to_share_with, access_privilege)
            else:
                user.uaccess.share_resource_with_group(res, group_to_share_with, access_privilege)
        except PermissionDenied as exp:
            status = 'error'
            err_message = exp.message
    else:
        status = 'error'
    # report the requesting user's own effective privilege back as a string
    current_user_privilege = res.raccess.get_effective_privilege(user)
    if current_user_privilege == PrivilegeCodes.VIEW:
        current_user_privilege = "view"
    elif current_user_privilege == PrivilegeCodes.CHANGE:
        current_user_privilege = "change"
    elif current_user_privilege == PrivilegeCodes.OWNER:
        current_user_privilege = "owner"
    if user_or_group == 'user':
        is_current_user = False
        if user == user_to_share_with:
            is_current_user = True
        picture_url = 'No picture provided'
        if user_to_share_with.userprofile.picture:
            picture_url = user_to_share_with.userprofile.picture.url
        ajax_response_data = {'status': status, 'name': user_to_share_with.get_full_name(),
                              'username': user_to_share_with.username, 'privilege_granted': privilege,
                              'current_user_privilege': current_user_privilege,
                              'profile_pic': picture_url, 'is_current_user': is_current_user,
                              'error_msg': err_message}
    else:
        group_pic_url = 'No picture provided'
        if group_to_share_with.gaccess.picture:
            group_pic_url = group_to_share_with.gaccess.picture.url
        ajax_response_data = {'status': status, 'name': group_to_share_with.name,
                              'privilege_granted': privilege, 'group_pic': group_pic_url,
                              'current_user_privilege': current_user_privilege,
                              'error_msg': err_message}
    return HttpResponse(json.dumps(ajax_response_data), status=status_code)
def unshare_resource_with_user(request, shortkey, user_id, *args, **kwargs):
    """AJAX endpoint: revoke a user's access to a resource."""
    res, _, user = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
    target_user = utils.user_from_id(user_id)
    response_data = {'status': 'success'}
    try:
        user.uaccess.unshare_resource_with_user(res, target_user)
        if user not in res.raccess.view_users:
            # user has no explict access to the resource - redirect to resource listing page
            response_data['redirect_to'] = '/my-resources/'
    except PermissionDenied as exp:
        response_data['status'] = 'error'
        response_data['message'] = exp.message
    return JsonResponse(response_data)
def unshare_resource_with_group(request, shortkey, group_id, *args, **kwargs):
    """AJAX endpoint: revoke a group's access to a resource."""
    res, _, user = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
    target_group = utils.group_from_id(group_id)
    response_data = {'status': 'success'}
    try:
        user.uaccess.unshare_resource_with_group(res, target_group)
        if user not in res.raccess.view_users:
            # user has no explicit access to the resource - redirect to resource listing page
            response_data['redirect_to'] = '/my-resources/'
    except PermissionDenied as exp:
        response_data['status'] = 'error'
        response_data['message'] = exp.message
    return JsonResponse(response_data)
def undo_share_resource_with_user(request, shortkey, user_id, *args, **kwargs):
    """AJAX endpoint: roll back the most recent share action for a user and
    report the privilege the user is left with."""
    res, _, user = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
    target_user = utils.user_from_id(user_id)
    response_data = {'status': 'success'}
    # human-readable names for the privilege the target user falls back to
    privilege_names = {PrivilegeCodes.VIEW: "view",
                       PrivilegeCodes.CHANGE: "change",
                       PrivilegeCodes.OWNER: "owner"}
    try:
        user.uaccess.undo_share_resource_with_user(res, target_user)
        effective = res.raccess.get_effective_privilege(target_user)
        response_data['undo_user_privilege'] = privilege_names.get(effective, 'none')
        if user not in res.raccess.view_users:
            # user has no explict access to the resource - redirect to resource listing page
            response_data['redirect_to'] = '/my-resources/'
    except PermissionDenied as exp:
        response_data['status'] = 'error'
        response_data['message'] = exp.message
    return JsonResponse(response_data)
def undo_share_resource_with_group(request, shortkey, group_id, *args, **kwargs):
    """AJAX endpoint: roll back the most recent share action for a group and
    report the privilege the group is left with."""
    res, _, user = authorize(request, shortkey, needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
    target_group = utils.group_from_id(group_id)
    response_data = {'status': 'success'}
    try:
        user.uaccess.undo_share_resource_with_group(res, target_group)
        # determine what access, if any, the group still has after the undo
        if target_group in res.raccess.edit_groups:
            restored_privilege = 'change'
        elif target_group in res.raccess.view_groups:
            restored_privilege = 'view'
        else:
            restored_privilege = 'none'
        response_data['undo_group_privilege'] = restored_privilege
        if user not in res.raccess.view_users:
            # user has no explicit access to the resource - redirect to resource listing page
            response_data['redirect_to'] = '/my-resources/'
    except PermissionDenied as exp:
        response_data['status'] = 'error'
        response_data['message'] = exp.message
    return JsonResponse(response_data)
# view functions mapped with INPLACE_SAVE_URL(/hsapi/save_inline/) for Django inplace editing
def save_ajax(request):
    """Handle a Django inplace-edit save POST (mapped via INPLACE_SAVE_URL).

    Validates the adaptor-selected field via its model form and returns a
    JSON-style response with either the rendered new value or the error text.
    """
    if not request.method == 'POST':
        return _get_http_response({'errors': 'It is not a POST request'})
    adaptor = _get_adaptor(request, 'POST')
    if not adaptor:
        return _get_http_response({'errors': 'Params insufficient'})
    if not adaptor.can_edit():
        return _get_http_response({'errors': 'You can not edit this content'})
    value = adaptor.loads_to_post(request)
    new_data = get_dict_from_obj(adaptor.obj)
    form_class = adaptor.get_form_class()
    field_name = adaptor.field_name
    new_data['in_menus'] = ''
    form = form_class(data=new_data, instance=adaptor.obj)
    try:
        value_edit = adaptor.get_value_editor(value)
        value_edit_with_filter = apply_filters(value_edit, adaptor.filters_to_edit)
        # bug fix: this assignment was previously duplicated on two lines
        new_data[field_name] = value_edit_with_filter
        if form.is_valid():
            adaptor.save(value_edit_with_filter)
            return _get_http_response({'errors': False,
                                       'value': adaptor.render_value_edit()})
        messages = []  # The error is for another field that you are editing
        for field_name_error, errors_field in form.errors.items():
            for error in errors_field:
                messages.append("%s: %s" % (field_name_error, unicode(error)))
        message_i18n = ','.join(messages)
        return _get_http_response({'errors': message_i18n})
    except ValidationError as error:  # The error is for a field that you are editing
        message_i18n = ', '.join([u"%s" % m for m in error.messages])
        return _get_http_response({'errors': message_i18n})
def verify_account(request, *args, **kwargs):
    """Render the account-verification page for the username/email in the query string."""
    return render_to_response('pages/verify-account.html',
                              {'username': request.GET['username'],
                               'email': request.GET['email']},
                              context_instance=RequestContext(request))
@processor_for('resend-verification-email')
def resend_verification_email(request):
    """Re-send the account-verification email for the username/email in the query string.

    Renders the verify-account page with 'is_email_sent' reflecting whether the
    email was dispatched. Previously a bare ``except: pass`` swallowed every
    failure and the view fell off the end returning None (a server error); now
    the failure is logged and a valid response is still returned.
    """
    u = get_object_or_404(User, username=request.GET['username'], email=request.GET['email'])
    try:
        token = signing.dumps('verify_user_email:{0}:{1}'.format(u.pk, u.email))
        u.email_user(
            'Please verify your new xDCIShare account.',
            """
This is an automated email from xDCIShare.org. If you requested a xDCIShare account, please
go to http://{domain}/verify/{token}/ and verify your account.
""".format(
            domain=Site.objects.get_current().domain,
            token=token
        ))
        context = {
            'is_email_sent': True
        }
    except Exception as ex:
        # log instead of silently ignoring, and still render a page
        logger.exception("Failed to resend verification email: %s", ex)
        context = {
            'is_email_sent': False
        }
    return render_to_response('pages/verify-account.html', context, context_instance=RequestContext(request))
class FilterForm(forms.Form):
    """Optional filters for querying/paging a user's resource listing."""
    start = forms.IntegerField(required=False)  # pagination offset
    published = forms.BooleanField(required=False)  # restrict to published resources
    edit_permission = forms.BooleanField(required=False)  # restrict to editable resources
    owner = forms.CharField(required=False)  # owner username filter
    user = forms.ModelChoiceField(queryset=User.objects.all(), required=False)
    from_date = forms.DateTimeField(required=False)  # only resources created after this date
class GroupForm(forms.Form):
    """Shared fields and validation for creating/updating user groups."""
    name = forms.CharField(required=True)
    description = forms.CharField(required=True)
    purpose = forms.CharField(required=False)
    picture = forms.ImageField(required=False)
    privacy_level = forms.CharField(required=True)
    auto_approve = forms.BooleanField(required=False)

    def clean_privacy_level(self):
        """Reject any privacy level other than the three supported keywords."""
        level = self.cleaned_data['privacy_level']
        if level not in ('public', 'private', 'discoverable'):
            raise forms.ValidationError("Invalid group privacy level.")
        return level

    def _set_privacy_level(self, group, privacy_level):
        """Translate the privacy keyword into gaccess public/discoverable flags and save."""
        # public implies discoverable; discoverable alone is not public
        flag_map = {
            'public': (True, True),
            'private': (False, False),
            'discoverable': (False, True),
        }
        if privacy_level in flag_map:
            group.gaccess.public, group.gaccess.discoverable = flag_map[privacy_level]
        group.gaccess.save()
class GroupCreateForm(GroupForm):
    """Form used to create a new user group."""

    def save(self, request):
        """Create a group owned by the requesting user; apply picture and privacy settings."""
        data = self.cleaned_data
        group = request.user.uaccess.create_group(
            title=data['name'],
            description=data['description'],
            purpose=data['purpose'],
            auto_approve=data['auto_approve'])
        picture = request.FILES.get('picture')
        if picture is not None:
            group.gaccess.picture = picture
        # _set_privacy_level() saves gaccess, persisting the picture as well
        self._set_privacy_level(group, data['privacy_level'])
        return group
class GroupUpdateForm(GroupForm):
    """Form used to update an existing user group."""

    def update(self, group_to_update, request):
        """Apply the validated form data to *group_to_update* and its gaccess record."""
        data = self.cleaned_data
        group_to_update.name = data['name']
        group_to_update.save()
        gaccess = group_to_update.gaccess
        gaccess.description = data['description']
        gaccess.purpose = data['purpose']
        gaccess.auto_approve = data['auto_approve']
        picture = request.FILES.get('picture')
        if picture is not None:
            gaccess.picture = picture
        # _set_privacy_level() saves gaccess, persisting all changes above
        self._set_privacy_level(group_to_update, data['privacy_level'])
@processor_for('my-resources')
@login_required
def my_resources(request, page):
    """Template context processor for the my-resources listing page."""
    return {'collection': get_my_resources_list(request)}
@processor_for(GenericResource)
def add_generic_context(request, page):
    """Template context for resource landing pages: sharing forms plus user-zone status."""
    # only the user-zone account flag is needed here; the production flag is unused
    _, user_zone_account_exist = utils.get_user_zone_status_info(request.user)

    class AddUserForm(forms.Form):
        # autocomplete over active users
        user = forms.ModelChoiceField(
            User.objects.filter(is_active=True).all(),
            widget=autocomplete_light.ChoiceWidget("UserAutocomplete"))

    class AddGroupForm(forms.Form):
        # autocomplete over active groups, excluding the internal 'Resource Author' group
        group = forms.ModelChoiceField(
            Group.objects.filter(gaccess__active=True).exclude(name='Resource Author').all(),
            widget=autocomplete_light.ChoiceWidget("GroupAutocomplete"))

    return {
        'add_owner_user_form': AddUserForm(),
        'add_view_user_form': AddUserForm(),
        'add_edit_user_form': AddUserForm(),
        'add_view_group_form': AddGroupForm(),
        'add_edit_group_form': AddGroupForm(),
        'user_zone_account_exist': user_zone_account_exist,
    }
@login_required
def create_resource_select_resource_type(request, *args, **kwargs):
    """Render the page where the user picks a resource type before creating a resource."""
    return render_to_response('pages/create-resource.html', context_instance=RequestContext(request))
@login_required
def create_resource(request, *args, **kwargs):
    """Create a new resource from an AJAX POST.

    Expects 'resource-type' and 'title' in POST, optional uploaded files, and
    optional iRODS file names (either federated names passed straight through,
    or files pulled down with explicit iRODS credentials). Returns a
    JsonResponse with 'status' of 'success'/'error'; on success it also
    carries 'resource_url' and 'file_upload_status'.
    """
    # Note: This view function must be called by ajax
    ajax_response_data = {'status': 'error', 'message': ''}
    resource_type = request.POST['resource-type']
    res_title = request.POST['title']
    resource_files = request.FILES.values()
    source_names = []
    irods_fnames = request.POST.get('irods_file_names')
    federated = request.POST.get("irods_federated").lower() == 'true'
    # TODO: need to make REST API consistent with internal API. This is just "move" now there.
    fed_copy_or_move = request.POST.get("copy-or-move")
    if irods_fnames:
        if federated:
            # files already live in the federated zone; pass their names through
            source_names = irods_fnames.split(',')
        else:
            # pull the files from a user-specified iRODS server into the upload set
            user = request.POST.get('irods-username')
            password = request.POST.get("irods-password")
            port = request.POST.get("irods-port")
            host = request.POST.get("irods-host")
            zone = request.POST.get("irods-zone")
            try:
                upload_from_irods(username=user, password=password, host=host, port=port,
                                  zone=zone, irods_fnames=irods_fnames, res_files=resource_files)
            except utils.ResourceFileSizeException as ex:
                ajax_response_data['message'] = ex.message
                return JsonResponse(ajax_response_data)
            except SessionException as ex:
                ajax_response_data['message'] = ex.stderr
                return JsonResponse(ajax_response_data)
    url_key = "page_redirect_url"
    try:
        # resource-type-specific validation/metadata extraction before creation
        _, res_title, metadata, fed_res_path = \
            hydroshare.utils.resource_pre_create_actions(resource_type=resource_type,
                                                         files=resource_files,
                                                         resource_title=res_title,
                                                         source_names=source_names,
                                                         page_redirect_url_key=url_key,
                                                         requesting_user=request.user,
                                                         **kwargs)
    except utils.ResourceFileSizeException as ex:
        ajax_response_data['message'] = ex.message
        return JsonResponse(ajax_response_data)
    except utils.ResourceFileValidationException as ex:
        ajax_response_data['message'] = ex.message
        return JsonResponse(ajax_response_data)
    except Exception as ex:
        ajax_response_data['message'] = ex.message
        return JsonResponse(ajax_response_data)
    resource = hydroshare.create_resource(
        resource_type=request.POST['resource-type'],
        owner=request.user,
        title=res_title,
        metadata=metadata,
        files=resource_files,
        source_names=source_names,
        # TODO: should probably be resource_federation_path like it is set to.
        fed_res_path=fed_res_path[0] if len(fed_res_path) == 1 else '',
        move=(fed_copy_or_move == 'move'),
        content=res_title
    )
    try:
        # resource-type-specific post-processing (e.g. metadata extraction from files)
        utils.resource_post_create_actions(request=request, resource=resource,
                                           user=request.user, metadata=metadata, **kwargs)
    except (utils.ResourceFileValidationException, Exception) as ex:
        # the resource itself was created; report a file-level failure but still succeed
        request.session['validation_error'] = ex.message
        ajax_response_data['message'] = ex.message
        ajax_response_data['status'] = 'success'
        ajax_response_data['file_upload_status'] = 'error'
        ajax_response_data['resource_url'] = resource.get_absolute_url()
        return JsonResponse(ajax_response_data)
    request.session['just_created'] = True
    if not ajax_response_data['message']:
        if resource.files.all():
            ajax_response_data['file_upload_status'] = 'success'
        ajax_response_data['status'] = 'success'
        ajax_response_data['resource_url'] = resource.get_absolute_url()
    return JsonResponse(ajax_response_data)
@login_required
def create_user_group(request, *args, **kwargs):
    """Create a new user group from POSTed form data.

    On success redirects to the new group's page; on failure queues an error
    message and redirects back to the referer.
    """
    group_form = GroupCreateForm(request.POST, request.FILES)
    if group_form.is_valid():
        try:
            new_group = group_form.save(request)
            messages.success(request, "Group creation was successful.")
            return HttpResponseRedirect(reverse('group', args=[new_group.id]))
        except IntegrityError as ex:
            # a duplicate group name surfaces as an integrity error on the unique name column
            if group_form.cleaned_data['name'] in ex.message:
                message = "Group name '{}' already exists".format(group_form.cleaned_data['name'])
                messages.error(request, "Group creation errors: {}.".format(message))
            else:
                messages.error(request, "Group creation errors:{}.".format(ex.message))
    else:
        # bug fix: as_json is a method - without the call, the bound-method repr
        # was formatted into the message instead of the JSON error text
        messages.error(request, "Group creation errors:{}.".format(group_form.errors.as_json()))
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def update_user_group(request, group_id, *args, **kwargs):
    """Update an existing user group from POSTed form data.

    Requires permission to change the group's flags; always redirects back to
    the referer, queuing a success or error message.
    """
    user = request.user
    group_to_update = utils.group_from_id(group_id)
    if user.uaccess.can_change_group_flags(group_to_update):
        group_form = GroupUpdateForm(request.POST, request.FILES)
        if group_form.is_valid():
            try:
                group_form.update(group_to_update, request)
                messages.success(request, "Group update was successful.")
            except IntegrityError as ex:
                # a duplicate group name surfaces as an integrity error on the unique name column
                if group_form.cleaned_data['name'] in ex.message:
                    message = "Group name '{}' already exists".format(group_form.cleaned_data['name'])
                    messages.error(request, "Group update errors: {}.".format(message))
                else:
                    messages.error(request, "Group update errors:{}.".format(ex.message))
        else:
            # bug fix: as_json is a method - without the call, the bound-method repr
            # was formatted into the message instead of the JSON error text
            messages.error(request, "Group update errors:{}.".format(group_form.errors.as_json()))
    else:
        messages.error(request, "Group update errors: You don't have permission to update this group")
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def delete_user_group(request, group_id, *args, **kwargs):
    """Soft-delete a group by flagging it inactive; restorable via restore_user_group.

    This does not delete the group object itself - only its active status is
    set to False so it can later be restored.
    """
    try:
        hydroshare.set_group_active_status(request.user, group_id, False)
    except PermissionDenied:
        messages.error(request, "Group delete errors: You don't have permission to delete"
                                " this group.")
    else:
        messages.success(request, "Group delete was successful.")
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def restore_user_group(request, group_id, *args, **kwargs):
    """Restore a soft-deleted group by setting its active status back to True."""
    try:
        hydroshare.set_group_active_status(request.user, group_id, True)
    except PermissionDenied:
        messages.error(request, "Group restore errors: You don't have permission to restore"
                                " this group.")
    else:
        messages.success(request, "Group restore was successful.")
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def share_group_with_user(request, group_id, user_id, privilege, *args, **kwargs):
    """Grant *privilege* ('view', 'edit' or 'owner') over a group to a user,
    then redirect back to the referer with a success/error message."""
    requesting_user = request.user
    group_to_share = utils.group_from_id(group_id)
    user_to_share_with = utils.user_from_id(user_id)
    # translate the privilege keyword into a PrivilegeCodes value
    privilege_codes = {'view': PrivilegeCodes.VIEW,
                       'edit': PrivilegeCodes.CHANGE,
                       'owner': PrivilegeCodes.OWNER}
    access_privilege = privilege_codes.get(privilege, PrivilegeCodes.NONE)
    if access_privilege == PrivilegeCodes.NONE:
        messages.error(request, "Invalid privilege for sharing group with user")
    elif not requesting_user.uaccess.can_share_group(group_to_share, access_privilege):
        messages.error(request, "You don't have permission to add users to group")
    else:
        try:
            requesting_user.uaccess.share_group_with_user(group_to_share, user_to_share_with, access_privilege)
            messages.success(request, "User successfully added to the group")
        except PermissionDenied as ex:
            messages.error(request, ex.message)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def unshare_group_with_user(request, group_id, user_id, *args, **kwargs):
    """
    Remove a user from a group

    :param request: requesting user is the group owner removing the user, or a user
                    removing themselves (leaving the group)
    :param group_id: id of the group from which the user is to be removed
    :param user_id: id of the user being removed from the group
    :return: redirect to 'my_groups' if the user removed themselves, otherwise back to the referer
    """
    requesting_user = request.user
    group_to_unshare = utils.group_from_id(group_id)
    user_to_unshare_with = utils.user_from_id(user_id)
    try:
        requesting_user.uaccess.unshare_group_with_user(group_to_unshare, user_to_unshare_with)
        if requesting_user == user_to_unshare_with:
            success_msg = "You successfully left the group."
        else:
            success_msg = "User successfully removed from the group."
        messages.success(request, success_msg)
    except PermissionDenied as ex:
        messages.error(request, ex.message)
    if requesting_user == user_to_unshare_with:
        # a user who removed themselves no longer has access to the group page
        return HttpResponseRedirect(reverse("my_groups"))
    else:
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def make_group_membership_request(request, group_id, user_id=None, *args, **kwargs):
    """
    Allows either an owner of the group to invite a user to join a group or a user to make a request
    to join a group

    :param request: the user who is making the request
    :param group_id: ID of the group for which the join request/invitation to me made
    :param user_id: needed only when an owner is inviting a user to join a group. This is the id of the user the owner
    is inviting
    :return: redirect back to the referer with a success or error message queued
    """
    requesting_user = request.user
    group_to_join = utils.group_from_id(group_id)
    user_to_join = None
    if user_id is not None:
        user_to_join = utils.user_from_id(user_id)
    try:
        membership_request = requesting_user.uaccess.create_group_membership_request(
            group_to_join, user_to_join)
        if user_to_join is not None:
            # owner-initiated invitation: notify the invited user
            message = 'Group membership invitation was successful'
            # send mail to the user who was invited to join group
            send_action_to_take_email(request, user=user_to_join, action_type='group_membership',
                                      group=group_to_join, membership_request=membership_request)
        else:
            message = 'You are now a member of this group'
            # membership_request is None in case where group allows auto approval of membership
            # request. no need send email notification to group owners for membership approval
            if membership_request is not None:
                message = 'Group membership request was successful'
                # send mail to all owners of the group for approval of the request
                for grp_owner in group_to_join.gaccess.owners:
                    send_action_to_take_email(request, user=requesting_user,
                                              action_type='group_membership',
                                              group=group_to_join, group_owner=grp_owner,
                                              membership_request=membership_request)
            else:
                # send mail to all owners of the group to let them know that someone has
                # joined this group
                for grp_owner in group_to_join.gaccess.owners:
                    send_action_to_take_email(request, user=requesting_user,
                                              action_type='group_auto_membership',
                                              group=group_to_join,
                                              group_owner=grp_owner)
        messages.success(request, message)
    except PermissionDenied as ex:
        messages.error(request, ex.message)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def group_membership(request, uidb36, token, membership_request_id, **kwargs):
    """
    View for the link in the verification email that was sent to a user
    when they request/invite to join a group.
    User is logged in and the request to join a group is accepted. Then the user is redirected to the group
    profile page of the group for which the membership got accepted.

    :param uidb36: ID of the user to whom the email was sent (part of the link in the email)
    :param token: token that was part of the link in the email
    :param membership_request_id: ID of the GroupMembershipRequest object (part of the link in the email)
    :return: redirect to the group page on success, or to '/' with an error message otherwise
    """
    membership_request = GroupMembershipRequest.objects.filter(id=membership_request_id).first()
    if membership_request is not None:
        if membership_request.group_to_join.gaccess.active:
            # the uidb36/token pair authenticates the user straight from the email link
            user = authenticate(uidb36=uidb36, token=token, is_active=True)
            if user is not None:
                user.uaccess.act_on_group_membership_request(membership_request, accept_request=True)
                auth_login(request, user)
                # send email to notify membership acceptance
                _send_email_on_group_membership_acceptance(membership_request)
                if membership_request.invitation_to is not None:
                    message = "You just joined the group '{}'".format(membership_request.group_to_join.name)
                else:
                    message = "User '{}' just joined the group '{}'".format(membership_request.request_from.first_name,
                                                                            membership_request.group_to_join.name)
                messages.info(request, message)
                # redirect to group profile page
                return HttpResponseRedirect('/group/{}/'.format(membership_request.group_to_join.id))
            else:
                messages.error(request, "The link you clicked is no longer valid.")
                return redirect("/")
        else:
            messages.error(request, "The group is no longer active.")
            return redirect("/")
    else:
        messages.error(request, "The link you clicked is no longer valid.")
        return redirect("/")
@login_required
def act_on_group_membership_request(request, membership_request_id, action, *args, **kwargs):
    """
    Take action (accept or decline) on group membership request

    :param request: requesting user is either owner of the group taking action on a request from a user
    or a user taking action on a invitation to join a group from a group owner
    :param membership_request_id: id of the membership request object (an instance of GroupMembershipRequest)
    to act on
    :param action: need to have a value of either 'accept' or 'decline'
    :return: redirect back to the referer with a success or error message queued
    """
    accept_request = action == 'accept'
    user_acting = request.user
    membership_request = GroupMembershipRequest.objects.filter(pk=membership_request_id).first()
    if membership_request is None:
        messages.error(request, 'No matching group membership request was found')
    elif not membership_request.group_to_join.gaccess.active:
        messages.error(request, "Group is not active")
    else:
        try:
            user_acting.uaccess.act_on_group_membership_request(membership_request, accept_request)
            if accept_request:
                messages.success(request, 'Membership request accepted')
                # send email to notify membership acceptance
                _send_email_on_group_membership_acceptance(membership_request)
            else:
                messages.success(request, 'Membership request declined')
        except PermissionDenied as ex:
            messages.error(request, ex.message)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def get_file(request, *args, **kwargs):
    """Fetch a data object from iRODS and stream it back to the client.

    :param kwargs: must contain 'name', the iRODS data object name to fetch
    :return: HttpResponse with the file content as a binary stream
    """
    from django_irods.icommands import RodsSession
    name = kwargs['name']
    session = RodsSession("./", "/usr/bin")
    session.runCmd("iinit")
    # iget writes the data object to a local file named 'tempfile.<name>'
    session.runCmd('iget', [ name, 'tempfile.' + name ])
    # Open the file iget actually produced; the original opened `name`,
    # which is not the downloaded file.
    return HttpResponse(open('tempfile.' + name), content_type='x-binary/octet-stream')
# Register the resource landing-page processor with Mezzanine so it is used
# to render GenericResource pages.
processor_for(GenericResource)(resource_processor)
def get_metadata_terms_page(request, *args, **kwargs):
    """Render the static page describing metadata terms."""
    return render(request, 'pages/metadata_terms.html')
@login_required
def get_user_or_group_data(request, user_or_group_id, is_group, *args, **kwargs):
    """
    This view function must be called as an AJAX call

    :param user_or_group_id: id of the user or group for which data is needed
    :param is_group: (string) 'false' if the id is for a user, 'true' if the id is for a group
                     (note: the code below fetches a *user* when is_group == 'false')
    :return: JsonResponse() containing user (or group) data
    """
    user_data = {}
    if is_group == 'false':
        user = utils.user_from_id(user_or_group_id)
        # Full display name, including middle name when the profile has one.
        if user.userprofile.middle_name:
            user_name = "{} {} {}".format(user.first_name, user.userprofile.middle_name, user.last_name)
        else:
            user_name = "{} {}".format(user.first_name, user.last_name)

        user_data['name'] = user_name
        user_data['email'] = user.email
        user_data['url'] = '{domain}/user/{uid}/'.format(domain=utils.current_site_url(), uid=user.pk)
        # Prefer the primary phone; fall back to the secondary, else empty.
        if user.userprofile.phone_1:
            user_data['phone'] = user.userprofile.phone_1
        elif user.userprofile.phone_2:
            user_data['phone'] = user.userprofile.phone_2
        else:
            user_data['phone'] = ''

        # Address is "state, country", skipping parts set to 'unspecified'.
        address = ''
        if user.userprofile.state and user.userprofile.state.lower() != 'unspecified':
            address = user.userprofile.state
        if user.userprofile.country and user.userprofile.country.lower() != 'unspecified':
            if len(address) > 0:
                address += ', ' + user.userprofile.country
            else:
                address = user.userprofile.country

        user_data['address'] = address
        user_data['organization'] = user.userprofile.organization if user.userprofile.organization else ''
        user_data['website'] = user.userprofile.website if user.userprofile.website else ''
    else:
        group = utils.group_from_id(user_or_group_id)
        user_data['organization'] = group.name
        user_data['url'] = '{domain}/user/{uid}/'.format(domain=utils.current_site_url(),
                                                         uid=group.pk)
        user_data['description'] = group.gaccess.description

    return JsonResponse(user_data)
def _send_email_on_group_membership_acceptance(membership_request):
    """
    Sends email notification of group membership acceptance

    :param membership_request: an instance of GroupMembershipRequest class
    :return:
    """
    if membership_request.invitation_to is not None:
        # user accepted invitation from the group owner
        # here we are sending email to group owner who invited
        email_msg = """Dear {}
        <p>Your invitation to user '{}' to join the group '{}' has been accepted.</p>
        <p>Thank you</p>
        <p>The xDCIShare Team</p>
        """.format(membership_request.request_from.first_name,
                   membership_request.invitation_to.first_name, membership_request.group_to_join.name)
    else:
        # group owner accepted user request
        # here we are sending email to the user whose request to join got accepted
        email_msg = """Dear {}
        <p>Your request to join the group '{}' has been accepted.</p>
        <p>Thank you</p>
        <p>The xDCIShare Team</p>
        """.format(membership_request.request_from.first_name, membership_request.group_to_join.name)

    # The same HTML body is sent as both the plain-text and HTML parts.
    send_mail(subject="xDCIShare group membership",
              message=email_msg,
              html_message=email_msg,
              from_email=settings.DEFAULT_FROM_EMAIL,
              recipient_list=[membership_request.request_from.email])
def _share_resource_with_user(request, frm, resource, requesting_user, privilege):
    """Grant *privilege* over *resource* to the user selected in form *frm*.

    Form-validation failures and permission errors are reported through the
    Django messages framework rather than raised to the caller.
    """
    if not frm.is_valid():
        messages.error(request, frm.errors.as_json())
        return

    target_user = frm.cleaned_data['user']
    try:
        requesting_user.uaccess.share_resource_with_user(resource, target_user, privilege)
    except PermissionDenied as exp:
        messages.error(request, exp.message)
def _unshare_resource_with_users(request, requesting_user, users_to_unshare_with, resource, privilege):
    """Revoke *privilege* over *resource* from shared users.

    NOTE(review): despite its name, *users_to_unshare_with* is used below as
    the list of user ids to *retain* (it is loaded into ``users_to_keep``);
    everyone else at the given privilege level is unshared — confirm against
    callers.

    :return: True when the requesting user removed his own access to a
             non-public resource (caller should redirect to the resource
             listing page), otherwise False
    """
    users_to_keep = User.objects.in_bulk(users_to_unshare_with).values()
    # Partition current users so each appears at exactly one privilege level.
    owners = set(resource.raccess.owners.all())
    editors = set(resource.raccess.edit_users.all()) - owners
    viewers = set(resource.raccess.view_users.all()) - editors - owners

    if privilege == 'owner':
        all_shared_users = owners
    elif privilege == 'edit':
        all_shared_users = editors
    elif privilege == 'view':
        all_shared_users = viewers
    else:
        all_shared_users = []

    go_to_resource_listing_page = False
    for user in all_shared_users:
        if user not in users_to_keep:
            try:
                # requesting user is the resource owner or requesting_user is self unsharing
                # COUCH: no need for undo_share; doesn't do what is intended 11/19/2016
                requesting_user.uaccess.unshare_resource_with_user(resource, user)
                # Self-unsharing a private resource removes the user's access
                # to the page he is currently on.
                if requesting_user == user and not resource.raccess.public:
                    go_to_resource_listing_page = True

            except PermissionDenied as exp:
                messages.error(request, exp.message)
                break
    return go_to_resource_listing_page
def _set_resource_sharing_status(request, user, resource, flag_to_set, flag_value):
    """Set one of the resource sharing flags ('shareable', 'public' or
    'discoverable') to *flag_value*, enforcing the metadata and content-file
    requirements for public/discoverable status.

    Errors are reported through the Django messages framework; the function
    returns None in all cases.
    """
    if not user.uaccess.can_change_resource_flags(resource):
        messages.error(request, "You don't have permission to change resource sharing status")
        return

    # 'shareable' has no prerequisites; toggle and return early.
    if flag_to_set == 'shareable':
        if resource.raccess.shareable != flag_value:
            resource.raccess.shareable = flag_value
            resource.raccess.save()
        return

    has_files = False
    has_metadata = False
    can_resource_be_public_or_discoverable = False
    is_public = (flag_to_set == 'public' and flag_value)
    is_discoverable = (flag_to_set == 'discoverable' and flag_value)

    # Raising visibility requires complete metadata and required content files.
    if is_public or is_discoverable:
        has_files = resource.has_required_content_files()
        has_metadata = resource.metadata.has_all_required_elements()
        can_resource_be_public_or_discoverable = has_files and has_metadata

    if is_public and not can_resource_be_public_or_discoverable:
        messages.error(request, _get_message_for_setting_resource_flag(has_files, has_metadata, resource_flag='public'))
    else:
        if is_discoverable:
            if can_resource_be_public_or_discoverable:
                # discoverable implies not public
                resource.raccess.public = False
                resource.raccess.discoverable = True
            else:
                messages.error(request, _get_message_for_setting_resource_flag(has_files, has_metadata,
                                                                               resource_flag='discoverable'))
        else:
            # Setting 'public' also sets 'discoverable'; clearing 'public'
            # clears both flags.
            resource.raccess.public = is_public
            resource.raccess.discoverable = is_public

        resource.raccess.save()

        # set isPublic metadata AVU accordingly
        res_coll = resource.root_path
        istorage = resource.get_irods_storage()
        istorage.setAVU(res_coll, "isPublic", str(resource.raccess.public).lower())

        # run script to update hyrax input files when a private netCDF resource is made public
        if flag_to_set=='public' and flag_value and settings.RUN_HYRAX_UPDATE and \
                resource.resource_type=='NetcdfResource':
            run_script_to_update_hyrax_input_files(resource.short_id)
def _get_message_for_setting_resource_flag(has_files, has_metadata, resource_flag):
msg = ''
if not has_metadata and not has_files:
msg = "Resource does not have sufficient required metadata and content files to be {flag}".format(
flag=resource_flag)
elif not has_metadata:
msg = "Resource does not have sufficient required metadata to be {flag}".format(flag=resource_flag)
elif not has_files:
msg = "Resource does not have required content files to be {flag}".format(flag=resource_flag)
return msg
class MyGroupsView(TemplateView):
    """Page listing the groups the signed-in user can view, split into
    active and inactive groups, plus pending membership requests."""
    template_name = 'pages/my-groups.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(MyGroupsView, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        profile_user = User.objects.get(pk=self.request.user.id)
        viewable_groups = profile_user.uaccess.view_groups
        invitations = GroupMembershipRequest.objects.filter(
            invitation_to=profile_user).exclude(group_to_join__gaccess__active=False).all()

        # Annotate each group so the template can tell whether the user owns it,
        # then partition by active status (order preserved).
        active_groups = []
        inactive_groups = []
        for grp in viewable_groups:
            grp.is_group_owner = profile_user.uaccess.owns_group(grp)
        for grp in viewable_groups:
            (active_groups if grp.gaccess.active else inactive_groups).append(grp)

        pending_requests = GroupMembershipRequest.objects.filter(
            request_from=profile_user).exclude(group_to_join__gaccess__active=False)

        return {
            'profile_user': profile_user,
            'groups': active_groups,
            'inactive_groups': inactive_groups,
            'my_pending_requests': pending_requests,
            'group_membership_requests': invitations
        }
class AddUserForm(forms.Form):
    # Single-field form used to pick a user, backed by an autocomplete
    # widget over all users.
    user = forms.ModelChoiceField(User.objects.all(), widget=autocomplete_light.ChoiceWidget("UserAutocomplete"))
class GroupView(TemplateView):
    """Landing page for a single group: the requesting user's relationship to
    the group, pending join requests, and resources shared with the group."""
    template_name = 'pages/group.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(GroupView, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        group_id = kwargs['group_id']
        g = Group.objects.get(pk=group_id)
        u = User.objects.get(pk=self.request.user.id)
        # Dynamic attributes consumed by the template: the user's role(s) in
        # this group ...
        u.is_group_owner = u.uaccess.owns_group(g)
        u.is_group_editor = g in u.uaccess.edit_groups
        u.is_group_viewer = g in u.uaccess.view_groups

        # ... and any membership request involving this user.
        g.join_request_waiting_owner_action = g.gaccess.group_membership_requests.filter(request_from=u).exists()
        g.join_request_waiting_user_action = g.gaccess.group_membership_requests.filter(invitation_to=u).exists()
        g.join_request = g.gaccess.group_membership_requests.filter(invitation_to=u).first()

        group_resources = []
        # for each of the resources this group has access to, set resource dynamic
        # attributes (grantor - group member who granted access to the resource) and (date_granted)
        for res in g.gaccess.view_resources:
            grp = GroupResourcePrivilege.objects.get(resource=res, group=g)
            res.grantor = grp.grantor
            res.date_granted = grp.start
            group_resources.append(res)

        # TODO: need to sort this resource list using the date_granted field

        return {
            'profile_user': u,
            'group': g,
            'view_users': g.gaccess.get_users_with_explicit_access(PrivilegeCodes.VIEW),
            'group_resources': group_resources,
            'add_view_user_form': AddUserForm(),
        }
class CollaborateView(TemplateView):
    """Page listing all active groups (except the internal 'Resource Author'
    group), each annotated with the signed-in user's membership state."""
    template_name = 'pages/collaborate.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(CollaborateView, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        viewer = User.objects.get(pk=self.request.user.id)
        all_groups = Group.objects.filter(gaccess__active=True).exclude(name="Resource Author")

        # Annotate each group with the viewer's membership/request state.
        for grp in all_groups:
            grp.is_user_member = viewer in grp.gaccess.members
            grp.join_request_waiting_owner_action = \
                grp.gaccess.group_membership_requests.filter(request_from=viewer).exists()
            grp.join_request_waiting_user_action = \
                grp.gaccess.group_membership_requests.filter(invitation_to=viewer).exists()
            grp.join_request = None
            if grp.join_request_waiting_owner_action or grp.join_request_waiting_user_action:
                grp.join_request = (grp.gaccess.group_membership_requests.filter(request_from=viewer).first() or
                                    grp.gaccess.group_membership_requests.filter(invitation_to=viewer).first())

        return {
            'profile_user': viewer,
            'groups': all_groups,
        }
| RENCI/xDCIShare | hs_core/views/__init__.py | Python | bsd-3-clause | 76,892 | [
"NetCDF"
] | 8a476ce1ddad82406576763372e02f30efdb2541ebd33fb950a43145cb13c656 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Development script to test the algorithms of a given model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import itertools
import time
from math import factorial
from random import shuffle
import numpy as np
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import (
AllCoordinationGeometries,
)
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import (
AbstractGeometry,
LocalGeometryFinder,
)
if __name__ == "__main__":
    allcg = AllCoordinationGeometries()

    # Ask the user for a valid coordination geometry symbol.
    while True:
        cg_symbol = input("Enter symbol of the geometry for which you want to get the explicit permutations : ")
        try:
            cg = allcg[cg_symbol]
            break
        except LookupError:
            print("Wrong geometry, try again ...")
            continue

    lgf = LocalGeometryFinder()
    lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)

    # random.shuffle needs a mutable sequence; a bare range object is
    # immutable in Python 3 and would raise TypeError below.
    myindices = list(range(cg.coordination_number))

    test = input(
        'Enter if you want to test all possible permutations ("all" or "a") or a given number of random permutations (i.e. "25")'
    )

    if test == "all" or test == "a":
        perms_iterator = itertools.permutations(myindices)
        nperms = factorial(cg.coordination_number)
    else:
        try:
            nperms = int(test)
        except Exception:
            raise ValueError(f"Could not turn {test} into integer ...")
        perms_iterator = []
        for ii in range(nperms):
            shuffle(myindices)
            perms_iterator.append(list(myindices))

    iperm = 1
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock interval timing.
    t1 = time.perf_counter()
    for indices_perm in perms_iterator:

        lgf.setup_test_perfect_environment(cg_symbol, indices=indices_perm)

        lgf.perfect_geometry = AbstractGeometry.from_cg(cg=cg)

        points_perfect = lgf.perfect_geometry.points_wocs_ctwocc()

        print(f"Perm # {iperm:d}/{nperms:d} : ", indices_perm)

        algos_results = []
        for algo in cg.algorithms:
            print(algo)
            if algo.algorithm_type == "EXPLICIT_PERMUTATIONS":
                raise ValueError("Do something for the explicit ones ... (these should anyway be by far ok!)")

            results = lgf.coordination_geometry_symmetry_measures_separation_plane(
                coordination_geometry=cg,
                separation_plane_algo=algo,
                tested_permutations=False,
                points_perfect=points_perfect,
            )
            print("Number of permutations tested : ", len(results[0]))
            algos_results.append(min(results[0]))

            # A perfect environment must yield a zero symmetry measure;
            # pause for inspection if it does not.
            if not np.isclose(min(results[0]), 0.0):
                print("Following is not 0.0 ...")
                input(results)
        print(" => ", algos_results)
        iperm += 1
    t2 = time.perf_counter()
    print(
        'Time to test {:d} permutations for geometry "{}" (symbol "{}") : {:.2f} seconds'.format(
            nperms, cg.name, cg_symbol, t2 - t1
        )
    )
| materialsproject/pymatgen | dev_scripts/chemenv/test_algos.py | Python | mit | 3,284 | [
"pymatgen"
] | ba16e84f7b5d28f3e6f6cdd08d823b31afca755965b5692178e85c9bb9048fa3 |
import csv,re
import cx_Oracle

# Input export from the SWI feed; one row per product.
filename='BlueFly-test.csv'
pipe_names = {}

# Attribute names are distributed across six bullet fields on the product
# detail record, grouped alphabetically by attribute name.
BULLET1_MAPS = ['ARMS', 'BEZEL', 'BEZEL FUNCTION', 'BEZEL MATERIAL', 'BRACELET', 'BRACELET COLOR', 'BRACELET LENGTH', 'BRACELET MATERIAL', 'BRACELET WIDTH',]
BULLET2_MAPS = ['CALENDAR', 'CASE', 'CASE BACK', 'CASE DIAMETER', 'CASE HEIGHT', 'CASE SHAPE', 'CASE THICKNESS', 'CASE WIDTH', 'CLASP', 'CLASP TYPE', 'CLOSURE', 'COLOR', 'CROWN', 'CRYSTAL',]
BULLET3_MAPS = ['DESCRIPTION', 'DIAL COLOR', 'DIAMOND CLARITY', 'DIAMOND COLOR', 'DIAMONDS', 'DIMENSION', 'DIMENSIONS', 'EXTERIOR', 'FEATURES', 'FINISH', 'FRAME', 'FRAME MATERIAL', 'FRAME STYLE',]
BULLET4_MAPS = ['GENDER', 'HANDS', 'HINGE', 'INCLUDES', 'INTERIOR', 'LENS', 'LUMINOUS', 'MANUFACTURED', 'MARKERS', 'MATERIAL', 'MATERIALS', 'MODEL ALIAS', 'MODEL NUMBER', 'MOVEMENT', 'MULTI-FUNCTION',]
BULLET5_MAPS = ['NOSE BRIDGE', 'NOSE PADS', 'OTHER', 'PROTECTION', 'RIM', 'RX', 'SERIES', 'SIZE', 'STONES', 'STRAP', 'STRAP COLOR', 'STRAP LENGTH', 'STRAP MATERIAL', 'STRAP WIDTH', 'STYLE', 'SUBDIAL',]
BULLET6_MAPS = ['SUBDIALS', 'SWEEP SECOND HAND', 'TEMPLE', 'TEMPLES', 'WATER RESISTANT', 'WEIGHT']
LONG_DESCRIPTION_MAP = ['PRODUCT_DESCRIPTION']

# NOTE(review): database credentials are hard-coded; consider moving them to
# configuration. A single module-level connection/cursor is shared by all
# functions below.
connection = cx_Oracle.connect("pomgr", "j1mmych00", "BFYQA1201_QARAC201-VIP.QA.BLUEFLY.COM")
cursor = connection.cursor()
#cursor.execute("""
# select sysdate from dual""",)
#for column_1, in cursor:
# print "Values:", column_1
def update_watches(item):
    """Update the product rows for one watch item, keyed by its vendor style.

    Looks up product_color by vendor_style == item['SWI_SKU'], then writes the
    material and the six bullet/long-description fields built from the item's
    attribute map. Uses the module-level shared cursor/connection.
    """
    # Get ID, Product_ID
    cursor.execute("""
      select id,product_id
      from pomgr.product_color
      where vendor_style = :arg_1
      """, arg_1 = item['SWI_SKU'],)
    print "Checking: ", item['SWI_SKU']
    id = ''
    product_id = ''
    # Iterating the cursor leaves id/product_id bound to the last row (if any).
    for id, product_id in cursor:
        print id,product_id
    item['ID'] = id
    item['PRODUCT_ID'] = product_id
    if id != '':
        print 'Found'
#    print sorted(item)
    # Update Material
    if 'CASE' in item:
        cursor.execute('update pomgr.product_detail set material = :item where product_id = :product_id', {'item': item['CASE'], 'product_id': str(item['PRODUCT_ID'])})

    # NOTE(review): the bullet/long-description update below runs even when no
    # matching product_color row was found (id == '') — confirm intent.
    b1_string = ""
    b2_string = ""
    b3_string = ""
    b4_string = ""
    b5_string = ""
    b6_string = ""
    # Build each bullet field as 'Name: value<br>' pairs from the item's
    # attributes, grouped by the BULLET*_MAPS lists.
    for key in BULLET1_MAPS:
        if key in item:
            b1_string += key.title() + ': ' + item[key] + '<br>'
    for key in BULLET2_MAPS:
        if key in item:
            b2_string += key.title() + ': ' + item[key] + '<br>'
    for key in BULLET3_MAPS:
        if key in item:
            b3_string += key.title() + ': ' + item[key] + '<br>'
    for key in BULLET4_MAPS:
        if key in item:
            b4_string += key.title() + ': ' + item[key] + '<br>'
    for key in BULLET5_MAPS:
        if key in item:
            b5_string += key.title() + ': ' + item[key] + '<br>'
    for key in BULLET6_MAPS:
        if key in item:
            b6_string += key.title() + ': ' + item[key] + '<br>'
    cursor.execute("""update pomgr.product_color_detail
                set bullet_1 = :b1s ,
                bullet_2 = :b2s ,
                bullet_3 = :b3s ,
                bullet_4 = :b4s ,
                bullet_5 = :b5s ,
                bullet_6 = :b6s ,
                long_description = :prod_desc
                where product_color_id = :id
                """, { 'b1s': b1_string,
                       'b2s': b2_string,
                       'b3s': b3_string,
                       'b4s': b4_string,
                       'b5s': b5_string,
                       'b6s': b6_string,
                       'prod_desc' : item['PRODUCT_DESCRIPTION'],
                       'id' : item['ID']})
    connection.commit()
# Main loop: read the SWI CSV export and push each 'Watches' row into Oracle.
with open(filename, 'rb') as f:
    reader = csv.reader(f,quoting=csv.QUOTE_ALL,delimiter=',')
    headers = reader.next()
    # Map header name -> column index for positional access below.
    swi = {}
    for index,item in enumerate(headers):
        swi[item] = index
    SWI_item = {}
    for row in reader:
        for key in swi:
            SWI_item[key] = row[swi[key]]
        # FEATURES_PIPED holds alternating |name|value| pairs; unpack them
        # into SWI_item keyed by the upper-cased attribute name.
        pipe_out = re.split("\|",row[swi['FEATURES_PIPED']])
        field_name = ""
        field_value = ""
        for pipe_index,pipe_value in enumerate(pipe_out):
            if pipe_index != 0:
                if pipe_index % 2:
                    field_name = pipe_value
                else:
                    field_value = pipe_value
#                    print ' ',field_name,':',field_value
                    SWI_item[field_name.upper()] = field_value
        if SWI_item['STORE'] == 'Watches':
            update_watches(SWI_item)
| relic7/prodimages | python/SWIReader.py | Python | mit | 4,497 | [
"CRYSTAL"
] | ee0d5d87af0a6d279ccc1ab32dbc7388901833e5808b15200ce27248fa79e639 |
#!/usr/bin/env python
# L. Brodeau, april 2010
import sys
import numpy as nmp
from netCDF4 import Dataset
from string import replace
if len(sys.argv) != 2:
    print 'Usage: '+sys.argv[0]+' <mesh_mask_ORCA1_file.nc>'
    sys.exit(0)

cf_mm = sys.argv[1]
# Output file name mirrors the input, with 'mesh_mask' -> 'basin_mask'.
cf_out = replace(cf_mm, 'mesh_mask', 'basin_mask')

print '\n'

# Opening the Netcdf file:
f_mm = Dataset(cf_mm)
print 'File ', cf_mm, 'is open...\n'

# Extracting the longitude 2D array:
xlon = f_mm.variables['nav_lon'][:,:]

# Extracting the latitude 2D array:
xlat = f_mm.variables['nav_lat'][:,:]

# Extracting tmask at surface level:
tmask = f_mm.variables['tmask'][0,0,:,:]

f_mm.close()

# Info on the shape of t:
[ nj, ni ] = tmask.shape
print 'Dimension = ', ni, nj, '\n'

mask_atl = nmp.zeros((nj,ni))
mask_pac = nmp.zeros((nj,ni))
mask_ind = nmp.zeros((nj,ni))
mask_soc = nmp.zeros((nj,ni)) ; # Souther Ocean
mask_wed = nmp.zeros((nj,ni)) ; # Weddell Sea
mask_ip1 = nmp.zeros((nj,ni))
mask_inp = nmp.zeros((nj,ni))

# NOTE: all index ranges below are hand-tuned for the ORCA1 grid layout and
# are not valid for any other grid resolution.

# ATL for ORCA1
# ~~~~~~~~~~~~~
mask_atl[:,:] = tmask[:,:]
# Removing Southern Ocean:
#mask_atl[:95,:] = 0
# Removing Pacific and Indian
mask_atl[0:246,0:190] = 0 # 246 => to keep Pacific side of the arctic basin...
mask_atl[0:168,0:223] = 0 ; mask_atl[0:255,310:] = 0
mask_atl[165:177,190:204] = 0; mask_atl[165:180,190:198] = 0; mask_atl[165:170,200:206] = 0
mask_atl[188:209,282:] = 0; mask_atl[209:215,288:] = 0

# REMOVING INDONESIA + AUSTRALIA
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
mask_ip1[:,:] = tmask[:,:]
mask_ip1[114:122,53:75] = 0
mask_ip1[119:126,68:74] = 0
mask_ip1[124:143,44:59] = 0
mask_ip1[128:159,33:42] = 0
mask_ip1[120:142,52:61] = 0
mask_ip1[124:136,41:70] = 0
mask_ip1[127:128,37:42] = 0
mask_ip1[120:126,60:70] = 0
mask_ip1[141:158,30:33] = 0
mask_ip1[152:162,26:30] = 0

# PAC for ORCA1
# ~~~~~~~~~~~~~
mask_pac[:,:] = tmask[:,:]
# Removing Southern Ocean until souther Australia:
mask_pac[:95,:] = 0
# Removing Indonesian side
mask_pac[:,:45] = 0
mask_pac[88:145,45:61] = 0
mask_pac[112:125,59:70] = 0
mask_pac[123:136,60:67] = 0
mask_pac[88:99,60:71] = 0 # bottom Australia
# V2
#mask_pac[:,:26] = 0
# Removing Atlantic
idxatl = nmp.where(mask_atl == 1.0)
mask_pac[idxatl] = 0
# Removing atlantic bottom and the rest (Indian)
mask_pac[83:,224:] = 0

# IND for ORCA1
# ~~~~~~~~~~~~~
mask_ind[:,:] = tmask[:,:]
# Removing Southern Ocean until southern Australia:
mask_ind[:95,:] = 0
# Removing Atl and Pac
mask_ind[:,:] = mask_ind[:,:] - mask_atl[:,:] - mask_pac[:,:]
mask_ind[93:100,46:68] = 0 # australia bottom
# Removing Mediterranean+Caspian sea:
mask_ind[192:228,279:329] = 0
mask_ind[198:242,328:344] = 0

# Indo-Pacific
# ~~~~~~~~~~~~
mask_inp[:,:] = tmask[:,:]
mask_inp[:95,:] = 0
# Removing Atlantic
idxatl = nmp.where(mask_atl == 1.0)
mask_inp[idxatl] = 0
mask_inp[93:100,46:68] = 0 # australia bottom
# Removing Mediterranean sea:
mask_inp[192:228,279:329] = 0
mask_inp[198:242,328:344] = 0
# Removing indonesia
#mask_inp[:,:] = mask_inp[:,:] * mask_ip1[:,:]

# Souther Ocean
# (note: idxatl is reused here for all points belonging to Atl/Pac/Ind)
mask_soc[:,:] = tmask[:,:]
idxatl = nmp.where(mask_atl+mask_pac+mask_ind > 0.5)
mask_soc[idxatl] = 0
mask_soc[122:,:] = 0

# Weddell Sea:
mask_wed[:,:] = tmask[:,:]
mask_wed[:,:233] = 0
mask_wed[55:,:] = 0
mask_wed[:,300:] = 0

# Creating output file:
f_out = Dataset(cf_out, 'w',format='NETCDF3_CLASSIC')

# Dimensions:
f_out.createDimension('x', ni)
f_out.createDimension('y', nj)

# Variables
id_lon = f_out.createVariable('nav_lon','f4',('y','x',))
id_lat = f_out.createVariable('nav_lat','f4',('y','x',))
id_atl = f_out.createVariable('tmaskatl' ,'f4',('y','x',)) ; id_atl.long_name = 'Atlantic Basin'
id_pac = f_out.createVariable('tmaskpac' ,'f4',('y','x',)) ; id_pac.long_name = 'Pacific Basin'
id_ind = f_out.createVariable('tmaskind' ,'f4',('y','x',)) ; id_ind.long_name = 'Indian Basin'
id_soc = f_out.createVariable('tmasksoc' ,'f4',('y','x',)) ; id_soc.long_name = 'Southern Basin'
id_inp = f_out.createVariable('tmaskinp' ,'f4',('y','x',)) ; id_inp.long_name = 'Indo-Pacific Basin'
id_wed = f_out.createVariable('tmaskwed' ,'f4',('y','x',)) ; id_wed.long_name = 'Weddell Sea'

# Filling variables:
id_lat[:,:] = xlat[:,:]
id_lon[:,:] = xlon[:,:]
id_atl[:,:] = mask_atl[:,:]
id_pac[:,:] = mask_pac[:,:]
id_ind[:,:] = mask_ind[:,:]
id_soc[:,:] = mask_soc[:,:]
id_inp[:,:] = mask_inp[:,:]
id_wed[:,:] = mask_wed[:,:]

f_out.About = 'ORCA1 main oceanic basin land-sea mask created from '+cf_mm
f_out.Author = ' Generated with "orca1_create_basin_mask_from_meshmask.py" of BaraKuda (https://github.com/brodeau/barakuda)'
f_out.close()

print cf_out+' sucessfully created!'
| brodeau/barakuda | python/exec/.old/orca1_create_basin_mask_from_meshmask.py | Python | gpl-2.0 | 4,667 | [
"NetCDF"
] | 1de4b53490261c39e76c09b7743e09921b6b5f5d10471e1920038ecdd3a99406 |
from morphforge.stdimports import *
from morphforgecontrib.stdimports import StdChlLeak

# Create the morphology for the cell:
# a single 20um x 20um cylindrical soma section.
morphDict1 = {'root': {'length': 20, 'diam': 20, 'id':'soma'} }
m1 = MorphologyTree.fromDictionary(morphDict1)

# Create the environment:
env = NEURONEnvironment()

# Create the simulation:
sim = env.Simulation()

# Create a cell:
cell = sim.create_cell(name="Cell1", morphology=m1)

# Apply the mechanisms to the cells
# (a passive leak channel with the given conductance and reversal potential):
lk_chl = env.Channel(StdChlLeak,
                     name="LkChl",
                     conductance=qty("0.25:mS/cm2"),
                     reversalpotential=qty("-51:mV"),
                     )

cell.apply_channel( lk_chl)
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))

# Create the stimulus and record the injected current:
# 200 pA step, 100 ms duration, starting at t = 100 ms, at the soma.
cc = sim.create_currentclamp(name="Stim1", amp=qty("200:pA"), dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)

# Define what to record:
sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma)
sim.recordall(lk_chl, cell_location=cell.soma)

# run the simulation
results = sim.run()

# Create an output .pdf
SimulationMRedoc.build( sim ).to_pdf(__file__ + '.pdf')

# Display the results:
TagViewer([results], figtitle="The response of a neuron to step current injection", timerange=(95, 200)*units.ms, show=True)
| mikehulluk/morphforge | doc/srcs_generated_examples/python_srcs/singlecell_simulation010.py | Python | bsd-2-clause | 1,399 | [
"NEURON"
] | df16f270ee9b4567f26040ca0d04c43acf9d055d7a8830c5d65f614f2c408764 |
# -*- coding: utf-8 -*-
"""
marksweep.group-crawler
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This modules contains a crawler that will traverse the facebook graph depth-first and persist
all groups, posts, comments and likes.
"""
__author__ = 'JasonLiu'
from Queue import Queue
import logging
import datetime
import time
from pymongo import MongoClient
import facebook_user
class AbstractBaseCrawler(object):
    """AbstractBaseCrawler: contains the required access for writing FBObjects to MongoDB.

    Subclasses implement crawl(); the _crawl_* helpers persist one FBObject
    each and log the action.
    """

    # NOTE: runs at class-definition (import) time and configures logging for
    # the whole process as a side effect.
    logging.basicConfig(
        filename="../logs/crawler.log",
        level=logging.DEBUG
    )

    def __init__(self, name="marksweep"):
        self.name = name
        # Authenticated Facebook user used for all Graph API reads.
        self.user = facebook_user.User()
        # Only crawl groups whose name suggests a hackathon-related group.
        self.groups = self.user.groups(limit=1000).filter(
            lambda _: "hack" in _.name.lower() or "hh" in _.name.lower()
        )
        self.DAO = MongoClient().hackathonhackers
        self.LOG = logging.getLogger("bfs-crawler : {}".format(name))
        self.get_all_posts = False

    def update_groups(self):
        # Re-fetch the filtered group list from the Graph API.
        self.groups = self.user.groups(limit=1000).filter(
            lambda _: "hack" in _.name.lower() or "hh" in _.name.lower()
        )

    def get_all_posts(self):
        """
        If this is set, the crawler will go through all of the posts for each group instead of a single page
        :return:
        """
        # NOTE(review): __init__ binds the *instance attribute*
        # self.get_all_posts = False, which shadows this method on every
        # instance — inst.get_all_posts() raises TypeError ('bool' object is
        # not callable), so this method is effectively unreachable. Confirm
        # whether the method or the attribute should be renamed.
        self.get_all_posts = True
        return self

    def _crawl_group(self, group):
        """
        Take a group FBObject and persist to MongoDB
        :param group:
        :return:
        """
        group_obj = group.persist()
        group_obj["last_updated"] = time.time()
        self.DAO.groups.save(group_obj)

        # save and log action
        self.LOG.info("[GROUP-{}] (id={},time={})".format(
            group.name, group.id, datetime.datetime.now()
        ))

    def _crawl_group_post(self, post, current_group_id):
        """
        Take a post FBObject and persist to MongoDB
        :param post:
        :param current_group_id:
        :return:
        """
        post_obj = post.persist()
        post_obj["group_id"] = current_group_id

        # save and log action
        self.DAO.posts.save(post_obj)
        self.LOG.info("[GROUP-POST] (id={},time={})".format(
            post.id, datetime.datetime.now()
        ))

    def _crawl_post_comments(self, comment, group_id, post_id):
        """
        Take a post comment FBObject and persist to MongoDB
        :param comment:
        :param group_id:
        :param post_id:
        :return:
        """
        comment_obj = comment.persist()
        comment_obj["group_id"] = group_id
        comment_obj["post_id"] = post_id
        current_comment_id = comment_obj["id"]

        # save and log action
        self.DAO.comments.save(comment_obj)
        self.LOG.info("[COMMENT] (id={},time={})".format(
            current_comment_id, datetime.datetime.now()
        ))

    def _crawl_post_likes(self, like, group_id, post_id):
        """
        Take a post like FBObject and persist it to MongoDB
        :param like:
        :param group_id:
        :param post_id:
        :return:
        """
        like_obj = like.persist()
        like_obj["group_id"] = group_id
        like_obj["post_id"] = post_id

        # save and log action
        self.DAO.likes.save(like_obj)
        # noinspection PyUnresolvedReferences
        self.LOG.info("[POST-LIKE] (id={},time={})".format(
            post_id, datetime.datetime.now()
        ))

    def crawl(self):
        # Subclasses must implement a traversal strategy.
        raise NotImplementedError("What, why did you use the abstract base class?")
class GroupCrawlerDFS(AbstractBaseCrawler):
    """Depth-first crawler: for each group, walk its posts and, per post,
    persist all comments and likes before moving on."""

    def __init__(self, name="marksweep"):
        super(GroupCrawlerDFS, self).__init__(name)
        self.LOG = logging.getLogger("dfs-crawler : {}".format(name))

    def crawl(self, lim=100):
        """
        This crawl will traverse the Facebook graph depth first and persist all FBObjects
        :param lim: total number of posts to get per page.
        :return:
        """
        self.LOG.info("[JOB INITIATED] {}".format(datetime.datetime.now()))
        for grp in self.groups:
            grp_id = grp.id
            self._crawl_group(grp)
            for grp_post in grp.posts_(limit=lim, all=self.get_all_posts):
                grp_post_id = grp_post.id_
                self._crawl_group_post(grp_post, grp_id)
                # Exhaust this post's comments and likes before moving on.
                for post_comment in grp_post.comments_(limit=500, all=True):
                    self._crawl_post_comments(post_comment, grp_id, grp_post_id)
                for post_like in grp_post.likes_(limit=500, all=True):
                    self._crawl_post_likes(post_like, grp_id, grp_post_id)
        self.LOG.info("[JOB COMPLETED] {}".format(datetime.datetime.now()))
class GroupCrawlerBFS(AbstractBaseCrawler):
    """Breadth-first crawler: queue all groups, then all posts, then persist
    each post's comments and likes."""

    def __init__(self, name='marksweep'):
        super(GroupCrawlerBFS, self).__init__(name)
        # Work queues for the two breadth-first phases.
        self.group_queue = Queue()
        self.posts_queue = Queue()
        self.LOG = logging.getLogger("bfs-crawler : {}".format(name))

    def crawl(self, lim=100):
        """
        This crawl will traverse the Facebook graph breadth first with respect
        to posts and then and persist all FBObjects
        :rtype : void
        :param lim: total number of posts to get per page.
        :return:
        """
        self.LOG.info("[JOB INITIATED : QUEUING GROUP NODES] {}".format(datetime.datetime.now()))
        # Visit all groups and push into queue
        for group in self.groups:
            self.group_queue.put(group)

        self.LOG.info("[VISITING GROUP NODES] {}".format(datetime.datetime.now()))
        # For each group get the first 100 posts
        # Push each post onto the queue
        while not self.group_queue.empty():
            group = self.group_queue.get()
            self.LOG.info("[QUEUEING POST NODES] {}".format(datetime.datetime.now()))
            for post in group.posts_(limit=lim, all=True):
                self.posts_queue.put(post)

        self.LOG.info("[VISITING POST NODES] {}".format(datetime.datetime.now()))
        # For each post from the queue
        # Persist all comments and likes
        while not self.posts_queue.empty():
            post = self.posts_queue.get()
            # NOTE(review): ids are coerced to int here, while the DFS crawler
            # stores them unconverted — confirm which form downstream expects.
            current_post_id = int(post.id_)
            current_group_id = int(post.group_id_)
            self._crawl_group_post(post, current_group_id)

            # Comments and Likes are crawled depth first
            self.LOG.info("[VISITING COMMENT NODES] {}".format(datetime.datetime.now()))
            for comment in post.comments_(limit=500, all=True):
                self._crawl_post_comments(comment, current_group_id, current_post_id)

            self.LOG.info("[VISITING LIKE NODES] {}".format(datetime.datetime.now()))
            for like in post.likes_(limit=500, all=True):
                self._crawl_post_likes(like, current_group_id, current_post_id)

        self.LOG.info("[JOB COMPLETED] {}".format(datetime.datetime.now()))
if __name__ == "__main__":
    # Run a breadth-first crawl over the first 100 posts of each group.
    crawlerz = GroupCrawlerBFS()
    crawlerz.crawl(lim=100)
| jxnl/fbms-crawler | graph-crawler/crawlers.py | Python | mit | 7,106 | [
"VisIt"
] | 8f5a3da9f5702c93fe447bc754f647910bc772fc9b2b18f04d75510fcd9ddcf9 |
"""
The B{0install select} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import sys
from zeroinstall import _, logger
from zeroinstall.cmd import UsageError
from zeroinstall.injector import model, selections
from zeroinstall.injector.requirements import Requirements
from zeroinstall.injector.driver import Driver
from zeroinstall.support import tasks
syntax = "URI"
def add_generic_select_options(parser):
	"""All options for selecting (shared by 'select', 'download' and 'run')."""
	parser.add_option("", "--before", help=_("choose a version before this"), metavar='VERSION')
	parser.add_option("", "--command", help=_("command to select"), metavar='COMMAND')
	parser.add_option("", "--cpu", help=_("target CPU type"), metavar='CPU')
	parser.add_option("", "--message", help=_("message to display when interacting with user"))
	parser.add_option("", "--not-before", help=_("minimum version to choose"), metavar='VERSION')
	parser.add_option("-o", "--offline", help=_("try to avoid using the network"), action='store_true')
	parser.add_option("", "--os", help=_("target operation system type"), metavar='OS')
	parser.add_option("-r", "--refresh", help=_("refresh all used interfaces"), action='store_true')
	parser.add_option("-s", "--source", help=_("select source code"), action='store_true')
	parser.add_option("", "--version", help=_("specify version contraint (e.g. '3' or '3..')"), metavar='RANGE')
	parser.add_option("", "--version-for", help=_("set version constraints for a specific interface"),
			nargs=2, metavar='URI RANGE', action='append')
def add_options(parser):
	"""Options for 'select' and 'download' (but not 'run')"""
	add_generic_select_options(parser)
	parser.add_option("", "--xml", help=_("write selected versions as XML"), action='store_true')
def get_selections(config, options, iface_uri, select_only, download_only, test_callback, requirements = None):
	"""Get selections for iface_uri, according to the options passed.
	Will switch to GUI mode if necessary.
	@param options: options from OptionParser
	@param iface_uri: canonical URI of the interface
	@param select_only: return immediately even if the selected versions aren't cached
	@param download_only: wait for stale feeds, and display GUI button as Download, not Run
	@param requirements: requirements to use; if None, requirements come from options (since 1.15)
	@type requirements: Requirements
	@return: the selected versions, or None if the user cancels
	@rtype: L{selections.Selections} | None
	"""
	if options.offline:
		config.network_use = model.network_offline

	iface_cache = config.iface_cache

	# Try to load it as a feed. If it is a feed, it'll get cached. If not, it's a
	# selections document and we return immediately.
	maybe_selections = iface_cache.get_feed(iface_uri, selections_ok = True)
	if isinstance(maybe_selections, selections.Selections):
		if not select_only:
			# Make sure all selected implementations are in the local cache.
			blocker = maybe_selections.download_missing(config)
			if blocker:
				logger.info(_("Waiting for selected implementations to be downloaded..."))
				tasks.wait_for_blocker(blocker)
		return maybe_selections

	if requirements is None:
		requirements = Requirements(iface_uri)
		requirements.parse_options(options)

	return get_selections_for(requirements, config, options, select_only, download_only, test_callback)
def get_selections_for(requirements, config, options, select_only, download_only, test_callback):
    """Get selections for the given requirements, switching to GUI mode if necessary.
    @param requirements: the interface/version constraints to solve for
    @type requirements: Requirements
    @param options: options from OptionParser
    @param select_only: we only need a solution, not cached implementations
    @param download_only: wait for stale feeds, and display the GUI button as Download, not Run
    @return: the selected versions, or None if the user cancels
    @rtype: L{selections.Selections} | None
    @since: 1.9"""
    if options.offline:
        config.network_use = model.network_offline

    iface_cache = config.iface_cache

    driver = Driver(config = config, requirements = requirements)

    # Note that need_download() triggers a solve
    if options.refresh or options.gui:
        # We could run immediately, but the user asked us not to
        can_run_immediately = False
    else:
        if select_only:
            # --select-only: we only care that we've made a selection, not that we've cached the implementations
            driver.need_download()
            can_run_immediately = driver.solver.ready
        else:
            can_run_immediately = not driver.need_download()

        stale_feeds = [feed for feed in driver.solver.feeds_used if
                       not feed.startswith('distribution:') and  # Ignore (memory-only) PackageKit feeds
                       iface_cache.is_stale(feed, config.freshness)]

        if download_only and stale_feeds:
            can_run_immediately = False

    if can_run_immediately:
        # (stale_feeds is always bound here: this branch is unreachable when
        # the refresh/gui branch above set can_run_immediately = False)
        if stale_feeds:
            if config.network_use == model.network_offline:
                # Fixed log message: was "No doing background update ..."
                logger.debug(_("Not doing background update because we are in off-line mode."))
            elif options.dry_run:
                print(_("[dry-run] would check for updates in the background"))
            else:
                # There are feeds we should update, but we can run without them.
                # Do the update in the background while the program is running.
                from zeroinstall.injector import background
                background.spawn_background_update(driver, options.verbose)
        return driver.solver.selections

    # If we need to download anything, we might as well
    # refresh all the feeds first.
    options.refresh = True

    if options.gui != False:
        # If the user didn't say whether to use the GUI, choose for them.
        gui_args = driver.requirements.get_as_options()
        if download_only:
            # Just changes the button's label
            gui_args.append('--download-only')
        if options.refresh:
            gui_args.append('--refresh')
        if options.verbose:
            gui_args.insert(0, '--verbose')
            if options.verbose > 1:
                gui_args.insert(0, '--verbose')
        if options.with_store:
            for x in options.with_store:
                gui_args += ['--with-store', x]
        if select_only:
            gui_args.append('--select-only')

        from zeroinstall import helpers
        sels = helpers.get_selections_gui(requirements.interface_uri, gui_args, test_callback, use_gui = options.gui)

        if not sels:
            return None  # Aborted
        elif sels is helpers.DontUseGUI:
            sels = None
    else:
        sels = None

    if sels is None:
        # Note: --download-only also makes us stop and download stale feeds first.
        downloaded = driver.solve_and_download_impls(refresh = options.refresh or download_only or False,
                                                     select_only = select_only)
        if downloaded:
            tasks.wait_for_blocker(downloaded)
        sels = driver.solver.selections

    return sels
def handle(config, options, args):
    """Handle the '0install select APP-or-URI' subcommand."""
    if len(args) != 1:
        raise UsageError()

    app = config.app_mgr.lookup_app(args[0], missing_ok = True)
    if app is None:
        iface_uri = model.canonical_iface_uri(args[0])
        requirements = None
        changes = False
    else:
        old_sels = app.get_selections()
        requirements = app.get_requirements()
        changes = requirements.parse_update_options(options)
        iface_uri = old_sels.interface

        if requirements.extra_restrictions and not options.xml:
            print("User-provided restrictions in force:")
            for uri, expr in requirements.extra_restrictions.items():
                print("  {uri}: {expr}".format(uri = uri, expr = expr))
            print()

    sels = get_selections(config, options, iface_uri,
                          select_only = True, download_only = False,
                          test_callback = None, requirements = requirements)
    if not sels:
        sys.exit(1)  # Aborted by user

    if options.xml:
        show_xml(sels)
    else:
        show_human(sels, config.stores)
        if app is not None:
            from zeroinstall.cmd import whatchanged
            changes = whatchanged.show_changes(old_sels.selections, sels.selections) or changes
        if changes:
            print(_("(note: use '0install update' instead to save the changes)"))
def show_xml(sels):
    """Write the selections document to stdout as XML, followed by a newline."""
    dom = sels.toDOM()
    dom.writexml(sys.stdout)
    sys.stdout.write('\n')
def show_human(sels, stores):
    """Print the selection tree rooted at sels.interface in human-readable form."""
    visited = set()  # guard against dependency cycles

    def print_node(uri, commands, indent):
        if uri in visited: return
        visited.add(uri)
        impl = sels.selections.get(uri, None)
        print(indent + "- URI:", uri)
        if impl:
            print(indent + "  Version:", impl.version)
            if impl.id.startswith('package:'):
                path = "(" + impl.id + ")"
            else:
                path = impl.get_path(stores, missing_ok = True) or _("(not cached)")
            print(indent + "  Path:", path)
            indent += "  "
            deps = impl.dependencies
            for c in commands:
                deps += impl.get_command(c).requires
            for child in deps:
                print_node(child.interface, child.get_required_commands(), indent)
        else:
            print(indent + "  No selected version")

    print_node(sels.interface, [sels.command] if sels.command else [], "")
def complete(completion, args, cword):
    """Shell-completion hook: offer app names and interface URIs for the first argument."""
    if cword == 0 and len(args) == 1:
        completion.expand_apps()
        completion.expand_interfaces()
| dsqmoore/0install | zeroinstall/cmd/select.py | Python | lgpl-2.1 | 8,502 | [
"VisIt"
] | 0688ede0f9d91a3889bcfdb7b520ae302a149e55339f5aeec1da78e2eb9601a8 |
from __future__ import division, print_function, absolute_import
from scipy import stats
import numpy as np
from numpy.testing import (assert_almost_equal, assert_,
assert_array_almost_equal, assert_array_almost_equal_nulp)
import pytest
from pytest import raises as assert_raises
def test_kde_1d():
    """Sanity-check a 1-D gaussian_kde against the normal density it samples."""
    np.random.seed(8765678)
    sample = np.random.randn(500)
    mu = sample.mean()
    sigma = sample.std(ddof=1)

    kde = stats.gaussian_kde(sample)

    grid = np.linspace(-7, 7, 501)
    kde_vals = kde.evaluate(grid)
    ref_vals = stats.norm.pdf(grid, loc=mu, scale=sigma)
    dx = grid[1] - grid[0]

    # Integrated squared error against the reference density must be small.
    assert_(np.sum((kde_vals - ref_vals)**2) * dx < 0.01)

    # Each half of the density around the sample mean integrates to ~0.5.
    upper = kde.integrate_box_1d(mu, np.inf)
    lower = kde.integrate_box_1d(-np.inf, mu)
    assert_almost_equal(upper, 0.5, decimal=1)
    assert_almost_equal(lower, 0.5, decimal=1)
    assert_almost_equal(kde.integrate_box(mu, np.inf), upper, decimal=13)
    assert_almost_equal(kde.integrate_box(-np.inf, mu), lower, decimal=13)
    assert_almost_equal(kde.integrate_kde(kde),
                        (kde_vals**2).sum() * dx, decimal=2)
    assert_almost_equal(kde.integrate_gaussian(mu, sigma**2),
                        (kde_vals * ref_vals).sum() * dx, decimal=2)
@pytest.mark.slow
def test_kde_2d():
    """Sanity-check a 2-D gaussian_kde against the MVN density it samples."""
    np.random.seed(8765678)
    mean = np.array([1.0, 3.0])
    covariance = np.array([[1.0, 2.0], [2.0, 6.0]])

    # gaussian_kde expects shape (d, n), hence the transpose.
    sample = np.random.multivariate_normal(mean, covariance, size=500).T
    kde = stats.gaussian_kde(sample)

    x, y = np.mgrid[-7:7:500j, -7:7:500j]
    coords = np.vstack([x.ravel(), y.ravel()])
    kde_vals = kde.evaluate(coords).reshape(500, 500)
    ref_vals = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
    dx = y.ravel()[1] - y.ravel()[0]
    assert_(np.sum((kde_vals - ref_vals)**2) * (dx**2) < 0.01)

    small, large = -1e100, 1e100
    upper = kde.integrate_box([small, mean[1]], [large, large])
    lower = kde.integrate_box([small, small], [large, mean[1]])
    assert_almost_equal(upper, 0.5, decimal=1)
    assert_almost_equal(lower, 0.5, decimal=1)
    assert_almost_equal(kde.integrate_kde(kde),
                        (kde_vals**2).sum() * (dx**2), decimal=2)
    assert_almost_equal(kde.integrate_gaussian(mean, covariance),
                        (kde_vals * ref_vals).sum() * (dx**2), decimal=2)
def test_kde_bandwidth_method():
    def scotts_factor(kde_obj):
        """Same as default, just check that it works."""
        return np.power(kde_obj.n, -1. / (kde_obj.d + 4))

    np.random.seed(8765678)
    sample = np.random.randn(50)

    kde_default = stats.gaussian_kde(sample)
    # A callable and an equivalent scalar must both reproduce the default.
    kde_callable = stats.gaussian_kde(sample, bw_method=scotts_factor)
    kde_scalar = stats.gaussian_kde(sample, bw_method=kde_default.factor)

    grid = np.linspace(-7, 7, 51)
    ref = kde_default.evaluate(grid)
    assert_almost_equal(ref, kde_callable.evaluate(grid))
    assert_almost_equal(ref, kde_scalar.evaluate(grid))

    # An unrecognised bw_method string must be rejected.
    try:
        stats.gaussian_kde(sample, bw_method='wrongstring')
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for bad bw_method")
# Subclasses that should stay working (extracted from various sources).
# Unfortunately the earlier design of gaussian_kde made it necessary for users
# to create these kinds of subclasses, or call _compute_covariance() directly.
class _kde_subclass1(stats.gaussian_kde):
def __init__(self, dataset):
self.dataset = np.atleast_2d(dataset)
self.d, self.n = self.dataset.shape
self.covariance_factor = self.scotts_factor
self._compute_covariance()
class _kde_subclass2(stats.gaussian_kde):
def __init__(self, dataset):
self.covariance_factor = self.scotts_factor
super(_kde_subclass2, self).__init__(dataset)
class _kde_subclass3(stats.gaussian_kde):
def __init__(self, dataset, covariance):
self.covariance = covariance
stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \
* self.n
class _kde_subclass4(stats.gaussian_kde):
def covariance_factor(self):
return 0.5 * self.silverman_factor()
def test_gaussian_kde_subclassing():
    """Historic subclassing patterns must keep matching the base class."""
    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
    xs = np.linspace(-10, 10, num=50)

    # Reference: plain gaussian_kde.
    kde = stats.gaussian_kde(x1)
    ys = kde(xs)

    # Subclass 1: hand-rolled __init__.
    assert_array_almost_equal_nulp(ys, _kde_subclass1(x1)(xs), nulp=10)

    # Subclass 2: covariance_factor assigned before the parent __init__.
    assert_array_almost_equal_nulp(ys, _kde_subclass2(x1)(xs), nulp=10)

    # Subclass 3: user-supplied covariance matrix.
    assert_array_almost_equal_nulp(ys, _kde_subclass3(x1, kde.covariance)(xs), nulp=10)

    # Subclass 4: halved Silverman bandwidth, checked against stored values.
    y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
    assert_array_almost_equal(y_expected, _kde_subclass4(x1)(x1), decimal=6)

    # Not a subclass, but check direct use of _compute_covariance().
    kde5 = kde
    kde5.covariance_factor = lambda: kde.factor
    kde5._compute_covariance()
    assert_array_almost_equal_nulp(ys, kde5(xs), nulp=10)
def test_gaussian_kde_covariance_caching():
    """Setting then resetting the bandwidth must leave no stale state behind."""
    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
    xs = np.linspace(-10, 10, num=5)
    # Expected values from scipy 0.10, before changes to gaussian_kde;
    # not compared with any external reference.
    y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]

    kde = stats.gaussian_kde(x1)
    kde.set_bandwidth(bw_method=0.5)
    kde.set_bandwidth(bw_method='scott')
    assert_array_almost_equal(y_expected, kde(xs), decimal=7)
def test_gaussian_kde_monkeypatch():
    """Ugly, but people may rely on this. See scipy pull request 123,
    specifically the linked ML thread "Width of the Gaussian in stats.kde".
    If it is necessary to break this later on, that is to be discussed on ML.
    """
    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
    xs = np.linspace(-10, 10, num=50)

    # The old monkeypatched route to Silverman's rule.
    kde = stats.gaussian_kde(x1)
    kde.covariance_factor = kde.silverman_factor
    kde._compute_covariance()

    # The new saner version.
    kde2 = stats.gaussian_kde(x1, bw_method='silverman')

    assert_array_almost_equal_nulp(kde(xs), kde2(xs), nulp=10)
def test_kde_integer_input():
    """Regression test for #1181: an integer dataset must work."""
    data = np.arange(5)
    kde = stats.gaussian_kde(data)
    expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
    assert_array_almost_equal(kde(data), expected, decimal=6)
def test_pdf_logpdf():
    """pdf() and logpdf() must agree with evaluate()."""
    np.random.seed(1)
    sample = np.random.randn(50)

    kde = stats.gaussian_kde(sample)
    grid = np.linspace(-15, 12, 25)

    vals = kde.evaluate(grid)
    assert_almost_equal(vals, kde.pdf(grid), decimal=12)
    assert_almost_equal(np.log(vals), kde.logpdf(grid), decimal=12)

    # Also check with more evaluation points than data points.
    kde2 = stats.gaussian_kde(grid)
    assert_almost_equal(np.log(kde2.evaluate(sample)), kde2.logpdf(sample),
                        decimal=12)
| gfyoung/scipy | scipy/stats/tests/test_kdeoth.py | Python | bsd-3-clause | 7,955 | [
"Gaussian"
] | d708fe652d45548d5c6039ceb1400aaae675c1a6b138dcf1e580f3d495b04bff |
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based (somewhat) on the
good old 'SimpleOSC' implementation by Daniel Holth & Clinton McChesney.
This implementation is intended to still be 'Simple' to the user, but much more complete
(with OSCServer & OSCClient classes) and much more powerful
(the OSCMultiClient supports subscriptions & message-filtering,
OSCMessage & OSCBundle are now proper container-types)
================
OpenSoundControl
================
OpenSoundControl is a network-protocol for sending (small) packets of addressed data over network sockets.
This OSC-implementation uses the UDP/IP protocol for sending and receiving packets.
(Although it is theoretically possible to send OSC-packets over TCP, almost all known implementations use UDP)
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a (host:port) network-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
The OSCMessage class makes it easy to create & manipulate OSC-messages of this kind in a 'pythonesque' way
(that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only OSC-messages as 'payload'. Recursively.
(meaning; an OSC-bundle could contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an OSC-address. (but the OSC-messages
a bundle contains will have OSC-addresses!)
Also, an OSC-bundle can have a timetag, essentially telling the receiving Server to 'hold' the bundle until
the specified time.
The OSCBundle class allows easy cration & manipulation of OSC-bundles.
see also http://opensoundcontrol.org/spec-1_0
---------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket' module) to send
binary representations of OSC-messages to a remote host:port address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local port, and handles
incoming requests. Either one-after-the-other (OSCServer) or in a multi-threaded / multi-process fashion
(ThreadingOSCServer / ForkingOSCServer). If the Server has a callback-function (a.k.a. handler) registered
to 'deal with' (i.e. handle) the received message's OSC-address, that function is called, passing it the (decoded) message
The different OSCServers implemented here all support the (recursive) un-bundling of OSC-bundles,
and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 's' (string) and 'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
---------
Stock, V2_Lab, Rotterdam, 2008
----------
Changelog:
----------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support fon numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn
global version
version = ("0.3","5b", "$Rev: 5294 $"[6:-2])
global FloatTypes
FloatTypes = [types.FloatType]
global IntTypes
IntTypes = [types.IntType]
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
    """ Builds typetagged OSC messages.

    OSCMessage objects are container objects for building OSC-messages.
    On the 'front' end, they behave much like list-objects, and on the 'back' end
    they generate a binary representation of the message, which can be sent over a network socket.
    OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
    followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
    and finally the arguments themselves, encoded in an OSC-specific way.
    On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
    The message contents can be manipulated much like a list:
      >>> msg = OSCMessage("/my/osc/address")
      >>> msg.append('something')
      >>> msg.insert(0, 'something else')
      >>> msg[1] = 'entirely'
      >>> msg.extend([1,2,3.])
      >>> msg += [4, 5, 6.]
      >>> del msg[3:6]
      >>> msg.pop(-2)
      5
      >>> print msg
      /my/osc/address ['something else', 'entirely', 1, 6.0]
    OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
    inherits its address from the left-hand operand. The right-hand operand's address is ignored.
    To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
    Additional methods exist for retreiving typetags or manipulating items as (typetag, value) tuples.
    """
    def __init__(self, address=""):
        """Instantiate a new OSCMessage.
        The OSC-address can be specified with the 'address' argument.
        """
        self.clear(address)

    def setAddress(self, address):
        """Set or change the OSC-address."""
        self.address = address

    def clear(self, address=""):
        """Clear (or set a new) OSC-address and clear any arguments appended so far."""
        self.address = address
        self.clearData()

    def clearData(self):
        """Clear any arguments appended so far."""
        self.typetags = ","
        self.message = ""

    def append(self, argument, typehint=None):
        """Appends data to the message, updating the typetags based on
        the argument's type. If the argument is a blob (counted
        string) pass in 'b' as typehint.
        'argument' may also be a list or tuple, in which case its elements
        will get appended one-by-one, all using the provided typehint.
        """
        if type(argument) == types.DictType:
            argument = argument.items()
        elif isinstance(argument, OSCMessage):
            raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")

        if hasattr(argument, '__iter__'):
            for arg in argument:
                self.append(arg, typehint)
            return

        if typehint == 'b':
            binary = OSCBlob(argument)
            tag = 'b'
        elif typehint == 't':
            binary = OSCTimeTag(argument)
            tag = 't'
        else:
            tag, binary = OSCArgument(argument, typehint)

        self.typetags += tag
        self.message += binary

    def getBinary(self):
        """Returns the binary representation of the message."""
        binary = OSCString(self.address)
        binary += OSCString(self.typetags)
        binary += self.message
        return binary

    def __repr__(self):
        """Returns a string containing the decoded Message."""
        return str(decodeOSC(self.getBinary()))

    def __str__(self):
        """Returns the Message's address and contents as a string."""
        return "%s %s" % (self.address, str(self.values()))

    def __len__(self):
        """Returns the number of arguments appended so far."""
        return (len(self.typetags) - 1)

    def __eq__(self, other):
        """Return True if two OSCMessages have the same address & content."""
        if not isinstance(other, self.__class__):
            return False

        return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)

    def __ne__(self, other):
        """Return (not self.__eq__(other))."""
        return not self.__eq__(other)

    def __add__(self, values):
        """Returns a copy of self, with the contents of 'values' appended
        (see the 'extend()' method, below).
        """
        msg = self.copy()
        msg.extend(values)
        return msg

    def __iadd__(self, values):
        """Appends the contents of 'values'
        (equivalent to 'extend()', below).
        Returns self.
        """
        self.extend(values)
        return self

    def __radd__(self, values):
        """Appends the contents of this OSCMessage to 'values'.
        Returns the extended 'values' (list or tuple).
        """
        out = list(values)
        out.extend(self.values())

        if type(values) == types.TupleType:
            return tuple(out)

        return out

    def _reencode(self, items):
        """Erase & rebuild the OSCMessage contents from the given
        list of (typehint, value) tuples."""
        self.clearData()
        for item in items:
            self.append(item[1], item[0])

    def values(self):
        """Returns a list of the arguments appended so far."""
        return decodeOSC(self.getBinary())[2:]

    def tags(self):
        """Returns a list of typetags of the appended arguments."""
        return list(self.typetags.lstrip(','))

    def items(self):
        """Returns a list of (typetag, value) tuples for
        the arguments appended so far.
        """
        out = []
        values = self.values()
        typetags = self.tags()
        for i in range(len(values)):
            out.append((typetags[i], values[i]))

        return out

    def __contains__(self, val):
        """Test if the given value appears in the OSCMessage's arguments."""
        return (val in self.values())

    def __getitem__(self, i):
        """Returns the indicated argument (or slice)."""
        return self.values()[i]

    def __delitem__(self, i):
        """Removes the indicated argument (or slice)."""
        items = self.items()
        del items[i]

        self._reencode(items)

    def _buildItemList(self, values, typehint=None):
        """Normalize 'values' into a list of (typehint, value) tuples."""
        if isinstance(values, OSCMessage):
            items = values.items()
        elif type(values) == types.ListType:
            items = []
            for val in values:
                if type(val) == types.TupleType:
                    items.append(val[:2])
                else:
                    items.append((typehint, val))
        elif type(values) == types.TupleType:
            items = [values[:2]]
        else:
            items = [(typehint, values)]

        return items

    def __setitem__(self, i, val):
        """Set indicated argument (or slice) to a new value.
        'val' can be a single int/float/string, or a (typehint, value) tuple.
        Or, if 'i' is a slice, a list of these or another OSCMessage.
        """
        items = self.items()

        new_items = self._buildItemList(val)

        if type(i) != types.SliceType:
            if len(new_items) != 1:
                raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")

            new_items = new_items[0]

        # finally...
        items[i] = new_items

        self._reencode(items)

    def setItem(self, i, val, typehint=None):
        """Set indicated argument to a new value (with typehint)."""
        items = self.items()

        items[i] = (typehint, val)

        self._reencode(items)

    def copy(self):
        """Returns a deep copy of this OSCMessage."""
        msg = self.__class__(self.address)
        msg.typetags = self.typetags
        msg.message = self.message
        return msg

    def count(self, val):
        """Returns the number of times the given value occurs in the OSCMessage's arguments."""
        return self.values().count(val)

    def index(self, val):
        """Returns the index of the first occurence of the given value in the OSCMessage's arguments.
        Raises ValueError if val isn't found.
        """
        return self.values().index(val)

    def extend(self, values):
        """Append the contents of 'values' to this OSCMessage.
        'values' can be another OSCMessage, or a list/tuple of ints/floats/strings.
        """
        items = self.items() + self._buildItemList(values)

        self._reencode(items)

    def insert(self, i, val, typehint = None):
        """Insert given value (with optional typehint) into the OSCMessage
        at the given index.
        """
        items = self.items()

        for item in reversed(self._buildItemList(val)):
            items.insert(i, item)

        self._reencode(items)

    def popitem(self, i):
        """Delete the indicated argument from the OSCMessage, and return it
        as a (typetag, value) tuple.
        """
        items = self.items()

        item = items.pop(i)

        self._reencode(items)

        return item

    def pop(self, i):
        """Delete the indicated argument from the OSCMessage, and return it."""
        return self.popitem(i)[1]

    def reverse(self):
        """Reverses the arguments of the OSCMessage (in place)."""
        items = self.items()

        items.reverse()

        self._reencode(items)

    def remove(self, val):
        """Removes the first argument with the given value from the OSCMessage.
        Raises ValueError if val isn't found.
        """
        items = self.items()

        # this is not very efficient...
        i = 0
        for (t, v) in items:
            if (v == val):
                break
            i += 1
        else:
            # Fixed: previously referenced the undefined name 'm', which
            # raised NameError instead of the intended ValueError.
            raise ValueError("'%s' not in OSCMessage" % str(val))
        # but more efficient than first calling self.values().index(val),
        # then calling self.items(), which would in turn call self.values() again...

        del items[i]

        self._reencode(items)

    def __iter__(self):
        """Returns an iterator of the OSCMessage's arguments."""
        return iter(self.values())

    def __reversed__(self):
        """Returns a reverse iterator of the OSCMessage's arguments."""
        return reversed(self.values())

    def itervalues(self):
        """Returns an iterator of the OSCMessage's arguments."""
        return iter(self.values())

    def iteritems(self):
        """Returns an iterator of the OSCMessage's arguments as
        (typetag, value) tuples.
        """
        return iter(self.items())

    def itertags(self):
        """Returns an iterator of the OSCMessage's arguments' typetags."""
        return iter(self.tags())
class OSCBundle(OSCMessage):
    """Builds a 'bundle' of OSC messages.

    An OSC-bundle is a special kind of OSC-message containing only other
    OSC-messages (and possibly nested OSC-bundles) as its payload.
    OSCBundle behaves like OSCMessage, with these differences:
      - items appended or inserted that are not OSCMessage objects get
        wrapped in a new OSCMessage first
      - the bundle itself has no OSC-address; its 'address' attribute is only
        the default address inherited by OSCMessages the bundle creates
      - a bundle carries a timetag telling the receiver when to process it
        (the default, 0, means 'immediately')
    """
    def __init__(self, address="", time=0):
        """Create a new OSCBundle.
        'address' becomes the default OSC-address for OSCMessages this bundle
        creates; 'time' sets the bundle's timetag (floating seconds).
        """
        super(OSCBundle, self).__init__(address)
        self.timetag = time

    def __str__(self):
        """Return the bundle's contents (and timetag, if nonzero) as a string."""
        if self.timetag > 0.:
            head = "#bundle (%s) [" % self.getTimeTagStr()
        else:
            head = "#bundle ["

        if len(self):
            return head + ", ".join([str(item) for item in self.values()]) + "]"
        return head + "]"

    def setTimeTag(self, time):
        """Set or change the OSCBundle's timetag
        (floating seconds since the epoch). Negative values are ignored.
        """
        if time >= 0:
            self.timetag = time

    def getTimeTagStr(self):
        """Return the timetag as a human-readable 'HH:MM:SS.mmm' string."""
        fract, secs = math.modf(self.timetag)
        return time.ctime(secs)[11:19] + ("%.3f" % fract)[1:]

    def append(self, argument, typehint = None):
        """Append data to the bundle, wrapping it in an OSCMessage unless it
        already is one. A newly created OSCMessage inherits this bundle's
        address. A dict argument may override that via its 'addr' key, and
        supply the message arguments via its 'args' key.
        """
        if isinstance(argument, OSCMessage):
            contained = argument
        else:
            contained = OSCMessage(self.address)
            if type(argument) == types.DictType:
                if 'addr' in argument:
                    contained.setAddress(argument['addr'])
                if 'args' in argument:
                    contained.append(argument['args'], typehint)
            else:
                contained.append(argument, typehint)

        self.message += OSCBlob(contained.getBinary())
        self.typetags += 'b'

    def getBinary(self):
        """Return the binary representation of the whole bundle."""
        return OSCString("#bundle") + OSCTimeTag(self.timetag) + self.message

    def _reencapsulate(self, decoded):
        """Rebuild an OSCMessage/OSCBundle tree from a decodeOSC() result."""
        if decoded[0] == "#bundle":
            out = OSCBundle()
            out.setTimeTag(decoded[1])
            for part in decoded[2:]:
                out.append(self._reencapsulate(part))
        else:
            out = OSCMessage(decoded[0])
            tags = decoded[1].lstrip(',')
            for i in range(len(tags)):
                out.append(decoded[2 + i], tags[i])
        return out

    def values(self):
        """Return the list of OSCMessages appended so far."""
        return [self._reencapsulate(d) for d in decodeOSC(self.getBinary())[2:]]

    def __eq__(self, other):
        """Two OSCBundles are equal when timetag, typetags and payload match."""
        if not isinstance(other, self.__class__):
            return False

        return (self.timetag == other.timetag) and (self.typetags == other.typetags) \
            and (self.message == other.message)

    def copy(self):
        """Returns a deep copy of this OSCBundle."""
        dup = super(OSCBundle, self).copy()
        dup.timetag = self.timetag
        return dup
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
    """Convert a string into a zero-padded OSC String.
    The length of the resulting string is always a multiple of 4 bytes.
    The string ends with 1 to 4 zero-bytes ('\x00')
    """
    # NOTE(review): the parameter name shadows the builtin 'next'.
    # pad out to the next multiple of 4, reserving room for at least one NUL;
    # struct's '%ds' field zero-fills the remainder
    OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
    return struct.pack(">%ds" % (OSCstringLength), str(next))
def OSCBlob(next):
    """Convert a string into an OSC Blob.
    An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
    The size is always a multiple of 4 bytes.
    The blob ends with 0 to 3 zero-bytes ('\x00')
    """
    if type(next) in types.StringTypes:
        # round the payload length up to a multiple of 4
        OSCblobLength = math.ceil((len(next)) / 4.0) * 4
        # NOTE(review): the size-field written here is the *padded* length;
        # _readBlob() below decodes it the same way, so it round-trips within
        # this library -- confirm interoperability with other OSC stacks.
        binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
    else:
        # non-string input encodes as an empty blob
        binary = ""

    return binary
def OSCArgument(next, typehint=None):
    """Convert some Python types to their OSC binary representations.

    Returns a (typetag, data) tuple, where the typetag is one of
    'f' (float32), 'i' (int32) or 's' (OSC-string).
    With no typehint the tag is inferred from the value's Python type;
    a hint of 'f' or 'i' is honoured when the value converts, falling
    back to a string encoding otherwise.
    """
    if not typehint:
        # infer the encoding from the value's type
        if type(next) in FloatTypes:
            return ('f', struct.pack(">f", float(next)))
        if type(next) in IntTypes:
            return ('i', struct.pack(">i", int(next)))
        return ('s', OSCString(next))

    if typehint == 'f':
        try:
            return ('f', struct.pack(">f", float(next)))
        except ValueError:
            # value does not convert to float; encode as string instead
            return ('s', OSCString(next))

    if typehint == 'i':
        try:
            return ('i', struct.pack(">i", int(next)))
        except ValueError:
            # value does not convert to int; encode as string instead
            return ('s', OSCString(next))

    # any other (or unknown) hint encodes as a string
    return ('s', OSCString(next))
def OSCTimeTag(time):
    """Convert a time in floating seconds to its
    OSC binary representation (two big-endian 32-bit words:
    whole seconds, then the fractional part).
    """
    if time > 0:
        fract, secs = math.modf(time)
        # NOTE(review): the fraction is scaled by 1e9 (nanoseconds), not by
        # 2**32 as the OSC/NTP timetag format prescribes; _readTimeTag()
        # mirrors this, so it round-trips within this library only.
        binary = struct.pack('>ll', long(secs), long(fract * 1e9))
    else:
        # the reserved 'immediately' timetag
        binary = struct.pack('>ll', 0L, 1L)

    return binary
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = string.find(data,"\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
    """Tries to interpret the next 4 bytes of the data
    as a 32-bit integer.
    Returns a (value, remaining-data) tuple.
    """
    if(len(data)<4):
        # truncated input: complain, but degrade gracefully to 0
        print "Error: too few bytes for int", data, len(data)
        rest = data
        integer = 0
    else:
        integer = struct.unpack(">i", data[0:4])[0]
        rest = data[4:]

    return (integer, rest)
def _readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer.
    Returns a (value, remaining-data) tuple.
    """
    # combine two big-endian 32-bit words into one 64-bit value
    high, low = struct.unpack(">ll", data[0:8])
    big = (long(high) << 32) + low
    rest = data[8:]
    return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">ll", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(high) + float(low / 1e9)
rest = data[8:]
return (time, rest)
def _readFloat(data):
    """Tries to interpret the next 4 bytes of the data
    as a 32-bit float.
    Returns a (value, remaining-data) tuple.
    """
    if(len(data)<4):
        # truncated input: complain, but degrade gracefully to 0
        print "Error: too few bytes for float", data, len(data)
        rest = data
        float = 0   # NOTE(review): local shadows the builtin 'float' -- harmless here, but worth renaming
    else:
        float = struct.unpack(">f", data[0:4])[0]
        rest = data[4:]

    return (float, rest)
def decodeOSC(data):
    """Converts a binary OSC message to a Python list.

    A plain message decodes to [address, typetags, arg1, arg2, ...];
    a bundle decodes to ["#bundle", timetag, [submsg], [submsg], ...]
    with each size-prefixed element decoded recursively.
    """
    # dispatch table: typetag character -> decoder for that argument type
    table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob}
    decoded = []
    address, rest = _readString(data)

    if address.startswith(","):
        # no address: the packet starts directly with the typetag-string
        typetags = address
        address = ""
    else:
        typetags = ""

    if address == "#bundle":
        # bundle: timetag, then a sequence of int32-size-prefixed elements
        time, rest = _readTimeTag(rest)
        decoded.append(address)
        decoded.append(time)
        while len(rest)>0:
            length, rest = _readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]

    elif len(rest)>0:
        if not len(typetags):
            typetags, rest = _readString(rest)
        decoded.append(address)
        decoded.append(typetags)

        if typetags.startswith(","):
            # decode each argument with the reader matching its typetag
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            raise OSCError("OSCMessage's typetag-string lacks the magic ','")

    return decoded
######
#
# Utility functions
#
######
def hexDump(bytes):
    """ Useful utility; prints the string in hexadecimal,
    16 bytes per row, followed by the raw repr of that row.
    """
    # NOTE(review): the parameter name shadows the builtin 'bytes'.
    print "byte 0 1 2 3 4 5 6 7 8 9 A B C D E F"

    num = len(bytes)
    for i in range(num):
        if (i) % 16 == 0:
            # start a new row, labelled with the row offset
            line = "%02X0 : " % (i/16)
        line += "%02X " % ord(bytes[i])
        if (i+1) % 16 == 0:
            print "%s: %s" % (line, repr(bytes[i-15:i+1]))
            line = ""

    # flush any trailing partial row
    bytes_left = num % 16
    if bytes_left:
        print "%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:]))
def getUrlStr(*args):
    """Convert provided arguments to a string in 'host:port/prefix' format
    Args can be:
      - (host, port)
      - (host, port), prefix
      - host, port
      - host, port, prefix
    """
    if not len(args):
        return ""

    # accept either a (host, port) tuple or separate host & port arguments
    if type(args[0]) == types.TupleType:
        host = args[0][0]
        port = args[0][1]
        args = args[1:]
    else:
        host = args[0]
        port = args[1]
        args = args[2:]

    if len(args):
        prefix = args[0]
    else:
        prefix = ""

    if len(host) and (host != '0.0.0.0'):
        try:
            # best-effort reverse lookup to show a readable hostname
            (host, _, _) = socket.gethostbyaddr(host)
        except socket.error:
            pass
    else:
        # empty host or wildcard address presents as 'localhost'
        host = 'localhost'

    if type(port) == types.IntType:
        return "%s:%d%s" % (host, port, prefix)
    else:
        # no numeric port known: omit the ':port' part
        return host + prefix
def parseUrlStr(url):
    """Convert provided string in 'host:port/prefix' format to its components
    Returns ((host, port), prefix)
    """
    if not (type(url) in types.StringTypes and len(url)):
        return (None, '')

    # strip any leading scheme such as 'osc://'
    i = url.find("://")
    if i > -1:
        url = url[i+3:]

    i = url.find(':')
    if i > -1:
        host = url[:i].strip()
        tail = url[i+1:].strip()
    else:
        host = ''
        tail = url

    # consume the leading digits of 'tail' as the port number
    for i in range(len(tail)):
        if not tail[i].isdigit():
            break
    else:
        # all characters were digits; include the last one as well
        i += 1

    portstr = tail[:i].strip()
    tail = tail[i:].strip()

    # the prefix ends at the first filter-operator character, if any
    found = len(tail)
    for c in ('/', '+', '-', '*'):
        i = tail.find(c)
        if (i > -1) and (i < found):
            found = i

    head = tail[:found].strip()
    prefix = tail[found:].strip()

    prefix = prefix.strip('/')
    if len(prefix) and prefix[0] not in ('+', '-', '*'):
        prefix = '/' + prefix

    # 'host/prefix' without a port: the pre-':' part was never set
    if len(head) and not len(host):
        host = head

    if len(host):
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass

    try:
        port = int(portstr)
    except ValueError:
        port = None

    return ((host, port), prefix)
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
When the 'address' argument is given this client is connected to a specific remote server.
- address ((host, port) tuple): the address of the remote server to send all messages to
Otherwise it acts as a generic client:
If address == 'None', the client doesn't connect to a specific remote server,
and the remote address must be supplied when calling sendto()
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
if server == None:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
self.server = None
else:
self.setServer(server)
self.client_address = None
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
if self.socket != None:
self.close()
self.socket = server.socket.dup()
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
isequal = cmp(self.socket._sock, other.socket._sock)
if isequal and self.server and other.server:
return cmp(self.server, other.server)
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
return self.socket.getpeername()
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self.socket.connect(address)
self.client_address = address
except socket.error, e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifing remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.connect(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
    """Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
    { '<addr>':True, '<addr>':False, ... }
    Returns a list: ['<prefix>', filters]
    """
    out = {}

    if type(args) in types.StringTypes:
        args = [args]

    prefix = None
    for arg in args:
        head = None
        # split into '+'-separated chunks; within each chunk, '-'-separated
        # parts after the first are exclusion filters
        for plus in arg.split('+'):
            minus = plus.split('-')
            plusfs = minus.pop(0).strip()
            if len(plusfs):
                plusfs = '/' + plusfs.strip('/')

            if (head == None) and (plusfs != "/*"):
                # the first non-wildcard chunk is treated as the prefix
                head = plusfs
            elif len(plusfs):
                if plusfs == '/*':
                    out = { '/*':True }     # reset all previous filters
                else:
                    out[plusfs] = True

            for minusfs in minus:
                minusfs = minusfs.strip()
                if len(minusfs):
                    minusfs = '/' + minusfs.strip('/')
                    if minusfs == '/*':
                        out = { '/*':False }    # reset all previous filters
                    else:
                        out[minusfs] = False

        if prefix == None:
            prefix = head

    return [prefix, out]
def getFilterStr(filters):
    """Return the given 'filters' dict as a list of
    '+<addr>' | '-<addr>' filter-strings.

    The first element is always an explicit '+/*' or '-/*' default:
    an explicit '/*' entry wins; otherwise the default is '+/*' when any
    exclusion filter is present, '-/*' when all filters are inclusions.
    """
    if not len(filters):
        return []

    if '/*' in filters.keys():
        if filters['/*']:
            out = ["+/*"]
        else:
            out = ["-/*"]
    else:
        if False in filters.values():
            out = ["+/*"]
        else:
            out = ["-/*"]

    # FIX(idiom): loop variable renamed from 'bool', which shadowed the builtin
    for (addr, state) in filters.items():
        if addr == '/*':
            continue

        if state:
            out.append("+%s" % addr)
        else:
            out.append("-%s" % addr)

    return out
# A translation-table for mapping OSC-address expressions to Python 're' expressions:
# '{' -> '(', ',' -> '|', '}' -> ')' (alternation), and '?' -> '.' (any single char)
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
    """Compiles and returns a 'regular expression' object for the given address-pattern.

    OSC wildcard syntax is mapped onto 're' syntax: regex metacharacters
    '.', '(' and ')' are escaped first, '*' becomes '.*', and the OSCtrans
    table turns '?' into '.' and '{a,b}' into '(a|b)'.
    """
    # escape literal regex metacharacters before introducing our own
    for literal in ('.', '(', ')'):
        pattern = pattern.replace(literal, '\\' + literal)

    # '*' matches 0 or more characters
    pattern = pattern.replace("*", r".*")

    # '?' -> '.' and '{,}' -> '(|)'
    return re.compile(pattern.translate(OSCtrans))
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in self.targets.keys():
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in src.keys(): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in src.items():
if (addr in dst.keys()) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in self.targets.keys():
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if type(filters) in types.StringTypes:
(_, filters) = parseFilterStr(filters)
elif type(filters) != types.DictType:
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
elif (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in self.targets.keys():
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in self.targets.items():
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in self.targets.keys()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in dict.items():
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in self.targets.items():
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifing remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message.
or
Returns a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = out.values()
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in filters.keys():
if filters['/*']:
out = msg
else:
out = None
elif False in filters.values():
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in filters.keys():
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
The message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = out.values()
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in self.targets.items():
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, msg)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
    """RequestHandler class for the OSCServer
    """
    def dispatchMessage(self, pattern, tags, data):
        """Attmept to match the given OSC-address pattern, which may contain '*',
        against all callbacks registered with the OSCServer.
        Calls the matching callback and returns whatever it returns.
        If no match is found, and a 'default' callback is registered, it calls that one,
        or raises NoCallbackError if a 'default' callback is not registered.

          - pattern (string):  The OSC-address of the receied message
          - tags (string):  The OSC-typetags of the receied message's arguments, without ','
          - data (list):  The message arguments
        """
        if len(tags) != len(data):
            raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))

        expr = getRegEx(pattern)

        replies = []
        matched = 0
        for addr in self.server.callbacks.keys():
            match = expr.match(addr)
            # require a full match, not just a matching prefix
            if match and (match.end() == len(addr)):
                reply = self.server.callbacks[addr](pattern, tags, data, self.client_address)
                matched += 1
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))

        if matched == 0:
            # no registered address matched: fall back to the 'default' callback
            if 'default' in self.server.callbacks:
                reply = self.server.callbacks['default'](pattern, tags, data, self.client_address)
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
            else:
                raise NoCallbackError(pattern)

        return replies

    def setup(self):
        """Prepare RequestHandler.
        Unpacks request as (packet, source socket address)
        Creates an empty list for replies.
        """
        (self.packet, self.socket) = self.request
        self.replies = []

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        if decoded[0] != "#bundle":
            # a plain message: dispatch it (typetags minus the leading ',')
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:])
            return

        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag: wait until its scheduled time
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)

        for msg in decoded[2:]:
            self._unbundle(msg)

    def handle(self):
        """Handle incoming OSCMessage
        """
        decoded = decodeOSC(self.packet)
        if not len(decoded):
            return

        self._unbundle(decoded)

    def finish(self):
        """Finish handling OSCMessage.
        Send any reply returned by the callback(s) back to the originating client
        as an OSCMessage or OSCBundle
        """
        if self.server.return_port:
            # redirect replies to the server's configured return-port
            self.client_address = (self.client_address[0], self.server.return_port)

        if len(self.replies) > 1:
            # multiple replies get wrapped into a single bundle
            msg = OSCBundle()
            for reply in self.replies:
                msg.append(reply)
        elif len(self.replies) == 1:
            msg = self.replies[0]
        else:
            return

        self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
    """Multi-threaded OSCRequestHandler;
    Starts a new RequestHandler thread for each unbundled OSCMessage
    """
    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function
        This version starts a new thread for each sub-Bundle found in the Bundle,
        then waits for all its children to finish.
        """
        if decoded[0] != "#bundle":
            # a plain message: dispatch in the current thread
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:])
            return

        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag before fanning out
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
            now = time.time()

        children = []

        for msg in decoded[2:]:
            t = threading.Thread(target = self._unbundle, args = (msg,))
            t.start()
            children.append(t)

        # wait for all children to terminate
        for t in children:
            t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer):
    """A Synchronous OSCServer
    Serves one request at-a-time, until the OSCServer is closed.
    The OSC address-pattern is matched against a set of OSC-addresses
    that have been registered to the server with a callback-function.
    If the address-pattern of the message matches the registered address of a callback,
    that function is called.
    """
    # set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
    RequestHandlerClass = OSCRequestHandler

    # define a socket timeout, so the serve_forever loop can actually exit.
    socket_timeout = 1

    # DEBUG: print error-tracebacks (to stderr)?
    print_tracebacks = False
    def __init__(self, server_address, client=None, return_port=0):
        """Instantiate an OSCServer.
          - server_address ((host, port) tuple): the local host & UDP-port
          the server listens on
          - client (OSCClient instance): The OSCClient used to send replies from this server.
          If none is supplied (default) an OSCClient will be created.
          - return_port (int): if supplied, sets the default UDP destination-port
          for replies coming from this server.
        """
        UDPServer.__init__(self, server_address, self.RequestHandlerClass)

        # {OSC-address: callback, ...} registry consulted by the RequestHandler
        self.callbacks = {}
        self.setReturnPort(return_port)
        self.error_prefix = ""
        self.info_prefix = "/info"

        # a finite timeout lets serve_forever() poll self.running
        self.socket.settimeout(self.socket_timeout)

        self.running = False
        self.client = None

        if client == None:
            self.client = OSCClient(server=self)
        else:
            self.setClient(client)
    def setClient(self, client):
        """Associate this Server with a new local Client instance, closing the Client this Server is currently using.
        """
        if not isinstance(client, OSCClient):
            raise ValueError("'client' argument is not a valid OSCClient object")

        if client.server != None:
            raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))

        # Server socket is already listening at this point, so we can't use the client's socket.
        # we'll have to force our socket on the client...

        client_address = client.address()   # client may be already connected
        client.close()                      # shut-down that socket

        # force our socket upon the client
        client.socket = self.socket.dup()
        client.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, client.sndbuf_size)
        client._fd = client.socket.fileno()
        client.server = self

        if client_address:
            # restore the client's previous connection on the new socket
            client.connect(client_address)
            if not self.return_port:
                self.return_port = client_address[1]

        if self.client != None:
            self.client.close()

        self.client = client
    def serve_forever(self):
        """Handle one request at a time until server is closed.
        handle_request() times out after 'socket_timeout' seconds, so
        the loop re-checks self.running regularly and close() can stop it.
        """
        self.running = True
        while self.running:
            self.handle_request()   # this times-out when no data arrives.
    def close(self):
        """Stops serving requests, closes server (socket), closes used client
        """
        self.running = False
        self.client.close()
        self.server_close()
    def __str__(self):
        """Returns a string containing this Server's Class-name, software-version and local bound address (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.address()
        if addr:
            out += " listening on osc://%s" % getUrlStr(addr)
        else:
            out += " (unbound)"

        return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return cmp(self.socket._sock, other.socket._sock)
    def __ne__(self, other):
        """Compare function; the inverse of __eq__.
        """
        return not self.__eq__(other)
    def address(self):
        """Returns a (host,port) tuple of the local address this server is bound to,
        or None if not bound to any address.
        """
        try:
            return self.socket.getsockname()
        except socket.error:
            return None
    def setReturnPort(self, port):
        """Set the destination UDP-port for replies returning from this server to the remote client.
        Ports outside the unprivileged range (1025-65535) disable the
        return-port override (replies go back to the sender's port).
        """
        if (port > 1024) and (port < 65536):
            self.return_port = port
        else:
            self.return_port = None
    def setSrvInfoPrefix(self, pattern):
        """Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
        """
        if len(pattern):
            # normalise: exactly one leading '/', no trailing '/'
            pattern = '/' + pattern.strip('/')

        self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
    def addMsgHandler(self, address, callback):
        """Register a handler for an OSC-address
          - 'address' is the OSC address-string.
          the address-string should start with '/' and may not contain '*'
          - 'callback' is the function called for incoming OSCMessages that match 'address'.
          The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
        """
        # reject OSC wildcard / reserved characters in the registered address
        for chk in '*?,[]{}# ':
            if chk in address:
                raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")

        if type(callback) not in (types.FunctionType, types.MethodType):
            raise OSCServerError("Message callback '%s' is not callable" % repr(callback))

        # 'default' is a reserved key (fallback handler), not an OSC-address
        if address != 'default':
            address = '/' + address.strip('/')

        self.callbacks[address] = callback
def delMsgHandler(self, address):
    """Remove the registered handler for the given OSC-address.

    Raises KeyError if no handler is registered for 'address'.
    """
    self.callbacks.pop(address)
def getOSCAddressSpace(self):
    """Return a list of all OSC-addresses registered with this Server."""
    return list(self.callbacks.keys())
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
    """Register a default set of OSC-address handlers with this Server:
    - 'default' -> noCallback_handler
    the given prefix is prepended to all other callbacks registered by this method:
    - '<prefix><info_prefix>' -> serverInfo_handler
    - '<prefix><error_prefix>' -> msgPrinter_handler
    - '<prefix>/print' -> msgPrinter_handler
    and, if the used Client supports it:
    - '<prefix>/subscribe' -> subscription_handler
    - '<prefix>/unsubscribe' -> subscription_handler

    Note: the given 'error_prefix' argument is also set as default 'error_prefix'
    for error-messages *sent from* this server. This is ok, because error-messages
    generally do not elicit a reply from the receiver.
    To do this with the serverInfo-prefixes would be a bad idea, because if a
    request received on '/info' (for example) would send replies to '/info',
    this could potentially cause a never-ending loop of messages!
    Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the
    same value as given to the setSrvInfoPrefix() method (for *replies* to
    incoming serverinfo requests). For example, use '/info' for incoming requests,
    and '/inforeply' or '/serverinfo' or even just '/print' as the info-reply prefix.
    """
    # error_prefix doubles as the outgoing error address (see note above)
    self.error_prefix = error_prefix
    self.addMsgHandler('default', self.noCallback_handler)
    self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
    self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
    self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
    # subscription handlers only make sense with an OSCMultiClient attached
    if isinstance(self.client, OSCMultiClient):
        self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
        self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
    """Write 'OSCServer: txt' (newline-terminated) to sys.stderr."""
    line = "OSCServer: %s\n" % txt
    sys.stderr.write(line)
def sendOSCerror(self, txt, client_address):
    """Send 'txt', encapsulated in an OSCMessage, to the default
    'error_prefix' OSC-address.

    The message is sent to the given client_address, with the default
    'return_port' overriding the client_address' port, if defined.
    Multi-line text is sent as an OSCBundle with one message per line.
    """
    lines = txt.split('\n')
    if len(lines) == 1:
        msg = OSCMessage(self.error_prefix)
        msg.append(lines[0])
    elif len(lines) > 1:
        # one message per line, wrapped in a bundle
        msg = OSCBundle(self.error_prefix)
        for line in lines:
            msg.append(line)
    else:
        # unreachable: str.split() always yields at least one element
        return
    if self.return_port:
        # redirect the reply to the configured return-port
        client_address = (client_address[0], self.return_port)
    self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
    """Log 'OSCServer: txt' to sys.stderr, and -- when an error_prefix is
    configured -- also send 'txt' as an OSC error-message to the client(s).
    (See printErr() and sendOSCerror().)
    """
    self.printErr(txt)
    if self.error_prefix:
        self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
    """Send 'txt', encapsulated in an OSCMessage, to the default
    'info_prefix' OSC-address.

    The message is sent to the given client_address, with the default
    'return_port' overriding the client_address' port, if defined.
    Multi-line text is sent as an OSCBundle with one message per line.
    """
    lines = txt.split('\n')
    if len(lines) == 1:
        msg = OSCMessage(self.info_prefix)
        msg.append(lines[0])
    elif len(lines) > 1:
        # one message per line, wrapped in a bundle
        msg = OSCBundle(self.info_prefix)
        for line in lines:
            msg.append(line)
    else:
        # unreachable: str.split() always yields at least one element
        return
    if self.return_port:
        # redirect the reply to the configured return-port
        client_address = (client_address[0], self.return_port)
    self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
    """Handle an exception raised in the Server's callbacks gracefully.

    Writes the error to sys.stderr and, if the error_prefix
    (see setSrvErrorPrefix()) is set, sends the error-message as a
    reply back to the client.
    """
    # must be called from inside an 'except' block: grab the active exception
    (e_type, e) = sys.exc_info()[:2]
    self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
    if self.print_tracebacks:
        import traceback
        traceback.print_exc()  # XXX But this goes to stderr!
    if len(self.error_prefix):
        self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
    """Fallback handler used when no callback matches an incoming address.

    Standard handler signature (shared by all registered handlers):
    - addr (string): the OSC address-pattern of the received message
      (already matched against this handler's registered OSC-address,
      but may still contain '*'s & such)
    - tags (string): OSC typetags of the message's arguments
      (without the leading comma); len(tags) == len(data)
    - data (list): the OSCMessage's arguments
    - client_address ((host, port) tuple): origin of the message
    A handler may return None, or an OSCMessage/OSCBundle which is then
    sent back to the client.

    Reports a "No callback registered ..." error; returns None.
    """
    warning = "No callback registered to handle OSC-address '%s'" % addr
    self.reportErr(warning, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
    """Debug handler: print any received OSCMessage to stderr.

    Standard handler signature (shared by all registered handlers):
    - addr (string): the OSC address-pattern of the received message
    - tags (string): OSC typetags of the message's arguments
      (without the leading comma); len(tags) == len(data)
    - data (list): the OSCMessage's arguments
    - client_address ((host, port) tuple): origin of the message
    A handler may return None, or an OSCMessage/OSCBundle which is then
    sent back to the client. This one prints the message and returns None.
    """
    origin = getUrlStr(client_address)
    txt = "OSCMessage '%s' from %s: " % (addr, origin) + str(data)
    self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
    """Built-in handler answering '/info' requests from remote clients.

    Standard handler signature: (addr, tags, data, client_address),
    with len(tags) == len(data); may return None or a reply
    OSCMessage/OSCBundle which is then sent back to the client.

    The reply depends on the first argument of the received message:
    - 'help' | 'info' : server type & version info, plus a list of
      available 'commands' understood by this handler
    - 'list' | 'ls' : a bundle of 'address <string>' messages, listing
      the server's OSC address-space
    - 'clients' | 'targets' : a bundle of
      'target osc://<host>:<port>[<prefix>] [<filter>] [...]' messages,
      listing the local Client-instance's subscribed remote clients
    Returns None on an empty request.
    """
    if len(data) == 0:
        return None
    # first argument selects the sub-command
    cmd = data.pop(0)
    reply = None
    if cmd in ('help', 'info'):
        reply = OSCBundle(self.info_prefix)
        reply.append(('server', str(self)))
        reply.append(('info_command', "ls | list : list OSC address-space"))
        reply.append(('info_command', "clients | targets : list subscribed clients"))
    elif cmd in ('ls', 'list'):
        reply = OSCBundle(self.info_prefix)
        for addr in self.callbacks.keys():
            reply.append(('address', addr))
    elif cmd in ('clients', 'targets'):
        if hasattr(self.client, 'getOSCTargetStrings'):
            # OSCMultiClient: report every subscribed target
            reply = OSCBundle(self.info_prefix)
            for trg in self.client.getOSCTargetStrings():
                reply.append(('target',) + trg)
        else:
            # plain OSCClient: at most one (connected) target
            cli_addr = self.client.address()
            if cli_addr:
                reply = OSCMessage(self.info_prefix)
                reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
    else:
        self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
    return reply
def _subscribe(self, data, client_address):
    """Handle the actual subscription.

    The provided 'data' is concatenated together to form a
    '<host>:<port>[<prefix>] [<filter>] [...]' string, which is then
    passed to parseUrlStr() & parseFilterStr() to actually retrieve
    <host>, <port>, etc. This 'long way 'round' approach (almost)
    guarantees that the subscription works, regardless of how the bits
    of the <url> are encoded in 'data'.

    Missing host/port components default to the requester's address.
    Returns a 'target ...' confirmation OSCMessage, or None (implicitly)
    when no target-string is available for the subscribed address.
    """
    url = ""
    have_port = False
    for item in data:
        # the first int argument (if any) is taken as the port number
        if (type(item) == types.IntType) and not have_port:
            url += ":%d" % item
            have_port = True
        elif type(item) in types.StringTypes:
            url += item
    (addr, tail) = parseUrlStr(url)
    (prefix, filters) = parseFilterStr(tail)
    if addr != None:
        (host, port) = addr
        # fall back to the requesting client's host and/or port
        if not host:
            host = client_address[0]
        if not port:
            port = client_address[1]
        addr = (host, port)
    else:
        addr = client_address
    self.client._setTarget(addr, prefix, filters)
    trg = self.client.getOSCTargetStr(addr)
    if trg[0] != None:
        reply = OSCMessage(self.info_prefix)
        reply.append(('target',) + trg)
        return reply
def _unsubscribe(self, data, client_address):
    """Handle the actual unsubscription.

    The provided 'data' is concatenated together to form a
    '<host>:<port>[<prefix>]' string, which is then passed to
    parseUrlStr() to actually retrieve <host>, <port> & <prefix>.
    This 'long way 'round' approach (almost) guarantees that the
    unsubscription works, regardless of how the bits of the <url>
    are encoded in 'data'.

    Returns None on success; if the target was not subscribed, the
    error is printed and also returned as an OSCMessage reply.
    """
    url = ""
    have_port = False
    for item in data:
        # the first int argument (if any) is taken as the port number
        if (type(item) == types.IntType) and not have_port:
            url += ":%d" % item
            have_port = True
        elif type(item) in types.StringTypes:
            url += item
    (addr, _) = parseUrlStr(url)
    if addr == None:
        addr = client_address
    else:
        (host, port) = addr
        if not host:
            host = client_address[0]
        if not port:
            try:
                # look the port up among the currently subscribed hosts
                (host, port) = self.client._searchHostAddr(host)
            except NotSubscribedError:
                port = client_address[1]
        addr = (host, port)
    try:
        self.client._delTarget(addr)
    except NotSubscribedError, e:
        txt = "%s: %s" % (e.__class__.__name__, str(e))
        self.printErr(txt)
        reply = OSCMessage(self.error_prefix)
        reply.append(txt)
        return reply
def subscription_handler(self, addr, tags, data, client_address):
    """Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
    if the local Client supports this (i.e. OSCMultiClient).

    Supported commands:
    - 'help' | 'info' : Reply contains server type & version info, plus a list of
      available 'commands' understood by this handler
    - 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
      messages, listing the local Client-instance's subscribed remote clients.
    - '[subscribe | listen | sendto | target] <url> [<filter> ...]' : Subscribe remote client/server at <url>,
      and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
      arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
      a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
      and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
      Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
    - '[unsubscribe | silence | nosend | deltarget] <url>' : Unsubscribe remote client/server at <url>
      If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)

    The <url> given to the subscribe/unsubscribe handler should be of the form:
    '[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
    If <host> is not specified, the IP-address of the message's source is used.
    If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
    the associated port is used.
    If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
    If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
    sent to the subscribed host.
    If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
    port and prefix all match the subscription.
    If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
    match the subscription.
    """
    if not isinstance(self.client, OSCMultiClient):
        raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
    # the command may also be encoded as the last part of the OSC-address
    addr_cmd = addr.split('/')[-1]
    if len(data):
        if data[0] in ('help', 'info'):
            reply = OSCBundle(self.info_prefix)
            reply.append(('server', str(self)))
            reply.append(('subscribe_command', "ls | list : list subscribed targets"))
            reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
            reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
            return reply
        if data[0] in ('ls', 'list'):
            reply = OSCBundle(self.info_prefix)
            for trg in self.client.getOSCTargetStrings():
                reply.append(('target',) + trg)
            return reply
        # command given as the first message argument
        if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
            return self._subscribe(data[1:], client_address)
        if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
            return self._unsubscribe(data[1:], client_address)
    # command given as the last part of the OSC-address instead
    if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
        return self._subscribe(data, client_address)
    if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
        return self._unsubscribe(data, client_address)
class ForkingOSCServer(ForkingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server forks a new process to handle each incoming request.
    """
    # Override the base-class handler with the threading request-handler
    # (the original comment here was copy-pasted from OSCServer).
    RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server starts a new thread to handle each incoming request.
    """
    # Override the base-class handler with the threading request-handler
    # (the original comment here was copy-pasted from OSCServer).
    RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
    """Base class for all OSC-related errors."""
    def __init__(self, message):
        # Also initialize Exception so 'args' is populated consistently
        # (the original skipped this, leaving e.args empty).
        Exception.__init__(self, message)
        self.message = message
    def __str__(self):
        return self.message
class OSCClientError(OSCError):
    """Class for all OSCClient errors."""
    pass
class OSCServerError(OSCError):
    """Class for all OSCServer errors."""
    pass
class NoCallbackError(OSCServerError):
    """Raised (by an OSCServer) when an OSCMessage with an 'unmatched'
    address-pattern is received, and no 'default' handler is registered.
    """
    def __init__(self, pattern):
        """'pattern' is the OSC-address of the unmatched message that
        caused this error to be raised.
        """
        # deliberately does not call the base __init__; only 'message'
        # (consumed by OSCError.__str__) is set
        self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
    """Raised (by an OSCMultiClient) when an attempt is made to
    unsubscribe a host that isn't subscribed.
    """
    def __init__(self, addr, prefix=None):
        url = getUrlStr(addr, prefix or '')
        self.message = "Target osc://%s is not subscribed" % url
######
#
# Testing Program
#
# Self-test / demonstration: exercises OSCMessage/OSCBundle construction,
# binary (de)serialization, client & server instantiation, subscriptions
# and timetagged ("held") bundles.
#
######
if __name__ == "__main__":
    import optparse
    default_port = 2222
    # define command-line options
    op = optparse.OptionParser(description="OSC.py OpenSoundControl-for-Python Test Program")
    op.add_option("-l", "--listen", dest="listen",
            help="listen on given host[:port]. default = '0.0.0.0:%d'" % default_port)
    op.add_option("-s", "--sendto", dest="sendto",
            help="send to given host[:port]. default = '127.0.0.1:%d'" % default_port)
    op.add_option("-t", "--threading", action="store_true", dest="threading",
            help="Test ThreadingOSCServer")
    op.add_option("-f", "--forking", action="store_true", dest="forking",
            help="Test ForkingOSCServer")
    op.add_option("-u", "--usage", action="help", help="show this help message and exit")
    op.set_defaults(listen=":%d" % default_port)
    op.set_defaults(sendto="")
    op.set_defaults(threading=False)
    op.set_defaults(forking=False)
    # Parse args
    (opts, args) = op.parse_args()
    # Resolve the listen-address; fall back to all interfaces / default port.
    addr, server_prefix = parseUrlStr(opts.listen)
    if addr != None and addr[0] != None:
        if addr[1] != None:
            listen_address = addr
        else:
            listen_address = (addr[0], default_port)
    else:
        listen_address = ('', default_port)
    # Build the {(host, port): [prefix, filters]} dict for an OSCMultiClient.
    targets = {}
    for trg in opts.sendto.split(','):
        (addr, prefix) = parseUrlStr(trg)
        if len(prefix):
            (prefix, filters) = parseFilterStr(prefix)
        else:
            filters = {}
        if addr != None:
            if addr[1] != None:
                targets[addr] = [prefix, filters]
            else:
                # no port given: reuse the listen-port
                targets[(addr[0], listen_address[1])] = [prefix, filters]
        elif len(prefix) or len(filters):
            targets[listen_address] = [prefix, filters]
    welcome = "Welcome to the OSC testing program."
    print welcome
    hexDump(welcome)
    print
    message = OSCMessage()
    message.setAddress("/print")
    message.append(44)
    message.append(11)
    message.append(4.5)
    message.append("the white cliffs of dover")
    print message
    hexDump(message.getBinary())
    print "\nMaking and unmaking a message.."
    strings = OSCMessage("/prin{ce,t}")
    strings.append("Mary had a little lamb")
    strings.append("its fleece was white as snow")
    strings.append("and everywhere that Mary went,")
    strings.append("the lamb was sure to go.")
    strings.append(14.5)
    strings.append(14.5)
    strings.append(-400)
    raw = strings.getBinary()
    print strings
    hexDump(raw)
    print "Retrieving arguments..."
    data = raw
    # manually decode: 6 strings (address + typetags + 4 args), 2 floats, 1 int
    for i in range(6):
        text, data = _readString(data)
        print text
    number, data = _readFloat(data)
    print number
    number, data = _readFloat(data)
    print number
    number, data = _readInt(data)
    print number
    print decodeOSC(raw)
    print "\nTesting Blob types."
    blob = OSCMessage("/pri*")
    blob.append("","b")
    blob.append("b","b")
    blob.append("bl","b")
    blob.append("blo","b")
    blob.append("blob","b")
    blob.append("blobs","b")
    blob.append(42)
    print blob
    hexDump(blob.getBinary())
    print1 = OSCMessage()
    print1.setAddress("/print")
    print1.append("Hey man, that's cool.")
    print1.append(42)
    print1.append(3.1415926)
    print "\nTesting OSCBundle"
    bundle = OSCBundle()
    bundle.append(print1)
    bundle.append({'addr':"/print", 'args':["bundled messages:", 2]})
    bundle.setAddress("/*print")
    bundle.append(("no,", 3, "actually."))
    print bundle
    hexDump(bundle.getBinary())
    # Instantiate OSCClient
    print "\nInstantiating OSCClient:"
    if len(targets):
        c = OSCMultiClient()
        c.updateOSCTargets(targets)
    else:
        c = OSCClient()
        c.connect(listen_address)  # connect back to our OSCServer
    print c
    if hasattr(c, 'getOSCTargetStrings'):
        print "Sending to:"
        for (trg, filterstrings) in c.getOSCTargetStrings():
            out = trg
            for fs in filterstrings:
                out += " %s" % fs
            print out
    # Now an OSCServer...
    print "\nInstantiating OSCServer:"
    # define a message-handler function for the server to call.
    def printing_handler(addr, tags, stuff, source):
        msg_string = "%s [%s] %s" % (addr, tags, str(stuff))
        sys.stdout.write("OSCServer Got: '%s' from %s\n" % (msg_string, getUrlStr(source)))
        # send a reply to the client.
        msg = OSCMessage("/printed")
        msg.append(msg_string)
        return msg
    if opts.threading:
        s = ThreadingOSCServer(listen_address, c, return_port=listen_address[1])
    elif opts.forking:
        s = ForkingOSCServer(listen_address, c, return_port=listen_address[1])
    else:
        s = OSCServer(listen_address, c, return_port=listen_address[1])
    print s
    # Set Server to return errors as OSCMessages to "/error"
    s.setSrvErrorPrefix("/error")
    # Set Server to reply to server-info requests with OSCMessages to "/serverinfo"
    s.setSrvInfoPrefix("/serverinfo")
    # this registers a 'default' handler (for unmatched messages),
    # an '/error' handler, an '/info' handler.
    # And, if the client supports it, a '/subscribe' & '/unsubscribe' handler
    s.addDefaultHandlers()
    s.addMsgHandler("/print", printing_handler)
    # if client & server are bound to 'localhost', server replies return to itself!
    s.addMsgHandler("/printed", s.msgPrinter_handler)
    s.addMsgHandler("/serverinfo", s.msgPrinter_handler)
    print "Registered Callback-functions:"
    for addr in s.getOSCAddressSpace():
        print addr
    print "\nStarting OSCServer. Use ctrl-C to quit."
    st = threading.Thread(target=s.serve_forever)
    st.start()
    if hasattr(c, 'targets') and listen_address not in c.targets.keys():
        print "\nSubscribing local Server to local Client"
        c2 = OSCClient()
        c2.connect(listen_address)
        subreq = OSCMessage("/subscribe")
        subreq.append(listen_address)
        print "sending: ", subreq
        c2.send(subreq)
        c2.close()
        time.sleep(0.1)
    print "\nRequesting OSC-address-space and subscribed clients from OSCServer"
    inforeq = OSCMessage("/info")
    for cmd in ("info", "list", "clients"):
        inforeq.clearData()
        inforeq.append(cmd)
        print "sending: ", inforeq
        c.send(inforeq)
        time.sleep(0.1)
    print2 = print1.copy()
    print2.setAddress('/noprint')
    print "\nSending Messages"
    for m in (message, print1, print2, strings, bundle):
        print "sending: ", m
        c.send(m)
        time.sleep(0.1)
    print "\nThe next message's address will match both the '/print' and '/printed' handlers..."
    print "sending: ", blob
    c.send(blob)
    time.sleep(0.1)
    print "\nBundles can be given a timestamp.\nThe receiving server should 'hold' the bundle until its time has come"
    waitbundle = OSCBundle("/print")
    waitbundle.setTimeTag(time.time() + 5)
    if s.__class__ == OSCServer:
        waitbundle.append("Note how the (single-thread) OSCServer blocks while holding this bundle")
    else:
        waitbundle.append("Note how the %s does not block while holding this bundle" % s.__class__.__name__)
    print "Set timetag 5 s into the future"
    print "sending: ", waitbundle
    c.send(waitbundle)
    time.sleep(0.1)
    print "Recursing bundles, with timetags set to 10 s [25 s, 20 s, 10 s]"
    bb = OSCBundle("/print")
    bb.setTimeTag(time.time() + 10)
    b = OSCBundle("/print")
    b.setTimeTag(time.time() + 25)
    b.append("held for 25 sec")
    bb.append(b)
    b.clearData()
    b.setTimeTag(time.time() + 20)
    b.append("held for 20 sec")
    bb.append(b)
    b.clearData()
    b.setTimeTag(time.time() + 15)
    b.append("held for 15 sec")
    bb.append(b)
    if s.__class__ == OSCServer:
        bb.append("Note how the (single-thread) OSCServer handles the bundle's contents in order of appearance")
    else:
        bb.append("Note how the %s handles the sub-bundles in the order dictated by their timestamps" % s.__class__.__name__)
        bb.append("Each bundle's contents, however, are processed in random order (dictated by the kernel's threading)")
    print "sending: ", bb
    c.send(bb)
    time.sleep(0.1)
    print "\nMessages sent!"
    print "\nWaiting for OSCServer. Use ctrl-C to quit.\n"
    try:
        while True:
            time.sleep(30)
    except KeyboardInterrupt:
        print "\nClosing OSCServer."
        s.close()
        print "Waiting for Server-thread to finish"
        st.join()
        print "Closing OSCClient"
        c.close()
        print "Done"
    sys.exit(0)
| ilzxc/m158a-node_python_odot | python/python2/pyOSC-0.3.5b-5294/OSC.py | Python | bsd-2-clause | 79,549 | [
"VisIt"
] | 1355b741a0af433286d21c8f7824209433b8b099b737c54f27c8416d5809bfe3 |
"""
SEMI-PASSIVE Plugin for Testing for Session Management Schema (OWASP-SM-001)
https://www.owasp.org/index.php/Testing_for_Session_Management_Schema_%28OWASP-SM-001%29
"""
import json
from collections import defaultdict
from owtf.config import config_handler
from owtf.requester.base import requester
from owtf.managers.transaction import get_transaction_by_id, search_by_regex_names
DESCRIPTION = "Normal requests to gather session management info"
def run(PluginInfo):
    """Gather session-management info with normal (semi-passive) requests.

    Step 1 - find cached transactions whose responses set cookies.
    Step 2 - request each such URL 10 more times.
    Step 3 - collect every observed session-token value per cookie name,
             leaving the randomness analysis to the user.

    Returns a JSON object mapping cookie name -> list of observed values.
    """
    # Use a set for O(1) de-duplication of already-processed URLs
    # (was a list with O(n) membership tests).
    seen_urls = set()
    cookie_dict = defaultdict(list)
    # Transactions with cookies. 'txn_id' instead of the builtin-shadowing 'id'.
    for txn_id in search_by_regex_names(
        [config_handler.get_val("HEADERS_FOR_COOKIES")]
    ):
        transaction = get_transaction_by_id(txn_id)
        if not transaction:
            continue
        # Limitation: Not Checking POST, normally not a problem
        url = transaction.url
        if url in seen_urls:  # Only if URL not already processed!
            continue
        seen_urls.add(url)
        for _ in range(10):  # Get more cookies to perform analysis
            txn = requester.get_transaction(False, url)
            for cookie in txn.get_session_tokens():
                cookie_dict[cookie.name].append(str(cookie.value))
    # Leave the randomness test to the user
    return json.dumps(cookie_dict)
| owtf/owtf | owtf/plugins/web/semi_passive/Session_Management_Schema@OWTF-SM-001.py | Python | bsd-3-clause | 1,677 | [
"VisIt"
] | e284f017addf4746e87a4f0881d507728c1f31c4cf9d65b7654672f9d4dbd01b |
from __future__ import division
import numpy as np
import time
from scipy.signal import convolve2d
def lpq(img, winSize=7, freqestim=1, mode='h'):
    """Compute the Local Phase Quantization (LPQ) descriptor of a 2-D image.

    Parameters
    ----------
    img : 2-D array-like
        Grayscale input image (must be at least winSize x winSize).
    winSize : int
        Side length of the local (square) STFT window; should be odd.
    freqestim : int
        Frequency-estimation method; only 1 (STFT, uniform window) is
        implemented -- other values now raise ValueError instead of the
        previous NameError.
    mode : str
        'h'  -> 256-bin histogram of LPQ codewords (default)
        'nh' -> normalized histogram
        'im' -> uint8 LPQ code image

    Returns a flat Python list (histogram counts or code-image pixels).
    """
    if freqestim != 1:
        raise ValueError("only freqestim=1 (STFT uniform window) is implemented")

    STFTalpha = 1 / winSize  # alpha in STFT approaches
    # Compute descriptor responses only where the neighborhood fits fully.
    # Use 'same' to include all pixels (extrapolates the image with zeros).
    convmode = 'valid'

    img = np.float64(img)  # work in double precision
    r = (winSize - 1) / 2  # window radius
    x = np.arange(-r, r + 1)[np.newaxis]  # spatial coordinates in the window

    # Basic STFT filters (uniform window).
    w0 = np.ones_like(x)
    w1 = np.exp(-2 * np.pi * x * STFTalpha * 1j)
    w2 = np.conj(w1)

    # Frequency response at four frequency points via separable convolutions.
    filterResp1 = convolve2d(convolve2d(img, w0.T, convmode), w1, convmode)
    filterResp2 = convolve2d(convolve2d(img, w1.T, convmode), w0, convmode)
    filterResp3 = convolve2d(convolve2d(img, w1.T, convmode), w1, convmode)
    filterResp4 = convolve2d(convolve2d(img, w1.T, convmode), w2, convmode)

    # Stack real & imaginary parts separately -> 8 planes per pixel.
    freqResp = np.dstack([filterResp1.real, filterResp1.imag,
                          filterResp2.real, filterResp2.imag,
                          filterResp3.real, filterResp3.imag,
                          filterResp4.real, filterResp4.imag])

    # Quantize the sign of each plane into one bit -> 8-bit codewords (0..255).
    inds = np.arange(freqResp.shape[2])[np.newaxis, np.newaxis, :]
    LPQdesc = ((freqResp > 0) * (2 ** inds)).sum(2)

    # Switch format to uint8 if the LPQ code image is required as output.
    if mode == 'im':
        LPQdesc = np.uint8(LPQdesc)

    if mode == 'nh' or mode == 'h':
        # BUGFIX: use 257 bin edges so all 256 codes get their own bin;
        # range(256) produced only 255 bins, merging codes 254 and 255.
        LPQdesc = np.histogram(LPQdesc.flatten(), bins=range(257))[0]
    if mode == 'nh':
        # normalize histogram to unit sum
        LPQdesc = LPQdesc / LPQdesc.sum()

    return np.asarray(LPQdesc).reshape(-1).tolist()
def colbp(img, delta=1, a=2):
    """Co-occurrence histogram of 4-bit LBP codes at displacement (a, 0).

    Each pixel gets a 4-bit LBP code (up/right/down/left comparisons at
    distance 'delta'), one-hot encoded into f_dict. The 16x16 accumulator
    sums outer products of the code at (x - a, y) with the code at (x, y),
    where x wraps circularly past the image border (x - x_max).
    Returns the 16x16 matrix flattened to a 256-element Python list.

    NOTE(review): looks like a CoALBP-style feature with only the (a, 0)
    displacement implemented (see the '(dr,0)' marker) -- confirm intent.
    """
    x_max = np.shape(img)[0]
    y_max = np.shape(img)[1]
    f_dict = {}
    h_sum1 = np.zeros((16, 16))
    # iterate 'a' extra rows/cols so border pixels can pair with wrapped ones
    for x in range(x_max + a):
        for y in range(y_max + a):
            if x < x_max and y < y_max:
                up = 1 if y - delta >= 0 and img[x, y] > img[x, y - delta] else 0
                down = 1 if y + delta < y_max and img[x, y] > img[x, y + delta] else 0
                left = 1 if x - delta >= 0 and img[x, y] > img[x - delta, y] else 0
                right = 1 if x + delta < x_max and img[x, y] > img[x + delta, y] else 0
                # clockwise
                binary = str(up) + str(right) + str(down) + str(left)
                current_lbp = int(binary, 2)
                # F function: one-hot encoding of the 4-bit code
                f_dict[(x, y)] = []
                for label in range(2 ** 4):
                    current_f = 1 if label == current_lbp else 0
                    f_dict[(x, y)].append(current_f)
            # wrap out-of-range coordinates circularly back into the image
            x_temp = x if x < x_max else x - x_max
            y_temp = y if y < y_max else y - y_max
            # f_a is computed every iteration but only used in the branch below
            f_a = np.array(f_dict[(x_temp, y_temp)])[np.newaxis]
            # (dr,0)
            if x >= a and y < y_max:
                f = np.array(f_dict[(x - a, y)])[np.newaxis]
                f_trans = f_a.T
                # outer product: co-occurrence of code pair (f_a, f)
                h_sum1 += f * f_trans
    return np.asarray(h_sum1).reshape(-1).tolist()
def lbp_plus(img):
    """Compute a 4-bit '+'-shaped LBP code for every pixel.

    Each pixel is compared against its up/right/down/left neighbour at
    distance 1; the four result bits (in that clockwise order) form a
    code in 0..15.

    Returns (codes_in_order_of_first_appearance, {(x, y): code}).
    """
    rows = np.shape(img)[0]
    cols = np.shape(img)[1]
    step = 1
    seen = []
    code_map = {}
    print('x_max: {} y_max: {}'.format(rows, cols))
    for i in range(rows):
        for j in range(cols):
            bit_up = 1 if j - step >= 0 and img[i, j] > img[i, j - step] else 0
            bit_down = 1 if j + step < cols and img[i, j] > img[i, j + step] else 0
            bit_left = 1 if i - step >= 0 and img[i, j] > img[i - step, j] else 0
            bit_right = 1 if i + step < rows and img[i, j] > img[i + step, j] else 0
            # clockwise bit order: up, right, down, left
            code = (bit_up << 3) | (bit_right << 2) | (bit_down << 1) | bit_left
            code_map[(i, j)] = code
            if code not in seen:
                seen.append(code)
    return seen, code_map
def f_function(img, labels, labels_dict):
    """Return a one-hot (16-element) encoding of each pixel's LBP label.

    Parameters
    ----------
    img : 2-D array-like
        Only its shape is used, to bound the iteration.
    labels : unused
        Kept for backward compatibility with existing callers.
    labels_dict : dict
        Maps (x, y) -> LBP code in 0..15 (as produced by lbp_plus()).

    Returns {(x, y): [0/1] * 16} with exactly one 1 per pixel.

    Cleanup: removed the dead per-iteration time.time() instrumentation,
    the unused 'lbps_labels' local, and the O(16) inner comparison loop
    (replaced by direct one-hot construction).
    """
    rows = np.shape(img)[0]
    cols = np.shape(img)[1]
    print('x_max: {} y_max: {}'.format(rows, cols))
    f_dict = {}
    for x in range(rows):
        for y in range(cols):
            one_hot = [0] * 16
            one_hot[labels_dict[(x, y)]] = 1
            f_dict[(x, y)] = one_hot
    return f_dict
def h_matrix(img, f_dict):
    """Accumulate a 16x16 co-occurrence matrix of one-hot LBP encodings.

    For every pixel (x, y), the outer product of the encoding at
    (x + 2, y) (zeros when that row is off the bottom edge) with the
    encoding at (x, y) is added to the accumulator.
    Returns the matrix flattened to a 256-element numpy array.
    """
    rows = np.shape(img)[0]
    cols = np.shape(img)[1]
    shift = 2
    acc = np.zeros((16, 16))
    for x in range(rows):
        for y in range(cols):
            here = np.asarray(f_dict[(x, y)], dtype=float)
            if x + shift < rows:
                there = np.asarray(f_dict[(x + shift, y)], dtype=float)
            else:
                there = np.zeros(16)
            # outer product: acc[i, j] += there[i] * here[j]
            acc += np.outer(there, here)
    return acc.reshape(-1)
| psilva-leo/AutonomousDoorman | system/livenessDetection.py | Python | mit | 5,871 | [
"Gaussian"
] | 356a824301edc5f190a338ee71fa16bd6ce172e8a38ab5be353d9471ab2a5e0c |
import logging
import sys

# Need to import rdBase to properly wrap exceptions
# otherwise they will leak memory
from . import rdBase

try:
    from .rdBase import rdkitVersion as __version__
except ImportError:
    __version__ = 'Unknown'
    raise

logger = logging.getLogger("rdkit")

# if we are running in a jupyter notebook, enable the extensions
try:
    kernel_name = get_ipython().__class__.__name__
    if kernel_name == 'ZMQInteractiveShell':
        logger.info("Enabling RDKit %s jupyter extensions" % __version__)
        from rdkit.Chem.Draw import IPythonConsole
        rdBase.LogToPythonStderr()
except Exception:
    # get_ipython() only exists inside IPython (NameError otherwise); any
    # other failure here must not break a plain 'import rdkit'.  Narrowed
    # from a bare 'except:' so SystemExit/KeyboardInterrupt still propagate.
    pass

# Do logging setup at the end, so users can suppress the
# "enabling jupyter" message at the root logger.
log_handler = logging.StreamHandler(sys.stderr)
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
logger.propagate = False

# Uncomment this to use Python logging by default:
# rdBase.LogToPythonLogger()
"RDKit"
] | 8ff6bd167fa87581b6c4ebeb320f0863259924a1838ce3f7a99d0c1e2f08fb8a |
import vtk
class VTKAlgorithm(object):
    """Base class for Python-implemented algorithms used with
    vtkPythonAlgorithm.

    Subclasses normally override RequestData() (and optionally the other
    Request*() hooks). Initialize() configures the port counts,
    ProcessRequest() routes pipeline passes to the Request*() methods, and
    FillInputPortInformation()/FillOutputPortInformation() advertise the
    input/output data types taken from the corresponding data members.
    """

    def __init__(self, nInputPorts=1, inputType='vtkDataSet',
                 nOutputPorts=1, outputType='vtkPolyData'):
        """Record the default port counts and data types consulted by the
        initialization and port-information methods."""
        self.NumberOfInputPorts = nInputPorts
        self.NumberOfOutputPorts = nOutputPorts
        self.InputType = inputType
        self.OutputType = outputType

    def Initialize(self, vtkself):
        """Apply NumberOfInputPorts/NumberOfOutputPorts to the VTK object."""
        vtkself.SetNumberOfInputPorts(self.NumberOfInputPorts)
        vtkself.SetNumberOfOutputPorts(self.NumberOfOutputPorts)

    def GetInputData(self, inInfo, i, j):
        """Return the input data object for port *i*, connection *j*."""
        info = inInfo[i].GetInformationObject(j)
        return info.Get(vtk.vtkDataObject.DATA_OBJECT())

    def GetOutputData(self, outInfo, i):
        """Return the output data object for port *i*."""
        info = outInfo.GetInformationObject(i)
        return info.Get(vtk.vtkDataObject.DATA_OBJECT())

    def RequestDataObject(self, vtkself, request, inInfo, outInfo):
        """Hook for output data object creation. The default is a no-op;
        override only when OutputType alone cannot describe the output."""
        return 1

    def RequestInformation(self, vtkself, request, inInfo, outInfo):
        """Hook for providing meta-data to the downstream pipeline."""
        return 1

    def RequestUpdateExtent(self, vtkself, request, inInfo, outInfo):
        """Hook for modifying the data request sent upstream."""
        return 1

    def RequestData(self, vtkself, request, inInfo, outInfo):
        """Execute the algorithm; subclasses must implement this."""
        raise NotImplementedError('RequestData must be implemented')

    def ProcessRequest(self, vtkself, request, inInfo, outInfo):
        """Route a pipeline pass to the matching Request*() method."""
        ddp = vtk.vtkDemandDrivenPipeline
        if request.Has(ddp.REQUEST_DATA_OBJECT()):
            return self.RequestDataObject(vtkself, request, inInfo, outInfo)
        elif request.Has(ddp.REQUEST_INFORMATION()):
            return self.RequestInformation(vtkself, request, inInfo, outInfo)
        elif request.Has(vtk.vtkStreamingDemandDrivenPipeline.REQUEST_UPDATE_EXTENT()):
            return self.RequestUpdateExtent(vtkself, request, inInfo, outInfo)
        elif request.Has(ddp.REQUEST_DATA()):
            return self.RequestData(vtkself, request, inInfo, outInfo)
        return 1

    def FillInputPortInformation(self, vtkself, port, info):
        """Advertise InputType as the required input data type."""
        info.Set(vtk.vtkAlgorithm.INPUT_REQUIRED_DATA_TYPE(), self.InputType)
        return 1

    def FillOutputPortInformation(self, vtkself, port, info):
        """Advertise OutputType as the default output data type."""
        info.Set(vtk.vtkDataObject.DATA_TYPE_NAME(), self.OutputType)
        return 1
| berendkleinhaneveld/VTK | Wrapping/Python/vtk/util/vtkAlgorithm.py | Python | bsd-3-clause | 3,826 | [
"VTK"
] | bc00336217593b6b5eb4961b4bb40bdbce15dc016be506f5fc374650ecb9daae |
#!/usr/bin/env python
"""
Simple JavaScript Checker Module for Grabber v0.1
Copyright (C) 2006 - Romain Gaucher - http://rgaucher.info
- Look at the JavaScript Source...
"""
import sys, re, os
from spider import htmlencode
from xml.sax import * # Need PyXML [http://pyxml.sourceforge.net/]
# JavaScript Configuration variables
jsAnalyzerBin= None
jsAnalyzerInputParam = None
jsAnalyzerOutputParam = None
jsAnalyzerConfParam = None
jsAnalyzerConfFile= None
jsAnalyzerExtension = None
jsAnalyzerPattern = None
# { 'FILENAME' : { 'line' : ['error 1', 'error 2'] } }
jsDatabase = {}
"""
<?xml version="1.0"?>
<!-- JavaScript Source Code Analyzer configuration file -->
<javascript version="0.1">
<!--
Analyzer information, here JavaScript Lint by Matthias Miller
http://www.JavaScriptLint.com
-->
<analyzer>
<path input="-process" output="">C:\server\jsl-0.3.0\jsl.exe</path>
<configuration param="-conf">C:\server\jsl-0.3.0\jsl.grabber.conf</configuration>
<extension>js</extension>
</analyzer>
</javascript>
"""
def normalize_whitespace(text):
    """Collapse every run of whitespace in *text* to a single space and
    strip leading/trailing whitespace."""
    pieces = text.split()
    return " ".join(pieces)
def clear_whitespace(text):
    """Remove every space character (only U+0020, not tabs/newlines)."""
    return "".join(ch for ch in text if ch != " ")
# Handle the XML file with a SAX Parser
class JavaScriptConfHandler(ContentHandler):
    """SAX handler for javascript.conf.xml.

    Populates the module-level jsAnalyzer* configuration globals from the
    <analyzer> section (binary path, input/output/config command-line
    parameters, file extension) and from a <pattern> element.
    """

    def __init__(self):
        # True while parsing is inside an <analyzer> element.
        self.inAnalyzer = False
        # Accumulates character data of the current element.
        self.string = ""

    def startElement(self, name, attrs):
        global jsAnalyzerInputParam, jsAnalyzerOutputParam, jsAnalyzerConfParam
        self.string = ""
        # NOTE(review): currentKeys is assigned but never read anywhere in
        # this class — presumably leftover; confirm before removing.
        self.currentKeys = []
        if name == 'analyzer':
            self.inAnalyzer = True
        elif name == 'path' and self.inAnalyzer:
            # store the attributes input and output
            if 'input' in attrs.keys() and 'output' in attrs.keys():
                jsAnalyzerInputParam = attrs.getValue('input')
                jsAnalyzerOutputParam = attrs.getValue('output')
            else:
                raise KeyError("JavaScriptXMLConf: <path> needs 'input' and 'output' attributes")
        elif name == 'configuration' and self.inAnalyzer:
            # store the attribute 'param'
            if 'param' in attrs.keys():
                jsAnalyzerConfParam = attrs.getValue('param')
            else:
                raise KeyError("JavaScriptXMLConf: <configuration> needs 'param' attribute")

    def characters(self, ch):
        # Character data may arrive in several chunks; concatenate them all.
        self.string = self.string + ch

    def endElement(self, name):
        # Element text is complete here; store it in the matching global.
        global jsAnalyzerBin, jsAnalyzerConfFile, jsAnalyzerExtension,jsAnalyzerPattern
        if name == 'configuration':
            jsAnalyzerConfFile = normalize_whitespace(self.string)
        elif name == 'extension' and self.inAnalyzer:
            jsAnalyzerExtension = normalize_whitespace(self.string)
        elif name == 'path' and self.inAnalyzer:
            jsAnalyzerBin = normalize_whitespace(self.string)
        elif name == "analyzer":
            self.inAnalyzer = False
        elif name == "pattern":
            jsAnalyzerPattern = normalize_whitespace(self.string)
def execCmd(program, args):
    """Run *program* with the given argument string through a shell pipe
    and return its standard output as a list of lines."""
    pipe = os.popen(program + " " + args)
    lines = pipe.readlines()
    pipe.close()
    return lines
def generateListOfFiles(localDB, urlGlobal):
    # Runs the configured static-analysis tool over every mirrored file in
    # localDB and collects its findings into the jsDatabase global as
    # { filename : { line : [error, ...] } }.
    global jsDatabase
    """
    Create a ghost in ./local/crystal/current and /local/crystal/analyzed
    And run the SwA tool
    """
    # NOTE(review): regScripts is built but never used below.
    regScripts = re.compile(r'(.*).' + jsAnalyzerExtension + '$', re.I)
    # Build a regex from the analyzer's output pattern: escape regex
    # metacharacters, then substitute capture groups for the placeholders.
    # escape () and []
    localRegOutput = jsAnalyzerPattern
    localRegOutput = localRegOutput.replace('(', '\(')
    localRegOutput = localRegOutput.replace(')', '\)')
    localRegOutput = localRegOutput.replace('[', '\[')
    localRegOutput = localRegOutput.replace(']', '\]')
    localRegOutput = localRegOutput.replace(':', '\:')
    localRegOutput = localRegOutput.replace('__LINE__', '(\d+)')
    localRegOutput = localRegOutput.replace('__FILENAME__', '(.+)')
    localRegOutput = localRegOutput.replace('__ERROR__', '(.+)')
    regOutput = re.compile('^'+localRegOutput+'$', re.I)
    print "Running the static analysis tool..."
    for file in localDB:
        print file
        # Map the crawled URL onto its local mirror under ./local.
        file = file.replace(urlGlobal + '/', '')
        fileIn = os.path.abspath(os.path.join('./local', file))
        # Assemble the analyzer command line: config file, input file and
        # (when configured) an explicit output file.
        cmdLine = jsAnalyzerConfParam + " " +jsAnalyzerConfFile + " " + jsAnalyzerInputParam + " " + fileIn
        if jsAnalyzerOutputParam != "":
            cmdLine += " " + jsAnalyzerOutputParam + " " + fileIn+'.jslint'
        output = execCmd(jsAnalyzerBin, cmdLine)
        # Analyze the output
        for o in output:
            lO = o.replace('\n','')
            if regOutput.match(lO):
                out = regOutput.search(lO)
                # Group 2 is taken as the line number and group 3 as the
                # message — assumes the pattern orders __FILENAME__,
                # __LINE__, __ERROR__; TODO confirm against the conf file.
                if file not in jsDatabase:
                    jsDatabase[file] = {}
                line = clear_whitespace(out.group(2))
                if line not in jsDatabase[file]:
                    jsDatabase[file][line] = []
                jsDatabase[file][line].append(normalize_whitespace(out.group(3)))
    # sort the dictionary
    # + file
    # + lines
def process(urlGlobal, localDB, attack_list):
    """
    Crystal Module entry point

    Reads the analyzer configuration from javascript.conf.xml, runs the
    static analysis over the locally mirrored files, and writes an XML
    report to results/javascript_Grabber.xml.
    """
    # NOTE(review): attack_list is accepted but never used in this module.
    print "JavaScript Module Start"
    # The configuration file is mandatory; bail out early when missing.
    try:
        f = open("javascript.conf.xml", 'r')
        f.close()
    except IOError:
        print "The javascript module needs the 'javascript.conf.xml' configuration file."
        sys.exit(1)
    parser = make_parser()
    js_handler = JavaScriptConfHandler()
    # Tell the parser to use our handler
    parser.setContentHandler(js_handler)
    try:
        parser.parse("javascript.conf.xml")
    except KeyError, e:
        # Raised by JavaScriptConfHandler on a malformed configuration.
        print e
        sys.exit(1)
    # only a white box testing...
    generateListOfFiles(localDB,urlGlobal)
    # create the report
    plop = open('results/javascript_Grabber.xml','w')
    plop.write("<javascript>\n")
    plop.write("<site>\n")
    for file in jsDatabase:
        plop.write("\t<file name='%s'>\n" % file)
        for line in jsDatabase[file]:
            # Multiple findings on a line get one <error> child each;
            # a single finding is written inline in the <line> element.
            if len(jsDatabase[file][line]) > 1:
                plop.write("\t\t<line number='%s'>\n" % line)
                for error in jsDatabase[file][line]:
                    plop.write("\t\t\t<error>%s</error>\n" % htmlencode(error))
                plop.write("\t\t</line>\n")
            else:
                plop.write("\t\t<line number='%s'>%s</line>\n" % (line, htmlencode(jsDatabase[file][line][0])))
        plop.write("\t</file>\n")
    plop.write("</site>\n")
    plop.write("</javascript>\n")
    plop.close()
| pwnieexpress/raspberry_pwn | src/pentest/grabber/javascript.py | Python | gpl-3.0 | 5,929 | [
"CRYSTAL"
] | 77342eff84da0a07de3b4af13169a77103807ee5a5413f28c378d7805c0c112c |
# Package imports
from ..workspace import Block, Disconnected, Cancelled, Aborted, anyOfStackIs
# Octopus Imports
from octopus.constants import State
from octopus.sequence.error import NotRunning, AlreadyRunning, NotPaused
# Twisted Imports
from twisted.internet import reactor, defer
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
# Python Imports
from time import time as now
import re
class controls_run (Block):
    # Marker block for the workspace's top-level "run" stack; all behaviour
    # is inherited unchanged from Block.
    pass
class controls_parallel (Block):
    """Runs every connected STACK input concurrently.

    Completes when all stacks have finished. Stacks that end with Cancelled
    or Disconnected are treated as finished; any other failure (e.g.
    Aborted) fails the whole block. Stacks connected while the block is
    running (or paused) are picked up on the fly.
    """

    def _getStacks (self):
        # All connected inputs whose name starts with "STACK".
        return [
            input for name, input in self.inputs.items()
            if name[:5] == "STACK" and input is not None
        ]

    @defer.inlineCallbacks
    def _run (self):
        self._deferredList = []
        self.finishedCount = 0

        stacks = set(self._getStacks())
        complete = defer.Deferred()
        # Stacks attached while paused, to be started when resumed.
        runOnResume = []

        if len(stacks) == 0:
            return

        def _trapCancelledDisconnected (failure):
            # Swallow Cancelled/Disconnected; anything else propagates.
            # (trap() re-raises non-matching failures, so the Aborted check
            # below can never match — kept for parity with the original.)
            error = failure.trap(Cancelled, Disconnected)
            if error is Aborted:
                return failure

        def _errback (failure):
            self.finishedCount += 1
            if not complete.called:
                complete.errback(failure)

        def _callback (result):
            self.finishedCount += 1
            # Fire `complete` once the last stack has reported in.
            if self.finishedCount == len(self._deferredList):
                if not complete.called:
                    complete.callback(None)

        def append (deferred):
            # Track a stack's completion deferred.
            self._deferredList.append(deferred)
            deferred.addErrback(_trapCancelledDisconnected)
            deferred.addCallbacks(_callback, _errback)

        @self.on('connectivity-changed')
        def onConnectivityChanged (data):
            updatedStacks = set(self._getStacks())

            # Stacks added
            for stack in updatedStacks - stacks:
                if self.state is State.RUNNING:
                    try:
                        stack.reset()
                        append(stack.run())
                    except AlreadyRunning:
                        if stack._complete is not None:
                            append(stack._complete)
                elif self.state is State.PAUSED:
                    if anyOfStackIs(stack, [State.PAUSED]):
                        append(stack._complete)
                    elif anyOfStackIs(stack, [State.RUNNING]):
                        stack.pause()
                        append(stack._complete)
                    else:
                        # Not started yet: queue it to start on resume.
                        stack.reset()
                        runOnResume.append(stack)
                        self._onResume = resume

                stacks.add(stack)

            # Stacks removed
            for stack in stacks - updatedStacks:
                stacks.discard(stack)

        def resume ():
            for stack in runOnResume:
                try:
                    append(stack.run())
                except AlreadyRunning:
                    pass

            # BUG FIX: clear the shared list in place. The original did
            # `runOnResume = []`, which made runOnResume local to resume()
            # and raised UnboundLocalError before the loop above could run.
            del runOnResume[:]

        try:
            for stack in stacks:
                try:
                    stack.reset()
                    append(stack.run())
                except AlreadyRunning:
                    pass

            yield complete
        finally:
            # Always detach the connectivity listener when finished.
            self.off('connectivity-changed', onConnectivityChanged)
class controls_if (Block):
    """Multi-branch conditional.

    Evaluates IF0, IF1, ... in ascending index order, runs the DO<i> stack
    for the first true condition, and otherwise runs the ELSE stack when
    one is connected.
    """

    def _nextInput (self, i = -1):
        # Find the next input after IF{i}
        # Returns (index, input) for the first connected IF input (in the
        # inputs-dict iteration order) whose numeric suffix is greater than
        # i, or (None, None) when none remains.
        return next((
            (int(name[2:]), input) for name, input in self.inputs.items()
            if input is not None and name[:2] == "IF" and int(name[2:]) > i
        ), (None, None))

    @defer.inlineCallbacks
    def _run (self):
        i, input = self._nextInput()

        # Try each IF input, in ascending numerical order.
        while input is not None:
            try:
                result = yield input.eval()
            except (Cancelled, Disconnected):
                # A condition that cannot be evaluated counts as False.
                result = False

            # Attempt to run DO{i} if IF{i} was True.
            if result:
                try:
                    action = self.getInput("DO" + str(i))
                    yield action.run()
                except Disconnected:
                    yield self.cancel()
                except (KeyError, Cancelled):
                    # Missing or cancelled DO stack: branch still handled.
                    pass

                # Skip any further conditions
                return

            # Move to the next condition
            i, input = self._nextInput(i)

        # Run the else clause if it exists.
        try:
            action = self.getInput("ELSE")
        except KeyError:
            action = None

        if action is not None:
            try:
                yield action.run()
            except Disconnected:
                yield self.cancel()
            except Cancelled:
                pass
class controls_log (Block):
    """Evaluates the TEXT input and emits it to the workspace log at
    "info" level."""

    @defer.inlineCallbacks
    def _run (self):
        text = yield self.getInputValue("TEXT", "")
        self.emitLogMessage(text, "info")
class controls_wait (Block):
    """Delays execution for a duration given by the TIME input.

    TIME may be a number of seconds or a human-readable string such as
    "1h 30m 10s 500ms". The duration is re-evaluated whenever any variable
    referenced by the TIME input changes, and the pending timer is adjusted
    accordingly. Pausing the block suspends the timer.
    """

    # Parses strings like "2h 5min 30s 250ms"; every component is optional.
    _wait_re = re.compile("(?:(\d+) *h(?:our(?:s)?)?)? *(?:(\d+) *m(?:in(?:ute(?:s)?)?)?)? *(?:(\d+) *s(?:ec(?:ond(?:s)?)?)?)? *(?:(\d+) *m(?:illi)?s(?:ec(?:ond(?:s)?)?)?)?", re.I)

    def __init__ (self, workspace, id):
        Block.__init__(self, workspace, id)
        self._c = None      # pending reactor timer (IDelayedCall)
        self._start = 0     # wall-clock time the current timer was started
        self._delay = 0     # accumulated time spent paused

    def _run (self):
        complete = defer.Deferred()
        self.duration = None
        self._variables = []

        @defer.inlineCallbacks
        def _update (data = None):
            # Recompute the wait duration from TIME and (re)schedule the
            # reactor timer to match.
            if self.state is not State.RUNNING:
                return

            time = yield self.getInputValue("TIME", 0)
            timeType = type(time)

            if timeType in (int, float):
                duration = time
            elif timeType is str:
                match = self._wait_re.match(time)

                if match is None:
                    raise Exception('{:s} is not a valid time string'.format(time))

                # Convert human-readable time to number of seconds
                match = [int(x or 0) for x in match.groups()]
                duration = \
                    (match[0] * 3600) + \
                    (match[1] * 60) + match[2] + \
                    (match[3] * 0.001)
            else:
                raise Exception('{:s} is not a valid time'.format(time))

            # Nothing to do if the duration is unchanged.
            if duration == self.duration:
                return
            else:
                self.duration = duration

            if not (self._c and self._c.active()):
                # No live timer: start one now.
                self._start = now()
                self._c = reactor.callLater(duration, _done)
            else:
                # Timer already running: adjust for the new duration,
                # honouring the time already elapsed.
                self._c.reset(max(0, duration - (now() - self._start)))

        def _tryUpdate (data = None):
            _update().addErrback(_error)

        def _setListeners (data = None):
            # Re-subscribe to change events of the variables currently
            # referenced by the TIME input.
            for v in self._variables:
                v.off('change', _tryUpdate)

            try:
                self._variables = set(self.getInput("TIME").getReferencedVariables())
            except (KeyError, AttributeError):
                self._variables = []

            for v in self._variables:
                v.on('change', _tryUpdate)

            _tryUpdate()

        def _removeListeners ():
            self.off("value-changed", _setListeners)
            self.off("connectivity-changed", _setListeners)

            for v in self._variables:
                v.off('change', _tryUpdate)

        def _done ():
            # Timer fired: the wait is over.
            _removeListeners()
            complete.callback(None)

        def _error (failure = None):
            # Evaluation failed: tear down the timer and fail the block.
            _removeListeners()

            try:
                self._c.cancel()
            except (AttributeError, AlreadyCalled, AlreadyCancelled):
                pass

            try:
                complete.errback(failure)
            except defer.AlreadyCalledError:
                pass

        self.on("value-changed", _setListeners)
        self.on("connectivity-changed", _setListeners)
        _setListeners()

        return complete

    def _pause (self):
        d = Block._pause(self)

        # Freeze the timer, remembering how much time remains.
        complete = self._c.func # i.e. _done
        self._c.cancel()
        remaining = self._c.getTime() - now()
        self._pauseTime = now()

        def on_resume ():
            # Account for the paused interval, then restart the timer with
            # the remaining time.
            self._delay += now() - self._pauseTime
            self._c = reactor.callLater(remaining, complete)
            # TODO: announce new delay of round(self._delay, 4))

        self._onResume = on_resume

        return d

    def _reset (self):
        # BUG FIX: clear the timer state *before* delegating to
        # Block._reset. In the original these three assignments came after
        # the return statement and were unreachable, so stale timer state
        # survived a reset.
        self._c = None
        self._start = 0
        self._delay = 0
        return Block._reset(self)

    def _cancel (self, abort = False):
        # Cancel the timer, ignoring any error if the timer
        # doesn't exist or has finished already.
        try:
            complete = self._c.func # i.e. _done
            self._c.cancel()
            reactor.callLater(0, complete)
        except (AttributeError, AlreadyCalled, AlreadyCancelled):
            pass
class controls_wait_until (Block):
    """Blocks until the CONDITION input evaluates to True.

    The condition is re-tested whenever a variable it references changes,
    whenever connectivity or values change, and when the block resumes
    from pause.
    """

    def _run (self):
        complete = defer.Deferred()
        self._variables = []

        @defer.inlineCallbacks
        def runTest (data = None):
            # Evaluate CONDITION once; finish the block when it is True.
            if self.state is State.PAUSED:
                # Defer the test until the block is resumed.
                self._onResume = runTest
                return
            elif self.state is not State.RUNNING:
                # Block stopped/cancelled: complete without error.
                removeListeners()
                complete.callback(None)
                defer.returnValue(None)

            try:
                result = yield self.getInputValue("CONDITION", True)
            except Exception as e:
                removeListeners()
                complete.errback(e)
            else:
                if result == True:
                    done()

        def setListeners (data = None):
            # Re-subscribe to change events of the variables that the
            # CONDITION expression currently references.
            for v in self._variables:
                v.off('change', runTest)

            try:
                self._variables = set(self.getInput("CONDITION").getReferencedVariables())
            except AttributeError:
                self._variables = []

            for v in self._variables:
                v.on('change', runTest)

            runTest()

        def removeListeners ():
            self.off("connectivity-changed", setListeners)
            self.off("value-changed", runTest)

            for v in self._variables:
                v.off('change', runTest)

        def done ():
            removeListeners()
            complete.callback(None)

        self.on("connectivity-changed", setListeners)
        self.on("value-changed", runTest)
        setListeners()

        return complete
class controls_maketime (Block):
    """Builds a duration in seconds from the HOUR/MINUTE/SECOND fields."""

    def eval (self):
        h = float(self.getFieldValue('HOUR'))
        m = float(self.getFieldValue('MINUTE'))
        s = float(self.getFieldValue('SECOND'))
        total_seconds = (h * 3600) + (m * 60) + s
        return defer.succeed(total_seconds)
class controls_whileUntil (Block):
    """Loop block: repeatedly runs the DO stack while (MODE == "WHILE") or
    until (MODE == "UNTIL") the BOOL input evaluates true."""

    @defer.inlineCallbacks
    def _run (self):
        self.iterations = 0

        while True:
            if self.state is State.PAUSED:
                # Re-enter _run from the top when the block resumes.
                self._onResume = self._run
                return
            elif self.state is not State.RUNNING:
                return

            condition = yield self.getInputValue('BOOL', False)

            if self.fields['MODE'] == "UNTIL":
                # UNTIL inverts the loop condition.
                condition = (condition == False)

            if condition:
                try:
                    input = self.getInput('DO')
                    yield input.reset()
                    yield input.run()
                except Disconnected:
                    # A disconnected DO stack is skipped this iteration.
                    pass
                except Cancelled:
                    break
            else:
                break

            self.iterations += 1
class controls_repeat_ext (Block):
    """Runs the DO stack TIMES times; TIMES is re-evaluated before every
    iteration."""

    @defer.inlineCallbacks
    def _run (self):
        self.iterations = 0

        while True:
            if self.state is State.PAUSED:
                # Re-enter _run from the top when the block resumes.
                self._onResume = self._run
                return
            elif self.state is not State.RUNNING:
                return

            # Recalculate count on each iteration.
            # I imagine this is expected if a simple number block is used,
            # but if variables are involved it may turn out to lead to
            # unexpected behaviour!
            count = yield self.getInputValue('TIMES', None)

            if count is None or self.iterations >= count:
                break

            try:
                input = self.getInput('DO')
                yield input.reset()
                yield input.run()
            except (Disconnected, Cancelled, AttributeError):
                # A missing or interrupted DO stack still counts as an
                # iteration.
                pass

            self.iterations += 1
| richardingham/octopus | octopus/blocktopus/blocks/controls.py | Python | mit | 9,739 | [
"Octopus"
] | f6f325232733f26b4071737e361a65254b8e0e01c71897736b53a1be1b002410 |
# This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2010 Brian J. Crowell <brian@fluggo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
class ForegroundMarker(object):
    # Abstract base for overlay markers drawn on top of a canvas view.
    # NOTE(review): this base declares boundingRect (camelCase) but both
    # subclasses in this file implement bounding_rect (snake_case) —
    # confirm which spelling callers actually invoke.

    def boundingRect(self, view):
        '''
        Return the bounding rectangle of the marker in view coordinates.
        '''
        raise NotImplementedError

    def paint(self, view, painter, rect):
        '''
        Paint the marker for the given view using the painter and rect, which are both in scene coordinates.
        '''
        # Default: draw nothing.
        pass
class HorizontalSnapMarker(ForegroundMarker):
    """Translucent horizontal band drawn at scene-y ``y`` to indicate a
    snap position."""

    def __init__(self, y):
        # y: vertical position in scene coordinates.
        self.y = y

    def bounding_rect(self, view):
        '''Return the marker's bounding rectangle in viewport coordinates.'''
        pos_y = view.viewportTransform().map(QtCore.QPointF(0.0, float(self.y))).y()
        # BUG FIX: the original read the undefined name
        # `view_snap_marker_width` (a NameError at runtime); the attribute
        # lives on the view, exactly as VerticalSnapMarker.bounding_rect
        # uses it.
        return QtCore.QRectF(0.0, pos_y - (view.snap_marker_width / 2.0), view.viewport().width(), view.snap_marker_width)

    def paint(self, view, painter, rect):
        '''Paint a reflected-gradient horizontal band centred on self.y.'''
        pos_y = painter.transform().map(QtCore.QPointF(0.0, float(self.y))).y()
        rect = painter.transform().mapRect(rect)

        painter.save()
        # Work in raw viewport pixels so the band has constant on-screen
        # thickness regardless of the view's zoom.
        painter.resetTransform()

        gradient = QtGui.QLinearGradient(0.0, pos_y, 0.0, pos_y + view.snap_marker_width / 2.0)
        gradient.setSpread(QtGui.QGradient.ReflectSpread)
        gradient.setStops([
            (0.0, QtGui.QColor.fromRgbF(1.0, 1.0, 1.0, 1.0)),
            (0.5, QtGui.QColor.fromRgbF(view.snap_marker_color.redF(), view.snap_marker_color.greenF(), view.snap_marker_color.blueF(), 0.5)),
            (1.0, QtGui.QColor.fromRgbF(0.0, 0.0, 0.0, 0.0))])

        painter.setPen(Qt.transparent)
        painter.setBrush(QtGui.QBrush(gradient))
        painter.drawRect(QtCore.QRectF(rect.x(), pos_y - (view.snap_marker_width / 2.0), rect.width(), view.snap_marker_width))
        painter.restore()
class VerticalSnapMarker(ForegroundMarker):
    """Translucent vertical band drawn at scene-x ``time`` to indicate a
    snap position."""

    def __init__(self, time):
        # time: horizontal position in scene coordinates.
        self.time = time

    def bounding_rect(self, view):
        '''Return the marker's bounding rectangle in viewport coordinates.'''
        scene_point = QtCore.QPointF(float(self.time), 0.0)
        x = view.viewportTransform().map(scene_point).x()
        half_width = view.snap_marker_width / 2.0
        return QtCore.QRectF(x - half_width, 0.0, view.snap_marker_width, view.viewport().height())

    def paint(self, view, painter, rect):
        '''Paint a reflected-gradient vertical band centred on self.time.'''
        scene_point = QtCore.QPointF(float(self.time), 0.0)
        x = painter.transform().map(scene_point).x()
        mapped_rect = painter.transform().mapRect(rect)

        painter.save()
        # Paint in raw viewport pixels so the band thickness is constant
        # on screen.
        painter.resetTransform()

        half_width = view.snap_marker_width / 2.0
        marker_color = view.snap_marker_color
        gradient = QtGui.QLinearGradient(x, 0.0, x + half_width, 0.0)
        gradient.setSpread(QtGui.QGradient.ReflectSpread)
        stops = [
            (0.0, QtGui.QColor.fromRgbF(1.0, 1.0, 1.0, 1.0)),
            (0.5, QtGui.QColor.fromRgbF(marker_color.redF(), marker_color.greenF(), marker_color.blueF(), 0.5)),
            (1.0, QtGui.QColor.fromRgbF(0.0, 0.0, 0.0, 0.0)),
        ]
        gradient.setStops(stops)

        painter.setPen(Qt.transparent)
        painter.setBrush(QtGui.QBrush(gradient))
        painter.drawRect(QtCore.QRectF(x - half_width, mapped_rect.y(), view.snap_marker_width, mapped_rect.height()))
        painter.restore()
| fluggo/Canvas | fluggo/editor/ui/canvas/markers.py | Python | gpl-3.0 | 3,772 | [
"Brian"
] | 7a0d6d30e9c6aa253a431c392044204135f9059ab365e5043a00102fac072341 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import imp
import os
import platform
import sys
import numpy
import ctypes
try:
# Not normally needed. Not available in demo app.
import hotshot
except:
pass
# Attempt to import OpenCV's ctypes-based bindings
try:
from opencv.cvtypes import cv
except:
cv = None
from StringIO import StringIO
from PIL import (Image,
ImageChops)
from nupic.regions.PyRegion import PyRegion, RealNumpyDType
from nupic.regions.Spec import *
# Global counter used for some debugging operations
id = 0
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# GaborNode
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
class GaborNode2(PyRegion):
"""
Performs dense Gabor filtering upon a multi-resolution grid.
"""
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Class constants
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# The minimum filter size dimension (3x3)
minFilterDim = 3
# The minimum filter size dimension (3x3)
minNumOrients = 0
# List of filter dimensions supported by the optimized
# C library
_optimizedFilterDims = [5, 7, 9, 11, 13]
# Valid parameter values
_validValues = {
'phaseMode': ('single', 'dual'),
'targetType': ('edge', 'line'),
'boundaryMode': ('constrained', 'sweepOff'),
'normalizationMethod': ('fixed', 'max', 'mean'),
'postProcessingMethod': ('raw', 'sigmoid', 'threshold'),
'nta_morphologyMethod': ('best', 'opencv', 'nta'),
}
# Default parameter values
_defaults = {
# Documented parameters:
'filterDim': 9,
'numOrientations': 4,
'phaseMode': 'single',
'centerSurround': False,
'targetType': 'edge',
'gainConstant': 1.0,
'normalizationMethod': 'fixed',
'perPlaneNormalization': False,
'perPhaseNormalization': True,
'postProcessingMethod': 'raw',
'postProcessingSlope': 1.0,
'postProcessingCenter': 0.5,
'postProcessingMin': 0.0,
'postProcessingMax': 1.0,
'zeroThresholdOut': 0.0,
'boundaryMode': 'constrained',
'offImagePixelValue': 0,
'suppressOutsideBox': True,
'forceBoxContraction': False,
'suppressByAlpha': False,
'logPrefix': None,
# Undocumented parameters:
'nta_aspectRatio': 0.3,
'nta_effectiveWidth': 4.5,
'nta_wavelength': 5.6,
'nta_lobeSuppression': True,
'nta_debugLogBuffers': False,
'nta_morphologyMethod': 'best',
}
# Our C implementation performs the 2D convolution using
# integer math, but scales the operands to preserve
# precision. The scaling is done by left shifting the Gabor
# filter coefficients by a fixed number of bits:
_integerMathShifts = 12 # 2^12 = 4096
_integerMathScale = 1 << _integerMathShifts
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Public API calls
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def __init__(self,
# Filter size:
filterDim=None,
# Filter responses:
numOrientations=None,
phaseMode=None,
centerSurround=None,
targetType=None,
# Normalization:
gainConstant=None,
normalizationMethod=None,
perPlaneNormalization=None,
perPhaseNormalization=None,
# Post-processing:
postProcessingMethod=None,
postProcessingSlope=None,
postProcessingCenter=None,
postProcessingMin=None,
postProcessingMax=None,
zeroThresholdOut=None,
# Bounding effects:
boundaryMode=None,
offImagePixelValue=None,
suppressOutsideBox=None,
forceBoxContraction=None,
suppressByAlpha=None,
# Logging
logPrefix=None,
# Additional keywords
**keywds
):
"""
@param filterDim -- The size (in pixels) of both the width and height of the
gabor filters. Defaults to 9x9.
@param numOrientations -- The number of gabor filter orientations to produce.
The half-circle (180 degrees) of rotational angle will be evenly partitioned.
Defaults to 4, which produces a gabor bank containing filters oriented
at 0, 45, 90, and 135 degrees.
@param phaseMode -- The number of separate phases to compute per orientation.
Valid values are: 'single' or 'dual'. In 'single', responses to each such
orientation are rectified by absolutizing them; i.e., a 90-degree edge
will produce the same responses as a 270-degree edge, and the two
responses will be indistinguishable. In "dual" mode, the responses to
each orientation are rectified by clipping at zero, and then creating
a second output response by inverting the raw response and again clipping
at zero; i.e., a 90-degree edge will produce a response only in the
90-degree-oriented plane, and a 270-degree edge will produce a response
only the dual phase plane associated with the 90-degree plane (an
implicit 270-degree plane.) Default is 'single'.
@param centerSurround -- Controls whether an additional filter corresponding to
a non-oriented "center surround" response is applied to the image.
If phaseMode is "dual", then a second "center surround" response plane
is added as well (the inverted version of the center-surround response.)
Defaults to False.
@param targetType -- The preferred "target" of the gabor filters. A value of
'line' specifies that line detectors (peaks in the center and troughs
on either side) are to be used. A value of 'edge' specifies that edge
detectors (with a peak on one side and a trough on the other) are to
be used. Default is 'edge'.
@param gainConstant -- A multiplicative amplifier that is applied to the gabor
responses after any normalization. Defaults to 1.0; larger values
increase the sensitivity to edges.
@param normalizationMethod -- Controls the method by which responses are
normalized on a per image (and per scale) basis. Accepts the following
three legal values:
"fixed": No response normalization;
"max": Applies a global gain value to the responses so that the
max response equals the value of 'gainConstant'
"mean": Applies a global gain value to the responses so that the
mean response equals the value of 'gainConstant'
Default is 'fixed'.
@param perPlaneNormalization -- Controls whether normalization (as specified by
'normalizationMethod') is applied globally across all response planes
(for a given scale), or individually to each response plane. Default
is False. Note: this parameter is ignored if normalizationMethod is "fixed".
@param perPhaseNormalization -- Controls whether normalization (as specified by
'normalizationMethod') is applied globally across both phases for a
particular response orientation and scale, or individually to each
phase of the response. Default is True. Note: this parameter is
ignored if normalizationMethod is "fixed".
@param postProcessingMethod -- Controls what type of post-processing (if any)
is to be performed on the normalized responses. Valid value are:
"raw": No post-processing is performed; final output values are
unmodified after normalization
"sigmoid": Passes normalized output values through a sigmoid function
parameterized by 'postProcessingSlope' and 'postProcessingCenter'.
"threshold": Passes normalized output values through a piecewise linear
thresholding function parameterized by 'postProcessingMin'
and 'postProcessingMax'.
@param postProcessingSlope -- Controls the slope (steepness) of the sigmoid
function used when 'postProcessingMethod' is set to 'sigmoid'.
@param postProcessingCenter -- Controls the center point of the sigmoid function
used when 'postProcessingMethod' is set to 'sigmoid'.
@param postProcessingMin -- If 'postProcessingMethod' is set to 'threshold', all
normalized response values less than 'postProcessingMin' are suppressed to zero.
@param postProcessingMax -- If 'postProcessingMethod' is set to 'threshold', all
normalized response values greater than 'postProcessingMax' are clamped to one.
@param zeroThresholdOut -- if all outputs of a gabor node are below this threshold,
they will all be driven to absolute 0. This is useful in conjunction with
using the product mode/don't care spatial pooler which needs to know when
an input should be treated as 0 vs being normalized to sum to 1.
@param boundaryMode -- Controls how GaborNode deals with boundary effects. Accepts
two valid parameters:
'constrained' -- Gabor responses are normally only computed for image locations
that are far enough from the edge of the input image so that the entire
filter mask fits within the input image. Thus, the spatial dimensions of
the output gabor maps will be smaller than the input image layers.
'sweepOff' -- Gabor responses will be generated at every location within
the input image layer. Thus, the spatial dimensions of the output gabor
maps will be identical to the spatial dimensions of the input image.
For input image locations that are near the edge (i.e., a portion of
the gabor filter extends off the edge of the input image), the values
of pixels that are off the edge of the image are taken to be as specifed
by the parameter 'offImagePixelValue'.
Default is 'constrained'.
@param offImagePixelValue -- If 'boundaryMode' is set to 'sweepOff', then this
parameter specifies the value of the input pixel to use for "filling"
enough image locations outside the bounds of the original image.
Ignored if 'boundaryMode' is 'constrained'. Default value is 0.
@param suppressOutsideBox -- If True, then gabor responses outside of the bounding
box (provided from the sensor) are suppressed. Internally, the bounding
box is actually expanded by half the filter dimension (respecting the edge
of the image, of course) so that responses can be computed for all image
locations within the original bounding box.
@param forceBoxContraction -- Fine-tunes the behavior of bounding box suppression.
If False (the default), then the bounding box will only be 'contracted'
(by the half-width of the filter) in the dimenion(s) in which it is not
the entire span of the image. If True, then the bounding box will be
contracted unconditionally.
@param suppressByAlpha -- A boolean that, if True, instructs GaborNode to use
the pixel-accurate alpha mask received on the input 'validAlphaIn' for
the purpose of suppression of responses.
@param logPrefix -- If non-None, causes the response planes at each scale, and
for each input image, to be written to disk using the specified prefix
for the name of the log images. Default is None (no such logging.)
"""
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
#| The following parameters are for advanced configuration and unsupported at this time |
#| They may be specified via keyword arguments only. |
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
#
# @param nta_aspectRatio -- Controls how "fat" (i.e., how oriented) the Gabor
# filters are. A value of 1 would produce completely non-oriented
# (circular) filters; smaller values will produce a more oriented
# filter. Default is 0.3.
#
# @param nta_effectiveWidth -- Controls the rate of exponential drop-off in
# the Gaussian component of the Gabor filter. Default is 4.5.
#
# @param nta_wavelength -- Controls the frequency of the sinusoidal component
# of the Gabor filter. Default is 5.6.
#
# @param nta_lobeSuppression -- Controls whether or not the secondary lobes of the
# Gabor filters are suppressed. The suppression is performed based
# on the radial distance from the oriented edge to which the Gabor
# filter is tuned. If True, then the secondary lobes produced
# by the pure mathematical Gabor equation will be suppressed
# and have no effect; if False, then the pure mathematical
# Gabor equation (digitized into discrete sampling points, of
# course) will be used. Default is True.
#
# @param nta_debugLogBuffers -- If enabled, causes internal memory buffers used
# C implementation to be dumped to disk after each compute()
# cycle as an aid in the debugging of the C code path.
#
# @param nta_morphologyMethod -- Controls the method to use for performing
# morphological operations (erode or dilate) upon the
# valid alpha masks. Legal values are: 'opencv' (use the
# faster OpenCV routines), 'nta' (use the slower routines,
# or 'best' (use OpenCV if it is available on the platform,
# otherwise use the slower routines.)
#
# ------------------------------------------------------
# Handle hidden/undocumented parameters
for paramName in [p for p in self._defaults if self._isHiddenParam(p)]:
exec("%s = keywds.pop('%s', None)" % (paramName, paramName))
# ------------------------------------------------------
# Assign default values to missing parameters
for paramName, paramValue in self._defaults.items():
if eval(paramName) is None:
exec("%s = paramValue" % paramName)
# ------------------------------------------------------
# Handle deprecated parameters
# Deprecated: numOrients
numOrients = keywds.pop('numOrients', None)
if numOrients:
print "WARNING: 'numOrients' has been deprecated and replaced with 'numOrientations'"
if numOrientations is None:
numOrientations = numOrients
elif numOrients != numOrientations:
print "WARNING: 'numOrients' (%s) is inconsistent with 'numOrientations' (%s) and will be ignored" % \
(str(numOrients), str(numOrientations))
# Deprecated: filterPhase
filterPhase = keywds.pop('filterPhase', None)
if filterPhase:
print "WARNING: 'filterPhase' has been deprecated and replaced with 'targetType'"
if targetType is None:
targetType = filterPhase
elif filterPhase != targetType:
print "WARNING: 'filterPhase' (%s) is inconsistent with 'targetType' (%s) and will be ignored" % \
(str(filterPhase), str(targetType))
# Deprecated: nta_edgeMode
nta_edgeMode = keywds.pop('nta_edgeMode', None)
if nta_edgeMode:
print "WARNING: 'nta_edgeMode' has been deprecated and replaced with 'edgeMode'"
if edgeMode is None:
edgeMode = nta_edgeMode
elif nta_edgeMode != edgeMode:
print "WARNING: 'nta_edgeMode' (%s) is inconsistent with 'edgeMode' (%s) and will be ignored" % \
(str(nta_edgeMode), str(edgeMode))
# Deprecated: lateralInhibition
lateralInhibition = keywds.pop('nta_lateralInhibition', None)
if lateralInhibition:
print "WARNING: 'lateralInhibition' has been deprecated and will not be supported in future releases"
# Deprecated: validityShrinkage
validityShrinkage = keywds.pop('validityShrinkage', None)
if validityShrinkage:
print "WARNING: 'validityShrinkage' has been deprecated and replaced with 'suppressOutsideBox'"
if suppressOutsideBox is None:
suppressOutsideBox = (validityShrinkage >= 0.0)
elif suppressOutsideBox != (validityShrinkage >= 0.0):
print "WARNING: 'validityShrinkage' (%s) is inconsistent with 'suppressOutsideBox' (%s) and will be ignored" % \
(str(validityShrinkage), str(suppressOutsideBox))
self._numScales = None
self.nta_phaseIndex = 0
self._inputPyramidTopology = None
self._outputPyramidTopology = None
self._topDownCombiner = None
self._tdNumParents = None
self._enabledNodes = []
self._nodesWithReceptiveField = None
# These are cached inputs/outputs used for detecting/skipping either the
# bottom up or top down compute to improve performance.
self._cachedRFInput = None
self._cachedBUInput = None
self._cachedBUOutput = None
self._cachedTDInput = None
self._cachedTDOutput = None
self._cachedResetIn = None
self._cachedValidRegionIn = None
self._cachedValidRegionOut = None
# Profiling information
self._profileObj = None
self._iterations = 0
# No longer neede for receptivefields_test, but still needed to satisfy
# an assertion in _checkEphemeralMembers
if not hasattr(self, "_inputSplitter"):
self._inputSplitter = None
self._rfMask = None
self._rfSize = None
self._rfInvLenY = None
self._rfCenterX = None
self._rfCenterY = None
self._rfMinX = None
self._rfMinY = None
self._rfInvLenX = None
self._rfMaxX = None
self._rfMaxY = None
self._initEphemerals()
# ------------------------------------------------------
# Validate each parameter
for paramName in self._defaults.keys():
self._validate(paramName, eval(paramName))
# ------------------------------------------------------
# Store each parameter value
for paramName in self._defaults.keys():
# Hidden parameters have the 'nta_' prefix stripped
#if self._isHiddenParam(paramName):
# internalName = paramName[4:]
#else:
# internalName = paramName
internalName = self._stripHidingPrefixIfPresent(paramName)
exec("self._%s = %s" % (internalName, paramName))
# ------------------------------------------------------
# Perform additional validations that operate on
# combinations/interactions of parameters
self._doHolisticValidation()
# ------------------------------------------------------
# Set up internal state
# This node always get its input as a padded image cube from the ImageSensor
# It may change in the future when ImageSensor supports packed image pyramids
self._gaborBank = None
# Generation of response images must be explicitly enabled
self.disableResponseImages()
# This node type is non-learning, and thus begins life in 'infer' mode.
# This is only needed because our base class requires it.
self._stage = 'infer'
# We are always connected to an image sensor with padded pixels
self._inputPyramidFormat = 'padded'
# Store the number of output planes we'll produce
self._numPlanes = self.getNumPlanes()
# Initially, we do not generate response images
self._makeResponseImages = False
# Where we keep the maxTopDownOut for every node
self._maxTopDownOut = []
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _stripHidingPrefixIfPresent(self, paramName):
"""
If the named parameter is hidden, strip off the
leading "nta_" prefix.
"""
if self._isHiddenParam(paramName):
return paramName[4:]
else:
return paramName
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _isHiddenParam(self, paramName):
"""
Utility method for returning True if 'paramName' is the name
of a hidden parameter.
"""
return paramName.find('nta_') == 0
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getOutputDims(self, inputDims):
    """
    Instance-level convenience wrapper around the classmethod
    calcOutputDims(), supplying this node's configured filter
    size and boundary mode.
    """
    filterDim = self._filterDim
    boundaryMode = self._boundaryMode
    return self.calcOutputDims(inputDims, filterDim, boundaryMode)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getNumPlanes(self):
    """
    Instance-level convenience wrapper around the classmethod
    calcNumPlanes(), supplying this node's configured parameters.
    """
    orientations = self._numOrientations
    phase = self._phaseMode
    surround = self._centerSurround
    return self.calcNumPlanes(orientations, phase, surround)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def calcOutputDims(cls, inputDims,
                   filterDim,
                   boundaryMode,
                   **keywds):
    """
    Public utility method that computes the output dimensions
    in form (height, width), given 'inputDims' (height, width),
    for a particular 'filterDim'.

    @param inputDims -- (height, width) of the input image layer.
    @param filterDim -- gabor filter mask size; None selects the class default.
    @param boundaryMode -- 'constrained' or 'sweepOff'; None selects the
          class default.
    @returns -- (height, width) tuple of the output gabor map dimensions.
    Raises RuntimeError (via _validate) on invalid parameter values.
    """
    # Assign default values to missing parameters.
    # (Previously done via eval()/exec() on built strings, which was
    # fragile and slow; plain None-checks are equivalent.)
    if filterDim is None:
        filterDim = cls._defaults['filterDim']
    if boundaryMode is None:
        boundaryMode = cls._defaults['boundaryMode']
    # Validation
    cls._validate('filterDim', filterDim)
    cls._validate('boundaryMode', boundaryMode)
    # Compute output dimensions: 'sweepOff' produces output the same
    # size as the input; 'constrained' shrinks it by filterDim - 1.
    if boundaryMode == 'sweepOff':
        shrinkage = 0
    else:
        # _validate() guarantees the only other legal value is
        # 'constrained' (the original left 'shrinkage' unbound here,
        # which would have raised NameError on any future mode).
        shrinkage = filterDim - 1
    return tuple([dim - shrinkage for dim in inputDims])
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def calcNumPlanes(cls, numOrientations=None,
                  phaseMode=None,
                  centerSurround=None,
                  **keywds):
    """
    Public utility method that computes the number
    of response planes for a particular Gabor
    configuration.

    @param numOrientations -- number of oriented filters; None selects
          the class default.
    @param phaseMode -- 'single' or 'dual'; None selects the class default.
    @param centerSurround -- whether a center-surround plane is added;
          None selects the class default.
    @returns -- the total number of response planes.
    Raises RuntimeError (via _validate) on invalid parameter values.
    """
    # Assign default values to missing parameters.
    # (Previously done via eval()/exec() on built strings.)
    if numOrientations is None:
        numOrientations = cls._defaults['numOrientations']
    if phaseMode is None:
        phaseMode = cls._defaults['phaseMode']
    if centerSurround is None:
        centerSurround = cls._defaults['centerSurround']
    # Validation
    cls._validate('phaseMode', phaseMode)
    cls._validate('numOrientations', numOrientations)
    cls._validate('centerSurround', centerSurround)
    # One plane per orientation, plus one for center-surround,
    # doubled when operating in dual-phase mode.
    numPlanes = numOrientations
    if centerSurround:
        numPlanes += 1
    if phaseMode == 'dual':
        numPlanes *= 2
    return numPlanes
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doHolisticValidation(self):
"""
Perform additional validations that operate on
combinations/interactions of parameters.
"""
# We must have at least one response plane
if self.getNumPlanes() < 1:
raise RuntimeError("Configuration error: no response planes; " \
"either 'numOrientations' must be > 0 or " \
"'centerSurround' must be True")
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def _validate(cls, name, value):
    """
    Validate a single parameter value.  Raises a RuntimeError if
    the parameter is invalid; returns None otherwise.

    Review fix: several messages used a bare '%;' where '%s;' was
    intended, so raising them failed with a format error instead of
    reporting the offending value; those specifiers are corrected.
    """
    # ------------------------------------------------------
    # Filter size:
    # Validation: filterDim
    if name == "filterDim":
        if type(value) != type(0) or \
           value < cls.minFilterDim or \
           value % 2 != 1:
            raise RuntimeError("Value error: '%s' must be an odd integer >= %d; your value: %s" % \
                    (name, cls.minFilterDim, str(value)))
    # ------------------------------------------------------
    # Filter responses:
    # Validation: numOrientations
    elif name == "numOrientations":
        if type(value) != type(0) or \
           value < cls.minNumOrients:
            raise RuntimeError("Value error: '%s' must be an integer >= %d; your value: %s" % \
                    (name, cls.minNumOrients, str(value)))
    # Validation: phaseMode
    elif name == "phaseMode":
        if value not in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                    (name, str(cls._validValues[name]), value))
    # Validation: centerSurround
    elif name == "centerSurround":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                    (name, str(value)))
    # Validation: targetType
    elif name == "targetType":
        if value not in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                    (name, str(cls._validValues[name]), value))
    # ------------------------------------------------------
    # Normalization:
    # Validation: gainConstant
    elif name == "gainConstant":
        if type(value) not in [type(0), type(0.0)] or float(value) < 0.0:
            raise RuntimeError("Value error: '%s' must be a float or integer >= 0.0; your value: %s" % \
                    (name, str(value)))
    # Validation: normalizationMethod
    elif name == "normalizationMethod":
        if not value in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                    (name, str(cls._validValues[name]), value))
    # Validation: perPlaneNormalization
    elif name == "perPlaneNormalization":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                    (name, str(value)))
    # Validation: perPhaseNormalization
    elif name == "perPhaseNormalization":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                    (name, str(value)))
    # ------------------------------------------------------
    # Post-processing:
    # Validation: postProcessingMethod
    elif name == "postProcessingMethod":
        if not value in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                    (name, str(cls._validValues[name]), value))
    # Validation: postProcessingSlope
    elif name == "postProcessingSlope":
        if type(value) not in [type(0), type(0.0)] or float(value) <= 0.0:
            raise RuntimeError("Value error: '%s' must be a float or integer > 0.0; your value: %s" % \
                    (name, str(value)))
    # Validation: postProcessingCenter
    elif name == "postProcessingCenter":
        if type(value) not in [type(0), type(0.0)]:
            raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
                    (name, str(value)))
    # Validation: postProcessingMin
    elif name == "postProcessingMin":
        if type(value) not in [type(0), type(0.0)]:
            raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
                    (name, str(value)))
    # Validation: postProcessingMax
    elif name == "postProcessingMax":
        if type(value) not in [type(0), type(0.0)]:
            raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
                    (name, str(value)))
    # Validation: zeroThresholdOut
    # NOTE(review): the message claims ">= 0.0" but only the type is
    # checked; a negative value is currently accepted -- confirm intent.
    elif name == "zeroThresholdOut":
        if type(value) not in [type(0), type(0.0)]:
            raise RuntimeError("Value error: '%s' must be a float or integer >= 0.0; your value: %s" % \
                    (name, str(value)))
    # ------------------------------------------------------
    # Boundary effects:
    # Validation: boundaryMode
    elif name == "boundaryMode":
        if not value in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                    (name, str(cls._validValues[name]), str(value)))
    # Validation: offImagePixelValue
    elif name == "offImagePixelValue":
        if value != 'colorKey' and (type(value) not in (int, float) or float(value) < 0.0 or float(value) > 255.0):
            raise RuntimeError("Value error: '%s' must be a float or integer between 0 and 255, or 'colorKey'; your value: %s" % \
                    (name, str(value)))
    # Validation: suppressOutsideBox
    elif name == "suppressOutsideBox":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                    (name, str(value)))
    # Validation: forceBoxContraction
    elif name == "forceBoxContraction":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                    (name, str(value)))
    # Validation: suppressByAlpha
    elif name == "suppressByAlpha":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                    (name, str(value)))
    # ------------------------------------------------------
    # Logging
    # Validation: logPrefix
    elif name == "logPrefix":
        if value is not None and (type(value) != type("") or len(value) == 0):
            raise RuntimeError("Value error: '%s' must be a string; your value: %s" % \
                    (name, str(value)))
    # ------------------------------------------------------
    # Undocumented parameters:
    # Validation: nta_aspectRatio
    elif name == "nta_aspectRatio":
        if type(value) not in [type(0), type(0.)] or value <= 0.0:
            raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
                    (name, str(value)))
    # Validation: nta_effectiveWidth
    elif name == "nta_effectiveWidth":
        if type(value) not in [type(0), type(0.)] or value <= 0.0:
            raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
                    (name, str(value)))
    # Validation: nta_wavelength
    elif name == "nta_wavelength":
        if type(value) not in [type(0), type(0.)] or value <= 0.0:
            raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
                    (name, str(value)))
    # Validation: nta_lobeSuppression
    elif name == "nta_lobeSuppression":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                    (name, str(value)))
    # Validation: nta_debugLogBuffers
    elif name == "nta_debugLogBuffers":
        if value not in [True, False]:
            raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
                    (name, str(value)))
    # Validation: nta_morphologyMethod
    elif name == "nta_morphologyMethod":
        if value not in cls._validValues[name]:
            raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
                    (name, str(cls._validValues[name]), str(value)))
        # 'cv' is the module-level OpenCV handle (None when unavailable)
        elif value == "opencv" and cv is None:
            raise RuntimeError(
                    "'%s' was explicitly specified as 'opencv' " \
                    "but OpenCV is not available on this platform" % name)
    # ------------------------------------------------------
    # Deprecated parameters:
    # Validation: numOrients
    elif name == "numOrients":
        if type(value) != type(0) or \
           value < cls.minNumOrients:
            raise RuntimeError("Value error: '%s' must be an integer >= %d; your value: %s" % \
                    (name, cls.minNumOrients, str(value)))
    # Validation: lateralInhibition
    elif name == "lateralInhibition":
        if type(value) not in [type(0), type(0.0)] or value < 0.0 or value > 1.0:
            raise RuntimeError("Value error: '%s' must be a float >= 0 and <= 1; your value: %s" % \
                    (name, str(value)))
    # Validation: validityShrinkage
    elif name == "validityShrinkage":
        if type(value) not in [type(0), type(0.0)] or float(value) < 0.0 or float(value) > 1.0:
            raise RuntimeError("Value error: '%s' must be a float or integer between 0 and 1; your value: %s" % \
                    (name, str(value)))
    # ------------------------------------------------------
    # Unknown parameter
    else:
        raise RuntimeError("Unknown parameter: %s [%s]" % (name, value))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def initialize(self, dims, splitterMaps):
    """
    Build the gabor filter bank; called once after construction.

    @param dims -- dimensions of the input image layer.
    @param splitterMaps -- unused by GaborNode; present for interface
          compatibility.
    """
    # Wrap 'dims' in a 1-tuple so the node treats its input as a
    # single scale, then run the one-time preparations (buffer
    # allocation, LUT generation, etc.)
    self._prepare((dims,))
    # Cache the number of response planes we will produce
    self._numPlanes = self.getNumPlanes()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getParameter(self, parameterName, nodeSet=""):
    """
    Get the value of a GaborNode parameter.

    @param parameterName -- the name of the parameter to retrieve, as defined
          by the Node Spec.
    @param nodeSet -- passed through to the base class for parameters
          GaborNode does not handle itself.
    """
    if parameterName in self._defaults:
        # Hidden "nta_" parameters are internally stored as
        # instance attributes without the leading "nta_"
        if parameterName.startswith("nta_"):
            parameterName = parameterName[4:]
        # getattr() replaces the original eval() on a built string:
        # same behavior, but faster and without the injection hazard.
        return getattr(self, "_" + parameterName)
    # Handle standard MRG infrastructure
    elif parameterName == 'nta_width':
        return self._inputPyramidTopology[0]['numNodes'][0]
    elif parameterName == 'nta_height':
        return self._inputPyramidTopology[0]['numNodes'][1]
    # Handle the maxTopDownOut read-only parameter
    elif parameterName == 'maxTopDownOut':
        return self._maxTopDownOut
    # Handle deprecated parameters
    elif parameterName == 'numOrients':
        return self._numPlanes
    elif parameterName == 'filterPhase':
        return self._targetType
    elif parameterName == 'nta_edgeMode':
        return self._boundaryMode
    elif parameterName == 'nta_lateralInhibition':
        # Lateral inhibition is deprecated; always report 0.0
        return 0.0
    # Unknown parameter (at least by GaborNode) -- defer to base class
    else:
        return PyRegion.getParameter(self, parameterName, nodeSet)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def setParameter(self, parameterName, parameterValue, nodeSet=""):
    """
    Set the value of a GaborNode parameter.

    @param parameterName -- the name of the parameter to update, as defined
          by the Node Spec.
    @param parameterValue -- the value to which the parameter is to be set.
    @param nodeSet -- passed through to the base class for parameters
          GaborNode does not handle itself.
    """
    # @todo -- Need to add validation of parameter changes
    settableParams = ["suppressOutsideBox", "forceBoxContraction",
                      "suppressByAlpha", "offImagePixelValue",
                      "perPlaneNormalization", "perPhaseNormalization",
                      "nta_debugLogBuffers", "logPrefix",
                      "zeroThresholdOut"]
    # Changing any of these requires the post-processing LUTs to be rebuilt
    regenParams = ["gainConstant", "normalizationMethod",
                   "postProcessingMethod", "postProcessingSlope",
                   "postProcessingCenter", "postProcessingMin",
                   "postProcessingMax"]
    if parameterName in settableParams + regenParams:
        # Hidden "nta_" parameters are stored internally without the
        # leading "nta_" prefix (mirroring getParameter()); the original
        # exec()-based code stored 'nta_debugLogBuffers' under the wrong
        # attribute name ('_nta_debugLogBuffers').
        internalName = parameterName
        if internalName.startswith("nta_"):
            internalName = internalName[4:]
        setattr(self, "_" + internalName, parameterValue)
    elif parameterName == 'nta_morphologyMethod':
        self._morphologyMethod = parameterValue
    # Not one of our parameters -- defer to the base class
    else:
        return PyRegion.setParameter(self, parameterName, parameterValue, nodeSet)
    # Generate post-processing lookup-tables (LUTs) that will be
    # used by the C implementation
    if parameterName in regenParams:
        self._makeLUTs()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def enableResponseImages(self):
    """
    Turn on generation of PIL Images that visualize the Gabor responses.
    """
    self._makeResponseImages = True
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def disableResponseImages(self):
    """
    Turn off generation of PIL Images that visualize the Gabor responses.
    """
    self._makeResponseImages = False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getResponseImages(self, whichResponse='all',
                      preSuppression=False,
                      whichScale='all',
                      whichPhase=0,
                      whichDirection='bottomUp'):
    """
    Return a list of PIL Images representing the Gabor responses
    computed upon the latest multi-resolution input image pyramid.
    @param whichResponse -- Indicates which Gabor orientation response
          should be returned. If 'all' (the default), then false
          color composite images will be generated that contain the
          gabor responses for all orientations. 'centerSurround'
          selects the center-surround plane. Otherwise, it should
          be an integer index between 0 and numOrients-1, in which
          case grayscale images will be generated.
    @param preSuppression -- Indicates whether the images should be
          generated before bounding box suppression is performed
          (if True), or after suppression (if False, the default.)
    @param whichScale -- Indicates which multi-resolution scale
          should be used to generate the response Images. If 'all'
          (the default), then images will be generated for each
          scale in the input multi-resolution grid, and will be
          returned in a list. Otherwise, it should be an integer
          index between 0 and numResolutions-1.
    @param whichPhase -- 0 selects the primary phase; a value > 0
          selects the opposite-phase plane set (dual-phase mode).
    @param whichDirection -- Indicates which phase of response images
          should be returned ('bottomUp', 'topDown', 'combined').
    @returns -- Either a single PIL Image, or a list of PIL Images
          that correspond to different resolutions; None when no
          response is available.
    """
    # If image generation was not pre-enabled, generate images now
    # from whatever cached response is available.
    if not self._makeResponseImages:
        response = None
        if whichDirection == 'bottomUp':
            if self.response is None:
                return
            response = self.response
        elif whichDirection == 'topDown':
            if self.tdInput is None:
                return
            response = self.tdInput
        elif whichDirection == 'combined':
            # BUGFIX: original tested "if self.selectedBottomUpOut:"
            # and returned early when a response WAS available; the
            # check must mirror the other two branches.
            if self.selectedBottomUpOut is None:
                return
            response = self.selectedBottomUpOut
        if response is None:
            # No response to use (also covers an unrecognized direction,
            # which previously raised NameError on the unbound variable)
            return
        self._genResponseImages(response, preSuppression=preSuppression, phase=whichDirection)
    # Make sure we have images to provide
    if self._responseImages is None:
        return
    # Pull subset of images based on 'preSuppression' setting
    imageSet = self._responseImages.get(self._getResponseKey(preSuppression))
    if imageSet is None:
        return
    # Validate format of 'whichScale' arg
    numScales = len(self._inputPyramidTopology)
    if whichScale != 'all' and (type(whichScale) != type(0) or whichScale < 0 or whichScale >= numScales):
        raise RuntimeError("'whichScale' must be 'all' or an integer between 0 and %d." % self._numScales)
    # Validate format of 'whichResponse' arg
    if whichResponse not in ['all', 'centerSurround']:
        if type(whichResponse) != type(0) or whichResponse < 0 or whichResponse >= self._numPlanes:
            raise RuntimeError("'whichResponse' must be 'all' or an integer between 0 and %d." % self._numPlanes)
    # Make sure the requested phase of response exists
    # ('in' replaces the Python-2-only dict.has_key())
    if whichDirection not in imageSet:
        return
    # Handle "exotic" responses
    if whichResponse != 'all':
        if whichResponse == 'centerSurround':
            whichResponse = self._numOrientations
        assert type(whichResponse) == type(0)
        if whichPhase > 0:
            # Offset into the second (opposite-phase) plane set
            whichResponse += self._numOrientations
            if self._centerSurround:
                whichResponse += 1
    # Return composite gabor response(s)
    return imageSet[whichDirection][whichResponse][whichScale]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Public class methods
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def deserializeImage(cls, serialized):
    """
    Helper for training/testing scripts: reconstruct a PIL Image from
    the serialized form produced by getResponseImages().
    @param serialized -- the raw serialized image bytes.
    @returns -- the fully-loaded PIL Image.
    """
    deserialized = Image.open(StringIO(serialized))
    # Force a full decode while the underlying buffer is still alive
    deserialized.load()
    return deserialized
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Private methods - Overriding base class
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
class ARRAY(ctypes.Structure):
    # ctypes mirror of the numpy array metadata handed to the C library
    # by _wrapArray(): dimension count plus raw pointers to the shape
    # array, strides array, and data buffer.
    # NOTE: field order must match the struct layout the C code expects;
    # do not reorder.
    _fields_ = [
        ("nd", ctypes.c_int),             # number of dimensions
        ("dimensions", ctypes.c_void_p),  # pointer to the shape array
        ("strides", ctypes.c_void_p),     # pointer to the strides array
        ("data", ctypes.c_void_p),        # pointer to the data buffer
    ]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _wrapArray(self, array):
"""
Helper function that takes a numpy array and returns
a 4-tuple consisting of ctypes references to the
following:
(nd, dimensions, strides, data)
"""
if array is None:
return None
else:
return ctypes.byref(self.ARRAY(len(array.ctypes.shape),
ctypes.cast(array.ctypes.shape, ctypes.c_void_p),
ctypes.cast(array.ctypes.strides, ctypes.c_void_p),
array.ctypes.data))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _prepare(self, inputDims):
"""
Perform one-time preparations need for gabor processing.
"""
#inputDims = [(inputDim['numNodes'][1], inputDim['numNodes'][0]) \
# for inputDim in self._inputPyramidTopology]
self.prepare(inputDims)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def prepare(self, inputDims):
    """
    Perform one-time preparations needed for gabor processing.
    Public interface allowing the GaborNode to be tested
    outside of the full RTE.
    @param inputDims: a list of input image sizes in the
          form of 2-tuples (width, height)
    """
    # Reverse the input dims into (height, width) format for internal storage
    self._numScales = len(inputDims)
    self._inputDims = inputDims
    # Compute output dims for each input dim
    self._outputDims = [self.getOutputDims(inputDim) for inputDim in inputDims]
    # Compute the minimum output dimension
    self._minInputDim = min([min(inputDim) for inputDim in self._inputDims])
    self._minOutputDim = min([min(outputDim) for outputDim in self._outputDims])
    # Break out height/width of the first (primary) scale as floats
    self._inHeight, self._inWidth = [float(x) for x in self._inputDims[0]]
    self._outHeight, self._outWidth = [float(x) for x in self._outputDims[0]]
    # Load the _gaborNode C library
    libGabor = self._loadLibrary("_algorithms")
    # Prepare the C calls
    if libGabor:
        self._gaborComputeProc = libGabor.gaborCompute
    else:
        raise Exception('Unable to load gaborNode C library _algorithms')
    # If we could not load the library, then we'll default to
    # using numpy for our gabor processing.
    # NOTE(review): this unconditional assignment clobbers the C entry
    # point bound just above, so the numpy path is ALWAYS taken even
    # when the library loads successfully (and the load-failure case
    # raises before reaching here, so the stated fallback is
    # unreachable).  Looks like a debugging leftover -- confirm intent
    # before removing.
    self._gaborComputeProc = None
    # Prepare some data structures in advance
    # Allocate working buffers to be used by the C implementation
    self._allocBuffers()
    # Generate post-processing lookup-tables (LUTs) that will be
    # used by the C implementation
    self._makeLUTs()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _alignToFour(self, val):
"""
Utility macro that increases a value 'val' to ensure
that it is evenly divisible by four (e.g., for
purposes of memory alignment, etc.)
"""
return (((val - 1) / 4) + 1) * 4
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeLUTs(self):
"""
Generate post-processing lookup-tables (LUTs) that will be
used by the C implementation
"""
# --------------------------------------------------
# Define LUT parameters
# For 'normalizationMethod' of 'mean', this internal parameter
# controls the trade-off between how finely we can discretize our
# LUT bins vs. how often a raw response value "overflows" the
# maximum LUT bin and has to be clamped. In essence, any raw
# response value greater than 'meanLutCushionFactor' times the
# mean response for the image will "overflow" and be clamped
# to the response value of the largest bin in the LUT.
meanLutCushionFactor = 4.0
# We'll use a LUT large enough to give us decent precision
# but not so large that it causes cache problems.
# A total of 1024 bins seems reasonable:
numLutShifts = 10
numLutBins = (1 << numLutShifts)
# --------------------------------------------------
# Build LUT
# Build our Gabor Bank if it doesn't already exist
self._buildGaborBankIfNeeded()
# Empirically compute the maximum possible response value
# given our current parameter settings. We do this by
# generating a fake image of size (filterDim X filterDim)
# that has a pure vertical edge and then convolving it with
# the first gabor filter (which is always vertically oriented)
# and measuring the response.
testImage = numpy.ones((self._filterDim, self._filterDim), dtype=numpy.float32) * 255.0
#testImage[:, :(self._filterDim/2)] = 0
testImage[numpy.where(self._gaborBank[0] < 0.0)] *= -1.0
maxRawResponse = (testImage * self._gaborBank[0]).sum()
# At run time our Gabor responses will be scaled (via
# bit shifting) so that we can do integer match instead of
# floating point match, but still have high precision.
# So we'll simulate that in order to get a comparable result.
maxShiftedResponse = maxRawResponse / (255.0 * float(self._integerMathScale))
# Depending on our normalization method, our LUT will have a
# different scaling factor (for pre-scaling values prior
# to discretizing them into LUT bins)
if self._normalizationMethod == 'fixed':
postProcScalar = float(numLutBins - 1) / maxShiftedResponse
elif self._normalizationMethod == 'max':
postProcScalar = float(numLutBins - 1)
elif self._normalizationMethod == 'mean':
postProcScalar = float(numLutBins - 1) / meanLutCushionFactor
else:
assert False
# Build LUT
lutInputs = numpy.array(range(numLutBins), dtype=numpy.float32) / postProcScalar
# Sigmoid: output = 1 / (1 + exp(input))
if self._postProcessingMethod == 'sigmoid':
offset = 1.0 / (1.0 + numpy.exp(self._postProcessingSlope * self._postProcessingCenter))
scaleFactor = 1.0 / (1.0 - offset)
postProcLUT = ((1.0 / (numpy.exp(numpy.clip(self._postProcessingSlope \
* (self._postProcessingCenter - lutInputs), \
-40.0, 40.0)) + 1.0)) - offset) * scaleFactor
# For some parameter choices, it is possible that numerical precision
# issues will result in the 'offset' being ever so slightly larger
# than the value of postProcLUT[0]. This will result in a very
# tiny negative value in the postProcLUT[0] slot, which is
# undesireable because the output of a sigmoid should always
# be bound between (0.0, 1.0).
# So we clip the LUT values to this range just to keep
# things clean.
postProcLUT = numpy.clip(postProcLUT, 0.0, 1.0)
# Threshold: Need piecewise linear LUT
elif self._postProcessingMethod == "threshold":
postProcLUT = lutInputs
postProcLUT[lutInputs < self._postProcessingMin] = 0.0
postProcLUT[lutInputs > self._postProcessingMax] = 1.0
# Raw: no LUT needed at all
else:
assert self._postProcessingMethod == "raw"
postProcLUT = None
# If we are in 'dual' phase mode, then we'll reflect
# the LUT on the negative side of zero to speed up
# processing inside the C function.
if False:
if postProcLUT is not None and self._phaseMode == 'dual':
# Make a reflected LUT
comboLut = numpy.concatenate((numpy.fliplr(postProcLUT[numpy.newaxis,:]),
postProcLUT[numpy.newaxis,:]),
axis=1)
# Now clone the reflected LUT and clip it's responses
# for positive and negative phases
postProcLUT = numpy.concatenate((comboLut, comboLut), axis=1).reshape(4*numLutBins)
# First half of it is for positive phase
postProcLUT[:numLutBins] = 0.0
# Second half of it is for negative phase
postProcLUT[-numLutBins:] = 0.0
# Store our LUT and it's pre-scaling factor
self._postProcLUT = postProcLUT
self._postProcLutScalar = postProcScalar
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _allocBuffers(self):
"""
Allocate some working buffers that are required
by the C implementation.
"""
# Allocate working buffers to be used by the C implementation
#self._buffers = [numpy.zeros(inputDim, dtype=numpy.int32) for inputDim in self._inputDims]
# Compute how much "padding" ou input buffers
# we will need due to boundary effects
if self._boundaryMode == 'sweepOff':
padding = self._filterDim - 1
else:
padding = 0
# For each scale, allocate a set of buffers
# Allocate a working "input buffer" of unsigned int32
# We want our buffers to have rows that are aligned on 16-byte boundaries
#self._bufferSetIn = []
#for inHeight, inWidth in self._inputDims:
# self._bufferSetIn = numpy.zeros((inHeight + padding,
# _alignToFour(inWidth + padding)),
# dtype=numpy.int32)
self._bufferSetIn = [numpy.zeros((inHeight + padding,
self._alignToFour(inWidth + padding)),
dtype=numpy.int32) \
for inHeight, inWidth in self._inputDims]
# Allocate a working plane of "output buffers" of unsigned int32
# We want our buffers to have rows that are aligned on 16-byte boundaries
#self._bufferSetOut = []
#for outHeight, outWidth in self._outputDims:
# self._bufferSetOut += numpy.zeros((self._numOrientations,
# outHeight,
# _alignToFour(outWith)),
# dtype=numpy.int32)
numBuffersNeeded = self._numOrientations
if self._centerSurround:
numBuffersNeeded += 1
self._bufferSetOut = [numpy.zeros((numBuffersNeeded,
outHeight,
self._alignToFour(outWidth)),
dtype=numpy.int32) \
for outHeight, outWidth in self._outputDims]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _initEphemerals(self):
self._gaborComputeProc = None
# For (optional) debug logging, we keep track of the number of
# images we have seen
self._imageCounter = 0
self._bufferSetIn = None
self._bufferSetOut = None
self._morphHeader = None
self._erosion = None
self._numScales = None
self._inputDims = None
self._outputDims = None
self._minInputDim = None
self._minOutputDim = None
self._inHeight = None
self._inWidth = None
self._outHeight = None
self._outWidth = None
self._postProcLUT = None
self._postProcLutScalar = None
self._filterPhase = None
self.response = None
self._responseImages = None
self._makeResponseImages = None
self.tdInput = None
self.selectedBottomUpOut = None
self._tdThreshold = None
self._morphHeader = None
if not hasattr(self, '_numPlanes'):
self._numPlanes = None
# Assign default values to missing parameters
for paramName, paramValue in self._defaults.items():
paramName = self._stripHidingPrefixIfPresent(paramName)
if not hasattr(self, "_%s" % paramName):
exec("self._%s = paramValue" % paramName)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getEphemeralMembers(self):
"""
Callback (to be overridden) allowing the class to publish a list of
all "ephemeral" members (i.e., data members that should not and/or
cannot be pickled.)
"""
# We can't pickle a pointer to a C function
return [
'_gaborComputeProc',
'_bufferSetIn',
'_bufferSetOut',
'_imageCounter',
'_morphHeader',
'_erosion',
]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _loadLibrary(self, libraryName, libSubDir=None):
"""
Utility method for portably loading a NuPIC shared library.
Note: we assume the library lives in the NuPIC "lib" directory.
@param: libraryName - the name of the library (sans extension)
@returns: reference to the loaded library; otherwise raises
a runtime exception.
"""
# By default, we will look for our shared library in our
# bindings directory.
if not libSubDir:
libSubDir = "bindings"
# Attempt to load the library
try:
# All of these shared libraries are python modules. Let python find them
# for us. Once it finds us the path, we'll load it with CDLL.
dottedPath = ('.'.join(['nupic', libSubDir, libraryName]))
exec("import %s" % dottedPath)
libPath = eval("%s.__file__" % dottedPath)
lib = ctypes.cdll.LoadLibrary(libPath)
# These calls initialize the logging system inside
# the loaded library. Disabled for now.
# See comments at INIT_FROM_PYTHON in gaborNode.cpp
# pythonSystemRefP = PythonSystem.getInstanceP()
# lib.initFromPython(ctypes.c_void_p(pythonSystemRefP))
return lib
except Exception, e:
print "Warning: Could not load shared library: %s" % libraryName
print "Exception: %s" % str(e)
return None
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def compute(self, inputs, outputs):
"""
Run one iteration of fat node, profiling it if requested.
Derived classes should NOT override this method.
The guts of the compute are contained in the _compute() call so that
we can profile it if requested.
"""
# Modify this line to turn on profiling for a given node. The results file
# ('hotshot.stats') will be sensed and printed out by the vision framework's
# RunInference.py script and the end of inference.
# Also uncomment the hotshot import at the top of this file.
if False:
if self._profileObj is None:
self._profileObj = hotshot.Profile("hotshot.stats", 1, 1)
# filename, lineevents, linetimings
self._profileObj.runcall(self._gaborCompute, *[inputs, outputs])
else:
self._gaborCompute(inputs, outputs)
self._imageCounter += 1
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getUpperLeftPixelValue(self, inputs, validAlpha=None):
"""
Extract the intensity value of the upper-left pixel.
"""
# Obtain raw input pixel data
#buInputVector = inputs['bottomUpIn'][0].array()
buInputVector = inputs['bottomUpIn']
# Respect valid region for selection of
# color key value
pixelIndex = 0
# If we have an alpha channel, then we need to find
# the first pixel for which the alpha is nonzero
if validAlpha is not None:
# Temporarily decode the polarity that is stored
# in the first alpha element
indicatorValue = validAlpha[0,0]
if indicatorValue < 0.0:
validAlpha[0,0] = -1.0 - indicatorValue
alphaLocns = numpy.where(validAlpha >= 0.5)[0]
# Put the indicator back
validAlpha[0,0] = indicatorValue
# If there are no positive alpha pixels anywhere, then
# just use white (255) as the color key (which may not
# be the "correct" thing to do, but we have no other
# options really.
if len(alphaLocns) == 0:
return 255.0;
pixelIndex = alphaLocns[0]
# Otherwise, if we have a bounding box, then we
# need to find the first (upper-left) pixel in
# the valid bounding box
elif 'validRegionIn' in inputs:
#validRegionIn = inputs['validRegionIn'][0].array()
validRegionIn = inputs['validRegionIn']
left = int(validRegionIn[0])
top = int(validRegionIn[1])
if left > 0 or top > 0:
pixelIndex = left + top * int(self._inWidth)
return buInputVector[pixelIndex]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _gaborCompute(self, inputs, outputs):
    """
    Run one iteration of multi-node.
    We are taking the unconventional approach of overridding the
    base class compute() method in order to avoid applying the
    splitter map, since this is an expensive process for a densely
    overlapped node such as GaborNode.

    @param inputs -- dict-like collection of input vectors. Reads
           'bottomUpIn' (required) plus optional 'validRegionIn' and
           'validAlphaIn' entries.
    @param outputs -- output collection; handed to _computeWithC() during
           bottom-up inference and to PyRegion._topDownCompute() during
           top-down inference.
    """
    # Build our Gabor Bank (first time only)
    self._buildGaborBankIfNeeded()
    # If we are using "color-key" mode, then detect the value of
    # the upper-left pixel and use it as the value of
    # 'offImagePixelValue'
    if self._offImagePixelValue == "colorKey":
      offImagePixelValue = self._getUpperLeftPixelValue(inputs)
    else:
      offImagePixelValue = float(self._offImagePixelValue)
    # Fast C implementation
    # Get our inputs into numpy arrays
    buInputVector = inputs['bottomUpIn']
    validRegionIn = inputs.get('validRegionIn', None)
    # Obtain access to valid alpha region, if it exists
    # and if we are configured to use the pixel-accurate
    # alpha validity mask (as opposed to using the
    # valid bounding box.)
    if self._suppressByAlpha and 'validAlphaIn' in inputs:
      if self._numScales > 1:
        raise NotImplementedError("Multi-scale GaborNodes cannot currently handle alpha channels")
      # We assume alpha channels are expressed in a format in
      # which '0.0' corresponds to total suppression of
      # responses, and '255.0' corresponds to no suppression
      # whatsoever, and intermediate values apply a linearly
      # proportional degree of suppression (e.g., a value of
      # '127.5' would result in a 50% suppression of the
      # raw responses.)
      #validAlpha = inputs['validAlphaIn'][0].array()[:, numpy.newaxis] * (1.0/255.0)
      # Scale alpha from [0, 255] down to [0, 1] and make it a column.
      validAlpha = inputs['validAlphaIn'][:, numpy.newaxis] * (1.0/255.0)
      # If we are using an alpha channel, then it will take
      # a bit more work to find the correct "upper left"
      # pixel because we can't just look for the first
      # upper-left pixel in the valid bounding box; we have
      # to find the first upper-left pixel in the actual
      # valid alpha zone.
      if self._offImagePixelValue == "colorKey":
        offImagePixelValue = self._getUpperLeftPixelValue(inputs, validAlpha)
    else:
      validAlpha = None
    if self.nta_phaseIndex == 0: # Do bottom-up inference.
      self._computeWithC(buInputVector, validRegionIn,
                         outputs, offImagePixelValue, validAlpha)
      # Cache input. The output is already stored in self.response
      if self._topDownCombiner is not None and self._stage == 'infer':
        self._cachedBUInput = buInputVector
        self._cachedValidRegionIn = validRegionIn
    else: # Try top-down inference.
      # Compare the current inputs against the values cached by the last
      # bottom-up pass; empty arrays stand in for never-cached values.
      cachedBUInput = self._cachedBUInput \
          if self._cachedBUInput is not None else numpy.zeros(0)
      validCachedBUInput = numpy.array_equal(buInputVector, cachedBUInput)
      cachedValidRegionIn = self._cachedValidRegionIn \
          if self._cachedValidRegionIn is not None else numpy.zeros(0)
      validCachedValidRegionIn = ((validRegionIn is None) or
          numpy.array_equal(validRegionIn, cachedValidRegionIn))
      # See if we can use the cached values from the last bottom up compute. For better performance,
      # we only perform the cache checking when we know we might have top down computes.
      topDownConditionsMet = (self.nta_phaseIndex == 1) and \
                             (self._stage == 'infer') and \
                             (self._topDownCombiner is not None) and \
                             validCachedBUInput and validCachedValidRegionIn
      if not topDownConditionsMet:
        # Warn (rather than raise) and skip the top-down pass entirely.
        message = (
          ("Top-down conditions were not met for GaborNode:\n") +
          ("  phaseIndex=%s (expected %d)\n" % (self.nta_phaseIndex, 1)) +
          ("  stage='%s' (expected '%s')\n" % (self._stage, "infer")) +
          ("  topDownCombiner is %s (expected not None)\n" %
           ("not None" if (self._topDownCombiner is not None) else "None")) +
          ("  buInputVector %s cache (expected ==)\n" %
           ("==" if validCachedBUInput else "!=")) +
          ("  validRegionIn %s cache (expected ==)\n" %
           ("==" if validCachedValidRegionIn else "!="))
        )
        import warnings
        warnings.warn(message, stacklevel=2)
        return
      # No need to copy to the node outputs, they should be the same as last time.
      # IMPORTANT: When using the pipeline scheduler, you MUST write to the output buffer
      # each time because there are 2 output buffers. But, we know that for feedback
      # networks, the pipleline scheduler cannot and will not be used, so it's OK to
      # skip the write to the output when we have top down computes.
      # Perform the topDown compute instead
      #print "Gabor topdown"
      buOutput = self.response.reshape(self._inputSplitter.shape[0], self._numPlanes)
      PyRegion._topDownCompute(self, inputs, outputs, buOutput,
                               buInputVector)
    # DEBUG DEBUG
    #self._logPrefix = "debug"
    #print "WARNING: using a hacked version of GaborNode.py [forced logging]"
    # Write debugging images
    if self._logPrefix is not None:
      self._doDebugLogging()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doDebugLogging(self):
"""
Dump the most recently computed responses to logging image files.
"""
preSuppression = False
# Make the response images if they haven't already been made
if not self._makeResponseImages:
self._genResponseImages(self.response, preSuppression=False)
# Write the response images to disk
imageSet = self._responseImages[self._getResponseKey(preSuppression=False)]['bottomUp']
for orient, orientImages in imageSet.items():
for scale, image in orientImages.items():
if type(scale) == type(0):
if type(orient) == type(0):
orientCode = "%02d" % orient
else:
orientCode = "%s" % orient
debugPath = "%s.img-%04d.scale-%02d.orient-%s.png" % (self._logPrefix,
self._imageCounter,
scale, orientCode)
self.deserializeImage(image).save(debugPath)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def filter(self, image, validRegionIn=None,
             orientation='all', phase=0,
             scaleIndex=0,
             cachedResponse=None,
             gain=1.0):
    """
    Perform gabor filtering on a PIL image, and return a PIL
    image containing the composite responses.

    @param image -- single-band PIL image; its size must match the input
           dimensions for 'scaleIndex' (asserted below).
    @param validRegionIn: [left, top, right, bottom]; defaults to the
           full image.
    @param orientation -- 'all' to render a composite of every response
           plane, 'centerSurround', or an integer plane index.
    @param phase -- if > 0, selects the second (negative) phase planes.
    @param scaleIndex -- index into the node's input/output pyramids.
    @param cachedResponse -- previously computed response array; when
           provided, the gabor computation itself is skipped.
    @param gain -- multiplicative factor applied when rendering responses
           to 8-bit pixels.
    @returns (finalResponse, response) -- the rendered PIL image and the
           raw response array (reusable as 'cachedResponse' later).
    """
    if validRegionIn is None:
      validRegionIn = (0, 0, image.size[0], image.size[1])
    # Decide whether or not to use numpy
    self._buildGaborBankIfNeeded()
    # Determine proper input/output dimensions
    inHeight, inWidth = self._inputDims[scaleIndex]
    outHeight, outWidth = self._outputDims[scaleIndex]
    # (inputSize/outputSize are computed but not referenced below.)
    inputSize = inHeight * inWidth
    outputSize = outHeight * outWidth * self._numPlanes
    inputVector = numpy.array(image.getdata()).astype(RealNumpyDType)
    inputVector.shape = (inHeight, inWidth)
    assert image.size[1] == inHeight
    assert image.size[0] == inWidth
    # Locate correct portion of output
    outputVector = numpy.zeros((outHeight, outWidth, self._numPlanes), dtype=RealNumpyDType)
    outputVector.shape = (self._numPlanes, outHeight, outWidth)
    inputVector.shape = (inHeight, inWidth)
    # Use a provided responses
    if cachedResponse is not None:
      response = cachedResponse
    # If we need to re-generate the gabor response cache:
    else:
      # If we are using "color-key" mode, then detect the value of
      # the upper-left pixel and use it as the value of
      # 'offImagePixelValue'
      if self._offImagePixelValue == "colorKey":
        # Respect valid region for selection of
        # color key value
        [left, top, right, bottom] = validRegionIn
        offImagePixelValue = inputVector[top, left]
        #offImagePixelValue = inputVector[0, 0]
      else:
        offImagePixelValue = self._offImagePixelValue
      # Extract the bounding box signal (if present).
      # Normalize the pixel-space box to [0, 1] fractions of the input.
      validPyramid = validRegionIn / numpy.array([self._inWidth,
                                                  self._inHeight,
                                                  self._inWidth,
                                                  self._inHeight],
                                                  dtype=RealNumpyDType)
      # Compute the bounding box to use for our C implementation
      bbox = self._computeBBox(validPyramid, outWidth, outHeight)
      imageBox = numpy.array([0, 0, self._inputDims[scaleIndex][1],
                              self._inputDims[scaleIndex][0]],
                              dtype=numpy.int32)
      # Perform gabor processing
      self._doGabor(inputVector, bbox, imageBox, outputVector, scaleIndex, offImagePixelValue)
      outputVector = numpy.rollaxis(outputVector, 0, 3)
      outputVector = outputVector.reshape(outWidth * outHeight, self._numPlanes).flatten()
      assert outputVector.dtype == RealNumpyDType
      # NOTE(review): relies on Python 2 integer division; under Python 3
      # '/' would yield a float and break the reshape below.
      numLocns = len(outputVector) / self._numPlanes
      response = outputVector.reshape(numLocns, self._numPlanes)
    nCols, nRows = self._outputPyramidTopology[scaleIndex]['numNodes']
    startNodeIdx, stopNodeIdx = self._getNodeRangeByScale(scaleIndex)
    # Make composite response
    if orientation == 'all':
      # Build all the single-orientation responses
      responseSet = []
      for responseIdx in xrange(self._numPlanes):
        img = Image.new('L', (nCols, nRows))
        img.putdata((gain * 255.0 * response[:stopNodeIdx-startNodeIdx, responseIdx]).astype(numpy.uint8))
        responseSet += [img]
      finalResponse = self._makeCompositeImage(responseSet)
    # Make an individual response
    else:
      img = Image.new('L', (nCols, nRows))
      # Map symbolic/phase-adjusted orientation selectors onto plane
      # indices: negative-phase planes follow the positive-phase ones.
      if orientation == 'centerSurround':
        orientation = self._numOrientations
      if phase > 0:
        orientation += self._numOrientations
        if self._centerSurround:
          orientation += 1
      img.putdata((gain * 255.0 * response[:stopNodeIdx-startNodeIdx, orientation]).astype(numpy.uint8))
      finalResponse = img
    return finalResponse, response
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _buildGaborBankIfNeeded(self):
"""
Check to see if we have a Gabor Bank, and if not, then build it.
"""
if self._gaborBank is None:
self._buildGaborBank()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _doCompute(self, rfInput, rfMask, rfSize, resetSignal, validPyramid):
    """
    Actual compute() implementation. This is a placeholder that should
    be overridden by derived sub-classes
    @param inputPyramid -- a list of numpy array containing planes of the
                input pyramid.
    @param rfMask -- a 2-dimensional numpy array (of same shape as 'inputPyramid')
                that contains a value of 0.0 for every element that corresponds
                to a padded "dummy" (sentinel) value within 'inputPyramid', and
                a value of 1.0 for every real input element.
    @param rfSize -- a 1-dimensional numpy array (same number of rows as
                'inputPyramid') containing the total number of real (non-dummy)
                elements for each row of 'inputPyramid'.
    @param reset -- boolean indicating whether the current input is the first
                of a new temporal sequence.
    @param validPyramid -- a 4-element numpy array (vector) that specifies the
                zone in which the input pyramid is "valid".  A point in the
                pyramid is "valid" if that point maps to a location in the
                original image, rather than a "padded" region that was added
                around the original image in order to scale/fit it into the
                dimensions of the input pyramid.
                The 4-element array is in the following format:
                    [left, top, right, bottom]
                where 'left' is the fraction (between 0 and 1) of the width of
                the image where the valid zone begins, etc.
    Returns:
         outputPyramid -- a list of numpy arrays containing planes of the
                output pyramid.

    NOTE(review): rfMask, rfSize, and resetSignal are accepted but never
    referenced in this implementation.
    """
    # (numGaborFilters is computed but not referenced below.)
    numGaborFilters = self._gaborBank.shape[1]
    numOutputLocns = rfInput.shape[0]
    # ---------------------------------------------------------------
    # Conceptual pipeline:
    #
    # 1. Apply Gabor filtering upon the input pixels X to
    #    generate raw responses Y0  Even in dual-phase mode,
    #    we will only need to perform the actual computations
    #    on a single phase (because the responses can be inverted).
    #
    # 2. Rectify the raw Gabor responses Y0 to produce rectified
    #    responses Y1.
    #
    # 3. Apply an adaptive normalization operation to the
    #    rectified responses Y1 to produce Y2.
    #
    # 4. Amplify the normalized responses Y2 by a fixed gain G
    #    to produce amplified responses Y3.
    #
    # 5. Apply post-processing upon the amplified responses Y3 to
    #    produce final responses Z.
    #
    #----------------------------------
    # Step 1 - Raw Gabor filtering:
    # Convolve each output location against the complete gabor bank.
    responseRaw = numpy.dot(rfInput, self._gaborBank)
    #----------------------------------
    # Step 2 - Rectify responses:
    effectiveInfinity = 1.0e7
    if self._phaseMode == 'single':
      responseRectified = numpy.abs(responseRaw)
    elif self._phaseMode == 'dual':
      # In dual mode, positive and negative lobes become separate planes.
      responseRectified = numpy.concatenate((responseRaw.clip(min=0.0, max=effectiveInfinity),
                                             (-responseRaw).clip(min=0.0, max=effectiveInfinity)),
                                            axis=1)
    # NOTE(review): no else branch -- an unrecognized _phaseMode would
    # leave responseRectified undefined (NameError below).
    #----------------------------------
    # Step 3 - Adaptive normalization:
    # Step 4 - Amplification
    # If we are not doing any normalization, then it is easy:
    if self._normalizationMethod == 'fixed':
      # In 'fixed' mode, we simply apply a default normalization
      # that takes into account the fact that the input range
      # lies between 0 and 255.
      responseAmplified = responseRectified * (self._gainConstant / 255.0)
    # Otherwise, we have to perform normalization
    else:
      # First we'll apply the power rule, if needed
      if self._normalizationMethod in ['meanPower', 'maxPower']:
        responseToUse = (responseRectified * responseRectified)
      elif self._normalizationMethod in ['mean', 'max']:
        responseToUse = responseRectified
      # NOTE(review): an unrecognized _normalizationMethod would leave
      # responseToUse undefined here.
      # At this point, our responseRectified array is of
      # the shape (totNumOutputLocns, numOrients)
      # First, we will perform the max/mean operation over
      # the spatial dimensions; the result will be an
      # intermediate array of the shape:
      # (numScales, numOrients) which will contain the
      # max/mean over the spatial dimensions for each
      # scale and orientation.
      numLayers = len(self._inputPyramidTopology)
      layerOffsets = self._computeLayerOffsets(self._inputPyramidTopology)
      responseStats = []
      for k in xrange(numLayers):
        startOffset = layerOffsets[k]
        stopOffset = layerOffsets[k+1]
        if self._normalizationMethod in ['max', 'maxPower']:
          responseStats += [responseToUse[startOffset:stopOffset].max(axis=0)[numpy.newaxis, :]]
        elif self._normalizationMethod in ['mean', 'meanPower']:
          responseStats += [responseToUse[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
      responseStats = numpy.array(responseStats).reshape(numLayers, self._numPlanes)
      # This should be a numpy array containing the desired statistics
      # over the spatial dimensions; one statistic for each tuple
      # of (scale, orientation)
      # If we used a power law, then take the square root of the statistics
      if self._normalizationMethod in ['maxPower', 'meanPower']:
        responseStats = numpy.sqrt(responseStats)
      # Compute statistics over orientation (if needed)
      if not self._perOrientNormalization:
        if self._normalizationMethod in ['max', 'maxPower']:
          responseStats = responseStats.max(axis=1)
        elif self._normalizationMethod in ['mean', 'meanPower']:
          responseStats = responseStats.mean(axis=1)
        responseStats = responseStats[:, numpy.newaxis]
        # At this point, responseStats is of shape: (numLayers, 1)
      # Compute statistics over scale (if needed)
      if not self._perScaleNormalization:
        if self._normalizationMethod in ['max', 'maxPower']:
          responseStats = responseStats.max(axis=0)
        elif self._normalizationMethod in ['mean', 'meanPower']:
          responseStats = responseStats.mean(axis=0)
        # Expand back out for each scale
        responseStats = responseStats[numpy.newaxis, :] * numpy.ones((numLayers, 1))
      # Expand back out for each orientation
      if not self._perOrientNormalization:
        responseStats = responseStats[:, numpy.newaxis] * numpy.ones((1, self._numPlanes))
      # Step 4 - Amplification
      responseStats = responseStats.reshape(numLayers, self._numPlanes)
      gain = self._gainConstant * numpy.ones((numLayers, self._numPlanes), dtype=RealNumpyDType)
      # Avoid division by zero: only scale where the statistic is positive.
      nonZeros = numpy.where(responseStats > 0.0)
      gain[nonZeros] /= responseStats[nonZeros]
      # Fast usage case: neither per-scale nor per-orient normalization
      if not self._perScaleNormalization and not self._perOrientNormalization:
        responseAmplified = responseRectified * gain[0, 0]
      # Somewhat slower: per-orient (but not per-scale) normalization
      elif not self._perScaleNormalization:
        responseAmplified = responseRectified * gain[0, :]
      # Slowest: per-scale normalization
      else:
        responseAmplified = None
        for k in xrange(numLayers):
          startOffset = layerOffsets[k]
          stopOffset = layerOffsets[k+1]
          if not self._perOrientNormalization:
            gainToUse = gain[k, 0]
          else:
            gainToUse = gain[k, :]
          thisResponse = responseRectified[startOffset:stopOffset, :] * gainToUse
          if responseAmplified is None:
            responseAmplified = thisResponse
          else:
            responseAmplified = numpy.concatenate((responseAmplified, thisResponse), axis=0)
    #----------------------------------
    # Step 5 - Post-processing
    # No post-processing (linear)
    if self._postProcessingMethod == "raw":
      responseFinal = responseAmplified
    # Sigmoidal post-processing
    elif self._postProcessingMethod == "sigmoid":
      # Shift/scale the sigmoid so that an input of 0 maps to exactly 0.
      offset = 1.0 / (1.0 + numpy.exp(self._postProcessingSlope * self._postProcessingCenter))
      scaleFactor = 1.0 / (1.0 - offset)
      # Clip the exponent to [-40, 40] to avoid overflow in exp().
      responseFinal = ((1.0 / (numpy.exp(numpy.clip(self._postProcessingSlope \
                    * (self._postProcessingCenter - responseAmplified), \
                    -40.0, 40.0)) + 1.0)) - offset) * scaleFactor
    # Piece-wise linear post-processing
    elif self._postProcessingMethod == "threshold":
      # NOTE(review): responseFinal aliases responseAmplified here (no
      # copy), so the masked assignments below mutate both names.
      responseFinal = responseAmplified
      responseFinal[responseAmplified < self._postProcessingMin] = 0.0
      responseFinal[responseAmplified > self._postProcessingMax] = 1.0
    #----------------------------------
    # Optional: Dump statistics for comparative purposes
    #self._dumpStats(responseFinal, "gabor.stats.txt")
    # Generate raw response images (prior to suppression)
    if self._makeResponseImages:
      self._genResponseImages(responseFinal, preSuppression=True)
    # Apply suppression to responses outside valid pyramid.
    if self._suppressOutsideBox:
      self._applyValiditySuppression(responseFinal, validPyramid)
    # Perform the zeroOutThreshold clipping now if requested
    if self._zeroThresholdOut > 0.0:
      # Get the max of each node
      nodeMax = responseFinal.max(axis=1).reshape(numOutputLocns)
      # Zero out children where all elements are below the threshold
      responseFinal[nodeMax < self._zeroThresholdOut] = 0
    # Generate final response images (after suppression)
    if self._makeResponseImages:
      self._genResponseImages(responseFinal, preSuppression=False)
    # Store the response so that it can be retrieved later
    self.response = responseFinal
    return responseFinal
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _applyValiditySuppression(self, response, validPyramid):
"""
Apply suppression to responses outside valid pyramid.
This overrides the default PyRegion implementation.
"""
# We compute the valid fraction of each output locations' RF by
# computing the valid fraction of it's spatial dimension.
# @todo -- Generalize this to handle more than two spatial dimensions.
validX = (self._rfMaxX.clip(min=validPyramid[0], max=validPyramid[2]) - \
self._rfMinX.clip(min=validPyramid[0], max=validPyramid[2])) * \
self._rfInvLenX
validY = (self._rfMaxY.clip(min=validPyramid[1], max=validPyramid[3]) - \
self._rfMinY.clip(min=validPyramid[1], max=validPyramid[3])) * \
self._rfInvLenY
# At this point the validX and validY numpy vectors contain values
# between 0 and 1 that encode the validity of each output location
# with respect to the X and Y spatial dimensions, respectively.
# Now we map the raw validities of each output location into
# suppression factors; i.e., a scalar (for each output location)
# that will be multiplied against each response for that particular
# output location.
# Use a hard threshold:
# Discovered a nasty, subtle bug here. The code used to be like this:
#
# suppressionFactor = ((validX * validY) >= self._validitySuppressionLow).astype(RealNumpyDType)
#
# However, in the case of validitySuppressionLow of 1.0, numpy experienced
# "random" roundoff errors, and nodes for which both validX and validY were
# 1.0 would be computed as 1 - epsilon, which would fail the test against
# validitySuppressionLow, and thus get suppressed incorrectly.
# So we introduced an epsilon to deal with this situation.
suppressionFactor = ((validX * validY) + self._epsilon >= \
self._validitySuppressionLow).astype(RealNumpyDType)
# Apply the suppression factor to the output response array
response *= suppressionFactor[:, numpy.newaxis]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _dumpStats(self, response, statsLogPath):
"""
In order to do a kind of "unit testing" of the GaborNode
tuning parameters for a particular application, it is useful
to dump statistics on the responses at different scales
and orientations/phases.
We'll dump the following statistics for each (scale, orientation) tuple:
* response mean
* response standard deviation
* power mean (squared response mean)
* response max
@param response -- response array of shape (totNumOutputLocns, numOrients)
"""
meanResponse = []
meanPower = []
stddevResponse = []
maxResponse = []
# Compute a squared (power) response
power = response * response
# Compute our mean/max/stddev statistics over the spatial dimensions
# for each scale and for each orientation. The result will be four
# array of shape: (numScales, numOrients) which will contain the
# statistics over the spatial dimensions for each scale and orientation.
numLayers = len(self._outputPyramidTopology)
layerOffsets = self._computeLayerOffsets(self._outputPyramidTopology)
for k in xrange(numLayers):
startOffset = layerOffsets[k]
stopOffset = layerOffsets[k+1]
# Mean response
meanResponse += [response[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
# Max response
maxResponse += [response[startOffset:stopOffset].max(axis=0)[numpy.newaxis, :]]
# Std. deviation response
stddevResponse += [response[startOffset:stopOffset].std(axis=0)[numpy.newaxis, :]]
# Mean power
meanPower += [power[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
# Now compile the responses at each scale into overall arrays
# of shape: (numScales, numOrientations)
meanResponse = numpy.array(meanResponse).reshape(numLayers, self._numPlanes)
maxResponse = numpy.array(maxResponse).reshape(numLayers, self._numPlanes)
stddevResponse = numpy.array(stddevResponse).reshape(numLayers, self._numPlanes)
meanPower = numpy.array(meanPower).reshape(numLayers, self._numPlanes)
# Finally, form the different statistics into a single desriptive vector
responseStats = numpy.concatenate((meanResponse[numpy.newaxis,:,:],
maxResponse[numpy.newaxis,:,:],
stddevResponse[numpy.newaxis,:,:],
meanPower[numpy.newaxis,:,:]), axis=0)
# Append to the stats log
fpStatsLog = open(statsLogPath, "a")
response = " ".join(["%f" % x for x in responseStats.flatten().tolist()])
fpStatsLog.write(response + "\n")
fpStatsLog.close()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _doTopDownInfer(self, tdInput, tdNumParents, buOutput, buInput):
    """
    Actual top down compute() implementation. This is a placeholder that should
    be overridden by derived sub-classes.
    @param tdInput -- a 3D array containing the top-down inputs to each baby node.
            Think of this as N 2D arrays, where N is the number of baby nodes.
            Each baby node's 2D array has R rows, where each row is the top-down
            output from one of the parents. The width of each row is equal to the
            width of the bottomUpOut of the baby node. If a baby node
            has only 2 parents, but R is 5 for example, then the last 3 rows
            of the 2D array will contain all 0's. The tdNumParents argument
            can be referenced to find out how many parents the node actually has.
            The tdInput array is structured in this manner to make it easy to
            sum the contributions from the parents. All the sub-class needs to
            do is a numpy.add.reduce(tdInput, axis=1).
    @param tdNumParents a vector whose length is equal to the number of baby nodes. Each
            element contains the number of parents of each baby node.
    @param buInput -- a 2D array containing the bottom-up inputs to each baby node.
            This is the same input that is passed to the _doCompute() method,
            but it is called rfInput there.
    @param buOutput -- a 2D array containing the results of the bottomUp compute for
            this node. This is a copy of the return value returned from the
            _doCompute method of the node.
    Returns:
       tdOutput -- a 2-D numpy array containing the outputs from each baby node. Each
            row is a baby node output.
    """
    # NOTE: Making this a float32 makes the copy to the node outputs at the end of
    # the compute faster.
    #tdOutput = numpy.zeros(self._inputSplitter.shape, dtype='float32')
    # print "Top-down infer called on a Gabor node. Use breakpoint to step through"
    # print "and make sure things are as expected:"
    # import pdb; pdb.set_trace()
    numBabyNodes = len(tdInput)
    numOrients = len(tdInput[0][0])
    assert self._numPlanes == numOrients # Number of filters must match top-down input
    tdThreshold = numpy.ones((numBabyNodes, numOrients))
    # Feature flags selecting which of the optional top-down behaviors
    # below are applied.
    version=('tdThreshold', 'combine', 'td_normalize')
    minResponse=1e-10
    # Average top-down inputs for each baby Node
    tdInput_avg = numpy.add.reduce(tdInput, axis=1) / tdNumParents
    # For the gabor node, we will usually get 1 orientation fed down from
    # the complex level above us. This is because the SparsePooler above that
    # sparsified it's inputs and only saves one orientation from each complex node.
    # But, for the Gabor node which is at the bottom of the hierarchy, it makes more
    # sense to spread the topdown activation among all the orientations since
    # each gabor covers only a few pixels and won't select one object from another.
    tdMaxes = tdInput_avg.max(axis=1)
    # Broadcast each baby node's max across all of its orientations.
    tdInput_avg *= 0
    tdInput_avg += tdMaxes.reshape(-1,1)
    if tdInput_avg.max() <= minResponse:
      #print "Top-down Input is Blank"
      pass
    else:
      if 'combine' in version: # Combine top-down and bottom-up inputs
        tdInput_avg *= buOutput
      if 'td_normalize' in version: # Normalize top-down inputs for viewing
        # td_max = tdInput_avg.max()
        # tdInput_avg /= td_max
        td_max = tdInput_avg.max()
        if td_max != 0:
          tdInput_avg /= td_max
      if 'tdThreshold' in version: # Use tdInput_avg to threshold bottomUp outputs
        # NOTE(review): _initEphemerals() pre-sets _tdThreshold to None,
        # in which case this hasattr guard never fires and the comparison
        # below is against None -- verify this is intended.
        if not hasattr(self, '_tdThreshold'):
          self._tdThreshold = 0.01
        tdThreshold = tdInput_avg > self._tdThreshold
    self.tdInput = tdInput_avg
    self.selectedBottomUpOut = buOutput * tdThreshold
    theMax = self.selectedBottomUpOut.max()
    if theMax > 0:
      self.selectedBottomUpOut /= theMax
    # Generate response images
    if self._makeResponseImages:
      self._genResponseImages(self.tdInput, preSuppression=False, phase='topDown')
      self._genResponseImages(self.selectedBottomUpOut, preSuppression=False,
                              phase='combined')
    # Generate the topDown outputs. At this point, tdMaxes contains the max gabor orientation
    # output from each baby node. We will simply "spread" this value across all of the
    # topDown outputs for each baby node as an indication of their input activation level.
    # In a perfect world, you would try and reconstruct the input by summing the inverse of the
    # gabor operation for each output orientation. But, for now, we are only using the top
    # down output of the Gabor as an indication of the relative input strength to each gabor
    # filter - essentially as a mask on the input image.
    tdOutput = numpy.ones(self._inputSplitter.shape, dtype='float32')
    tdOutput *= tdMaxes.reshape(-1,1)
    # Save the maxTopDownOut for each baby node so that it can be returned as a read-only
    # parameter. This provides faster performance for things like the top down image inspector
    # that only need the max output from each node
    self._maxTopDownOut = tdMaxes
    return tdOutput
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _computeWithC(self,
                    inputPlane,
                    validRegionIn,
                    outputs,
                    offImagePixelValue,
                    validAlpha):
    """
    Perform Gabor processing using custom C library.

    @param inputPlane -- flat float32 numpy vector holding the concatenated
           input images for every scale of the pyramid.
    @param validRegionIn -- (left, top, right, bottom) bounding box of the
           valid input region in pixels, or None for the whole image.
    @param outputs -- dict of output buffers; 'bottomUpOut' is read as a
           flat float32 vector and overwritten in place with the responses.
    @param offImagePixelValue -- pixel value substituted for locations that
           fall off the edge of the image.
    @param validAlpha -- optional per-pixel alpha mask marking the valid
           region; eroded/dilated before use (see _adjustAlphaChannel).
    """
    # Default to the full image if the sensor supplied no bounding box
    if validRegionIn is None:
      validRegionIn = (0, 0, self._inWidth, self._inHeight)
    inputLen = len(inputPlane)
    # Decide whether the input pyramid is "padded" (every level stored at
    # the full base resolution -- the normal case) or "packed" (each level
    # stored at its own resolution -- the deployed case).
    if self._inputPyramidTopology is None or \
       inputLen == self._inWidth * self._inHeight * len(self._inputPyramidTopology):
      isPadded = True
    else:
      assert inputLen == sum([lvl['numNodes'][0] * lvl['numNodes'][1] \
                             for lvl in self._inputPyramidTopology])
      isPadded = False
    # Extract the bounding box signal (if present).
    # Convert the pixel-coordinate box into fractions of the image so it
    # can be re-scaled per pyramid level inside the loop below.
    validPyramid = validRegionIn / numpy.array([self._inWidth,
                                                self._inHeight,
                                                self._inWidth,
                                                self._inHeight],
                                               dtype=RealNumpyDType)
    # First extract a numpy array containing the entire input vector
    assert inputPlane.dtype == numpy.float32
    # Convert the output images to a numpy vector
    #outputPlane = outputs['bottomUpOut'].wvector()[:].array()
    outputPlane = outputs['bottomUpOut']
    assert outputPlane.dtype == numpy.float32
    # Running offsets into the flat input/output vectors as we walk scales
    inputOffset = 0
    outputOffset = 0
    for scaleIndex in xrange(self._numScales):
      # Handle padded case (normal)
      if isPadded:
        inputScaleIndex = 0
      # Handle packed case (deployed)
      else:
        inputScaleIndex = scaleIndex
      # Determine proper input/output dimensions
      inHeight, inWidth = self._inputDims[inputScaleIndex]
      outHeight, outWidth = self._outputDims[scaleIndex]
      inputSize = inHeight * inWidth
      outputSize = outHeight * outWidth * self._numPlanes
      # Locate correct portion of input (view, not copy; reshaped in place)
      inputVector = inputPlane[inputOffset:inputOffset+inputSize]
      inputOffset += inputSize
      inputVector.shape = (inHeight, inWidth)
      # Locate correct portion of output
      outputVector = outputPlane[outputOffset:outputOffset+outputSize]
      outputVector.shape = (self._numPlanes, outHeight, outWidth)
      # Compute the bounding box to use for our C implementation
      bbox = self._computeBBox(validPyramid, self._inputDims[scaleIndex][1],
                               self._inputDims[scaleIndex][0])
      imageBox = numpy.array([0, 0, self._inputDims[scaleIndex][1],
                              self._inputDims[scaleIndex][0]],
                             dtype=numpy.int32)
      ## --- DEBUG CODE ----
      #global id
      #o = inputVector
      #print outputVector.shape, len(o)
      #f = os.path.abspath('gabor_input_%d.txt' % id)
      #print f
      #numpy.savetxt(f, o)
      #id += 1
      ##from dbgp.client import brk; brk(port=9019)
      ## --- DEBUG CODE END ----
      # Erode and/or dilate the alpha channel
      # @todo -- This should be moved into the C function
      if validAlpha is not None:
        validAlpha = self._adjustAlphaChannel(validAlpha)
      # Perform gabor processing
      self._doGabor(inputVector,
                    bbox,
                    imageBox,
                    outputVector,
                    scaleIndex,
                    offImagePixelValue,
                    validAlpha)
      # Optionally, dump working buffers for debugging purposes
      if self._debugLogBuffers:
        self._logDebugBuffers(outputVector, scaleIndex);
      # Note: it would be much better if we did not have to do this
      # post-processing "transposition" operation, and instead just
      # performed all the different orientation computations for
      # each pixel.
      # Note: this operation costs us about 1 msec
      # Reorder (plane, row, col) -> (row, col, plane) so each output row
      # holds all orientations for one location.
      outputVector = numpy.rollaxis(outputVector, 0, 3)
      outputVector = outputVector.reshape(outWidth * outHeight, self._numPlanes)
      assert outputVector.dtype == numpy.float32
      # Perform the zeroOutThreshold clipping now if requested
      # @todo -- This should be moved into the C function
      if self._zeroThresholdOut > 0.0:
        # Get the max of each node
        nodeMax = outputVector.max(axis=1).reshape(outWidth * outHeight)
        # Zero out children where all elements are below the threshold
        outputVector[nodeMax < self._zeroThresholdOut] = 0.0
      outputPlane[outputOffset:outputOffset+outputSize] = outputVector.flatten()
      outputOffset += outputSize
    # Generate final response images (after suppression)
    if self._makeResponseImages:
      self._genResponseImages(outputPlane, preSuppression=False)
    # Store the response so that it can be retrieved later
    self.response = outputPlane
    ## --- DEBUG CODE ----
    #global id
    #o = outputPlane
    ##print outputVector.shape, len(o)
    #f = os.path.abspath('gabor_output_%d.txt' % id)
    #print f
    #numpy.savetxt(f, o)
    #id += 1
    ##from dbgp.client import brk; brk(port=9019)
    ## --- DEBUG CODE END ----
    # De-multiplex inputs/outputs
    #outputs['bottomUpOut'].wvector()[:] = outputPlane
    outputs['bottomUpOut'] = outputPlane
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _adjustAlphaChannel(self, alphaMask):
    """
    Apply an alpha suppression channel (in place) to each plane
    of gabor responses.

    @param alphaMask: a numpy array of shape (numPixels, 1)
        containing the alpha mask that determines which responses
        are to be suppressed.  If the values in the alpha mask
        are in the range (0.0, 255.0), then the alpha mask will
        be eroded by halfFilterDim; if the values in the alpha
        mask are in the range (-255.0, 0.0), then the mask will
        be dilated by halfFilterDim.

    Returns the adjusted mask (the same array object, modified in place).
    Erosion/dilation is done either via OpenCV or a custom C++ routine;
    afterwards a numpy-based "edge shaving" step is applied in both cases.
    """
    # Determine whether to erode or dilate.
    # In order to make this determination, we check
    # the sign of the first alpha pixel:
    #
    #   MorphOp     true mask[0,0]    alpha[0,0] code
    #   =======     ==============    ===============
    #   erode       0   (background)       0
    #   erode       255 (foreground)     255
    #   dilate      0   (background)      -1
    #   dilate      255 (foreground)    -256
    indicatorValue = alphaMask[0,0]
    if indicatorValue < 0.0:
      operation = 'dilate'
      # Convert the alpha value back to it's
      # true value
      alphaMask[0,0] = -1.0 - indicatorValue
    else:
      operation = 'erode'
    # We need to perform enough iterations to cover
    # half of the filter dimension
    halfFilterDim = (self._filterDim - 1) / 2
    if self._morphologyMethod == "opencv" or \
       (self._morphologyMethod == "best" and cv is not None):
      # Use the faster OpenCV code path
      assert cv is not None
      # Lazily allocate the necessary OpenCV wrapper structure(s)
      self._prepMorphology()
      # Make the OpenCV image header structure's pixel buffer
      # pointer point at the underlying memory buffer of
      # the alpha channel (numpy array)
      self._morphHeader.contents.imageData = alphaMask.ctypes.data
      # Perform dilation in place
      if operation == 'dilate':
        cv.Dilate(self._morphHeader, self._morphHeader, iterations=halfFilterDim)
      # Perform erosion in place
      else:
        cv.Erode(self._morphHeader, self._morphHeader, iterations=halfFilterDim)
    else:
      # Use the custom C++ code path
      if not self._erosion:
        from nupic.bindings.algorithms import Float32Erosion
        self._erosion = Float32Erosion()
        self._erosion.init(int(self._inHeight), int(self._inWidth))
      # Perform the erosion/dilation in-place
      self._erosion.compute(alphaMask,
                            alphaMask,
                            halfFilterDim,
                            (operation=='dilate'))
    # Legacy numpy method
    # NOTE(review): this edge-shaving section runs after BOTH code paths
    # above (it is not an alternative implementation despite the label).
    # If we are in constrained mode, then the size of our
    # response planes will be less than the size of our
    # alpha mask (by halfFilterDim along each edge).
    # So we need to "shave off" halfFilterDim pixels
    # from all edges of the alpha mask before applying
    # suppression to the response planes.
    inWidth = int(self._inWidth)
    inHeight = int(self._inHeight)
    # For erosion mode, we need to shave off halfFilterDim
    # from the four edges of the alpha mask.
    if operation == "erode":
      alphaMask.shape = (inHeight, inWidth)
      alphaMask[:halfFilterDim, :] = 0.0
      alphaMask[-halfFilterDim:, :] = 0.0
      alphaMask[:, :halfFilterDim] = 0.0
      alphaMask[:, -halfFilterDim:] = 0.0
      alphaMask.shape = (inHeight * inWidth, 1)
    # For dilation mode, we need to shave off halfFilterDim
    # from any edge of the alpha mask that touches the
    # image boundary *unless* the alpha mask is "full"
    # (i.e., consumes the entire image.)
    elif operation == "dilate":
      # Handle top, bottom, left, and right
      alphaMask.shape = (inHeight, inWidth)
      # Column/row indices where the mask touches each image edge
      zapTop = numpy.where(alphaMask[0,:])[0]
      zapBottom = numpy.where(alphaMask[-1,:])[0]
      zapLeft = numpy.where(alphaMask[:,0])[0]
      zapRight = numpy.where(alphaMask[:,-1])[0]
      # Apply zaps unless all of them are of the full
      # length possible
      if len(zapTop) < inWidth or len(zapBottom) < inWidth or \
         len(zapLeft) < inHeight or len(zapRight) < inHeight:
        alphaMask[:halfFilterDim, zapTop] = 0.0
        alphaMask[-halfFilterDim:, zapBottom] = 0.0
        alphaMask[zapLeft, :halfFilterDim] = 0.0
        alphaMask[zapRight, -halfFilterDim:] = 0.0
      alphaMask.shape = (inHeight * inWidth, 1)
    return alphaMask
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _prepMorphology(self):
"""
Prepare buffers used for eroding/dilating alpha
channels.
"""
# Check if we've already allocated a header
#if not hasattr(self, '_morphHeader'):
if not getattr(self, '_morphHeader', None):
if cv is None:
raise RuntimeError("OpenCV not available on this platform")
# Create a header only (not backed by data memory) that will
# allow us to operate on numpy arrays (valid alpha channels)
# using OpenCV operations
self._morphHeader = cv.CreateImageHeader(cv.Size(int(self._inWidth),
int(self._inHeight)), 32, 1)
# @todo: this will leak a small bit of memory every time
# we create and use a new GaborNode unless we find a way
# to guarantee the invocation of cv.ReleaseImageHeader()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _computeBBox(self, validPyramid, inWidth, inHeight):
"""
Compute a bounding box given the validPyramid (a fraction
of the valid input region as provided by the sensor) and
the output dimensions for a particular current scale.
"""
# Assemble the bounding box by converting 'validPyramid' from float (0,1) to integer (O,N)
if self._suppressOutsideBox:
halfFilterDim = (self._filterDim - 1) / 2
bbox = numpy.round((validPyramid * numpy.array([inWidth, inHeight, inWidth, inHeight],
dtype=validPyramid.dtype))).astype(numpy.int32)
# Subtract enough padding for our filter on all four edges
# We'll only subtract enough padding if we have a non-trivlal bounding box.
# In other words, if our validRegionIn is [0, 25, 200, 175] for input image
# dimensions of [0, 0, 200, 200], then we will assume that two horizontal strips
# of filler pixels were artificially added at the top and bottom, but no
# such artificial vertical strips were added. So we don't need to erode the
# bounding box horizontally, only vertically.
if self._forceBoxContraction or bbox[0] > 0:
bbox[0] += halfFilterDim
if self._forceBoxContraction or bbox[1] > 0:
bbox[1] += halfFilterDim
if self._forceBoxContraction or bbox[2] < inWidth:
bbox[2] -= halfFilterDim
if self._forceBoxContraction or bbox[3] < inHeight:
bbox[3] -= halfFilterDim
# Clip the bounding box to the size of the image
bbox[0] = max(bbox[0], 0)
bbox[1] = max(bbox[1], 0)
bbox[2] = min(bbox[2], inWidth)
bbox[3] = min(bbox[3], inHeight)
# Make sure the bounding box didn't become negative width/height
bbox[0] = min(bbox[0], bbox[2])
bbox[1] = min(bbox[1], bbox[3])
# If absolutely no suppression is requested under any
# circumstances, then force the bbox to be the entire image
else:
bbox = numpy.array([0, 0, inWidth, inHeight], dtype=numpy.int32)
# Check in case bbox is non-existent or mal-formed
if bbox[0] < 0 or bbox[1] < 0 or bbox[2] <= bbox[0] or bbox[3] <= bbox[1]:
print "WARNING: empty or malformed bounding box:", bbox
# Fix bbox so that it is a null box but at least not malformed
if bbox[0] < 0:
bbox[0] = 0
if bbox[1] < 0:
bbox[1] = 0
if bbox[2] < bbox[0]:
bbox[2] = bbox[0]
if bbox[3] < bbox[1]:
bbox[3] = bbox[1]
return bbox
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _logDebugBuffers(self, outputVector, scaleIndex, outPrefix="debug"):
"""
Dump detailed debugging information to disk (specifically, the
state of internal working buffers used by C implementaiton.
@param outPrefix -- Prefix to prepend to standard names
for debugging images.
"""
# Save input buffer
self._saveImage(self._bufferSetIn[scaleIndex],
"%s.buffer.in.%02d.png" % (outPrefix, scaleIndex))
# Save output buffer planes
for k in xrange(self._bufferSetOut[scaleIndex].shape[0]):
# We do integer arithmetic shifted by 12 bits
buf = (self._bufferSetOut[scaleIndex][k] / 4096).clip(min=0, max=255);
self._saveImage(buf, "%s.buffer.out.%02d.%02d.png" % (outPrefix, scaleIndex, k))
# Save raw gabor output images (from C implementation)
for k in xrange(self._numPlanes):
self._saveImage(outputVector[k], "%s.out.%02d.%02d.png" % \
(outPrefix, scaleIndex, k))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _saveImage(self, imgArray, outPath):
imgDims = imgArray.shape
img = Image.new('L', (imgDims[1], imgDims[0]))
if imgArray.dtype == numpy.float32:
img.putdata( ((254.9 * imgArray.flatten()).clip(min=0.0, max=255.0)).astype(numpy.uint8) )
#img.putdata((255.0 * imgArray.flatten()).astype(numpy.uint8))
elif imgArray.dtype == numpy.int32:
img.putdata((imgArray.flatten()).astype(numpy.uint8))
else:
assert imgArray.dtype == numpy.uint8
img.putdata(imgArray.flatten())
img.save(outPath)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doGabor(self, inputVector,
bbox,
imageBox,
outputVector,
scaleIndex,
offImagePixelValue=None,
validAlpha=None):
"""
Prepare arguments and invoke C function for
performing actual 2D convolution, rectification,
normalization, and post-processing.
"""
if offImagePixelValue is None:
assert type(offImagePixelValue) in [type(0), type(0.0)]
offImagePixelValue = self._offImagePixelValue
# If we actually have a valid validAlpha mask,
# then reshape it to the input image size
if validAlpha is not None:
origAlphaShape = validAlpha.shape
validAlpha.shape = inputVector.shape
# Invoke C function
result = self._gaborComputeProc(
self._wrapArray(self._gaborBank),
self._wrapArray(inputVector),
self._wrapArray(validAlpha),
self._wrapArray(bbox),
self._wrapArray(imageBox),
self._wrapArray(outputVector),
ctypes.c_float(self._gainConstant),
self._mapParamFromPythonToC('boundaryMode'),
ctypes.c_float(offImagePixelValue),
self._mapParamFromPythonToC('phaseMode'),
self._mapParamFromPythonToC('normalizationMethod'),
self._mapParamFromPythonToC('perPlaneNormalization'),
self._mapParamFromPythonToC('perPhaseNormalization'),
self._mapParamFromPythonToC('postProcessingMethod'),
ctypes.c_float(self._postProcessingSlope),
ctypes.c_float(self._postProcessingCenter),
ctypes.c_float(self._postProcessingMin),
ctypes.c_float(self._postProcessingMax),
self._wrapArray(self._bufferSetIn[scaleIndex]),
self._wrapArray(self._bufferSetOut[scaleIndex]),
self._wrapArray(self._postProcLUT),
ctypes.c_float(self._postProcLutScalar),
)
if result < 0:
raise Exception("gaborCompute failed")
# If we actually have a valid validAlpha mask,
# then reshape it back to it's original shape
if validAlpha is not None:
validAlpha.shape = origAlphaShape
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _convertEnumValue(self, enumValue):
"""
Convert a Python integer object into a ctypes integer
that can be passed to a C function and seen as an
int on the C side.
"""
return ctypes.c_int(enumValue)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _mapParamFromPythonToC(self, paramName):
"""
Map Python object values to equivalent enumerated C values.
"""
# boundaryMode
if paramName == "boundaryMode":
if self._boundaryMode == 'constrained':
enumValue = 0
elif self._boundaryMode == 'sweepOff':
enumValue = 1
return self._convertEnumValue(enumValue)
# phaseMode
elif paramName == "phaseMode":
if self._phaseMode == 'single':
enumValue = 0
elif self._phaseMode == 'dual':
enumValue = 1
return self._convertEnumValue(enumValue)
# normalizationMethod
elif paramName == "normalizationMethod":
if self._normalizationMethod == 'fixed':
enumValue = 0
elif self._normalizationMethod == 'max':
enumValue = 1
elif self._normalizationMethod == 'mean':
enumValue = 2
#elif self._normalizationMethod == 'maxPower':
# enumValue = 3
#elif self._normalizationMethod == 'meanPower':
# enumValue = 4
return self._convertEnumValue(enumValue)
# perPlaneNormalization
elif paramName == "perPlaneNormalization":
if not self._perPlaneNormalization:
enumValue = 0
else:
enumValue = 1
return self._convertEnumValue(enumValue)
# perPhaseNormalization
elif paramName == "perPhaseNormalization":
if not self._perPhaseNormalization:
enumValue = 0
else:
enumValue = 1
return self._convertEnumValue(enumValue)
# postProcessingMethod
elif paramName == "postProcessingMethod":
if self._postProcessingMethod == 'raw':
enumValue = 0
elif self._postProcessingMethod == 'sigmoid':
enumValue = 1
elif self._postProcessingMethod == 'threshold':
enumValue = 2
return self._convertEnumValue(enumValue)
# Invalid parameter
else:
assert False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Private helper methods
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getValidEdgeModes(self):
"""
Returns a list of the valid edge modes.
"""
return ['constrained', 'sweepOff']
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _serializeImage(self, image):
"""
Serialize a PIL image so that it can be transported through
the runtime engine.
"""
s = StringIO()
format = 'png'
if hasattr(image, 'format') and image.format:
format = image.format
try:
image.save(s, format=format)
except:
image.save(s, format='png')
return s.getvalue()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getResponseKey(self, preSuppression):
"""
Returns a key used to index the response image dict
(either 'raw' or 'final')
"""
if preSuppression:
return 'raw'
else:
return 'final'
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _genResponseImages(self, rawResponse, preSuppression, phase='bottomUp'):
    """
    Generate PIL images from the response array.

    @param rawResponse -- flat response vector; reshaped internally to
           (numLocations, numPlanes).
    @param preSuppression -- a boolean, which indicates whether to
           store the generated images using the key 'raw' (if True)
           or 'final' (if False) within the _responseImages member dict.
    @param phase -- 'bottomUp', 'topDown', or 'combined', depending on which
           phase of response image we're generating

    Generate a dict of dicts. The primary dict is keyed by response,
    which can be either 'all' or an integer between 0 and numOrients-1;
    the secondary dicts are keyed by scale, which can be either 'all'
    or an integer between 0 and numScales.  All images are stored in
    serialized (string) form.
    """
    if phase not in ('bottomUp', 'topDown', 'combined'):
      raise RuntimeError, "phase must be either 'bottomUp', 'topDown', or 'combined'"
    # One response value per (location, plane) pair
    numLocns = len(rawResponse.flatten()) / self._numPlanes
    response = rawResponse.reshape(numLocns, self._numPlanes)
    #numScales = len(self._inputPyramidTopology)
    numScales = self._numScales
    imageSet = {}
    # Build all the single-orientation responses
    for responseIdx in xrange(self._numPlanes):
      responseSet = {}
      # Build all the scales
      for scaleIdx in xrange(numScales):
        responseSet[scaleIdx] = self._makeImage(response, scaleIdx, responseIdx)
      # Build the "all scale" list
      #responseSet['all'] = responseSet.values()
      imageSet[responseIdx] = responseSet
    # Build the composite respones
    # (one false-color image per scale combining all orientations)
    responseSet = {}
    for scaleIdx in xrange(numScales):
      scaleSet = [imageSet[orientIdx][scaleIdx] for orientIdx in xrange(self._numPlanes)]
      responseSet[scaleIdx] = self._makeCompositeImage(scaleSet)
    imageSet['all'] = responseSet
    # Serialize all images
    # (each PIL image is replaced by its serialized string, and an 'all'
    # entry listing every serialized scale is added per orientation)
    for orientIdx, orientResponses in imageSet.items():
      for scaleIdx, scaleResponse in orientResponses.items():
        imageSet[orientIdx][scaleIdx] = self._serializeImage(scaleResponse)
      imageSet[orientIdx]['all'] = imageSet[orientIdx].values()
    # Store the image set, lazily creating the top-level dict on first use
    if self._responseImages is None:
      self._responseImages = {self._getResponseKey(preSuppression): {}}
    self._responseImages[self._getResponseKey(preSuppression)][phase] = imageSet
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getNodeRangeByScale(self, whichScale):
"""
Returns a 2-tuple of node indices corresponding to the set of
nodes associated with the specified 'whichScale'.
"""
assert whichScale >= 0
#assert whichScale < len(self._outputPyramidTopology)
assert whichScale < self._numScales
startNodeIdx = 0
#for scaleIndex, outputTopo in enumerate(self._outputPyramidTopology):
for scaleIndex, outputDim in enumerate(self._outputDims):
#nCols, nRows = outputTopo['numNodes']
nRows, nCols = outputDim
stopNodeIdx = startNodeIdx + nCols * nRows
if scaleIndex == whichScale:
return (startNodeIdx, stopNodeIdx)
else:
startNodeIdx = stopNodeIdx
assert False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeImage(self, response, whichScale, whichOrient, gain=1.0):
"""
Generate a single PIL image (using the raw response array) for a
particular scale and orientation.
"""
#nCols, nRows = self._outputPyramidTopology[whichScale]['numNodes']
nRows, nCols = self._outputDims[whichScale]
img = Image.new('L', (nCols, nRows))
startNodeIdx, stopNodeIdx = self._getNodeRangeByScale(whichScale)
img.putdata((gain * 255.0 * response[startNodeIdx:stopNodeIdx,
whichOrient]).clip(min=0.0, max=255.0).astype(numpy.uint8))
return img
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeCompositeImage(self, imageSet):
"""
Create a false color composite image of the individiual
orientation-specific gabor response images in 'imageSet'.
"""
# Generate the bands
numBands = 3
bands = [Image.new('L',imageSet[0].size)] * numBands
for k, img in enumerate(imageSet):
whichBand = k % numBands
bands[whichBand] = ImageChops.add(bands[whichBand], img)
# Make final composite for this scale
compositeImage = Image.merge(mode='RGB', bands=bands)
return compositeImage
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  # DEAD CODE: the 'if False:' guard below disables this definition, so
  # _getEffectiveOrients is never actually bound on the class.  Retained
  # for reference only; consider deleting.
  if False:
    def _getEffectiveOrients(self):
      """
      Internal helper method that returns the number of "effective"
      orientations (which treats the dual phases responses as a
      single orientation.)
      """
      numEffectiveOrients = self._numPlanes
      # Dual-phase mode produces two planes per orientation
      if self._phaseMode == 'dual':
        numEffectiveOrients /= 2
      # The optional center-surround filter occupies one extra plane
      if self._centerSurround:
        numEffectiveOrients -= 1
      return numEffectiveOrients
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _buildGaborBank(self):
    """
    Build an array of Gabor filters.  Also build a 1-D vector of
    filter bank indices that maps each output location to a particular
    (customized) bank of gabor filters.

    Stores the result in self._gaborBank as an int32 array of
    fixed-point filter coefficients (float values scaled by 4096).
    """
    # Make sure dimensions of our Gabor filters are odd
    assert self._filterDim % 2 == 1
    # Create mesh grid indices. The result will be a numpy array of
    # shape (2, filterDim, filterDim).
    # Then meshGrid[0] stores the row indices of the master grid,
    # and meshGrid[1] stores the column indices.
    lowerIndex = -(self._filterDim / 2)
    upperIndex = 1 + self._filterDim / 2
    meshGrid = numpy.mgrid[lowerIndex:upperIndex, lowerIndex:upperIndex]
    # If we are supposed to produce only center-surround output
    # (no oriented responses), then we will still go through the
    # process of making a minimalist bank of 2 oriented gabor
    # filters since that is needed by the center-surround filter
    # generation code
    numOrientations = self._numOrientations
    if numOrientations == 0:
      numOrientations = 2
    # Select the orientation sample points (in radians)
    # (evenly partition the half circle [0, pi))
    radianInterval = numpy.pi / float(numOrientations)
    orientations = numpy.array(range(numOrientations), dtype=RealNumpyDType) * \
                   radianInterval
    # Compute trigonometric functions of orientation
    sinTheta = numpy.sin(orientations).reshape(numOrientations, 1, 1)
    cosTheta = numpy.cos(orientations).reshape(numOrientations, 1, 1)
    # Construct two filterDim X filterDim arrays containing y (row) and
    # x (column) coordinates (in dimensions of pixels), respectively.
    y = meshGrid[0].reshape(1, self._filterDim, self._filterDim)
    x = meshGrid[1].reshape(1, self._filterDim, self._filterDim)
    # Rotate the coordinate frame by each orientation angle
    X = x * cosTheta - y * sinTheta
    Y = x * sinTheta + y * cosTheta
    # Build the Gabor filters
    # 'edge' targets use an odd (sine) carrier; otherwise even (cosine)
    #if hasattr(self, '_phase') and self._phase == 'edge':
    if self._targetType == 'edge':
      sinusoidalTerm = numpy.sin(2.0 * numpy.pi / self._wavelength * X)
    else:
      sinusoidalTerm = numpy.cos(2.0 * numpy.pi / self._wavelength * X)
    # Gaussian envelope (elliptical, controlled by aspect ratio and width)
    numerator = (X * X + self._aspectRatio * self._aspectRatio * Y * Y)
    denominator = -2.0 * self._effectiveWidth * self._effectiveWidth
    exponentialTerm = numpy.exp(numerator / denominator)
    gaborBank = sinusoidalTerm * exponentialTerm
    # Add center-surround filters, if requsted
    if self._centerSurround:
      expFilter = exponentialTerm[0] * exponentialTerm[numOrientations/2]
      # Cubing the raw exponential component seems to give a nice
      # center-surround filter
      centerSurround = expFilter * expFilter * expFilter
      # If our center-surround filter is in addition to the oriented
      # filter, then concatenate it to our filter bank; otherwise
      # it is the filter bank
      if self._numOrientations > 0:
        gaborBank = numpy.concatenate((gaborBank, centerSurround[numpy.newaxis,:,:]))
      else:
        gaborBank = centerSurround[numpy.newaxis,:,:]
    # Apply lobe suppression:  Suppress the outer lobes of the sinusoidal
    # component of the Gabor filters so as to avoid "ringing" effects in
    # the Gabor response maps.
    #
    # We make a single lobe-suppression mask (which is directionally
    # oriented.)  Then we rotate this mask by each orientation and
    # apply it to the pre-suppressed filter bank.
    # In order to minimize discontinuities in the gradients, the
    # suppression mask will be constructed as follows:
    #
    #   y = 1 - |x|^p
    #
    # where:
    #   y = Suppression (0 for total suppression, 1 for no-suppression)
    #   x = position relative to center
    #   p = Some exponent that controls the sharpness of suppression
    numGaborFilters = gaborBank.shape[0]
    # New lobe suppression.
    if self._lobeSuppression:
      # The orientation is always vertical, so we'll locate the discrete
      # filter cell where we go negative
      halfFilterDim = (self._filterDim - 1) / 2
      firstBadCell = None
      # Scan outward from the filter center for the first negative lobe
      for cellIdx in xrange(halfFilterDim, self._filterDim):
        if gaborBank[0, 0, cellIdx] < 0.0:
          firstBadCell = cellIdx - halfFilterDim
          break
      if firstBadCell is not None:
        radialDist = numpy.abs(X / float(halfFilterDim))
        # Establish a radial distance threshold that is halfway
        # between the first discrete bad cell and the last good cell.
        if firstBadCell == halfFilterDim:
          distThresh = 0.5 * (radialDist[0, 0, halfFilterDim + firstBadCell] + \
                              radialDist[0, 0, halfFilterDim + firstBadCell - 1])
        else:
          assert firstBadCell < halfFilterDim
          # Establish a radial distance threshold that is halfway
          # between the first discrete bad cell and the second bad cell.
          # This seems to give good results in practice.
          distThresh = 0.5 * (radialDist[0, 0, halfFilterDim + firstBadCell] + \
                              radialDist[0, 0, halfFilterDim + firstBadCell + 1])
        suppressTerm = (radialDist < distThresh).astype(RealNumpyDType)
        # The center-surround plane (if present) is never suppressed
        if self._centerSurround:
          suppressTerm = numpy.concatenate((suppressTerm,
                                            numpy.ones((1, self._filterDim, self._filterDim),
                                                       dtype=RealNumpyDType)))
        gaborBank *= suppressTerm
    # Normalize so that mean of each filter is zero
    means = gaborBank.mean(axis=2).mean(axis=1).reshape(numGaborFilters, 1, 1)
    offsets = means.repeat(self._filterDim, axis=1).repeat(self._filterDim, axis=2)
    gaborBank -= offsets
    # Normalize so that sum of squares over each filter is one
    squareSums = (gaborBank * gaborBank).sum(axis=2).sum(axis=1).reshape(numGaborFilters, 1, 1)
    scalars = 1.0 / numpy.sqrt(squareSums)
    gaborBank *= scalars
    # Log gabor filters to disk
    # (each filter is min-shifted and rescaled into [0, 255) for viewing)
    if self._logPrefix:
      for k in xrange(numGaborFilters):
        img = Image.new('L', (self._filterDim, self._filterDim))
        minVal = gaborBank[k].min()
        gaborFilter = gaborBank[k] - minVal
        gaborFilter *= (254.99 / gaborFilter.max())
        img.putdata(gaborFilter.flatten().astype(numpy.uint8))
        img.save("%s.filter.%03d.png" % (self._logPrefix, k))
    # Store the Gabor Bank as a transposed set of 'numOrients' 1-D column-vectors
    # which can be easily dot-producted-ed against the split input vectors
    # during our compute() calls.
    # Fixed-point conversion: scale by 4096 (12 bits) and truncate to int32
    self._gaborBank = (gaborBank.astype(numpy.float32) * 4096.0).astype(numpy.int32)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def getSpec(cls):
    """Build and return the region Spec (inputs, outputs, parameters)
    describing this Gabor node to the nuPIC 2 runtime, as a dict.

    Fixes relative to the original: three typos in user-facing help
    strings ('specifed' -> 'specified', 'dimenion' -> 'dimension', and
    the nta_height description said 'Width' instead of 'Height').
    """
    ns = Spec(description=cls.__doc__,
              singleNodeOnly=False)
    # ------------------------------------------------------------------
    # Inputs
    ns.inputs = dict(
        bottomUpIn=InputSpec(
            description="""The input signal, conceptually organized as an
                image pyramid data structure, but internally
                organized as a flattened vector.""",
            dataType='float',
            regionLevel=False,
            requireSplitterMap=False),
        validRegionIn=InputSpec(
            description="""A bounding box around the valid region of the image,
                expressed in pixel coordinates; if the first element
                of the bounding box is negative, then the valid
                region is specified by 'validAlphaIn', in the form
                of a non-rectangular alpha channel.""",
            dataType='float',
            regionLevel=True,
            requireSplitterMap=False),
        validAlphaIn=InputSpec(
            description="""An alpha channel that may be used (in place of the
                'validRegionIn' bounding box) to specify the valid
                region of the image on a per-pixel basis; the channel
                should be an image of identical size to the finest
                resolution data input image.""",
            dataType='float',
            regionLevel=True,
            requireSplitterMap=False)
    )
    # ------------------------------------------------------------------
    # Outputs
    ns.outputs = dict(
        bottomUpOut=OutputSpec(
            description="""The output signal, conceptually organized as an
                image pyramid data structure, but internally
                organized as a flattened vector.""",
            dataType='float',
            count=0,
            regionLevel=False,
            isDefaultOutput=True
        ),
        topDownOut=OutputSpec(
            description="""The feedback output signal, sent to the topDownIn
                input of the next level down.""",
            dataType='float',
            count=0,
            regionLevel=True)
    )
    # ------------------------------------------------------------------
    # Parameters
    ns.parameters = dict(
        # -------------------------------------
        # Create/Read-only parameters
        filterDim=ParameterSpec(dataType='int', accessMode='Create',
            description="""
            The size (in pixels) of both the width and height of the
            gabor filters. Defaults to 9x9.
            """,
            defaultValue=9),
        # NOTE(review): the description says "Defaults to 4" but no
        # defaultValue is declared here -- confirm whether the default
        # is applied elsewhere before adding one.
        numOrientations=ParameterSpec(dataType='int', accessMode='Create',
            description="""
            The number of gabor filter orientations to produce.
            The half-circle (180 degrees) of rotational angle will be evenly partitioned.
            Defaults to 4, which produces a gabor bank containing filters oriented
            at 0, 45, 90, and 135 degrees.
            """),
        phaseMode=ParameterSpec(dataType='str', accessMode='Create',
            description="""
            The number of separate phases to compute per orientation.
            Valid values are: 'single' or 'dual'. In 'single', responses to each such
            orientation are rectified by absolutizing them; i.e., a 90-degree edge
            will produce the same responses as a 270-degree edge, and the two
            responses will be indistinguishable. In "dual" mode, the responses to
            each orientation are rectified by clipping at zero, and then creating
            a second output response by inverting the raw response and again clipping
            at zero; i.e., a 90-degree edge will produce a response only in the
            90-degree-oriented plane, and a 270-degree edge will produce a response
            only the dual phase plane associated with the 90-degree plane (an
            implicit 270-degree plane.) Default is 'single'.
            """,
            constraints="enum: single, dual",
            defaultValue='single'),
        centerSurround=ParameterSpec(dataType='int', accessMode='Create',
            description="""
            Controls whether an additional filter corresponding to
            a non-oriented "center surround" response is applied to the image.
            If phaseMode is "dual", then a second "center surround" response plane
            is added as well (the inverted version of the center-surround response.)
            Defaults to False.
            """,
            defaultValue=0),
        targetType=ParameterSpec(dataType='str', accessMode='Create',
            description="""
            The preferred "target" of the gabor filters. A value of
            'line' specifies that line detectors (peaks in the center and troughs
            on either side) are to be used. A value of 'edge' specifies that edge
            detectors (with a peak on one side and a trough on the other) are to
            be used. Default is 'edge'.
            """,
            constraints="enum: line,edge",
            defaultValue='edge'),
        gainConstant=ParameterSpec(dataType='float', accessMode='ReadWrite',
            description="""
            A multiplicative amplifier that is applied to the gabor
            responses after any normalization. Defaults to 1.0; larger values
            increase the sensitivity to edges.
            """),
        normalizationMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
            description="""
            Controls the method by which responses are
            normalized on a per image (and per scale) basis. Accepts the following
            three legal values:
            "fixed": No response normalization;
            "max": Applies a global gain value to the responses so that the
            max response equals the value of 'gainConstant'
            "mean": Applies a global gain value to the responses so that the
            mean response equals the value of 'gainConstant'
            Default is 'fixed'.
            """,
            constraints="enum: fixed, mean, max"
        ),
        perPlaneNormalization=ParameterSpec(dataType='int', accessMode='ReadWrite',
            description="""
            Controls whether normalization (as specified by
            'normalizationMethod') is applied globally across all response planes
            (for a given scale), or individually to each response plane. Default
            is False. Note: this parameter is ignored if normalizationMethod is "fixed".
            """,
        ),
        perPhaseNormalization=ParameterSpec(dataType='int', accessMode='ReadWrite',
            description="""
            Controls whether normalization (as specified by
            'normalizationMethod') is applied globally across both phases for a
            particular response orientation and scale, or individually to each
            phase of the response. Default is True. Note: this parameter is
            ignored if normalizationMethod is "fixed".
            """,
        ),
        postProcessingMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
            description="""
            Controls what type of post-processing (if any)
            is to be performed on the normalized responses. Valid value are:
            "raw": No post-processing is performed; final output values are
            unmodified after normalization
            "sigmoid": Passes normalized output values through a sigmoid function
            parameterized by 'postProcessingSlope' and 'postProcessingCenter'.
            "threshold": Passes normalized output values through a piecewise linear
            thresholding function parameterized by 'postProcessingMin'
            and 'postProcessingMax'.
            """,
            constraints="enum: raw, sigmoid, threshold"),
        postProcessingSlope=ParameterSpec(dataType='float', accessMode='ReadWrite',
            description="""
            Specifies the slope of the sigmoid function to apply if the
            post-processing mode is set to 'sigmoid'.
            """),
        postProcessingCenter=ParameterSpec(dataType='float', accessMode='ReadWrite',
            description="""
            Specifies the mid-point of the sigmoid function to apply if the
            post-processing mode is set to 'sigmoid'.
            """),
        postProcessingMin=ParameterSpec(dataType='float', accessMode='ReadWrite',
            description="""
            Specifies the value below which responses will be clipped to zero
            when post-processing mode is set to 'threshold'.
            """),
        postProcessingMax=ParameterSpec(dataType='float', accessMode='ReadWrite',
            description="""
            Specifies the value above which responses will be clipped to one
            when post-processing mode is set to 'threshold'.
            """),
        zeroThresholdOut=ParameterSpec(dataType='float', accessMode='ReadWrite',
            description="""
            If all outputs of a gabor node are below this threshold,
            they will all be driven to absolute 0. This is useful in conjunction with
            using the product mode/don't care spatial pooler which needs to know when
            an input should be treated as 0 vs being normalized to sum to 1.
            """),
        boundaryMode=ParameterSpec(dataType='str', accessMode='Create',
            description="""
            Controls how GaborNode deals with boundary effects. Accepts
            two valid parameters:
            'constrained' -- Gabor responses are normally only computed for image locations
            that are far enough from the edge of the input image so that the entire
            filter mask fits within the input image. Thus, the spatial dimensions of
            the output gabor maps will be smaller than the input image layers.
            'sweepOff' -- Gabor responses will be generated at every location within
            the input image layer. Thus, the spatial dimensions of the output gabor
            maps will be identical to the spatial dimensions of the input image.
            For input image locations that are near the edge (i.e., a portion of
            the gabor filter extends off the edge of the input image), the values
            of pixels that are off the edge of the image are taken to be as specified
            by the parameter 'offImagePixelValue'.
            Default is 'constrained'.
            """,
            constraints='enum: constrained, sweepOff',
            defaultValue='constrained'),
        offImagePixelValue=ParameterSpec(dataType="str", accessMode='ReadWrite',
            description="""
            If 'boundaryMode' is set to 'sweepOff', then this
            parameter specifies the value of the input pixel to use for "filling"
            enough image locations outside the bounds of the original image.
            Ignored if 'boundaryMode' is 'constrained'. Default value is 0.
            """
        ),
        suppressOutsideBox=ParameterSpec(dataType='int', accessMode='ReadWrite',
            description="""
            If True, then gabor responses outside of the bounding
            box (provided from the sensor) are suppressed. Internally, the bounding
            box is actually expanded by half the filter dimension (respecting the edge
            of the image, of course) so that responses can be computed for all image
            locations within the original bounding box.
            """),
        forceBoxContraction=ParameterSpec(dataType='int', accessMode='ReadWrite',
            description="""
            Fine-tunes the behavior of bounding box suppression.
            If False (the default), then the bounding box will only be 'contracted'
            (by the half-width of the filter) in the dimension(s) in which it is not
            the entire span of the image. If True, then the bounding box will be
            contracted unconditionally.
            """),
        suppressByAlpha=ParameterSpec(dataType='int', accessMode='ReadWrite',
            description="""
            A boolean that, if True, instructs GaborNode to use
            the pixel-accurate alpha mask received on the input 'validAlphaIn' for
            the purpose of suppression of responses.
            """),
        logPrefix=ParameterSpec(dataType='str', accessMode='ReadWrite',
            description="""
            If non-None, causes the response planes at each scale, and
            for each input image, to be written to disk using the specified prefix
            for the name of the log images. Default is None (no such logging.)
            """),
        maxTopDownOut=ParameterSpec(dataType='float', accessMode='Read', count=0,
            description="""
            The max top-down output from each node. It is faster to access this
            variable than to fetch the entire top-down output of every node. The
            top down image inspector fetches this parameter (if available)
            instead of the topDownOut output variable for better performance.
            """),
        # -------------------------------------
        # Undocumented parameters
        nta_aspectRatio=ParameterSpec(dataType='float', accessMode='Create',
            description="""
            Controls how "fat" (i.e., how oriented) the Gabor
            filters are. A value of 1 would produce completely non-oriented
            (circular) filters; smaller values will produce a more oriented
            filter. Default is 0.3.
            """,
            defaultValue=0.3),
        nta_effectiveWidth=ParameterSpec(dataType='float', accessMode='Create',
            description="""
            Controls the rate of exponential drop-off in
            the Gaussian component of the Gabor filter. Default is 4.5.
            """,
            defaultValue=4.5),
        nta_wavelength=ParameterSpec(dataType='float', accessMode='Create',
            description="""
            Controls the frequency of the sinusoidal component
            of the Gabor filter. Default is 5.6.
            """,
            defaultValue=5.6),
        nta_lobeSuppression=ParameterSpec(dataType='int', accessMode='Create',
            description="""
            Controls whether or not the secondary lobes of the
            Gabor filters are suppressed. The suppression is performed based
            on the radial distance from the oriented edge to which the Gabor
            filter is tuned. If True, then the secondary lobes produced
            by the pure mathematical Gabor equation will be suppressed
            and have no effect; if False, then the pure mathematical
            Gabor equation (digitized into discrete sampling points, of
            course) will be used. Default is True.
            """,
            defaultValue=1),
        nta_debugLogBuffers=ParameterSpec(dataType='int', accessMode='ReadWrite',
            description="""
            If enabled, causes internal memory buffers used
            C implementation to be dumped to disk after each compute()
            cycle as an aid in the debugging of the C code path.
            Defaults to False.
            """,
        ),
        nta_width=ParameterSpec(dataType="int", accessMode='Read',
            description="""Width of the maximum resolution."""),
        # Bug fix: original description was a copy-paste of nta_width
        # ("Width of the maximum resolution").
        nta_height=ParameterSpec(dataType="int", accessMode='Read',
            description="""Height of the maximum resolution."""),
        nta_morphologyMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
            description="""
            Controls the routines used to perform dilation and erosion of
            valid alpha masks. Legal values are:
            'opencv' -- use faster OpenCV routines;
            'nta' -- use the slower Numenta routines;
            'best' -- use OpenCV if it is available on the platform,
            otherwise use the slower routines.
            Default is 'best'.
            """),
    )
    return ns.toDict()
#---------------------------------------------------------------------------------
def getOutputElementCount(self, name):
    """This method will be called only when the node is used in nuPIC 2"""
    # Guard-clause dispatch on the output name; order of the checks is
    # irrelevant since the names are distinct.
    if name == 'topDownOut':
        return 0
    if name == 'bottomUpOut':
        return self.getNumPlanes()
    raise Exception('Unknown output: ' + name)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Command line unit testing
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
if __name__=='__main__':
    # Command-line smoke test: build a one-region Network containing a
    # GaborNode2 configured via a YAML-style parameter string.
    # NOTE: this file is Python 2 (print statement below).
    from nupic.engine import Network
    n = Network()
    # The third argument is the region's parameter string; the commented
    # lines inside it are intentionally part of the literal and are
    # parsed out by the region's config parser.
    gabor = n.addRegion(
        'gabor',
        'py.GaborNode2',
        """{ filterDim: 5,
        numOrientations: 2,
        centerSurround: 1,
        phaseMode: single,
        targetType: edge,
        gainConstant: 1.0,
        normalizationMethod: max,
        postProcessingMethod: threshold,
        postProcessingMin: 0.15,
        postProcessingMax: 1.0,
        boundaryMode: sweepOff,
        #suppressOutsideBox: False,
        #suppressByAlpha: True,
        offImagePixelValue: colorKey,
        zeroThresholdOut: 0.003
        }""")
    print 'Done.'
| 0x0all/nupic | py/regions/extra/GaborNode2.py | Python | gpl-3.0 | 141,860 | [
"Gaussian"
] | 20a65ad9dcb9e19962edb16a79296a1916474c0d7521788f38401a0d82a2197c |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkRectilinearGridReader(SimpleVTKClassModuleBase):
    """DeVIDE wrapper module around VTK's vtkRectilinearGridReader.

    Auto-generated (see the header comment): wraps the reader with no
    inputs and a single 'vtkRectilinearGrid' output, letting the VTK
    class documentation replace this module's own docs (replaceDoc=True).
    """
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkRectilinearGridReader(), 'Reading vtkRectilinearGrid.',
            (), ('vtkRectilinearGrid',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| chrisidefix/devide | modules/vtk_basic/vtkRectilinearGridReader.py | Python | bsd-3-clause | 512 | [
"VTK"
] | 04548348b872513085bda9b2e3b80dbac937f5efc6039a1aa44eadac47a874c3 |
"""
Helper functions for mlab. These combine creation of the data sources,
and applying the modules to them to make standard visualization
operation. They should always return the module object created, for
consistency, and because retrieving the vtk data source from a module object
is possible via tools.get_vtk_src
Each helper function should have a test function associated with it,
both for testing and to ilustrate its use.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
from modules import VectorsFactory, StreamlineFactory, GlyphFactory, \
IsoSurfaceFactory, SurfaceFactory, ContourSurfaceFactory, \
ImageActorFactory, glyph_mode_dict
from sources import vector_scatter, vector_field, scalar_scatter, \
scalar_field, line_source, array2d_source, grid_source, \
triangular_mesh_source, vertical_vectors_source
from filters import ExtractVectorNormFactory, WarpScalarFactory, \
TubeFactory, ExtractEdgesFactory, PolyDataNormalsFactory, \
StripperFactory
from animator import animate
from mayavi.core.scene import Scene
from auto_doc import traits_doc, dedent
import tools
from traits.api import Array, Callable, CFloat, HasTraits, \
List, Trait, Any, Instance, TraitError, true
import numpy
def document_pipeline(pipeline):
    """ Wrap a Pipeline instance in a plain function whose docstring
        combines the pipeline's own documentation with an auto-generated
        description of its keyword arguments.
    """
    def the_function(*args, **kwargs):
        return pipeline(*args, **kwargs)

    # Pick the documentation source: an explicit 'doc' attribute wins,
    # otherwise fall back to the class docstring (or nothing at all).
    if hasattr(pipeline, 'doc'):
        doc = pipeline.doc
    else:
        doc = pipeline.__doc__ or ''

    keyword_doc = traits_doc(pipeline.get_all_traits())
    the_function.__doc__ = dedent("""%s
**Keyword arguments:**
%s""") % (dedent(doc), keyword_doc)
    return the_function
#############################################################################
class Pipeline(HasTraits):
    """ Function used to build pipelines for helper functions.

    Chains a data-source creation function (`_source_function`) with a
    list of filter/module factories (`_pipeline`); calling the instance
    builds the whole visualization pipeline in one go.
    """

    #doc = ''
    _source_function = Callable()

    _pipeline = List()

    # Traits here only for documentation purposes
    figure = Instance('mayavi.core.scene.Scene',
                      help='Figure to populate.')

    def __call__(self, *args, **kwargs):
        """ Calls the logics of the factory, but only after disabling
            rendering, if needed.
        """
        # First retrieve the scene, if any.
        if 'figure' in kwargs:
            figure = kwargs['figure']
            # Bug fix: the original check was
            #     assert isinstance(figure, (Scene, None))
            # which always raises TypeError because None is not a type.
            # The intent was "either None or a Scene instance".
            assert figure is None or isinstance(figure, Scene)
            if figure is not None:
                scene = figure.scene
            else:
                scene = None
        else:
            scene = tools.gcf().scene
        if scene is not None:
            # Disable rendering while the pipeline is built, remembering
            # whether a redraw must happen afterwards.
            self._do_redraw = not scene.disable_render
            scene.disable_render = True
        # Then call the real logic
        output = self.__call_internal__(*args, **kwargs)
        # And re-enable the rendering, if needed.
        if scene is not None:
            scene.disable_render = not self._do_redraw
        return output

    def __call_internal__(self, *args, **kwargs):
        """ Builds the source and runs through the pipeline, returning
        the last object created by the pipeline."""
        self.store_kwargs(kwargs)
        self.source = self._source_function(*args, **kwargs)
        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        return self.build_pipeline()

    def store_kwargs(self, kwargs):
        """ Merges the given keyword argument, with traits default and
        store the resulting dictionary in self.kwargs."""
        kwargs = kwargs.copy()
        all_traits = self.get_all_traits()
        if not set(kwargs.keys()).issubset(all_traits.keys()):
            raise ValueError("Invalid keyword arguments : %s" % \
                    ', '.join(
                        str(k) for k in
                        set(kwargs.keys()).difference(all_traits.keys())))
        traits = self.get(self.class_trait_names())
        # Drop private traits (leading underscore); a plain loop replaces
        # the original side-effect-only list comprehension.
        for key in traits.keys():
            if key[0] == '_':
                traits.pop(key)
        traits.update(kwargs)
        self.kwargs = traits

    def build_pipeline(self):
        """ Runs through the pipeline, applying pipe after pipe. """
        object = self.source
        for pipe in self.pipeline:
            # Only forward the keyword arguments this particular factory
            # actually declares as traits.
            keywords = set(pipe.class_trait_names())
            keywords.remove('trait_added')
            keywords.remove('trait_modified')
            this_kwargs = {}
            for key, value in self.kwargs.iteritems():
                if key in keywords:
                    this_kwargs[key] = value
            object = pipe(object, **this_kwargs)._target
        return object

    def get_all_traits(self):
        """ Returns all the traits of class, and the classes in the pipeline.
        """
        traits = {}
        for pipe in self._pipeline:
            traits.update(pipe.class_traits())
        traits.update(self.class_traits())
        traits.pop('trait_added')
        traits.pop('trait_modified')
        return traits
#############################################################################
class Points3d(Pipeline):
    """
    Plots glyphs (like points) at the position of the supplied data.

    **Function signatures**::

        points3d(x, y, z...)
        points3d(x, y, z, s, ...)
        points3d(x, y, z, f, ...)

    x, y and z are numpy arrays, or lists, all of the same shape, giving
    the positions of the points.

    If only 3 arrays x, y, z are given, all the points are drawn with the
    same size and color.

    In addition, you can pass a fourth array s of the same
    shape as x, y, and z giving an associated scalar value for each
    point, or a function f(x, y, z) returning the scalar value. This
    scalar value can be used to modulate the color and the size of the
    points."""

    # Source: a scatter of scalar-valued points; single module: glyphs.
    _source_function = Callable(scalar_scatter)

    _pipeline = [GlyphFactory, ]

    scale_factor = Any('auto', help='The scaling applied to the glyphs. '
                        'the size of the glyph is by default calculated '
                        'from the inter-glyph spacing. Specify a float to '
                        'give the maximum glyph size in drawing units'
                        )

    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to scale automatically the glyphs.
        """
        scale_factor = kwargs.get('scale_factor', 'auto')
        if scale_factor == 'auto':
            # Build the pipeline with a unit scale first; the real glyph
            # size is derived afterwards from the data spacing.
            kwargs['scale_factor'] = 1
        g = Pipeline.__call_internal__(self, *args, **kwargs)
        if scale_factor == 'auto':
            # Size glyphs from the typical inter-point distance and clamp
            # so that data-scaled glyphs do not exceed that size.
            g.glyph.glyph.scale_factor = \
                        tools._typical_distance(g.mlab_source.dataset)
            g.glyph.glyph.clamping = True
        else:
            g.glyph.glyph.clamping = False
        return g
# Public helper: documented function wrapping a shared Points3d instance.
points3d = document_pipeline(Points3d())
def test_points3d():
    """Draw a closed sinusoidal curve as glyphs whose color and size are
    modulated by a scalar."""
    angles = numpy.linspace(0, 4 * numpy.pi, 20)
    xs = numpy.sin(2 * angles)
    ys = numpy.cos(angles)
    zs = numpy.cos(2 * angles)
    sizes = 2 + numpy.sin(angles)
    return points3d(xs, ys, zs, sizes, colormap="copper", scale_factor=.25)
@animate
def test_points3d_anim(obj=None):
    """Animates the test_points3d example."""
    if obj is None:
        obj = test_points3d()
    angles = numpy.linspace(0, 4 * numpy.pi, 20)
    # Animate the points3d by updating the z coordinates in place.
    src = obj.mlab_source
    for frame in range(10):
        src.z = numpy.cos(2 * angles * 0.1 * (frame + 1))
        yield
def test_molecule():
    """Generates and shows a Caffeine molecule."""
    o = [[30, 62, 19], [8, 21, 10]]
    n = [[31, 21, 11], [18, 42, 14], [55, 46, 17], [56, 25, 13]]
    c = [[5, 49, 15], [30, 50, 16], [42, 42, 15], [43, 29, 13], [18, 28, 12],
         [32, 6, 8], [63, 36, 15], [59, 60, 20]]
    h = [[23, 5, 7], [32, 0, 16], [37, 5, 0], [73, 36, 16], [69, 60, 20],
         [54, 62, 28], [57, 66, 12], [6, 59, 16], [1, 44, 22], [0, 49, 6]]
    # One glyph set per element: (coordinates, glyph size, color, name).
    element_specs = [
        (o, 16, (1, 0, 0), 'Oxygen'),
        (n, 20, (0, 0, 1), 'Nitrogen'),
        (c, 20, (0, 1, 0), 'Carbon'),
        (h, 10, (1, 1, 1), 'Hydrogen'),
    ]
    glyphs = []
    for coords, size, color, name in element_specs:
        xs, ys, zs = map(numpy.array, zip(*coords))
        glyphs.append(points3d(xs, ys, zs, scale_factor=size,
                               scale_mode='none', resolution=20,
                               color=color, name=name))
    return tuple(glyphs)
#############################################################################
class Quiver3D(Points3d):
    """
    Plots glyphs (like arrows) indicating the direction of the vectors
    at the positions supplied.

    **Function signatures**::

        quiver3d(u, v, w, ...)
        quiver3d(x, y, z, u, v, w, ...)
        quiver3d(x, y, z, f, ...)

    u, v, w are numpy arrays giving the components of the vectors.

    If only 3 arrays, u, v, and w are passed, they must be 3D arrays, and
    the positions of the arrows are assumed to be the indices of the
    corresponding points in the (u, v, w) arrays.

    If 6 arrays, (x, y, z, u, v, w) are passed, the 3 first arrays give
    the position of the arrows, and the 3 last the components. They
    can be of any shape.

    If 4 positional arguments, (x, y, z, f) are passed, the last one must be
    a callable, f, that returns vectors components (u, v, w) given the
    positions (x, y, z)."""

    # Extra optional scalar data attached to each vector point.
    scalars = Array(help="""optional scalar data.""")

    # Same glyph machinery as Points3d (inherits the 'auto' scale_factor
    # behavior), but fed by a vector scatter and rendered as vectors.
    _source_function = Callable(vector_scatter)

    _pipeline = [VectorsFactory, ]
# Public helper: documented function wrapping a shared Quiver3D instance.
quiver3d = document_pipeline(Quiver3D())
def test_quiver3d():
    """Draw a swirl of arrows on an integer grid."""
    x, y, z = numpy.mgrid[-2:3, -2:3, -2:3]
    r = numpy.sqrt(x ** 2 + y ** 2 + z ** 4)
    damped = numpy.sin(r) / (r + 0.001)
    u = y * damped
    v = -x * damped
    w = numpy.zeros_like(z)
    return quiver3d(x, y, z, u, v, w, line_width=3, scale_factor=1)
def test_quiver3d_cone():
    """Draw a coarse vector field with cone glyphs, squeezed into a unit
    cube via the extent keyword."""
    grid = numpy.mgrid[-5:5:8j, -5:5:8j, -5:5:8j]
    x, y, z = [axis.astype('f') for axis in grid]
    u = numpy.cos(x)
    v = numpy.sin(y)
    w = numpy.sin(x * z)
    return quiver3d(x, y, z, u, v, w, mode='cone',
                    extent=(0, 1, 0, 1, 0, 1), scale_factor=0.9)
def test_quiver3d_2d_data():
    """Draw flat 2D arrow glyphs over a planar grid (w is zero)."""
    x, y = numpy.mgrid[-5:5:32j, -5:5:32j]
    x = x.astype('f')
    y = y.astype('f')
    u = numpy.cos(x)
    v = numpy.sin(y)
    w = numpy.zeros_like(x)
    return quiver3d(x, y, w, u, v, w, colormap="Purples",
                    scale_factor=0.5, mode="2dthick_arrow")
#############################################################################
class Flow(Pipeline):
    """
    Creates a trajectory of particles following the flow of a vector field.

    **Function signatures**::

        flow(u, v, w, ...)
        flow(x, y, z, u, v, w, ...)
        flow(x, y, z, f, ...)

    u, v, w are numpy arrays giving the components of the vectors.

    If only 3 arrays, u, v, and w are passed, they must be 3D arrays, and
    the positions of the arrows are assumed to be the indices of the
    corresponding points in the (u, v, w) arrays.

    If 6 arrays, (x, y, z, u, v, w) are passed, the 3 first arrays give
    the position of the arrows, and the 3 last the components. The x, y
    and z arrays are then supposed to have been generated by
    `numpy.mgrid`, in other words, they are 3D arrays, with positions
    lying on a 3D orthogonal and regularly spaced grid with nearest
    neighbor in space matching nearest neighbor in the array. The
    function builds a vector field assuming the points are regularly
    spaced.

    If 4 positional arguments, (x, y, z, f) are passed, the last one must be
    a callable, f, that returns vectors components (u, v, w) given the
    positions (x, y, z)."""

    # Optional scalar data used to color the streamlines.
    scalars = Array(help="""optional scalar data.""")

    _source_function = Callable(vector_field)

    # First filter computes the vector norm so the streamlines can be
    # colored; it is skipped when the user supplies scalars (see below).
    _pipeline = [ExtractVectorNormFactory, StreamlineFactory, ]

    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to choose whether to apply an
        ExtractVectorNorm filter.
        """
        # Unlike the base class, the source is built first so that it can
        # be inspected for scalar data below.  'name' is consumed by the
        # source function and removed before store_kwargs, presumably
        # because it is not a trait of the factories -- TODO confirm.
        self.source = self._source_function(*args, **kwargs)
        kwargs.pop('name', None)
        self.store_kwargs(kwargs)
        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        if tools._has_scalar_data(self.source):
            # User-supplied scalars: drop the ExtractVectorNorm filter.
            self.pipeline.pop(0)
        return self.build_pipeline()
# Public helper: documented function wrapping a shared Flow instance.
flow = document_pipeline(Flow())
def test_flow():
    """Streamlines through a gentle vortex with a small upward drift."""
    x, y, z = numpy.mgrid[-4:4:40j, -4:4:40j, 0:4:20j]
    r = numpy.sqrt(x ** 2 + y ** 2 + z ** 2 + 0.1)
    swirl = numpy.sin(r) / r
    u = y * swirl
    v = -x * swirl
    w = numpy.ones_like(z) * 0.05
    return flow(u, v, w)
def test_flow_tubes():
    """Streamlines rendered as tubes over an explicit position grid."""
    grid = numpy.mgrid[-5:5:32j, -5:5:32j, -5:5:32j]
    x, y, z = [axis.astype('f') for axis in grid]
    u = numpy.cos(x / 2.)
    v = numpy.sin(y / 2.)
    w = numpy.sin(x * z / 4.)
    return flow(x, y, z, u, v, w, linetype='tube')
@animate
def test_flow_anim(obj=None):
    """Animate the tube-flow example by phase-shifting two components."""
    if obj is None:
        obj = test_flow_tubes()
    # Now animate the flow.
    src = obj.mlab_source
    x, z = src.x, src.z
    for frame in range(10):
        phase = numpy.pi * (frame + 1) / 10.
        src.set(u=numpy.cos(x / 2. + phase),
                w=numpy.sin(x * z / 4. + phase))
        yield
def test_flow_scalars():
    """Streamlines colored by user-supplied scalars, seeded from a plane."""
    grid = numpy.mgrid[-5:5:32j, -5:5:32j, -5:5:32j]
    x, y, z = [axis.astype('f') for axis in grid]
    u = numpy.cos(x / 2.)
    v = numpy.sin(y / 2.)
    w = numpy.sin(x * z / 8.)
    color_data = x * z
    return flow(u, v, w, scalars=color_data, seedtype='plane',
                linetype='tube', colormap='Spectral')
#############################################################################
class Contour3d(Pipeline):
    """
    Plots iso-surfaces for a 3D volume of data suplied as arguments.

    **Function signatures**::

        contour3d(scalars, ...)
        contour3d(x, y, z, scalars, ...)

    scalars is a 3D numpy arrays giving the data on a grid.

    If 4 arrays, (x, y, z, scalars) are passed, the 3 first arrays give
    the position of the arrows, and the last the scalar value. The x, y
    and z arrays are then supposed to have been generated by
    `numpy.mgrid`, in other words, they are 3D arrays, with positions
    lying on a 3D orthogonal and regularly spaced grid with nearest
    neighbor in space matching nearest neighbor in the array. The
    function builds a scalar field assuming the points are regularly
    spaced.

    If 4 positional arguments, (x, y, z, f) are passed, the last one
    can also be a callable, f, that returns vectors components (u, v, w)
    given the positions (x, y, z)."""

    # Scalar field source with a single iso-surface module.
    _source_function = Callable(scalar_field)

    _pipeline = [IsoSurfaceFactory, ]
# Public helper: documented function wrapping a shared Contour3d instance.
contour3d = document_pipeline(Contour3d())
def test_contour3d():
    """Iso-surfaces of an anisotropic quadratic scalar field."""
    x, y, z = numpy.ogrid[-5:5:64j, -5:5:64j, -5:5:64j]
    field = x * x * 0.5 + y * y + z * z * 2.0
    return contour3d(field, contours=4, transparent=True)
@animate
def test_contour3d_anim(obj=None):
    """Animate the contour3d example by morphing the scalar field."""
    if obj is None:
        obj = test_contour3d()
    x, y, z = numpy.ogrid[-5:5:64j, -5:5:64j, -5:5:64j]
    # Now animate the contours.
    src = obj.mlab_source
    for frame in range(1, 10):
        src.scalars = x * x * 0.5 + y * x * 0.1 * (frame + 1) + z * z * 0.25
        yield
#############################################################################
class Plot3d(Pipeline):
    """
    Draws lines between points.

    **Function signatures**::

        plot3d(x, y, z, ...)
        plot3d(x, y, z, s, ...)

    x, y, z and s are numpy arrays or lists of the same shape. x, y and z
    give the positions of the successive points of the line. s is an
    optional scalar value associated with each point."""

    # When tube_radius is None the tube (and the stripper feeding it)
    # are dropped from the pipeline and plain lines are drawn instead.
    tube_radius = Trait(0.025, CFloat, None,
                        adapts='filter.radius',
                        help="""radius of the tubes used to represent the
                        lines, If None, simple lines are used.
                        """)

    _source_function = Callable(line_source)

    _pipeline = [StripperFactory, TubeFactory, SurfaceFactory, ]

    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to choose whether to apply
        filters.
        """
        self.source = self._source_function(*args, **kwargs)
        # 'name' is consumed by the source function; remove it before
        # merging the remaining keywords with the trait defaults.
        kwargs.pop('name', None)
        self.store_kwargs(kwargs)
        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        if self.kwargs['tube_radius'] is None:
            # No tubes requested: render simple lines.
            self.pipeline.remove(TubeFactory)
            self.pipeline.remove(StripperFactory)
        return self.build_pipeline()
# Public helper: documented function wrapping a shared Plot3d instance.
plot3d = document_pipeline(Plot3d())
def test_plot3d():
    """Generates a pretty set of lines."""
    n_mer, n_long = 6, 11
    dphi = numpy.pi / 1000.0
    phi = numpy.arange(0.0, 2 * numpy.pi + 0.5 * dphi, dphi)
    mu = phi * n_mer
    modulation = 1 + numpy.cos(n_long * mu / n_mer) * 0.5
    x = numpy.cos(mu) * modulation
    y = numpy.sin(mu) * modulation
    z = numpy.sin(n_long * mu / n_mer) * 0.5
    return plot3d(x, y, z, numpy.sin(mu),
                  tube_radius=0.025, colormap='Spectral')
@animate
def test_plot3d_anim(obj=None):
    """Generates a pretty set of lines and animates it."""
    # Run the standard example and get the module generated.
    if obj is None:
        obj = test_plot3d()
    # Some data from the test example for the animation.
    n_mer, n_long = 6, 11
    dphi = numpy.pi / 1000.0
    phi = numpy.arange(0.0, 2 * numpy.pi + 0.5 * dphi, dphi, 'd')
    mu = phi * n_mer
    # Now animate the data.
    src = obj.mlab_source
    for frame in range(10):
        phase = numpy.pi * (frame + 1) / 5.
        x = numpy.cos(mu) * (1 + numpy.cos(n_long * mu / n_mer + phase) * 0.5)
        src.set(x=x, scalars=numpy.sin(mu + phase))
        yield
#############################################################################
class ImShow(Pipeline):
    """
    View a 2D array as an image.

    **Function signatures**::

        imshow(s, ...)

    s is a 2 dimension array. The values of s are mapped to a color using
    the colormap."""

    # 2D array source rendered directly as an image actor.
    _source_function = Callable(array2d_source)

    _pipeline = [ImageActorFactory, ]
# Public helper: documented function wrapping a shared ImShow instance.
imshow = document_pipeline(ImShow())
def test_imshow():
    """ Use imshow to visualize a 2D 10x10 random array.
    """
    image = numpy.random.random((10, 10))
    return imshow(image, colormap='gist_earth')
#############################################################################
class Surf(Pipeline):
    """
    Plots a surface using regularly-spaced elevation data supplied as a 2D
    array.
    **Function signatures**::
        surf(s, ...)
        surf(x, y, s, ...)
        surf(x, y, f, ...)
    s is the elevation matrix, a 2D array, where indices along the first
    array axis represent x locations, and indices along the second array
    axis represent y locations.
    x and y can be 1D or 2D arrays such as returned by numpy.ogrid or
    numpy.mgrid. Arrays returned by numpy.meshgrid require a transpose
    first to obtain correct indexing order.
    The points should be located on an orthogonal grid (possibly
    non-uniform). In other words, all the points sharing a same
    index in the s array need to have the same x or y value. For
    arbitrary-shaped position arrays (non-orthogonal grids), see the mesh
    function.
    If only 1 array s is passed, the x and y arrays are assumed to be
    made from the indices of arrays, and an uniformly-spaced data set is
    created.
    If 3 positional arguments are passed the last one must be an array s,
    or a callable, f, that returns an array. x and y give the
    coordinates of positions corresponding to the s values."""
    # Elevation goes through a 2D-array source, is warped along z by the
    # scalar value, gets point normals (smooth shading) and is rendered
    # as a surface.
    _source_function = Callable(array2d_source)
    _pipeline = [WarpScalarFactory, PolyDataNormalsFactory, SurfaceFactory]
    warp_scale = Any(1, help="""scale of the z axis (warped from
                        the value of the scalar). By default this scale
                        is a float value.
                        If you specify 'auto', the scale is calculated to
                        give a pleasant aspect ratio to the plot,
                        whatever the bounds of the data.
                        If you specify a value for warp_scale in
                        addition to an extent, the warp scale will be
                        determined by the warp_scale, and the plot be
                        positioned along the z axis with the zero of the
                        data centered on the center of the extent. If you
                        are using explicit extents, this is the best way
                        to control the vertical scale of your plots.
                        If you want to control the extent (or range)
                        of the surface object, rather than its scale,
                        see the `extent` keyword argument.
                        """)
    mask = Array(help="""boolean mask array to suppress some data points.
                 Note: this works based on colormapping of scalars and will
                 not work if you specify a solid color using the
                 `color` keyword.""")
    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to scale automatically the axis.
        """
        self.source = self._source_function(*args, **kwargs)
        kwargs.pop('name', None)
        # Deal with both explicit warp scale and extent, this is
        # slightly hairy. The wigner example is a good test case for
        # this.
        if not 'warp_scale' in kwargs and not 'extent' in kwargs:
            # Neither given: only warn when the implicit unit warp scale
            # would produce a badly proportioned plot.
            try:
                xi, xf, yi, yf, _, _ = self.source.data.bounds
                zi, zf = self.source.data.scalar_range
            except AttributeError:
                # Attribute name differs with the source/VTK flavour.
                xi, xf, yi, yf, _, _ = self.source.image_data.bounds
                zi, zf = self.source.image_data.scalar_range
            aspect_ratios = [(zf - zi) / (xf - xi), (zf - zi) / (yf - yi)]
            if min(aspect_ratios) < 0.01 or max(aspect_ratios) > 100:
                print 'Warning: the range of your scalar values differs by ' \
                'more than a factor 100 than the range of the grid values ' \
                'and you did not '\
                'specify a warp_scale. You could try warp_scale="auto".'
        if 'warp_scale' in kwargs and not kwargs['warp_scale'] == 'auto' \
                and 'extent' in kwargs:
            # XXX: I should use the logging module.
            print 'Warning: both warp_scale and extent keyword argument ' \
            'specified, the z bounds of the extents will be overridden'
            # Rewrite the z extent so that the data zero sits at the
            # center of the requested extent, scaled by warp_scale, then
            # neutralize warp_scale (the extent now carries the scaling).
            xi, xf, yi, yf, zi, zf = kwargs['extent']
            zo = 0.5 * (zi + zf)
            try:
                si, sf = self.source.data.scalar_range
            except AttributeError:
                si, sf = self.source.image_data.scalar_range
            z_span = kwargs['warp_scale'] * abs(sf - si)
            zi = zo + si * kwargs['warp_scale']
            zf = zi + z_span
            kwargs['extent'] = (xi, xf, yi, yf, zi, zf)
            kwargs['warp_scale'] = 1
        elif kwargs.get('warp_scale', 1) == 'auto':
            # Automatic scaling: choose a z extent proportional to the
            # x/y footprint and let the extent mechanism do the scaling.
            if 'extent' in kwargs:
                if 'warp_scale' in kwargs:
                    print "Warning: extent specified, warp_scale='auto' " \
                    "ignored."
            else:
                try:
                    xi, xf, yi, yf, _, _ = self.source.data.bounds
                    zi, zf = self.source.data.scalar_range
                except AttributeError:
                    xi, xf, yi, yf, _, _ = self.source.image_data.bounds
                    zi, zf = self.source.image_data.scalar_range
                # NOTE(review): z0 is the scalar *span* (zf - zi), not a
                # midpoint, so the extent is centered on the span value.
                # Looks odd but is preserved as-is -- confirm upstream.
                z0 = zf - zi
                dz = 0.3 * ((xf - xi) + (yf - yi))
                zi = z0 - 0.5 * dz
                zf = z0 + 0.5 * dz
                kwargs['extent'] = (xi, xf, yi, yf, zi, zf)
            kwargs['warp_scale'] = 1.
        self.store_kwargs(kwargs)
        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        return self.build_pipeline()
# Expose the factory instance as the user-facing mlab `surf` function.
surf = document_pipeline(Surf())
def test_simple_surf():
    """Test Surf with a simple collection of points."""
    grid_x, grid_y = numpy.mgrid[0:3:1, 0:3:1]
    elevation = numpy.asarray(grid_x, 'd')
    return surf(grid_x, grid_y, elevation)
@animate
def test_simple_surf_anim(obj=None):
    """Test Surf with a simple collection of points and animate it."""
    if obj is None:
        obj = test_simple_surf()
    src = obj.mlab_source
    base = src.x
    # Rescale the scalars a little more at every frame.
    for step in range(10):
        src.scalars = numpy.asarray(base * 0.1 * (step + 1), 'd')
        yield
def test_surf():
    """Test surf on regularly spaced co-ordinates like MayaVi."""
    def elevation(x, y):
        # Sum of three plane waves.
        return (numpy.sin(x + y) + numpy.sin(2 * x - y)
                + numpy.cos(3 * x + 4 * y))
    x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]
    return surf(x, y, elevation)
def test_surf_wigner():
    def cat(x, y, alpha=2, eta=1, purity=1):
        """Multiphoton Schrodinger cat state. eta is the fidelity,
        alpha the number of photons."""
        exp = numpy.exp
        # Two coherent-state lobes plus the interference fringes.
        lobes = (exp(-x ** 2 - (y - alpha) ** 2)
                 + exp(-x ** 2 - (y + alpha) ** 2)
                 + 2 * purity * exp(-x ** 2 - y ** 2) *
                 numpy.cos(2 * alpha * x))
        return (1 + eta * lobes / (2 * (1 + exp(-alpha ** 2)))) / 2
    x, y = numpy.mgrid[-5:5:0.1, -5:5:0.1]
    return surf(x, y, cat)
#############################################################################
class Mesh(Pipeline):
    """
    Plots a surface using grid-spaced data supplied as 2D arrays.
    **Function signatures**::
        mesh(x, y, z, ...)
    x, y, z are 2D arrays, all of the same shape, giving the positions of
    the vertices of the surface. The connectivity between these points is
    implied by the connectivity on the arrays.
    For simple structures (such as orthogonal grids) prefer the `surf`
    function, as it will create more efficient data structures. For mesh
    defined by triangles rather than regular implicit connectivity, see the
    `triangular_mesh` function.
    """
    scale_mode = Trait('none', {'none': 'data_scaling_off',
                                'scalar': 'scale_by_scalar',
                                'vector': 'scale_by_vector'},
                       help="""the scaling mode for the glyphs
                       ('vector', 'scalar', or 'none').""")
    scale_factor = CFloat(0.05,
                          desc="""scale factor of the glyphs used to represent
                        the vertices, in fancy_mesh mode. """)
    tube_radius = Trait(0.025, CFloat, None,
                        help="""radius of the tubes used to represent the
                        lines, in mesh mode. If None, simple lines are used.
                        """)
    scalars = Array(help="""optional scalar data.""")
    mask = Array(help="""boolean mask array to suppress some data points.
                 Note: this works based on colormapping of scalars and will
                 not work if you specify a solid color using the
                 `color` keyword.""")
    representation = Trait('surface', 'wireframe', 'points', 'mesh',
                           'fancymesh',
                           desc="""the representation type used for the surface.""")
    _source_function = Callable(grid_source)
    # Full pipeline for 'fancymesh'; pieces are removed below for the
    # simpler representations.
    _pipeline = [ExtractEdgesFactory, GlyphFactory, TubeFactory,
                 SurfaceFactory]
    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to choose whether to apply
        filters.
        """
        self.source = self._source_function(*args, **kwargs)
        kwargs.pop('name', None)
        self.store_kwargs(kwargs)
        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        if not self.kwargs['representation'] in ('mesh', 'fancymesh'):
            # Plain surface/wireframe/points: drop all mesh-specific
            # filters and prepend normals for smooth shading.
            self.pipeline.remove(ExtractEdgesFactory)
            self.pipeline.remove(TubeFactory)
            self.pipeline.remove(GlyphFactory)
            self.pipeline = [PolyDataNormalsFactory, ] + self.pipeline
        else:
            if self.kwargs['tube_radius'] is None:
                # Draw simple lines instead of tubes around the edges.
                self.pipeline.remove(TubeFactory)
            if not self.kwargs['representation'] == 'fancymesh':
                # Plain 'mesh' mode: no glyphs at the vertices.
                self.pipeline.remove(GlyphFactory)
            # The edges/tubes/glyphs themselves are drawn as surfaces.
            self.kwargs['representation'] = 'surface'
        return self.build_pipeline()
# Expose the factory instance as the user-facing mlab `mesh` function.
mesh = document_pipeline(Mesh())
def test_mesh():
    """A very pretty picture of spherical harmonics translated from
    the octaviz example."""
    pi = numpy.pi
    sin, cos = numpy.sin, numpy.cos
    dphi = dtheta = pi / 250.0
    phi, theta = numpy.mgrid[0:pi + dphi * 1.5:dphi,
                             0:2 * pi + dtheta * 1.5:dtheta]
    # Radius built from a combination of spherical-harmonic-like terms
    # (frequencies 4, 2, 6, 6 and exponents 3, 3, 2, 4).
    r = sin(4 * phi) ** 3 + cos(2 * phi) ** 3 + \
        sin(6 * theta) ** 2 + cos(6 * theta) ** 4
    x = r * sin(phi) * cos(theta)
    y = r * cos(phi)
    z = r * sin(phi) * sin(theta)
    return mesh(x, y, z, colormap="bone")
def test_mesh_sphere(r=1.0, npts=(100, 100), colormap='jet'):
    """Create a simple sphere."""
    pi = numpy.pi
    # Complex step counts make mgrid produce npts points per axis.
    phi, theta = numpy.mgrid[0:pi:npts[0] * 1j, 0:2 * pi:npts[1] * 1j]
    x = r * numpy.sin(phi) * numpy.cos(theta)
    y = r * numpy.sin(phi) * numpy.sin(theta)
    z = r * numpy.cos(phi)
    return mesh(x, y, z, colormap=colormap)
@animate
def test_mesh_sphere_anim(obj=None, r=1.0, npts=(100, 100), colormap='jet'):
    """Create a simple sphere and animate it."""
    if obj is None:
        obj = test_mesh_sphere(r, npts, colormap)
    pi = numpy.pi
    phi, theta = numpy.mgrid[0:pi:npts[0] * 1j, 0:2 * pi:npts[1] * 1j]
    src = obj.mlab_source
    # Grow the sphere along z while coloring by the new z values.
    for step in range(1, 10):
        z = (r + step * 0.25) * numpy.cos(phi)
        src.set(z=z, scalars=z)
        yield
def test_mesh_mask_custom_colors(r=1.0, npts=(100, 100)):
    """Create a sphere with masking and using a custom colormap.
    Note that masking works only when scalars are set. The custom colormap
    illustrates how one can completely customize the colors with numpy arrays.
    In this case we use a simple 2 color colormap.
    """
    # Same sphere as test_mesh_sphere.
    pi = numpy.pi
    phi, theta = numpy.mgrid[0:pi:npts[0] * 1j, 0:2 * pi:npts[1] * 1j]
    x = r * numpy.sin(phi) * numpy.cos(theta)
    y = r * numpy.sin(phi) * numpy.sin(theta)
    z = r * numpy.cos(phi)
    # Mask out every fifth row and every fifth column of points.
    masked = numpy.zeros_like(x).astype(bool)
    masked[::5] = True
    masked[:, ::5] = True
    surface = mesh(x, y, z, scalars=z, mask=masked)
    # Two-entry RGBA lookup table (values 0-255): blue and green, both
    # fully opaque. At least two colors are required; use the same color
    # twice for a constant-color plot.
    lut = numpy.zeros((2, 4), dtype='uint8')
    lut[0, 2] = 255
    lut[1, 1] = 255
    lut[:, 3] = 255
    surface.module_manager.scalar_lut_manager.lut.table = lut
    return surface
def test_fancy_mesh():
    """Create a fancy looking mesh using mesh (example taken from octaviz)."""
    pi = numpy.pi
    cos = numpy.cos
    third = 2 * pi / 3.0
    du = dv = pi / 20.0
    u, v = numpy.mgrid[0.01:pi + du * 1.5:du, 0:2 * pi + dv * 1.5:dv]
    envelope = (1 - cos(u)) * cos(u + third)
    x = envelope * cos(v + third) * 0.5
    y = envelope * cos(v - third) * 0.5
    z = -cos(u - third)
    return mesh(x, y, z, representation='fancymesh',
                tube_radius=0.0075, colormap="RdYlGn")
#############################################################################
class ContourSurf(Pipeline):
    """
    Plots a the contours of a surface using grid-spaced data for
    elevation supplied as a 2D array.
    **Function signatures**::
        contour_surf(s, ...)
        contour_surf(x, y, s, ...)
        contour_surf(x, y, f, ...)
    s is the elevation matrix, a 2D array. The contour lines plotted
    are lines of equal s value.
    x and y can be 1D or 2D arrays (such as returned by numpy.ogrid or
    numpy.mgrid), but the points should be located on an orthogonal grid
    (possibly non-uniform). In other words, all the points sharing a same
    index in the s array need to have the same x or y value. For
    arbitrary-shaped position arrays (non-orthogonal grids), see the mesh
    function.
    If only 1 array s is passed, the x and y arrays are assumed to be
    made from the indices of arrays, and an uniformly-spaced data set is
    created.
    If 3 positional arguments are passed the last one must be an array s,
    or a callable, f, that returns an array. x and y give the
    coordinates of positions corresponding to the s values."""
    # Same source as `surf`, but the warped data is rendered as contour
    # lines instead of a filled, normal-shaded surface.
    _source_function = Callable(array2d_source)
    _pipeline = [WarpScalarFactory, ContourSurfaceFactory]
# Expose the factory instance as the mlab `contour_surf` function.
contour_surf = document_pipeline(ContourSurf())
def test_contour_surf():
    """Test contour_surf on regularly spaced co-ordinates like MayaVi."""
    def elevation(x, y):
        # Sum of three plane waves.
        return (numpy.sin(x + y) + numpy.sin(2 * x - y)
                + numpy.cos(3 * x + 4 * y))
    x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]
    return contour_surf(x, y, elevation)
#############################################################################
# Expose only the glyphs that make (more or less) sense for a barchart.
bar_mode_dict = dict((name, glyph_mode_dict[name])
                     for name in ('cube', '2dtriangle', '2dsquare',
                                  '2dvertex', '2dthick_cross', '2ddiamond',
                                  '2dcross', '2dcircle'))
class BarChart(Pipeline):
    """
    Plots vertical glyphs (like bars) scaled vertical, to do
    histogram-like plots.
    This functions accepts a wide variety of inputs, with positions given
    in 2-D or in 3-D.
    **Function signatures**::
        barchart(s, ...)
        barchart(x, y, s, ...)
        barchart(x, y, f, ...)
        barchart(x, y, z, s, ...)
        barchart(x, y, z, f, ...)
    If only one positional argument is passed, it can be a 1-D, 2-D, or 3-D
    array giving the length of the vectors. The positions of the data
    points are deducted from the indices of array, and an
    uniformly-spaced data set is created.
    If 3 positional arguments (x, y, s) are passed the last one must be
    an array s, or a callable, f, that returns an array. x and y give the
    2D coordinates of positions corresponding to the s values.
    If 4 positional arguments (x, y, z, s) are passed, the 3 first are
    arrays giving the 3D coordinates of the data points, and the last one
    is an array s, or a callable, f, that returns an array giving the
    data value.
    """
    # Bars are vertical vectors rendered with a glyph module.
    _source_function = Callable(vertical_vectors_source)
    _pipeline = [VectorsFactory, ]
    mode = Trait('cube', bar_mode_dict,
                 desc='The glyph used to represent the bars.')
    lateral_scale = CFloat(0.9, desc='The lateral scale of the glyph, '
                'in units of the distance between nearest points')
    auto_scale = true(desc='whether to compute automatically the '
                'lateral scaling of the glyphs. This might be '
                'computationally expensive.')
    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to scale automatically the axis.
        """
        g = Pipeline.__call_internal__(self, *args, **kwargs)
        gs = g.glyph.glyph_source
        # Use a cube source for glyphs.
        if not 'mode' in kwargs:
            gs.glyph_source = gs.glyph_dict['cube_source']
        # Position the glyph tail on the point.
        gs.glyph_position = 'tail'
        gs.glyph_source.center = (0.0, 0.0, 0.5)
        g.glyph.glyph.orient = False
        if not 'color' in kwargs:
            g.glyph.color_mode = 'color_by_scalar'
        if not 'scale_mode' in kwargs:
            g.glyph.scale_mode = 'scale_by_vector_components'
        g.glyph.glyph.clamping = False
        # The auto-scaling code. It involves finding the minimum
        # distance between points, which can be very expensive. We
        # shortcut this calculation for structured data
        # NOTE(review): the `or self.auto_scale` short-circuit skips the
        # expensive computation when auto_scale is *enabled*, which looks
        # inverted -- confirm against upstream intent.
        if len(args) == 1 or self.auto_scale:
            min_axis_distance = 1
        else:
            x, y, z = g.mlab_source.x, g.mlab_source.y, g.mlab_source.z
            min_axis_distance = \
                tools._min_axis_distance(x, y, z)
        scale_factor = g.glyph.glyph.scale_factor * min_axis_distance
        lateral_scale = kwargs.pop('lateral_scale', self.lateral_scale)
        try:
            g.glyph.glyph_source.glyph_source.y_length = \
                lateral_scale / (scale_factor)
            g.glyph.glyph_source.glyph_source.x_length = \
                lateral_scale / (scale_factor)
        except TraitError:
            # Deliberate no-op: some glyph sources have no x/y_length.
            " Not all types of glyphs have controlable y_length and x_length"
        return g
# Expose the factory instance as the mlab `barchart` function.
barchart = document_pipeline(BarChart())
def test_barchart():
    """Demo the bar chart plot with a random 3x3 2D array."""
    values = numpy.abs(numpy.random.random((3, 3)))
    return barchart(values)
#############################################################################
class TriangularMesh(Mesh):
    """
    Plots a surface using a mesh defined by the position of its vertices
    and the triangles connecting them.
    **Function signatures**::
        triangular_mesh(x, y, z, triangles ...)
    x, y, z are arrays giving the positions of the vertices of the surface.
    triangles is a list of triplets (or an array) list the vertices in
    each triangle. Vertices are indexes by their appearance number in the
    position arrays.
    For simple structures (such as rectangular grids) prefer the surf or
    mesh functions, as they will create more efficient data structures.
    """
    # Inherits all traits and pipeline handling from Mesh; only the
    # source changes, taking explicit triangle connectivity.
    _source_function = Callable(triangular_mesh_source)
# Expose the factory instance as the mlab `triangular_mesh` function.
triangular_mesh = document_pipeline(TriangularMesh())
def test_triangular_mesh():
    """An example of a cone, ie a non-regular mesh defined by its
    triangles.
    """
    n = 8
    t = numpy.linspace(-numpy.pi, numpy.pi, n)
    # Base circle of the cone in the complex plane.
    rim = numpy.exp(1j * t)
    base_x = rim.real.copy()
    base_y = rim.imag.copy()
    base_z = numpy.zeros_like(base_x)
    # One triangle per rim segment, all sharing the apex (vertex 0).
    faces = [(0, i, i + 1) for i in range(1, n)]
    x = numpy.r_[0, base_x]
    y = numpy.r_[0, base_y]
    z = numpy.r_[1, base_z]
    t = numpy.r_[0, t]
    return triangular_mesh(x, y, z, faces, scalars=t)
| liulion/mayavi | mayavi/tools/helper_functions.py | Python | bsd-3-clause | 40,211 | [
"Mayavi",
"VTK"
] | fc21660deea595af7316ecffebe9ba3d0afecf0e13cb26f3356ab0917547c4d2 |
########################################################################
# $Id$
########################################################################
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import stringListToString, intListToString
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileManagerBase import FileManagerBase
import os
class FileManagerFlat( FileManagerBase ):
  """ File catalog manager keeping all file records in a single flat
      FC_Files table (plus FC_Replicas for replicas), as opposed to the
      normalised multi-table manager implementations.
      NOTE(review): SQL statements are built by string interpolation of
      values that ultimately come from clients; confirm that the quoting
      helpers (stringListToString and friends) escape correctly.
  """
  ######################################################
  #
  # The all important _findFiles and _getDirectoryFiles methods
  #
  # NOTE(review): the mutable default `metadata = ['FileID']` is shared
  # between calls; safe only as long as callers never mutate it.
  def _findFiles( self, lfns, metadata = ['FileID'], connection = False ):
    connection = self._getConnection( connection )
    """ Find file ID if it exists for the given list of LFNs """
    # Group the requested LFNs by their parent directory.
    dirDict = self._getFileDirectories( lfns )
    failed = {}
    directoryIDs = {}
    # Resolve each directory path to its ID; every file under an
    # unresolvable directory fails with the same error.
    for dirPath in dirDict.keys():
      res = self.db.dtree.findDir( dirPath )
      if not res['OK'] or not res['Value']:
        error = res.get( 'Message', 'No such file or directory' )
        for fileName in dirDict[dirPath]:
          failed['%s/%s' % ( dirPath, fileName )] = error
      else:
        directoryIDs[dirPath] = res['Value']
    successful = {}
    # Fetch the requested metadata for the files of each directory.
    for dirPath in directoryIDs.keys():
      fileNames = dirDict[dirPath]
      res = self._getDirectoryFiles( directoryIDs[dirPath], fileNames, metadata, connection = connection )
      if not res['OK'] or not res['Value']:
        error = res.get( 'Message', 'No such file or directory' )
        for fileName in fileNames:
          failed['%s/%s' % ( dirPath, fileName )] = error
      else:
        for fileName, fileDict in res['Value'].items():
          successful["%s/%s" % ( dirPath, fileName )] = fileDict
    return S_OK( {"Successful":successful, "Failed":failed} )
  def _getDirectoryFiles( self, dirID, fileNames, metadata, allStatus = False, connection = False ):
    """ Return {fileName: {metadata field: value}} for the given directory.
        Unless allStatus is True only files in 'AprioriGood' status are
        returned; an empty fileNames list means all files in the directory.
    """
    connection = self._getConnection( connection )
    # metadata can be any of ['FileID','Size','UID','GID','Checksum','ChecksumType','Type','CreationDate','ModificationDate','Mode','Status']
    req = "SELECT FileName,%s FROM FC_Files WHERE DirID=%d" % ( intListToString( metadata ), dirID )
    if not allStatus:
      # Restrict to the 'AprioriGood' status ID if it can be resolved.
      statusIDs = []
      res = self._getStatusInt( 'AprioriGood', connection = connection )
      if res['OK']:
        statusIDs.append( res['Value'] )
      if statusIDs:
        req = "%s AND Status IN (%s)" % ( req, intListToString( statusIDs ) )
    if fileNames:
      req = "%s AND FileName IN (%s)" % ( req, stringListToString( fileNames ) )
    res = self.db._query( req, connection )
    if not res['OK']:
      return res
    files = {}
    # First column is the file name, the rest map onto `metadata`.
    for fTuple in res['Value']:
      fileName = fTuple[0]
      files[fileName] = dict( zip( metadata, fTuple[1:] ) )
    return S_OK( files )
  ######################################################
  #
  # _addFiles related methods
  #
  def _insertFiles( self, lfns, uid, gid, connection = False ):
    """ Bulk-insert the given {lfn: info} files with owner uid/gid, then
        read back their FileIDs. Returns Successful/Failed dictionaries.
    """
    connection = self._getConnection( connection )
    # Add the files
    failed = {}
    directoryFiles = {}
    insertTuples = []
    res = self._getStatusInt( 'AprioriGood', connection = connection )
    statusID = 0
    if res['OK']:
      statusID = res['Value']
    for lfn in sorted( lfns.keys() ):
      fileInfo = lfns[lfn]
      size = fileInfo['Size']
      guid = fileInfo.get( 'GUID', '' )
      checksum = fileInfo['Checksum']
      checksumtype = fileInfo.get( 'ChecksumType', 'Adler32' )
      dirName = os.path.dirname( lfn )
      dirID = fileInfo['DirID']
      fileName = os.path.basename( lfn )
      # NOTE(review): directoryFiles is populated but never read below.
      if not directoryFiles.has_key( dirName ):
        directoryFiles[dirName] = []
      directoryFiles[dirName].append( fileName )
      insertTuples.append( "(%d,%d,%d,%d,%d,'%s','%s','%s','%s',UTC_TIMESTAMP(),UTC_TIMESTAMP(),%d)" % ( dirID, size, uid, gid, statusID, fileName, guid, checksum, checksumtype, self.db.umask ) )
    # Single multi-row INSERT for all the files.
    req = "INSERT INTO FC_Files (DirID,Size,UID,GID,Status,FileName,GUID,Checksum,ChecksumType,CreationDate,ModificationDate,Mode) VALUES %s" % ( ','.join( insertTuples ) )
    res = self.db._update( req, connection )
    if not res['OK']:
      return res
    # Get the fileIDs for the inserted files
    res = self._findFiles( lfns.keys(), ['FileID'], connection = connection )
    if not res['OK']:
      for lfn in lfns.keys():
        failed[lfn] = 'Failed post insert check'
        lfns.pop( lfn )
    else:
      failed.update( res['Value']['Failed'] )
      for lfn, fileDict in res['Value']['Successful'].items():
        lfns[lfn]['FileID'] = fileDict['FileID']
    return S_OK( {'Successful':lfns, 'Failed':failed} )
  def _getFileIDFromGUID( self, guid, connection = False ):
    """ Map one GUID or a list of GUIDs to FileIDs; returns {guid: fileID}. """
    connection = self._getConnection( connection )
    if not guid:
      return S_OK( {} )
    if not isinstance( guid, ( list, tuple ) ):
      guid = [guid]
    req = "SELECT FileID,GUID FROM FC_Files WHERE GUID IN (%s)" % stringListToString( guid )
    res = self.db._query( req, connection )
    if not res['OK']:
      return res
    guidDict = {}
    for fileID, guid in res['Value']:
      guidDict[guid] = fileID
    return S_OK( guidDict )
  ######################################################
  #
  # _deleteFiles related methods
  #
  def _deleteFiles( self, fileIDs, connection = False ):
    """ Remove the replicas then the file records for the given FileIDs. """
    connection = self._getConnection( connection )
    # NOTE(review): the replica purge is not passed `connection`, so it
    # opens its own connection -- confirm this is intended.
    replicaPurge = self.__deleteFileReplicas( fileIDs )
    filePurge = self.__deleteFiles( fileIDs, connection = connection )
    if not replicaPurge['OK']:
      return replicaPurge
    if not filePurge['OK']:
      return filePurge
    return S_OK()
  def __deleteFileReplicas( self, fileIDs, connection = False ):
    """ Delete all FC_Replicas rows belonging to the given FileIDs. """
    connection = self._getConnection( connection )
    if not fileIDs:
      return S_OK()
    req = "DELETE FROM FC_Replicas WHERE FileID in (%s)" % ( intListToString( fileIDs ) )
    return self.db._update( req, connection )
  def __deleteFiles( self, fileIDs, connection = False ):
    """ Delete the FC_Files rows for the given FileIDs. """
    connection = self._getConnection( connection )
    if not fileIDs:
      return S_OK()
    req = "DELETE FROM FC_Files WHERE FileID in (%s)" % ( intListToString( fileIDs ) )
    return self.db._update( req, connection )
  ######################################################
  #
  # _addReplicas related methods
  #
  def _insertReplicas( self, lfns, master = False, connection = False ):
    """ Insert replicas for {lfn: {FileID, PFN, SE, DirID, Size}}.
        With master=True the replicas are registered as 'Master' and no
        pre-existence check is done. Directory storage usage is updated
        on success; on failure the just-inserted rows are rolled back.
    """
    connection = self._getConnection( connection )
    res = self._getStatusInt( 'AprioriGood', connection = connection )
    statusID = 0
    if res['OK']:
      statusID = res['Value']
    replicaType = 'Replica'
    if master:
      replicaType = 'Master'
    insertTuples = {}
    deleteTuples = []
    successful = {}
    failed = {}
    directorySESizeDict = {}
    for lfn in sorted( lfns.keys() ):
      fileID = lfns[lfn]['FileID']
      pfn = lfns[lfn]['PFN']
      seName = lfns[lfn]['SE']
      res = self.db.seManager.findSE( seName )
      if not res['OK']:
        failed[lfn] = res['Message']
        continue
      seID = res['Value']
      if not master:
        # Non-master replicas may already exist; treat that as success.
        res = self.__existsReplica( fileID, seID, connection = connection )
        if not res['OK']:
          failed[lfn] = res['Message']
          continue
        elif res['Value']:
          successful[lfn] = True
          continue
      # Accumulate per-directory, per-SE file counts and sizes for the
      # storage usage bookkeeping.
      dirID = lfns[lfn]['DirID']
      if not directorySESizeDict.has_key( dirID ):
        directorySESizeDict[dirID] = {}
      if not directorySESizeDict[dirID].has_key( seID ):
        directorySESizeDict[dirID][seID] = {'Files':0, 'Size':0}
      directorySESizeDict[dirID][seID]['Size'] += lfns[lfn]['Size']
      directorySESizeDict[dirID][seID]['Files'] += 1
      insertTuples[lfn] = ( "(%d,%d,%d,'%s',UTC_TIMESTAMP(),UTC_TIMESTAMP(),'%s')" % ( fileID, seID, statusID, replicaType, pfn ) )
      deleteTuples.append( ( fileID, seID ) )
    if insertTuples:
      req = "INSERT INTO FC_Replicas (FileID,SEID,Status,RepType,CreationDate,ModificationDate,PFN) VALUES %s" % ','.join( insertTuples.values() )
      res = self.db._update( req, connection )
      if not res['OK']:
        # Roll back whatever the multi-row insert may have created.
        self.__deleteReplicas( deleteTuples, connection = connection )
        for lfn in insertTuples.keys():
          failed[lfn] = res['Message']
      else:
        # Update the directory usage
        self._updateDirectoryUsage( directorySESizeDict, '+', connection = connection )
        for lfn in insertTuples.keys():
          successful[lfn] = True
    return S_OK( {'Successful':successful, 'Failed':failed} )
  def __existsReplica( self, fileID, seID, connection = False ):
    # TODO: This is in efficient. Should perform bulk operation
    connection = self._getConnection( connection )
    """ Check if a replica already exists """
    # Accept an SE name as well as a numeric SE ID.
    if isinstance( seID, basestring ):
      res = self.db.seManager.findSE( seID )
      if not res['OK']:
        return res
      seID = res['Value']
    req = "SELECT FileID FROM FC_Replicas WHERE FileID=%d AND SEID=%d" % ( fileID, seID )
    result = self.db._query( req, connection )
    if not result['OK']:
      return result
    if not result['Value']:
      return S_OK( False )
    return S_OK( True )
  ######################################################
  #
  # _deleteReplicas related methods
  #
  def _deleteReplicas( self, lfns, connection = False ):
    """ Delete the replicas described by {lfn: {'SE': se}} and decrement
        the per-directory storage usage accordingly.
    """
    connection = self._getConnection( connection )
    successful = {}
    res = self._findFiles( lfns.keys(), ['DirID', 'FileID', 'Size'], connection = connection )
    failed = res['Value']['Failed']
    lfnFileIDDict = res['Value']['Successful']
    toRemove = []
    directorySESizeDict = {}
    for lfn, fileDict in lfnFileIDDict.items():
      fileID = fileDict['FileID']
      se = lfns[lfn]['SE']
      toRemove.append( ( fileID, se ) )
      # Now prepare the storage usage dict
      res = self.db.seManager.findSE( se )
      if not res['OK']:
        return res
      seID = res['Value']
      dirID = fileDict['DirID']
      if not directorySESizeDict.has_key( dirID ):
        directorySESizeDict[dirID] = {}
      if not directorySESizeDict[dirID].has_key( seID ):
        directorySESizeDict[dirID][seID] = {'Files':0, 'Size':0}
      directorySESizeDict[dirID][seID]['Size'] += fileDict['Size']
      directorySESizeDict[dirID][seID]['Files'] += 1
    # NOTE(review): no `connection` passed here either -- see _deleteFiles.
    res = self.__deleteReplicas( toRemove )
    if not res['OK']:
      for lfn in lfnFileIDDict.keys():
        failed[lfn] = res['Message']
    else:
      # Update the directory usage
      self._updateDirectoryUsage( directorySESizeDict, '-', connection = connection )
      for lfn in lfnFileIDDict.keys():
        successful[lfn] = True
    return S_OK( {'Successful':successful, 'Failed':failed} )
  def __deleteReplicas( self, replicaTuples, connection = False ):
    """ Delete the (fileID, seID-or-name) replica pairs in one statement. """
    connection = self._getConnection( connection )
    deleteTuples = []
    for fileID, seID in replicaTuples:
      # Accept an SE name as well as a numeric SE ID.
      if isinstance( seID, basestring ):
        res = self.db.seManager.findSE( seID )
        if not res['OK']:
          return res
        seID = res['Value']
      deleteTuples.append( "(%d,%d)" % ( fileID, seID ) )
    req = "DELETE FROM FC_Replicas WHERE (FileID,SEID) IN (%s)" % intListToString( deleteTuples )
    return self.db._update( req, connection )
  ######################################################
  #
  # _setReplicaStatus _setReplicaHost _setReplicaParameter methods
  # _setFileParameter method
  #
  def _setReplicaStatus( self, fileID, se, status, connection = False ):
    """ Set the status (by name) of the replica of fileID at the given SE. """
    connection = self._getConnection( connection )
    res = self._getStatusInt( status, connection = connection )
    if not res['OK']:
      return res
    statusID = res['Value']
    return self._setReplicaParameter( fileID, se, 'Status', statusID, connection = connection )
  def _setReplicaHost( self, fileID, se, newSE, connection = False ):
    """ Move the replica record of fileID from `se` to `newSE`. """
    connection = self._getConnection( connection )
    res = self.db.seManager.findSE( newSE )
    if not res['OK']:
      return res
    newSE = res['Value']
    return self._setReplicaParameter( fileID, se, 'SEID', newSE, connection = connection )
  def _setReplicaParameter( self, fileID, seID, paramName, paramValue, connection = False ):
    """ Update one column of a single FC_Replicas row, bumping its
        modification date. seID may be an SE name or a numeric ID.
    """
    connection = self._getConnection( connection )
    if isinstance( seID, basestring ):
      res = self.db.seManager.findSE( seID )
      if not res['OK']:
        return res
      seID = res['Value']
    req = "UPDATE FC_Replicas SET %s='%s', ModificationDate=UTC_TIMESTAMP() WHERE FileID=%d AND SEID=%d;" % ( paramName, paramValue, fileID, seID )
    return self.db._update( req, connection )
  def _setFileParameter( self, fileID, paramName, paramValue, connection = False ):
    """ Update one column of FC_Files for one or several FileIDs, bumping
        their modification date.
    """
    connection = self._getConnection( connection )
    if not isinstance( fileID, ( list, tuple ) ):
      fileID = [fileID]
    req = "UPDATE FC_Files SET %s='%s', ModificationDate=UTC_TIMESTAMP() WHERE FileID IN (%s)" % ( paramName, paramValue, intListToString( fileID ) )
    return self.db._update( req, connection )
  ######################################################
  #
  # _getFileReplicas related methods
  #
  def _getFileReplicas( self, fileIDs, fields = ['PFN'], connection = False ):
    """ Return {fileID: {seName: {'Status': ..., field: value}}} for the
        given FileIDs. FileIDs without replicas map to an empty dict.
        NOTE(review): mutable default `fields = ['PFN']` -- see _findFiles.
    """
    connection = self._getConnection( connection )
    if not fileIDs:
      return S_ERROR( "No such file or directory" )
    req = "SELECT FileID,SEID,Status,%s FROM FC_Replicas WHERE FileID IN (%s);" % ( intListToString( fields ), intListToString( fileIDs ) )
    res = self.db._query( req, connection )
    if not res['OK']:
      return res
    replicas = {}
    for fTuple in res['Value']:
      fileID = fTuple[0]
      if not replicas.has_key( fileID ):
        replicas[fileID] = {}
      seID = fTuple[1]
      # Rows whose SE or status cannot be resolved are silently skipped.
      res = self.db.seManager.getSEName( seID )
      if not res['OK']:
        continue
      seName = res['Value']
      statusID = fTuple[2]
      res = self._getIntStatus( statusID, connection = connection )
      if not res['OK']:
        continue
      status = res['Value']
      replicas[fileID][seName] = {'Status':status}
      replicas[fileID][seName].update( dict( zip( fields, fTuple[3:] ) ) )
    for fileID in fileIDs:
      if not replicas.has_key( fileID ):
        replicas[fileID] = {}
    return S_OK( replicas )
| Andrew-McNab-UK/DIRAC | DataManagementSystem/DB/FileCatalogComponents/FileManagerFlat.py | Python | gpl-3.0 | 14,198 | [
"DIRAC"
] | 538936df844392c15d88cd3f0fd305a3c43182abc0f6fd8b36b90229a04236f2 |
# $Id$
"""
This module defines a classs for a generic Workflow Parameter. It also defines
a ParameterCollection class as a list of parameters as well as an AttributeCollection
class which is the base class for the main Workflow classes.
"""
__RCSID__ = "$Revision: 1.33 $"
from DIRAC.Core.Workflow.Utility import *
# unbound method, returns indentated string
def indent( indent = 0 ):
  """ Return a string of 2*indent spaces used for pretty-printing """
  return ' ' * ( 2 * indent )
class Parameter( object ):
def __init__( self, name = None, value = None, type = None, linked_module = None,
linked_parameter = None, typein = None, typeout = None, description = None, parameter = None ):
# the priority to assign values
# if parameter exists all values taken from there
# and then owerriten by values taken from the arguments
if isinstance( parameter, Parameter ):
self.name = parameter.name
self.type = parameter.type
self.value = parameter.value
self.description = parameter.description
self.linked_module = parameter.linked_module
self.linked_parameter = parameter.linked_parameter
self.typein = bool( parameter.typein )
self.typeout = bool( parameter.typeout )
else:
# default values
self.name = ""
self.type = "string"
self.value = ""
self.description = ""
self.linked_module = ""
self.linked_parameter = ""
self.typein = False
self.typeout = False
if name != None:
self.name = name
if type != None:
self.type = type
if value != None:
self.setValue( value )
if description != None:
self.description = description
if linked_module != None:
self.linked_module = linked_module
if linked_parameter != None:
self.linked_parameter = linked_parameter
if typein != None:
self.setInput( typein )
if typeout != None:
self.setOutput( typeout )
  def getName( self ):
    """ Return the parameter name """
    return self.name
  def setName( self, n ):
    """ Set the parameter name """
    self.name = n # if collection=None it still will work fine
  def getValue( self ):
    """ Return the parameter value (already converted to its type) """
    return self.value
def getValueTypeCorrected( self ):
# this method used to generate code for the workflow
# it NOT used to geterate XML!!!
if self.isTypeString():
return '"""' + str( self.value ).replace( '"', r'\"' ).replace( "'", r"\'" ) + '"""'
return self.value
def setValue( self, value, type_ = None ):
if type_ != None:
self.setType( type_ )
self.setValueByType( value )
def setValueByType( self, value ):
type = self.type.lower() # change the register
if self.isTypeString():
self.value = str( value )
elif type == 'float':
self.value = float( value )
elif type == 'int':
self.value = int( value )
elif type == 'bool':
self.value = bool( value )
else:
#raise TypeError('Can not assing value '+value+' of unknown type '+ self.type + ' to the Parameter '+ str(self.name))
#print 'WARNING: we do not have established conversion algorithm to assing value ',value,' of unknown type ',self.type, ' to the Parameter ', str(self.name)
self.value = value
def getType( self ):
return self.type
def setType( self, type_ ):
self.type = type_
def isTypeString( self ):
"""returns True if type is the string kind"""
type = self.type.lower() # change the register
if type == 'string' or type == 'jdl' or \
type == 'option' or type == 'parameter' or \
type == 'jdlreqt':
return True
return False
def getDescription( self ):
return self.description
def setDescription( self, descr ):
self.description = descr
def link( self, module, parameter ):
self.linked_module = module
self.linked_parameter = parameter
def unlink( self ):
self.linked_module = ""
self.linked_parameter = ""
def getLinkedModule( self ):
return self.linked_module
def getLinkedParameter( self ):
return self.linked_parameter
def getLink( self ):
# we have 4 possibilities
# two fields can be filled independently
# it is possible to fill one field with the valid information
# spaces shall be ignored ( using strip() function)
if ( self.linked_module == None ) or ( self.linked_module.strip() == '' ):
if ( self.linked_parameter == None ) or ( self.linked_parameter.strip() == '' ):
# both empty
return ""
else:
# parameter filled
return self.linked_parameter
else:
if ( self.linked_parameter == None ) or ( self.linked_parameter.strip() == '' ):
return self.linked_module
return self.linked_module + '.' + self.linked_parameter
def isLinked( self ):
if ( self.linked_module == None ) or ( self.linked_module.strip() == '' ):
if ( self.linked_parameter == None ) or ( self.linked_parameter.strip() == '' ):
return False
return True
def preExecute( self ):
""" method to request watever parameter need to be defined before calling execute method
returns TRUE if it needs to be done, FALSE otherwise
PS: parameters with the output status only going to be left out"""
return ( not self.isOutput() ) or self.isInput()
def isInput( self ):
return self.typein
def isOutput( self ):
return self.typeout
def setInput( self, i ):
if isinstance( i, str ) or isinstance( i, unicode ):
self.typein = self.__setBooleanFromString( i )
else:
self.typein = bool( i )
def setOutput( self, i ):
if isinstance( i, str ) or isinstance( i, unicode ):
self.typeout = self.__setBooleanFromString( i )
else:
self.typeout = bool( i )
def __setBooleanFromString( self, i ):
if i.upper() == "TRUE":
return True
else:
return False
def __str__( self ):
return str( type( self ) ) + ": name=" + self.name + " value=" + str( self.getValueTypeCorrected() ) + " type=" + str( self.type )\
+ " linked_module=" + str( self.linked_module ) + " linked_parameter=" + str( self.linked_parameter )\
+ " in=" + str( self.typein ) + " out=" + str( self.typeout )\
+ " description=" + str( self.description )
def toXML( self ):
return '<Parameter name="' + self.name + '" type="' + str( self.type )\
+ '" linked_module="' + str( self.linked_module ) + '" linked_parameter="' + str( self.linked_parameter )\
+ '" in="' + str( self.typein ) + '" out="' + str( self.typeout )\
+ '" description="' + str( self.description ) + '">'\
+ '<value><![CDATA[' + str( self.getValue() ) + ']]></value>'\
+ '</Parameter>\n'
# we got a problem with the index() function
# def __eq__(self, s):
def compare( self, s ):
if isinstance( s, Parameter ):
return ( self.name == s.name ) and \
( self.value == s.value ) and \
( self.type == s.type ) and \
( self.linked_module == s.linked_module ) and \
( self.linked_parameter == s.linked_parameter ) and \
( self.typein == s.typein ) and \
( self.typeout == s.typeout ) and \
( self.description == s.description )
else:
return False
#
# def __deepcopy__(self, memo):
# return Parameter(parameter=self)
#
# def __copy__(self):
# return self.__deepcopy__({})
def copy( self, parameter ):
if isinstance( parameter, Parameter ):
self.name = parameter.name
self.value = parameter.value
self.type = parameter.type
self.description = parameter.description
self.linked_module = parameter.linked_module
self.linked_parameter = parameter.linked_parameter
self.typein = parameter.typein
self.typeout = parameter.typeout
else:
raise TypeError( 'Can not make a copy of object ' + str( type( self ) ) + ' from the ' + str( type( parameter ) ) )
def createParameterCode( self, ind = 0, instance_name = None ):
if ( instance_name == None ) or ( instance_name == '' ):
ret = indent( ind ) + self.getName() + ' = ' + self.getValueTypeCorrected()
else:
if self.isLinked():
ret = indent( ind ) + instance_name + '.' + self.getName() + ' = ' + self.getLink()
else:
ret = indent( ind ) + instance_name + '.' + self.getName() + ' = ' + str( self.getValueTypeCorrected() )
return ret + ' # type=' + self.getType() + ' in=' + str( self.isInput() ) + ' out=' + str( self.isOutput() ) + ' ' + self.getDescription() + '\n'
class ParameterCollection( list ):
  """ Parameter collection class representing a list of Parameters.

  Parameter names are assumed to be unique within a collection: appending a
  parameter whose name already exists overwrites the previous entry.
  """

  def __init__( self, coll = None ):
    list.__init__( self )
    if isinstance( coll, ParameterCollection ):
      # makes a deep copy of the parameters
      for v in coll:
        self.append( Parameter( parameter = v ) )
    elif coll != None:
      raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( coll ) ) )

  def appendOrOverwrite( self, opt ):
    """ Append *opt*, replacing any existing parameter with the same name """
    index = self.findIndex( opt.getName() )
    if index > -1:
      self[index] = opt
    else:
      list.append( self, opt )

  def append( self, opt ):
    """ Append a Parameter, or every parameter of a ParameterCollection """
    if isinstance( opt, ParameterCollection ):
      for p in opt:
        self.appendOrOverwrite( p )
    elif isinstance( opt, Parameter ):
      self.appendOrOverwrite( opt )
      return opt
    else:
      raise TypeError( 'Can not append object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '. Parameter type appendable only' )

  def appendCopy( self, opt, prefix = "", postfix = "" ):
    """ Append deep copies, renaming each one to prefix+name+postfix """
    if isinstance( opt, ParameterCollection ):
      for p in opt:
        self.appendOrOverwrite( Parameter( name = prefix + p.getName() + postfix, parameter = p ) )
    elif isinstance( opt, Parameter ):
      self.appendOrOverwrite( Parameter( name = prefix + opt.getName() + postfix, parameter = opt ) )
    else:
      raise TypeError( 'Can not append object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '. Parameter type appendable only' )

  def appendCopyLinked( self, opt, prefix = "", postfix = "" ):
    """ Same as appendCopy() but only for parameters that are linked """
    if isinstance( opt, ParameterCollection ):
      for p in opt:
        if p.isLinked():
          self.appendOrOverwrite( Parameter( name = prefix + p.getName() + postfix, parameter = p ) )
    elif isinstance( opt, Parameter ):
      if opt.isLinked():
        self.appendOrOverwrite( Parameter( name = prefix + opt.getName() + postfix, parameter = opt ) )
    else:
      raise TypeError( 'Can not append object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '. Parameter type appendable only' )

  def setValue( self, name, value, vtype = None ):
    """ Find the parameter called *name* and set its value.
    Returns True if successful.
    """
    par = self.find( name )
    if par == None:
      print( "ERROR ParameterCollection.setValue() can not find parameter with the name=%s to set Value=%s" % ( name, value ) )
      return False
    par.setValue( value, vtype )
    return True

  def getInput( self ):
    """ Get input linked parameters
    """
    return self.get( input = True )

  def getOutput( self ):
    """ Get output linked parameters
    """
    return self.get( output = True )

  def getLinked( self ):
    """ Get linked parameters
    """
    return self.get( input = True, output = True )

  def get( self, input = False, output = False ):
    """ Get a copy of parameters. If input or output is True, get corresponding
    io type parameters only. Otherwise, get all the parameters.
    """
    take_all = not input and not output
    params = ParameterCollection()
    for p in self:
      wanted = False
      if take_all:
        wanted = True
      elif input and p.isInput():
        wanted = True
      elif output and p.isOutput():
        wanted = True
      if wanted:
        params.append( Parameter( parameter = p ) )
    return params

  def setLink( self, name, module_name, parameter_name ):
    """ Find the parameter called *name* and link it to module_name.parameter_name.
    Returns True if successful.
    """
    par = self.find( name )
    if par == None:
      print( "ERROR ParameterCollection.setLink() can not find parameter with the name=%s to link it with %s.%s" % ( name, module_name, parameter_name ) )
      return False
    par.link( module_name, parameter_name )
    return True

  def linkUp( self, opt, prefix = "", postfix = "", objname = "self" ):
    """ This is a GROUP method operating on this collection using only the
    parameters listed in *opt*. It links self.parameters with the outer
    object parameters using *prefix* and *postfix*; for example, to link a
    module instance with the step, or a step instance with the workflow.
    opt - ParameterCollection or a single Parameter (used as a reference for
    the names only; opt itself is not changed), a list of parameter name
    strings, or a single name string.
    objname - name of the object to connect with, usually 'self'
    """
    if isinstance( opt, ParameterCollection ):
      # parameters listed in opt but absent from self are ignored
      for p in opt:
        par = self.find( p.getName() )
        if par == None:
          print( "WARNING ParameterCollection.linkUp can not find parameter with the name=%s IGNORING" % p.getName() )
        else:
          par.link( objname, prefix + p.getName() + postfix )
    elif isinstance( opt, Parameter ):
      self.setLink( opt.getName(), objname, prefix + opt.getName() + postfix )
    elif isinstance( opt, list ) and isinstance( opt[0], str ):
      for s in opt:
        par = self.find( s )
        if par == None:
          print( "ERROR ParameterCollection.linkUp() can not find parameter with the name=%s" % ( s ) )
        else:
          # bug fix: the original referenced the stale loop variable ``p``
          # from an unrelated branch here
          par.link( objname, prefix + s + postfix )
    elif isinstance( opt, str ):
      par = self.find( opt )
      if par == None:
        print( "ERROR ParameterCollection.linkUp() can not find parameter with the name=%s" % ( opt ) )
      else:
        par.link( objname, prefix + par.getName() + postfix )
    else:
      raise TypeError( 'Can not link object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '.' )

  def unlink( self, opt ):
    """ This is a GROUP method operating on this collection using only the
    parameters listed in *opt*: it unlinks the matching parameters.
    opt - ParameterCollection or single Parameter (used as a reference for
    the names only; opt itself is not changed), a list of parameter name
    strings, or a single name string.
    """
    if isinstance( opt, ParameterCollection ):
      # parameters listed in opt but absent from self are ignored
      for p in opt:
        par = self.find( p.getName() )
        if par == None:
          print( "WARNING ParameterCollection.unlink can not find parameter with the name=%s IGNORING" % p.getName() )
        else:
          par.unlink()
    elif isinstance( opt, Parameter ):
      # bug fix: the original called self.unlink() with no arguments here,
      # which raised a TypeError instead of unlinking the named parameter
      par = self.find( opt.getName() )
      if par != None:
        par.unlink()
    elif isinstance( opt, list ) and isinstance( opt[0], str ):
      for s in opt:
        par = self.find( s )
        if par == None:
          print( "ERROR ParameterCollection.unlink() can not find parameter with the name=%s" % ( s ) )
        else:
          par.unlink()
    elif isinstance( opt, str ):
      par = self.find( opt )
      if par == None:
        # bug fix: the original formatted this message with an undefined name
        print( "ERROR ParameterCollection.unlink() can not find parameter with the name=%s" % ( opt ) )
      else:
        par.unlink()
    else:
      raise TypeError( 'Can not unlink object type ' + str( type( opt ) ) + ' to the ' + str( type( self ) ) + '.' )

  def removeAllParameters( self ):
    self[:] = []

  def remove( self, name_or_ind ):
    """ Removes a parameter given its name (or a list of names), or the index
    (the latter is not suggested), and only if it exists.
    If there are 2 parameters with the same name, only the first is removed.
    """
    if isinstance( name_or_ind, list ) and isinstance( name_or_ind[0], str ):
      for s in name_or_ind:
        index = self.findIndex( s )
        if index > -1:
          del self[index]
        else:
          print( "ERROR ParameterCollection.remove() can not find parameter with the name=%s" % ( s ) )
      # bug fix: the original fell through after this loop and could delete
      # one extra element using the stale index
      return
    if isinstance( name_or_ind, str ):  # we are given a name
      index = self.findIndex( name_or_ind )
    elif isinstance( name_or_ind, int ):  # we are given the index
      index = name_or_ind
    else:
      return  # unsupported argument type; the original raised NameError here
    if index > -1:
      del self[index]

  def find( self, name_or_ind ):
    """ Method to find Parameters by name or index.
    Return: Parameter (or None when a name is not found) """
    if isinstance( name_or_ind, str ):  # we are given a name
      for v in self:
        if v.getName() == name_or_ind:
          return v
      return None
    elif isinstance( name_or_ind, int ):  # we are given an index
      return self[name_or_ind]
    # anything else is coerced to an integer index (covers Python 2 longs
    # without referencing the ``long`` builtin, which is gone in Python 3)
    return self[int( name_or_ind )]

  def findLinked( self, name_or_ind, linked_status = True ):
    """ Method to find Parameters.
    If linked_status is True it returns only linked parameters;
    if linked_status is False it returns only NOT linked parameters.
    Return: Parameter """
    v = self.find( name_or_ind )
    if ( v != None ) and ( v.isLinked() != linked_status ):
      return None
    return v

  def findIndex( self, name ):
    """ Return the index of the first parameter called *name*, or -1 """
    i = 0
    for v in self:
      if v.getName() == name:
        return i
      i = i + 1
    return - 1

  def getParametersNames( self ):
    """ Return the list of all parameter names """
    return [v.getName() for v in self]

  def compare( self, s ):
    # we compare parameters only; attributes are compared a level above.
    # The position of a Parameter in the list is ignored and the names are
    # assumed to be unique, otherwise the algorithm has to change!
    if ( not isinstance( s, ParameterCollection ) ) or ( len( s ) != len( self ) ):
      return False
    for v in self:
      for i in s:
        if v.getName() == i.getName():
          if not v.compare( i ):
            return False
          else:
            break
      else:
        # reaching here means no matching name was found in s
        return False
    return True

  def __str__( self ):
    ret = str( type( self ) ) + ':\n'
    for v in self:
      ret = ret + str( v ) + '\n'
    return ret

  def toXML( self ):
    ret = ""
    for v in self:
      ret = ret + v.toXML()
    return ret

  def createParametersCode( self, indent = 0, instance_name = None ):
    """ Generate assignment code for every parameter that needs a value
    defined before execution (see Parameter.preExecute()) """
    code = ''
    for v in self:
      if v.preExecute():
        code = code + v.createParameterCode( indent, instance_name )
    return code

  def resolveGlobalVars( self, wf_parameters = None, step_parameters = None ):
    """This function resolves global parameters of type @{value} within the
    ParameterCollection, looking up values in the local scope first, then in
    the step instance and finally in the workflow scope.
    """
    recurrency_max = 12
    for v in self:
      recurrency = 0
      skip_list = []
      substitute_vars = getSubstitute( v.value )
      while True:
        for substitute_var in substitute_vars:
          # looking in the current scope
          v_other = self.find( substitute_var )
          # looking in the scope of the step instance
          if v_other == None and step_parameters != None:
            v_other = step_parameters.findLinked( substitute_var, False )
          # looking in the scope of the workflow
          if v_other == None and wf_parameters != None:
            v_other = wf_parameters.findLinked( substitute_var, False )
          # finally the action itself
          if v_other != None and not v_other.isLinked():
            v.value = substitute( v.value, substitute_var, v_other.value )
          elif v_other != None:
            # linked parameters can only be resolved at execution time
            print( "Leaving %s variable for dynamic resolution" % substitute_var )
            skip_list.append( substitute_var )
          else:  # if nothing helped, tough!
            print( "Can not resolve %s %s" % ( substitute_var, str( v ) ) )
        recurrency += 1
        if recurrency > recurrency_max:
          # should probably raise an exception instead
          print( "ERROR! reached maximum recurrency level %s within the parameter %s" % ( recurrency, str( v ) ) )
          if step_parameters == None:
            if wf_parameters == None:
              print( "on the level of Workflow" )
            else:
              print( "on the level of Step" )
          else:
            if wf_parameters != None:
              print( "on the level of Module" )
          break
        else:
          substitute_vars = getSubstitute( v.value, skip_list )
          if not substitute_vars:
            break
class AttributeCollection( dict ):
  """ Attribute Collection class: a dict of named attributes which also
  holds a ParameterCollection as a data member and delegates the usual
  parameter operations to it.
  """

  def __init__( self ):
    dict.__init__( self )
    self.parameters = None  # ParameterCollection, expected to be set by subclasses
    self.parent = None

  def __str__( self ):
    ret = ''
    for v in self.keys():
      ret = ret + v + ' = ' + str( self[v] ) + '\n'
    return ret

  def toXMLString( self ):
    return self.toXML()

  def toXMLFile( self, filename ):
    """ Write the XML representation to *filename* """
    # single write of the whole string; the original iterated the string and
    # wrote it character by character, producing the same file content.
    # try/finally guarantees the handle is closed even on a write error.
    f = open( filename, 'w+' )
    try:
      f.write( self.toXML() )
    finally:
      f.close()
    return

  def toXML( self ):
    """ Serialise all attributes; body/description are CDATA wrapped """
    ret = ""
    for v in self.keys():
      if v == 'parent':
        continue  # the parent reference is never serialised
      elif v == 'body' or v == 'description':
        ret = ret + '<' + v + '><![CDATA[' + str( self[v] ) + ']]></' + v + '>\n'
      else:
        ret = ret + '<' + v + '>' + str( self[v] ) + '</' + v + '>\n'
    return ret

  # ------------- delegation to the ParameterCollection -----------
  def addParameter( self, opt, prefix = "", postfix = "" ):
    self.parameters.appendCopy( opt, prefix, postfix )

  def addParameterLinked( self, opt, prefix = "", postfix = "" ):
    self.parameters.appendCopyLinked( opt, prefix, postfix )

  def linkUp( self, opt, prefix = "", postfix = "", objname = "self" ):
    self.parameters.linkUp( opt, prefix, postfix, objname )

  def unlink( self, opt ):
    self.parameters.unlink( opt )

  def removeParameter( self, name_or_ind ):
    self.parameters.remove( name_or_ind )

  def removeAllParameters( self ):
    self.parameters.removeAllParameters()

  def findParameter( self, name_or_ind ):
    return self.parameters.find( name_or_ind )

  def findParameterIndex( self, ind ):
    return self.parameters.findIndex( ind )

  def compareParameters( self, s ):
    return self.parameters.compare( s )

  def setValue( self, name, value, type_ = None ):
    if not self.parameters.setValue( name, value, type_ ):
      # extend the error message printed by the collection with our identity
      print( " in the object=%s with name=%s of type=%s" % ( type( self ), self.getName(), self.getType() ) )

  def setLink( self, name, module_name, parameter_name ):
    if not self.parameters.setLink( name, module_name, parameter_name ):
      print( " in the object=%s with name=%s of type=%s" % ( type( self ), self.getName(), self.getType() ) )

  def compare( self, s ):
    # dict equality for the attributes plus a deep compare of the parameters
    return ( self == s ) and self.parameters.compare( s.parameters )

  def setParent( self, parent ):
    self.parent = parent

  def getParent( self ):
    return self.parent

  # ------------- common attribute accessors -----------
  def setName( self, name ):
    self['name'] = name

  def getName( self ):
    # 'in' instead of the Python 2 only dict.has_key()
    if 'name' in self:
      return self['name']
    return ''

  def setType( self, att_type ):
    self['type'] = att_type

  def getType( self ):
    if 'type' in self:
      return self['type']
    return ''

  def setRequired( self, required ):
    self['required'] = required

  def getRequired( self ):
    return self['required']

  def setDescription( self, description ):
    self['description'] = description

  def getDescription( self ):
    return self['description']

  def setDescrShort( self, descr_short ):
    self['descr_short'] = descr_short

  def getDescrShort( self ):
    return self['descr_short']

  def setBody( self, body ):
    self['body'] = body

  def getBody( self ):
    return self['body']

  def setOrigin( self, origin ):
    self['origin'] = origin

  def getOrigin( self ):
    return self['origin']

  def setVersion( self, ver ):
    self['version'] = ver

  def getVersion( self ):
    return self['version']

  def resolveGlobalVars( self, wf_parameters = None, step_parameters = None ):
    self.parameters.resolveGlobalVars( wf_parameters, step_parameters )
| vmendez/DIRAC | Core/Workflow/Parameter.py | Python | gpl-3.0 | 24,537 | [
"DIRAC"
] | 8d53434f16127d88b4a75b7bc98f30784d2a875dc1ef08739bf560774596de77 |
#!/usr/bin/python
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.widgets as Cursor
import scipy.constants as cte
from sympy import *
from matplotlib.widgets import Cursor
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk1
else:
import tkinter as Tk1
from Tkinter import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# NOTE(review): matplotlib.use() is called after pyplot was already imported
# above, so it may have no effect on the backend selection -- verify.
matplotlib.use('TkAgg')
# Temperatures (kelvin) for the four distribution curves drawn by imprimir()
T1=0.01
T2=300
T3=3000
T4=5000
# Normalisation constant of the density-of-states factor
N1=0.45
#EFF=0.3
# Energy axis: 5000 samples between 0 and 2 eV
E=np.linspace(0, 2, 5000)
select=0
# Main Tk window
root = Tk()
root.title("FERMI-DIRAC")
# Font settings used for the plot title
font = {'family' : 'serif',
        'color' : 'blue',
        'weight' : 'normal',
        'size' : 16,
        }
# NOTE(review): pack() returns None, so mlabel is always None here
mlabel = Label(root, text="SELECCIONE EL SEMICONDUCTOR", font="Helvetica 16 bold italic", fg="blue").pack()
# Holds the value of the selected radio button ("1", "2" or "3")
v = StringVar()
def imprimir(valor):
  """Redraw the four Fermi-Dirac distribution curves for the chosen material.

  *valor* is the radio-button selection (1=Ge, 2=Si, 3=GaAs); the same
  variable is then re-used to hold the material band gap in eV.
  """
  valor=int(valor)
  # map the selection to a material name and its gap energy (eV)
  if valor==1:
    nombre="Germanio"
    valor=0.66
  else:
    if valor ==2:
      nombre="Silicio"
      valor=1.169
    else:
      if valor==3:
        nombre="Arseniuro de Galio"
        valor=1.519
  # NOTE(review): for any other selection ``nombre`` is undefined and the
  # next line raises NameError
  selection = "Ha elegido simular " + nombre
  label.config(text = selection)
  plt.clf()
  plt.ion()
  # Fermi factor times a sqrt density-of-states term, one curve per
  # temperature T1..T4; 0.0000138 presumably plays the role of Boltzmann's
  # constant with an ad-hoc scale -- TODO confirm the units
  NE1=(1/(np.exp((E-(valor))/(T1*0.0000138))+1))*((3*N1*(E**0.5))/(2*(valor**0.75)))
  plt.plot(E,NE1)
  NE2=1/(np.exp((E-(valor))/(T2*0.0000138))+1)*((3*N1*(E**0.5))/(2*(valor**0.75)))
  plt.plot(E,NE2)
  NE3=1/(np.exp((E-(valor))/(T3*0.0000138))+1)*((3*N1*(E**0.5))/(2*(valor**0.75)))
  plt.plot(E,NE3)
  NE4=1/(np.exp((E-(valor))/(T4*0.0000138))+1)*((3*N1*(E**0.5))/(2*(valor**0.75)))
  plt.plot(E,NE4)
  plt.grid(True)
  plt.ylim(-0.2, 1.2)
  plt.xlabel(r'$E(eV)$', fontsize=20)
  plt.ylabel(r'$N(E)$', fontsize=20)
  plt.title(r'$Funcion\ de\ distribucion\ de\ FERMI-DIRAC\ para\ el\ $'+nombre, fontdict=font)
  # hand-made legend: one fixed text label per temperature
  plt.text(1.5,1.05,'T1=0K',color='b')
  plt.text(1.5,0.95,'T1=300K',color='g')
  plt.text(1.5,0.85,'T2=3000K',color='r')
  plt.text(1.5,0.75,'T3=5000K',color='c')
  plt.text(0.5,0.9, 'Danny Fabian Mora 20112005201\nDiego Javier Mena 20092005053', style='italic',bbox={'facecolor':'red','alpha':0.5, 'pad':10})
  plt.show()
def sel():
  """Radio-button callback: read the current selection and redraw the plot."""
  EF=v.get()
  print EF
  imprimir(EF)
# One radio button per semiconductor; the shared StringVar ``v`` carries the
# choice and sel() is invoked on every click
Radiobutton(root, text="Germanio [Ge]", indicatoron = 0, width = 50, variable=v, value=1, command=sel).pack(anchor=W)
Radiobutton(root, text="Silicio [Si]", indicatoron = 0, width = 50, variable=v, value=2, command=sel).pack(anchor=W)
Radiobutton(root, text="Arseniuro de galio [GaAs]", indicatoron = 0, width = 50, variable=v, value=3, command=sel).pack(anchor=W)
# Label updated by imprimir() with the name of the selected material
label = Label(root)
label.pack()
# Enter the Tk event loop (blocks until the window is closed)
mainloop()
| ingelectronicadj/FisicaConPython | FisicaCuantica/distribucionDeFermi/DisdeFermiNe.py | Python | gpl-3.0 | 2,838 | [
"DIRAC"
] | 989e22c61236d5ddffd87149a3dcd313c6a6d605932ef78d45a0a0471179d233 |
from ase.test.fleur import installed
# abort early unless the FLEUR calculator is available in this environment
assert installed()

from ase.tasks.main import run
# drive the ase command-line task interface: set up bulk fcc Al (a=4.04 A)
# with the FLEUR calculator, PBE exchange-correlation and k-point density 3.0
atoms, task = run("fleur bulk Al -x fcc -a 4.04 --k-point-density=3.0 -p xc=PBE")
# second invocation with -s -- presumably submits/runs the prepared task;
# TODO confirm against the ase.tasks command-line documentation
atoms, task = run('fleur bulk Al -s')
| alexei-matveev/ase-local | ase/test/fleur/fleur_cmdline.py | Python | gpl-2.0 | 210 | [
"ASE",
"FLEUR"
] | 44514d9941c914fe3f5d102fbacff8a4a7b7a5741af4a969afe6ee5aa3531482 |
"""
A collection of utility functions and classes. Many (but not all)
from the Python Cookbook -- hence the name cbook
"""
from __future__ import generators
import re, os, errno, sys, StringIO, traceback, locale, threading, types
import time, datetime
import warnings
import numpy as np
import numpy.ma as ma
from weakref import ref
major, minor1, minor2, s, tmp = sys.version_info
# on some systems, locale.getpreferredencoding returns None, which can break unicode
preferredencoding = locale.getpreferredencoding()
def unicode_safe(s):
    # Decode *s* to unicode honouring the locale's preferred encoding when
    # one could be determined at import time.
    # NOTE(review): relies on the Python 2 ``unicode`` builtin -- any call
    # under Python 3 raises NameError.
    if preferredencoding is None: return unicode(s)
    else: return unicode(s, preferredencoding)
class converter:
    """
    Base class turning a string field into a Python value, with support
    for a designated missing-value token.
    """
    def __init__(self, missing='Null', missingval=None):
        # *missing* is the token marking an absent field, *missingval* the
        # value returned for it
        self.missing = missing
        self.missingval = missingval

    def __call__(self, s):
        return self.missingval if s == self.missing else s

    def is_missing(self, s):
        # blank strings count as missing too
        return (not s.strip()) or s == self.missing
class tostr(converter):
    'convert to string or None'
    def __init__(self, missing='Null', missingval=''):
        # like the base class, but missing fields default to an empty string
        converter.__init__(self, missing, missingval)
class todatetime(converter):
    'convert to a datetime or None'
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        'use a :func:`time.strptime` format string for conversion'
        converter.__init__(self, missing, missingval)
        self.fmt = fmt

    def __call__(self, s):
        if self.is_missing(s):
            return self.missingval
        # keep year..second from the parsed struct_time
        year, month, day, hour, minute, second = time.strptime(s, self.fmt)[:6]
        return datetime.datetime(year, month, day, hour, minute, second)
class todate(converter):
    'convert to a date or None'
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        'use a :func:`time.strptime` format string for conversion'
        converter.__init__(self, missing, missingval)
        self.fmt = fmt

    def __call__(self, s):
        if self.is_missing(s):
            return self.missingval
        # only year, month and day are kept
        year, month, day = time.strptime(s, self.fmt)[:3]
        return datetime.date(year, month, day)
class tofloat(converter):
    'convert to a float or None'
    def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing)
        self.missingval = missingval

    def __call__(self, s):
        return self.missingval if self.is_missing(s) else float(s)
class toint(converter):
    'convert to an int or None'
    def __init__(self, missing='Null', missingval=None):
        # bug fix: the original never forwarded (or stored) *missingval*,
        # so a custom missing value was silently ignored and __call__
        # always used the base-class default of None
        converter.__init__(self, missing, missingval)

    def __call__(self, s):
        if self.is_missing(s): return self.missingval
        return int(s)
class CallbackRegistry:
    """
    Handle registering and disconnecting for a set of signals and
    callbacks::

       signals = 'eat', 'drink', 'be merry'

       def oneat(x):
           print 'eat', x

       def ondrink(x):
           print 'drink', x

       callbacks = CallbackRegistry(signals)

       ideat = callbacks.connect('eat', oneat)
       iddrink = callbacks.connect('drink', ondrink)

       #tmp = callbacks.connect('drunk', ondrink) # this will raise a ValueError

       callbacks.process('drink', 123)    # will call ondrink
       callbacks.process('eat', 456)      # will call oneat
       callbacks.process('be merry', 456) # nothing will be called
       callbacks.disconnect(ideat)        # disconnect oneat
       callbacks.process('eat', 456)      # nothing will be called

    (The example comments above were swapped in the original docstring.)
    """
    def __init__(self, signals):
        '*signals* is a sequence of valid signals'
        self.signals = set(signals)
        # callbacks maps signal -> {callback id -> callback function}
        self.callbacks = dict([(s, dict()) for s in signals])
        self._cid = 0

    def _check_signal(self, s):
        'make sure *s* is a valid signal or raise a ValueError'
        if s not in self.signals:
            signals = list(self.signals)
            signals.sort()
            raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))

    def connect(self, s, func):
        """
        register *func* to be called when a signal *s* is generated;
        returns a callback id that can be passed to :meth:`disconnect`
        """
        self._check_signal(s)
        self._cid += 1
        self.callbacks[s][self._cid] = func
        return self._cid

    def disconnect(self, cid):
        """
        disconnect the callback registered with callback id *cid*
        """
        # ids are unique across signals, so stop at the first match
        for eventname, callbackd in self.callbacks.items():
            try:
                del callbackd[cid]
            except KeyError:
                continue
            else:
                return

    def process(self, s, *args, **kwargs):
        """
        process signal *s*. All of the functions registered to receive
        callbacks on *s* will be called with *\*args* and *\*\*kwargs*
        """
        self._check_signal(s)
        for func in self.callbacks[s].values():
            func(*args, **kwargs)
class Scheduler(threading.Thread):
    """
    Base class for timeout and idle scheduling
    """
    # lock shared by all schedulers so callbacks never overlap
    idlelock = threading.Lock()
    # class-wide counter handing out a unique id per instance
    id = 0

    def __init__(self):
        threading.Thread.__init__(self)
        self.id = Scheduler.id
        self._stopped = False
        Scheduler.id += 1
        self._stopevent = threading.Event()

    def stop(self):
        """Signal the thread to finish and wait for it (idempotent)."""
        if not self._stopped:
            self._stopevent.set()
            self.join()
            self._stopped = True
class Timeout(Scheduler):
    """
    Schedule recurring events with a wait time in seconds
    """
    def __init__(self, wait, func):
        Scheduler.__init__(self)
        self.wait = wait
        self.func = func

    def run(self):
        # keep firing until stop() is requested or the callback returns false
        while not self._stopevent.isSet():
            self._stopevent.wait(self.wait)
            # hold the shared idle lock only while the callback runs; the
            # ``with`` form also releases it if the callback raises (the
            # original acquire/release pair would have left it held,
            # deadlocking every other scheduler)
            with Scheduler.idlelock:
                b = self.func(self)
            if not b: break
class Idle(Scheduler):
    """
    Schedule callbacks when scheduler is idle
    """
    # the prototype impl is a bit of a poor man's idle handler.  It
    # just implements a short wait time.  But it will provide a
    # placeholder for a proper impl later
    waittime = 0.05

    def __init__(self, func):
        Scheduler.__init__(self)
        self.func = func

    def run(self):
        # keep firing until stop() is requested or the callback returns false
        while not self._stopevent.isSet():
            self._stopevent.wait(Idle.waittime)
            # ``with`` releases the shared lock even if the callback raises
            # (the original acquire/release pair would have left it held)
            with Scheduler.idlelock:
                b = self.func(self)
            if not b: break
class silent_list(list):
    """
    override repr when returning a list of matplotlib artists to
    prevent long, meaningless output.  This is meant to be used for a
    homogeneous list of a given type
    """
    def __init__(self, type, seq=None):
        self.type = type
        if seq is not None:
            self.extend(seq)

    def __repr__(self):
        return '<a list of %d %s objects>' % (len(self), self.type)

    # str() output is intentionally identical to repr()
    __str__ = __repr__
def strip_math(s):
    'remove latex formatting from mathtext'
    # drop the surrounding $...$ delimiters, then strip known commands
    stripped = s[1:-1]
    for token in (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it',
                  '\\', '{', '}'):
        stripped = stripped.replace(token, '')
    return stripped
class Bunch:
    """
    Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
    class is even handier, and prettier to use.  Whenever you want to
    group a few variables:

      >>> point = Bunch(datum=2, squared=4, coord=12)
      >>> point.datum

    By: Alex Martelli
    From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
    """
    def __init__(self, **kwds):
        # expose every keyword argument as an attribute
        for key, val in kwds.items():
            setattr(self, key, val)
def unique(x):
    'Return a list of unique elements of *x*'
    # building a dict keeps one entry per distinct value; the explicit
    # list() call is required on Python 3, where dict.keys() returns a
    # view rather than the list this function documents
    return list(dict([(val, 1) for val in x]).keys())
def iterable(obj):
    'return true if *obj* is iterable (in the sense of supporting len())'
    # note: objects without __len__, e.g. generators, report False here
    try:
        len(obj)
    except Exception:
        # narrowed from the original bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; len() normally raises TypeError
        return False
    return True
def is_string_like(obj):
    'Return True if *obj* looks like a string'
    # NOTE(review): the (str, unicode) tuple is built on every call, so any
    # call under Python 3 raises NameError (``unicode`` no longer exists).
    if isinstance(obj, (str, unicode)): return True
    # numpy strings are subclass of str, ma strings are not
    if ma.isMaskedArray(obj):
        if obj.ndim == 0 and obj.dtype.kind in 'SU':
            return True
        else:
            return False
    # duck-typed fallback: anything that supports "+ ''" is string-like
    try: obj + ''
    except (TypeError, ValueError): return False
    return True
def is_sequence_of_strings(obj):
    """
    Returns true if *obj* is iterable and contains strings
    """
    # a bare string is iterable but does not count as a sequence of strings
    if not iterable(obj):
        return False
    if is_string_like(obj):
        return False
    return all(is_string_like(element) for element in obj)
def is_writable_file_like(obj):
    'return true if *obj* looks like a file object with a *write* method'
    write = getattr(obj, 'write', None)
    return callable(write)
def is_scalar(obj):
    'return true if *obj* is not string like and is not iterable'
    # De Morgan restatement of: not string-like and not iterable
    return not (is_string_like(obj) or iterable(obj))
def is_numlike(obj):
    'return true if *obj* looks like a number'
    # duck-typed: anything that can be added to 1 counts as numeric
    try:
        obj + 1
    except TypeError:
        return False
    return True
def to_filehandle(fname, flag='r', return_opened=False):
    """
    *fname* can be a filename or a file handle.  Support for gzipped
    files is automatic, if the filename ends in .gz.  *flag* is a
    read/write flag for :func:`file`

    When *return_opened* is True, a (filehandle, opened) pair is returned,
    where *opened* tells whether this function opened the handle itself
    (and hence whether the caller is responsible for closing it).
    """
    if is_string_like(fname):
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, flag)
        else:
            # NOTE(review): ``file()`` is a Python 2 builtin; under Python 3
            # this line raises NameError and would need open() instead
            fh = file(fname, flag)
        opened = True
    elif hasattr(fname, 'seek'):
        # anything seekable is treated as an already-open file handle
        fh = fname
        opened = False
    else:
        raise ValueError('fname must be a string or file handle')
    if return_opened:
        return fh, opened
    return fh
def is_scalar_or_string(val):
    # strings count as scalars here even though they are iterable
    if is_string_like(val):
        return True
    return not iterable(val)
def flatten(seq, scalarp=is_scalar_or_string):
    """
    this generator flattens nested containers such as

    >>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])

    so that

    >>> for i in flatten(l): print i,
    John Hunter 1 23 42 5 23

    *scalarp* is a predicate deciding which items are yielded as-is
    rather than recursed into; by default strings and non-iterables
    are treated as scalars.

    By: Composite of Holger Krekel and Luther Blissett
    From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
    and Recipe 1.12 in cookbook
    """
    for item in seq:
        if scalarp(item): yield item
        else:
            # recurse into nested containers, propagating the predicate
            for subitem in flatten(item, scalarp):
                yield subitem
class Sorter:
    """
    Sort by attribute or item

    Example usage::

      sort = Sorter()

      list = [(1, 2), (4, 8), (0, 3)]
      dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
              {'a': 9, 'b': 9}]

      sort(list)       # default sort
      sort(list, 1)    # sort by index 1
      sort(dict, 'a')  # sort a list of dicts by key 'a'
    """

    def _helper(self, data, aux, inplace):
        # *aux* is a list of (key, original_index) pairs; sorting it yields
        # the permutation to apply to *data*
        aux.sort()
        result = [data[i] for _, i in aux]
        if inplace:
            data[:] = result
        return result

    def byItem(self, data, itemindex=None, inplace=1):
        """Sort *data* by item *itemindex*; with no index, sort the items themselves."""
        if itemindex is None:
            if inplace:
                data.sort()
                return data
            result = data[:]
            result.sort()
            return result
        aux = [(item[itemindex], i) for i, item in enumerate(data)]
        return self._helper(data, aux, inplace)

    def byAttribute(self, data, attributename, inplace=1):
        """Sort *data* by the named attribute of each element."""
        aux = [(getattr(item, attributename), i) for i, item in enumerate(data)]
        return self._helper(data, aux, inplace)

    # a couple of handy synonyms
    sort = byItem
    __call__ = byItem
class Xlator(dict):
    """
    All-in-one multiple-string-substitution class

    Example usage::

      text = "Larry Wall is the creator of Perl"
      adict = {
      "Larry Wall" : "Guido van Rossum",
      "creator" : "Benevolent Dictator for Life",
      "Perl" : "Python",
      }

      print multiple_replace(adict, text)

      xlat = Xlator(adict)
      print xlat.xlat(text)
    """
    def _make_regex(self):
        """ Build re object based on the keys of the current dictionary """
        alternatives = map(re.escape, self.keys())
        return re.compile("|".join(alternatives))
    def __call__(self, match):
        """ Handler invoked for each regex *match* """
        # the matched text is always one of our keys
        return self[match.group(0)]
    def xlat(self, text):
        """ Translate *text*, returns the modified text. """
        pattern = self._make_regex()
        return pattern.sub(self, text)
def soundex(name, len=4):
    """ soundex module conforming to Odell-Russell algorithm """
    # soundex value for each letter A..Z
    soundex_digits = '01230120022455012623010202'
    first_letter = ''
    code = ''
    for ch in name.upper():
        if not ch.isalpha():
            continue
        if not first_letter:
            first_letter = ch   # Remember first letter
        digit = soundex_digits[ord(ch) - ord('A')]
        # collapse runs of identical consecutive soundex digits
        if not code or digit != code[-1]:
            code += digit
    # the first position holds the letter itself, not its digit
    code = first_letter + code[1:]
    # drop all 0s (vowels and similar) from the code
    code = code.replace('0', '')
    # pad with zeros / truncate to the requested length
    return (code + '0' * len)[:len]
class Null:
    """ Null objects always and reliably "do nothing." """
    def __init__(self, *args, **kwargs):
        pass
    def __call__(self, *args, **kwargs):
        # calling a Null yields the same Null, so chains collapse
        return self
    def __str__(self):
        return "Null()"
    def __repr__(self):
        return "Null()"
    def __nonzero__(self):
        # Python 2 truth-value hook: a Null is always falsy
        return 0
    def __getattr__(self, name):
        # any attribute access also yields the same Null
        return self
    def __setattr__(self, name, value):
        # attribute writes are silently discarded
        return self
    def __delattr__(self, name):
        return self
def mkdirs(newdir, mode=0777):
    """
    make directory *newdir* recursively, and set *mode*.  Equivalent to ::

        > mkdir -p NEWDIR
        > chmod MODE NEWDIR

    NOTE: the octal literal ``0777`` and ``except OSError, err`` are
    Python 2 only syntax.
    """
    try:
        if not os.path.exists(newdir):
            parts = os.path.split(newdir)
            # build the path one component at a time so that every
            # missing level is created with the requested mode
            for i in range(1, len(parts)+1):
                thispart = os.path.join(*parts[:i])
                if not os.path.exists(thispart):
                    os.makedirs(thispart, mode)
    except OSError, err:
        # Reraise the error unless it's about an already existing directory
        if err.errno != errno.EEXIST or not os.path.isdir(newdir):
            raise
class GetRealpathAndStat:
    """
    Callable returning ``(realpath, stat_key)`` for a path, memoized.

    *stat_key* is ``(st_ino, st_dev)`` on POSIX; on win32 (where inode
    numbers are not meaningful) it is the resolved path itself.
    """
    def __init__(self):
        self._cache = {}
    def __call__(self, path):
        cached = self._cache.get(path)
        if cached is not None:
            return cached
        realpath = os.path.realpath(path)
        if sys.platform == 'win32':
            stat_key = realpath
        else:
            st = os.stat(realpath)
            stat_key = (st.st_ino, st.st_dev)
        cached = (realpath, stat_key)
        self._cache[path] = cached
        return cached
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
    'delete all of the *keys* from the :class:`dict` *d*'
    # dict.pop with a default silently ignores missing keys
    for key in keys:
        d.pop(key, None)
class RingBuffer:
    """
    Ring buffer with a fixed maximum size.

    While fewer than *size_max* elements have been appended the buffer
    simply grows; once full, each further :meth:`append` overwrites the
    oldest element.  The switch is implemented by permanently changing
    the instance's class to the private ``__Full`` implementation.
    """
    def __init__(self, size_max):
        self.max = size_max   # capacity of the buffer
        self.data = []        # stored elements (oldest-first until full)
    class __Full:
        """ class that implements a full buffer """
        def append(self, x):
            """ Append an element overwriting the oldest one. """
            self.data[self.cur] = x
            self.cur = (self.cur+1) % self.max
        def get(self):
            """ return list of elements in correct order """
            return self.data[self.cur:]+self.data[:self.cur]
        def __get_item__(self, i):
            return self.data[i % len(self.data)]
        # alias so standard indexing (rb[i]) works in the full state too
        __getitem__ = __get_item__
    def append(self, x):
        """append an element at the end of the buffer"""
        self.data.append(x)
        if len(self.data) == self.max:
            self.cur = 0
            # Permanently change self's class from non-full to full.
            # BUG FIX: the bare name ``__Full`` is name-mangled into a
            # (nonexistent) global and raised NameError at the moment the
            # buffer filled; the nested class must be reached via ``self``.
            self.__class__ = self.__Full
    def get(self):
        """ Return a list of elements from the oldest to the newest. """
        return self.data
    def __get_item__(self, i):
        # kept under its historical (misspelled) name for backward
        # compatibility; __getitem__ below makes rb[i] work as expected
        return self.data[i % len(self.data)]
    __getitem__ = __get_item__
def get_split_ind(seq, N):
    """
    *seq* is a list of words.  Return the index into seq such that::

        len(' '.join(seq[:ind])<=N

    """
    running = 0
    for ind, word in enumerate(seq):
        running += len(word) + 1  # +1 accounts for the joining space
        if running >= N:
            return ind
    # the whole sequence fits
    return len(seq)
def wrap(prefix, text, cols):
    'wrap *text* with *prefix* at length *cols*'
    pad = ' ' * len(prefix.expandtabs())
    available = cols - len(pad)
    words = text.split(' ')
    total = len(words)
    pos = 0
    chunks = []
    while pos < total:
        prev = pos
        pos += get_split_ind(words[pos:], available)
        chunks.append(words[prev:pos])
    # the first line carries the prefix; continuation lines get padding
    # of the same visual width
    out = [prefix + ' '.join(chunks[0]) + '\n']
    for chunk in chunks[1:]:
        out.append(pad + ' '.join(chunk) + '\n')
    return ''.join(out)
# Matches the indentation of the first non-blank line: the first run of
# spaces immediately following the first newline, or at the beginning of
# the string.
_find_dedent_regex = re.compile(r"(?:(?:\n\r?)|^)( *)\S")

# Cache of compiled "strip up to n leading spaces" patterns, keyed by n.
_dedent_regex = {}

def dedent(s):
    """
    Remove excess indentation from docstring *s*.

    Discards any leading blank lines, then removes up to n whitespace
    characters from each line, where n is the number of leading
    whitespace characters in the first line. It differs from
    textwrap.dedent in its deletion of leading blank lines and its use
    of the first non-blank line to determine the indentation.

    It is also faster in most cases.
    """
    # Regex-based for speed: this function accounted for almost 30% of
    # matplotlib startup time, so it is worthy of optimization at all
    # costs.
    if not s:      # covers both '' and None
        return ''
    match = _find_dedent_regex.match(s)
    if match is None:
        return s
    # width of the indentation to strip from every line
    nshift = match.end(1) - match.start(1)
    if nshift == 0:
        return s
    # fetch (or build and cache) the regex removing *up to* nshift
    # spaces after each newline
    unindent = _dedent_regex.get(nshift)
    if unindent is None:
        unindent = re.compile(r"\n\r? {0,%d}" % nshift)
        _dedent_regex[nshift] = unindent
    return unindent.sub("\n", s).strip()
def listFiles(root, patterns='*', recurse=1, return_folders=0):
    """
    Recursively list files under *root* matching any of the
    semicolon-separated glob *patterns*.

    from Parmar and Martelli in the Python Cookbook
    """
    import os.path, fnmatch
    # Expand patterns from semicolon-separated string to list
    pattern_list = patterns.split(';')
    # Collect input and output arguments into one bunch
    class Bunch:
        def __init__(self, **kwds): self.__dict__.update(kwds)
    arg = Bunch(recurse=recurse, pattern_list=pattern_list,
                return_folders=return_folders, results=[])
    def visit(arg, dirname, files):
        # Append to arg.results all relevant files (and perhaps folders)
        for name in files:
            fullname = os.path.normpath(os.path.join(dirname, name))
            if arg.return_folders or os.path.isfile(fullname):
                # first matching pattern wins; avoid duplicates
                for pattern in arg.pattern_list:
                    if fnmatch.fnmatch(name, pattern):
                        arg.results.append(fullname)
                        break
        # Block recursion if recursion was disallowed
        if not arg.recurse: files[:]=[]
    # NOTE(review): os.path.walk exists only in Python 2 (removed in
    # Python 3); a port would need os.walk instead.
    os.path.walk(root, visit, arg)
    return arg.results
def get_recursive_filelist(args):
    """
    Recurs all the files and dirs in *args* ignoring symbolic links
    and return the files as a list of strings
    """
    collected = []
    for arg in args:
        if os.path.isfile(arg):
            collected.append(arg)
        elif os.path.isdir(arg):
            # expand directories via listFiles (files and folders)
            collected.extend(listFiles(arg, recurse=1, return_folders=1))
    # drop symlinks from the final result
    return [fname for fname in collected if not os.path.islink(fname)]
def pieces(seq, num=2):
    "Break up the *seq* into *num* tuples"
    pos = 0
    while True:
        chunk = seq[pos:pos + num]
        # an empty slice means we are past the end
        if not len(chunk):
            return
        yield chunk
        pos += num
def exception_to_str(s = None):
    """
    Return the current exception traceback as a string, optionally
    preceded by the message *s*.

    NOTE: Python 2 only (``StringIO`` module and ``print >>`` syntax).
    """
    sh = StringIO.StringIO()
    if s is not None: print >>sh, s
    traceback.print_exc(file=sh)
    return sh.getvalue()
def allequal(seq):
    """
    Return *True* if all elements of *seq* compare equal.  If *seq* is
    0 or 1 length, return *True*
    """
    if len(seq) < 2:
        return True
    first = seq[0]
    for item in seq[1:]:
        if item != first:
            return False
    return True
def alltrue(seq):
    """
    Return *True* if all elements of *seq* evaluate to *True*.  If
    *seq* is empty, return *False*.
    """
    # NB: unlike builtin all(), an empty sequence yields False here
    if not len(seq):
        return False
    return all(seq)
def onetrue(seq):
    """
    Return *True* if one element of *seq* is *True*.  It *seq* is
    empty, return *False*.
    """
    # len() call preserves the TypeError on unsized inputs
    if not len(seq):
        return False
    return any(seq)
def allpairs(x):
    """
    return all possible pairs in sequence *x*

    Condensed by Alex Martelli from this thread_ on c.l.python

    .. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
    """
    pairs = []
    for i, f in enumerate(x):
        # pair each element with every later element (later one first)
        for s in x[i + 1:]:
            pairs.append((s, f))
    return pairs
# python 2.2 dicts don't have pop--but we don't support 2.2 any more
def popd(d, *args):
    """
    Should behave like python2.3 :meth:`dict.pop` method; *d* is a
    :class:`dict`::

      # returns value for key and deletes item; raises a KeyError if key
      # is not in dict
      val = popd(d, key)

      # returns value for key if key exists, else default.  Delete key,
      # val item if it exists.  Will not raise a KeyError
      val = popd(d, key, default)
    """
    warnings.warn("Use native python dict.pop method", DeprecationWarning)
    # warning added 2008/07/22
    if len(args) == 1:
        # no default: missing key raises KeyError
        key = args[0]
        value = d[key]
        del d[key]
    elif len(args) == 2:
        # with default: never raises, deletes only if present
        key, default = args
        value = d.get(key, default)
        try:
            del d[key]
        except KeyError:
            pass
    return value
class maxdict(dict):
    """
    A dictionary with a maximum size; this doesn't override all the
    relevant methods to contrain size, just setitem, so use with
    caution.

    Eviction is oldest-insertion first; re-assigning an existing key
    refreshes its position instead of evicting.
    """
    def __init__(self, maxsize):
        dict.__init__(self)
        self.maxsize = maxsize
        self._killkeys = []   # keys in insertion order; index 0 evicted first
    def __setitem__(self, k, v):
        if k in self:
            # BUG FIX: re-assigning an existing key used to leave a stale
            # duplicate in _killkeys (and could evict another key even
            # though the dict was not growing), eventually raising
            # KeyError on a later eviction.  Refresh its position instead.
            self._killkeys.remove(k)
        elif len(self) >= self.maxsize:
            # at capacity and adding a genuinely new key: evict the oldest
            del self[self._killkeys.pop(0)]
        dict.__setitem__(self, k, v)
        self._killkeys.append(k)
class Stack:
    """
    Implement a stack where elements can be pushed on and you can move
    back and forth.  But no pop.  Should mimic home / back / forward
    in a browser
    """
    def __init__(self, default=None):
        self.clear()
        self._default = default
    def __call__(self):
        'return the current element, or None'
        if not len(self._elements):
            return self._default
        return self._elements[self._pos]
    def forward(self):
        'move the position forward and return the current element'
        # saturate at the last element
        if self._pos < len(self._elements) - 1:
            self._pos += 1
        return self()
    def back(self):
        'move the position back and return the current element'
        # saturate at the first element
        if self._pos > 0:
            self._pos -= 1
        return self()
    def push(self, o):
        """
        push object onto stack at current position - all elements
        occurring later than the current position are discarded
        """
        self._elements = self._elements[:self._pos + 1]
        self._elements.append(o)
        self._pos = len(self._elements) - 1
        return self()
    def home(self):
        'push the first element onto the top of the stack'
        if not len(self._elements):
            return
        self.push(self._elements[0])
        return self()
    def empty(self):
        return len(self._elements) == 0
    def clear(self):
        'empty the stack'
        self._pos = -1
        self._elements = []
    def bubble(self, o):
        """
        raise *o* to the top of the stack and return *o*.  *o* must be
        in the stack
        """
        if o not in self._elements:
            raise ValueError('Unknown element o')
        previous = self._elements[:]
        self.clear()
        matches = []
        for element in previous:
            # collect every occurrence of o, re-push everything else
            if element == o:
                matches.append(element)
            else:
                self.push(element)
        for _ in matches:
            self.push(o)
        return o
    def remove(self, o):
        'remove element *o* from the stack'
        if o not in self._elements:
            raise ValueError('Unknown element o')
        previous = self._elements[:]
        self.clear()
        for element in previous:
            if element == o:
                continue
            self.push(element)
def popall(seq):
    'empty a list'
    # slice-delete clears the list in place in one step
    del seq[:]
def finddir(o, match, case=False):
    """
    return all attributes of *o* which match string in match.  if case
    is True require an exact case match.
    """
    if case:
        candidates = [(attr, attr) for attr in dir(o) if is_string_like(attr)]
    else:
        # compare case-insensitively but report the original spelling
        candidates = [(attr.lower(), attr) for attr in dir(o)
                      if is_string_like(attr)]
        match = match.lower()
    return [orig for lowered, orig in candidates if lowered.find(match) >= 0]
def reverse_dict(d):
    'reverse the dictionary -- may lose data if values are not unique!'
    return dict((v, k) for k, v in d.items())
def report_memory(i=0): # argument may go away
    'return the memory consumed by process'
    pid = os.getpid()
    if sys.platform=='sunos5':
        # Solaris ps: total image size, last line of output
        a2 = os.popen('ps -p %d -o osz' % pid).readlines()
        mem = int(a2[-1].strip())
    elif sys.platform.startswith('linux'):
        # Linux ps: take the 'sz' column from the data row
        a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
        mem = int(a2[1].split()[1])
    elif sys.platform.startswith('darwin'):
        # macOS ps: take the 'rss' column from the data row
        a2 = os.popen('ps -p %d -o rss,vsz' % pid).readlines()
        mem = int(a2[1].split()[0])
    # NOTE(review): on any other platform 'mem' is never assigned and the
    # return below raises UnboundLocalError -- confirm intended platforms.
    return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'

def safezip(*args):
    'make sure *args* are equal len before zipping'
    expected = len(args[0])
    for pos, arg in enumerate(args[1:]):
        if len(arg) != expected:
            # report which argument disagrees with the first
            raise ValueError(_safezip_msg % (expected, pos + 1, len(arg)))
    return zip(*args)
def issubclass_safe(x, klass):
    'return issubclass(x, klass) and return False on a TypeError'
    try:
        return issubclass(x, klass)
    except TypeError:
        # x was not a class at all (issubclass requires a type)
        return False
class MemoryMonitor:
    """
    Record process memory usage over repeated calls (via
    :func:`report_memory`) into a fixed-size array, for leak hunting.

    NOTE: :meth:`report` uses Python 2 ``print`` statements and
    :meth:`plot` imports pylab lazily.
    """
    def __init__(self, nmax=20000):
        # pre-allocated sample buffer; overflow is flagged, not grown
        self._nmax = nmax
        self._mem = np.zeros((self._nmax,), np.int32)
        self.clear()
    def clear(self):
        self._n = 0
        self._overflow = False
    def __call__(self):
        # take one sample; returns the current memory figure either way
        mem = report_memory()
        if self._n < self._nmax:
            self._mem[self._n] = mem
            self._n += 1
        else:
            self._overflow = True
        return mem
    def report(self, segments=4):
        # print memory at ~evenly spaced sample indices plus the delta
        # per loop between them
        n = self._n
        segments = min(n, segments)
        dn = int(n/segments)
        ii = range(0, n, dn)
        ii[-1] = n-1
        print
        print 'memory report: i, mem, dmem, dmem/nloops'
        print 0, self._mem[0]
        for i in range(1, len(ii)):
            di = ii[i] - ii[i-1]
            if di == 0:
                continue
            dm = self._mem[ii[i]] - self._mem[ii[i-1]]
            print '%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
                                         dm, dm / float(di))
        if self._overflow:
            print "Warning: array size was too small for the number of calls."
    def xy(self, i0=0, isub=1):
        # return (sample indices, memory values), optionally subsampled
        x = np.arange(i0, self._n, isub)
        return x, self._mem[i0:self._n:isub]
    def plot(self, i0=0, isub=1, fig=None):
        # quick pylab plot of the recorded samples
        if fig is None:
            from pylab import figure, show
            fig = figure()
        ax = fig.add_subplot(111)
        ax.plot(*self.xy(i0, isub))
        fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
    """
    Print reference cycles reachable from *objects*.

    *objects*
        A list of objects to find cycles in.  It is often useful to
        pass in gc.garbage to find the cycles that are preventing some
        objects from being garbage collected.
    *outstream*
        The stream for output.
    *show_progress*
        If True, print the number of objects reached as they are found.
    """
    import gc
    from types import FrameType
    def print_path(path):
        # render one cycle; for dict/list/tuple steps, show how the next
        # object in the cycle is reached from this one
        for i, step in enumerate(path):
            # next "wraps around"
            next = path[(i + 1) % len(path)]
            outstream.write("   %s -- " % str(type(step)))
            if isinstance(step, dict):
                for key, val in step.items():
                    if val is next:
                        outstream.write("[%s]" % repr(key))
                        break
                    if key is next:
                        outstream.write("[key] = %s" % repr(val))
                        break
            elif isinstance(step, list):
                outstream.write("[%d]" % step.index(next))
            elif isinstance(step, tuple):
                outstream.write("( tuple )")
            else:
                outstream.write(repr(step))
            outstream.write(" ->\n")
        outstream.write("\n")
    def recurse(obj, start, all, current_path):
        # depth-first walk over gc referents, remembering visited ids
        if show_progress:
            outstream.write("%d\r" % len(all))
        all[id(obj)] = None
        referents = gc.get_referents(obj)
        for referent in referents:
            # If we've found our way back to the start, this is
            # a cycle, so print it out
            if referent is start:
                print_path(current_path)
            # Don't go back through the original list of objects, or
            # through temporary references to the object, since those
            # are just an artifact of the cycle detector itself.
            elif referent is objects or isinstance(referent, FrameType):
                continue
            # We haven't seen this object before, so recurse
            elif id(referent) not in all:
                recurse(referent, start, all, current_path + [obj])
    for obj in objects:
        outstream.write("Examining: %r\n" % (obj,))
        recurse(obj, obj, { }, [])
class Grouper(object):
    """
    This class provides a lightweight way to group arbitrary objects
    together into disjoint sets when a full-blown graph data structure
    would be overkill.

    Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retreived by
    using the object as an iterator.

    The objects being joined must be hashable.

    For example:

    >>> g = grouper.Grouper()
    >>> g.join('a', 'b')
    >>> g.join('b', 'c')
    >>> g.join('d', 'e')
    >>> list(g)
    [['a', 'b', 'c'], ['d', 'e']]
    >>> g.joined('a', 'b')
    True
    >>> g.joined('a', 'c')
    True
    >>> g.joined('a', 'd')
    False

    Internally, ``_mapping`` maps a weak reference of each member to the
    shared list holding the weak refs of its whole set.
    ``ref`` is presumably ``weakref.ref`` imported at module top --
    confirm.
    """
    def __init__(self, init=[]):
        # NOTE(review): mutable default argument; harmless here because
        # init is only iterated, never mutated.
        mapping = self._mapping = {}
        for x in init:
            mapping[ref(x)] = [ref(x)]
    def __contains__(self, item):
        return ref(item) in self._mapping
    def clean(self):
        """
        Clean dead weak references from the dictionary
        """
        # NOTE(review): deleting while iterating .items() is safe in
        # Python 2 only, where items() returns a list snapshot.
        mapping = self._mapping
        for key, val in mapping.items():
            if key() is None:
                del mapping[key]
                val.remove(key)
    def join(self, a, *args):
        """
        Join given arguments into the same set.  Accepts one or more
        arguments.
        """
        mapping = self._mapping
        set_a = mapping.setdefault(ref(a), [ref(a)])
        for arg in args:
            set_b = mapping.get(ref(arg))
            if set_b is None:
                set_a.append(ref(arg))
                mapping[ref(arg)] = set_a
            elif set_b is not set_a:
                # union-by-size: merge the smaller set into the larger
                if len(set_b) > len(set_a):
                    set_a, set_b = set_b, set_a
                set_a.extend(set_b)
                for elem in set_b:
                    mapping[elem] = set_a
        self.clean()
    def joined(self, a, b):
        """
        Returns True if *a* and *b* are members of the same set.
        """
        self.clean()
        mapping = self._mapping
        try:
            # members of one set share the identical list object
            return mapping[ref(a)] is mapping[ref(b)]
        except KeyError:
            return False
    def __iter__(self):
        """
        Iterate over each of the disjoint sets as a list.

        The iterator is invalid if interleaved with calls to join().
        """
        self.clean()
        class Token: pass
        token = Token()
        # Mark each group as we come across if by appending a token,
        # and don't yield it twice
        for group in self._mapping.itervalues():
            if not group[-1] is token:
                yield [x() for x in group]
                group.append(token)
        # Cleanup the tokens
        for group in self._mapping.itervalues():
            if group[-1] is token:
                del group[-1]
    def get_siblings(self, a):
        """
        Returns all of the items joined with *a*, including itself.
        """
        self.clean()
        siblings = self._mapping.get(ref(a), [ref(a)])
        return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
    """
    Resample array *a* along its first axis so that each original
    interval is subdivided into *steps* equal linear pieces.

    Returns an array of length ``(len(a) - 1) * steps + 1`` with the
    same trailing shape and dtype as *a*.
    """
    # BUG FIX: np.floor returns a float; keep steps as a Python int so
    # it is valid as an array length and as a slice index (modern numpy
    # rejects float indices).  Behavior is unchanged for valid inputs.
    steps = int(np.floor(steps))
    new_length = ((len(a) - 1) * steps) + 1
    new_shape = list(a.shape)
    new_shape[0] = new_length
    result = np.zeros(new_shape, a.dtype)
    result[0] = a[0]
    a0 = a[0:-1]
    a1 = a[1:]
    # per-interval increment between consecutive samples
    delta = ((a1 - a0) / steps)
    for i in range(1, steps):
        result[i::steps] = delta * i + a0
    # the exact endpoint of every interval
    result[steps::steps] = a1
    return result
def recursive_remove(path):
    """Recursively delete *path*: a plain file, or the contents of a
    directory tree (the top-level directory itself is left in place --
    see the commented-out removedirs below)."""
    if os.path.isdir(path):
        # include dot-files, which '*' does not match
        for fname in glob.glob(os.path.join(path, '*')) + glob.glob(os.path.join(path, '.*')):
            if os.path.isdir(fname):
                recursive_remove(fname)
                # NOTE(review): os.removedirs also prunes empty parent
                # directories, not just fname -- confirm this is wanted.
                os.removedirs(fname)
            else:
                os.remove(fname)
        #os.removedirs(path)
    else:
        os.remove(path)
def delete_masked_points(*args):
    """
    Find all masked and/or non-finite points in a set of arguments,
    and return the arguments with only the unmasked points remaining.

    Arguments can be in any of 5 categories:

      1) 1-D masked arrays
      2) 1-D ndarrays
      3) ndarrays with more than one dimension
      4) other non-string iterables
      5) anything else

    The first argument must be in one of the first four categories;
    any argument with a length differing from that of the first
    argument (and hence anything in category 5) then will be
    passed through unchanged.

    Masks are obtained from all arguments of the correct length
    in categories 1, 2, and 4; a point is bad if masked in a masked
    array or if it is a nan or inf.  No attempt is made to
    extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
    does not yield a Boolean array.

    All input arguments that are not passed unchanged are returned
    as ndarrays after removing the points or rows corresponding to
    masks in any of the arguments.

    A vastly simpler version of this function was originally
    written as a helper for Axes.scatter().
    """
    if not len(args):
        return ()
    if (is_string_like(args[0]) or not iterable(args[0])):
        raise ValueError("First argument must be a sequence")
    nrecs = len(args[0])
    margs = []
    # seqlist[i] is True for args that participate in masking/filtering
    seqlist = [False] * len(args)
    for i, x in enumerate(args):
        if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
            seqlist[i] = True
            if ma.isMA(x):
                if x.ndim > 1:
                    raise ValueError("Masked arrays must be 1-D")
            else:
                x = np.asarray(x)
        margs.append(x)
    masks = []    # list of masks that are True where good
    for i, x in enumerate(margs):
        if seqlist[i]:
            if x.ndim > 1:
                continue  # Don't try to get nan locations unless 1-D.
            if ma.isMA(x):
                masks.append(~ma.getmaskarray(x))  # invert the mask
                xd = x.data
            else:
                xd = x
            try:
                # non-finite (nan/inf) points are bad too
                mask = np.isfinite(xd)
                if isinstance(mask, np.ndarray):
                    masks.append(mask)
            except: #Fixme: put in tuple of possible exceptions?
                pass
    if len(masks):
        # NOTE(review): builtin reduce is Python 2; Python 3 needs
        # functools.reduce.
        mask = reduce(np.logical_and, masks)
        igood = mask.nonzero()[0]
        if len(igood) < nrecs:
            for i, x in enumerate(margs):
                if seqlist[i]:
                    margs[i] = x.take(igood, axis=0)
    for i, x in enumerate(margs):
        if seqlist[i] and ma.isMA(x):
            margs[i] = x.filled()
    return margs
def unmasked_index_ranges(mask, compressed = True):
    '''
    Find index ranges where *mask* is *False*.

    *mask* will be flattened if it is not already 1-D.

    Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
    indices for slices of the compressed :class:`numpy.ndarray`
    corresponding to each of *N* uninterrupted runs of unmasked
    values.  If optional argument *compressed* is *False*, it returns
    the start and stop indices into the original :class:`numpy.ndarray`,
    not the compressed :class:`numpy.ndarray`.  Returns *None* if there
    are no unmasked values.

    Example::

      y = ma.array(np.arange(5), mask = [0,0,1,0,0])
      ii = unmasked_index_ranges(ma.getmaskarray(y))
      # returns array [[0,2,] [2,4,]]

      ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
      # returns array [[0, 2], [3, 5]]

    Prior to the transforms refactoring, this was used to support
    masked arrays in Line2D.
    '''
    mask = mask.reshape(mask.size)
    # pad with True on both ends so every run of False values has a
    # detectable falling and rising edge
    padded = np.concatenate(((1,), mask, (1,)))
    edges = padded[1:] - padded[:-1]
    indices = np.arange(len(mask) + 1)
    starts = np.compress(edges == -1, indices)
    stops = np.compress(edges == 1, indices)
    assert len(starts) == len(stops)
    if len(stops) == 0:
        return None  # Maybe this should be np.zeros((0,2), dtype=int)
    if not compressed:
        return np.concatenate((starts[:, np.newaxis], stops[:, np.newaxis]),
                              axis=1)
    # re-express the runs as ranges into the compressed (gap-free) array
    lengths = stops - starts
    cum = np.cumsum(lengths)
    cstarts = np.concatenate(((0,), cum[:-1]))
    return np.concatenate((cstarts[:, np.newaxis], cum[:, np.newaxis]),
                          axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # thin deprecation shim: warns, then delegates unchanged to mlab
    warnings.warn('less_simple_linear_interpolation has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )
def isvector(X):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    warnings.warn('isvector has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    # BUG FIX: previously forwarded undefined names (x, y, xi, extrap),
    # raising NameError on every call; mlab.isvector takes the single
    # array argument.
    return mlab.isvector(X)
def vector_lengths( X, P=2., axis=None ):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    warnings.warn('vector_lengths has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    # BUG FIX: previously hard-coded P=2., silently ignoring a
    # caller-supplied norm order; forward the actual arguments.
    return mlab.vector_lengths( X, P=P, axis=axis )
def distances_along_curve( X ):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # thin deprecation shim: warns, then delegates unchanged to mlab
    warnings.warn('distances_along_curve has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.distances_along_curve( X )
def path_length(X):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # thin deprecation shim: warns, then delegates unchanged to mlab
    warnings.warn('path_length has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.path_length(X)
def is_closed_polygon(X):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # thin deprecation shim: warns, then delegates unchanged to mlab
    warnings.warn('is_closed_polygon has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.is_closed_polygon(X)
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
    """
    This function has been moved to matplotlib.mlab -- please import
    it from there
    """
    # deprecated from cbook in 0.98.4
    # thin deprecation shim: warns, then delegates unchanged to mlab
    warnings.warn('quad2cubic has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
    import matplotlib.mlab as mlab
    return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)
if __name__=='__main__':
    # smoke tests for allequal when the module is run directly
    assert( allequal([1,1,1]) )
    assert(not  allequal([1,1,0]) )
    assert( allequal([]) )
    assert( allequal(('a', 'a')))
    assert( not allequal(('a', 'b')))
| tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/cbook.py | Python | gpl-3.0 | 42,525 | [
"VisIt"
] | 52690d069fbe3c5df94c6655d09c731150346a6219478c36787726bb399dfcef |
#! /usr/bin/env python
# FIXME: if it requires a dirac.cfg it is not a unit test and should be moved to tests directory
import unittest
import time
import os
import shutil
import sys
import six
from DIRAC.Core.Base.Script import parseCommandLine, getPositionalArgs
# Must run before the remaining DIRAC imports: initialises the DIRAC
# configuration from the command line / dirac.cfg.
parseCommandLine()
from DIRAC import gLogger
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Utilities.File import getSize
# Required positional arguments: the SE name to exercise, an LFN
# directory to create test entries under, and a local file used as the
# upload payload.
positionalArgs = getPositionalArgs()
if len(positionalArgs) < 3:
    print("Usage: TestStoragePlugIn.py StorageElement <lfnDir> <localFile>")
    sys.exit()
else:
    storageElementToTest = positionalArgs[0]
    lfnDirToTest = positionalArgs[1]
    fileToTest = positionalArgs[2]
class StorageElementTestCase(unittest.TestCase):
    """Base class for the StorageElement test cases"""
    def setUp(self):
        # Fixture: a StorageElement client, the local payload file (and
        # its size, used to verify uploads), and the remote working
        # directory which is created fresh for every test.
        self.numberOfFiles = 1
        self.storageElement = StorageElement(storageElementToTest)
        self.localSourceFile = fileToTest
        self.localFileSize = getSize(self.localSourceFile)
        self.destDirectory = lfnDirToTest
        # destinationDir = returnSingleResult( self.storageElement.getURL( self.destDirectory ) )['Value']
        destinationDir = self.destDirectory
        res = self.storageElement.createDirectory(destinationDir)
        self.assertTrue(res["OK"])
    def tearDown(self):
        # Remove everything the test created on the storage element.
        # destinationDir = returnSingleResult( self.storageElement.getURL( self.destDirectory ) )['Value']
        res = self.storageElement.removeDirectory(self.destDirectory, recursive=True)
        self.assertTrue(res["OK"])
class GetInfoTestCase(StorageElementTestCase):
    """Read-only checks of StorageElement configuration and metadata
    (no file transfers are performed)."""
    def test_dump(self):
        """dump() should simply not raise."""
        print("\n\n#########################################################" "################\n\n\t\t\tDump test\n")
        self.storageElement.dump()
    def test_isValid(self):
        """The configured SE must report itself as valid."""
        print(
            "\n\n#########################################################" "################\n\n\t\t\tIs valid test\n"
        )
        res = self.storageElement.isValid()
        self.assertTrue(res["OK"])
    def test_getRemotePlugins(self):
        """Remote-access protocol plugins are returned as a list."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tGet remote protocols test\n"
        )
        res = self.storageElement.getRemotePlugins()
        self.assertTrue(res["OK"])
        self.assertEqual(type(res["Value"]), list)
    def test_getLocalPlugins(self):
        """Local-access protocol plugins are returned as a list."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tGet local protocols test\n"
        )
        res = self.storageElement.getLocalPlugins()
        self.assertTrue(res["OK"])
        self.assertEqual(type(res["Value"]), list)
    def test_getPlugins(self):
        """The full plugin list is returned as a list."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tGet protocols test\n"
        )
        res = self.storageElement.getPlugins()
        self.assertTrue(res["OK"])
        self.assertEqual(type(res["Value"]), list)
    # def test_isLocalSE( self ):
    #   print '\n\n#########################################################################\n\n\t\t\tIs local SE test\n'
    #   res = self.storageElement.isLocalSE()
    #   self.assertTrue(res['OK'])
    #   self.assertFalse( res['Value'] )
    # def test_getStorageElementOption( self ):
    #   print '\n\n########################################################################
    #           \n\n\t\t\tGet storage element option test\n'
    #   res = self.storageElement.getStorageElementOption( 'BackendType' )
    #   self.assertTrue(res['OK'])
    #   self.assertEqual( res['Value'], 'DISET' )
    def test_getStorageParameters(self):
        """Parameters for the DIP protocol must include the dips scheme."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tGet storage parameters test\n"
        )
        result = self.storageElement.getStorageParameters("DIP")
        self.assertTrue(result["OK"])
        resDict = result["Value"]
        self.assertEqual(resDict["Protocol"], "dips")
        # self.assertEqual( resDict['SpaceToken'], 'LHCb_RAW' )
        # self.assertEqual( resDict['WSUrl'], '/srm/managerv2?SFN=' )
        # self.assertEqual( resDict['Host'], 'srm-lhcb.cern.ch' )
        # self.assertEqual( resDict['Path'], '/castor/cern.ch/grid' )
        # self.assertEqual( resDict['ProtocolName'], 'SRM2' )
        # self.assertEqual( resDict['Port'], '8443' )
class FileTestCases(StorageElementTestCase):
    def test_exists(self):
        """Upload a file, check exists() on it, remove it, check again,
        and verify exists() also reports True for its parent directory."""
        print("\n\n#########################################################" "################\n\n\t\t\tExists test\n")
        destinationFilePath = "%s/testFile.%s" % (self.destDirectory, time.time())
        # pfnForLfnRes = self.storageElement.getURL( destinationFilePath )
        # destinationPfn = list(pfnForLfnRes['Value']['Successful'].values())[0]
        fileDict = {destinationFilePath: self.localSourceFile}
        putFileRes = returnSingleResult(self.storageElement.putFile(fileDict))
        # File exists
        existsRes = returnSingleResult(self.storageElement.exists(destinationFilePath))
        # Now remove the destination file
        removeFileRes = returnSingleResult(self.storageElement.removeFile(destinationFilePath))
        # Check removed file
        missingExistsRes = returnSingleResult(self.storageElement.exists(destinationFilePath))
        # Check directories are handled properly
        destinationDir = os.path.dirname(destinationFilePath)
        directoryExistsRes = returnSingleResult(self.storageElement.exists(destinationDir))
        # Check that the put was done correctly
        self.assertTrue(putFileRes["OK"])
        self.assertTrue(putFileRes["Value"])
        self.assertEqual(putFileRes["Value"], self.localFileSize)
        # Check that we checked the file correctly
        self.assertTrue(existsRes["OK"])
        self.assertTrue(existsRes["Value"])
        # Check that the removal was done correctly
        self.assertTrue(removeFileRes["OK"])
        self.assertTrue(removeFileRes["Value"])
        # Check the exists for non existant file
        self.assertTrue(missingExistsRes["OK"])
        self.assertFalse(missingExistsRes["Value"])
        # Check that directories exist
        self.assertTrue(directoryExistsRes["OK"])
        self.assertTrue(directoryExistsRes["Value"])
    def test_isFile(self):
        """isFile() must be True for an uploaded file, an error for a
        removed file, and False for a directory."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tIs file size test\n"
        )
        destinationFilePath = "%s/testFile.%s" % (self.destDirectory, time.time())
        # pfnForLfnRes = returnSingleResult( self.storageElement.getURL( destinationFilePath ) )
        # destinationPfn = pfnForLfnRes['Value']
        fileDict = {destinationFilePath: self.localSourceFile}
        putFileRes = returnSingleResult(self.storageElement.putFile(fileDict))
        # Is a file
        isFileRes = returnSingleResult(self.storageElement.isFile(destinationFilePath))
        # Now remove the destination file
        removeFileRes = returnSingleResult(self.storageElement.removeFile(destinationFilePath))
        # Get metadata for a removed file
        missingIsFileRes = returnSingleResult(self.storageElement.isFile(destinationFilePath))
        # Check directories are handled properly
        destinationDir = os.path.dirname(destinationFilePath)
        directoryIsFileRes = returnSingleResult(self.storageElement.isFile(destinationDir))
        # Check that the put was done correctly
        self.assertTrue(putFileRes["OK"])
        self.assertTrue(putFileRes["Value"])
        self.assertEqual(putFileRes["Value"], self.localFileSize)
        # Check that we checked the file correctly
        self.assertTrue(isFileRes["OK"])
        self.assertTrue(isFileRes["Value"])
        # Check that the removal was done correctly
        self.assertTrue(removeFileRes["OK"])
        self.assertTrue(removeFileRes["Value"])
        # Check the is file for non existant file
        self.assertFalse(missingIsFileRes["OK"])
        expectedError = "File does not exist"
        self.assertTrue(expectedError in missingIsFileRes["Message"])
        # Check that is file operation with a directory
        self.assertTrue(directoryIsFileRes["OK"])
        self.assertFalse(directoryIsFileRes["Value"])
def test_putFile(self):
    """Upload a single file to the storage element and remove it again."""
    print(
        "\n\n#########################################################" "################\n\n\t\t\tPut file test\n"
    )
    remotePath = "%s/testFile.%s" % (self.destDirectory, time.time())
    # Upload the local source file under a unique remote name
    uploadRes = returnSingleResult(self.storageElement.putFile({remotePath: self.localSourceFile}))
    # Clean up the remote copy straight away
    cleanupRes = returnSingleResult(self.storageElement.removeFile(remotePath))
    # The upload must succeed and report the local file size
    self.assertTrue(uploadRes["OK"])
    self.assertTrue(uploadRes["Value"])
    self.assertEqual(uploadRes["Value"], self.localFileSize)
    # The removal must succeed as well
    self.assertTrue(cleanupRes["OK"])
    self.assertTrue(cleanupRes["Value"])
def test_getFile(self):
    """Upload a file, download it back locally, then remove both copies."""
    print(
        "\n\n#########################################################" "################\n\n\t\t\tGet file test\n"
    )
    destinationFilePath = "%s/testFile.%s" % (self.destDirectory, time.time())
    fileDict = {destinationFilePath: self.localSourceFile}
    putFileRes = returnSingleResult(self.storageElement.putFile(fileDict))
    # Now get a local copy of the file
    getFileRes = returnSingleResult(self.storageElement.getFile(destinationFilePath))
    # Now remove the destination file
    removeFileRes = returnSingleResult(self.storageElement.removeFile(destinationFilePath))
    # Clean up the local copy.  If the download failed the local file does
    # not exist; an unconditional os.remove would then raise OSError and
    # mask the real assertion failures below.
    localCopy = os.path.basename(destinationFilePath)
    if os.path.exists(localCopy):
        os.remove(localCopy)
    # Check that the put was done correctly
    self.assertTrue(putFileRes["OK"])
    self.assertTrue(putFileRes["Value"])
    self.assertEqual(putFileRes["Value"], self.localFileSize)
    # Check that we got the file correctly
    self.assertTrue(getFileRes["OK"])
    self.assertEqual(getFileRes["Value"], self.localFileSize)
    # Check that the removal was done correctly
    self.assertTrue(removeFileRes["OK"])
    self.assertTrue(removeFileRes["Value"])
def test_getFileMetadata(self):
    """Upload a file and check metadata on it, on a removed file and on a directory."""
    print(
        "\n\n#########################################################"
        "################\n\n\t\t\tGet file metadata test\n"
    )
    destinationFilePath = "%s/testFile.%s" % (self.destDirectory, time.time())
    fileDict = {destinationFilePath: self.localSourceFile}
    putFileRes = returnSingleResult(self.storageElement.putFile(fileDict))
    # Get the file metadata
    getFileMetadataRes = returnSingleResult(self.storageElement.getFileMetadata(destinationFilePath))
    # Now remove the destination file
    removeFileRes = returnSingleResult(self.storageElement.removeFile(destinationFilePath))
    # Get metadata for a removed file
    getMissingFileMetadataRes = returnSingleResult(self.storageElement.getFileMetadata(destinationFilePath))
    # Check directories are handled properly
    destinationDir = os.path.dirname(destinationFilePath)
    directoryMetadataRes = returnSingleResult(self.storageElement.getFileMetadata(destinationDir))
    # Check that the put was done correctly
    self.assertTrue(putFileRes["OK"])
    self.assertTrue(putFileRes["Value"])
    self.assertEqual(putFileRes["Value"], self.localFileSize)
    # Check that the metadata was done correctly
    self.assertTrue(getFileMetadataRes["OK"])
    metadataDict = getFileMetadataRes["Value"]
    # Works only for SRM2 plugin
    # self.assertTrue( metadataDict['Cached'] )
    # self.assertFalse( metadataDict['Migrated'] )
    self.assertEqual(metadataDict["Size"], self.localFileSize)
    # Check that the removal was done correctly
    self.assertTrue(removeFileRes["OK"])
    self.assertTrue(removeFileRes["Value"])
    # Check the get metadata for non existent file.
    # assertIn gives a useful diff on failure, unlike assertTrue(x in y).
    self.assertFalse(getMissingFileMetadataRes["OK"])
    self.assertIn("File does not exist", getMissingFileMetadataRes["Message"])
    # Check the metadata operation with a directory
    self.assertFalse(directoryMetadataRes["OK"])
    self.assertIn("Supplied path is not a file", directoryMetadataRes["Message"])
def test_getFileSize(self):
    """Upload a file and check getFileSize on it, on a removed file and on a directory."""
    print(
        "\n\n#########################################################"
        "################\n\n\t\t\tGet file size test\n"
    )
    destinationFilePath = "%s/testFile.%s" % (self.destDirectory, time.time())
    fileDict = {destinationFilePath: self.localSourceFile}
    putFileRes = returnSingleResult(self.storageElement.putFile(fileDict))
    # Get the file size
    getFileSizeRes = returnSingleResult(self.storageElement.getFileSize(destinationFilePath))
    # Now remove the destination file
    removeFileRes = returnSingleResult(self.storageElement.removeFile(destinationFilePath))
    # Get the size for a removed file
    getMissingFileSizeRes = returnSingleResult(self.storageElement.getFileSize(destinationFilePath))
    # Check directories are handled properly
    destinationDir = os.path.dirname(destinationFilePath)
    directorySizeRes = returnSingleResult(self.storageElement.getFileSize(destinationDir))
    # Check that the put was done correctly
    self.assertTrue(putFileRes["OK"])
    self.assertTrue(putFileRes["Value"])
    self.assertEqual(putFileRes["Value"], self.localFileSize)
    # Check that the size was reported correctly
    self.assertTrue(getFileSizeRes["OK"])
    self.assertEqual(getFileSizeRes["Value"], self.localFileSize)
    # Check that the removal was done correctly
    self.assertTrue(removeFileRes["OK"])
    self.assertTrue(removeFileRes["Value"])
    # Check the get size for non existent file.
    # assertIn gives a useful diff on failure, unlike assertTrue(x in y).
    self.assertFalse(getMissingFileSizeRes["OK"])
    self.assertIn("File does not exist", getMissingFileSizeRes["Message"])
    # Check the size operation with a directory
    self.assertFalse(directorySizeRes["OK"])
    self.assertIn("Supplied path is not a file", directorySizeRes["Message"])
def test_getURL(self):
    """Upload a file and check that a dips transfer URL can be obtained for it."""
    print(
        "\n\n#########################################################"
        "################\n\n\t\tGet access url test\n"
    )
    destinationFilePath = "%s/testFile.%s" % (self.destDirectory, time.time())
    fileDict = {destinationFilePath: self.localSourceFile}
    putFileRes = returnSingleResult(self.storageElement.putFile(fileDict))
    # Get a transfer url for the file
    getTurlRes = self.storageElement.getURL(destinationFilePath, protocol="dips")
    # Remove the destination file
    removeFileRes = returnSingleResult(self.storageElement.removeFile(destinationFilePath))
    # Get missing turl res
    getMissingTurlRes = self.storageElement.getURL(destinationFilePath, protocol="dips")
    # Check that the put was done correctly
    self.assertTrue(putFileRes["OK"])
    self.assertTrue(putFileRes["Value"])
    self.assertEqual(putFileRes["Value"], self.localFileSize)
    # Check that we can get the tURL properly.
    # assertIsInstance replaces the type(x) in ... membership tests and
    # reports the actual type on failure.
    self.assertTrue(getTurlRes["OK"])
    self.assertTrue(getTurlRes["Value"])
    self.assertIsInstance(getTurlRes["Value"], dict)
    self.assertIsInstance(getTurlRes["Value"]["Successful"][destinationFilePath], six.string_types)
    # Check that the removal was done correctly
    self.assertTrue(removeFileRes["OK"])
    self.assertTrue(removeFileRes["Value"])
    # Works only for SRM2 plugins
    # # Check that non-existant files are handled correctly
    # self.assertFalse( getMissingTurlRes['OK'] )
    # expectedError = "File does not exist"
    # self.assertTrue( expectedError in getMissingTurlRes['Message'] )
# Works only for SRM2 plugins
# def test_prestageFile( self ):
# destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = self.storageElement.getURL( destinationFilePath )
# destinationPfn = pfnForLfnRes['Value']
# fileDict = {destinationPfn:self.localSourceFile}
# putFileRes = self.storageElement.putFile( fileDict, singleFile = True )
# # Get the file metadata
# prestageFileRes = self.storageElement.prestageFile( destinationPfn, singleFile = True )
# # Now remove the destination file
# removeFileRes = self.storageElement.removeFile( destinationPfn, singleFile = True )
# # Get metadata for a removed file
# missingPrestageFileRes = self.storageElement.prestageFile( destinationPfn, singleFile = True )
#
# # Check that the put was done correctly
# self.assertTrue( putFileRes['OK'] )
# self.assertTrue( putFileRes['Value'] )
# self.assertEqual( putFileRes['Value'], self.localFileSize )
# # Check that the prestage was done correctly
# self.assertTrue( prestageFileRes['OK'] )
# self.assertEqual( type( prestageFileRes['Value'] ), types.StringType )
# # Check that the removal was done correctly
# self.assertTrue( removeFileRes['OK'] )
# self.assertTrue( removeFileRes['Value'] )
# # Check the prestage for non existant file
# self.assertFalse( missingPrestageFileRes['OK'] )
# expectedError = "No such file or directory"
# self.assertTrue( expectedError in missingPrestageFileRes['Message'] )
# Works only for SRM2 plugins
# def test_prestageStatus( self ):
# destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = self.storageElement.getURL( destinationFilePath )
# destinationPfn = pfnForLfnRes['Value']
# fileDict = {destinationPfn:self.localSourceFile}
# putFileRes = self.storageElement.putFile( fileDict, singleFile = True )
# # Get the file metadata
# prestageFileRes = self.storageElement.prestageFile( destinationPfn, singleFile = True )
# srmID = ''
# if prestageFileRes['OK']:
# srmID = prestageFileRes['Value']
# # Take a quick break to allow the SRM to realise the file is available
# sleepTime = 10
# print 'Sleeping for %s seconds' % sleepTime
# time.sleep( sleepTime )
# # Check that we can monitor the stage request
# prestageStatusRes = self.storageElement.prestageFileStatus( {destinationPfn:srmID}, singleFile = True )
# # Now remove the destination file
# removeFileRes = self.storageElement.removeFile( destinationPfn, singleFile = True )
#
# # Check that the put was done correctly
# self.assertTrue( putFileRes['OK'] )
# self.assertTrue( putFileRes['Value'] )
# self.assertEqual( putFileRes['Value'], self.localFileSize )
# # Check that the prestage was done correctly
# self.assertTrue( prestageFileRes['OK'] )
# self.assertEqual( type( prestageFileRes['Value'] ), types.StringType )
# # Check the file was found to be staged
# self.assertTrue( prestageStatusRes['OK'] )
# self.assertTrue( prestageStatusRes['Value'] )
# # Check that the removal was done correctly
# self.assertTrue( removeFileRes['OK'] )
# self.assertTrue( removeFileRes['Value'] )
# Works only for SRM2 plugins
# def test_pinRelease( self ):
# print '\n\n#########################################################################\n\n\t\tPin release test\n'
# destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = self.storageElement.getURL( destinationFilePath )
# destinationPfn = pfnForLfnRes['Value']
# fileDict = {destinationPfn:self.localSourceFile}
# putFileRes = self.storageElement.putFile( fileDict, singleFile = True )
# # Get the file metadata
# pinFileRes = self.storageElement.pinFile( destinationPfn, singleFile = True )
# srmID = ''
# if pinFileRes['OK']:
# srmID = pinFileRes['Value']
# # Check that we can release the file
# releaseFileRes = self.storageElement.releaseFile( {destinationPfn:srmID}, singleFile = True )
# # Now remove the destination file
# removeFileRes = self.storageElement.removeFile( destinationPfn, singleFile = True )
#
# # Check that the put was done correctly
# self.assertTrue( putFileRes['OK'] )
# self.assertTrue( putFileRes['Value'] )
# self.assertEqual( putFileRes['Value'], self.localFileSize )
# # Check that the file pin was done correctly
# self.assertTrue( pinFileRes['OK'] )
# self.assertEqual( type( pinFileRes['Value'] ), types.StringType )
# # Check the file was found to be staged
# self.assertTrue( releaseFileRes['OK'] )
# self.assertTrue( releaseFileRes['Value'] )
# # Check that the removal was done correctly
# self.assertTrue( removeFileRes['OK'] )
# self.assertTrue( removeFileRes['Value'] )
class DirectoryTestCases(StorageElementTestCase):
    """Tests for the directory-level operations of a StorageElement.

    Each test uploads a locally created directory containing
    ``self.numberOfFiles`` copies of /etc/group, exercises one directory
    operation, and removes the remote directory again.  The setup and the
    repeated result checks for putDirectory/removeDirectory live in the
    private helpers below instead of being duplicated in every test.
    """

    def _populateLocalDirectory(self):
        """Create /tmp/unit-test holding numberOfFiles copies of /etc/group.

        Returns a (localDir, sizeOfLocalFile) tuple.  The one second sleep
        between copies guarantees unique time.time()-based file names.
        """
        localDir = "/tmp/unit-test"
        srcFile = "/etc/group"
        sizeOfLocalFile = getSize(srcFile)
        if not os.path.exists(localDir):
            os.mkdir(localDir)
        for _ in range(self.numberOfFiles):
            shutil.copy(srcFile, "%s/testFile.%s" % (localDir, time.time()))
            time.sleep(1)
        return localDir, sizeOfLocalFile

    def _checkPutDirectoryResult(self, putDirRes, destDirectory, sizeOfLocalFile):
        """Assert that putDirectory uploaded all files with the expected total size."""
        self.assertTrue(putDirRes["OK"])
        self.assertTrue(putDirRes["Value"])
        successful = putDirRes["Value"]["Successful"][destDirectory]
        if successful["Files"]:
            self.assertEqual(successful["Files"], self.numberOfFiles)
            self.assertEqual(successful["Size"], self.numberOfFiles * sizeOfLocalFile)
        # Counters must be plain integers (exact type check kept from original)
        self.assertIn(type(successful["Files"]), six.integer_types)
        self.assertIn(type(successful["Size"]), six.integer_types)

    def _checkRemoveDirectoryResult(self, removeDirRes, destDirectory, sizeOfLocalFile):
        """Assert that removeDirectory removed all files with the expected total size."""
        self.assertTrue(removeDirRes["OK"])
        self.assertTrue(removeDirRes["Value"])
        successful = removeDirRes["Value"]["Successful"][destDirectory]
        if successful["FilesRemoved"]:
            self.assertEqual(successful["FilesRemoved"], self.numberOfFiles)
            self.assertEqual(successful["SizeRemoved"], self.numberOfFiles * sizeOfLocalFile)
        self.assertIn(type(successful["FilesRemoved"]), six.integer_types)
        self.assertIn(type(successful["SizeRemoved"]), six.integer_types)

    def test_createDirectory(self):
        """Create a remote directory and remove it again."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tCreate directory test\n"
        )
        directory = "%s/%s" % (self.destDirectory, "createDirectoryTest")
        createDirRes = self.storageElement.createDirectory(directory)
        # Remove the target dir
        removeDirRes = self.storageElement.removeDirectory(directory, recursive=True)
        # Check that the creation was done correctly
        self.assertTrue(createDirRes["OK"])
        self.assertTrue(createDirRes["Value"])
        # Remove the directory
        self.assertTrue(removeDirRes["OK"])
        self.assertTrue(removeDirRes["Value"])

    def test_isDirectory(self):
        """isDirectory must be True for the base dir and fail for a missing one."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tIs directory test\n"
        )
        destDirectory = self.destDirectory
        # Test that it is a directory
        isDirectoryRes = self.storageElement.isDirectory(destDirectory)
        # Test that non existent dirs are handled correctly
        nonExistantDir = "%s/%s" % (destDirectory, "NonExistant")
        nonExistantDirRes = self.storageElement.isDirectory(nonExistantDir)
        # Check that it works with the existing dir
        self.assertTrue(isDirectoryRes["OK"])
        self.assertTrue(isDirectoryRes["Value"])
        # Check that we handle non existent correctly
        self.assertIn(nonExistantDirRes["Value"]["Failed"][nonExistantDir], ["Path does not exist"])

    def test_listDirectory(self):
        """Upload a directory, list it remotely and remove it."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tList directory test\n"
        )
        destDirectory = "%s/%s" % (self.destDirectory, "listDirectoryTest")
        localDir, sizeOfLocalFile = self._populateLocalDirectory()
        # Check that we can successfully upload the directory to the storage element
        putDirRes = self.storageElement.putDirectory({destDirectory: localDir})
        print(putDirRes)
        # List the remote directory
        listDirRes = self.storageElement.listDirectory(destDirectory)
        # Now remove the remote directory
        removeDirRes = self.storageElement.removeDirectory(destDirectory, recursive=True)
        print(removeDirRes)
        # Clean up the locally created directory
        shutil.rmtree(localDir)
        self._checkPutDirectoryResult(putDirRes, destDirectory, sizeOfLocalFile)
        # Perform the checks for the list dir operation
        self.assertTrue(listDirRes["OK"])
        self.assertTrue(listDirRes["Value"])
        self.assertIn("SubDirs", listDirRes["Value"]["Successful"][destDirectory])
        self.assertIn("Files", listDirRes["Value"]["Successful"][destDirectory])
        self.assertEqual(len(listDirRes["Value"]["Successful"][destDirectory]["Files"]), self.numberOfFiles)
        self._checkRemoveDirectoryResult(removeDirRes, destDirectory, sizeOfLocalFile)

    def test_getDirectoryMetadata(self):
        """Upload a directory, fetch its metadata and remove it."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tDirectory metadata test\n"
        )
        destDirectory = "%s/%s" % (self.destDirectory, "getDirectoryMetadataTest")
        localDir, sizeOfLocalFile = self._populateLocalDirectory()
        putDirRes = self.storageElement.putDirectory({destDirectory: localDir})
        # Get the directory metadata
        metadataDirRes = self.storageElement.getDirectoryMetadata(destDirectory)
        # Now remove the remote directory
        removeDirRes = self.storageElement.removeDirectory(destDirectory, recursive=True)
        # Clean up the locally created directory
        shutil.rmtree(localDir)
        self._checkPutDirectoryResult(putDirRes, destDirectory, sizeOfLocalFile)
        # Perform the checks for the metadata operation
        self.assertTrue(metadataDirRes["OK"])
        self.assertTrue(metadataDirRes["Value"])
        # Works only for the SRM2 plugin
        # self.assertTrue( metadataDirRes['Value']['Mode'] )
        # self.assertTrue( type( metadataDirRes['Value']['Mode'] ) == int )
        self.assertTrue(metadataDirRes["Value"]["Successful"][destDirectory]["Exists"])
        self.assertEqual(metadataDirRes["Value"]["Successful"][destDirectory]["Type"], "Directory")
        self._checkRemoveDirectoryResult(removeDirRes, destDirectory, sizeOfLocalFile)

    def test_getDirectorySize(self):
        """Upload a directory, query its size and remove it."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tGet directory size test\n"
        )
        destDirectory = "%s/%s" % (self.destDirectory, "getDirectorySizeTest")
        localDir, sizeOfLocalFile = self._populateLocalDirectory()
        putDirRes = self.storageElement.putDirectory({destDirectory: localDir})
        # Get the directory size
        getDirSizeRes = self.storageElement.getDirectorySize(destDirectory)
        # Now remove the remote directory
        removeDirRes = self.storageElement.removeDirectory(destDirectory, recursive=True)
        # Clean up the locally created directory
        shutil.rmtree(localDir)
        self._checkPutDirectoryResult(putDirRes, destDirectory, sizeOfLocalFile)
        # Perform the checks for the get dir size operation
        self.assertTrue(getDirSizeRes["OK"])
        self.assertTrue(getDirSizeRes["Value"])
        self.assertFalse(getDirSizeRes["Value"]["Successful"][destDirectory]["SubDirs"])
        self.assertIn(type(getDirSizeRes["Value"]["Successful"][destDirectory]["Files"]), six.integer_types)
        self.assertIn(type(getDirSizeRes["Value"]["Successful"][destDirectory]["Size"]), six.integer_types)
        self._checkRemoveDirectoryResult(removeDirRes, destDirectory, sizeOfLocalFile)

    def test_removeDirectory(self):
        """Upload a directory and remove it recursively."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tRemove directory test\n"
        )
        destDirectory = "%s/%s" % (self.destDirectory, "removeDirectoryTest")
        localDir, sizeOfLocalFile = self._populateLocalDirectory()
        putDirRes = self.storageElement.putDirectory({destDirectory: localDir})
        # Now remove the remote directory
        removeDirRes = self.storageElement.removeDirectory(destDirectory, recursive=True)
        # Clean up the locally created directory
        shutil.rmtree(localDir)
        self._checkPutDirectoryResult(putDirRes, destDirectory, sizeOfLocalFile)
        self._checkRemoveDirectoryResult(removeDirRes, destDirectory, sizeOfLocalFile)

    def test_getDirectory(self):
        """Upload a directory, download it back and remove the remote copy."""
        print(
            "\n\n#########################################################"
            "################\n\n\t\t\tGet directory test\n"
        )
        destDirectory = "%s/%s" % (self.destDirectory, "getDirectoryTest")
        localDir, sizeOfLocalFile = self._populateLocalDirectory()
        putDirRes = self.storageElement.putDirectory({destDirectory: localDir})
        # Remove the local copy before downloading it back into the same path
        shutil.rmtree(localDir)
        getDirRes = self.storageElement.getDirectory(destDirectory, localPath=localDir)
        # Now remove the remote directory
        removeDirRes = self.storageElement.removeDirectory(destDirectory, recursive=True)
        # Clean up the locally re-created directory
        if os.path.exists(localDir):
            shutil.rmtree(localDir)
        self._checkPutDirectoryResult(putDirRes, destDirectory, sizeOfLocalFile)
        # Perform the checks for the get directory operation
        self.assertTrue(getDirRes["OK"])
        self.assertTrue(getDirRes["Value"])
        successful = getDirRes["Value"]["Successful"][destDirectory]
        if successful["Files"]:
            self.assertEqual(successful["Files"], self.numberOfFiles)
            self.assertEqual(successful["Size"], self.numberOfFiles * sizeOfLocalFile)
        self.assertIn(type(successful["Files"]), six.integer_types)
        self.assertIn(type(successful["Size"]), six.integer_types)
        self._checkRemoveDirectoryResult(removeDirRes, destDirectory, sizeOfLocalFile)
if __name__ == "__main__":
    # Run all three storage-element test case classes with verbose output
    gLogger.setLevel("DEBUG")
    suite = unittest.TestSuite()
    for caseClass in (DirectoryTestCases, FileTestCases, GetInfoTestCase):
        suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(caseClass))
    testResult = unittest.TextTestRunner(verbosity=2).run(suite)
| DIRACGrid/DIRAC | src/DIRAC/Resources/Storage/test/FIXME_Test_StorageElement.py | Python | gpl-3.0 | 40,461 | [
"DIRAC"
] | 2fa43a3e062406331caba8e939352b2c66b521b619300a498a21fcb2b45a168a |
# import os
# import subprocess
# import uuid
import sys
from datetime import datetime
sys.path.append('../../python')
# Import moose, exiting with a helpful message if it is not on the path.
try:
    import moose
except ImportError:
    # Parenthesised single-argument print is valid under both Python 2 and 3,
    # unlike the original bare print statement.
    print('Please include the directory containing moose.py and _moose.so in your PYTHONPATH environmental variable.')
    sys.exit(1)
def time_creation(n=1000):
    """Benchmark creation of *n* moose.Neutral elements.

    :param n: number of elements to create (default 1000).
    :returns: the elapsed wall time as a ``datetime.timedelta``.
    """
    elist = []
    start = datetime.now()
    for ii in range(n):
        elist.append(moose.Neutral('a_%d' % (ii)))
    end = datetime.now()
    delta = end - start
    # total_seconds() (Python 2.7+) replaces the error-prone manual
    # days * 86400 + seconds + microseconds * 1e-6 arithmetic; the
    # parenthesised print is valid on both Python 2 and 3.
    print('total time to create %d Neutral elements: %g' % (n, delta.total_seconds()))
    return delta
# Run the benchmark with its default element count when executed as a script.
if __name__ == '__main__':
    time_creation()
| dilawar/moose-full | moose-core/tests/python/benchmark.py | Python | gpl-2.0 | 686 | [
"MOOSE"
] | b0348bc58502c8b3d6d4cada1bc4f14b43ddfab734addae6ef5c75ccc7a220f7 |
import os
import unittest
import shutil, tempfile
from shyft.repository.netcdf.cf_region_model_repository import CFRegionModelRepository
from shyft import shyftdata_dir
from shyft.api.pt_gs_k import PTGSKModel
class CFRegionModelRepositoryTestCase(unittest.TestCase):
    """Exercise CFRegionModelRepository against the bundled netCDF cell data.

    The repository is constructed once at class definition time from the
    ``region`` and ``model`` configuration dicts below and shared by the
    tests.  Files written by the tests go into a scratch directory that is
    removed in tearDownClass.
    """

    # Scratch directory for netCDF files written by the tests.
    # Cleanup happens in tearDownClass so the directory is removed even when
    # a test fails part-way (the original rmtree lived inside a test body
    # and leaked the directory on failure).
    test_dir = tempfile.mkdtemp()

    region = {'region_model_id': 'test',  # a unique name identifier of the simulation
              'domain': {'EPSG': 32633,
                         'nx': 400,
                         'ny': 80,
                         'step_x': 1000,
                         'step_y': 1000,
                         'lower_left_x': 100000,
                         'lower_left_y': 6960000},
              'repository': {'class': CFRegionModelRepository,
                             'params': {
                                 'data_file': os.path.join(shyftdata_dir,
                                                           'netcdf/orchestration-testdata/cell_data.nc')}},
              }

    model = {'model_t': PTGSKModel,  # model to construct
             'model_parameters': {
                 'ae': {
                     'ae_scale_factor': 1.5},
                 'gs': {
                     'calculate_iso_pot_energy': False,
                     'fast_albedo_decay_rate': 6.752787747748934,
                     'glacier_albedo': 0.4,
                     'initial_bare_ground_fraction': 0.04,
                     'max_albedo': 0.9,
                     'max_water': 0.1,
                     'min_albedo': 0.6,
                     'slow_albedo_decay_rate': 37.17325702015658,
                     'snow_cv': 0.4,
                     'snow_cv_altitude_factor': 0.0,
                     'snow_cv_forest_factor': 0.0,
                     'tx': -0.5752881492890207,
                     'snowfall_reset_depth': 5.0,
                     'surface_magnitude': 30.0,
                     'wind_const': 1.0,
                     'wind_scale': 1.8959672005350063,
                     'winter_end_day_of_year': 100},
                 'kirchner': {
                     'c1': -3.336197322290274,
                     'c2': 0.33433661533385695,
                     'c3': -0.12503959620315988},
                 'p_corr': {
                     'scale_factor': 1.0},
                 'pt': {'albedo': 0.2,
                        'alpha': 1.26},
                 'routing': {
                     'alpha': 0.9,
                     'beta': 3.0,
                     'velocity': 0.0}
             }
             }

    region_model_repo = CFRegionModelRepository(region, model)

    @classmethod
    def tearDownClass(cls):
        # ignore_errors: the directory may already be gone or partially written
        shutil.rmtree(cls.test_dir, ignore_errors=True)

    def test_get_region_model(self):
        """The repository must build a region model of the configured type."""
        region_model = self.region_model_repo.get_region_model('test')
        self.assertIsInstance(region_model, PTGSKModel, 'Correct model type not returned from CFRegionModelRepository')

    def test_cell_data_to_netcdf(self):
        """Round-trip: write cell data to netCDF and rebuild a model from it."""
        region_model = self.region_model_repo.get_region_model('test')
        self.region_model_repo.cell_data_to_netcdf(region_model, os.path.join(self.test_dir, 'test'))
        # open the file and be sure it works
        output_nc = os.path.join(self.test_dir, 'test_cell_data.nc')
        # Point the (class-level) region config at the freshly written file
        self.region['repository']['params']['data_file'] = output_nc
        tmp_rm = CFRegionModelRepository(self.region, self.model).get_region_model('test')
        self.assertIsInstance(tmp_rm, PTGSKModel, 'Error with {}'.format(output_nc))
# Allow the test module to be executed directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| jfburkhart/shyft | shyft/tests/test_cf_region_model_repository.py | Python | lgpl-3.0 | 3,505 | [
"NetCDF"
] | 1bf3094daa4262a9d3d017e032045c3d341c665446c30db24df1c4cd3f24d20b |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by ansible-doc and the plugin loader: the
# interface is declared stable and the module is community curated.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'curated'}
DOCUMENTATION = """
---
module: elb_classic_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB
choices: ["present", "absent"]
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
instance_ids:
description:
- List of instance ids to attach to this ELB
required: false
default: false
version_added: "2.1"
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids
required: false
default: false
version_added: "2.1"
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
required: false
default: None
version_added: "1.6"
security_group_names:
description:
- A list of security group names to apply to the elb
required: false
default: None
version_added: "2.0"
health_check:
description:
- An associative array of health check configuration settings (see example)
required: false
default: None
access_logs:
description:
- An associative array of access logs configuration settings (see example)
required: false
default: None
version_added: "2.0"
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
If you choose to update your scheme with a different value the ELB will be destroyed and
recreated. To update scheme you must use the option wait.
choices: ["internal", "internet-facing"]
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
- An associative array of stickiness policy settings. Policy will be applied to all listeners ( see example )
required: false
version_added: "2.0"
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.1"
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
A maximum of 600 seconds (10 minutes) is allowed.
required: false
default: 60
version_added: "2.1"
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict.
required: false
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- local_action:
    module: elb_classic_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- local_action:
    module: elb_classic_lb
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- local_action:
    module: elb_classic_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- local_action:
    module: elb_classic_lb
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- local_action:
    module: elb_classic_lb
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- local_action:
    module: elb_classic_lb
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
    module: elb_classic_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
    module: elb_classic_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Create an ELB and assign a list of subnets to it.
- local_action:
    module: elb_classic_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
    module: elb_classic_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
    module: elb_classic_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
    module: elb_classic_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- local_action:
    module: elb_classic_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- local_action:
    module: elb_classic_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags: {}
"""
import functools
import os
import random
import time
import traceback
try:
    import boto
    import boto.ec2.elb
    import boto.ec2.elb.attributes
    import boto.vpc
    from boto.ec2.elb.healthcheck import HealthCheck
    from boto.ec2.tag import Tag
    HAS_BOTO = True
except ImportError:
    # boto is an optional runtime dependency; record its absence so the
    # module can fail later with a clear message instead of an ImportError.
    HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError, get_aws_connection_info
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
    """Open a boto VPC connection for *region*, failing the module on error."""
    try:
        vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))
    else:
        return vpc_conn
# Maximum retry attempts used with the @_throttleable_operation decorator.
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
             zones=None, purge_zones=None, security_group_ids=None,
             health_check=None, subnets=None, purge_subnets=None,
             scheme="internet-facing", connection_draining_timeout=None,
             idle_timeout=None,
             cross_az_load_balancing=None, access_logs=None,
             stickiness=None, wait=None, wait_timeout=None, tags=None,
             region=None,
             instance_ids=None, purge_instance_ids=None, **aws_connect_params):
    """Record the desired ELB configuration and connect to AWS.

    The parameters mirror the module options (see DOCUMENTATION); extra
    keyword arguments are passed straight through to boto as connection
    parameters.  Opens an ELB API connection, looks up any existing load
    balancer with this name, and opens an EC2 connection (used for
    network-interface queries during teardown waits).
    """
    self.module = module
    self.name = name
    self.listeners = listeners
    self.purge_listeners = purge_listeners
    self.instance_ids = instance_ids
    self.purge_instance_ids = purge_instance_ids
    self.zones = zones
    self.purge_zones = purge_zones
    self.security_group_ids = security_group_ids
    self.health_check = health_check
    self.subnets = subnets
    self.purge_subnets = purge_subnets
    self.scheme = scheme
    self.connection_draining_timeout = connection_draining_timeout
    self.idle_timeout = idle_timeout
    self.cross_az_load_balancing = cross_az_load_balancing
    self.access_logs = access_logs
    self.stickiness = stickiness
    self.wait = wait
    self.wait_timeout = wait_timeout
    self.tags = tags
    self.aws_connect_params = aws_connect_params
    self.region = region
    self.changed = False   # flipped to True by any mutating helper
    self.status = 'gone'   # one of: 'gone', 'ok', 'created', 'deleted'
    self.elb_conn = self._get_elb_connection()
    try:
        # None when no load balancer with this name exists yet.
        self.elb = self._get_elb()
    except boto.exception.BotoServerError as e:
        module.fail_json(msg='unable to get all load balancers: %s' % e.message, exception=traceback.format_exc())
    self.ec2_conn = self._get_ec2_connection()
@_throttleable_operation(_THROTTLING_RETRIES)
def ensure_ok(self):
    """Create the ELB, or converge an existing one to the requested state."""
    if not self.elb:
        # Zones and listeners will be added at creation
        self._create_elb()
    else:
        if self._get_scheme():
            # the only way to change the scheme is by recreating the resource
            self.ensure_gone()
            self._create_elb()
        else:
            self._set_zones()
            self._set_security_groups()
            self._set_elb_listeners()
            self._set_subnets()
    self._set_health_check()
    # boto has introduced support for some ELB attributes in
    # different versions, so we check first before trying to
    # set them to avoid errors
    if self._check_attribute_support('connection_draining'):
        self._set_connection_draining_timeout()
    if self._check_attribute_support('connecting_settings'):
        self._set_idle_timeout()
    if self._check_attribute_support('cross_zone_load_balancing'):
        self._set_cross_az_load_balancing()
    if self._check_attribute_support('access_log'):
        self._set_access_log()
    # add sticky options
    self.select_stickiness_policy()
    # ensure backend server policies are correct
    self._set_backend_policies()
    # set/remove instance ids
    self._set_instance_ids()
    self._set_tags()
def ensure_gone(self):
    """Destroy the ELB"""
    if not self.elb:
        return
    self._delete_elb()
    if not self.wait:
        return
    elb_removed = self._wait_for_elb_removed()
    # Even though the ELB itself disappears quickly, its network
    # interfaces linger; dependent security groups cannot be deleted
    # until those interfaces register as removed.
    interface_removed = self._wait_for_elb_interface_removed()
    if not (elb_removed and interface_removed):
        self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
def get_info(self):
    """Describe the load balancer as a dict for the module's output.

    Always contains name/status/region; when the ELB exists the dict is
    extended with DNS data, zones, listeners, stickiness/proxy policies,
    backend policies, per-instance health and (where this boto version
    supports them) attribute values.
    """
    try:
        check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
    except Exception:
        # A bare except would also trap SystemExit/KeyboardInterrupt;
        # Exception still covers both the boto error for a missing ELB
        # and the IndexError from an empty result list.
        check_elb = None
    if not check_elb:
        info = {
            'name': self.name,
            'status': self.status,
            'region': self.region
        }
    else:
        try:
            lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
        except Exception:
            lb_cookie_policy = None
        try:
            app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
        except Exception:
            app_cookie_policy = None
        info = {
            'name': check_elb.name,
            'dns_name': check_elb.dns_name,
            'zones': check_elb.availability_zones,
            'security_group_ids': check_elb.security_groups,
            'status': self.status,
            'subnets': self.subnets,
            'scheme': check_elb.scheme,
            'hosted_zone_name': check_elb.canonical_hosted_zone_name,
            'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
            'lb_cookie_policy': lb_cookie_policy,
            'app_cookie_policy': app_cookie_policy,
            'proxy_policy': self._get_proxy_protocol_policy(),
            'backends': self._get_backend_policies(),
            'instances': [instance.id for instance in check_elb.instances],
            'out_of_service_count': 0,
            'in_service_count': 0,
            'unknown_instance_state_count': 0,
            'region': self.region
        }
        # status of instances behind the ELB
        if info['instances']:
            info['instance_health'] = [dict(
                instance_id=instance_state.instance_id,
                reason_code=instance_state.reason_code,
                state=instance_state.state
            ) for instance_state in self.elb_conn.describe_instance_health(self.name)]
        else:
            info['instance_health'] = []
        # instance state counts: InService or OutOfService
        if info['instance_health']:
            for instance_state in info['instance_health']:
                if instance_state['state'] == "InService":
                    info['in_service_count'] += 1
                elif instance_state['state'] == "OutOfService":
                    info['out_of_service_count'] += 1
                else:
                    info['unknown_instance_state_count'] += 1
        if check_elb.health_check:
            info['health_check'] = {
                'target': check_elb.health_check.target,
                'interval': check_elb.health_check.interval,
                'timeout': check_elb.health_check.timeout,
                'healthy_threshold': check_elb.health_check.healthy_threshold,
                'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
            }
        if check_elb.listeners:
            info['listeners'] = [self._api_listener_as_tuple(l)
                                 for l in check_elb.listeners]
        elif self.status == 'created':
            # When creating a new ELB, listeners don't show in the
            # immediately returned result, so just include the
            # ones that were added
            info['listeners'] = [self._listener_as_tuple(l)
                                 for l in self.listeners]
        else:
            info['listeners'] = []
        if self._check_attribute_support('connection_draining'):
            info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)
        if self._check_attribute_support('connecting_settings'):
            info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
        if self._check_attribute_support('cross_zone_load_balancing'):
            is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
            if is_cross_az_lb_enabled:
                info['cross_az_load_balancing'] = 'yes'
            else:
                info['cross_az_load_balancing'] = 'no'
        # return stickiness info?
        info['tags'] = self.tags
    return info
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_removed(self):
    """Poll until the ELB no longer exists or the wait timeout elapses.

    Returns True once AWS reports LoadBalancerNotFound, False on timeout.
    Fixes over the original: the redundant ``(BotoServerError, Exception)``
    tuple is collapsed to ``Exception``; ``getattr`` avoids an
    AttributeError when a non-boto exception has no ``.code``; and the
    loop now sleeps when the ELB still exists instead of burning through
    all retries instantly without ever waiting.
    """
    polling_increment_secs = 15
    max_retries = (self.wait_timeout // polling_increment_secs)
    for _ in range(0, max_retries):
        try:
            self.elb_conn.get_all_lb_attributes(self.name)
        except Exception as e:
            if "LoadBalancerNotFound" in getattr(e, 'code', ''):
                return True
            time.sleep(polling_increment_secs)
        else:
            # ELB still present; wait before polling again.
            time.sleep(polling_increment_secs)
    return False
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_interface_removed(self):
    """Poll until the ELB's network interfaces are gone, or timeout.

    Returns True when every queried interface has disappeared (empty
    result or InvalidNetworkInterfaceID), False when the retry budget
    derived from wait_timeout is exhausted.
    """
    polling_increment_secs = 15
    max_retries = (self.wait_timeout // polling_increment_secs)
    status_achieved = False
    # Interfaces owned by 'amazon-elb' and described 'ELB <name>' belong
    # to this load balancer.
    elb_interfaces = self.ec2_conn.get_all_network_interfaces(
        filters={'attachment.instance-owner-id': 'amazon-elb',
                 'description': 'ELB {0}'.format(self.name) })
    for x in range(0, max_retries):
        for interface in elb_interfaces:
            try:
                result = self.ec2_conn.get_all_network_interfaces(interface.id)
                if result == []:
                    status_achieved = True
                    break
                else:
                    time.sleep(polling_increment_secs)
            except (boto.exception.BotoServerError, Exception) as e:
                # NOTE(review): Exception already subsumes BotoServerError,
                # and .code exists only on boto errors — a generic
                # exception here would raise AttributeError.  Also, break
                # exits only the inner loop, so the outer retry loop keeps
                # iterating even after success.  TODO confirm and tidy.
                if 'InvalidNetworkInterfaceID' in e.code:
                    status_achieved = True
                    break
                else:
                    self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _get_elb(self):
    """Return the load balancer named ``self.name``, or None when absent.

    Side effect: marks ``self.status`` as 'ok' when the ELB is found.
    """
    for candidate in self.elb_conn.get_all_load_balancers():
        if candidate.name == self.name:
            self.status = 'ok'
            return candidate
    return None
def _get_elb_connection(self):
    """Open a boto ELB connection in self.region, failing the module on error."""
    try:
        conn = connect_to_aws(boto.ec2.elb, self.region,
                              **self.aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        self.module.fail_json(msg=str(e))
    else:
        return conn
def _get_ec2_connection(self):
    """Open a boto EC2 connection in self.region, failing the module on error."""
    try:
        return connect_to_aws(boto.ec2, self.region,
                              **self.aws_connect_params)
    except Exception as e:
        # boto.exception.NoAuthHandlerFound subclasses Exception, so the
        # original (NoAuthHandlerFound, Exception) tuple was redundant.
        self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@_throttleable_operation(_THROTTLING_RETRIES)
def _delete_elb(self):
    """Delete the load balancer; boto returns True on success or raises."""
    if self.elb_conn.delete_load_balancer(name=self.name):
        self.changed = True
        self.status = 'deleted'
def _create_elb(self):
    """Create the ELB with the configured zones, listeners, groups, subnets and scheme."""
    listeners = [self._listener_as_tuple(l) for l in self.listeners]
    self.elb = self.elb_conn.create_load_balancer(name=self.name,
                                                  zones=self.zones,
                                                  security_groups=self.security_group_ids,
                                                  complex_listeners=listeners,
                                                  subnets=self.subnets,
                                                  scheme=self.scheme)
    if self.elb:
        # HACK: Work around a boto bug in which the listeners attribute is
        # always set to the listeners argument to create_load_balancer, and
        # not the complex_listeners
        # We're not doing a self.elb = self._get_elb here because there
        # might be eventual consistency issues and it doesn't necessarily
        # make sense to wait until the ELB gets returned from the EC2 API.
        # This is necessary in the event we hit the throttling errors and
        # need to retry ensure_ok
        # See https://github.com/boto/boto/issues/3526
        self.elb.listeners = self.listeners
        self.changed = True
        self.status = 'created'
def _create_elb_listeners(self, listeners):
    """Takes a list of listener tuples and creates them"""
    # boto returns True on success and raises on failure.
    created = self.elb_conn.create_load_balancer_listeners(
        self.name, complex_listeners=listeners)
    self.changed = created
def _delete_elb_listeners(self, listeners):
    """Takes a list of listener tuples and deletes them from the elb"""
    load_balancer_ports = [entry[0] for entry in listeners]
    # boto returns True on success and raises on failure.
    self.changed = self.elb_conn.delete_load_balancer_listeners(
        self.name, load_balancer_ports)
def _set_elb_listeners(self):
    """
    Creates listeners specified by self.listeners; overwrites existing
    listeners on these ports; removes extraneous listeners
    """
    listeners_to_add = []
    listeners_to_remove = []
    listeners_to_keep = []
    # Check for any listeners we need to create or overwrite
    for listener in self.listeners:
        listener_as_tuple = self._listener_as_tuple(listener)
        # First we loop through existing listeners to see if one is
        # already specified for this port
        existing_listener_found = None
        for existing_listener in self.elb.listeners:
            # Since ELB allows only one listener on each incoming port, a
            # single match on the incoming port is all we're looking for
            if existing_listener[0] == int(listener['load_balancer_port']):
                existing_listener_found = self._api_listener_as_tuple(existing_listener)
                break
        if existing_listener_found:
            # Does it match exactly?
            if listener_as_tuple != existing_listener_found:
                # The ports are the same but something else is different,
                # so we'll remove the existing one and add the new one
                listeners_to_remove.append(existing_listener_found)
                listeners_to_add.append(listener_as_tuple)
            else:
                # We already have this listener, so we're going to keep it
                listeners_to_keep.append(existing_listener_found)
        else:
            # We didn't find an existing listener, so just add the new one
            listeners_to_add.append(listener_as_tuple)
    # Check for any extraneous listeners we need to remove, if desired
    if self.purge_listeners:
        for existing_listener in self.elb.listeners:
            existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
            if existing_listener_tuple in listeners_to_remove:
                # Already queued for removal
                continue
            if existing_listener_tuple in listeners_to_keep:
                # Keep this one around
                continue
            # Since we're not already removing it and we don't need to keep
            # it, let's get rid of it
            listeners_to_remove.append(existing_listener_tuple)
    # Removals must run before additions so that a replaced port is free
    # by the time its new listener is created.
    if listeners_to_remove:
        self._delete_elb_listeners(listeners_to_remove)
    if listeners_to_add:
        self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
    """Return boto's complex tuple, appending ssl_certificate_id when present."""
    as_tuple = listener.get_complex_tuple()
    needs_cert = listener.ssl_certificate_id and len(as_tuple) < 5
    return as_tuple + (listener.ssl_certificate_id,) if needs_cert else as_tuple
def _listener_as_tuple(self, listener):
    """Formats listener as a 4- or 5-tuples, in the order specified by the
    ELB API"""
    # str()/upper() keep the protocol spelling identical to what the ELB
    # API reports, so tuple comparisons against API output work.
    proto = str(listener['protocol'].upper())
    # Instance protocol defaults to the load balancer protocol, mirroring
    # the ELB API's own behaviour.
    if 'instance_protocol' in listener:
        instance_proto = str(listener['instance_protocol'].upper())
    else:
        instance_proto = proto
    parts = [
        int(listener['load_balancer_port']),
        int(listener['instance_port']),
        proto,
        instance_proto,
    ]
    if 'ssl_certificate_id' in listener:
        parts.append(str(listener['ssl_certificate_id']))
    return tuple(parts)
def _enable_zones(self, zones):
    """Enable the given availability zones on the ELB, failing the module on API errors."""
    try:
        self.elb.enable_zones(zones)
    except boto.exception.BotoServerError as e:
        self.module.fail_json(
            msg='unable to enable zones: %s' % e.message,
            exception=traceback.format_exc())
    self.changed = True
def _disable_zones(self, zones):
    """Disable the given availability zones on the ELB, failing the module on API errors."""
    try:
        self.elb.disable_zones(zones)
    except boto.exception.BotoServerError as e:
        self.module.fail_json(
            msg='unable to disable zones: %s' % e.message,
            exception=traceback.format_exc())
    self.changed = True
def _attach_subnets(self, subnets):
    """Attach the given VPC subnets to the load balancer."""
    self.elb_conn.attach_lb_to_subnets(self.name, subnets)
    self.changed = True
def _detach_subnets(self, subnets):
    """Detach the given VPC subnets from the load balancer."""
    self.elb_conn.detach_lb_from_subnets(self.name, subnets)
    self.changed = True
def _set_subnets(self):
    """Attach/detach VPC subnets so the ELB matches self.subnets."""
    if not self.subnets:
        return
    current = set(self.elb.subnets)
    wanted = set(self.subnets)
    subnets_to_attach = list(wanted - current)
    # Extraneous subnets are only removed when purging was requested.
    subnets_to_detach = list(current - wanted) if self.purge_subnets else None
    if subnets_to_attach:
        self._attach_subnets(subnets_to_attach)
    if subnets_to_detach:
        self._detach_subnets(subnets_to_detach)
def _get_scheme(self):
    """Return True when the requested scheme differs from the ELB's current one."""
    if not self.scheme:
        return False
    if self.elb.scheme == self.scheme:
        return False
    # A scheme change forces a destroy/recreate, which is only safe when
    # the caller opted into waiting for teardown.
    if not self.wait:
        self.module.fail_json(msg="Unable to modify scheme without using the wait option")
    return True
def _set_zones(self):
    """Enable/disable availability zones so the ELB matches self.zones."""
    if not self.zones:
        return
    current = set(self.elb.availability_zones)
    wanted = set(self.zones)
    zones_to_enable = list(wanted - current)
    # Extraneous zones are only removed when purging was requested.
    zones_to_disable = list(current - wanted) if self.purge_zones else None
    if zones_to_enable:
        self._enable_zones(zones_to_enable)
    # N.B. Disabling must come second, otherwise we could momentarily
    # remove every zone from the ELB.
    if zones_to_disable:
        self._disable_zones(zones_to_disable)
def _set_security_groups(self):
    """Apply self.security_group_ids when they differ from the current set."""
    if self.security_group_ids is None:
        return
    if set(self.elb.security_groups) == set(self.security_group_ids):
        return
    self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
    self.changed = True
def _set_health_check(self):
    """Set health check values on ELB as needed"""
    if self.health_check:
        # This just makes it easier to compare each of the attributes
        # and look for changes. Keys are attributes of the current
        # health_check; values are desired values of new health_check
        health_check_config = {
            "target": self._get_health_check_target(),
            "timeout": self.health_check['response_timeout'],
            "interval": self.health_check['interval'],
            "unhealthy_threshold": self.health_check['unhealthy_threshold'],
            "healthy_threshold": self.health_check['healthy_threshold'],
        }
        update_health_check = False
        # The health_check attribute is *not* set on newly created
        # ELBs! So we have to create our own.
        if not self.elb.health_check:
            self.elb.health_check = HealthCheck()
        # Copy over only the settings that actually differ, tracking
        # whether an API update is required at all.
        for attr, desired_value in health_check_config.items():
            if getattr(self.elb.health_check, attr) != desired_value:
                setattr(self.elb.health_check, attr, desired_value)
                update_health_check = True
        if update_health_check:
            self.elb.configure_health_check(self.elb.health_check)
            self.changed = True
def _check_attribute_support(self, attr):
    """Return True when this boto version's LbAttributes knows *attr*."""
    probe = boto.ec2.elb.attributes.LbAttributes()
    return hasattr(probe, attr)
def _set_cross_az_load_balancing(self):
    """Sync the CrossZoneLoadBalancing attribute with the requested setting."""
    attributes = self.elb.get_attributes()
    desired = bool(self.cross_az_load_balancing)
    if bool(attributes.cross_zone_load_balancing.enabled) != desired:
        self.changed = True
        attributes.cross_zone_load_balancing.enabled = desired
    # The attribute is written unconditionally; the AWS call is idempotent.
    self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
                                      attributes.cross_zone_load_balancing.enabled)
def _set_access_log(self):
    """Enable, update or disable S3 access logging to match self.access_logs."""
    attributes = self.elb.get_attributes()
    if self.access_logs:
        if 's3_location' not in self.access_logs:
            self.module.fail_json(msg='s3_location information required')
        # Desired attribute values; missing optional keys fall back to
        # the defaults used here (empty prefix, 60 minute interval).
        access_logs_config = {
            "enabled": True,
            "s3_bucket_name": self.access_logs['s3_location'],
            "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
            "emit_interval": self.access_logs.get('interval', 60),
        }
        update_access_logs_config = False
        # Only push an update when at least one setting actually differs.
        for attr, desired_value in access_logs_config.items():
            if getattr(attributes.access_log, attr) != desired_value:
                setattr(attributes.access_log, attr, desired_value)
                update_access_logs_config = True
        if update_access_logs_config:
            self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
            self.changed = True
    elif attributes.access_log.enabled:
        # No access_logs requested but logging is currently on: disable it.
        attributes.access_log.enabled = False
        self.changed = True
        self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
def _set_connection_draining_timeout(self):
    """Enable/disable connection draining to match the requested timeout."""
    attributes = self.elb.get_attributes()
    if self.connection_draining_timeout is not None:
        # Enable (or re-time) draining only when something differs.
        if not attributes.connection_draining.enabled or \
                attributes.connection_draining.timeout != self.connection_draining_timeout:
            self.changed = True
            attributes.connection_draining.enabled = True
            attributes.connection_draining.timeout = self.connection_draining_timeout
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
    else:
        # No timeout requested: turn draining off if it is currently on.
        if attributes.connection_draining.enabled:
            self.changed = True
            attributes.connection_draining.enabled = False
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _set_idle_timeout(self):
    """Update the ConnectingSettings idle timeout when it differs."""
    attributes = self.elb.get_attributes()
    if self.idle_timeout is None:
        return
    if attributes.connecting_settings.idle_timeout == self.idle_timeout:
        return
    self.changed = True
    attributes.connecting_settings.idle_timeout = self.idle_timeout
    self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
    """Derive a policy name from this module's file name plus *policy_type*,
    e.g. 'elb-classic-lb-LBCookieStickinessPolicyType'.

    Uses os.path instead of splitting on '/' so the result is correct on
    Windows path separators as well.
    """
    base = os.path.splitext(os.path.basename(__file__))[0]
    return base.replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
    """Invoke the named elb_conn policy-creation method for this ELB."""
    create = getattr(self.elb_conn, policy_meth)
    create(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
    """Remove *policy* from the load balancer named *elb_name*."""
    self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
    """Replace an existing policy by deleting it and recreating it."""
    # Order matters: the old policy must be gone before the new one is made.
    self._delete_policy(self.elb.name, policy)
    self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=None):
    """Apply *policy* (a list of policy names) to every HTTP/HTTPS listener.

    *listeners_dict* maps listener port -> protocol string; an empty
    policy list clears the listener's policies.
    """
    # A mutable default argument ([]) is shared across calls; use None as
    # the sentinel and materialize a fresh list instead.
    if policy is None:
        policy = []
    for listener_port in listeners_dict:
        if listeners_dict[listener_port].startswith('HTTP'):
            self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
    """Create or update the stickiness policy named in *policy*.

    policy_attrs carries: 'attr' (policies attribute to scan), 'dict_key'
    (the policy field compared for change), 'param_value' (desired value)
    and 'method' (name of the elb_conn creation method).
    """
    for p in getattr(elb_info.policies, policy_attrs['attr']):
        if str(p.__dict__['policy_name']) == str(policy[0]):
            # Policy already exists: recreate it only when its parameter
            # actually changed ('or 0' normalizes a None param for compare).
            if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
                # Detach from listeners before the delete/recreate cycle.
                self._set_listener_policy(listeners_dict)
                self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
                self.changed = True
            break
    else:
        # for/else: no matching policy found — create it fresh.
        self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
        self.changed = True
    # Finally (re)attach the policy to all HTTP(S) listeners.
    self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
    """Create, update or remove the stickiness policy per self.stickiness.

    Handles the two ELB policy flavours: 'loadbalancer' (LB-generated
    cookie with an expiration period) and 'application' (follows an
    app-provided cookie).  Disabling detaches the managed policy from the
    listeners and deletes it.
    """
    if self.stickiness:
        if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
            self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
        elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
        d = {}
        # Map listener port -> protocol so policies are applied only to
        # HTTP/HTTPS listeners.
        for listener in elb_info.listeners:
            d[listener[0]] = listener[2]
        listeners_dict = d
        if self.stickiness['type'] == 'loadbalancer':
            policy = []
            policy_type = 'LBCookieStickinessPolicyType'
            if self.module.boolean(self.stickiness['enabled']):
                if 'expiration' not in self.stickiness:
                    self.module.fail_json(msg='expiration must be set when type is loadbalancer')
                try:
                    # An expiration of 0 means "no expiration" (None).
                    expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None
                except ValueError:
                    self.module.fail_json(msg='expiration must be set to an integer')
                policy_attrs = {
                    'type': policy_type,
                    'attr': 'lb_cookie_stickiness_policies',
                    'method': 'create_lb_cookie_stickiness_policy',
                    'dict_key': 'cookie_expiration_period',
                    'param_value': expiration
                }
                policy.append(self._policy_name(policy_attrs['type']))
                self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
            elif not self.module.boolean(self.stickiness['enabled']):
                # Only report a change when the managed policy was present.
                if len(elb_info.policies.lb_cookie_stickiness_policies):
                    if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                        self.changed = True
                else:
                    self.changed = False
                # Detach from all listeners, then delete the policy.
                # NOTE(review): the delete runs even when no managed policy
                # exists — presumably the API tolerates this; verify.
                self._set_listener_policy(listeners_dict)
                self._delete_policy(self.elb.name, self._policy_name(policy_type))
        elif self.stickiness['type'] == 'application':
            policy = []
            policy_type = 'AppCookieStickinessPolicyType'
            if self.module.boolean(self.stickiness['enabled']):
                if 'cookie' not in self.stickiness:
                    self.module.fail_json(msg='cookie must be set when type is application')
                policy_attrs = {
                    'type': policy_type,
                    'attr': 'app_cookie_stickiness_policies',
                    'method': 'create_app_cookie_stickiness_policy',
                    'dict_key': 'cookie_name',
                    'param_value': self.stickiness['cookie']
                }
                policy.append(self._policy_name(policy_attrs['type']))
                self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
            elif not self.module.boolean(self.stickiness['enabled']):
                if len(elb_info.policies.app_cookie_stickiness_policies):
                    if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                        self.changed = True
                self._set_listener_policy(listeners_dict)
                self._delete_policy(self.elb.name, self._policy_name(policy_type))
        else:
            # Unknown stickiness type: just clear listener policies.
            self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
"""Get a list of backend policies"""
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
    def _set_backend_policies(self):
        """Sets policies for all backends

        Compares the proxy-protocol policy requested per listener against the
        policies currently attached to each backend port and applies only the
        differences.
        """
        ensure_proxy_protocol = False
        replace = []
        backend_policies = self._get_backend_policies()
        # Find out what needs to be changed
        for listener in self.listeners:
            want = False
            if 'proxy_protocol' in listener and listener['proxy_protocol']:
                ensure_proxy_protocol = True
                want = True
            # Current state is encoded as "instance_port:policy_name" strings.
            if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
                if not want:
                    # Attached but no longer wanted: clear the port's policies.
                    replace.append({'port': listener['instance_port'], 'policies': []})
            elif want:
                # Wanted but not attached yet.
                replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
        # enable or disable proxy protocol
        if ensure_proxy_protocol:
            # The policy object must exist on the ELB before it can be attached.
            self._set_proxy_protocol_policy()
        # Make the backend policies so
        for item in replace:
            self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
            self.changed = True
def _get_proxy_protocol_policy(self):
"""Find out if the elb has a proxy protocol enabled"""
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
def _set_proxy_protocol_policy(self):
"""Install a proxy protocol policy if needed"""
proxy_policy = self._get_proxy_protocol_policy()
if proxy_policy is None:
self.elb_conn.create_lb_policy(
self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
)
self.changed = True
# TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
def _diff_list(self, a, b):
"""Find the entries in list a that are not in list b"""
b = set(b)
return [aa for aa in a if aa not in b]
def _get_instance_ids(self):
"""Get the current list of instance ids installed in the elb"""
instances = []
if self.elb.instances is not None:
for instance in self.elb.instances:
instances.append(instance.id)
return instances
def _set_instance_ids(self):
"""Register or deregister instances from an lb instance"""
assert_instances = self.instance_ids or []
has_instances = self._get_instance_ids()
add_instances = self._diff_list(assert_instances, has_instances)
if add_instances:
self.elb_conn.register_instances(self.elb.name, add_instances)
self.changed = True
if self.purge_instance_ids:
remove_instances = self._diff_list(has_instances, assert_instances)
if remove_instances:
self.elb_conn.deregister_instances(self.elb.name, remove_instances)
self.changed = True
def _set_tags(self):
"""Add/Delete tags"""
if self.tags is None:
return
params = {'LoadBalancerNames.member.1': self.name}
tagdict = dict()
# get the current list of tags from the ELB, if ELB exists
if self.elb:
current_tags = self.elb_conn.get_list('DescribeTags', params,
[('member', Tag)])
tagdict = dict((tag.Key, tag.Value) for tag in current_tags
if hasattr(tag, 'Key'))
# Add missing tags
dictact = dict(set(self.tags.items()) - set(tagdict.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
self.changed=True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
self.changed=True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
    """Module entry point: parse module parameters, validate them, resolve
    security-group names to ids, then converge the ELB to the requested
    state via ElbManager and exit with the resulting facts."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['present', 'absent']},
        name={'required': True},
        listeners={'default': None, 'required': False, 'type': 'list'},
        purge_listeners={'default': True, 'required': False, 'type': 'bool'},
        instance_ids={'default': None, 'required': False, 'type': 'list'},
        purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
        zones={'default': None, 'required': False, 'type': 'list'},
        purge_zones={'default': False, 'required': False, 'type': 'bool'},
        security_group_ids={'default': None, 'required': False, 'type': 'list'},
        security_group_names={'default': None, 'required': False, 'type': 'list'},
        health_check={'default': None, 'required': False, 'type': 'dict'},
        subnets={'default': None, 'required': False, 'type': 'list'},
        purge_subnets={'default': False, 'required': False, 'type': 'bool'},
        scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']},
        connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
        idle_timeout={'default': None, 'type': 'int', 'required': False},
        cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
        stickiness={'default': None, 'required': False, 'type': 'dict'},
        access_logs={'default': None, 'required': False, 'type': 'dict'},
        wait={'default': False, 'type': 'bool', 'required': False},
        wait_timeout={'default': 60, 'type': 'int', 'required': False},
        tags={'default': None, 'required': False, 'type': 'dict'}
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [['security_group_ids', 'security_group_names']]
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
    # Unpack all module parameters into locals for readability below.
    name = module.params['name']
    state = module.params['state']
    listeners = module.params['listeners']
    purge_listeners = module.params['purge_listeners']
    instance_ids = module.params['instance_ids']
    purge_instance_ids = module.params['purge_instance_ids']
    zones = module.params['zones']
    purge_zones = module.params['purge_zones']
    security_group_ids = module.params['security_group_ids']
    security_group_names = module.params['security_group_names']
    health_check = module.params['health_check']
    access_logs = module.params['access_logs']
    subnets = module.params['subnets']
    purge_subnets = module.params['purge_subnets']
    scheme = module.params['scheme']
    connection_draining_timeout = module.params['connection_draining_timeout']
    idle_timeout = module.params['idle_timeout']
    cross_az_load_balancing = module.params['cross_az_load_balancing']
    stickiness = module.params['stickiness']
    wait = module.params['wait']
    wait_timeout = module.params['wait_timeout']
    tags = module.params['tags']
    # Cross-parameter validation that AnsibleModule cannot express directly.
    if state == 'present' and not listeners:
        module.fail_json(msg="At least one listener is required for ELB creation")
    if state == 'present' and not (zones or subnets):
        module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
    if wait_timeout > 600:
        module.fail_json(msg='wait_timeout maximum is 600 seconds')
    # Resolve security-group names to ids (names are not unique across VPCs,
    # so restrict the lookup to the subnets' VPC when subnets are given).
    if security_group_names:
        security_group_ids = []
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
            if subnets: # We have at least one subnet, ergo this is a VPC
                vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
                vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
                filters = {'vpc_id': vpc_id}
            else:
                filters = None
            grp_details = ec2.get_all_security_groups(filters=filters)
            for group_name in security_group_names:
                if isinstance(group_name, string_types):
                    group_name = [group_name]
                group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
                security_group_ids.extend(group_id)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg = str(e))
    elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
                         purge_zones, security_group_ids, health_check,
                         subnets, purge_subnets, scheme,
                         connection_draining_timeout, idle_timeout,
                         cross_az_load_balancing,
                         access_logs, stickiness, wait, wait_timeout, tags,
                         region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
                         **aws_connect_params)
    # check for unsupported attributes for this version of boto
    if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
        module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
    if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
        module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
    if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
        module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
    # Converge to the requested state.
    if state == 'present':
        elb_man.ensure_ok()
    elif state == 'absent':
        elb_man.ensure_gone()
    ansible_facts = {'ec2_elb': 'info'}
    ec2_facts_result = dict(changed=elb_man.changed,
                            elb=elb_man.get_info(),
                            ansible_facts=ansible_facts)
    module.exit_json(**ec2_facts_result)
# Standard module entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
| jbenden/ansible | lib/ansible/modules/cloud/amazon/elb_classic_lb.py | Python | gpl-3.0 | 53,955 | [
"Dalton"
] | a32cc37b52ab51e81ea69c22a51d128ce6fb837fc4fb6d8c21e05b2c275fbe02 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Australian Government, Department of the Environment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
Utility helper functions
'''
#========================================================================================================
# Imports
#========================================================================================================
import openpyxl
import sys, os.path, os, re, struct, glob, shutil,traceback,time,tempfile,copy
import warnings
import tarfile,zipfile
import uuid as _uuid
#========================================================================================================
# Globals
#========================================================================================================
# Date/time formats follow ISO 8601 so formatted timestamps sort lexically.
dateformat='%Y-%m-%d' #ISO 8601
timeformat='%H:%M:%S' #ISO 8601
datetimeformat='%sT%s' % (dateformat,timeformat)
# Encoding used for all unicode<->str conversions in this module.
encoding='utf-8'
iswin=os.name=='nt'#sys.platform[0:3].lower()=='win'#Are we on Windows
# Archive extensions recognised by the /vsitar//vsizip helpers below.
# NOTE(review): 'kmz' lacks a leading dot unlike the others — looks like a
# typo, but FileInfo's substring matching happens to still work; confirm
# before changing.
compressedfiles=('.zip','.tar.gz','.tgz','.tbz', '.tbz2','.tb2','.tar.bz2','.tar','kmz')
#========================================================================================================
#{String Utilities
#========================================================================================================
def encode(string):
    ''' Encode a unicode string
        @type string: C{unicode}
        @param string: Unicode string (None and byte strings pass through)
        @rtype: C{str}
        @return: Encoded string ('' when the input is None)
    '''
    if string is None:
        return ''
    if type(string) is unicode:
        return string.encode(encoding)
    return string
#========================================================================================================
#{Filesystem Utilities
#========================================================================================================
def archivelist(f):
    ''' List files in a tar (inc gzip or bz2 compressed) or zip archive.
        @type f: C{str}
        @param f: archive filepath
        @rtype: C{list}
        @return: archive filelisting as GDAL-style /vsitar or /vsizip paths
                 (empty when ``f`` is neither a tar nor a zip archive)
    '''
    if tarfile.is_tarfile(f):
        # Directories are excluded; only regular members are listed.
        members = [m.name for m in tarfile.open(f, 'r').getmembers() if m.isfile()]
        prefix = '/vsitar'
    elif zipfile.is_zipfile(f):
        # Zip stores directories as zero-size entries; skip them.
        members = [i.filename for i in zipfile.ZipFile(f, 'r').infolist() if i.file_size > 0]
        prefix = '/vsizip'
    else:
        return []
    return [os.sep.join([prefix, normcase(f), m]) for m in members]
def archivefileinfo(f,n):
    ''' Get size and modification-time metadata for one member of a tar
        (inc gzip or bz2 compressed) or zip archive.
        @type f: C{str}
        @param f: archive filepath
        @type n: C{str}
        @param n: archive member name
        @rtype: C{dict}
        @return: archive file member info (empty dict if ``f`` is neither
                 a tar nor a zip archive)
    '''
    archiveinfo={}
    if tarfile.is_tarfile(f):
        afi = tarfile.open(f,'r').getmember(n)
        archiveinfo['size']=afi.size
        archiveinfo['datemodified']=time.strftime(datetimeformat, time.localtime(afi.mtime))
        #archiveinfo['ownerid']=afi.uid #Use the owner of the archive instead
        #archiveinfo['ownername']=afi.uname
    elif zipfile.is_zipfile(f):
        afi = zipfile.ZipFile(f,'r').getinfo(n)
        archiveinfo['size']=afi.file_size
        # ZipInfo.date_time is a 6-tuple; pad with (wday, yday, isdst) zeros
        # to form the 9-item sequence time.strftime() expects.
        # NOTE(review): zip timestamps have 2-second resolution and no
        # timezone — assumed acceptable here.
        archiveinfo['datemodified']=time.strftime(datetimeformat, list(afi.date_time)+[0,0,0])
    return archiveinfo
def compressed_file_exists(path,testfile=True):
    ''' Check whether /vsi...\path_to_archive\folder\file exists.
        Alternatively, only check if the archive exists on the file system.
        @type path: C{str}
        @param path: VSI filepath (/vsi...\path_to_archive\folder\file)
        @type testfile: C{bool}
        @param testfile: If True, check if file exists in archive. If False, only check if the archive exists on the file system.
        @rtype: C{bool}
        @return: Returns True or False
    '''
    # Strip the 8-char '/vsitar/' or '/vsizip/' prefix, then walk up the
    # path until a component that is an actual archive file is found.
    p=os.path.split(path[8:])[0]
    while p:
        # BUG FIX: the original condition was
        #   os.path.exists(p) and tarfile.is_tarfile(p) or zipfile.is_zipfile(p)
        # 'and' binds tighter than 'or', so zipfile.is_zipfile() was called on
        # paths that may not exist (IOError) and on directories.
        if os.path.exists(p) and (tarfile.is_tarfile(p) or zipfile.is_zipfile(p)):
            if not testfile:
                return True
            return path in archivelist(p)
        p=os.path.split(p)[0]
    return False
def runcmd(cmd, format='s'):
    ''' Run a command
        @type cmd: C{str}
        @param cmd: Command (inc arguments) to run
        @type format: C{str}
        @param format: 's' to return stdout/stderr as strings, 'l' as lists of lines
        @rtype: C{tuple}
        @return: Returns (exit_code,stdout,stderr)
        @raise TypeError: if format is not 's' or 'l'
    '''
    import subprocess
    proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    if format.lower() == 's': #string output
        stdout,stderr=proc.communicate()
    #elif format.lower() == 'f': #file object output #doesn't flush IO buffer, causes python to hang
    #    stdout,stderr=proc.stdout,proc.stderr
    elif format.lower() == 'l': #list output
        stdout,stderr=proc.stdout.readlines(),proc.stderr.readlines()
    else:
        # BUG FIX: corrected the "fomat" typo in the message and switched the
        # raise to call form, which is valid in both Python 2 and 3.
        raise TypeError("format argument must be in ['s','l'] (string or list format)")
    exit_code=proc.wait()
    return exit_code,stdout,stderr
def which(name, returnfirst=True, flags=os.F_OK | os.X_OK, path=None):
    ''' Search PATH for executable files with the given name.
        On newer versions of MS-Windows, the PATHEXT environment variable will be
        set to the list of file extensions for files considered executable. This
        will normally include things like ".EXE". This fuction will also find files
        with the given name ending with any of these extensions.
        On MS-Windows the only flag that has any meaning is os.F_OK. Any other
        flags will be ignored.
        Derived mostly from U{http://code.google.com/p/waf/issues/detail?id=531} with
        additions from Brian Curtins patch - U{http://bugs.python.org/issue444582}
        @type name: C{str}
        @param name: The name for which to search.
        @type returnfirst: C{boolean}
        @param returnfirst: Return the first executable found.
        @type flags: C{int}
        @param flags: Arguments to U{os.access<http://docs.python.org/library/os.html#os.access>}.
        @type path: C{str}
        @param path: Optional search path (os.pathsep separated); defaults to $PATH.
        @rtype: C{str}/C{list}
        @return: Full path to the first matching file found or a list of the full paths to all files found,
                 in the order in which they were found.
    '''
    result = []
    # Materialise as a list: a filter() iterator would be exhausted after the
    # first directory on Python 3.
    exts = list(filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)))
    if not path:
        path = os.environ.get("PATH", os.defpath)
    # BUG FIX: the original computed ``path`` above but then iterated
    # os.environ['PATH'] directly, so a caller-supplied search path was
    # silently ignored.
    for p in path.split(os.pathsep):
        p = os.path.join(p, name)
        if os.access(p, flags):
            if returnfirst:return p
            else:result.append(p)
        for e in exts:
            pext = p + e
            if os.access(pext, flags):
                if returnfirst:return pext
                else:result.append(pext)
    return result
def exists(f,returnpath=False):
    ''' A case insensitive file existence checker
        @type f: C{str}
        @param f: The filepath to check.
        @type returnpath: C{boolean}
        @param returnpath: Return the case sensitive path.
        @rtype: C{boolean}/C{(str,boolean)}
        @return: True/False, optionally full path to the case sensitive path
    '''
    if iswin:#Windows is case insensitive anyways
        if returnpath:return os.path.exists(f),f
        else:return os.path.exists(f)
    path,name=os.path.split(os.path.abspath(f))
    try:
        files = os.listdir(path)
    except OSError:
        # Parent directory missing: the file cannot exist.
        if returnpath:return False,None
        else:return False
    for candidate in files:
        # BUG FIX: the original used re.search(candidate, name, re.I), which
        # treated directory entries as regex patterns (breaking on names with
        # special characters) and accepted partial matches. Compare the whole
        # names case-insensitively instead.
        if candidate.lower()==name.lower():
            if returnpath:return True,os.path.join(path,candidate)
            else:return True
    if returnpath:return False,None
    else:return False
def readbinary(data,offset, start, stop):
    ''' Read binary data
        @type data: C{str}
        @param data: data read from binary file
        @type offset: C{int}
        @param offset: Number of bytes to skip
        @type start: C{int}
        @param start: Byte to start reading from (from offset, not beginning of data, 1 based)
        @type stop: C{int}
        @param stop: Byte to stop reading at (from offset, not beginning of data, inclusive)
        @rtype: C{str}
        @return: String with surrounding whitespace stripped
    '''
    # The original struct.unpack('s'*N, ...) round-trip was a no-op: unpacking
    # N single-byte fields and re-joining them just reproduces the slice.
    return data[offset+start-1:offset+stop].strip()
def readascii(data,offset,start,stop):
    ''' Read ASCII data
        @type data: C{str}
        @param data: data read from ASCII file
        @type offset: C{int}
        @param offset: Number of characters to skip
        @type start: C{int}
        @param start: Character to start reading from (from offset, not beginning of data, 1 based)
        @type stop: C{int}
        @param stop: Character to stop reading at (from offset, not beginning of data, inclusive)
        @rtype: C{str}
        @return: String with surrounding whitespace stripped
    '''
    begin = start + offset - 1
    end = stop + offset
    return data[begin:end].strip()
def ByteOrder():
    ''' Determine byte order of host machine.
        @rtype: C{str}
        @return: 'LSB' (little-endian) or 'MSB' (big-endian)
        @raise Exception: if the byte order is neither
    '''
    from struct import pack
    if pack('<h', 1) == pack('=h',1):
        return 'LSB'
    elif pack('>h', 1) == pack('=h',1):
        return 'MSB'
    else:
        # BUG FIX: the original used the Python-2-only comma form
        # (raise Exception,'...'), a syntax error on Python 3; the call form
        # behaves identically on both.
        raise Exception('Unknown byte order')
def _WinFileOwner(filepath):
    ''' Resolve a file's owner on Windows.

        Reads the shell's "Owner" detail column via the Shell.Application COM
        object, then resolves the account's full name through the Win32
        networking API. Best effort: all failures are swallowed and default
        values returned.
        @type filepath: C{str}
        @param filepath: Path to file
        @rtype: C{tuple}
        @return: (ownerid, ownername); ownerid is '' and ownername is
                 'No user match' when lookups fail
    '''
    import pywintypes
    import pythoncom
    import win32com.client
    import win32net
    import win32netcon
    OWNERID=(8,10) # seems to be 8 on XP, 10 on Win7
    try:
        d=os.path.split(filepath)
        oShell = win32com.client.Dispatch("Shell.Application")
        oFolder = oShell.NameSpace(d[0])
        # The "Owner" column index differs between Windows versions; try each
        # known index until one yields a value.
        for oid in OWNERID:
            ownerid=str(oFolder.GetDetailsOf(oFolder.parsename(d[1]), oid))
            if ownerid:break
        # Owner is usually "DOMAIN\\user"; split domain off when present.
        try:domain,ownerid=ownerid.split('\\')
        except:domain,ownerid=None,ownerid.split('\\')[-1]
    except: domain,ownerid=None,''
    #Too slow...
    ##oWMI = win32com.client.GetObject(r"winmgmts:\\.\root\cimv2")
    ##qry = "Select * from Win32_UserAccount where NAME = '%s'" % ownerid
    ##qry = oWMI.ExecQuery(qry)
    ##if qry.count > 0:
    ##for result in qry:
    ##    ownername=str(result.FullName)
    ##    break
    ##else: ownername='No user match'
    #Much quicker...
    try:
        # Prefer a domain controller for the account lookup when one exists.
        dc=win32net.NetServerEnum(None,100,win32netcon.SV_TYPE_DOMAIN_CTRL)
        dcname=r'\\'+dc[0][0]['name']
    except:
        try:dcname=win32net.NetGetDCName()
        except:dcname=None
    try:
        if dcname:
            ownername=win32net.NetUserGetInfo(dcname,ownerid,2)['full_name']
        else:
            ownername=win32net.NetUserGetInfo(None,ownerid,2)['full_name']
    except: ownername='No user match'
    return ownerid,ownername
def _NixFileOwner(uid):
import pwd
pwuid=pwd.getpwuid(uid)
ownerid = pwuid[0]
ownername = pwuid[4]
return ownerid,ownername
def FileInfo(filepath):
    ''' File information.
        @type filepath: C{str}
        @param filepath: Path to file (plain path or GDAL-style /vsitar//vsizip path)
        @rtype: C{dict}
        @return: Dictionary containing file: size, datemodified, datecreated, dateaccessed, ownerid & ownername
        @raise IOError: if a plain (non /vsi) path does not exist
        @note: best effort — the C{finally: return} below swallows any error
               raised while gathering metadata and returns a partially
               filled dict instead.
    '''
    fileinfo = {
        'size':0,
        'datemodified':'',
        'datecreated': '',
        'dateaccessed':'',
        'filepath':'',
        'guid':''
    }
    if not os.path.exists(filepath) and filepath[:4].lower()!= '/vsi':
        raise IOError('File not found')
    try:
        if filepath[:4].lower() == '/vsi':
            # Virtual archive path: strip the /vsitar//vsizip prefix to recover
            # the real archive path and the member name inside it.
            f=filepath.replace('/vsitar/','').replace('/vsitar\\','')
            f=f.replace('/vsizip/','').replace('/vsizip\\','')
            for ext in compressedfiles:
                if ext in f.lower():
                    f=f.split(ext)
                    archive=f[0]+ext
                    filename=ext.join(f[1:]).strip('\\/')
                    fileinfo.update(archivefileinfo(archive,filename))
                    break
            # NOTE(review): if no extension matched, 'archive' is unbound here
            # and the resulting NameError is swallowed by the finally clause.
            filestat = os.stat(archive)
            fileinfo['filename']=os.path.basename(filename)
            fileinfo['filepath']=filepath
            fileinfo['datecreated']=time.strftime(datetimeformat, time.localtime(filestat.st_ctime))
            fileinfo['dateaccessed']=time.strftime(datetimeformat, time.localtime(filestat.st_atime))
            fileinfo['guid']=uuid(filepath)
            filepath=archive
        else:
            filepath=normcase(realpath(filepath))
            #filepath=realpath(filepath)
            filestat = os.stat(filepath)
            fileinfo['filename']=os.path.basename(filepath)
            fileinfo['filepath']=filepath
            fileinfo['size']=filestat.st_size
            fileinfo['datemodified']=time.strftime(datetimeformat, time.localtime(filestat.st_mtime))
            fileinfo['datecreated']=time.strftime(datetimeformat, time.localtime(filestat.st_ctime))
            fileinfo['dateaccessed']=time.strftime(datetimeformat, time.localtime(filestat.st_atime))
            fileinfo['guid']=uuid(filepath)
        if not fileinfo.get('ownerid'):
            # Owner lookup is platform specific.
            if iswin:
                ownerid,ownername=_WinFileOwner(filepath)
            else:
                ownerid,ownername=_NixFileOwner(filestat.st_uid)
            fileinfo['ownerid']=ownerid
            fileinfo['ownername']=ownername
    finally:return fileinfo
def uuid(filepath):
    ''' Generate a uuid reproducible based on filename
        @type filepath: C{str}
        @param filepath: Path to file
        @rtype: C{str}
        @return: uuid (stable for the same canonicalised path)
    '''
    # Canonicalise the path first so equivalent spellings of the same file
    # always hash to the same uuid3 value.
    canonical = normcase(uncpath(realpath(filepath)))
    #canonical = uncpath(realpath(filepath))
    return str(_uuid.uuid3(_uuid.NAMESPACE_DNS, canonical))
def uncpath(filepath):
    ''' Convert file path to UNC.
        @type filepath: C{str}
        @param filepath: Path to file (or an iterable of paths)
        @rtype: C{str}
        @return: UNC filepath (if on Windows); input returned unchanged elsewhere
    '''
    #if sys.platform[0:3].lower()=='win':
    if iswin:
        import win32wnet
        if hasattr(filepath,'__iter__'): #Is iterable
            uncpath=[]
            for path in filepath:
                # WNetGetUniversalName raises for local (non-mapped) paths;
                # fall back to the path itself in that case.
                try: uncpath.append(normcase(win32wnet.WNetGetUniversalName(path)))
                except: uncpath.append(normcase(path)) #Local path
                # NOTE(review): the iterable branch normcases its results but
                # the scalar branch below does not — looks intentional per the
                # commented-out variant, but verify.
                #try: uncpath.append(win32wnet.WNetGetUniversalName(path))
                #except: uncpath.append(path) #Local path
        else:
            try: uncpath=win32wnet.WNetGetUniversalName(filepath)
            except: uncpath=filepath #Local path
    else:uncpath=filepath
    return uncpath
def normcase(filepath):
    ''' Normalize case of pathname. Makes all characters lowercase and all slashes into backslashes.
        @type filepath: C{str/list}
        @param filepath: Path to file/s
        @rtype: C{str/list}
        @return: Path to file/s
    '''
    if not hasattr(filepath, '__iter__'):
        return os.path.normcase(filepath)
    # Iterable of paths: normalise each one.
    return list(map(os.path.normcase, filepath))
def normpath(filepath):
    ''' Normalize path, eliminating double slashes, etc.
        @type filepath: C{str/list}
        @param filepath: Path to file/s
        @rtype: C{str/list}
        @return: Path to file/s
    '''
    if not hasattr(filepath, '__iter__'):
        return os.path.normpath(filepath)
    # Iterable of paths: normalise each one.
    return list(map(os.path.normpath, filepath))
def realpath(filepath):
    ''' Return the absolute version of a path.
        @type filepath: C{str/list}
        @param filepath: Path to file/s
        @rtype: C{str/list}
        @return: Path to file/s
        @note: os.path.realpath/os.path.abspath returns unexpected results on windows if filepath[-1]==':'
    '''
    if hasattr(filepath,'__iter__'): #Is iterable
        if iswin:
            results=[]
            for f in filepath:
                if f[-1]==':':f+='\\'
                results.append(os.path.realpath(f))
            # BUG FIX: the original built this list but fell through without
            # returning it, so Windows callers passing a list got None back.
            return results
        else:return [os.path.realpath(f) for f in filepath]
    else:
        if iswin and filepath[-1]==':':filepath+='\\'
        return os.path.realpath(filepath)
def checkExt(filepath,ext):
    ''' Check a file has an allowed extension or apply a default extension if it has none.
        @type filepath: C{str}
        @param filepath: Path to file
        @type ext: C{[str,...,str]}
        @param ext: Allowed file extensions, ext[0] is the default
        @rtype: C{str}
        @return: Path to file with updated extension
    '''
    base, current = os.path.splitext(filepath)
    if current in ext:
        return filepath
    # Not an allowed extension: replace it with the default one.
    return base + ext[0]
def volname(path):
    ''' Get the volume label for a CD/DVD
        @type path: C{str}
        @param path: Disc path
        @rtype: C{str}
        @return: Volume label, or None if it cannot be determined
        @note: best effort — the C{finally: return} swallows any lookup error
               and returns None instead.
    '''
    volname=None
    try:
        #if sys.platform[0:3].lower()=='win':
        if iswin:
            import win32api
            drive=os.path.splitdrive(path)[0]
            if drive[-1]!='\\':drive+='\\'
            # NOTE(review): 'volinfo' is unbound when 'drive' is empty; the
            # resulting NameError is swallowed by the finally clause.
            if drive: volinfo=win32api.GetVolumeInformation(drive)
            if volinfo[4] in ['CDFS','UDF']:volname=volinfo[0]
        else:
            # get the device from mount point, then ask volname(1) for its label
            # BUG FIX: was 'utilities.runcmd' — there is no 'utilities' import
            # in this module, so this branch always raised NameError.
            exit_code,stdout,stderr=runcmd('df '+path)
            if exit_code == 0:
                device=stdout.split('\n')[1].split()[0]
                exit_code,stdout,stderr=runcmd('volname '+device)
                if exit_code == 0:volname=stdout.strip()
    finally:
        return volname
def writable(filepath):
    # A location is considered writable when a temporary file can actually be
    # created in it (catches read-only mounts and permission problems alike).
    target = filepath if os.path.isdir(filepath) else os.path.dirname(filepath)
    try:
        handle = tempfile.TemporaryFile(dir=target)
    except:
        return False
    del handle
    return True
class rglob:
    '''A recursive/regex enhanced glob
    adapted from os-path-walk-example-3.py - http://effbot.org/librarybook/os-path.htm

    Iteration uses the legacy ``__getitem__`` sequence protocol: a ``for``
    loop calls ``__getitem__`` with increasing indices until IndexError
    propagates (when the directory stack is exhausted).
    '''
    def __init__(self, directory, pattern="*", regex=False, regex_flags=0, recurse=True, archive=False):
        ''' @type directory: C{str}
            @param directory: Path to xls file
            @type pattern: C{type}
            @param pattern: Regular expression/wildcard pattern to match files against
            @type regex: C{boolean}
            @param regex: Use regular expression matching (if False, use fnmatch)
                          See U{http://docs.python.org/library/re.html}
            @type regex_flags: C{int}
            @param regex_flags: Flags to pass to the regular expression compiler.
                                See U{http://docs.python.org/library/re.html}
            @type recurse: C{boolean}
            @param recurse: Recurse into the directory?
            @type archive: C{boolean}
            @param archive: List files in compressed archives? Archive be supported by the zipfile and tarfile modules. Note: this slows things down considerably....
        '''
        # stack holds directories (and archives) still to be walked.
        self.stack = [directory]
        self.pattern = pattern
        self.regex = regex
        self.recurse = recurse
        self.archive = archive
        self.regex_flags = regex_flags
        # files/index form a cursor over the current directory's listing.
        self.files = []
        self.index = 0
    def __getitem__(self, index):
        # The passed index is ignored; internal cursor state drives iteration.
        while 1:
            try:
                file = self.files[self.index]
                self.index = self.index + 1
            except IndexError:
                # pop next directory from stack
                # (stack.pop() raising IndexError here ends the iteration)
                self.directory = normcase(self.stack.pop())
                #self.directory = self.stack.pop()
                try:
                    self.files = os.listdir(self.directory)
                    self.index = 0
                except:
                    # Not listable as a directory: maybe it is an archive.
                    if self.archive:
                        try:
                            self.files = archivelist(self.directory)
                            self.index = 0
                        except:pass
            else:
                # got a filename
                fullname = os.path.join(self.directory, file)
                # Classify the entry defensively; any stat failure demotes it.
                try:islink=os.path.islink(fullname)
                except:islink=False
                try:isdir=os.path.isdir(fullname) and not islink
                except:isdir=False
                try:isarchive=(not islink and not isdir) and (tarfile.is_tarfile(fullname) or zipfile.is_zipfile(fullname))
                except:isarchive=False
                try:isfile=((not isdir and not isarchive and not islink) and os.path.isfile(fullname)) or (tarfile.is_tarfile(self.directory) or zipfile.is_zipfile(self.directory))
                except:isfile=False
                if isdir and self.recurse:
                    self.stack.append(fullname)
                elif isarchive and self.archive and os.path.exists(fullname):
                    # Archives are pushed like directories and expanded above.
                    self.stack.append(fullname)
                elif isfile:
                    # Match against the basename only, not the full path.
                    if self.regex:
                        import re
                        if re.search(self.pattern,file,self.regex_flags):
                            return fullname
                    else:
                        import fnmatch
                        if fnmatch.fnmatch(file, self.pattern):
                            return fullname
#========================================================================================================
#{Process Utilities
#========================================================================================================
def isrunning(pid):
    # Probe whether a process id is alive without affecting the process.
    if hasattr(os, 'kill'):
        # Sending signal 0 performs existence/permission checks only.
        try:
            os.kill(pid, 0)
        except:
            return False
        return True
    elif iswin:
        import win32process
        try:
            return pid in win32process.EnumProcesses()
        except:
            return False
#========================================================================================================
#{Exception Utilities
#========================================================================================================
def ExceptionInfo(maxTBlevel=0):
    '''Return a one-line "Type: message" description of the most recent
    exception, followed by a formatted traceback of up to maxTBlevel frames
    when maxTBlevel > 0.'''
    cls, exc, trbk = sys.exc_info()
    name = cls.__name__
    if maxTBlevel <= 0:
        return '%s: %s' % (name, str(exc))
    #return '%s: %s\nTraceback: %s' % (name, str(exc), FormatTraceback(trbk, maxTBlevel))
    return '%s: %s\n%s' % (name, str(exc), FormatTraceback(trbk, maxTBlevel))
def FormatTraceback(trbk, maxTBlevel):
    '''Render up to maxTBlevel frames of a traceback object as one string.'''
    frames = traceback.format_tb(trbk, maxTBlevel)
    return 'Traceback (most recent call last): ' + ''.join(frames)
#========================================================================================================
#{Excel Utilities
#========================================================================================================
class ExcelWriter:
''' A simple spreadsheet writer'''
def __init__(self,xlsx,fields=[],update=False, sort = True):
''' A simple spreadsheet writer.
@type xlsx: C{str}
@param xlsx: Path to xlsx file
@type fields: C{list}
@param fields: List of column/field headers
'''
if sort:fields.sort()
self._file=xlsx
self._tempfile=""
self._fields=fields
self._sheets=[]
self._rows=1 #row index
self._cols={} #dict of col indices
self._heading = openpyxl.styles.Style(font=openpyxl.styles.Font(bold=True))
if update and os.path.exists(xlsx):
self._tempfile=os.path.join(tempfile.mkdtemp(),os.path.basename(xlsx))
shutil.copy(xlsx, self._tempfile)
self._wb=openpyxl.load_workbook(self._tempfile)
self._sheets=self._wb.worksheets
self._wb.encoding=encoding #
self._ws=self._sheets[0]
self._rows=self._ws.max_row-1
#Check if all fields exist, add them if not
ws=self._sheets[0]
fields=[encode(c.value) for c in self._sheets[0].rows[0]]
extrafields=[f for f in self._fields if f not in fields]
col=len(fields)
if extrafields:
for ws in self._sheets:
#self._rows+=ws.max_row-1
row=ws.rows[0]
for i,field in enumerate(extrafields):
#row[col+i].value=field
ws.cell(row=1, column=col+i+1).value = field
fields+=extrafields
#self._wb.save(self._file)
self._fields=fields
else:
if os.path.exists(xlsx):os.remove(xlsx)
self._wb = openpyxl.Workbook(encoding=encoding)
self._sheets = self._wb.worksheets
self._ws = self._sheets[0]
self._rows = 0
self._addheader(self._ws)
self._wb.save(self._file)
#fs=set(self._fields) !!! set(list) reorders the list!!!
fs=[]
for f in self._fields:
if f not in fs:fs.append(f)
self._cols=dict(zip(fs,[self.__getcol__(self._fields,f) for f in fs]))
def __getcol__(self,lst,val):
i = -1
cols=[]
try:
while 1:
i = list(lst).index(val, i+1)
cols.append(i)
except ValueError:
pass
return cols
    def _addsheet(self):
        # Start a fresh worksheet (used when the 1,048,576-row xlsx sheet
        # limit is reached) and write the header row into it.
        self._ws = self._wb.create_sheet()
        self._sheets=self._wb.worksheets
        self._addheader(self._ws)
        self._rows = 0
    def _addheader(self, ws):
        # Write the bold field-name header into row 1 of the given worksheet.
        for i,field in enumerate(self._fields):
            ws.cell(row=1, column=i+1).value = field
            ws.cell(row=1, column=i+1).style = self._heading
def _writevalue(self,row,col,value,ws=None):
''' Write a value to a cell
@type col: C{int}
@param col: column index, 0 based
@type row: C{int}
@param row: row index, 0 based
@type value: C{int/str}
@param value: value to write
'''
if not ws:ws=self._ws
if isinstance(value,str):value=value.decode(encoding)
if isinstance(value,basestring) and len(value) > 32767:
value=value[:32767]
warnings.warn('The "%s" field is longer than 32767 characters and has been truncated.'%self._fields[field])
ws.cell(row=row+1, column=col+1).value = value
    def WriteRecord(self,data):
        ''' Append a record after the last written row.
            @type data: C{dict} or C{list}
            @param data: Dict mapping column headers to values, or a zipped
                         list of (header, value) pairs (used when a header
                         occurs in more than one column)
        '''
        dirty=False
        if self._rows > 1048575:
            # xlsx sheets max out at 1,048,576 rows; roll over to a new sheet.
            self._addsheet()
        cols=copy.deepcopy(self._cols) #make a copy to alter
        # A zipped list of pairs compares unequal to dict(data); that is how
        # the two accepted input shapes are told apart.
        if data!=dict(data):
            fields,values = zip(*data)
            for i,field in enumerate(fields):
                value=values[i]
                if field in self._fields and value not in ['',None,False]:#0 is valid
                    # pop(0) walks through duplicate columns of repeated fields.
                    try:col=cols[field].pop(0)
                    except:continue
                    self._writevalue(self._rows+1, col,value)
                    dirty=True
        else:
            for field in data:
                if field in self._fields and data[field] not in ['',None,False]:#0 is valid
                    self._writevalue(self._rows+1, self._cols[field][0],data[field])
                    dirty=True
        # Only advance the row cursor when at least one cell was written.
        if dirty:
            self._rows+=1
        #self._wb.save(self._file)
def UpdateRecord(self,data,row):
''' Update an existing record
@type data: C{dict} or C{list}
@param data: Dict containing column headers (dict.keys()) and values (dict.values()) or zipped list
@type row: C{int}
@param row: Row number of existing record
'''
dirty=False
s=row/1048575
r=row-s*1048575
#ws=self._wb.get_sheet(s)
ws=self._wb.worksheets[s]
cols=copy.deepcopy(self._cols) #make a copy to alter
if data!=dict(data):
fields,values = zip(*data)
for i,field in enumerate(fields):
value=values[i]
if field in self._fields and value not in ['',None,False]:#0 is valid
try:col=cols[field].pop(0)
except:continue
self._writevalue(r+1, col,values[i], ws)
dirty=True
else:
for field in data:
if field in self._fields and data[field] not in ['',None,False]:#0 is valid
self._writevalue(r+1, self._cols[field][0],data[field], ws)
dirty=True
#if dirty:self._wb.save(self._file)
def save(self):
if os.path.exists(self._tempfile):
self._wb.save(self._tempfile)
else:
self._wb.save(self._file)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
#try:
if exc_type is None: #Don't save if we've crashed.
self.save()
del self._ws
del self._wb
if os.path.exists(self._tempfile):
print self._tempfile
shutil.copy(self._tempfile, self._file)
os.unlink(self._tempfile)
os.rmdir(os.path.dirname(self._tempfile))
#except:pass
class ExcelReader:
    '''A simple spreadsheet reader'''
    def __init__(self,xlsx,returntype=dict):
        ''' A simple spreadsheet reader.
            @type xlsx: C{str}
            @param xlsx: Path to xlsx file
            @type returntype: C{type}
            @param returntype: dict or list
        '''
        self._wb=openpyxl.load_workbook(xlsx, use_iterators = True)
        self._returntype=returntype
        self._sheets=self._wb.worksheets
        self._headers=[]
        self._rows=[]
        #Total record count = all rows minus one heading row per sheet.
        self.records=0-len(self._sheets)
        for ws in self._sheets:
            self.records+=ws.max_row
            rows=ws.iter_rows()
            row=rows.next() #consume the heading row
            headers=[encode(c.value) for c in row]
            self._rows.append(rows)
            self._headers.append(headers)
    def __getitem__(self, index):
        #NOTE(review): `index` only selects the sheet; rows are consumed
        #sequentially from that sheet's iterator, so records must be read in
        #order - true random access is not supported here.
        i=index/1048575
        j=index-i*1048575
        ws=self._sheets[i]
        headers=self._headers[i]
        rows=self._rows[i]
        row=rows.next()
        ### Little kludge for port to openpyxl
        cells=[str(encode(c.value)).replace('_x000D_','') for c in row]
        #cells=[str(encode(c.value)) for c in row]
        ###
        if self._returntype is dict:
            return dict(zip(headers,cells))
        else:
            return zip(headers,cells)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        #Bug fix: __exit__ previously took no exception arguments, so using
        #ExcelReader in a `with` statement raised TypeError on exit.
        del self._headers
        del self._rows
        del self._sheets
        del self._wb
#}
| ssutee/metageta | metageta/utilities.py | Python | mit | 32,330 | [
"Brian"
] | adac841428e1f8a8516697d4fad9be0a889c059d27e5532929afbca46c66675d |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: rename the ``generate_percent`` column of
    ``profiles_indicator`` to ``display_percent``."""
    def forwards(self, orm):
        """Apply the migration: rename generate_percent -> display_percent."""
        db.rename_column('profiles_indicator', 'generate_percent', 'display_percent', )
    def backwards(self, orm):
        """Reverse the migration: rename display_percent -> generate_percent."""
        db.rename_column('profiles_indicator', 'display_percent', 'generate_percent', )
    # Auto-generated South model freeze - do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profiles.datadomain': {
            'Meta': {'object_name': 'DataDomain'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'})
        },
        'profiles.datasource': {
            'Meta': {'object_name': 'DataSource'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'profiles.geolevel': {
            'Meta': {'object_name': 'GeoLevel'},
            'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
        },
        'profiles.georecord': {
            'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
            'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
            'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
            'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
        },
        'profiles.indicator': {
            'Meta': {'object_name': 'Indicator'},
            'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
            'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'long_definition': ('django.db.models.fields.TextField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'short_definition': ('django.db.models.fields.TextField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
        },
        'profiles.indicatordata': {
            'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'IndicatorData'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
            'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'})
        },
        'profiles.indicatordomain': {
            'Meta': {'object_name': 'IndicatorDomain'},
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
        },
        'profiles.indicatorpart': {
            'Meta': {'object_name': 'IndicatorPart'},
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
            'formula': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
        },
        'profiles.time': {
            'Meta': {'object_name': 'Time'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
        }
    }
    complete_apps = ['profiles']
| ProvidencePlan/Profiles | communityprofiles/profiles/oldmigrations/0018_change_generate_percent_to_display_percent.py | Python | mit | 10,591 | [
"MOE"
] | 6240100c677780a9ea9e3a2690ecfc648b2d19176af9fd1be843f59d9462713e |
"""
Generating Mock Objects with IRAF
=================================
This script provides a class that can be used to generate objects such as galaxies using IRAF.
:requires: PyRAF
:requires: PyFITS
:requires: NumPy
:author: Sami-Matias Niemi
:contact: smn2@mssl.ucl.ac.uk
:version: 0.1
"""
from __future__ import print_function
from builtins import object
try:
from pyraf import iraf
from iraf import artdata
except ImportError:
print('Cannot import PyRAF, please install it...')
import numpy as np
import pyfits as pf
import logger as lg
import os, datetime
class generateFakeData(object):
    """
    Generates an image frame with stars and galaxies using IRAF's artdata.
    """
    def __init__(self, log, **kwargs):
        """
        :param log: logger instance used to record settings and progress
        :param kwargs: overrides for any key of the default ``settings`` dict
                       (e.g. ``output``, ``rdnoise``, ``xdim``)
        """
        self.log = log
        # Default simulation parameters; kwargs override these below.
        # NOTE(review): values look tuned for a VIS-like detector
        # (gain, read noise, zero point) - confirm before reuse elsewhere.
        self.settings = dict(dynrange=1e4,
                             gain=3.5,
                             magzero=25.58,
                             exptime=565.0,
                             rdnoise=4.5,
                             background=0.049,
                             xdim=4096,
                             ydim=4132,
                             star='gaussian',
                             beta=2.5,
                             radius=0.18,
                             ar=1.0,
                             pa=0.0,
                             poisson=iraf.yes,
                             egalmix=0.4,
                             output='image.fits')
        self.settings.update(kwargs)
        # Record the effective settings so each run is reproducible from the log.
        for key, value in self.settings.items():
            self.log.info('%s = %s' % (key, value))
    def createStarlist(self, nstars=20, output='stars.dat'):
        """
        Generates an ascii file with uniform random x and y positions.
        The magnitudes of stars are taken from an isotropic and homogeneous power-law distribution.
        The output ascii file contains the following columns: xc yc magnitude
        :param nstars: number of stars to include
        :type nstars: int
        :param output: name of the output ascii file
        :type output: str
        """
        self.log.info('Generating a list of stars; including %i stars to %s' %
                      (nstars, output))
        # iraf.starlist appends rather than overwrites, so remove stale output.
        if os.path.isfile(output):
            os.remove(output)
        iraf.starlist(output,
                      nstars,
                      xmax=self.settings['xdim'],
                      ymax=self.settings['ydim']) #,
        #minmag=5, maxmag=15)
    def createGalaxylist(self, ngalaxies=150, output='galaxies.dat'):
        """
        Generates an ascii file with uniform random x and y positions.
        The magnitudes of galaxies are taken from an isotropic and homogeneous power-law distribution.
        The output ascii file contains the following columns: xc yc magnitude model radius ar pa <save>
        :param ngalaxies: number of galaxies to include
        :type ngalaxies: int
        :param output: name of the output ascii file
        :type output: str
        """
        self.log.info(
            'Generating a list of galaxies; including %i galaxies to %s' %
            (ngalaxies, output))
        # iraf.gallist appends rather than overwrites, so remove stale output.
        if os.path.isfile(output):
            os.remove(output)
        iraf.gallist(output,
                     ngalaxies,
                     xmax=self.settings['xdim'],
                     ymax=self.settings['ydim'],
                     egalmix=self.settings['egalmix'],
                     maxmag=23.0,
                     minmag=10)
    def addObjects(self, inputlist='galaxies.dat'):
        """
        Add object(s) from inputlist to the output image.
        Creates the output image if it does not exist yet (output='').
        :param inputlist: name of the input list
        :type inputlist: str
        """
        self.log.info('Adding objects from %s to %s' %
                      (inputlist, self.settings['output']))
        iraf.artdata.dynrange = self.settings['dynrange']
        iraf.mkobjects(self.settings['output'],
                       output='',
                       ncols=self.settings['xdim'],
                       nlines=self.settings['ydim'],
                       background=self.settings['background'],
                       objects=inputlist,
                       xoffset=0.0,
                       yoffset=0.0,
                       star=self.settings['star'],
                       radius=self.settings['radius'],
                       beta=self.settings['beta'],
                       ar=self.settings['ar'],
                       pa=self.settings['pa'],
                       distance=1.0,
                       exptime=self.settings['exptime'],
                       magzero=self.settings['magzero'],
                       gain=self.settings['gain'],
                       rdnoise=self.settings['rdnoise'],
                       poisson=self.settings['poisson'],
                       seed=2,
                       comments=iraf.yes)
    def maskCrazyValues(self, filename=None):
        """
        For some reason mkobjects sometimes adds crazy values to an image.
        This method tries to remove those values and set them to more reasonable ones.
        The values > 65k are set to the median of the image.
        :param filename: name of the input file to modify [default = self.settings['output']]
        :type filename: str
        :return: None
        """
        if filename is None:
            filename = self.settings['output']
        fh = pf.open(filename, mode='update')
        hdu = fh[0]
        data = fh[0].data
        # Replace values above the (near uint16 max) threshold with the median.
        msk = data > 65000.
        median = np.median(data)
        data[msk] = median
        # Rescale so the file fits an unsigned 16bit integer representation.
        hdu.scale('int16', '', bzero=32768)
        hdu.header.add_history('Scaled to unsigned 16bit integer!')
        #update the header
        hdu.header.add_history(
            'If questions, please contact Sami-Matias Niemi (smn2 at mssl.ucl.ac.uk).')
        hdu.header.add_history(
            'This file has been created with the VISsim Python Package at %s' %
            datetime.datetime.isoformat(datetime.datetime.now()))
        fh.close()
    def runAll(self, nostars=True):
        """
        Run all methods sequentially.
        :param nostars: when True, stars ARE generated and added first.
                        NOTE(review): the flag name is misleading - the
                        True branch *adds* stars; consider renaming.
        """
        if nostars:
            self.createStarlist()
            self.addObjects(inputlist='stars.dat')
        self.createGalaxylist()
        self.addObjects()
        self.maskCrazyValues()
if __name__ == '__main__':
    log = lg.setUpLogger('generateGalaxies.log')
    log.info('Starting to create fake galaxies')
    # Run 1: full simulation with default detector noise and background.
    fakedata = generateFakeData(log)
    fakedata.runAll()
    #no noise or background
    # Run 2: same objects but a noiseless, background-free frame.
    settings = dict(rdnoise=0.0,
                    background=0.0,
                    output='nonoise.fits',
                    poisson=iraf.no)
    fakedata = generateFakeData(log, **settings)
    fakedata.runAll()
    #postage stamp galaxy
    # Run 3: a small 200x200 noiseless stamp containing a single galaxy
    # taken from a hand-written input list.
    settings = dict(rdnoise=0.0,
                    background=0.0,
                    output='stamp.fits',
                    poisson=iraf.no,
                    xdim=200,
                    ydim=200)
    fakedata = generateFakeData(log, **settings)
    fakedata.addObjects(inputlist='singlegalaxy.dat')
    fakedata.maskCrazyValues('stamp.fits')
    log.info('All done...\n\n\n')
| boada/planckClusters | snippets/generateGalaxies.py | Python | mit | 7,185 | [
"Galaxy",
"Gaussian"
] | d25d504549744d03d79b69424f9f80ef6a82a2ea95f476dd70f38142fa764e60 |
"""This module defines classes used in tvtk code generation,
`SpecialGenerator` defines methods that write out special code for
some of the VTK classes. `HelperGenerator` helps generate the
`tvtk_helper.py` class.
"""
# Author: Prabhu Ramachandran
# Copyright (c) 2004-2007, Enthought, Inc.
# License: BSD Style.
import vtk
# These are relative imports for good reason.
import indenter
from common import get_tvtk_name
######################################################################
# `SpecialGenerator` class.
######################################################################
class SpecialGenerator:
    """Generates special code for some of the TVTK classes.

    For example vtkMatrix4x4 objects can be pickled nicely if the
    elements of the matrix are stored and restored.  So we define a
    `_write_Matrix4x4` method that generates the appropriate code.
    """
    # NOTE: the triple-quoted strings below are emitted verbatim into the
    # generated tvtk class files - they are data, not code executed here.
    def __init__(self, indent):
        """`indent` is a reference to the `Indenter` instance of the
        WrapperGenerator.
        """
        self.indent = indent
    #################################################################
    # `SpecialGenerator` interface.
    #################################################################
    def generate_code(self, node, out):
        """Write the code given the node in the class tree, `node`,
        and output file-like object, `out`.
        """
        self._write_special(node.name, out)
    #################################################################
    # Non-public interface.
    #################################################################
    def _write_special(self, name, out):
        """Given the name of the class, call appropriate method, if
        available.
        """
        # Dispatch by naming convention: a VTK class maps to a
        # `_write_<TVTKName>` method on this generator, if one exists.
        tname = get_tvtk_name(name)
        writer = '_write_%s'%tname
        if hasattr(self, writer):
            getattr(self, writer)(out)
    def _write_InteractorEventRecorder(self, out):
        # This class is a pain because it must always take highest
        # priority, the default value is therefore set to a huge
        # number so that it catches all events first.
        code = '''
        priority = traits.Trait(1.0, traits.Float, traits.Range(0.0, 1.0))
        def _priority_changed(self, old_val, new_val):
            self._do_change(self._vtk_obj.SetPriority,
                            self.priority)
        priority.help = \
        """
        Set/Get the priority at which events are processed. This is used when
        multiple interactor observers are used simultaneously. The default value
        is 0.0 (lowest priority.) Note that when multiple interactor observer
        have the same priority, then the last observer added will process the
        event first. (Note: once the set_interactor() method has been called,
        changing the priority does not effect event processing. You will have
        to set_interactor(_null), change priority, and then set_interactor(iren)
        to have the priority take effect.)
        """
        '''
        out.write(self.indent.format(code))
    def _write_Matrix4x4(self, out):
        # Pickle support plus to/from numpy-array conversion for 4x4 matrices.
        code = """
        def __getstate__(self):
            d = tvtk_base.TVTKBase.__getstate__(self)
            obj = self._vtk_obj
            e = [obj.GetElement(i, j) for i in range(4) for j in range(4)]
            d['elements'] = e
            return d
        def __setstate__(self, dict):
            e = dict.pop('elements')
            tvtk_base.TVTKBase.__setstate__(self, dict)
            self._in_set = 1
            obj = self._vtk_obj
            [obj.SetElement(i, j, e[4*i+j]) for i in range(4) for j in range(4)]
            self._in_set = 0
            self.update_traits()
        def from_array(self, arr):
            '''Set the value of the matrix using the passed
            Numeric array or Python list.
            '''
            obj = self._vtk_obj
            [obj.SetElement(i, j, arr[i,j]) for i in range(4) for j in range(4)]
        def to_array(self):
            '''Return the object as a numpy array.'''
            obj = self._vtk_obj
            e = [obj.GetElement(i, j) for i in range(4) for j in range(4)]
            arr = array_handler.numpy.array(e, dtype=float)
            arr.shape = (4,4)
            return arr
        """
        out.write(self.indent.format(code))
    def _write_Property(self, out):
        # Color is made from the other specified colors.
        code = """
        def __getstate__(self):
            d = tvtk_base.TVTKBase.__getstate__(self)
            if 'color' in d:
                del d['color']
            return d
        def __setstate__(self, dict):
            tvtk_base.TVTKBase.__setstate__(self, dict)
            self.update_traits()
        """
        out.write(self.indent.format(code))
    # vtkLight gets the same pickle treatment as vtkProperty.
    _write_Light = _write_Property
    def _write_Collection(self, out):
        # Python container protocol (len/iter/indexing) for vtkCollection.
        code = """
        def __len__(self):
            return self._vtk_obj.GetNumberOfItems()
        def __iter__(self):
            self._vtk_obj.InitTraversal()
            return self
        def next(self):
            try:
                val = self._vtk_obj.GetNextItem()
            except AttributeError:
                val = self._vtk_obj.GetNextProp()
            if val is None:
                raise StopIteration
            return wrap_vtk(val)
        def __getitem__(self, key):
            obj = self._vtk_obj
            if type(key) != type(1):
                raise TypeError, "Only integers are valid keys."
            ni = obj.GetNumberOfItems()
            if key < 0:
                key = ni + key
            ret = obj.GetItemAsObject(key)
            if ret is None:
                raise IndexError, "Index out of range."
            return wrap_vtk(ret)
        def __setitem__(self, key, val):
            obj = self._vtk_obj
            if type(key) != type(1):
                raise TypeError, "Only integers are valid key."
            ni = obj.GetNumberOfItems()
            if key < 0:
                key = ni + key
            if key < 0 or key >= ni:
                raise IndexError, "Index out of range."
            obj.ReplaceItem(key, deref_vtk(val))
        def __delitem__(self, key):
            obj = self._vtk_obj
            if type(key) != type(1):
                raise TypeError, "Only integers are valid keys."
            ni = obj.GetNumberOfItems()
            if key < 0:
                key = ni + key
            if key < 0 or key >= ni:
                raise IndexError, "Index out of range."
            obj.RemoveItem(key)
        def __repr__(self):
            return repr([repr(x) for x in self])
        def append(self, val):
            self._vtk_obj.AddItem(deref_vtk(val))
        def extend(self, arr):
            obj = self._vtk_obj
            for i in arr:
                obj.AddItem(deref_vtk(i))
        """
        out.write(self.indent.format(code))
    def _write_DataArray(self, out):
        # Sequence protocol plus efficient numpy conversion for vtkDataArray.
        code = """
        def __len__(self):
            return self._vtk_obj.GetNumberOfTuples()
        def __iter__(self):
            obj = self._vtk_obj
            n = obj.GetNumberOfTuples()
            nc = obj.GetNumberOfComponents()
            if nc in [1,2,3,4,9]:
                meth = getattr(obj, 'GetTuple%d'%nc)
                for i in xrange(n):
                    yield meth(i)
            else:
                for i in xrange(n):
                    yield tuple([obj.GetComponent(i, x) for x in range(nc)])
        def _check_key(self, key, n):
            if type(key) not in [int, long]:
                raise TypeError, "Only integers are valid keys."
            if key < 0:
                key = n + key
            if key < 0 or key >= n:
                raise IndexError, "Index out of range."
            return key
        def __getitem__(self, key):
            obj = self._vtk_obj
            n = obj.GetNumberOfTuples()
            key = self._check_key(key, n)
            nc = obj.GetNumberOfComponents()
            if nc in [1,2,3,4,9]:
                return getattr(obj, 'GetTuple%d'%nc)(key)
            else:
                return tuple([obj.GetComponent(key, x) for x in range(nc)])
        def __setitem__(self, key, val):
            obj = self._vtk_obj
            n = obj.GetNumberOfTuples()
            key = self._check_key(key, n)
            nc = obj.GetNumberOfComponents()
            if nc == 1:
                obj.SetValue(key, val)
            elif nc in [2,3,4,9]:
                getattr(obj, 'SetTuple%d'%nc)(key, *val)
            else:
                assert len(val) == nc, \
                       'length of %s != %s.'%(val, nc)
                for x in range(nc):
                    obj.SetComponent(key, x, val[x])
        def __repr__(self):
            obj = self._vtk_obj
            n = obj.GetNumberOfTuples()
            if n <= 10:
                return repr([x for x in self])
            else:
                first, last = self[0], self[-1]
                return '[%s, ..., %s], length = %s'%(first, last, n)
        def append(self, val):
            obj = self._vtk_obj
            nc = obj.GetNumberOfComponents()
            if nc == 1:
                obj.InsertNextTuple1(val)
            elif nc in [2,3,4,9]:
                meth = getattr(obj, 'InsertNextTuple%d'%nc)
                meth(*val)
            else:
                n = obj.GetNumberOfTuples()
                for x in range(nc):
                    obj.InsertComponent(n, x, val[x])
            self.update_traits()
        def extend(self, arr):
            obj = self._vtk_obj
            nc = obj.GetNumberOfComponents()
            if nc == 1:
                for i in arr:
                    obj.InsertNextTuple1(i)
            elif nc in [2,3,4,9]:
                meth = getattr(obj, 'InsertNextTuple%d'%nc)
                for i in arr:
                    meth(*i)
            else:
                n = obj.GetNumberOfTuples()
                for i in range(len(arr)):
                    for x in range(nc):
                        obj.InsertComponent(n+i, x, arr[i][x])
            self.update_traits()
        def from_array(self, arr):
            '''Set the value of the data array using the passed
            Numeric array or Python list.  This is implemented
            efficiently.
            '''
            array_handler.array2vtk(arr, self._vtk_obj)
            self.update_traits()
        def to_array(self):
            '''Return the object as a Numeric array.'''
            return array_handler.vtk2array(self._vtk_obj)
        """
        out.write(self.indent.format(code))
    def _write_Points(self, out):
        # Sequence protocol plus numpy conversion for vtkPoints.
        code = """
        def __len__(self):
            return self._vtk_obj.GetNumberOfPoints()
        def __iter__(self):
            obj = self._vtk_obj
            n = obj.GetNumberOfPoints()
            for i in xrange(n):
                yield obj.GetPoint(i)
        def _check_key(self, key, n):
            ##############################################
            # Allow int and long keys. Fixes GH Issue 173.
            ##############################################
            if not isinstance(key, (int, long)):
                raise TypeError, "Only int and long are valid keys."
            if key < 0:
                key = n + key
            if key < 0 or key >= n:
                raise IndexError, "Index out of range."
            return key
        def __getitem__(self, key):
            obj = self._vtk_obj
            n = obj.GetNumberOfPoints()
            key = self._check_key(key, n)
            return obj.GetPoint(key)
        def __setitem__(self, key, val):
            obj = self._vtk_obj
            n = obj.GetNumberOfPoints()
            key = self._check_key(key, n)
            obj.SetPoint(key, val)
        def __repr__(self):
            obj = self._vtk_obj
            n = obj.GetNumberOfPoints()
            if n <= 10:
                return repr([x for x in self])
            else:
                meth = obj.GetPoint
                return '[%s, ..., %s], length = %s'%(meth(0),
                                                     meth(n-1), n)
        def append(self, val):
            self._vtk_obj.InsertNextPoint(val)
            self.update_traits()
        def extend(self, arr):
            obj = self._vtk_obj
            for i in arr:
                obj.InsertNextPoint(i)
            self.update_traits()
        def from_array(self, arr):
            '''Set the value of the data array using the passed
            Numeric array or Python list.  This is implemented
            efficiently.
            '''
            array_handler.array2vtkPoints(arr, self._vtk_obj)
            self.update_traits()
        def to_array(self):
            '''Return the object as a Numeric array.'''
            return array_handler.vtk2array(self._vtk_obj.GetData())
        """
        out.write(self.indent.format(code))
    def _write_IdList(self, out):
        # Sequence protocol plus numpy conversion for vtkIdList.
        code = """
        def __len__(self):
            return self._vtk_obj.GetNumberOfIds()
        def __iter__(self):
            obj = self._vtk_obj
            n = obj.GetNumberOfIds()
            for i in xrange(n):
                yield obj.GetId(i)
        def _check_key(self, key, n):
            if type(key) != type(1):
                raise TypeError, "Only integers are valid keys."
            if key < 0:
                key = n + key
            if key < 0 or key >= n:
                raise IndexError, "Index out of range."
            return key
        def __getitem__(self, key):
            obj = self._vtk_obj
            n = obj.GetNumberOfIds()
            key = self._check_key(key, n)
            return obj.GetId(key)
        def __setitem__(self, key, val):
            obj = self._vtk_obj
            n = obj.GetNumberOfIds()
            key = self._check_key(key, n)
            obj.SetId(key, val)
        def __repr__(self):
            obj = self._vtk_obj
            n = obj.GetNumberOfIds()
            if n <= 10:
                return repr([x for x in self])
            else:
                meth = obj.GetId
                return '[%s, ..., %s], length = %s'%(meth(0),
                                                     meth(n-1), n)
        def append(self, val):
            self._vtk_obj.InsertNextId(val)
            self.update_traits()
        def extend(self, arr):
            obj = self._vtk_obj
            for i in arr:
                obj.InsertNextId(i)
            self.update_traits()
        def from_array(self, arr):
            '''Set the value of the data array using the passed
            Numeric array or Python list.  This is implemented
            efficiently.
            '''
            array_handler.array2vtkIdList(arr, self._vtk_obj)
            self.update_traits()
        """
        out.write(self.indent.format(code))
    def _write_CellArray(self, out):
        # numpy conversion helpers for vtkCellArray.
        code = """
        def from_array(self, arr):
            '''Set the value of the data array using the passed
            Numeric array or Python list.  This is implemented
            efficiently.
            '''
            array_handler.array2vtkCellArray(arr, self._vtk_obj)
            self.update_traits()
        def to_array(self):
            '''Return the object as a Numeric array.'''
            return array_handler.vtk2array(self._vtk_obj.GetData())
        """
        out.write(self.indent.format(code))
######################################################################
# `HelperGenerator` class.
######################################################################
class HelperGenerator:
    """Writes out the tvtk_helper.py file that makes it easy to use
    tvtk objects efficiently.
    """
    def __init__(self):
        self.indent = indenter.Indent()
    #################################################################
    # `HelperGenerator` interface.
    #################################################################
    def write_prelims(self, out):
        """ Write out the preliminary data."""
        indent = self.indent
        # NOTE(review): vtk_version/vtk_src_version are computed for the
        # %locals() substitution below but no %(...)s placeholder in the
        # template appears to use them - possibly dead code; confirm.
        v = vtk.vtkVersion()
        vtk_version = v.GetVTKVersion()[:3]
        vtk_src_version = v.GetVTKSourceVersion()
        code = """
        import vtk
        from tvtk import tvtk_base
        from tvtk.common import get_tvtk_name, camel2enthought

        # Caches all the classes.
        _cache = {}

        def set_ancestors(klass):
            tmp = klass.__bases__
            if not tmp:
                return
            # Assuming a single inheritance.
            tmp = tmp[0]
            name = tmp.__name__
            while not _cache.has_key(name) and \
                      name not in ['TVTKBase', 'object']:
                _cache[name] = tmp
                tmp = tmp.__bases__[0]
                name = tmp.__name__

        def get_module(fname):
            try:
                mod = __import__('tvtk.custom.%%s'%%fname,
                                 globals(), locals(), [fname])
            except ImportError:
                # This is a local import since the tvtk modules are all
                # inside the tvtk_classes ZIP file and are local to the
                # current module: tvtk_helper.py
                mod = __import__('tvtk.tvtk_classes.%%s'%%fname, globals(), locals(), [fname])
            return mod

        def get_class(name):
            if _cache.has_key(name):
                return _cache[name]
            else:
                fname = camel2enthought(name)
                mod = get_module(fname)
                klass = getattr(mod, name)
                _cache[name] = klass
                set_ancestors(klass)
                return klass

        def wrap_vtk(obj):
            if isinstance(obj, tvtk_base.TVTKBase):
                return obj
            elif isinstance(obj, vtk.vtkObjectBase):
                cached_obj = tvtk_base.get_tvtk_object_from_cache(obj)
                if cached_obj is not None:
                    return cached_obj
                cname = get_tvtk_name(obj.__class__.__name__)
                tvtk_class = get_class(cname)
                return tvtk_class(obj)
            else:
                return obj


        class TVTK(object):
            to_tvtk = staticmethod(wrap_vtk)
            to_vtk = staticmethod(tvtk_base.deref_vtk)
        """%locals()
        out.write(indent.format(code))
        # Subsequent add_class() properties are written inside class TVTK.
        indent.incr()
    def add_class(self, name, out):
        """Add a tvtk class with name, `name` as a property to the
        helper class output file-like object, `out`.
        """
        # Lazy property: the tvtk class is only imported on first access.
        code = """
        %(name)s = property(lambda self: get_class('%(name)s'))
        """%locals()
        out.write(self.indent.format(code))
| liulion/mayavi | tvtk/special_gen.py | Python | bsd-3-clause | 18,831 | [
"VTK"
] | 7885f8eb8d12d3811ce8edec3ae257e8a18cbe4ffda0a2ab6fe1e809eb15c26e |
import math
from datetime import timedelta as delta
from glob import glob
from os import path
import numpy as np
import pytest
import dask
from parcels import AdvectionRK4
from parcels import Field
from parcels import FieldSet
from parcels import JITParticle
from parcels import ParticleFile
from parcels import ParticleSet
from parcels import ScipyParticle
from parcels import Variable
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
def fieldset_from_nemo_3D(chunk_mode):
data_path = path.join(path.dirname(__file__), 'NemoNorthSeaORCA025-N006_data/')
ufiles = sorted(glob(data_path + 'ORCA*U.nc'))
vfiles = sorted(glob(data_path + 'ORCA*V.nc'))
wfiles = sorted(glob(data_path + 'ORCA*W.nc'))
mesh_mask = data_path + 'coordinates.nc'
filenames = {'U': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': ufiles},
'V': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': vfiles},
'W': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': wfiles}}
variables = {'U': 'uo',
'V': 'vo',
'W': 'wo'}
dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'},
'V': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'},
'W': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}}
chs = False
if chunk_mode == 'auto':
chs = 'auto'
elif chunk_mode == 'specific':
chs = {'U': {'depthu': 75, 'depthv': 75, 'depthw': 75, 'y': 16, 'x': 16},
'V': {'depthu': 75, 'depthv': 75, 'depthw': 75, 'y': 16, 'x': 16},
'W': {'depthu': 75, 'depthv': 75, 'depthw': 75, 'y': 16, 'x': 16}}
fieldset = FieldSet.from_nemo(filenames, variables, dimensions, field_chunksize=chs)
return fieldset
def fieldset_from_globcurrent(chunk_mode):
filenames = path.join(path.dirname(__file__), 'GlobCurrent_example_data',
'200201*-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc')
variables = {'U': 'eastward_eulerian_current_velocity', 'V': 'northward_eulerian_current_velocity'}
dimensions = {'lat': 'lat', 'lon': 'lon', 'time': 'time'}
chs = False
if chunk_mode == 'auto':
chs = 'auto'
elif chunk_mode == 'specific':
chs = {'U': {'lat': 16, 'lon': 16},
'V': {'lat': 16, 'lon': 16}}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions, field_chunksize=chs)
return fieldset
def fieldset_from_pop_1arcs(chunk_mode):
filenames = path.join(path.join(path.dirname(__file__), 'POPSouthernOcean_data'), 't.x1_SAMOC_flux.1690*.nc')
variables = {'U': 'UVEL', 'V': 'VVEL', 'W': 'WVEL'}
timestamps = np.expand_dims(np.array([np.datetime64('2000-%.2d-01' % m) for m in range(1, 7)]), axis=1)
dimensions = {'lon': 'ULON', 'lat': 'ULAT', 'depth': 'w_dep'}
chs = False
if chunk_mode == 'auto':
chs = 'auto'
elif chunk_mode == 'specific':
chs = {'i': 8, 'j': 8, 'k': 3, 'w_dep': 3}
fieldset = FieldSet.from_pop(filenames, variables, dimensions, field_chunksize=chs, timestamps=timestamps)
return fieldset
def fieldset_from_swash(chunk_mode):
filenames = path.join(path.join(path.dirname(__file__), 'SWASH_data'), 'field_*.nc')
variables = {'U': 'cross-shore velocity',
'V': 'along-shore velocity',
'W': 'vertical velocity',
'depth': 'time varying depth',
'depth_u': 'time varying depth_u'}
dimensions = {'U': {'lon': 'x', 'lat': 'y', 'depth': 'not_yet_set', 'time': 't'},
'V': {'lon': 'x', 'lat': 'y', 'depth': 'not_yet_set', 'time': 't'},
'W': {'lon': 'x', 'lat': 'y', 'depth': 'not_yet_set', 'time': 't'},
'depth': {'lon': 'x', 'lat': 'y', 'depth': 'not_yet_set', 'time': 't'},
'depth_u': {'lon': 'x', 'lat': 'y', 'depth': 'not_yet_set', 'time': 't'}}
chs = False
if chunk_mode == 'auto':
chs = 'auto'
elif chunk_mode == 'specific':
chs = (1, 7, 4, 4)
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions, mesh='flat', allow_time_extrapolation=True, field_chunksize=chs)
fieldset.U.set_depth_from_field(fieldset.depth_u)
fieldset.V.set_depth_from_field(fieldset.depth_u)
fieldset.W.set_depth_from_field(fieldset.depth)
return fieldset
def fieldset_from_ofam(chunk_mode):
filenames = {'U': path.join(path.dirname(__file__), 'OFAM_example_data', 'OFAM_simple_U.nc'),
'V': path.join(path.dirname(__file__), 'OFAM_example_data', 'OFAM_simple_V.nc')}
variables = {'U': 'u', 'V': 'v'}
dimensions = {'lat': 'yu_ocean', 'lon': 'xu_ocean', 'depth': 'st_ocean',
'time': 'Time'}
chs = False
name_map = {'lon': ['xu_ocean'],
'lat': ['yu_ocean'],
'depth': ['st_edges_ocean', 'st_ocean'],
'time': 'Time'}
if chunk_mode == 'auto':
chs = 'auto'
elif chunk_mode == 'specific':
chs = (1, 60, 50, 100)
return FieldSet.from_netcdf(filenames, variables, dimensions, allow_time_extrapolation=True, field_chunksize=chs, chunkdims_name_map=name_map)
def fieldset_from_mitgcm(chunk_mode):
data_path = path.join(path.dirname(__file__), "MITgcm_example_data/")
filenames = {"U": data_path + "mitgcm_UV_surface_zonally_reentrant.nc",
"V": data_path + "mitgcm_UV_surface_zonally_reentrant.nc"}
variables = {"U": "UVEL", "V": "VVEL"}
dimensions = {"U": {"lon": "XG", "lat": "YG", "time": "time"},
"V": {"lon": "XG", "lat": "YG", "time": "time"}}
chs = False
name_map = {'lon': 'XG', 'lat': 'YG', 'time': 'time'}
if chunk_mode == 'auto':
chs = 'auto'
elif chunk_mode == 'specific':
chs = (1, 50, 100)
return FieldSet.from_mitgcm(filenames, variables, dimensions, mesh='flat', field_chunksize=chs, chunkdims_name_map=name_map)
def compute_nemo_particle_advection(field_set, mode, lonp, latp):
def periodicBC(particle, fieldSet, time):
if particle.lon > 15.0:
particle.lon -= 15.0
if particle.lon < 0:
particle.lon += 15.0
if particle.lat > 60.0:
particle.lat -= 11.0
if particle.lat < 49.0:
particle.lat += 11.0
pset = ParticleSet.from_list(field_set, ptype[mode], lon=lonp, lat=latp)
pfile = ParticleFile("nemo_particles_chunk", pset, outputdt=delta(days=1))
kernels = pset.Kernel(AdvectionRK4) + periodicBC
pset.execute(kernels, runtime=delta(days=4), dt=delta(hours=6), output_file=pfile)
return pset
def compute_globcurrent_particle_advection(field_set, mode, lonp, latp):
pset = ParticleSet(field_set, pclass=ptype[mode], lon=lonp, lat=latp)
pfile = ParticleFile("globcurrent_particles_chunk", pset, outputdt=delta(hours=2))
pset.execute(AdvectionRK4, runtime=delta(days=1), dt=delta(minutes=5), output_file=pfile)
return pset
def compute_pop_particle_advection(field_set, mode, lonp, latp):
pset = ParticleSet.from_list(field_set, ptype[mode], lon=lonp, lat=latp)
pfile = ParticleFile("globcurrent_particles_chunk", pset, outputdt=delta(days=15))
pset.execute(AdvectionRK4, runtime=delta(days=90), dt=delta(days=2), output_file=pfile)
return pset
def compute_swash_particle_advection(field_set, mode, lonp, latp, depthp):
pset = ParticleSet.from_list(field_set, ptype[mode], lon=lonp, lat=latp, depth=depthp)
pfile = ParticleFile("swash_particles_chunk", pset, outputdt=delta(seconds=0.05))
pset.execute(AdvectionRK4, runtime=delta(seconds=0.2), dt=delta(seconds=0.005), output_file=pfile)
return pset
def compute_ofam_particle_advection(field_set, mode, lonp, latp, depthp):
pset = ParticleSet(field_set, pclass=ptype[mode], lon=lonp, lat=latp, depth=depthp)
pfile = ParticleFile("ofam_particles_chunk", pset, outputdt=delta(minutes=10))
pset.execute(AdvectionRK4, runtime=delta(days=10), dt=delta(minutes=5), output_file=pfile)
return pset
@pytest.mark.parametrize('mode', ['jit'])
@pytest.mark.parametrize('chunk_mode', [False, 'auto', 'specific'])
def test_nemo_3D(mode, chunk_mode):
if chunk_mode == 'auto':
dask.config.set({'array.chunk-size': '2MiB'})
else:
dask.config.set({'array.chunk-size': '128MiB'})
field_set = fieldset_from_nemo_3D(chunk_mode)
npart = 20
lonp = 2.5 * np.ones(npart)
latp = [i for i in 52.0+(-1e-3+np.random.rand(npart)*2.0*1e-3)]
compute_nemo_particle_advection(field_set, mode, lonp, latp)
# Nemo sample file dimensions: depthu=75, y=201, x=151
assert (len(field_set.U.grid.load_chunk) == len(field_set.V.grid.load_chunk))
assert (len(field_set.U.grid.load_chunk) == len(field_set.W.grid.load_chunk))
if chunk_mode is False:
assert (len(field_set.U.grid.load_chunk) == 1)
elif chunk_mode == 'auto':
assert (len(field_set.U.grid.load_chunk) != 1)
elif chunk_mode == 'specific':
assert (len(field_set.U.grid.load_chunk) == (1 * int(math.ceil(201.0/16.0)) * int(math.ceil(151.0/16.0))))
@pytest.mark.parametrize('mode', ['jit'])
@pytest.mark.parametrize('chunk_mode', [False, 'auto', 'specific'])
def test_pop(mode, chunk_mode):
if chunk_mode == 'auto':
dask.config.set({'array.chunk-size': '1MiB'})
else:
dask.config.set({'array.chunk-size': '128MiB'})
field_set = fieldset_from_pop_1arcs(chunk_mode)
npart = 20
lonp = 70.0 * np.ones(npart)
latp = [i for i in -45.0+(-0.25+np.random.rand(npart)*2.0*0.25)]
compute_pop_particle_advection(field_set, mode, lonp, latp)
# POP sample file dimensions: k=21, j=60, i=60
assert (len(field_set.U.grid.load_chunk) == len(field_set.V.grid.load_chunk))
assert (len(field_set.U.grid.load_chunk) == len(field_set.W.grid.load_chunk))
if chunk_mode is False:
assert (len(field_set.U.grid.load_chunk) == 1)
elif chunk_mode == 'auto':
assert (len(field_set.U.grid.load_chunk) == 1)
elif chunk_mode == 'specific':
assert (len(field_set.U.grid.load_chunk) == (int(math.ceil(21.0/3.0)) * int(math.ceil(60.0/8.0)) * int(math.ceil(60.0/8.0))))
@pytest.mark.parametrize('mode', ['jit'])
@pytest.mark.parametrize('chunk_mode', [False, 'auto', 'specific'])
def test_swash(mode, chunk_mode):
if chunk_mode == 'auto':
dask.config.set({'array.chunk-size': '32KiB'})
else:
dask.config.set({'array.chunk-size': '128MiB'})
field_set = fieldset_from_swash(chunk_mode)
npart = 20
lonp = [i for i in 9.5 + (-0.2 + np.random.rand(npart) * 2.0 * 0.2)]
latp = [i for i in np.arange(start=12.3, stop=13.1, step=0.04)[0:20]]
depthp = [-0.1, ] * npart
compute_swash_particle_advection(field_set, mode, lonp, latp, depthp)
# SWASH sample file dimensions: t=1, z=7, z_u=6, y=21, x=51
assert (len(field_set.U.grid.load_chunk) == len(field_set.V.grid.load_chunk))
if chunk_mode != 'auto':
assert (len(field_set.U.grid.load_chunk) == len(field_set.W.grid.load_chunk))
if chunk_mode is False:
assert (len(field_set.U.grid.load_chunk) == 1)
elif chunk_mode == 'auto':
assert (len(field_set.U.grid.load_chunk) != 1)
elif chunk_mode == 'specific':
assert (len(field_set.U.grid.load_chunk) == (1 * int(math.ceil(6.0 / 7.0)) * int(math.ceil(21.0 / 4.0)) * int(math.ceil(51.0 / 4.0))))
assert (len(field_set.U.grid.load_chunk) == (1 * int(math.ceil(7.0 / 7.0)) * int(math.ceil(21.0 / 4.0)) * int(math.ceil(51.0 / 4.0))))
@pytest.mark.parametrize('mode', ['jit'])
@pytest.mark.parametrize('chunk_mode', [False, 'auto', 'specific'])
def test_globcurrent_2D(mode, chunk_mode):
if chunk_mode == 'auto':
dask.config.set({'array.chunk-size': '32KiB'})
else:
dask.config.set({'array.chunk-size': '128MiB'})
field_set = fieldset_from_globcurrent(chunk_mode)
lonp = [25]
latp = [-35]
pset = compute_globcurrent_particle_advection(field_set, mode, lonp, latp)
# GlobCurrent sample file dimensions: time=UNLIMITED, lat=41, lon=81
assert (len(field_set.U.grid.load_chunk) == len(field_set.V.grid.load_chunk))
if chunk_mode is False:
assert (len(field_set.U.grid.load_chunk) == 1)
elif chunk_mode == 'auto':
assert (len(field_set.U.grid.load_chunk) != 1)
elif chunk_mode == 'specific':
assert (len(field_set.U.grid.load_chunk) == (1 * int(math.ceil(41.0/16.0)) * int(math.ceil(81.0/16.0))))
assert(abs(pset[0].lon - 23.8) < 1)
assert(abs(pset[0].lat - -35.3) < 1)
@pytest.mark.parametrize('mode', ['jit'])
@pytest.mark.parametrize('chunk_mode', [False, 'auto', 'specific'])
def test_ofam_3D(mode, chunk_mode):
if chunk_mode == 'auto':
dask.config.set({'array.chunk-size': '1024KiB'})
else:
dask.config.set({'array.chunk-size': '128MiB'})
field_set = fieldset_from_ofam(chunk_mode)
lonp = [180]
latp = [10]
depthp = [2.5] # the depth of the first layer in OFAM
pset = compute_ofam_particle_advection(field_set, mode, lonp, latp, depthp)
# OFAM sample file dimensions: time=UNLIMITED, st_ocean=1, st_edges_ocean=52, lat=601, lon=2001
assert (len(field_set.U.grid.load_chunk) == len(field_set.V.grid.load_chunk))
if chunk_mode is False:
assert (len(field_set.U.grid.load_chunk) == 1)
elif chunk_mode == 'auto':
assert (len(field_set.U.grid.load_chunk) != 1)
elif chunk_mode == 'specific':
print(field_set.U.grid.chunk_info)
numblocks = [i for i in field_set.U.grid.chunk_info[1:3]]
dblocks = 1
vblocks = 0
for bsize in field_set.U.grid.chunk_info[3:3+numblocks[0]]:
vblocks += bsize
ublocks = 0
for bsize in field_set.U.grid.chunk_info[3+numblocks[0]:3+numblocks[0]+numblocks[1]]:
ublocks += bsize
matching_numblocks = (ublocks == 2001 and vblocks == 601 and dblocks == 1)
matching_fields = (field_set.U.grid.chunk_info == field_set.V.grid.chunk_info)
matching_uniformblocks = (len(field_set.U.grid.load_chunk) == (1 * int(math.ceil(1.0/60.0)) * int(math.ceil(601.0/50.0)) * int(math.ceil(2001.0/100.0))))
assert (matching_uniformblocks or (matching_fields and matching_numblocks))
assert(abs(pset[0].lon - 173) < 1)
assert(abs(pset[0].lat - 11) < 1)
@pytest.mark.parametrize('mode', ['jit'])
@pytest.mark.parametrize('chunk_mode', [False, 'auto', 'specific'])
def test_mitgcm(mode, chunk_mode):
if chunk_mode == 'auto':
dask.config.set({'array.chunk-size': '1024KiB'})
else:
dask.config.set({'array.chunk-size': '128MiB'})
field_set = fieldset_from_mitgcm(chunk_mode)
lons, lats = 5e5, 5e5
pset = ParticleSet.from_list(fieldset=field_set, pclass=ptype[mode], lon=lons, lat=lats)
pset.execute(AdvectionRK4, runtime=delta(days=1), dt=delta(minutes=5))
# MITgcm sample file dimensions: time=10, XG=400, YG=200
assert (len(field_set.U.grid.load_chunk) == len(field_set.V.grid.load_chunk))
if chunk_mode in [False, 'auto']:
assert (len(field_set.U.grid.load_chunk) == 1)
elif chunk_mode == 'specific':
assert (len(field_set.U.grid.load_chunk) == (1 * int(math.ceil(400.0/50.0)) * int(math.ceil(200.0/100.0))))
assert np.allclose(pset[0].lon, 5.27e5, atol=1e3)
@pytest.mark.parametrize('mode', ['jit'])
def test_diff_entry_dimensions_chunks(mode):
data_path = path.join(path.dirname(__file__), 'NemoNorthSeaORCA025-N006_data/')
ufiles = sorted(glob(data_path + 'ORCA*U.nc'))
vfiles = sorted(glob(data_path + 'ORCA*V.nc'))
mesh_mask = data_path + 'coordinates.nc'
filenames = {'U': {'lon': mesh_mask, 'lat': mesh_mask, 'data': ufiles},
'V': {'lon': mesh_mask, 'lat': mesh_mask, 'data': vfiles}}
variables = {'U': 'uo',
'V': 'vo'}
dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif', 'time': 'time_counter'},
'V': {'lon': 'glamf', 'lat': 'gphif', 'time': 'time_counter'}}
chs = {'U': {'depthu': 75, 'depthv': 75, 'y': 16, 'x': 16},
'V': {'depthu': 75, 'depthv': 75, 'y': 16, 'x': 16}}
fieldset = FieldSet.from_nemo(filenames, variables, dimensions, field_chunksize=chs)
npart = 20
lonp = 5.2 * np.ones(npart)
latp = [i for i in 52.0+(-1e-3+np.random.rand(npart)*2.0*1e-3)]
compute_nemo_particle_advection(fieldset, mode, lonp, latp)
# Nemo sample file dimensions: depthu=75, y=201, x=151
assert (len(fieldset.U.grid.load_chunk) == len(fieldset.V.grid.load_chunk))
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_3d_2dfield_sampling(mode):
data_path = path.join(path.dirname(__file__), 'NemoNorthSeaORCA025-N006_data/')
ufiles = sorted(glob(data_path + 'ORCA*U.nc'))
vfiles = sorted(glob(data_path + 'ORCA*V.nc'))
mesh_mask = data_path + 'coordinates.nc'
filenames = {'U': {'lon': mesh_mask, 'lat': mesh_mask, 'data': ufiles},
'V': {'lon': mesh_mask, 'lat': mesh_mask, 'data': vfiles},
'nav_lon': {'lon': mesh_mask, 'lat': mesh_mask, 'data': [ufiles[0], ]}}
variables = {'U': 'uo',
'V': 'vo',
'nav_lon': 'nav_lon'}
dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif', 'time': 'time_counter'},
'V': {'lon': 'glamf', 'lat': 'gphif', 'time': 'time_counter'},
'nav_lon': {'lon': 'glamf', 'lat': 'gphif'}}
fieldset = FieldSet.from_nemo(filenames, variables, dimensions, field_chunksize=False)
fieldset.nav_lon.data = np.ones(fieldset.nav_lon.data.shape, dtype=np.float32)
fieldset.add_field(Field('rectilinear_2D', np.ones((2, 2)),
lon=np.array([-10, 20]), lat=np.array([40, 80]), field_chunksize=False))
class MyParticle(ptype[mode]):
sample_var_curvilinear = Variable('sample_var_curvilinear')
sample_var_rectilinear = Variable('sample_var_rectilinear')
pset = ParticleSet(fieldset, pclass=MyParticle, lon=2.5, lat=52)
def Sample2D(particle, fieldset, time):
particle.sample_var_curvilinear += fieldset.nav_lon[time, particle.depth, particle.lat, particle.lon]
particle.sample_var_rectilinear += fieldset.rectilinear_2D[time, particle.depth, particle.lat, particle.lon]
runtime, dt = 86400*4, 6*3600
pset.execute(pset.Kernel(AdvectionRK4) + Sample2D, runtime=runtime, dt=dt)
assert pset.sample_var_rectilinear == runtime/dt
assert pset.sample_var_curvilinear == runtime/dt
@pytest.mark.parametrize('mode', ['jit'])
def test_diff_entry_chunksize_error_nemo_simple(mode):
data_path = path.join(path.dirname(__file__), 'NemoNorthSeaORCA025-N006_data/')
ufiles = sorted(glob(data_path + 'ORCA*U.nc'))
vfiles = sorted(glob(data_path + 'ORCA*V.nc'))
wfiles = sorted(glob(data_path + 'ORCA*W.nc'))
mesh_mask = data_path + 'coordinates.nc'
filenames = {'U': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': ufiles},
'V': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': vfiles},
'W': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': wfiles}}
variables = {'U': 'uo',
'V': 'vo',
'W': 'wo'}
dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'},
'V': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'},
'W': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}}
chs = {'U': {'depthu': 75, 'y': 16, 'x': 16},
'V': {'depthv': 20, 'y': 4, 'x': 16},
'W': {'depthw': 15, 'y': 16, 'x': 4}}
try:
fieldset = FieldSet.from_nemo(filenames, variables, dimensions, field_chunksize=chs)
except ValueError:
return True
npart = 20
lonp = 5.2 * np.ones(npart)
latp = [i for i in 52.0+(-1e-3+np.random.rand(npart)*2.0*1e-3)]
compute_nemo_particle_advection(fieldset, mode, lonp, latp)
return False
@pytest.mark.parametrize('mode', ['jit'])
def test_diff_entry_chunksize_error_nemo_complex_conform_depth(mode):
# ==== this test is expected to fall-back to a pre-defined minimal chunk as ==== #
# ==== the requested chunks don't match, or throw a value error. ==== #
data_path = path.join(path.dirname(__file__), 'NemoNorthSeaORCA025-N006_data/')
ufiles = sorted(glob(data_path + 'ORCA*U.nc'))
vfiles = sorted(glob(data_path + 'ORCA*V.nc'))
wfiles = sorted(glob(data_path + 'ORCA*W.nc'))
mesh_mask = data_path + 'coordinates.nc'
filenames = {'U': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': ufiles},
'V': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': vfiles},
'W': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': wfiles}}
variables = {'U': 'uo',
'V': 'vo',
'W': 'wo'}
dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'},
'V': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'},
'W': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}}
chs = {'U': {'depthu': 75, 'depthv': 75, 'depthw': 75, 'y': 16, 'x': 16},
'V': {'depthu': 75, 'depthv': 75, 'depthw': 75, 'y': 4, 'x': 16},
'W': {'depthu': 75, 'depthv': 75, 'depthw': 75, 'y': 16, 'x': 4}}
fieldset = FieldSet.from_nemo(filenames, variables, dimensions, field_chunksize=chs)
npart = 20
lonp = 5.2 * np.ones(npart)
latp = [i for i in 52.0+(-1e-3+np.random.rand(npart)*2.0*1e-3)]
compute_nemo_particle_advection(fieldset, mode, lonp, latp)
# Nemo sample file dimensions: depthu=75, y=201, x=151
npart_U = 1
npart_U = [npart_U * k for k in fieldset.U.nchunks[1:]]
npart_V = 1
npart_V = [npart_V * k for k in fieldset.V.nchunks[1:]]
npart_W = 1
npart_W = [npart_W * k for k in fieldset.V.nchunks[1:]]
chn = {'U': {'lat': int(math.ceil(201.0/chs['U']['y'])),
'lon': int(math.ceil(151.0/chs['U']['x'])),
'depth': int(math.ceil(75.0/chs['U']['depthu']))},
'V': {'lat': int(math.ceil(201.0/chs['V']['y'])),
'lon': int(math.ceil(151.0/chs['V']['x'])),
'depth': int(math.ceil(75.0/chs['V']['depthv']))},
'W': {'lat': int(math.ceil(201.0/chs['W']['y'])),
'lon': int(math.ceil(151.0/chs['W']['x'])),
'depth': int(math.ceil(75.0/chs['W']['depthw']))}}
npart_U_request = 1
npart_U_request = [npart_U_request * chn['U'][k] for k in chn['U']]
npart_V_request = 1
npart_V_request = [npart_V_request * chn['V'][k] for k in chn['V']]
npart_W_request = 1
npart_W_request = [npart_W_request * chn['W'][k] for k in chn['W']]
assert (len(fieldset.U.grid.load_chunk) == len(fieldset.V.grid.load_chunk))
assert (len(fieldset.U.grid.load_chunk) == len(fieldset.W.grid.load_chunk))
assert (npart_U == npart_V)
assert (npart_U == npart_W)
assert (npart_U != npart_U_request)
assert (npart_V != npart_V_request)
assert (npart_W != npart_W_request)
@pytest.mark.parametrize('mode', ['jit'])
def test_diff_entry_chunksize_error_nemo_complex_nonconform_depth(mode):
# ==== this test is expected to fall-back to a pre-defined minimal chunk as the ==== #
# ==== requested chunks don't match, or throw a value error ==== #
data_path = path.join(path.dirname(__file__), 'NemoNorthSeaORCA025-N006_data/')
ufiles = sorted(glob(data_path + 'ORCA*U.nc'))
vfiles = sorted(glob(data_path + 'ORCA*V.nc'))
wfiles = sorted(glob(data_path + 'ORCA*W.nc'))
mesh_mask = data_path + 'coordinates.nc'
filenames = {'U': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': ufiles},
'V': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': vfiles}}
variables = {'U': 'uo',
'V': 'vo'}
dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'},
'V': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}}
chs = {'U': {'depthu': 75, 'depthv': 15, 'y': 16, 'x': 16},
'V': {'depthu': 75, 'depthv': 15, 'y': 4, 'x': 16}}
fieldset = FieldSet.from_nemo(filenames, variables, dimensions, field_chunksize=chs)
npart = 20
lonp = 5.2 * np.ones(npart)
latp = [i for i in 52.0+(-1e-3+np.random.rand(npart)*2.0*1e-3)]
try:
compute_nemo_particle_advection(fieldset, mode, lonp, latp)
except IndexError: # incorrect data access, in case grids were created
return True
except AssertionError: # U-V grids are not equal to one another, throwing assertion errors
return True
return False
@pytest.mark.parametrize('mode', ['jit'])
def test_erroneous_fieldset_init(mode):
data_path = path.join(path.dirname(__file__), 'NemoNorthSeaORCA025-N006_data/')
ufiles = sorted(glob(data_path + 'ORCA*U.nc'))
vfiles = sorted(glob(data_path + 'ORCA*V.nc'))
wfiles = sorted(glob(data_path + 'ORCA*W.nc'))
mesh_mask = data_path + 'coordinates.nc'
filenames = {'U': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': ufiles},
'V': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': vfiles},
'W': {'lon': mesh_mask, 'lat': mesh_mask, 'depth': wfiles[0], 'data': wfiles}}
variables = {'U': 'uo',
'V': 'vo',
'W': 'wo'}
dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'},
'V': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'},
'W': {'lon': 'glamf', 'lat': 'gphif', 'depth': 'depthw', 'time': 'time_counter'}}
chs = {'U': {'depthu': 75, 'y': 16, 'x': 16},
'V': {'depthv': 75, 'y': 16, 'x': 16},
'W': {'depthw': 75, 'y': 16, 'x': 16}}
try:
FieldSet.from_nemo(filenames, variables, dimensions, field_chunksize=chs)
except ValueError:
return True
return False
@pytest.mark.parametrize('mode', ['jit'])
def test_diff_entry_chunksize_correction_globcurrent(mode):
filenames = path.join(path.dirname(__file__), 'GlobCurrent_example_data',
'200201*-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc')
variables = {'U': 'eastward_eulerian_current_velocity', 'V': 'northward_eulerian_current_velocity'}
dimensions = {'lat': 'lat', 'lon': 'lon', 'time': 'time'}
chs = {'U': {'lat': 16, 'lon': 16},
'V': {'lat': 16, 'lon': 4}}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions, field_chunksize=chs)
lonp = [25]
latp = [-35]
compute_globcurrent_particle_advection(fieldset, mode, lonp, latp)
# GlobCurrent sample file dimensions: time=UNLIMITED, lat=41, lon=81
npart_U = 1
npart_U = [npart_U * k for k in fieldset.U.nchunks[1:]]
npart_V = 1
npart_V = [npart_V * k for k in fieldset.V.nchunks[1:]]
npart_V_request = 1
chn = {'U': {'lat': int(math.ceil(41.0/chs['U']['lat'])),
'lon': int(math.ceil(81.0/chs['U']['lon']))},
'V': {'lat': int(math.ceil(41.0/chs['V']['lat'])),
'lon': int(math.ceil(81.0/chs['V']['lon']))}}
npart_V_request = [npart_V_request * chn['V'][k] for k in chn['V']]
assert (npart_U == npart_V)
assert (npart_V != npart_V_request)
assert (len(fieldset.U.grid.load_chunk) == len(fieldset.V.grid.load_chunk))
| OceanPARCELS/parcels | parcels/examples/example_dask_chunk_OCMs.py | Python | mit | 27,874 | [
"ORCA"
] | 3589c21b058f2232d6d72b21ad5558df5450f9970a34438a530e2aa48ccbfe03 |
""" Definitions of a standard set of pilot commands
Each command is represented by a class inheriting the CommandBase class.
The command class constructor takes PilotParams object which is a data
structure which keeps common parameters across all the pilot commands.
The constructor must call the superclass constructor with the PilotParams
object and the command name as arguments, e.g. ::
class InstallDIRAC( CommandBase ):
def __init__( self, pilotParams ):
CommandBase.__init__(self, pilotParams, 'Install')
...
The command class must implement execute() method for the actual command
execution.
"""
import sys
import os
import stat
import socket
from pilotTools import CommandBase, retrieveUrlTimeout
__RCSID__ = "$Id$"
class GetPilotVersion( CommandBase ):
  """ Used to get the pilot version that needs to be installed.
      If passed as a parameter, uses that one. If not passed, it looks for alternatives
      by downloading the pilot CFG (JSON) file and taking the first version listed
      for the current setup.

      This assures that a version is always obtained, even on non-standard Grid resources.
  """

  def execute( self ):
    """ Standard method for pilot commands.

        Sets self.pp.releaseVersion if it was not already requested on the command line.
        Exits with code 2 if no json module is available, 1 if the CFG file
        cannot be retrieved.
    """
    if self.pp.releaseVersion:
      self.log.info( "Pilot version requested as pilot script option. Nothing to do." )
    else:
      try:
        import json
      except ImportError:
        self.log.error( 'No json module available, exiting ...' )
        sys.exit( 2 )
      self.log.info( "Pilot version not requested as pilot script option, going to find it" )
      # Download the pilot CFG file; retrieveUrlTimeout saves it locally with a '-local' suffix
      result = retrieveUrlTimeout( self.pp.pilotCFGFileLocation + '/' + self.pp.pilotCFGFile,
                                   self.pp.pilotCFGFile,
                                   self.log,
                                   timeout = 120 )
      if not result:
        self.log.error( "Failed to get pilot version, exiting ..." )
        sys.exit( 1 )
      # 'with' guarantees the file is closed even if JSON parsing raises
      with open( self.pp.pilotCFGFile + '-local', 'r' ) as fp:
        pilotCFGFileContent = json.load( fp )
      pilotVersions = [str( pv ) for pv in pilotCFGFileContent[self.pp.setup]['Version']]
      self.log.debug( "Pilot versions found: %s" % ', '.join( pilotVersions ) )
      # The first listed version is taken as the one to install
      self.log.info( "Setting pilot version to %s" % pilotVersions[0] )
      self.pp.releaseVersion = pilotVersions[0]
class CheckWorkerNode( CommandBase ):
  """ Executes some basic checks on the worker node: host identity, OS release,
      CPU, memory and available disk space.
  """

  def __init__( self, pilotParams ):
    """ c'tor
    """
    super( CheckWorkerNode, self ).__init__( pilotParams )

  def execute( self ):
    """ Get host and local user info, and other basic checks, e.g. space available.

        Exits with code 1 if the available disk space is below self.pp.minDiskSpace.
    """

    self.log.info( 'Uname = %s' % " ".join( os.uname() ) )
    self.log.info( 'Host Name = %s' % socket.gethostname() )
    self.log.info( 'Host FQDN = %s' % socket.getfqdn() )
    self.log.info( 'WorkingDir = %s' % self.pp.workingDir )  # this could be different than rootPath

    fileName = '/etc/redhat-release'
    if os.path.exists( fileName ):
      with open( fileName, 'r' ) as f:
        self.log.info( 'RedHat Release = %s' % f.read().strip() )

    fileName = '/etc/lsb-release'
    if os.path.isfile( fileName ):
      with open( fileName, 'r' ) as f:
        self.log.info( 'Linux release:\n%s' % f.read().strip() )

    fileName = '/proc/cpuinfo'
    if os.path.exists( fileName ):
      with open( fileName, 'r' ) as f:
        cpu = f.readlines()
      nCPU = 0
      # Defaults avoid a NameError on hosts whose cpuinfo lacks 'cpu MHz' / 'model name' lines
      freq = 0
      CPUmodel = 'Unknown'
      for line in cpu:
        if line.find( 'cpu MHz' ) == 0:
          nCPU += 1
          freq = line.split()[3]
        elif line.find( 'model name' ) == 0:
          CPUmodel = line.split( ': ' )[1].strip()
      self.log.info( 'CPU (model)    = %s' % CPUmodel )
      self.log.info( 'CPU (MHz)      = %s x %s' % ( nCPU, freq ) )

    fileName = '/proc/meminfo'
    if os.path.exists( fileName ):
      with open( fileName, 'r' ) as f:
        mem = f.readlines()
      # Default avoids a NameError if the 'MemTotal:' line is absent
      totalMem = 0
      freeMem = 0
      for line in mem:
        if line.find( 'MemTotal:' ) == 0:
          totalMem = int( line.split()[1] )
        if line.find( 'MemFree:' ) == 0:
          freeMem += int( line.split()[1] )
        if line.find( 'Cached:' ) == 0:
          freeMem += int( line.split()[1] )
      self.log.info( 'Memory (kB)    = %s' % totalMem )
      self.log.info( 'FreeMem. (kB)  = %s' % freeMem )

    ##############################################################################################################################
    # Disk space check

    # fs = os.statvfs( rootPath )
    fs = os.statvfs( self.pp.workingDir )
    # statvfs fields: bsize, frsize, blocks, bfree, bavail, files, ffree, favail, flag, namemax
    # fs[4] (bavail: free blocks for non-root) * fs[0] (block size) gives bytes -> MB
    diskSpace = fs[4] * fs[0] / 1024 / 1024
    self.log.info( 'DiskSpace (MB) = %s' % diskSpace )

    if diskSpace < self.pp.minDiskSpace:
      self.log.error( '%s MB < %s MB, not enough local disk space available, exiting'
                      % ( diskSpace, self.pp.minDiskSpace ) )
      sys.exit( 1 )
class InstallDIRAC( CommandBase ):
  """ Basically, this is used to call dirac-install with the passed parameters.
      It requires dirac-install script to be sitting in the same directory.
  """

  def __init__( self, pilotParams ):
    """ c'tor
    """
    super( InstallDIRAC, self ).__init__( pilotParams )
    # Command-line options accumulated for the dirac-install invocation
    self.installOpts = []
    # By default the installation root is the pilot root; '-P' can override it
    self.pp.rootPath = self.pp.pilotRootPath
    self.installScriptName = 'dirac-install.py'
    self.installScript = ''

  def _setInstallOptions( self ):
    """ Setup installation parameters: translate the pilot script options
        (self.pp.optList) into dirac-install command-line options.
    """
    for o, v in self.pp.optList:
      if o in ( '-b', '--build' ):
        self.installOpts.append( '-b' )
      elif o == '-d' or o == '--debug':
        self.installOpts.append( '-d' )
      elif o == '-e' or o == '--extraPackages':
        self.installOpts.append( '-e "%s"' % v )
      elif o == '-g' or o == '--grid':
        self.pp.gridVersion = v
      elif o == '-i' or o == '--python':
        self.pp.pythonVersion = v
      elif o in ( '-l', '--project' ):
        self.installOpts.append( "-l '%s'" % v )
      elif o == '-p' or o == '--platform':
        self.pp.platform = v
      elif o == '-u' or o == '--url':
        self.installOpts.append( '-u "%s"' % v )
      elif o in ( '-P', '--path' ):
        self.installOpts.append( '-P "%s"' % v )
        # '-P' also redefines the installation root path
        self.pp.rootPath = v
      elif o in ( '-V', '--installation' ):
        self.installOpts.append( '-V "%s"' % v )
      elif o == '-t' or o == '--server':
        self.installOpts.append( '-t "server"' )
    # Options stored on self.pp (possibly set above) are appended after the loop
    if self.pp.gridVersion:
      self.installOpts.append( "-g '%s'" % self.pp.gridVersion )
    if self.pp.pythonVersion:
      self.installOpts.append( "-i '%s'" % self.pp.pythonVersion )
    if self.pp.platform:
      self.installOpts.append( '-p "%s"' % self.pp.platform )

    # The release version to install is a requirement
    self.installOpts.append( '-r "%s"' % self.pp.releaseVersion )

    self.log.debug( 'INSTALL OPTIONS [%s]' % ', '.join( map( str, self.installOpts ) ) )

  def _locateInstallationScript( self ):
    """ Locate installation script in one of the known pilot directories
        and make it executable. Exits with code 1 if it cannot be found.
    """
    installScript = ''
    # First match wins: pilot root, then original root, then installation root
    for path in ( self.pp.pilotRootPath, self.pp.originalRootPath, self.pp.rootPath ):
      installScript = os.path.join( path, self.installScriptName )
      if os.path.isfile( installScript ):
        break
    self.installScript = installScript

    if not os.path.isfile( installScript ):
      self.log.error( "%s requires %s to exist in one of: %s, %s, %s" % ( self.pp.pilotScriptName,
                                                                          self.installScriptName,
                                                                          self.pp.pilotRootPath,
                                                                          self.pp.originalRootPath,
                                                                          self.pp.rootPath ) )
      sys.exit( 1 )

    try:
      # change permission of the script
      os.chmod( self.installScript, stat.S_IRWXU )
    except OSError:
      # non-fatal: the script may already be executable or owned by someone else
      pass

  def _installDIRAC( self ):
    """ Install DIRAC or its extension, then parse the environment file created, and use it for subsequent calls
    """
    # Installing
    installCmd = "%s %s" % ( self.installScript, " ".join( self.installOpts ) )
    self.log.debug( "Installing with: %s" % installCmd )

    # At this point self.pp.installEnv may coincide with os.environ
    # If extensions want to pass in a modified environment, it's easy to set self.pp.installEnv in an extended command
    retCode, output = self.executeAndGetOutput( installCmd, self.pp.installEnv )
    self.log.info( output, header = False )

    if retCode:
      self.log.error( "Could not make a proper DIRAC installation [ERROR %d]" % retCode )
      self.exitWithError( retCode )
    self.log.info( "%s completed successfully" % self.installScriptName )

    # Parsing the bashrc then adding its content to the installEnv
    # at this point self.pp.installEnv may still coincide with os.environ
    retCode, output = self.executeAndGetOutput( 'bash -c "source bashrc && env"', self.pp.installEnv )
    if retCode:
      self.log.error( "Could not parse the bashrc file [ERROR %d]" % retCode )
      self.exitWithError( retCode )
    for line in output.split('\n'):
      try:
        var, value = [vx.strip() for vx in line.split( '=', 1 )]
        if var == '_' or 'SSH' in var or '{' in value or '}' in value: # Avoiding useless/confusing stuff
          continue
        self.pp.installEnv[var] = value
      except (IndexError, ValueError):
        # line was not a 'VAR=value' pair (e.g. empty or multi-line value); skip it
        continue
    # At this point self.pp.installEnv should contain all content of bashrc, sourced "on top" of (maybe) os.environ

    self.pp.diracInstalled = True

  def execute( self ):
    """ What is called all the time: build the option list, find the install
        script, then run the installation.
    """
    self._setInstallOptions()
    self._locateInstallationScript()
    self._installDIRAC()
class ReplaceDIRACCode( CommandBase ):
""" This command will replace DIRAC code with the one taken from a different location.
This command is mostly for testing purposes, and should NOT be added in default configurations.
It uses generic -o option for specifying a zip location (like an archive file from github).
"""
def __init__( self, pilotParams ):
""" c'tor
"""
super( ReplaceDIRACCode, self ).__init__( pilotParams )
def execute(self):
""" Download/unzip an archive file
"""
from io import BytesIO
from urllib2 import urlopen
from zipfile import ZipFile
zipresp = urlopen(self.pp.genericOption)
zfile = ZipFile(BytesIO(zipresp.read()))
os.mkdir(os.getcwd() + os.path.sep + 'AlternativeCode')
zfile.extractall(os.getcwd() + os.path.sep + 'AlternativeCode')
zfile.close()
zipresp.close()
os.rename(os.getcwd() + os.path.sep + 'AlternativeCode' + os.path.sep + os.listdir('./AlternativeCode')[0],
os.getcwd() + os.path.sep + 'AlternativeCode' + os.path.sep + 'DIRAC')
self.pp.installEnv['PYTHONPATH'] = os.getcwd() + os.path.sep + 'AlternativeCode' + os.path.sep + 'DIRAC' ':' \
+ self.pp.installEnv['PYTHONPATH']
class ConfigureBasics( CommandBase ):
""" This command completes DIRAC installation, e.g. calls dirac-configure to:
- download, by default, the CAs
- creates a standard or custom (defined by self.pp.localConfigFile) cfg file
to be used where all the pilot configuration is to be set, e.g.:
- adds to it basic info like the version
- adds to it the security configuration
If there is more than one command calling dirac-configure, this one should be always the first one called.
.. note:: Further commands should always call dirac-configure using the options -FDMH
.. note:: If custom cfg file is created further commands should call dirac-configure with
"-O %s %s" % ( self.pp.localConfigFile, self.pp.localConfigFile )
From here on, we have to pay attention to the paths. Specifically, we need to know where to look for
- executables (scripts)
- DIRAC python code
If the pilot has installed DIRAC (and extensions) in the traditional way, so using the dirac-install.py script,
simply the current directory is used, and:
- scripts will be in $CWD/scripts.
- DIRAC python code will be all sitting in $CWD
- the local dirac.cfg file will be found in $CWD/etc
For a more general case of non-traditional installations, we should use the PATH and PYTHONPATH as set by the
installation phase. Executables and code will be searched there.
"""
def __init__( self, pilotParams ):
""" c'tor
"""
super( ConfigureBasics, self ).__init__( pilotParams )
self.cfg = []
def execute( self ):
""" What is called all the times.
VOs may want to replace/extend the _getBasicsCFG and _getSecurityCFG functions
"""
self._getBasicsCFG()
self._getSecurityCFG()
if self.pp.debugFlag:
self.cfg.append( '-ddd' )
if self.pp.localConfigFile:
self.cfg.append( '-O %s' % self.pp.localConfigFile )
configureCmd = "%s %s" % ( self.pp.configureScript, " ".join( self.cfg ) )
retCode, _configureOutData = self.executeAndGetOutput( configureCmd, self.pp.installEnv )
if retCode:
self.log.error( "Could not configure DIRAC basics [ERROR %d]" % retCode )
self.exitWithError( retCode )
def _getBasicsCFG( self ):
""" basics (needed!)
"""
self.cfg.append( '-S "%s"' % self.pp.setup )
if self.pp.configServer:
self.cfg.append( '-C "%s"' % self.pp.configServer )
if self.pp.releaseProject:
self.cfg.append( '-e "%s"' % self.pp.releaseProject )
self.cfg.append( '-o /LocalSite/ReleaseProject=%s' % self.pp.releaseProject )
if self.pp.gateway:
self.cfg.append( '-W "%s"' % self.pp.gateway )
if self.pp.userGroup:
self.cfg.append( '-o /AgentJobRequirements/OwnerGroup="%s"' % self.pp.userGroup )
if self.pp.userDN:
self.cfg.append( '-o /AgentJobRequirements/OwnerDN="%s"' % self.pp.userDN )
self.cfg.append( '-o /LocalSite/ReleaseVersion=%s' % self.pp.releaseVersion )
def _getSecurityCFG( self ):
""" Nothing specific by default, but need to know host cert and key location in case they are needed
"""
if self.pp.useServerCertificate:
self.cfg.append( '--UseServerCertificate' )
self.cfg.append( "-o /DIRAC/Security/CertFile=%s/hostcert.pem" % self.pp.certsLocation )
self.cfg.append( "-o /DIRAC/Security/KeyFile=%s/hostkey.pem" % self.pp.certsLocation )
class CheckCECapabilities( CommandBase ):
""" Used to get CE tags and other relevant parameters
"""
def __init__( self, pilotParams ):
""" c'tor
"""
super( CheckCECapabilities, self ).__init__( pilotParams )
# this variable contains the options that are passed to dirac-configure, and that will fill the local dirac.cfg file
self.cfg = []
def execute( self ):
""" Main execution method
"""
if self.pp.useServerCertificate:
self.cfg.append( '-o /DIRAC/Security/UseServerCertificate=yes' )
if self.pp.localConfigFile:
self.cfg.append( self.pp.localConfigFile ) # this file is as input
# Get the resource description as defined in its configuration
checkCmd = 'dirac-resource-get-parameters -S %s -N %s -Q %s %s' % ( self.pp.site,
self.pp.ceName,
self.pp.queueName,
" ".join( self.cfg ) )
retCode, resourceDict = self.executeAndGetOutput( checkCmd, self.pp.installEnv )
if retCode:
self.log.error( "Could not get resource parameters [ERROR %d]" % retCode )
self.exitWithError( retCode )
try:
import json
resourceDict = json.loads( resourceDict )
except ValueError:
self.log.error( "The pilot command output is not json compatible." )
sys.exit( 1 )
self.pp.queueParameters = resourceDict
self.cfg = []
# Pick up all the relevant resource parameters that will be used in the job matching
for ceParam in [ "WholeNode", "NumberOfProcessors" ]:
if ceParam in resourceDict:
self.cfg.append( '-o /Resources/Computing/CEDefaults/%s=%s' % ( ceParam, resourceDict[ ceParam ] ) )
# Tags must be added to already defined tags if any
if resourceDict.get( 'Tag' ):
self.pp.tags += resourceDict['Tag']
if self.pp.tags:
self.cfg.append( '-o "/Resources/Computing/CEDefaults/Tag=%s"' % ','.join( ( str( x ) for x in self.pp.tags ) ) )
# RequiredTags are similar to tags.
if resourceDict.get( 'RequiredTag' ):
self.pp.reqtags += resourceDict['RequiredTag']
if self.pp.reqtags:
self.cfg.append( '-o "/Resources/Computing/CEDefaults/RequiredTag=%s"' % ','.join( ( str( x ) for x in self.pp.reqtags ) ) )
# If there is anything to be added to the local configuration, let's do it
if self.cfg:
self.cfg.append( '-FDMH' )
if self.debugFlag:
self.cfg.append( '-ddd' )
if self.pp.localConfigFile:
self.cfg.append( '-O %s' % self.pp.localConfigFile ) # this file is as output
self.cfg.append( self.pp.localConfigFile ) # this file is as input
configureCmd = "%s %s" % ( self.pp.configureScript, " ".join( self.cfg ) )
retCode, _configureOutData = self.executeAndGetOutput( configureCmd, self.pp.installEnv )
if retCode:
self.log.error( "Could not configure DIRAC [ERROR %d]" % retCode )
self.exitWithError( retCode )
else:
self.log.debug( 'No Tags defined for this Queue' )
class CheckWNCapabilities( CommandBase ):
""" Used to get capabilities specific to the Worker Node. This command must be called
after the CheckCECapabilities command
"""
def __init__( self, pilotParams ):
""" c'tor
"""
super( CheckWNCapabilities, self ).__init__( pilotParams )
self.cfg = []
def execute( self ):
""" Discover NumberOfProcessors and RAM
"""
if self.pp.useServerCertificate:
self.cfg.append( '-o /DIRAC/Security/UseServerCertificate=yes' )
if self.pp.localConfigFile:
self.cfg.append( self.pp.localConfigFile ) # this file is as input
# Get the worker node parameters
checkCmd = 'dirac-wms-get-wn-parameters -S %s -N %s -Q %s %s' % ( self.pp.site,
self.pp.ceName,
self.pp.queueName,
" ".join( self.cfg ) )
retCode, result = self.executeAndGetOutput( checkCmd, self.pp.installEnv )
if retCode:
self.log.error( "Could not get resource parameters [ERROR %d]" % retCode )
self.exitWithError( retCode )
try:
result = result.split( ' ' )
numberOfProcessors = int( result[0] )
maxRAM = int( result[1] )
except ValueError:
self.log.error( "Wrong Command output %s" % result )
sys.exit( 1 )
self.cfg = []
# If NumberOfProcessors or MaxRAM are defined in the resource configuration, these
# values are preferred
if numberOfProcessors and "NumberOfProcessors" not in self.pp.queueParameters:
self.cfg.append( '-o "/Resources/Computing/CEDefaults/NumberOfProcessors=%d"' % numberOfProcessors )
else:
self.log.warn( "Could not retrieve number of processors" )
if maxRAM and "MaxRAM" not in self.pp.queueParameters:
self.cfg.append( '-o "/Resources/Computing/CEDefaults/MaxRAM=%d"' % maxRAM )
else:
self.log.warn( "Could not retrieve MaxRAM" )
if self.cfg:
self.cfg.append( '-FDMH' )
if self.debugFlag:
self.cfg.append( '-ddd' )
if self.pp.localConfigFile:
self.cfg.append( '-O %s' % self.pp.localConfigFile ) # this file is as output
self.cfg.append( self.pp.localConfigFile ) # this file is as input
configureCmd = "%s %s" % ( self.pp.configureScript, " ".join( self.cfg ) )
retCode, _configureOutData = self.executeAndGetOutput( configureCmd, self.pp.installEnv )
if retCode:
self.log.error( "Could not configure DIRAC [ERROR %d]" % retCode )
self.exitWithError( retCode )
class ConfigureSite( CommandBase ):
""" Command to configure DIRAC sites using the pilot options
"""
def __init__( self, pilotParams ):
""" c'tor
"""
super( ConfigureSite, self ).__init__( pilotParams )
# this variable contains the options that are passed to dirac-configure, and that will fill the local dirac.cfg file
self.cfg = []
self.boincUserID = ''
self.boincHostID = ''
self.boincHostPlatform = ''
self.boincHostName = ''
def execute( self ):
""" Setup configuration parameters
"""
self.__setFlavour()
self.cfg.append( '-o /LocalSite/GridMiddleware=%s' % self.pp.flavour )
self.cfg.append( '-n "%s"' % self.pp.site )
self.cfg.append( '-S "%s"' % self.pp.setup )
if not self.pp.ceName or not self.pp.queueName:
self.__getCEName()
self.cfg.append( '-N "%s"' % self.pp.ceName )
self.cfg.append( '-o /LocalSite/GridCE=%s' % self.pp.ceName )
self.cfg.append( '-o /LocalSite/CEQueue=%s' % self.pp.queueName )
if self.pp.ceType:
self.cfg.append( '-o /LocalSite/LocalCE=%s' % self.pp.ceType )
for o, v in self.pp.optList:
if o == '-o' or o == '--option':
self.cfg.append( '-o "%s"' % v )
elif o == '-s' or o == '--section':
self.cfg.append( '-s "%s"' % v )
if self.pp.pilotReference != 'Unknown':
self.cfg.append( '-o /LocalSite/PilotReference=%s' % self.pp.pilotReference )
# add options for BOINc
# FIXME: this should not be part of the standard configuration
if self.boincUserID:
self.cfg.append( '-o /LocalSite/BoincUserID=%s' % self.boincUserID )
if self.boincHostID:
self.cfg.append( '-o /LocalSite/BoincHostID=%s' % self.boincHostID )
if self.boincHostPlatform:
self.cfg.append( '-o /LocalSite/BoincHostPlatform=%s' % self.boincHostPlatform )
if self.boincHostName:
self.cfg.append( '-o /LocalSite/BoincHostName=%s' % self.boincHostName )
if self.pp.useServerCertificate:
self.cfg.append( '--UseServerCertificate' )
self.cfg.append( "-o /DIRAC/Security/CertFile=%s/hostcert.pem" % self.pp.certsLocation )
self.cfg.append( "-o /DIRAC/Security/KeyFile=%s/hostkey.pem" % self.pp.certsLocation )
# these are needed as this is not the fist time we call dirac-configure
self.cfg.append( '-FDMH' )
if self.pp.localConfigFile:
self.cfg.append( '-O %s' % self.pp.localConfigFile )
self.cfg.append( self.pp.localConfigFile )
if self.debugFlag:
self.cfg.append( '-ddd' )
configureCmd = "%s %s" % ( self.pp.configureScript, " ".join( self.cfg ) )
retCode, _configureOutData = self.executeAndGetOutput( configureCmd, self.pp.installEnv )
if retCode:
self.log.error( "Could not configure DIRAC [ERROR %d]" % retCode )
self.exitWithError( retCode )
def __setFlavour( self ):
pilotRef = 'Unknown'
# Pilot reference is specified at submission
if self.pp.pilotReference:
self.pp.flavour = 'DIRAC'
pilotRef = self.pp.pilotReference
# Take the reference from the Torque batch system
if 'PBS_JOBID' in os.environ:
self.pp.flavour = 'SSHTorque'
pilotRef = 'sshtorque://' + self.pp.ceName + '/' + os.environ['PBS_JOBID'].split('.')[0]
# Take the reference from the OAR batch system
if 'OAR_JOBID' in os.environ:
self.pp.flavour = 'SSHOAR'
pilotRef = 'sshoar://' + self.pp.ceName + '/' + os.environ['OAR_JOBID']
# Grid Engine
if 'JOB_ID' in os.environ and 'SGE_TASK_ID' in os.environ:
self.pp.flavour = 'SSHGE'
pilotRef = 'sshge://' + self.pp.ceName + '/' + os.environ['JOB_ID']
# Generic JOB_ID
elif 'JOB_ID' in os.environ:
self.pp.flavour = 'Generic'
pilotRef = 'generic://' + self.pp.ceName + '/' + os.environ['JOB_ID']
# Condor
if 'CONDOR_JOBID' in os.environ:
self.pp.flavour = 'SSHCondor'
pilotRef = 'sshcondor://' + self.pp.ceName + '/' + os.environ['CONDOR_JOBID']
# HTCondor
if 'HTCONDOR_JOBID' in os.environ:
self.pp.flavour = 'HTCondorCE'
pilotRef = 'htcondorce://' + self.pp.ceName + '/' + os.environ['HTCONDOR_JOBID']
# LSF
if 'LSB_BATCH_JID' in os.environ:
self.pp.flavour = 'SSHLSF'
pilotRef = 'sshlsf://' + self.pp.ceName + '/' + os.environ['LSB_BATCH_JID']
# SLURM batch system
if 'SLURM_JOBID' in os.environ:
self.pp.flavour = 'SSHSLURM'
pilotRef = 'sshslurm://' + self.pp.ceName + '/' + os.environ['SLURM_JOBID']
# This is the CREAM direct submission case
if 'CREAM_JOBID' in os.environ:
self.pp.flavour = 'CREAM'
pilotRef = os.environ['CREAM_JOBID']
# If we still have the GLITE_WMS_JOBID, it means that the submission
# was through the WMS, take this reference then
if 'EDG_WL_JOBID' in os.environ:
self.pp.flavour = 'LCG'
pilotRef = os.environ['EDG_WL_JOBID']
if 'GLITE_WMS_JOBID' in os.environ:
if os.environ['GLITE_WMS_JOBID'] != 'N/A':
self.pp.flavour = 'gLite'
pilotRef = os.environ['GLITE_WMS_JOBID']
if 'OSG_WN_TMP' in os.environ:
self.pp.flavour = 'OSG'
# GLOBUS Computing Elements
if 'GLOBUS_GRAM_JOB_CONTACT' in os.environ:
self.pp.flavour = 'GLOBUS'
pilotRef = os.environ['GLOBUS_GRAM_JOB_CONTACT']
# Direct SSH tunnel submission
if 'SSHCE_JOBID' in os.environ:
self.pp.flavour = 'SSH'
pilotRef = 'ssh://' + self.pp.ceName + '/' + os.environ['SSHCE_JOBID']
# ARC case
if 'GRID_GLOBAL_JOBID' in os.environ:
self.pp.flavour = 'ARC'
pilotRef = os.environ['GRID_GLOBAL_JOBID']
# VMDIRAC case
if 'VMDIRAC_VERSION' in os.environ:
self.pp.flavour = 'VMDIRAC'
pilotRef = 'vm://' + self.pp.ceName + '/' + os.environ['JOB_ID']
# This is for BOINC case
if 'BOINC_JOB_ID' in os.environ:
self.pp.flavour = 'BOINC'
pilotRef = os.environ['BOINC_JOB_ID']
if self.pp.flavour == 'BOINC':
if 'BOINC_USER_ID' in os.environ:
self.boincUserID = os.environ['BOINC_USER_ID']
if 'BOINC_HOST_ID' in os.environ:
self.boincHostID = os.environ['BOINC_HOST_ID']
if 'BOINC_HOST_PLATFORM' in os.environ:
self.boincHostPlatform = os.environ['BOINC_HOST_PLATFORM']
if 'BOINC_HOST_NAME' in os.environ:
self.boincHostName = os.environ['BOINC_HOST_NAME']
self.log.debug( "Flavour: %s; pilot reference: %s " % ( self.pp.flavour, pilotRef ) )
self.pp.pilotReference = pilotRef
def __getCEName( self ):
""" Try to get the CE name
"""
# FIXME: this should not be part of the standard configuration (flavours discriminations should stay out)
if self.pp.flavour in ['LCG', 'gLite', 'OSG']:
retCode, CEName = self.executeAndGetOutput( 'glite-brokerinfo getCE',
self.pp.installEnv )
if retCode:
self.log.warn( "Could not get CE name with 'glite-brokerinfo getCE' command [ERROR %d]" % retCode )
if 'OSG_JOB_CONTACT' in os.environ:
# OSG_JOB_CONTACT String specifying the endpoint to use within the job submission
# for reaching the site (e.g. manager.mycluster.edu/jobmanager-pbs )
CE = os.environ['OSG_JOB_CONTACT']
self.pp.ceName = CE.split( '/' )[0]
if len( CE.split( '/' ) ) > 1:
self.pp.queueName = CE.split( '/' )[1]
else:
self.log.error( "CE Name %s not accepted" % CE )
self.exitWithError( retCode )
else:
self.log.error( "Can't find ceName nor queue... have to fail!" )
sys.exit( 1 )
else:
self.log.debug( "Found CE %s" % CEName )
self.pp.ceName = CEName.split( ':' )[0]
if len( CEName.split( '/' ) ) > 1:
self.pp.queueName = CEName.split( '/' )[1]
# configureOpts.append( '-N "%s"' % cliParams.ceName )
elif self.pp.flavour == "CREAM":
if 'CE_ID' in os.environ:
self.log.debug( "Found CE %s" % os.environ['CE_ID'] )
self.pp.ceName = os.environ['CE_ID'].split( ':' )[0]
if os.environ['CE_ID'].count( "/" ):
self.pp.queueName = os.environ['CE_ID'].split( '/' )[1]
else:
self.log.error( "Can't find queue name" )
sys.exit( 1 )
else:
self.log.error( "Can't find CE name" )
sys.exit( 1 )
class ConfigureArchitecture( CommandBase ):
""" This command simply calls dirac-platfom to determine the platform.
Separated from the ConfigureDIRAC command for easier extensibility.
"""
def execute( self ):
""" This is a simple command to call the dirac-platform utility to get the platform, and add it to the configuration
The architecture script, as well as its options can be replaced in a pilot extension
"""
cfg = []
if self.pp.useServerCertificate:
cfg.append( '-o /DIRAC/Security/UseServerCertificate=yes' )
if self.pp.localConfigFile:
cfg.append( self.pp.localConfigFile ) # this file is as input
architectureCmd = "%s %s" % ( self.pp.architectureScript, " ".join( cfg ) )
retCode, localArchitecture = self.executeAndGetOutput( architectureCmd, self.pp.installEnv )
if retCode:
self.log.error( "There was an error updating the platform [ERROR %d]" % retCode )
self.exitWithError( retCode )
self.log.debug( "Architecture determined: %s" % localArchitecture )
# standard options
cfg = ['-FDMH'] # force update, skip CA checks, skip CA download, skip VOMS
if self.pp.useServerCertificate:
cfg.append( '--UseServerCertificate' )
if self.pp.localConfigFile:
cfg.append( '-O %s' % self.pp.localConfigFile ) # our target file for pilots
cfg.append( self.pp.localConfigFile ) # this file is also an input
if self.pp.debugFlag:
cfg.append( "-ddd" )
# real options added here
localArchitecture = localArchitecture.strip()
cfg.append( '-S "%s"' % self.pp.setup )
cfg.append( '-o /LocalSite/Architecture=%s' % localArchitecture )
configureCmd = "%s %s" % ( self.pp.configureScript, " ".join( cfg ) )
retCode, _configureOutData = self.executeAndGetOutput( configureCmd, self.pp.installEnv )
if retCode:
self.log.error( "Configuration error [ERROR %d]" % retCode )
self.exitWithError( retCode )
return localArchitecture
class ConfigureCPURequirements( CommandBase ):
""" This command determines the CPU requirements. Needs to be executed after ConfigureSite
"""
def __init__( self, pilotParams ):
""" c'tor
"""
super( ConfigureCPURequirements, self ).__init__( pilotParams )
def execute( self ):
""" Get job CPU requirement and queue normalization
"""
# Determining the CPU normalization factor and updating pilot.cfg with it
configFileArg = ''
if self.pp.useServerCertificate:
configFileArg = '-o /DIRAC/Security/UseServerCertificate=yes'
if self.pp.localConfigFile:
configFileArg = '%s -R %s %s' % ( configFileArg, self.pp.localConfigFile, self.pp.localConfigFile )
retCode, cpuNormalizationFactorOutput = self.executeAndGetOutput( 'dirac-wms-cpu-normalization -U %s' % configFileArg,
self.pp.installEnv )
if retCode:
self.log.error( "Failed to determine cpu normalization [ERROR %d]" % retCode )
self.exitWithError( retCode )
# HS06 benchmark
# FIXME: this is a (necessary) hack!
cpuNormalizationFactor = float( cpuNormalizationFactorOutput.split( '\n' )[0].replace( "Estimated CPU power is ",
'' ).replace( " HS06", '' ) )
self.log.info( "Current normalized CPU as determined by 'dirac-wms-cpu-normalization' is %f" % cpuNormalizationFactor )
configFileArg = ''
if self.pp.useServerCertificate:
configFileArg = '-o /DIRAC/Security/UseServerCertificate=yes'
retCode, cpuTimeOutput = self.executeAndGetOutput( 'dirac-wms-get-queue-cpu-time %s %s' % ( configFileArg,
self.pp.localConfigFile ),
self.pp.installEnv )
if retCode:
self.log.error( "Failed to determine cpu time left in the queue [ERROR %d]" % retCode )
self.exitWithError( retCode )
for line in cpuTimeOutput.split( '\n' ):
if "CPU time left determined as" in line:
cpuTime = int(line.replace("CPU time left determined as", '').strip())
self.log.info( "CPUTime left (in seconds) is %s" % cpuTime )
# HS06s = seconds * HS06
try:
self.pp.jobCPUReq = float( cpuTime ) * float( cpuNormalizationFactor )
self.log.info( "Queue length (which is also set as CPUTimeLeft) is %f" % self.pp.jobCPUReq )
except ValueError:
self.log.error( 'Pilot command output does not have the correct format' )
sys.exit( 1 )
# now setting this value in local file
cfg = ['-FDMH']
if self.pp.useServerCertificate:
cfg.append( '-o /DIRAC/Security/UseServerCertificate=yes' )
if self.pp.localConfigFile:
cfg.append( '-O %s' % self.pp.localConfigFile ) # our target file for pilots
cfg.append( self.pp.localConfigFile ) # this file is also input
cfg.append( '-o /LocalSite/CPUTimeLeft=%s' % str( int( self.pp.jobCPUReq ) ) ) # the only real option
configureCmd = "%s %s" % ( self.pp.configureScript, " ".join( cfg ) )
retCode, _configureOutData = self.executeAndGetOutput( configureCmd, self.pp.installEnv )
if retCode:
self.log.error( "Failed to update CFG file for CPUTimeLeft [ERROR %d]" % retCode )
self.exitWithError( retCode )
class LaunchAgent( CommandBase ):
""" Prepare and launch the job agent
"""
def __init__( self, pilotParams ):
""" c'tor
"""
super( LaunchAgent, self ).__init__( pilotParams )
self.inProcessOpts = []
self.jobAgentOpts = []
def __setInProcessOpts( self ):
localUid = os.getuid()
try:
import pwd
localUser = pwd.getpwuid( localUid )[0]
except KeyError:
localUser = 'Unknown'
self.log.info( 'User Name = %s' % localUser )
self.log.info( 'User Id = %s' % localUid )
self.inProcessOpts = ['-s /Resources/Computing/CEDefaults' ]
self.inProcessOpts.append( '-o WorkingDirectory=%s' % self.pp.workingDir )
self.inProcessOpts.append( '-o /LocalSite/MaxCPUTime=%s' % ( int( self.pp.jobCPUReq ) ) )
self.inProcessOpts.append( '-o /LocalSite/CPUTime=%s' % ( int( self.pp.jobCPUReq ) ) )
# To prevent a wayward agent picking up and failing many jobs.
self.inProcessOpts.append( '-o MaxTotalJobs=%s' % 10 )
self.jobAgentOpts = ['-o MaxCycles=%s' % self.pp.maxCycles]
if self.debugFlag:
self.jobAgentOpts.append( '-o LogLevel=DEBUG' )
if self.pp.userGroup:
self.log.debug( 'Setting DIRAC Group to "%s"' % self.pp.userGroup )
self.inProcessOpts .append( '-o OwnerGroup="%s"' % self.pp.userGroup )
if self.pp.userDN:
self.log.debug( 'Setting Owner DN to "%s"' % self.pp.userDN )
self.inProcessOpts.append( '-o OwnerDN="%s"' % self.pp.userDN )
if self.pp.useServerCertificate:
self.log.debug( 'Setting UseServerCertificate flag' )
self.inProcessOpts.append( '-o /DIRAC/Security/UseServerCertificate=yes' )
# The instancePath is where the agent works
self.inProcessOpts.append( '-o /LocalSite/InstancePath=%s' % self.pp.workingDir )
# The file pilot.cfg has to be created previously by ConfigureDIRAC
if self.pp.localConfigFile:
self.inProcessOpts.append( ' -o /AgentJobRequirements/ExtraOptions=%s' % self.pp.localConfigFile )
self.inProcessOpts.append( self.pp.localConfigFile )
def __startJobAgent( self ):
""" Starting of the JobAgent
"""
# Find any .cfg file uploaded with the sandbox or generated by previous commands
diracAgentScript = "dirac-agent"
extraCFG = []
for i in os.listdir( self.pp.rootPath ):
cfg = os.path.join( self.pp.rootPath, i )
if os.path.isfile( cfg ) and cfg.endswith( '.cfg' ):
extraCFG.append( cfg )
if self.pp.executeCmd:
# Execute user command
self.log.info( "Executing user defined command: %s" % self.pp.executeCmd )
self.exitWithError( os.system( "source bashrc; %s" % self.pp.executeCmd ) / 256 )
self.log.info( 'Starting JobAgent' )
os.environ['PYTHONUNBUFFERED'] = 'yes'
jobAgent = '%s WorkloadManagement/JobAgent %s %s %s' % ( diracAgentScript,
" ".join( self.jobAgentOpts ),
" ".join( self.inProcessOpts ),
" ".join( extraCFG ) )
retCode, _output = self.executeAndGetOutput( jobAgent, self.pp.installEnv )
if retCode:
self.log.error( "Error executing the JobAgent [ERROR %d]" % retCode )
self.exitWithError( retCode )
fs = os.statvfs( self.pp.workingDir )
diskSpace = fs[4] * fs[0] / 1024 / 1024
self.log.info( 'DiskSpace (MB) = %s' % diskSpace )
def execute( self ):
""" What is called all the time
"""
self.__setInProcessOpts()
self.__startJobAgent()
sys.exit( 0 )
| Andrew-McNab-UK/DIRAC | WorkloadManagementSystem/PilotAgent/pilotCommands.py | Python | gpl-3.0 | 38,094 | [
"DIRAC"
] | d4c01850f13c28a7494f098b0705bd5034e4ac2decbe763fc3a6eab13dbb67a2 |
__author__ = "Brian Lenihan <brian.lenihan@gmail.com"
__copyright__ = "Copyright (c) 2012 Python for Android Project"
__license__ = "Apache License, Version 2.0"
import os
import logging
import sl4a
"""
Create and set a new Tasker variable, display the variable's value in a Tasker
popup, and then clear the variable.
Misc / Allow External Access must be set in Tasker's prefs.
Tasker action code reference:
http://tasker.dinglisch.net/ActionCodes.java
"""
SET_VARIABLE = 547
CLEAR_VARIABLE = 549
POPUP = 550
logging.basicConfig(level=logging.INFO)
class Tasker(object):
def __init__(self):
self.droid = sl4a.Android()
self.extras = dict(
version_number = '1.0',
task_name = 'tasker_demo.{}'.format(os.getpid()),
task_priority = 9)
self.actions = 0
def bundle(self, action, *args):
# Unused parameters are padded with False
args = list(args)
args.extend([False]*(6-len(args)))
self.actions += 1
self.extras.update(
{'action{}'.format(self.actions) : dict(
{'action' : action,
'arg:1' : args[0],
'arg:2' : args[1],
'arg:3' : args[2],
'arg:4' : args[3],
'arg:5' : args[4],
'arg:6' : args[5]})
})
def broadcast_intent(self):
intent = self.droid.makeIntent(
'net.dinglisch.sl4a.tasker.ACTION_TASK', None, None, self.extras).result
logging.debug("-- {}".format(intent))
self.droid.sendBroadcastIntent(intent)
if __name__ == "__main__":
tasker = Tasker()
tasker.bundle(SET_VARIABLE, "%PY4A_DEMO", "Hello from python")
# Popup: String title, String text, String background image, Scene layout,
# Integer timeout, Boolean show over keyguard, Boolean condition
tasker.bundle(POPUP, "Tasker", "%PY4A_DEMO", "", "Popup", 5, True, False)
tasker.bundle(CLEAR_VARIABLE, "%PY4A_DEMO")
tasker.broadcast_intent()
| tomMoulard/python-projetcs | scripts3/tasker_example.py | Python | apache-2.0 | 1,874 | [
"Brian"
] | 0544f6219091045f3573640eb388a85f918c5bf4d17b49b7dc16f4c6d6609650 |
"""Reusable decorators and functions for custom installations.
"""
from __future__ import print_function
from contextlib import contextmanager
import functools
import os
import socket
from string import Template
import sys
import tempfile
from tempfile import NamedTemporaryFile
import urllib
import uuid
import shutil
import subprocess
import time
# Optional fabric imports, for back compatibility
try:
from fabric.api import *
from fabric.contrib.files import *
from cloudbio.fabutils import quiet, warn_only
except ImportError:
pass
CBL_REPO_ROOT_URL = "https://raw.github.com/chapmanb/cloudbiolinux/master/"
# -- decorators and context managers
@contextmanager
def chdir(new_dir):
    """Temporarily switch the current working directory to ``new_dir``.

    Creates ``new_dir`` if it does not exist and always restores the
    previous working directory on exit, even when the body raises.
    http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
    """
    # On busy filesystems os.getcwd() can fail transiently; retry a
    # bounded number of times before giving up.
    retries = 0
    prev_dir = None
    while prev_dir is None:
        try:
            prev_dir = os.getcwd()
        except OSError:
            if retries > 5:
                raise
            retries += 1
            time.sleep(2)
    safe_makedir(new_dir)
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(prev_dir)
def safe_makedir(dname):
    """Create directory ``dname`` (and parents) if missing, tolerating races.

    Concurrent processes may create the directory between the existence
    check and ``os.makedirs``; the resulting OSError is absorbed and the
    check is retried a bounded number of times.

    Returns ``dname`` unchanged (including falsy values, which are a no-op).
    """
    if not dname:
        return dname
    failures = 0
    while not os.path.exists(dname):
        try:
            os.makedirs(dname)
        except OSError:
            # Another process may have won the race; pause and re-check.
            if failures > 5:
                raise
            failures += 1
            time.sleep(2)
    return dname
def which(program, env=None):
    """Locate ``program`` and return its full path, or None if not found.

    When ``env`` provides a ``system_install`` attribute, that directory
    and its ``anaconda`` subdirectory are searched in addition to $PATH.
    A ``program`` given with an explicit directory component is accepted
    only if that exact file is executable.
    """
    def _runnable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    search_dirs = os.environ["PATH"].split(os.pathsep)
    if env and hasattr(env, "system_install"):
        search_dirs.extend([env.system_install,
                            os.path.join(env.system_install, "anaconda")])
    head, _tail = os.path.split(program)
    if head:
        # Explicit path given: accept it only if it is executable.
        if _runnable(program):
            return program
        return None
    for directory in search_dirs:
        candidate = os.path.join(directory, program)
        if _runnable(candidate):
            return candidate
    return None
def _if_not_installed(pname):
"""Decorator that checks if a callable program is installed.
"""
def argcatcher(func):
functools.wraps(func)
def decorator(*args, **kwargs):
if _galaxy_tool_install(args):
run_function = not _galaxy_tool_present(args)
elif isinstance(pname, list):
run_function = any([_executable_not_on_path(x) for x in pname])
else:
run_function = _executable_not_on_path(pname)
if run_function:
return func(*args, **kwargs)
return decorator
return argcatcher
def _all_cbl_paths(env, ext):
"""Add paths to other non-system directories installed by CloudBioLinux.
"""
return ":".join("%s/%s" % (p, ext) for p in [env.system_install,
os.path.join(env.system_install, "anaconda")])
def _executable_not_on_path(pname):
    """Return True when ``pname`` cannot be run on the remote host.

    The command runs with CloudBioLinux bin/lib directories prepended to
    PATH and LD_LIBRARY_PATH; a shell return code of 127 is the standard
    "command not found" code.
    """
    cmd = ("export PATH=%s:$PATH && "
           "export LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH && %s"
           % (_all_cbl_paths(env, "bin"), _all_cbl_paths(env, "lib"), pname))
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True):
        result = env.safe_run(cmd)
        return result.return_code == 127
def _galaxy_tool_install(args):
try:
return args[0]["galaxy_tool_install"]
except:
return False
def _galaxy_tool_present(args):
    """Check whether a Galaxy tool install already created its env.sh file."""
    env_script = os.path.join(args[0]["system_install"], "env.sh")
    return env.safe_exists(env_script)
def _if_not_python_lib(library):
"""Decorator that checks if a python library is installed.
"""
def argcatcher(func):
functools.wraps(func)
def decorator(*args, **kwargs):
with settings(warn_only=True):
errcount = int(env.safe_run_output("%s -c 'import %s' 2>&1 | grep -c ImportError | cat" % (_python_cmd(env), library)))
result = 0 if errcount >= 1 else 1
if result == 0:
return func(*args, **kwargs)
else:
return result
return decorator
return argcatcher
@contextmanager
def make_tmp_dir_local(ext, work_dir):
    """Create a local scratch directory and remove it on exit.

    ``ext`` (when truthy) is appended to ``work_dir`` so nested
    CloudBioLinux installs get tool-specific directories and do not clash.
    Yields the (possibly extended) directory path.
    """
    if ext:
        work_dir += ext
    safe_makedir(work_dir)
    try:
        yield work_dir
    finally:
        # Bug fix: previously the directory leaked when the with-body
        # raised; always clean up.
        shutil.rmtree(work_dir)
@contextmanager
def _make_tmp_dir(ext=None, work_dir=None):
    """
    Setup a temporary working directory for building custom software. First checks
    fabric environment for a `work_dir` path, if that is not set it will use the
    remote path $TMPDIR/cloudbiolinux if $TMPDIR is defined remotely, finally falling
    back on remote $HOME/cloudbiolinux otherwise.
    `ext` allows creation of tool specific temporary directories to avoid conflicts
    using CloudBioLinux inside of CloudBioLinux.
    """
    if not work_dir:
        work_dir = __work_dir()
    if ext:
        # Tool-specific suffix appended directly (no separator added).
        work_dir += ext
    use_sudo = False
    if not env.safe_exists(work_dir):
        with settings(warn_only=True):
            # Try to create this directory without using sudo, but
            # if needed fallback.
            result = env.safe_run("mkdir -p '%s'" % work_dir)
            if result.return_code != 0:
                use_sudo = True
    if use_sudo:
        # Create via sudo, then hand ownership to the fabric user so
        # later non-privileged build steps can write into it.
        env.safe_sudo("mkdir -p '%s'" % work_dir)
        env.safe_sudo("chown -R %s '%s'" % (env.user, work_dir))
    yield work_dir
    # Cleanup mirrors creation: use sudo only if it was needed above.
    # NOTE(review): no try/finally here, so the directory is left behind
    # when the with-body raises — confirm whether that is intentional.
    if env.safe_exists(work_dir):
        run_func = env.safe_sudo if use_sudo else env.safe_run
        run_func("rm -rf %s" % work_dir)
def __work_dir():
    """Pick the remote working directory.

    Preference order: an explicit env `work_dir`, then $TMPDIR/cloudbiolinux
    on the remote host, finally $HOME/tmp/cloudbiolinux.
    """
    configured = env.get("work_dir", None)
    if configured:
        return configured
    with quiet():
        tmp_dir = env.safe_run_output("echo $TMPDIR")
        if tmp_dir.failed or not tmp_dir.strip():
            home_dir = env.safe_run_output("echo $HOME")
            tmp_dir = os.path.join(home_dir, "tmp")
    return os.path.join(tmp_dir.strip(), "cloudbiolinux")
# -- Standard build utility simplifiers
def _get_expected_file(url, dir_name=None, safe_tar=False, tar_file_name=None):
if tar_file_name:
tar_file = tar_file_name
else:
tar_file = os.path.split(url.split("?")[0])[-1]
safe_tar = "--pax-option='delete=SCHILY.*,delete=LIBARCHIVE.*'" if safe_tar else ""
exts = {(".tar.gz", ".tgz"): "tar %s -xzpf" % safe_tar,
(".tar",): "tar %s -xpf" % safe_tar,
(".tar.bz2",): "tar %s -xjpf" % safe_tar,
(".zip",): "unzip"}
for ext_choices, tar_cmd in exts.iteritems():
for ext in ext_choices:
if tar_file.endswith(ext):
if dir_name is None:
dir_name = tar_file[:-len(ext)]
return tar_file, dir_name, tar_cmd
raise ValueError("Did not find extract command for %s" % url)
def _safe_dir_name(dir_name, need_dir=True):
    """Find the directory an archive actually unpacked into.

    Tarballs do not always extract to a directory named after the file, so
    try progressively fuzzier matches against the remote working directory.
    Raises ValueError when need_dir is True and nothing plausible is found;
    otherwise returns None on failure.
    """
    # First try the expected name with common suffixes stripped.
    replace_try = ["", "-src", "_core"]
    for replace in replace_try:
        check = dir_name.replace(replace, "")
        if env.safe_exists(check):
            return check
    # still couldn't find it, it's a nasty one
    for check_part in (dir_name.split("-")[0].split("_")[0],
                       dir_name.split("-")[-1].split("_")[-1],
                       dir_name.split(".")[0],
                       dir_name.lower().split(".")[0]):
        with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                      warn_only=True):
            dirs = env.safe_run_output("ls -d1 *%s*/" % check_part).split("\n")
            # Filter shell error lines that ls may emit when nothing matches.
            dirs = [x for x in dirs if "cannot access" not in x and "No such" not in x]
            if len(dirs) == 1 and dirs[0]:
                return dirs[0]
    # Last resort: if exactly one directory exists, assume it is the one.
    dirs = env.safe_run_output("find * -type d -maxdepth 0").split("\n")
    if len(dirs) == 1 and dirs[0]:
        return dirs[0]
    if need_dir:
        raise ValueError("Could not find directory %s" % dir_name)
def _remote_fetch(env, url, out_file=None, allow_fail=False, fix_fn=None, samedir=False):
    """Retrieve url using wget, performing download in a temporary directory.
    Provides a central location to handle retrieval issues and avoid
    using interrupted downloads.

    out_file: target file name; defaults to the URL basename.
    allow_fail: return None instead of raising IOError on download failure.
    fix_fn: optional callable (env, out_file) -> out_file, run after download
        to post-process the fetched file.
    samedir: when True and out_file is absolute, download next to the final
        location and return an absolute path.
    """
    if out_file is None:
        out_file = os.path.basename(url)
    if not os.path.exists(out_file):
        if samedir and os.path.isabs(out_file):
            orig_dir = os.path.dirname(out_file)
            out_file = os.path.basename(out_file)
        else:
            orig_dir = os.getcwd()
        # Deterministic per-host/per-file temp dir name so repeated runs can
        # resume (wget --continue) while concurrent fetches don't collide.
        temp_ext = "/%s" % uuid.uuid3(uuid.NAMESPACE_URL,
                                      str("file://%s/%s/%s" %
                                          ("localhost", socket.gethostname(), out_file)))
        with make_tmp_dir_local(ext=temp_ext, work_dir=orig_dir) as tmp_dir:
            with chdir(tmp_dir):
                try:
                    subprocess.check_call("wget --continue --no-check-certificate -O %s '%s'"
                                          % (out_file, url), shell=True)
                    if fix_fn:
                        out_file = fix_fn(env, out_file)
                    # Only move into place once fully downloaded/fixed, so a
                    # partial download never ends up at the final location.
                    subprocess.check_call("mv %s %s" % (out_file, orig_dir), shell=True)
                except subprocess.CalledProcessError:
                    if allow_fail:
                        out_file = None
                    else:
                        raise IOError("Failure to retrieve remote file: %s" % url)
        if samedir and out_file:
            out_file = os.path.join(orig_dir, out_file)
    return out_file
def _fetch_and_unpack(url, need_dir=True, dir_name=None, revision=None,
                      safe_tar=False, tar_file_name=None):
    """Fetch `url` and return the directory it produced.

    `url` is either a full VCS checkout command (starting with git/svn/hg/cvs)
    or a plain archive URL, which is downloaded and unpacked.
    """
    if url.startswith(("git", "svn", "hg", "cvs")):
        # url is a complete checkout command (e.g. "git clone https://..."),
        # whose last token is the repository location.
        base = os.path.splitext(os.path.basename(url.split()[-1]))[0]
        # Remove stale checkouts so the clone always starts fresh.
        if env.safe_exists(base):
            env.safe_sudo("rm -rf {0}".format(base))
        env.safe_run(url)
        if revision:
            if url.startswith("git"):
                env.safe_run("cd %s && git checkout %s" % (base, revision))
            else:
                raise ValueError("Need to implement revision retrieval for %s" % url.split()[0])
        return base
    else:
        # If tar_file_name is provided, use it instead of the inferred one
        tar_file, dir_name, tar_cmd = _get_expected_file(url, dir_name, safe_tar, tar_file_name=tar_file_name)
        tar_file = _remote_fetch(env, url, tar_file)
        env.safe_run("%s %s" % (tar_cmd, tar_file))
        return _safe_dir_name(dir_name, need_dir)
def _configure_make(env):
    """Standard autotools build: configure with the CloudBioLinux prefix,
    then make, then a sudo make install."""
    env.safe_run("export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:%s/lib/pkgconfig && " \
                 "./configure --disable-werror --prefix=%s " %
                 (env.system_install, env.system_install))
    # Expose freshly installed shared libraries to the build and install steps.
    lib_export = "export LD_LIBRARY_PATH=%s/lib:$LD_LIBRARY_PATH" % env.system_install
    env.safe_run("%s && make" % lib_export)
    env.safe_sudo("%s && make install" % lib_export)
def _ac_configure_make(env):
    # Regenerate the autotools build system before the standard
    # configure/make/install cycle (for sources shipped without ./configure).
    env.safe_run("autoreconf -i -f")
    _configure_make(env)
def _make_copy(find_cmd=None, premake_cmd=None, do_make=True):
    """Return an install function that builds with make and copies artifacts.

    find_cmd: shell command whose output lists the files to copy into the
        system bin directory; skipped when None.
    premake_cmd: optional callable executed before running make.
    do_make: skip the make step entirely when False.
    """
    def _do_work(env):
        if premake_cmd:
            premake_cmd()
        if do_make:
            env.safe_run("make")
        if find_cmd:
            install_dir = _get_bin_dir(env)
            for fname in env.safe_run_output(find_cmd).split("\n"):
                # rstrip("\r") guards against CRLF line endings in the output.
                env.safe_sudo("cp -rf %s %s" % (fname.rstrip("\r"), install_dir))
    return _do_work
def _get_install(url, env, make_command, post_unpack_fn=None, revision=None, dir_name=None,
                 safe_tar=False, tar_file_name=None):
    """Retrieve source from a URL and install in our system directory.

    make_command: callable taking env that performs the build and install
        from inside the unpacked source directory.
    post_unpack_fn: optional hook run (with env) before the build.
    """
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            dir_name = _fetch_and_unpack(url, revision=revision, dir_name=dir_name,
                                         safe_tar=safe_tar, tar_file_name=tar_file_name)
        with cd(os.path.join(work_dir, dir_name)):
            if post_unpack_fn:
                post_unpack_fn(env)
            make_command(env)
def _apply_patch(env, url):
    """Download a patch file from `url` and apply it (patch -p0) in the
    current remote directory."""
    patch_name = os.path.basename(url)
    env.safe_run("wget {url}; patch -p0 < {patch}".format(url=url, patch=patch_name))
def _get_install_local(url, env, make_command, dir_name=None,
                       post_unpack_fn=None, safe_tar=False, tar_file_name=None):
    """Build and install in a local directory.

    Skips all work when a previous install is detected under
    env.local_install (checked via two candidate directory names).
    """
    (_, test_name, _) = _get_expected_file(url, safe_tar=safe_tar, tar_file_name=tar_file_name)
    test1 = os.path.join(env.local_install, test_name)
    # Guess an alternative directory name a prior install may have used.
    if dir_name is not None:
        test2 = os.path.join(env.local_install, dir_name)
    elif "-" in test1:
        test2, _ = test1.rsplit("-", 1)
    else:
        test2 = os.path.join(env.local_install, test_name.split("_")[0])
    if not env.safe_exists(test1) and not env.safe_exists(test2):
        with _make_tmp_dir() as work_dir:
            with cd(work_dir):
                dir_name = _fetch_and_unpack(url, dir_name=dir_name, safe_tar=safe_tar,
                                             tar_file_name=tar_file_name)
                if not env.safe_exists(os.path.join(env.local_install, dir_name)):
                    with cd(dir_name):
                        if post_unpack_fn:
                            post_unpack_fn(env)
                        make_command(env)
                    # Copy instead of move because GNU mv does not have --parents flag.
                    # The source dir will get cleaned up anyhow so just leave it.
                    destination_dir = env.local_install
                    env.safe_sudo("mkdir -p '%s'" % destination_dir)
                    env.safe_sudo("cp --recursive %s %s" % (dir_name, destination_dir))
# --- Language specific utilities
def _symlinked_install_dir(pname, version, env, extra_dir=None):
if extra_dir:
base_dir = os.path.join(env.system_install, "share", extra_dir, pname)
else:
base_dir = os.path.join(env.system_install, "share", pname)
return base_dir, "%s-%s" % (base_dir, version)
def _symlinked_dir_exists(pname, version, env, extra_dir=None):
    """Check if a symlinked directory exists and is non-empty.
    """
    _, install_dir = _symlinked_install_dir(pname, version, env, extra_dir)
    if env.safe_exists(install_dir):
        # An existing but empty versioned directory counts as not installed.
        items = env.safe_run_output("ls %s" % install_dir)
        if items.strip() != "":
            return True
    return False
def _symlinked_shared_dir(pname, version, env, extra_dir=None):
    """Create a symlinked directory of files inside the shared environment.

    Returns the versioned install directory when (re-)installation is needed,
    or None when the directory is already present and populated.
    """
    base_dir, install_dir = _symlinked_install_dir(pname, version, env, extra_dir)
    # Relative link target keeps the symlink valid if the tree is relocated.
    relative_install_dir = os.path.relpath(install_dir, os.path.dirname(base_dir))
    # Does not exist, change symlink to new directory
    if not env.safe_exists(install_dir):
        env.safe_sudo("mkdir -p %s" % install_dir)
        if env.safe_exists(base_dir):
            env.safe_sudo("rm -f %s" % base_dir)
        env.safe_sudo("ln -sf %s %s" % (relative_install_dir, base_dir))
        return install_dir
    items = env.safe_run_output("ls %s" % install_dir)
    # empty directory, change symlink and re-download
    if items.strip() == "":
        if env.safe_exists(base_dir):
            env.safe_sudo("rm -f %s" % base_dir)
        env.safe_sudo("ln -sf %s %s" % (relative_install_dir, base_dir))
        return install_dir
    # Create symlink if previously deleted
    if not env.safe_exists(base_dir):
        env.safe_sudo("ln -sf %s %s" % (relative_install_dir, base_dir))
    # Already installed and populated: signal "nothing to do".
    return None
def _symlinked_java_version_dir(pname, version, env):
    # Java jars live under share/java/<pname>-<version> with a <pname> symlink.
    return _symlinked_shared_dir(pname, version, env, extra_dir="java")
def _java_install(pname, version, url, env, install_fn=None,
                  pre_fetch_fn=None):
    """Download java jars into versioned input directories.
    pre_fetch_fn runs before URL retrieval, allowing insertion of
    manual steps like restricted downloads.

    install_fn: optional callable (env, install_dir) for custom install
        logic; by default all jars from the unpacked archive are moved
        into the versioned directory.
    """
    install_dir = _symlinked_java_version_dir(pname, version, env)
    # install_dir is None when this version is already present and populated.
    if install_dir:
        with _make_tmp_dir() as work_dir:
            with cd(work_dir):
                if pre_fetch_fn:
                    out = pre_fetch_fn(env)
                    # A None result signals the pre-fetch step decided to abort.
                    if out is None:
                        return
                dir_name = _fetch_and_unpack(url)
                with cd(dir_name):
                    if install_fn is not None:
                        install_fn(env, install_dir)
                    else:
                        env.safe_sudo("mv *.jar %s" % install_dir)
def _python_cmd(env):
    """Retrieve python command, handling tricky situations on CentOS.

    Prefers the bundled anaconda python; otherwise falls back to a versioned
    python binary (e.g. python2.7) only when the default python does not
    already satisfy env.python_version_ext.
    """
    anaconda_py = os.path.join(env.system_install, "anaconda", "bin", "python")
    if env.safe_exists(anaconda_py):
        return anaconda_py
    if "python_version_ext" in env and env.python_version_ext:
        # Compare the system python's major/minor against the requested one.
        major, minor = env.safe_run("python --version").split()[-1].split(".")[:2]
        check_major, check_minor = env.python_version_ext.split(".")[:2]
        if major != check_major or int(check_minor) > int(minor):
            return "python%s" % env.python_version_ext
        else:
            return "python"
    else:
        return "python"
def _pip_cmd(env):
    """Retrieve pip command for installing python packages, allowing configuration.

    Builds an ordered candidate list (anaconda pip, plain pip, configured
    env.pip_cmd, system-install pip, versioned pip) and returns the first
    one that responds to --version. Raises ValueError when none do.
    """
    anaconda_pip = os.path.join(env.system_install, "anaconda", "bin", "pip")
    if env.safe_exists(anaconda_pip):
        to_check = [anaconda_pip]
    else:
        to_check = ["pip"]
    if "pip_cmd" in env and env.pip_cmd:
        to_check.append(env.pip_cmd)
    if not env.use_sudo:
        to_check.append(os.path.join(env.system_install, "bin", "pip"))
    if "python_version_ext" in env and env.python_version_ext:
        to_check.append("pip-{0}".format(env.python_version_ext))
    for cmd in to_check:
        with quiet():
            pip_version = env.safe_run("%s --version" % cmd)
            if pip_version.succeeded:
                return cmd
    raise ValueError("Could not find pip installer from: %s" % to_check)
def _conda_cmd(env):
    """Locate a conda executable, or return None.

    Preference order: explicit env.conda_cmd, a conda next to the local
    python interpreter (localhost runs only), the anaconda install under
    system_install, then any conda on PATH.
    """
    if hasattr(env, "conda_cmd") and env.conda_cmd:
        return env.conda_cmd
    to_check = []
    if env.hosts == ["localhost"]:
        to_check.append(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "conda"))
    to_check.extend([os.path.join(env.system_install, "anaconda", "bin", "conda"), "conda"])
    for cmd in to_check:
        with quiet():
            # A candidate is usable if it responds to --version.
            test = env.safe_run("%s --version" % cmd)
            if test.succeeded:
                return cmd
    return None
def _is_anaconda(env):
    """Check if we have a conda command or are in an anaconda subdirectory.
    """
    with quiet():
        conda = _conda_cmd(env)
        has_conda = conda and env.safe_run_output("%s -h" % conda).startswith("usage: conda")
    with quiet():
        try:
            full_pip = env.safe_run_output("which %s" % _pip_cmd(env))
        except ValueError:
            # _pip_cmd raises ValueError when no working pip is found.
            full_pip = None
    in_anaconda_dir = full_pip and full_pip.succeeded and "/anaconda/" in full_pip
    return has_conda or in_anaconda_dir
def _python_make(env):
    """Install the python package in the current directory via pip.

    Uses plain run inside an anaconda install (user-writable), sudo otherwise.
    """
    run_cmd = env.safe_run if _is_anaconda(env) else env.safe_sudo
    # Clean up previously failed builds
    env.safe_sudo("rm -rf /tmp/pip-build-%s" % env.user)
    env.safe_sudo("rm -rf /tmp/pip-*-build")
    run_cmd("%s install --upgrade ." % _pip_cmd(env))
    # Remove build artifacts left in the source directory.
    for clean in ["dist", "build", "lib/*.egg-info"]:
        env.safe_sudo("rm -rf %s" % clean)
def _get_installed_file(env, local_file):
    """Return a local path to `local_file` from the installed_files directory.

    Falls back to downloading the file from the CloudBioLinux repository when
    no local checkout is available (library usage); the download lands in a
    temporary file whose path is returned instead.
    """
    installed_files_dir = \
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "installed_files")
    path = os.path.join(installed_files_dir, local_file)
    if not os.path.exists(path):
        # If using cloudbiolinux as a library, this won't be available,
        # download the file from github instead
        f = NamedTemporaryFile(delete=False)
        cloudbiolinx_repo_url = env.get("cloudbiolinux_repo_url", CBL_REPO_ROOT_URL)
        url = os.path.join(cloudbiolinx_repo_url, 'installed_files', local_file)
        urllib.urlretrieve(url, f.name)
        path = f.name
    return path
def _get_installed_file_contents(env, local_file):
    """Return the text contents of an installed_files resource.

    Bug fix: the previous implementation opened the file without ever
    closing it, leaking the handle; a context manager now guarantees closure.
    """
    with open(_get_installed_file(env, local_file), "r") as in_handle:
        return in_handle.read()
def _write_to_file(contents, path, mode):
"""
Use fabric to write string contents to remote file specified by path.
"""
fd, local_path = tempfile.mkstemp()
try:
os.write(fd, contents)
tmp_path = os.path.join("/tmp", os.path.basename(path))
env.safe_put(local_path, tmp_path)
env.safe_sudo("mv %s %s" % (tmp_path, path))
env.safe_sudo("chmod %s %s" % (mode, path))
os.close(fd)
finally:
os.unlink(local_path)
def _get_bin_dir(env):
    """
    When env.system_install is /usr this exists, but in the Galaxy
    it may not already exist.
    """
    return _get_install_subdir(env, "bin")

def _get_include_dir(env):
    # Header installation directory (<system_install>/include), created on demand.
    return _get_install_subdir(env, "include")

def _get_lib_dir(env):
    # Library installation directory (<system_install>/lib), created on demand.
    return _get_install_subdir(env, "lib")

def _get_install_subdir(env, subdir):
    # Create the subdirectory if missing; sudo is required because
    # system_install may not be writable by the deploy user.
    path = os.path.join(env.system_install, subdir)
    if not env.safe_exists(path):
        env.safe_sudo("mkdir -p '%s'" % path)
    return path
def _set_default_config(env, install_dir, sym_dir_name="default"):
    """
    Sets up default galaxy config directory symbolic link (if needed). Needed
    when it doesn't exists or when installing a new version of software.

    Bug fix: versions are now compared with LooseVersion instead of plain
    string comparison, so e.g. 1.10 correctly sorts after 1.9.
    """
    from distutils.version import LooseVersion
    version = env["tool_version"]
    if env.safe_exists(install_dir):
        install_dir_root = "%s/.." % install_dir
        sym_dir = "%s/%s" % (install_dir_root, sym_dir_name)
        replace_default = False
        if not env.safe_exists(sym_dir):
            replace_default = True
        if not replace_default:
            default_version = env.safe_sudo("basename `readlink -f %s`" % sym_dir)
            # Compare as versions, not strings (the old code's noted bug).
            if LooseVersion(str(version)) > LooseVersion(str(default_version)):
                print("default version %s is older than version %s just installed, replacing..." % (default_version, version))
                replace_default = True
        if replace_default:
            env.safe_sudo("rm -rf %s; ln -f -s %s %s" % (sym_dir, install_dir, sym_dir))
def _setup_simple_service(service_name):
    """
    Very Ubuntu/Debian specific, will need to be modified if used on other
    archs.

    Registers an /etc/init.d script with the SysV runlevels: kill (K01)
    links for halt/single-user/reboot and start (S99) links for the
    multi-user runlevels.
    """
    runlevel_links = [("rc0.d", "K01"), ("rc1.d", "K01"),
                      ("rc2.d", "S99"), ("rc3.d", "S99"),
                      ("rc4.d", "S99"), ("rc5.d", "S99"),
                      ("rc6.d", "K01")]
    for rc_dir, link_prefix in runlevel_links:
        sudo("ln -f -s /etc/init.d/%s /etc/%s/%s%s"
             % (service_name, rc_dir, link_prefix, service_name))
def _render_config_file_template(env, name, defaults={}, overrides={}, default_source=None):
    """
    If ``name`` is say ``nginx.conf``, check fabric environment for
    ``nginx_conf_path`` and then ``nginx_conf_template_path``. If
    ``nginx_conf_path`` is set, return the contents of that file. If
    nginx_conf_template_path is set, return the contents of that file
    but with variable interpolation performed. Variable interpolation
    is performed using a derivative of the fabric environment defined
    using the supplied ``defaults`` and ``overrides`` using the
    ``_extend_env`` function below.
    Finally, if neither ``nginx_conf_path`` or
    ``nginx_conf_template_path`` are set, check the
    ``installed_files`` directory for ``nginx.conf`` and finally
    ``nginx.conf.template``.
    """
    param_prefix = name.replace(".", "_")
    # Deployer can specify absolute path for config file, check this first
    path_key_name = "%s_path" % param_prefix
    template_key_name = "%s_template_path" % param_prefix
    if env.get(path_key_name, None):
        source_path = env[path_key_name]
        source_template = False
    elif env.get(template_key_name, None):
        source_path = env[template_key_name]
        source_template = True
    elif default_source:
        source_path = _get_installed_file(env, default_source)
        # Treat the default source as a template only by its extension.
        source_template = source_path.endswith(".template")
    else:
        default_template_name = "%s.template" % name
        source_path = _get_installed_file(env, default_template_name)
        source_template = True
    if source_template:
        # string.Template substitution with $var style placeholders.
        template = Template(open(source_path, "r").read())
        template_params = _extend_env(env, defaults=defaults, overrides=overrides)
        contents = template.substitute(template_params)
    else:
        contents = open(source_path, "r").read()
    return contents
def _extend_env(env, defaults={}, overrides={}):
"""
Create a new ``dict`` from fabric's ``env``, first adding defaults
specified via ``defaults`` (if available). Finally, override
anything in env, with values specified by ``overrides``.
"""
new_env = {}
for key, value in defaults.iteritems():
new_env[key] = value
for key, value in env.iteritems():
new_env[key] = value
for key, value in overrides.iteritems():
new_env[key] = value
return new_env
def _setup_conf_file(env, dest, name, defaults={}, overrides={}, default_source=None, mode="0755"):
    # Render the named config (file or template) and push it to `dest` on the
    # remote host with the requested permissions.
    conf_file_contents = _render_config_file_template(env, name, defaults, overrides, default_source)
    _write_to_file(conf_file_contents, dest, mode=mode)
def _add_to_profiles(line, profiles=[], use_sudo=True):
    """
    If it's not already there, append ``line`` to shell profiles files.
    By default, these are ``/etc/profile`` and ``/etc/bash.bashrc`` but can be
    overridden by providing a list of file paths to the ``profiles`` argument.
    """
    # NOTE: the mutable default is safe here because `profiles` is rebound,
    # never mutated in place.
    if not profiles:
        profiles = ['/etc/bash.bashrc', '/etc/profile']
    for profile in profiles:
        # Idempotent: only append when the line is not already present.
        if not env.safe_contains(profile, line):
            env.safe_append(profile, line, use_sudo=use_sudo)
def install_venvburrito():
    """
    If not already installed, install virtualenv-burrito
    (https://github.com/brainsik/virtualenv-burrito) as a convenient
    method for installing and managing Python virtualenvs.
    """
    url = "https://raw.github.com/brainsik/virtualenv-burrito/master/virtualenv-burrito.sh"
    # The presence of the startup script marks a completed install.
    if not env.safe_exists("$HOME/.venvburrito/startup.sh"):
        env.safe_run("curl -sL {0} | $SHELL".format(url))
        # Add the startup script into the ubuntu user's bashrc
        _add_to_profiles(". $HOME/.venvburrito/startup.sh", [env.shell_config], use_sudo=False)
def _create_python_virtualenv(env, venv_name, reqs_file=None, reqs_url=None):
    """
    Using virtual-burrito, create a new Python virtualenv named ``venv_name``.
    Do so only if the virtualenv of the given name does not already exist.
    virtual-burrito installs virtualenvs in ``$HOME/.virtualenvs``.
    By default, an empty virtualenv is created. Python libraries can be
    installed into the virutalenv at the time of creation by providing a path
    to the requirements.txt file (``reqs_file``). Instead of providing the file,
    a url to the file can be provided via ``reqs_url``, in which case the
    requirements file will first be downloaded. Note that if the ``reqs_url``
    is provided, the downloaded file will take precedence over ``reqs_file``.
    """
    # First make sure virtualenv-burrito is installed
    install_venvburrito()
    activate_vburrito = ". $HOME/.venvburrito/startup.sh"
    def create():
        # An explicit venv_directory means a local, directory-based install;
        # otherwise use the global per-user virtualenv managed by vburrito.
        if "venv_directory" not in env:
            _create_global_python_virtualenv(env, venv_name, reqs_file, reqs_url)
        else:
            _create_local_python_virtualenv(env, venv_name, reqs_file, reqs_url)
    # TODO: Terrible hack here, figure it out and fix it.
    # prefix or vburrito does not work with is_local or at least deployer+is_local
    if env.is_local:
        create()
    else:
        with prefix(activate_vburrito):
            create()
def _create_local_python_virtualenv(env, venv_name, reqs_file, reqs_url):
    """
    Use virtualenv directly to setup virtualenv in specified directory.
    """
    venv_directory = env.get("venv_directory")
    if not env.safe_exists(venv_directory):
        if reqs_url:
            # A requirements URL takes precedence: download it over reqs_file.
            _remote_fetch(env, reqs_url, reqs_file)
        env.logger.debug("Creating virtualenv in directory %s" % venv_directory)
        env.safe_sudo("virtualenv --no-site-packages '%s'" % venv_directory)
        env.logger.debug("Activating")
        env.safe_sudo(". %s/bin/activate; pip install -r '%s'" % (venv_directory, reqs_file))
def _create_global_python_virtualenv(env, venv_name, reqs_file, reqs_url):
    """
    Use mkvirtualenv to setup this virtualenv globally for user.
    """
    # lsvirtualenv lists existing envs; `|| true` keeps the command from
    # failing when grep finds no match.
    if venv_name in env.safe_run_output("bash -l -c lsvirtualenv | grep {0} || true"
                                        .format(venv_name)):
        env.logger.info("Virtualenv {0} already exists".format(venv_name))
    else:
        with _make_tmp_dir():
            if reqs_file or reqs_url:
                if not reqs_file:
                    # This mean the url only is provided so 'standardize ' the file name
                    reqs_file = 'requirements.txt'
                cmd = "bash -l -c 'mkvirtualenv -r {0} {1}'".format(reqs_file, venv_name)
            else:
                cmd = "bash -l -c 'mkvirtualenv {0}'".format(venv_name)
            if reqs_url:
                # Downloaded requirements take precedence over a local file.
                _remote_fetch(env, reqs_url, reqs_file)
            env.safe_run(cmd)
            env.logger.info("Finished installing virtualenv {0}".format(venv_name))
def _get_bitbucket_download_url(revision, default_repo):
if revision.startswith("http"):
url = revision
else:
url = "%s/get/%s.tar.gz" % (default_repo, revision)
return url
def _read_boolean(env, name, default):
property_str = env.get(name, str(default))
return property_str.upper() in ["TRUE", "YES"]
| chapmanb/cloudbiolinux | cloudbio/custom/shared.py | Python | mit | 30,622 | [
"Galaxy"
] | 80cb0407771edf3fc83537695e5940469858d63b26facbc26787bf973dd5070d |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements equivalents of the basic ComputedEntry objects, which
is the basic entity that can be used to perform many analyses. ComputedEntries
contain calculated information, typically from VASP or other electronic
structure codes. For example, ComputedEntries can be used as inputs for phase
diagram analysis.
"""
import json
import abc
from monty.json import MontyEncoder, MontyDecoder, MSONable
from pymatgen.core.composition import Composition
from pymatgen.core.structure import Structure
from pymatgen.entries import Entry
__author__ = "Ryan Kingsbury, Shyue Ping Ong, Anubhav Jain"
__copyright__ = "Copyright 2011-2020, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "April 2020"
class EnergyAdjustment(MSONable):
    """
    Lightweight class to contain information about an energy adjustment or
    energy correction.

    Abstract base class: concrete subclasses must implement the ``value``
    property and the ``_normalize`` scaling hook.
    """
    def __init__(self, value, name="Manual adjustment", cls=None, description=""):
        """
        Args:
            value: float, value of the energy adjustment in eV
            name: str, human-readable name of the energy adjustment.
                (Default: Manual adjustment)
            cls: dict, Serialized Compatibility class used to generate the energy adjustment. (Default: None)
            description: str, human-readable explanation of the energy adjustment.
        """
        # NOTE: `value` is not stored here; each subclass defines how the
        # numeric value is derived via the abstract `value` property.
        self.name = name
        self.cls = cls if cls else {}
        self.description = description

    @property
    @abc.abstractmethod
    def value(self):
        """
        Return the value of the energy adjustment in eV
        """

    def __repr__(self):
        output = ["{}:".format(self.__class__.__name__),
                  " Name: {}".format(self.name),
                  " Value: {:.3f} eV".format(self.value),
                  " Description: {}".format(self.description),
                  " Generated by: {}".format(self.cls.get("@class", None))]
        return "\n".join(output)

    @abc.abstractmethod
    def _normalize(self, factor):
        """
        Scale the value of the energy adjustment by factor.
        This method is utilized in ComputedEntry.normalize() to scale the energies to a formula unit basis
        (e.g. E_Fe6O9 = 3 x E_Fe2O3).
        """
class ConstantEnergyAdjustment(EnergyAdjustment):
    """
    A constant energy adjustment applied to a ComputedEntry. Useful in energy referencing
    schemes such as the Aqueous energy referencing scheme.
    """
    def __init__(self, value, name="Constant energy adjustment", cls=None, description="Constant energy adjustment"):
        """
        Args:
            value: float, value of the energy adjustment in eV
            name: str, human-readable name of the energy adjustment.
                (Default: Constant energy adjustment)
            cls: dict, Serialized Compatibility class used to generate the energy adjustment. (Default: None)
            description: str, human-readable explanation of the energy adjustment.
        """
        # Embed the numeric value in the description for readable reprs.
        description = description + " ({:.3f} eV)".format(value)
        super().__init__(value, name, cls, description)
        self._value = value

    @property
    def value(self):
        """
        Return the value of the energy correction in eV.
        """
        return self._value

    @value.setter
    def value(self, x):
        self._value = x

    def _normalize(self, factor):
        # A constant adjustment scales directly with the normalization factor.
        self._value /= factor
class ManualEnergyAdjustment(ConstantEnergyAdjustment):
    """
    A manual energy adjustment applied to a ComputedEntry.

    Thin convenience subclass of ConstantEnergyAdjustment with a fixed
    name/description; used by ComputedEntry's `correction` setter.
    """
    def __init__(self, value):
        """
        Args:
            value: float, value of the energy adjustment in eV
        """
        name = "Manual energy adjustment"
        description = "Manual energy adjustment"
        super().__init__(value, name, cls=None, description=description)
class CompositionEnergyAdjustment(EnergyAdjustment):
    """
    An energy adjustment applied to a ComputedEntry based on the atomic composition.
    Used in various DFT energy correction schemes.
    """
    def __init__(self, adj_per_atom, n_atoms, name, cls=None, description="Composition-based energy adjustment"):
        """
        Args:
            adj_per_atom: float, energy adjustment to apply per atom, in eV/atom
            n_atoms: float or int, number of atoms
            name: str, human-readable name of the energy adjustment.
                (Default: "")
            cls: dict, Serialized Compatibility class used to generate the energy adjustment. (Default: None)
            description: str, human-readable explanation of the energy adjustment.
        """
        # _value holds the per-atom adjustment; the total is computed in `value`.
        self._value = adj_per_atom
        self.n_atoms = n_atoms
        self.cls = cls if cls else {}
        self.name = name
        self.description = description + " ({:.3f} eV/atom x {} atoms)".format(self._value,
                                                                               self.n_atoms
                                                                               )

    @property
    def value(self):
        """
        Return the value of the energy adjustment in eV.
        """
        return self._value * self.n_atoms

    def _normalize(self, factor):
        # Normalization changes the atom count, not the per-atom adjustment.
        self.n_atoms /= factor
class TemperatureEnergyAdjustment(EnergyAdjustment):
    """
    An energy adjustment applied to a ComputedEntry based on the temperature.
    Used, for example, to add entropy to DFT energies.
    """
    def __init__(self, adj_per_deg, temp, n_atoms, name="", cls=None,
                 description="Temperature-based energy adjustment"):
        """
        Args:
            adj_per_deg: float, energy adjustment to apply per degree K, in eV/atom
            temp: float, temperature in Kelvin
            n_atoms: float or int, number of atoms
            name: str, human-readable name of the energy adjustment.
                (Default: "")
            cls: dict, Serialized Compatibility class used to generate the energy adjustment. (Default: None)
            description: str, human-readable explanation of the energy adjustment.
        """
        # _value holds the per-degree-per-atom adjustment; see `value`.
        self._value = adj_per_deg
        self.temp = temp
        self.n_atoms = n_atoms
        self.name = name
        self.cls = cls if cls else {}
        self.description = description + " ({:.4f} eV/K/atom x {} K x {} atoms)".format(self._value,
                                                                                        self.temp,
                                                                                        self.n_atoms,
                                                                                        )

    @property
    def value(self):
        """
        Return the value of the energy correction in eV.
        """
        return self._value * self.temp * self.n_atoms

    def _normalize(self, factor):
        # Normalization changes the atom count, not the per-atom rate or temp.
        self.n_atoms /= factor
class ComputedEntry(Entry):
    """
    Lightweight Entry object for computed data. Contains facilities
    for applying corrections to the .energy attribute and for storing
    calculation parameters.
    """
    def __init__(self,
                 composition: Composition,
                 energy: float,
                 correction: float = 0.0,
                 energy_adjustments: list = None,
                 parameters: dict = None,
                 data: dict = None,
                 entry_id: object = None):
        """
        Initializes a ComputedEntry.
        Args:
            composition (Composition): Composition of the entry. For
                flexibility, this can take the form of all the typical input
                taken by a Composition, including a {symbol: amt} dict,
                a string formula, and others.
            energy (float): Energy of the entry. Usually the final calculated
                energy from VASP or other electronic structure codes.
            correction (float): Manually supplied total correction in eV.
                Mutually exclusive with energy_adjustments.
            energy_adjustments: An optional list of EnergyAdjustment to
                be applied to the energy. This is used to modify the energy for
                certain analyses. Defaults to None.
            parameters: An optional dict of parameters associated with
                the entry. Defaults to None.
            data: An optional dict of any additional data associated
                with the entry. Defaults to None.
            entry_id: An optional id to uniquely identify the entry.
        """
        super().__init__(composition, energy)
        # Keep the raw (uncorrected) energy; the `energy` property adds the
        # sum of all adjustments on top of it.
        self.uncorrected_energy = self._energy
        self.energy_adjustments = energy_adjustments if energy_adjustments else []
        if correction != 0.0:
            if energy_adjustments:
                raise ValueError("Argument conflict! Setting correction = {:.3f} conflicts "
                                 "with setting energy_adjustments. Specify one or the "
                                 "other.".format(correction))
            # Routed through the setter, which wraps it in a ManualEnergyAdjustment.
            self.correction = correction
        self.parameters = parameters if parameters else {}
        self.data = data if data else {}
        self.entry_id = entry_id
        self.name = self.composition.reduced_formula

    @property
    def energy(self) -> float:
        """
        :return: the *corrected* energy of the entry.
        """
        return self._energy + self.correction

    @property
    def correction(self) -> float:
        """
        Returns:
            float: the total energy correction / adjustment applied to the entry,
                in eV.
        """
        # Always derived from the adjustment list; never stored separately.
        return sum([e.value for e in self.energy_adjustments])

    @correction.setter
    def correction(self, x: float) -> None:
        # Setting a correction replaces any existing adjustments with a
        # single manual one.
        corr = ManualEnergyAdjustment(x)
        self.energy_adjustments = [corr]

    def normalize(self, mode: str = "formula_unit") -> None:
        """
        Normalize the entry's composition and energy.
        Args:
            mode: "formula_unit" is the default, which normalizes to
                composition.reduced_formula. The other option is "atom", which
                normalizes such that the composition amounts sum to 1.
        """
        factor = self._normalization_factor(mode)
        self.uncorrected_energy /= factor
        # Each adjustment rescales itself so `correction` stays consistent.
        for ea in self.energy_adjustments:
            ea._normalize(factor)
        super().normalize(mode)

    def __repr__(self):
        n_atoms = self.composition.num_atoms
        output = ["{} {:<10} - {:<12} ({})".format(str(self.entry_id),
                                                   type(self).__name__,
                                                   self.composition.formula,
                                                   self.composition.reduced_formula),
                  "{:<24} = {:<9.4f} eV ({:<8.4f} eV/atom)".format("Energy (Uncorrected)",
                                                                   self._energy,
                                                                   self._energy / n_atoms),
                  "{:<24} = {:<9.4f} eV ({:<8.4f} eV/atom)".format("Correction",
                                                                   self.correction,
                                                                   self.correction / n_atoms),
                  "{:<24} = {:<9.4f} eV ({:<8.4f} eV/atom)".format("Energy (Final)",
                                                                   self.energy,
                                                                   self.energy_per_atom),
                  "Energy Adjustments:"
                  ]
        if len(self.energy_adjustments) == 0:
            output.append(" None")
        else:
            for e in self.energy_adjustments:
                output.append(" {:<23}: {:<9.4f} eV ({:<8.4f} eV/atom)".format(e.name,
                                                                               e.value,
                                                                               e.value / n_atoms))
        output.append("Parameters:")
        for k, v in self.parameters.items():
            output.append(" {:<22} = {}".format(k, v))
        output.append("Data:")
        for k, v in self.data.items():
            output.append(" {:<22} = {}".format(k, v))
        return "\n".join(output)

    def __str__(self):
        return self.__repr__()

    @classmethod
    def from_dict(cls, d) -> 'ComputedEntry':
        """
        :param d: Dict representation.
        :return: ComputedEntry
        """
        dec = MontyDecoder()
        # the first block here is for legacy ComputedEntry that were
        # serialized before we had the energy_adjustments attribute.
        if d["correction"] != 0 and not d.get("energy_adjustments"):
            return cls(d["composition"], d["energy"], d["correction"],
                       parameters={k: dec.process_decoded(v)
                                   for k, v in d.get("parameters", {}).items()},
                       data={k: dec.process_decoded(v)
                             for k, v in d.get("data", {}).items()},
                       entry_id=d.get("entry_id", None))
        # this is the preferred / modern way of instantiating ComputedEntry
        # we don't pass correction explicitly because it will be calculated
        # on the fly from energy_adjustments
        else:
            return cls(d["composition"], d["energy"], correction=0,
                       energy_adjustments=[dec.process_decoded(e)
                                           for e in d.get("energy_adjustments", {})],
                       parameters={k: dec.process_decoded(v)
                                   for k, v in d.get("parameters", {}).items()},
                       data={k: dec.process_decoded(v)
                             for k, v in d.get("data", {}).items()},
                       entry_id=d.get("entry_id", None))

    def as_dict(self) -> dict:
        """
        :return: MSONable dict.
        """
        return_dict = super().as_dict()
        # Round-trip nested objects through MontyEncoder so EnergyAdjustment
        # instances and other MSONables serialize to plain JSON structures.
        return_dict.update({"energy_adjustments": json.loads(json.dumps(self.energy_adjustments, cls=MontyEncoder)),
                            "parameters": json.loads(json.dumps(self.parameters, cls=MontyEncoder)),
                            "data": json.loads(json.dumps(self.data, cls=MontyEncoder)),
                            "entry_id": self.entry_id,
                            "correction": self.correction})
        return return_dict
class ComputedStructureEntry(ComputedEntry):
"""
A heavier version of ComputedEntry which contains a structure as well. The
structure is needed for some analyses.
"""
def __init__(self,
structure: Structure,
energy: float,
correction: float = 0.0,
energy_adjustments: list = None,
parameters: dict = None,
data: dict = None,
entry_id: object = None):
"""
Initializes a ComputedStructureEntry.
Args:
structure (Structure): The actual structure of an entry.
energy (float): Energy of the entry. Usually the final calculated
energy from VASP or other electronic structure codes.
energy_adjustments: An optional list of EnergyAdjustment to
be applied to the energy. This is used to modify the energy for
certain analyses. Defaults to None.
parameters: An optional dict of parameters associated with
the entry. Defaults to None.
data: An optional dict of any additional data associated
with the entry. Defaults to None.
entry_id: An optional id to uniquely identify the entry.
"""
super().__init__(
structure.composition, energy, correction=correction, energy_adjustments=energy_adjustments,
parameters=parameters, data=data, entry_id=entry_id)
self.structure = structure
def as_dict(self) -> dict:
"""
:return: MSONAble dict.
"""
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["structure"] = self.structure.as_dict()
return d
@classmethod
def from_dict(cls, d) -> 'ComputedStructureEntry':
"""
:param d: Dict representation.
:return: ComputedStructureEntry
"""
dec = MontyDecoder()
# the first block here is for legacy ComputedEntry that were
# serialized before we had the energy_adjustments attribute.
if d["correction"] != 0 and not d.get("energy_adjustments"):
return cls(dec.process_decoded(d["structure"]), d["energy"], d["correction"],
parameters={k: dec.process_decoded(v)
for k, v in d.get("parameters", {}).items()},
data={k: dec.process_decoded(v)
for k, v in d.get("data", {}).items()},
entry_id=d.get("entry_id", None))
# this is the preferred / modern way of instantiating ComputedEntry
# we don't pass correction explicitly because it will be calculated
# on the fly from energy_adjustments
else:
return cls(dec.process_decoded(d["structure"]), d["energy"], correction=0,
energy_adjustments=[dec.process_decoded(e)
for e in d.get("energy_adjustments", {})],
parameters={k: dec.process_decoded(v)
for k, v in d.get("parameters", {}).items()},
data={k: dec.process_decoded(v)
for k, v in d.get("data", {}).items()},
entry_id=d.get("entry_id", None))
| mbkumar/pymatgen | pymatgen/entries/computed_entries.py | Python | mit | 18,004 | [
"VASP",
"pymatgen"
] | e9d6cd84b2642eb25f9fc6816256efdabddb55cfa91f27d8e891db249d0db172 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import copy
import pandas as pd
import numpy as np
import numpy.testing as npt
from skbio.util._testing import assert_data_frame_almost_equal
from skbio.metadata import IntervalMetadata
class MetadataMixinTests:
    """Mixin of unit tests for objects exposing the ``metadata`` property.

    Concrete test classes must mix this in alongside a ``TestCase`` and
    provide ``self._metadata_constructor_``: a callable accepting a
    ``metadata`` keyword argument that returns the object under test.
    ``assertReallyEqual``/``assertReallyNotEqual`` are likewise expected
    to be supplied by the host test class.
    """

    def test_constructor_invalid_type(self):
        # Anything that is not a dict must be rejected up front.
        for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
            with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
                self._metadata_constructor_(metadata=md)

    def test_constructor_no_metadata(self):
        # Both "not provided" (None) and an empty dict mean "no metadata".
        for md in None, {}:
            obj = self._metadata_constructor_(metadata=md)
            self.assertFalse(obj.has_metadata())
            self.assertEqual(obj.metadata, {})

    def test_constructor_with_metadata(self):
        # Arbitrary hashable keys (including '' and ints) are permitted.
        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
        self.assertEqual(obj.metadata, {'foo': 'bar'})
        obj = self._metadata_constructor_(
            metadata={'': '', 123: {'a': 'b', 'c': 'd'}})
        self.assertEqual(obj.metadata, {'': '', 123: {'a': 'b', 'c': 'd'}})

    def test_constructor_handles_missing_metadata_efficiently(self):
        # Missing metadata is stored internally as None, not an empty dict.
        self.assertIsNone(self._metadata_constructor_()._metadata)
        self.assertIsNone(self._metadata_constructor_(metadata=None)._metadata)

    def test_constructor_makes_shallow_copy_of_metadata(self):
        md = {'foo': 'bar', 42: []}
        obj = self._metadata_constructor_(metadata=md)
        self.assertEqual(obj.metadata, md)
        self.assertIsNot(obj.metadata, md)
        # Rebinding a key in the caller's dict does not affect the object...
        md['foo'] = 'baz'
        self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
        # ...but mutating a shared value does (shallow, not deep, copy).
        md[42].append(True)
        self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})

    def test_eq(self):
        self.assertReallyEqual(
            self._metadata_constructor_(metadata={'foo': 42}),
            self._metadata_constructor_(metadata={'foo': 42}))
        self.assertReallyEqual(
            self._metadata_constructor_(metadata={'foo': 42, 123: {}}),
            self._metadata_constructor_(metadata={'foo': 42, 123: {}}))

    def test_eq_missing_metadata(self):
        # None and {} metadata compare equal in every combination.
        self.assertReallyEqual(self._metadata_constructor_(),
                               self._metadata_constructor_())
        self.assertReallyEqual(self._metadata_constructor_(),
                               self._metadata_constructor_(metadata={}))
        self.assertReallyEqual(self._metadata_constructor_(metadata={}),
                               self._metadata_constructor_(metadata={}))

    def test_eq_handles_missing_metadata_efficiently(self):
        obj1 = self._metadata_constructor_()
        obj2 = self._metadata_constructor_()
        self.assertReallyEqual(obj1, obj2)
        # Comparing must not materialize an empty dict as a side effect.
        self.assertIsNone(obj1._metadata)
        self.assertIsNone(obj2._metadata)

    def test_ne(self):
        # Both have metadata.
        obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
        obj2 = self._metadata_constructor_(metadata={'id': 'bar'})
        self.assertReallyNotEqual(obj1, obj2)
        # One has metadata.
        obj1 = self._metadata_constructor_(metadata={'id': 'foo'})
        obj2 = self._metadata_constructor_()
        self.assertReallyNotEqual(obj1, obj2)

    def test_copy_metadata_none(self):
        obj = self._metadata_constructor_()
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._metadata)
        self.assertIsNone(obj_copy._metadata)

    def test_copy_metadata_empty(self):
        obj = self._metadata_constructor_(metadata={})
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertEqual(obj._metadata, {})
        # Copying normalizes an empty dict back to the efficient None form.
        self.assertIsNone(obj_copy._metadata)

    def test_copy_with_metadata(self):
        obj = self._metadata_constructor_(metadata={'foo': [1]})
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNot(obj._metadata, obj_copy._metadata)
        # Shallow copy: the dicts differ but their values are shared.
        self.assertIs(obj._metadata['foo'], obj_copy._metadata['foo'])
        obj_copy.metadata['foo'].append(2)
        obj_copy.metadata['foo2'] = 42
        self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
        # The shared list mutation is visible on the original too.
        self.assertEqual(obj.metadata, {'foo': [1, 2]})

    def test_deepcopy_metadata_none(self):
        obj = self._metadata_constructor_()
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._metadata)
        self.assertIsNone(obj_copy._metadata)

    def test_deepcopy_metadata_empty(self):
        obj = self._metadata_constructor_(metadata={})
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertEqual(obj._metadata, {})
        self.assertIsNone(obj_copy._metadata)

    def test_deepcopy_with_metadata(self):
        obj = self._metadata_constructor_(metadata={'foo': [1]})
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNot(obj._metadata, obj_copy._metadata)
        # Deep copy: contained values are duplicated as well.
        self.assertIsNot(obj._metadata['foo'], obj_copy._metadata['foo'])
        obj_copy.metadata['foo'].append(2)
        obj_copy.metadata['foo2'] = 42
        self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})
        # Original is untouched by mutations of the deep copy.
        self.assertEqual(obj.metadata, {'foo': [1]})

    def test_deepcopy_memo_is_respected(self):
        # Basic test to ensure deepcopy's memo is passed through to recursive
        # deepcopy calls.
        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
        memo = {}
        copy.deepcopy(obj, memo)
        self.assertGreater(len(memo), 2)

    def test_metadata_getter(self):
        obj = self._metadata_constructor_(
            metadata={42: 'foo', ('hello', 'world'): 43})
        self.assertIsInstance(obj.metadata, dict)
        self.assertEqual(obj.metadata, {42: 'foo', ('hello', 'world'): 43})
        # The returned dict is live: mutating it mutates the object.
        obj.metadata[42] = 'bar'
        self.assertEqual(obj.metadata, {42: 'bar', ('hello', 'world'): 43})

    def test_metadata_getter_no_metadata(self):
        obj = self._metadata_constructor_()
        self.assertIsNone(obj._metadata)
        self.assertIsInstance(obj.metadata, dict)
        self.assertEqual(obj.metadata, {})
        # First getter access lazily materializes the dict.
        self.assertIsNotNone(obj._metadata)

    def test_metadata_setter(self):
        obj = self._metadata_constructor_()
        self.assertFalse(obj.has_metadata())
        obj.metadata = {'hello': 'world'}
        self.assertTrue(obj.has_metadata())
        self.assertEqual(obj.metadata, {'hello': 'world'})
        # Assigning an empty dict clears has_metadata().
        obj.metadata = {}
        self.assertFalse(obj.has_metadata())
        self.assertEqual(obj.metadata, {})

    def test_metadata_setter_makes_shallow_copy(self):
        obj = self._metadata_constructor_()
        md = {'foo': 'bar', 42: []}
        obj.metadata = md
        self.assertEqual(obj.metadata, md)
        self.assertIsNot(obj.metadata, md)
        # Same shallow-copy semantics as the constructor.
        md['foo'] = 'baz'
        self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})
        md[42].append(True)
        self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})

    def test_metadata_setter_invalid_type(self):
        obj = self._metadata_constructor_(metadata={123: 456})
        for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
                   pd.DataFrame()):
            with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):
                obj.metadata = md
            # A failed assignment leaves the previous metadata intact.
            self.assertEqual(obj.metadata, {123: 456})

    def test_metadata_deleter(self):
        obj = self._metadata_constructor_(metadata={'foo': 'bar'})
        self.assertEqual(obj.metadata, {'foo': 'bar'})
        del obj.metadata
        self.assertIsNone(obj._metadata)
        self.assertFalse(obj.has_metadata())
        # Delete again.
        del obj.metadata
        self.assertIsNone(obj._metadata)
        self.assertFalse(obj.has_metadata())
        # Deleting on an object that never had metadata is also a no-op.
        obj = self._metadata_constructor_()
        self.assertIsNone(obj._metadata)
        self.assertFalse(obj.has_metadata())
        del obj.metadata
        self.assertIsNone(obj._metadata)
        self.assertFalse(obj.has_metadata())

    def test_has_metadata(self):
        obj = self._metadata_constructor_()
        self.assertFalse(obj.has_metadata())
        # Handles metadata efficiently.
        self.assertIsNone(obj._metadata)
        self.assertFalse(
            self._metadata_constructor_(metadata={}).has_metadata())
        self.assertTrue(
            self._metadata_constructor_(metadata={'': ''}).has_metadata())
        self.assertTrue(
            self._metadata_constructor_(
                metadata={'foo': 42}).has_metadata())
class PositionalMetadataMixinTests:
    """Mixin of unit tests for the ``positional_metadata`` property.

    Concrete test classes must provide
    ``self._positional_metadata_constructor_``: a callable taking an axis
    length and an optional ``positional_metadata`` keyword argument and
    returning the object under test. Positional metadata is stored as a
    ``pd.DataFrame`` whose row count must match the axis length and whose
    index is normalized to a ``pd.RangeIndex``.
    """

    def test_constructor_invalid_positional_metadata_type(self):
        # Input must be consumable by the pd.DataFrame constructor.
        with self.assertRaisesRegex(TypeError,
                                    'Invalid positional metadata. Must be '
                                    'consumable by `pd.DataFrame` constructor.'
                                    ' Original pandas error message: '):
            self._positional_metadata_constructor_(0, positional_metadata=2)

    def test_constructor_positional_metadata_len_mismatch(self):
        # Zero elements.
        with self.assertRaisesRegex(ValueError, r'\(0\).*\(4\)'):
            self._positional_metadata_constructor_(4, positional_metadata=[])
        # Not enough elements.
        with self.assertRaisesRegex(ValueError, r'\(3\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=[2, 3, 4])
        # Too many elements.
        with self.assertRaisesRegex(ValueError, r'\(5\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=[2, 3, 4, 5, 6])
        # Series not enough rows.
        with self.assertRaisesRegex(ValueError, r'\(3\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=pd.Series(range(3)))
        # Series too many rows.
        with self.assertRaisesRegex(ValueError, r'\(5\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=pd.Series(range(5)))
        # DataFrame not enough rows.
        with self.assertRaisesRegex(ValueError, r'\(3\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=pd.DataFrame({'quality': range(3)}))
        # DataFrame too many rows.
        with self.assertRaisesRegex(ValueError, r'\(5\).*\(4\)'):
            self._positional_metadata_constructor_(
                4, positional_metadata=pd.DataFrame({'quality': range(5)}))
        # Empty DataFrame wrong size.
        with self.assertRaisesRegex(ValueError, r'\(2\).*\(3\)'):
            self._positional_metadata_constructor_(
                3, positional_metadata=pd.DataFrame(index=range(2)))

    def test_constructor_no_positional_metadata(self):
        # Length zero with missing/empty positional metadata.
        for empty in None, {}, pd.DataFrame():
            obj = self._positional_metadata_constructor_(
                0, positional_metadata=empty)
            self.assertFalse(obj.has_positional_metadata())
            self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
            assert_data_frame_almost_equal(obj.positional_metadata,
                                           pd.DataFrame(index=range(0)))
        # Nonzero length with missing positional metadata.
        obj = self._positional_metadata_constructor_(
            3, positional_metadata=None)
        self.assertFalse(obj.has_positional_metadata())
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame(index=range(3)))

    def test_constructor_with_positional_metadata_len_zero(self):
        # A zero-length column still counts as metadata being present.
        for data in [], (), np.array([]):
            obj = self._positional_metadata_constructor_(
                0, positional_metadata={'foo': data})
            self.assertTrue(obj.has_positional_metadata())
            assert_data_frame_almost_equal(
                obj.positional_metadata,
                pd.DataFrame({'foo': data}, index=range(0)))

    def test_constructor_with_positional_metadata_len_one(self):
        for data in [2], (2, ), np.array([2]):
            obj = self._positional_metadata_constructor_(
                1, positional_metadata={'foo': data})
            self.assertTrue(obj.has_positional_metadata())
            assert_data_frame_almost_equal(
                obj.positional_metadata,
                pd.DataFrame({'foo': data}, index=range(1)))

    def test_constructor_with_positional_metadata_len_greater_than_one(self):
        # list, tuple, and ndarray column sources behave identically.
        for data in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
                     (0, 42, 42, 1, 0, 8, 100, 0, 0),
                     np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
            obj = self._positional_metadata_constructor_(
                9, positional_metadata={'foo': data})
            self.assertTrue(obj.has_positional_metadata())
            assert_data_frame_almost_equal(
                obj.positional_metadata,
                pd.DataFrame({'foo': data}, index=range(9)))

    def test_constructor_with_positional_metadata_multiple_columns(self):
        obj = self._positional_metadata_constructor_(
            5, positional_metadata={'foo': np.arange(5),
                                    'bar': np.arange(5)[::-1]})
        self.assertTrue(obj.has_positional_metadata())
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': np.arange(5),
                          'bar': np.arange(5)[::-1]}, index=range(5)))

    def test_constructor_with_positional_metadata_custom_index(self):
        # A custom index on the input frame is discarded and replaced
        # with a positional RangeIndex.
        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
                          index=['a', 'b', 'c', 'd', 'e'])
        obj = self._positional_metadata_constructor_(
            5, positional_metadata=df)
        self.assertTrue(obj.has_positional_metadata())
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': np.arange(5),
                          'bar': np.arange(5)[::-1]}, index=range(5)))

    def test_constructor_with_positional_metadata_int64_index(self):
        # Test that memory-inefficient index is converted to memory-efficient
        # index.
        # NOTE(review): pd.Int64Index was removed in pandas 2.0; this test
        # requires pandas < 2.0 -- confirm against the project's pin.
        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
                          index=np.arange(5))
        self.assertIsInstance(df.index, pd.Int64Index)
        obj = self._positional_metadata_constructor_(
            5, positional_metadata=df)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': np.arange(5),
                          'bar': np.arange(5)[::-1]}, index=range(5)))
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)

    def test_constructor_handles_missing_positional_metadata_efficiently(self):
        # Missing positional metadata is stored internally as None.
        obj = self._positional_metadata_constructor_(4)
        self.assertIsNone(obj._positional_metadata)
        obj = self._positional_metadata_constructor_(
            4, positional_metadata=None)
        self.assertIsNone(obj._positional_metadata)

    def test_constructor_makes_shallow_copy_of_positional_metadata(self):
        df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                          index=['a', 'b', 'c'])
        obj = self._positional_metadata_constructor_(
            3, positional_metadata=df)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        self.assertIsNot(obj.positional_metadata, df)
        # Original df is not mutated.
        orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                               index=['a', 'b', 'c'])
        assert_data_frame_almost_equal(df, orig_df)
        # Change values of column (using same dtype).
        df['foo'] = [42, 42, 42]
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        # Change single value of underlying data.
        df.values[0][0] = 10
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        # Mutate list (not a deep copy).
        df['bar'][0].append(42)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
                         index=range(3)))

    def test_eq_basic(self):
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        obj2 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        self.assertReallyEqual(obj1, obj2)

    def test_eq_from_different_source(self):
        # Equality is based on normalized values, not the input container
        # type or its original index.
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': np.array([1, 2, 3])})
        obj2 = self._positional_metadata_constructor_(
            3, positional_metadata=pd.DataFrame({'foo': [1, 2, 3]},
                                                index=['foo', 'bar', 'baz']))
        self.assertReallyEqual(obj1, obj2)

    def test_eq_missing_positional_metadata(self):
        # All "empty" spellings compare equal for a given length.
        for empty in None, {}, pd.DataFrame(), pd.DataFrame(index=[]):
            obj = self._positional_metadata_constructor_(
                0, positional_metadata=empty)
            self.assertReallyEqual(
                obj,
                self._positional_metadata_constructor_(0))
            self.assertReallyEqual(
                obj,
                self._positional_metadata_constructor_(
                    0, positional_metadata=empty))
        for empty in None, pd.DataFrame(index=['a', 'b']):
            obj = self._positional_metadata_constructor_(
                2, positional_metadata=empty)
            self.assertReallyEqual(
                obj,
                self._positional_metadata_constructor_(2))
            self.assertReallyEqual(
                obj,
                self._positional_metadata_constructor_(
                    2, positional_metadata=empty))

    def test_eq_handles_missing_positional_metadata_efficiently(self):
        obj1 = self._positional_metadata_constructor_(1)
        obj2 = self._positional_metadata_constructor_(1)
        self.assertReallyEqual(obj1, obj2)
        # Comparing must not materialize an empty DataFrame.
        self.assertIsNone(obj1._positional_metadata)
        self.assertIsNone(obj2._positional_metadata)

    def test_ne_len_zero(self):
        # Both have positional metadata.
        obj1 = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': []})
        obj2 = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': [], 'bar': []})
        self.assertReallyNotEqual(obj1, obj2)
        # One has positional metadata.
        obj1 = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': []})
        obj2 = self._positional_metadata_constructor_(0)
        self.assertReallyNotEqual(obj1, obj2)

    def test_ne_len_greater_than_zero(self):
        # Both have positional metadata.
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        obj2 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 2]})
        self.assertReallyNotEqual(obj1, obj2)
        # One has positional metadata.
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        obj2 = self._positional_metadata_constructor_(3)
        self.assertReallyNotEqual(obj1, obj2)

    def test_ne_len_mismatch(self):
        obj1 = self._positional_metadata_constructor_(
            3, positional_metadata=pd.DataFrame(index=range(3)))
        obj2 = self._positional_metadata_constructor_(
            2, positional_metadata=pd.DataFrame(index=range(2)))
        self.assertReallyNotEqual(obj1, obj2)

    def test_copy_positional_metadata_none(self):
        obj = self._positional_metadata_constructor_(3)
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._positional_metadata)
        self.assertIsNone(obj_copy._positional_metadata)

    def test_copy_positional_metadata_empty(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata=pd.DataFrame(index=range(3)))
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        assert_data_frame_almost_equal(obj._positional_metadata,
                                       pd.DataFrame(index=range(3)))
        # Copying normalizes empty metadata to the efficient None form.
        self.assertIsNone(obj_copy._positional_metadata)

    def test_copy_with_positional_metadata(self):
        obj = self._positional_metadata_constructor_(
            4, positional_metadata={'bar': [[], [], [], []],
                                    'baz': [42, 42, 42, 42]})
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNot(obj._positional_metadata,
                         obj_copy._positional_metadata)
        self.assertIsNot(obj._positional_metadata.values,
                         obj_copy._positional_metadata.values)
        # Shallow copy: cell objects are shared between original and copy.
        self.assertIs(obj._positional_metadata.loc[0, 'bar'],
                      obj_copy._positional_metadata.loc[0, 'bar'])
        obj_copy.positional_metadata.loc[0, 'bar'].append(1)
        obj_copy.positional_metadata.loc[0, 'baz'] = 43
        assert_data_frame_almost_equal(
            obj_copy.positional_metadata,
            pd.DataFrame({'bar': [[1], [], [], []],
                          'baz': [43, 42, 42, 42]}))
        # Mutating the shared list is visible on the original; the scalar
        # assignment is not.
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'bar': [[1], [], [], []],
                          'baz': [42, 42, 42, 42]}))

    def test_copy_preserves_range_index(self):
        for pm in None, {'foo': ['a', 'b', 'c']}:
            obj = self._positional_metadata_constructor_(
                3, positional_metadata=pm)
            obj_copy = copy.copy(obj)
            self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
            self.assertIsInstance(obj_copy.positional_metadata.index,
                                  pd.RangeIndex)

    def test_deepcopy_positional_metadata_none(self):
        obj = self._positional_metadata_constructor_(3)
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._positional_metadata)
        self.assertIsNone(obj_copy._positional_metadata)

    def test_deepcopy_positional_metadata_empty(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata=pd.DataFrame(index=range(3)))
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        assert_data_frame_almost_equal(obj._positional_metadata,
                                       pd.DataFrame(index=range(3)))
        self.assertIsNone(obj_copy._positional_metadata)

    def test_deepcopy_with_positional_metadata(self):
        obj = self._positional_metadata_constructor_(
            4, positional_metadata={'bar': [[], [], [], []],
                                    'baz': [42, 42, 42, 42]})
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNot(obj._positional_metadata,
                         obj_copy._positional_metadata)
        self.assertIsNot(obj._positional_metadata.values,
                         obj_copy._positional_metadata.values)
        # Deep copy: cell objects are duplicated too.
        self.assertIsNot(obj._positional_metadata.loc[0, 'bar'],
                         obj_copy._positional_metadata.loc[0, 'bar'])
        obj_copy.positional_metadata.loc[0, 'bar'].append(1)
        obj_copy.positional_metadata.loc[0, 'baz'] = 43
        assert_data_frame_almost_equal(
            obj_copy.positional_metadata,
            pd.DataFrame({'bar': [[1], [], [], []],
                          'baz': [43, 42, 42, 42]}))
        # Original is completely untouched.
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'bar': [[], [], [], []],
                          'baz': [42, 42, 42, 42]}))

    def test_deepcopy_preserves_range_index(self):
        for pm in None, {'foo': ['a', 'b', 'c']}:
            obj = self._positional_metadata_constructor_(
                3, positional_metadata=pm)
            obj_copy = copy.deepcopy(obj)
            self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
            self.assertIsInstance(obj_copy.positional_metadata.index,
                                  pd.RangeIndex)

    def test_deepcopy_memo_is_respected(self):
        # Basic test to ensure deepcopy's memo is passed through to recursive
        # deepcopy calls.
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        memo = {}
        copy.deepcopy(obj, memo)
        self.assertGreater(len(memo), 2)

    def test_positional_metadata_getter(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [22, 22, 0]})
        self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [22, 22, 0]}))
        # Update existing column.
        obj.positional_metadata['foo'] = [42, 42, 43]
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [42, 42, 43]}))
        # Add new column.
        obj.positional_metadata['foo2'] = [True, False, True]
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [42, 42, 43],
                          'foo2': [True, False, True]}))

    def test_positional_metadata_getter_no_positional_metadata(self):
        obj = self._positional_metadata_constructor_(4)
        self.assertIsNone(obj._positional_metadata)
        self.assertIsInstance(obj.positional_metadata, pd.DataFrame)
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame(index=range(4)))
        # First getter access lazily materializes the DataFrame.
        self.assertIsNotNone(obj._positional_metadata)

    def test_positional_metadata_getter_set_column_series(self):
        length = 8
        obj = self._positional_metadata_constructor_(
            length, positional_metadata={'foo': range(length)})
        obj.positional_metadata['bar'] = pd.Series(range(length-3))
        # pandas.Series will be padded with NaN if too short.
        npt.assert_equal(obj.positional_metadata['bar'],
                         np.array(list(range(length-3)) + [np.nan]*3))
        obj.positional_metadata['baz'] = pd.Series(range(length+3))
        # pandas.Series will be truncated if too long.
        npt.assert_equal(obj.positional_metadata['baz'],
                         np.array(range(length)))

    def test_positional_metadata_getter_set_column_array(self):
        length = 8
        obj = self._positional_metadata_constructor_(
            length, positional_metadata={'foo': range(length)})
        # array-like objects will fail if wrong size.
        for array_like in (np.array(range(length-1)), range(length-1),
                           np.array(range(length+1)), range(length+1)):
            with self.assertRaisesRegex(ValueError,
                                        r'Length of values \(' +
                                        str(len(array_like)) +
                                        r'\) does not match length'
                                        r' of index \(8\)'):
                obj.positional_metadata['bar'] = array_like

    def test_positional_metadata_setter_pandas_consumable(self):
        obj = self._positional_metadata_constructor_(3)
        self.assertFalse(obj.has_positional_metadata())
        obj.positional_metadata = {'foo': [3, 2, 1]}
        self.assertTrue(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [3, 2, 1]}))
        # Assigning an empty frame clears has_positional_metadata().
        obj.positional_metadata = pd.DataFrame(index=np.arange(3))
        self.assertFalse(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame(index=range(3)))

    def test_positional_metadata_setter_data_frame(self):
        obj = self._positional_metadata_constructor_(3)
        self.assertFalse(obj.has_positional_metadata())
        obj.positional_metadata = pd.DataFrame({'foo': [3, 2, 1]},
                                               index=['a', 'b', 'c'])
        self.assertTrue(obj.has_positional_metadata())
        # Custom index is replaced with a positional RangeIndex.
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [3, 2, 1]}))
        obj.positional_metadata = pd.DataFrame(index=np.arange(3))
        self.assertFalse(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame(index=range(3)))

    def test_positional_metadata_setter_none(self):
        obj = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': []})
        self.assertTrue(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': []}))
        # `None` behavior differs from constructor.
        obj.positional_metadata = None
        self.assertFalse(obj.has_positional_metadata())
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame(index=range(0)))

    def test_positional_metadata_setter_int64_index(self):
        # Test that memory-inefficient index is converted to memory-efficient
        # index.
        # NOTE(review): pd.Int64Index was removed in pandas 2.0; this test
        # requires pandas < 2.0 -- confirm against the project's pin.
        obj = self._positional_metadata_constructor_(5)
        df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
                          index=np.arange(5))
        self.assertIsInstance(df.index, pd.Int64Index)
        obj.positional_metadata = df
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': np.arange(5),
                          'bar': np.arange(5)[::-1]}, index=range(5)))
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)

    def test_positional_metadata_setter_makes_shallow_copy(self):
        obj = self._positional_metadata_constructor_(3)
        df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                          index=['a', 'b', 'c'])
        obj.positional_metadata = df
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        self.assertIsNot(obj.positional_metadata, df)
        # Original df is not mutated.
        orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                               index=['a', 'b', 'c'])
        assert_data_frame_almost_equal(df, orig_df)
        # Change values of column (using same dtype).
        df['foo'] = [42, 42, 42]
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        # Change single value of underlying data.
        df.values[0][0] = 10
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},
                         index=range(3)))
        # Mutate list (not a deep copy).
        df['bar'][0].append(42)
        assert_data_frame_almost_equal(
            obj.positional_metadata,
            pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},
                         index=range(3)))

    def test_positional_metadata_setter_invalid_type(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 42]})
        with self.assertRaisesRegex(TypeError,
                                    'Invalid positional metadata. Must be '
                                    'consumable by `pd.DataFrame` constructor.'
                                    ' Original pandas error message: '):
            obj.positional_metadata = 2
        # A failed assignment leaves the previous metadata intact.
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [1, 2, 42]}))

    def test_positional_metadata_setter_len_mismatch(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 42]})
        # `None` behavior differs from constructor.
        with self.assertRaisesRegex(ValueError, r'\(0\).*\(3\)'):
            obj.positional_metadata = None
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [1, 2, 42]}))
        with self.assertRaisesRegex(ValueError, r'\(4\).*\(3\)'):
            obj.positional_metadata = [1, 2, 3, 4]
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [1, 2, 42]}))

    def test_positional_metadata_deleter(self):
        obj = self._positional_metadata_constructor_(
            3, positional_metadata={'foo': [1, 2, 3]})
        self.assertIsInstance(obj.positional_metadata.index, pd.RangeIndex)
        assert_data_frame_almost_equal(obj.positional_metadata,
                                       pd.DataFrame({'foo': [1, 2, 3]}))
        del obj.positional_metadata
        self.assertIsNone(obj._positional_metadata)
        self.assertFalse(obj.has_positional_metadata())
        # Delete again.
        del obj.positional_metadata
        self.assertIsNone(obj._positional_metadata)
        self.assertFalse(obj.has_positional_metadata())
        # Deleting on an object that never had metadata is also a no-op.
        obj = self._positional_metadata_constructor_(3)
        self.assertIsNone(obj._positional_metadata)
        self.assertFalse(obj.has_positional_metadata())
        del obj.positional_metadata
        self.assertIsNone(obj._positional_metadata)
        self.assertFalse(obj.has_positional_metadata())

    def test_has_positional_metadata(self):
        obj = self._positional_metadata_constructor_(4)
        self.assertFalse(obj.has_positional_metadata())
        # Handles missing metadata efficiently (stored as None).
        self.assertIsNone(obj._positional_metadata)
        obj = self._positional_metadata_constructor_(0, positional_metadata={})
        self.assertFalse(obj.has_positional_metadata())
        # A frame with rows but no columns does not count as metadata.
        obj = self._positional_metadata_constructor_(
            4, positional_metadata=pd.DataFrame(index=np.arange(4)))
        self.assertFalse(obj.has_positional_metadata())
        obj = self._positional_metadata_constructor_(
            4, positional_metadata=pd.DataFrame(index=['a', 'b', 'c', 'd']))
        self.assertFalse(obj.has_positional_metadata())
        obj = self._positional_metadata_constructor_(
            0, positional_metadata={'foo': []})
        self.assertTrue(obj.has_positional_metadata())
        obj = self._positional_metadata_constructor_(
            4, positional_metadata={'foo': [1, 2, 3, 4]})
        self.assertTrue(obj.has_positional_metadata())
        obj = self._positional_metadata_constructor_(
            2, positional_metadata={'foo': [1, 2], 'bar': ['abc', 'def']})
        self.assertTrue(obj.has_positional_metadata())
class IntervalMetadataMixinTests:
    """Mixin of tests for objects exposing an ``interval_metadata`` property.

    Concrete test classes must provide ``_interval_metadata_constructor_``
    (a callable taking a length and an optional ``IntervalMetadata``) and
    call ``_set_up`` from their ``setUp``.
    """

    def _set_up(self):
        # Shared fixtures: an empty IntervalMetadata sized to upper_bound,
        # and two interval specifications reused throughout the tests.
        self.upper_bound = 9
        self.im = IntervalMetadata(self.upper_bound)
        self.intvls = [
            {'bounds': [(0, 1), (2, 9)], 'metadata': {'gene': 'sagA'}},
            {'bounds': [(0, 1)], 'metadata': {'gene': ['a'],
                                              'product': 'foo'}}]

    def test_constructor_invalid(self):
        # Anything other than an IntervalMetadata instance must raise.
        with self.assertRaisesRegex(TypeError,
                                    'You must provide `IntervalMetadata` '
                                    'object.'):
            self._interval_metadata_constructor_(0, '')

    def test_constructor_empty_interval_metadata_upper_bound_is_none(self):
        im = IntervalMetadata(None)

        for i in [0, 1, 3, 100]:
            x = self._interval_metadata_constructor_(i, im)
            # the upper bound is reset to seq/axis length
            self.assertEqual(x.interval_metadata.upper_bound, i)
            self.assertEqual(x.interval_metadata._intervals, im._intervals)
            # original interval metadata upper bound is not changed
            self.assertIsNone(im.upper_bound)

    def test_constructor_interval_metadata_upper_bound_is_none(self):
        im = IntervalMetadata(None)
        # populate im
        im.add(**self.intvls[0])
        im.add(**self.intvls[1])

        for i in [1000, 100]:
            x = self._interval_metadata_constructor_(i, im)
            # the upper bound is reset to seq/axis length
            self.assertEqual(x.interval_metadata.upper_bound, i)
            self.assertEqual(x.interval_metadata._intervals, im._intervals)
            # original interval metadata upper bound is not changed
            self.assertIsNone(im.upper_bound)

    def test_constructor_interval_bounds_larger_than_len(self):
        im = IntervalMetadata(None)
        # populate im
        im.add(**self.intvls[0])
        im.add(**self.intvls[1])

        for i in [0, 1, 3]:
            # error to reset upper bound to a smaller value than seq/axis len
            with self.assertRaisesRegex(
                    ValueError, r'larger than upper bound \(%r\)' % i):
                self._interval_metadata_constructor_(i, im)
            # original interval metadata upper bound is not changed
            self.assertIsNone(im.upper_bound)

    def test_constructor_interval_metadata_len_mismatch(self):
        # upper_bound of self.im (9) never matches any of these lengths.
        for i in [0, 1, 3, 100]:
            with self.assertRaisesRegex(
                    ValueError, r'\(%d\).*\(%d\)' % (self.upper_bound, i)):
                self._interval_metadata_constructor_(i, self.im)

    def test_constructor_interval_metadata_len(self):
        for n in 1, 2, 3:
            im = IntervalMetadata(n)
            im.add([(0, 1)], metadata={'a': 'b'})
            obj = self._interval_metadata_constructor_(n, im)
            self.assertTrue(obj.has_interval_metadata())
            self.assertIsInstance(obj.interval_metadata, IntervalMetadata)

    def test_constructor_interval_metadata_len_0(self):
        # A zero-length object with empty interval metadata has none.
        im = IntervalMetadata(0)
        obj = self._interval_metadata_constructor_(0, im)
        self.assertFalse(obj.has_interval_metadata())

    def test_constructor_no_interval_metadata(self):
        # Both "no metadata given" and "empty metadata given" behave the same.
        for i, im in [(0, None), (self.upper_bound, self.im)]:
            obj = self._interval_metadata_constructor_(i, im)
            self.assertFalse(obj.has_interval_metadata())
            self.assertIsInstance(obj.interval_metadata, IntervalMetadata)

    def test_constructor_handles_missing_interval_metadata_efficiently(self):
        # Internal storage must stay None (lazily created) when no
        # interval metadata is supplied.
        obj = self._interval_metadata_constructor_(self.upper_bound)
        self.assertIsNone(obj._interval_metadata)

        obj = self._interval_metadata_constructor_(
            self.upper_bound, interval_metadata=None)
        self.assertIsNone(obj._interval_metadata)

    def test_constructor_makes_shallow_copy_of_interval_metadata(self):
        intvl = self.im.add(**self.intvls[1])
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)
        self.assertEqual(obj.interval_metadata, self.im)
        self.assertIsNot(obj.interval_metadata, self.im)

        # Changing mutable value of metadata of the old interval
        # also changes obj.
        intvl.metadata['gene'].append('b')
        self.assertEqual(obj.interval_metadata, self.im)

        # Changing old interval doesn't change obj
        intvl.bounds = [(3, 6)]
        self.assertNotEqual(obj.interval_metadata, self.im)

    def test_eq_basic(self):
        # Two objects built from equal (but distinct) metadata are equal.
        im1 = IntervalMetadata(self.upper_bound)
        im1.add(**self.intvls[0])
        obj1 = self._interval_metadata_constructor_(self.upper_bound, im1)

        im2 = IntervalMetadata(self.upper_bound)
        im2.add(**self.intvls[0])
        obj2 = self._interval_metadata_constructor_(self.upper_bound, im2)
        self.assertReallyEqual(obj1, obj2)

    def test_eq_populated_differently(self):
        # Equality holds whether metadata was supplied at construction or
        # added through the property afterwards.
        im1 = IntervalMetadata(self.upper_bound)
        im1.add(**self.intvls[0])
        obj1 = self._interval_metadata_constructor_(self.upper_bound, im1)

        obj2 = self._interval_metadata_constructor_(self.upper_bound)
        obj2.interval_metadata.add(**self.intvls[0])
        self.assertReallyEqual(obj1, obj2)

    def test_eq_handles_missing_positional_metadata_efficiently(self):
        # Comparing two metadata-less objects must not materialize storage.
        obj1 = self._interval_metadata_constructor_(self.upper_bound)
        obj2 = self._interval_metadata_constructor_(self.upper_bound)
        self.assertReallyEqual(obj1, obj2)
        self.assertIsNone(obj1._interval_metadata)
        self.assertIsNone(obj2._interval_metadata)

    def test_ne_diff_len(self):
        obj1 = self._interval_metadata_constructor_(0)
        obj2 = self._interval_metadata_constructor_(self.upper_bound)
        self.assertReallyNotEqual(obj1, obj2)

    def test_ne_only_one_is_empty(self):
        im1 = IntervalMetadata(self.upper_bound)
        im1.add(**self.intvls[0])
        obj1 = self._interval_metadata_constructor_(self.upper_bound, im1)

        obj2 = self._interval_metadata_constructor_(self.upper_bound)
        self.assertReallyNotEqual(obj1, obj2)

    def test_ne(self):
        # Different interval features -> unequal objects.
        im1 = IntervalMetadata(self.upper_bound)
        im1.add(**self.intvls[0])
        obj1 = self._interval_metadata_constructor_(self.upper_bound, im1)

        im2 = IntervalMetadata(self.upper_bound)
        im2.add(**self.intvls[1])
        obj2 = self._interval_metadata_constructor_(self.upper_bound, im2)
        self.assertReallyNotEqual(obj1, obj2)

    def test_copy_interval_metadata_empty(self):
        # Copying an object with *empty* interval metadata leaves the
        # copy's storage unset.
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj_copy._interval_metadata)
        self.assertEqual(obj._interval_metadata, self.im)

    def test_copy_interval_metadata_none(self):
        obj = self._interval_metadata_constructor_(self.upper_bound)
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._interval_metadata)
        self.assertIsNone(obj_copy._interval_metadata)

    def test_copy_interval_metadata(self):
        # Shallow copy: Interval containers are new objects, but the
        # metadata *values* are shared.
        self.im.add(**self.intvls[1])
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)
        obj_copy = copy.copy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNot(obj.interval_metadata,
                         obj_copy.interval_metadata)
        self.assertIsNot(obj.interval_metadata._intervals,
                         obj_copy.interval_metadata._intervals)
        for i, j in zip(obj.interval_metadata._intervals,
                        obj_copy.interval_metadata._intervals):
            self.assertIsNot(i, j)
            self.assertIsNot(i.metadata, j.metadata)
            for k in i.metadata:
                self.assertIs(i.metadata[k], j.metadata[k])

    def test_deepcopy_interval_metadata(self):
        # Deep copy: mutable metadata values (the 'gene' list) are copied;
        # immutable ones (the 'product' string) may stay shared.
        self.im.add(**self.intvls[1])
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNot(obj.interval_metadata,
                         obj_copy.interval_metadata)
        self.assertIsNot(obj.interval_metadata._intervals,
                         obj_copy.interval_metadata._intervals)
        for i, j in zip(obj.interval_metadata._intervals,
                        obj_copy.interval_metadata._intervals):
            self.assertIsNot(i, j)
            self.assertIsNot(i.metadata, j.metadata)
            self.assertIsNot(i.metadata['gene'], j.metadata['gene'])
            self.assertIs(i.metadata['product'], j.metadata['product'])

    def test_deepcopy_interval_metadata_empty(self):
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj_copy._interval_metadata)
        self.assertEqual(obj._interval_metadata, self.im)

    def test_deepcopy_interval_metadata_none(self):
        obj = self._interval_metadata_constructor_(self.upper_bound, None)
        obj_copy = copy.deepcopy(obj)
        self.assertEqual(obj, obj_copy)
        self.assertIsNot(obj, obj_copy)
        self.assertIsNone(obj._interval_metadata)
        self.assertIsNone(obj_copy._interval_metadata)

    def test_deepcopy_memo_is_respected(self):
        # Basic test to ensure deepcopy's memo is passed through to recursive
        # deepcopy calls.
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)
        memo = {}
        copy.deepcopy(obj, memo)
        self.assertGreater(len(memo), 1)

    def test_interval_metadata_getter(self):
        self.im.add(**self.intvls[0])
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)
        self.assertIsInstance(obj.interval_metadata, IntervalMetadata)
        self.assertEqual(self.im, obj.interval_metadata)

        # Update existing metadata.
        obj.interval_metadata._intervals[0].metadata['gene'] = 'sagB'
        self.assertNotEqual(obj.interval_metadata, self.im)
        self.im._intervals[0].metadata['gene'] = 'sagB'
        self.assertEqual(obj.interval_metadata, self.im)

        # Add new interval feature.
        obj.interval_metadata.add(**self.intvls[1])
        self.im.add(**self.intvls[1])
        self.assertEqual(obj.interval_metadata, self.im)

    def test_interval_metadata_getter_no_interval_metadata(self):
        # First property access lazily creates empty interval metadata.
        obj = self._interval_metadata_constructor_(self.upper_bound)
        self.assertIsNone(obj._interval_metadata)
        self.assertIsInstance(obj.interval_metadata, IntervalMetadata)
        self.assertEqual(obj.interval_metadata, self.im)
        self.assertIsNotNone(obj._interval_metadata)

    def test_interval_metadata_setter(self):
        obj = self._interval_metadata_constructor_(self.upper_bound)
        self.assertFalse(obj.has_interval_metadata())

        # Assigning empty metadata keeps the flag False.
        obj.interval_metadata = self.im
        self.assertFalse(obj.has_interval_metadata())
        self.assertEqual(obj.interval_metadata, self.im)

        self.im.add(**self.intvls[1])
        obj.interval_metadata = self.im
        self.assertTrue(obj.has_interval_metadata())
        self.assertEqual(obj.interval_metadata, self.im)

    def test_interval_metadata_setter_makes_copy(self):
        intvl = self.im.add(**self.intvls[1])
        obj = self._interval_metadata_constructor_(self.upper_bound)
        obj.interval_metadata = self.im
        self.assertEqual(obj.interval_metadata, self.im)
        self.assertIsNot(obj.interval_metadata, self.im)

        # Changing mutable value of metadata of the old interval
        # also changes obj.
        intvl.metadata['gene'].append('b')
        self.assertEqual(obj.interval_metadata, self.im)

        # Changing old interval doesn't change obj
        intvl.bounds = [(3, 6)]
        self.assertNotEqual(obj.interval_metadata, self.im)

    def test_interval_metadata_setter_len_mismatch(self):
        self.im.add(**self.intvls[1])
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)

        for i in 0, 1, 3, 100:
            with self.assertRaisesRegex(
                    ValueError, r'\(%d\).*\(%d\)' % (i, self.upper_bound)):
                obj.interval_metadata = IntervalMetadata(i)

        # Failed assignments must leave the original metadata intact.
        self.assertEqual(obj.interval_metadata, self.im)

    def test_interval_metadata_setter_invalid_type(self):
        self.im.add(**self.intvls[0])
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)

        for i in [2, None, '', {}, []]:
            with self.assertRaisesRegex(
                    TypeError,
                    'You must provide `IntervalMetadata` object'):
                obj.interval_metadata = i

        # Failed assignments must leave the original metadata intact.
        self.assertEqual(self.im, obj.interval_metadata)

    def test_interval_metadata_setter_empty_upper_bound_is_none(self):
        im = IntervalMetadata(None)

        for i in [0, 1, 3, 100]:
            x = self._interval_metadata_constructor_(i)
            x.interval_metadata = im
            self.assertFalse(x.has_interval_metadata())
            # the upper bound is reset to seq/axis length
            self.assertEqual(x.interval_metadata.upper_bound, i)
            # original interval metadata upper bound is not changed
            self.assertIsNone(im.upper_bound)

    def test_interval_metadata_setter_upper_bound_is_none(self):
        im = IntervalMetadata(None)
        # populate im
        im.add(**self.intvls[0])
        im.add(**self.intvls[1])

        for i in [1000, 100]:
            x = self._interval_metadata_constructor_(i)
            x.interval_metadata = im
            # the upper bound is reset to seq/axis length
            self.assertEqual(x.interval_metadata.upper_bound, i)
            self.assertEqual(x.interval_metadata._intervals, im._intervals)
            # original interval metadata upper bound is not changed
            self.assertIsNone(im.upper_bound)

    def test_interval_metadata_setter_interval_bounds_larger_than_len(self):
        im = IntervalMetadata(None)
        # populate im
        im.add(**self.intvls[0])
        im.add(**self.intvls[1])

        for i in [0, 1, 3]:
            # error to reset upper bound to a smaller value than seq/axis len
            with self.assertRaisesRegex(
                    ValueError, r'larger than upper bound \(%r\)' % i):
                x = self._interval_metadata_constructor_(i)
                x.interval_metadata = im
            # original interval metadata upper bound is not changed
            self.assertIsNone(im.upper_bound)

    def test_interval_metadata_deleter_empty(self):
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)

        del obj.interval_metadata
        self.assertIsNone(obj._interval_metadata)
        self.assertFalse(obj.has_interval_metadata())

        # Delete again. test idempotent
        del obj.interval_metadata
        self.assertIsNone(obj._interval_metadata)
        self.assertFalse(obj.has_interval_metadata())

    def test_interval_metadata_deleter(self):
        self.im.add(**self.intvls[0])
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)

        del obj.interval_metadata
        self.assertIsNone(obj._interval_metadata)
        self.assertFalse(obj.has_interval_metadata())

    def test_has_interval_metadata(self):
        obj = self._interval_metadata_constructor_(self.upper_bound)
        self.assertFalse(obj.has_interval_metadata())

        # Empty interval metadata does not count as having metadata.
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)
        self.assertFalse(obj.has_interval_metadata())

        self.im.add([(0, 1)])
        obj = self._interval_metadata_constructor_(self.upper_bound, self.im)
        self.assertTrue(obj.has_interval_metadata())
| gregcaporaso/scikit-bio | skbio/metadata/_testing.py | Python | bsd-3-clause | 53,266 | [
"scikit-bio"
] | 8c3fd3c98c3bb51a10978d9bab1cc9b5ec5263dfef7832608a5e3168534826f3 |
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
import operator
import bisect
import os
import pkgutil
import numpy as np
import constants
def pretty_print_table(table, use_tabs=False):
    """
    Takes a 2d table and prints it in a nice text based format. If
    use_tabs=True then only \t is used as a separator. This is useful for
    importing the data into other apps (Excel, ...). The default is to pad
    the columns with spaces to make them look neat. The first column is
    left aligned, while the remainder is right aligned.
    """
    if use_tabs:
        for row in table:
            # Escape underscores (e.g. for pasting into LaTeX documents).
            # print(...) with a single argument behaves identically on
            # Python 2 and 3, unlike the old 'print expr' statement form.
            print("\t".join(row).replace("_", "\\_"))
        return

    def col_width(table, colidx):
        # Width of the widest cell (as a string) in the given column.
        return max([len(str(row[colidx])) for row in table])

    # Create a format string with the first column left aligned, the others
    # right aligned; example: {:<27}{:>11}{:>6}{:>8}
    frmt = "".join(
        [('{:<' if i == 0 else '{:>') + str(1 + col_width(table, i)) + '}'
         for i in range(len(table[0]))])
    for row in table:
        print(frmt.format(*row))
def sort_table(table, col=0):
    """
    Return the rows of `table` sorted by the entry in column `col`.
    """
    return sorted(table, key=lambda row: row[col])
def float_eq(a, b):
    """
    Test if two floats are almost equal to each other.

    The comparison uses a relative tolerance of 1e-10, with the scale
    floored at 1e-5 so that values near zero are compared sensibly.
    """
    scale = max(1e-5, abs(a), abs(b))
    return abs(a - b) < 1e-10 * scale
def linear_interpol(x, x1, x2, y1, y2):
    """
    Linearly interpolate to point x, between
    the points (x1,y1), (x2,y2).

    Requires x1 <= x <= x2.  For the degenerate interval x1 == x2
    (which forces x == x1), returns y1 instead of dividing by zero.
    """
    assert(x1 <= x)
    assert(x2 >= x)
    assert(x1 <= x2)
    if x1 == x2:
        # Degenerate interval: the asserts above imply x == x1 == x2.
        # The original code raised ZeroDivisionError here.
        return y1
    alpha = (x - x1) / (x2 - x1)
    return (1. - alpha) * y1 + alpha * y2
def read_table(filename):
    """
    Read a whitespace-separated numeric table from the burnman package
    data directory (``data/<filename>``), skipping blank lines and lines
    starting with '#'.  Returns a 2d numpy array.
    """
    datastream = pkgutil.get_data('burnman', 'data/' + filename)
    # pkgutil.get_data returns bytes on Python 3 (str on Python 2);
    # decode so the string handling below works under both.
    if not isinstance(datastream, str):
        datastream = datastream.decode('ascii')
    datalines = [line.strip() for line in datastream.split('\n')
                 if line.strip()]
    table = []
    for line in datalines:
        if (line[0] != '#'):
            numbers = np.fromstring(line, sep=' ')
            table.append(numbers)
    return np.array(table)
def cut_table(table, min_value, max_value):
    """
    Return the rows of `table` with indices in [min_value, max_value)
    as a list of row slices.
    """
    return [table[row_idx, :] for row_idx in range(min_value, max_value)]
def lookup_and_interpolate(table_x, table_y, x_value):
    """
    Return table_y interpolated at x_value; values outside the tabulated
    x range are clamped to the first/last table_y entry.
    """
    pos = bisect.bisect_left(table_x, x_value) - 1
    if pos < 0:
        # Below the tabulated range: clamp to the first value.
        return table_y[0]
    if pos >= len(table_x) - 1:
        # At or beyond the last point: clamp to the last value.
        return table_y[pos]
    return linear_interpol(x_value, table_x[pos], table_x[pos + 1],
                           table_y[pos], table_y[pos + 1])
def molar_volume_from_unit_cell_volume(unit_cell_v, z):
    """
    Takes unit cell volume in Angstroms^3 per unitcell, as is often reported,
    and the z number for the mineral (number of formula units per unit cell,
    NOT number of atoms per formula unit), and calculates
    the molar volume, as expected by the equations of state.
    """
    # Angstrom^3 -> m^3 is a factor of 1e-30; multiplying by Avogadro's
    # number and dividing by Z gives m^3 per mole of formula units.
    return unit_cell_v*constants.Avogadro/1e30/z
| QuLogic/burnman | burnman/tools.py | Python | gpl-2.0 | 2,943 | [
"Avogadro"
] | 49105adcc36bb4c8fc2c39489522c9fe3cf56b65e7d27195df528e52c7e34f49 |
#!/usr/bin/env python
"""
Dispersion analysis of a heterogeneous finite scale periodic cell.
The periodic cell mesh has to contain two subdomains Y1, Y2, so that different
material properties can be defined in each of the subdomains (see `--pars`
option).
"""
from __future__ import absolute_import
import os
import sys
sys.path.append('.')
import functools
from copy import copy
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
from sfepy.base.base import import_file, output, Struct
from sfepy.base.conf import dict_from_string, ProblemConf
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.base.log import Log
from sfepy.discrete.fem import MeshIO
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson as stiffness
import sfepy.mechanics.matcoefs as mc
from sfepy.mechanics.units import apply_unit_multipliers
import sfepy.discrete.fem.periodic as per
from sfepy.homogenization.utils import define_box_regions
from sfepy.discrete import Problem
from sfepy.solvers import Solver
from sfepy.solvers.ts import TimeStepper
def apply_units_le(pars, unit_multipliers):
    """Apply the (time, length, mass) unit multipliers to the linear
    elasticity parameters (young1, poisson1, density1,
    young2, poisson2, density2)."""
    # Young's moduli scale as stresses; Poisson's ratios are dimensionless.
    kinds = ['stress', 'one', 'density'] * 2
    return apply_unit_multipliers(pars, kinds, unit_multipliers)
def define_le(filename_mesh, pars, approx_order, refinement_level, solver_conf,
              plane='strain'):
    """Build the sfepy problem description for the linear elasticity
    dispersion problem on a two-subdomain (Y1, Y2) periodic cell.

    Returns ``locals()`` -- every local name below becomes a problem
    configuration item, so the names must not be changed.
    """
    io = MeshIO.any_from_filename(filename_mesh)
    bbox = io.read_bounding_box()
    dim = bbox.shape[1]
    size = (bbox[1] - bbox[0]).max()

    options = {
        'absolute_mesh_path' : True,
        'refinement_level' : refinement_level,
    }

    # Complex field: the Bloch-wave eigenproblem has complex matrices.
    fields = {
        'displacement': ('complex', dim, 'Omega', approx_order),
    }

    # Per-subdomain material parameters.
    young1, poisson1, density1, young2, poisson2, density2 = pars
    materials = {
        'm' : ({
            'D' : {'Y1' : stiffness(dim, young=young1, poisson=poisson1,
                                    plane=plane),
                   'Y2' : stiffness(dim, young=young2, poisson=poisson2,
                                    plane=plane)},
            'density' : {'Y1' : density1, 'Y2' : density2},
        },),
        'wave' : ({
            # Wave direction placeholder; overwritten by set_wave_dir_le().
            '.vec' : [1] * dim,
        },),
    }

    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }

    regions = {
        'Omega' : 'all',
        'Y1': 'cells of group 1',
        'Y2': 'cells of group 2',
    }
    regions.update(define_box_regions(dim,
                                      bbox[0], bbox[1], 1e-8))

    ebcs = {
    }

    # Periodic boundary conditions on opposite cell faces/edges.
    if dim == 3:
        epbcs = {
            'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'},
                            'match_x_plane'),
            'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'},
                            'match_y_plane'),
            'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'},
                            'match_z_plane'),
        }
    else:
        epbcs = {
            'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'},
                            'match_y_line'),
            'periodic_y' : (['Bottom', 'Top'], {'u.all' : 'u.all'},
                            'match_x_line'),
        }

    # Node-matching tolerance scaled by the cell size.
    per.set_accuracy(1e-8 * size)
    functions = {
        'match_x_plane' : (per.match_x_plane,),
        'match_y_plane' : (per.match_y_plane,),
        'match_z_plane' : (per.match_z_plane,),
        'match_x_line' : (per.match_x_line,),
        'match_y_line' : (per.match_y_line,),
    }

    integrals = {
        'i' : 2 * approx_order,
    }

    # K: stiffness, S: wave-direction term, R: antisymmetric coupling,
    # M: mass matrix.
    equations = {
        'K' : 'dw_lin_elastic.i.Omega(m.D, v, u)',
        'S' : 'dw_elastic_wave.i.Omega(m.D, wave.vec, v, u)',
        'R' : """dw_elastic_wave_cauchy.i.Omega(m.D, wave.vec, u, v)
               - dw_elastic_wave_cauchy.i.Omega(m.D, wave.vec, v, u)""",
        'M' : 'dw_volume_dot.i.Omega(m.density, v, u)',
    }

    solver_0 = solver_conf.copy()
    solver_0['name'] = 'eig'

    return locals()
def set_wave_dir_le(materials, wdir):
    """Store the wave direction vector `wdir` in the 'wave' material."""
    special_data = materials['wave'].datas['special']
    special_data['vec'] = wdir
def _max_diff_csr(mtx1, mtx2):
aux = nm.abs((mtx1 - mtx2).data)
return aux.max() if len(aux) else 0.0
def save_eigenvectors(filename, svecs, pb):
    """Save the eigenvectors `svecs` (columns) as FE field data of the
    problem `pb` into `filename`.

    Does nothing when `svecs` is None (eigenvalues-only run).
    """
    if svecs is None: return

    variables = pb.get_variables()

    # Make full eigenvectors (add DOFs fixed by boundary conditions).
    vecs = nm.empty((variables.di.ptr[-1], svecs.shape[1]),
                    dtype=svecs.dtype)
    for ii in range(svecs.shape[1]):
        vecs[:, ii] = variables.make_full_vec(svecs[:, ii])

    # Save the eigenvectors.
    out = {}
    state = pb.create_state()
    for ii in range(svecs.shape[1]):
        state.set_full(vecs[:, ii])
        aux = state.create_output_dict()
        # Suffix each output key with the eigenvector index.
        out.update({key + '%03d' % ii : aux[key] for key in aux})

    pb.save_state(filename, out=out)
# Help strings for the command line options defined in main().
helps = {
    'pars' :
    'material parameters in Y1, Y2 subdomains in basic units'
    ' [default: %(default)s]',
    'conf' :
    'if given, an alternative problem description file with apply_units() and'
    ' define() functions [default: %(default)s]',
    'mesh_size' :
    'desired mesh size (max. of bounding box dimensions) in basic units'
    ' - the input periodic cell mesh is rescaled to this size'
    ' [default: %(default)s]',
    'unit_multipliers' :
    'basic unit multipliers (time, length, mass) [default: %(default)s]',
    'plane' :
    'for 2D problems, plane strain or stress hypothesis selection'
    ' [default: %(default)s]',
    'wave_dir' : 'the wave vector direction (will be normalized)'
    ' [default: %(default)s]',
    'mode' : 'solution mode: omega = solve a generalized EVP for omega,'
    ' kappa = solve a quadratic generalized EVP for kappa'
    ' [default: %(default)s]',
    'range' : 'the wave vector magnitude / frequency range'
    ' (like numpy.linspace) depending on the mode option'
    ' [default: %(default)s]',
    'order' : 'displacement field approximation order [default: %(default)s]',
    'refine' : 'number of uniform mesh refinements [default: %(default)s]',
    'n_eigs' : 'the number of eigenvalues to compute [default: %(default)s]',
    'eigs_only' : 'compute only eigenvalues, not eigenvectors',
    'solver_conf' : 'eigenvalue problem solver configuration options'
    ' [default: %(default)s]',
    'save_materials' : 'save material parameters into'
    ' <output_directory>/materials.vtk',
    'log_std_waves' : 'log also standard pressure dilatation and shear waves',
    'silent' : 'do not print messages to screen',
    'clear' :
    'clear old solution files from output directory',
    'output_dir' :
    'output directory [default: %(default)s]',
    'mesh_filename' :
    'input periodic cell mesh file name [default: %(default)s]',
}
def main():
    """Parse command line options, set up the dispersion problem, assemble
    the K/S/R/M matrices and sweep either wave numbers (mode 'omega') or
    frequencies (mode 'kappa'), logging and saving the results."""
    # Aluminium and epoxy.
    default_pars = '70e9,0.35,2.799e3, 3.8e9,0.27,1.142e3'
    default_solver_conf = ("kind='eig.scipy',method='eigh',tol=1.0e-5,"
                           "maxiter=1000,which='LM',sigma=0.0")

    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--pars', metavar='young1,poisson1,density1'
                        ',young2,poisson2,density2',
                        action='store', dest='pars',
                        default=default_pars, help=helps['pars'])
    parser.add_argument('--conf', metavar='filename',
                        action='store', dest='conf',
                        default=None, help=helps['conf'])
    parser.add_argument('--mesh-size', type=float, metavar='float',
                        action='store', dest='mesh_size',
                        default=None, help=helps['mesh_size'])
    parser.add_argument('--unit-multipliers',
                        metavar='c_time,c_length,c_mass',
                        action='store', dest='unit_multipliers',
                        default='1.0,1.0,1.0', help=helps['unit_multipliers'])
    parser.add_argument('--plane', action='store', dest='plane',
                        choices=['strain', 'stress'],
                        default='strain', help=helps['plane'])
    parser.add_argument('--wave-dir', metavar='float,float[,float]',
                        action='store', dest='wave_dir',
                        default='1.0,0.0,0.0', help=helps['wave_dir'])
    parser.add_argument('--mode', action='store', dest='mode',
                        choices=['omega', 'kappa'],
                        default='omega', help=helps['mode'])
    parser.add_argument('--range', metavar='start,stop,count',
                        action='store', dest='range',
                        default='10,100,10', help=helps['range'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=1, help=helps['order'])
    parser.add_argument('--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-n', '--n-eigs', metavar='int', type=int,
                        action='store', dest='n_eigs',
                        default=6, help=helps['n_eigs'])
    parser.add_argument('--eigs-only',
                        action='store_true', dest='eigs_only',
                        default=False, help=helps['eigs_only'])
    parser.add_argument('--solver-conf', metavar='dict-like',
                        action='store', dest='solver_conf',
                        default=default_solver_conf, help=helps['solver_conf'])
    parser.add_argument('--save-materials',
                        action='store_true', dest='save_materials',
                        default=False, help=helps['save_materials'])
    parser.add_argument('--log-std-waves',
                        action='store_true', dest='log_std_waves',
                        default=False, help=helps['log_std_waves'])
    parser.add_argument('--silent',
                        action='store_true', dest='silent',
                        default=False, help=helps['silent'])
    parser.add_argument('-c', '--clear',
                        action='store_true', dest='clear',
                        default=False, help=helps['clear'])
    parser.add_argument('-o', '--output-dir', metavar='path',
                        action='store', dest='output_dir',
                        default='output', help=helps['output_dir'])
    parser.add_argument('mesh_filename', default='',
                        help=helps['mesh_filename'])
    options = parser.parse_args()

    output_dir = options.output_dir

    output.set_output(filename=os.path.join(output_dir,'output_log.txt'),
                      combined=options.silent == False)

    # Either use the user-supplied problem description module or the
    # built-in linear elasticity (``*_le``) functions.
    if options.conf is not None:
        mod = import_file(options.conf)
        apply_units = mod.apply_units
        define = mod.define
        set_wave_dir = mod.set_wave_dir

    else:
        apply_units = apply_units_le
        define = define_le
        set_wave_dir = set_wave_dir_le

    # Parse the comma-separated option strings into numbers.
    options.pars = [float(ii) for ii in options.pars.split(',')]
    options.unit_multipliers = [float(ii)
                                for ii in options.unit_multipliers.split(',')]
    options.wave_dir = [float(ii)
                        for ii in options.wave_dir.split(',')]
    aux = options.range.split(',')
    options.range = [float(aux[0]), float(aux[1]), int(aux[2])]
    options.solver_conf = dict_from_string(options.solver_conf)

    if options.clear:
        remove_files_patterns(output_dir,
                              ['*.h5', '*.vtk', '*.txt'],
                              ignores=['output_log.txt'],
                              verbose=True)

    filename = os.path.join(output_dir, 'options.txt')
    ensure_path(filename)
    save_options(filename, [('options', vars(options))])

    pars = apply_units(options.pars, options.unit_multipliers)
    output('material parameters with applied unit multipliers:')
    output(pars)

    # The sweep range is in wave numbers for mode 'omega', in frequencies
    # for mode 'kappa'.
    if options.mode == 'omega':
        rng = copy(options.range)
        rng[:2] = apply_unit_multipliers(options.range[:2],
                                         ['wave_number', 'wave_number'],
                                         options.unit_multipliers)
        output('wave number range with applied unit multipliers:', rng)

    else:
        rng = copy(options.range)
        rng[:2] = apply_unit_multipliers(options.range[:2],
                                         ['frequency', 'frequency'],
                                         options.unit_multipliers)
        output('frequency range with applied unit multipliers:', rng)

    define_problem = functools.partial(define,
                                       filename_mesh=options.mesh_filename,
                                       pars=pars,
                                       approx_order=options.order,
                                       refinement_level=options.refine,
                                       solver_conf=options.solver_conf,
                                       plane=options.plane)

    conf = ProblemConf.from_dict(define_problem(), sys.modules[__name__])

    pb = Problem.from_conf(conf)
    dim = pb.domain.shape.dim

    if dim != 2:
        # Plane strain/stress only makes sense in 2D.
        options.plane = 'strain'

    # Normalized wave direction vector.
    wdir = nm.asarray(options.wave_dir[:dim], dtype=nm.float64)
    wdir = wdir / nm.linalg.norm(wdir)

    # TimeStepper is reused here as a generic linspace-like stepper.
    stepper = TimeStepper(rng[0], rng[1], dt=None, n_step=rng[2])

    # Rescale the mesh to the desired size (in basic units).
    bbox = pb.domain.mesh.get_bounding_box()
    size = (bbox[1] - bbox[0]).max()
    scaling0 = apply_unit_multipliers([1.0], ['length'],
                                      options.unit_multipliers)[0]
    scaling = scaling0
    if options.mesh_size is not None:
        scaling *= options.mesh_size / size
    output('scaling factor of periodic cell mesh coordinates:', scaling)
    output('new mesh size with applied unit multipliers:', scaling * size)
    pb.domain.mesh.coors[:] *= scaling
    pb.set_mesh_coors(pb.domain.mesh.coors, update_fields=True)

    bzone = 2.0 * nm.pi / (scaling * size)
    output('1. Brillouin zone size:', bzone * scaling0)
    output('1. Brillouin zone size with applied unit multipliers:', bzone)

    pb.time_update()
    pb.update_materials()

    if options.save_materials or options.log_std_waves:
        # Element-averaged material data, used below for saving and/or for
        # the standard p-/s-wave speed estimates.
        stiffness = pb.evaluate('ev_integrate_mat.2.Omega(m.D, u)',
                                mode='el_avg', copy_materials=False, verbose=False)
        young, poisson = mc.youngpoisson_from_stiffness(stiffness,
                                                        plane=options.plane)
        density = pb.evaluate('ev_integrate_mat.2.Omega(m.density, u)',
                              mode='el_avg', copy_materials=False, verbose=False)

    if options.save_materials:
        out = {}
        out['young'] = Struct(name='young', mode='cell',
                              data=young[..., None, None])
        out['poisson'] = Struct(name='poisson', mode='cell',
                                data=poisson[..., None, None])
        out['density'] = Struct(name='density', mode='cell', data=density)
        materials_filename = os.path.join(output_dir, 'materials.vtk')
        pb.save_state(materials_filename, out=out)

    # Set the normalized wave vector direction to the material(s).
    set_wave_dir(pb.get_materials(), wdir)

    conf = pb.solver_confs['eig']
    eig_solver = Solver.any_from_conf(conf)

    # Assemble the matrices.
    mtx_m = pb.mtx_a.copy()
    eq_m = pb.equations['M']
    mtx_m = eq_m.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_m)
    mtx_m.eliminate_zeros()

    mtx_k = pb.mtx_a.copy()
    eq_k = pb.equations['K']
    mtx_k = eq_k.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_k)
    mtx_k.eliminate_zeros()

    mtx_s = pb.mtx_a.copy()
    eq_s = pb.equations['S']
    mtx_s = eq_s.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_s)
    mtx_s.eliminate_zeros()

    mtx_r = pb.mtx_a.copy()
    eq_r = pb.equations['R']
    mtx_r = eq_r.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_r)
    mtx_r.eliminate_zeros()

    # M, K, S should be symmetric; R antisymmetric.
    output('symmetry checks of real blocks:')
    output('M - M^T:', _max_diff_csr(mtx_m, mtx_m.T))
    output('K - K^T:', _max_diff_csr(mtx_k, mtx_k.T))
    output('S - S^T:', _max_diff_csr(mtx_s, mtx_s.T))
    output('R + R^T:', _max_diff_csr(mtx_r, -mtx_r.T))

    # When more eigenvalues are requested than DOFs exist, n_eigs=None
    # makes the solver return all of them.
    n_eigs = options.n_eigs
    if options.n_eigs > mtx_k.shape[0]:
        options.n_eigs = mtx_k.shape[0]
        n_eigs = None

    if options.mode == 'omega':
        # Sweep wave number magnitudes, solving a generalized EVP for
        # omega^2 at each step.
        eigenshapes_filename = os.path.join(output_dir,
                                            'frequency-eigenshapes-%s.vtk'
                                            % stepper.suffix)

        extra = []
        extra_plot_kwargs = []
        if options.log_std_waves:
            # Homogeneous-medium reference: pressure and shear wave speeds
            # from average Lame parameters and density.
            lam, mu = mc.lame_from_youngpoisson(young, poisson,
                                                plane=options.plane)
            alam = nm.average(lam)
            amu = nm.average(mu)
            adensity = nm.average(density)

            cp = nm.sqrt((alam + 2.0 * amu) / adensity)
            cs = nm.sqrt(amu / adensity)
            output('average p-wave speed:', cp)
            output('average shear wave speed:', cs)

            extra = [r'$\omega_p$', r'$\omega_s$']
            extra_plot_kwargs = [{'ls' : '--', 'color' : 'k'},
                                 {'ls' : '--', 'color' : 'gray'}]

        log = Log([[r'$\lambda_{%d}$' % ii for ii in range(options.n_eigs)],
                   [r'$\omega_{%d}$'
                    % ii for ii in range(options.n_eigs)] + extra],
                  plot_kwargs=[{}, [{}] * options.n_eigs + extra_plot_kwargs],
                  yscales=['linear', 'linear'],
                  xlabels=[r'$\kappa$', r'$\kappa$'],
                  ylabels=[r'eigenvalues $\lambda_i$',
                           r'frequencies $\omega_i$'],
                  log_filename=os.path.join(output_dir, 'frequencies.txt'),
                  aggregate=1000, sleep=0.1)
        for iv, wmag in stepper:
            output('step %d: wave vector %s' % (iv, wmag * wdir))

            # A(kappa) x = omega^2 M x with A = K + kappa^2 S + i kappa R.
            mtx_a = mtx_k + wmag**2 * mtx_s + (1j * wmag) * mtx_r
            mtx_b = mtx_m

            output('A - A^H:', _max_diff_csr(mtx_a, mtx_a.H))

            if options.eigs_only:
                eigs = eig_solver(mtx_a, mtx_b, n_eigs=n_eigs,
                                  eigenvectors=False)
                svecs = None

            else:
                eigs, svecs = eig_solver(mtx_a, mtx_b, n_eigs=options.n_eigs,
                                         eigenvectors=True)
            omegas = nm.sqrt(eigs)

            output('eigs, omegas:\n', nm.c_[eigs, omegas])

            out = tuple(eigs) + tuple(omegas)
            if options.log_std_waves:
                out = out + (cp * wmag, cs * wmag)
            log(*out, x=[wmag, wmag])

            save_eigenvectors(eigenshapes_filename % iv, svecs, pb)

        log(save_figure=os.path.join(output_dir, 'frequencies.png'))
        log(finished=True)

    else:
        # Sweep frequencies, solving a linearized quadratic EVP for the
        # wave number kappa using a Cholesky factor of S.
        import scipy.sparse as sps
        from sksparse.cholmod import cholesky

        eigenshapes_filename = os.path.join(output_dir,
                                            'wave-number-eigenshapes-%s.vtk'
                                            % stepper.suffix)

        # S = L L^T with the fill-reducing permutation undone, so that the
        # linearization below uses an unpermuted factor.
        factor = cholesky(mtx_s)
        perm = factor.P()
        ir = nm.arange(len(perm))
        mtx_p = sps.coo_matrix((nm.ones_like(perm), (ir, perm)))
        mtx_l = mtx_p.T * factor.L()
        mtx_eye = sps.eye(mtx_l.shape[0], dtype=nm.float64)

        output('S - LL^T:', _max_diff_csr(mtx_s, mtx_l * mtx_l.T))

        log = Log([[r'$\kappa_{%d}$' % ii for ii in range(options.n_eigs)]],
                  plot_kwargs=[{'ls' : 'None', 'marker' : 'o'}],
                  yscales=['linear'],
                  xlabels=[r'$\omega$'],
                  ylabels=[r'wave numbers $\kappa_i$'],
                  log_filename=os.path.join(output_dir, 'wave-numbers.txt'),
                  aggregate=1000, sleep=0.1)
        for io, omega in stepper:
            output('step %d: frequency %s' % (io, omega))

            # Linearized pencil: A x = kappa B x.
            mtx_a = sps.bmat([[mtx_k - omega**2 * mtx_m, None],
                              [None, mtx_eye]])
            mtx_b = sps.bmat([[1j * mtx_r, mtx_l],
                              [mtx_l.T, None]])

            output('A - A^T:', _max_diff_csr(mtx_a, mtx_a.T))
            # NOTE(review): this line compares against mtx_a.T again although
            # the label says A^H -- presumably mtx_a.H was intended. Here
            # mtx_a is real (K, M, eye), so .T and .H coincide numerically,
            # but confirm before relying on this diagnostic for complex A.
            output('A - A^H:', _max_diff_csr(mtx_a, mtx_a.T))
            output('B - B^H:', _max_diff_csr(mtx_b, mtx_b.H))

            if options.eigs_only:
                eigs = eig_solver(mtx_a, mtx_b, n_eigs=n_eigs,
                                  eigenvectors=False)
                svecs = None

            else:
                eigs, svecs = eig_solver(mtx_a, mtx_b, n_eigs=options.n_eigs,
                                         eigenvectors=True)
            kappas = eigs

            output('kappas:\n', kappas[:, None])

            out = tuple(kappas)
            log(*out, x=[omega])

            save_eigenvectors(eigenshapes_filename % io, svecs, pb)

        log(save_figure=os.path.join(output_dir, 'wave-numbers.png'))
        log(finished=True)
# Script entry point: run the dispersion analysis when executed directly.
if __name__ == '__main__':
    main()
| lokik/sfepy | examples/linear_elasticity/dispersion_analysis.py | Python | bsd-3-clause | 21,427 | [
"VTK"
] | 6f6580ab9c043b8885b6bbde56fdd65fc6a2d58aa196537eb8f1341ddb76d72f |
#!/usr/bin/env python
#
# Copyright 2008,2009,2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# String keys used with the pubsub (publish/subscribe) store that backs
# top_block below.  Reading/writing top_block[KEY] triggers the publishers
# and subscribers registered for that key.
DESC_KEY = 'desc'                        # human-readable USRP description
SAMP_RATE_KEY = 'samp_rate'              # DAC sample rate in samples/sec
LINK_RATE_KEY = 'link_rate'              # NOTE(review): declared but not used in this file
GAIN_KEY = 'gain'                        # TX gain in dB
TX_FREQ_KEY = 'tx_freq'                  # requested carrier frequency in Hz
DSP_FREQ_KEY = 'dsp_freq'                # actual DSP (digital) tuning component
RF_FREQ_KEY = 'rf_freq'                  # actual RF (analog) tuning component
AMPLITUDE_KEY = 'amplitude'              # baseband amplitude, 0.0-1.0
AMPL_RANGE_KEY = 'ampl_range'            # NOTE(review): read by _set_tx_amplitude but never published here
WAVEFORM_FREQ_KEY = 'waveform_freq'      # primary waveform frequency in Hz
WAVEFORM_OFFSET_KEY = 'waveform_offset'  # DC offset / initial phase of waveform
WAVEFORM2_FREQ_KEY = 'waveform2_freq'    # secondary tone / sweep-rate frequency
FREQ_RANGE_KEY = 'freq_range'            # tunable frequency range of the device
GAIN_RANGE_KEY = 'gain_range'            # valid gain range of the device
TYPE_KEY = 'type'                        # waveform type (see `waveforms` below)
def setter(ps, key, val):
    """Store *val* under *key* in the mapping/pubsub *ps* (returns None)."""
    ps[key] = val
from gnuradio import gr, gru, uhd, eng_notation
from gnuradio.gr.pubsub import pubsub
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
import math
# Shorthand for pretty-printing numbers in engineering notation (e.g. "1.5M").
n2s = eng_notation.num_to_str
# Map waveform-type identifiers (GNU Radio enum values for the simple types,
# custom string tags for the composite ones) to human-readable names used in
# console output.
waveforms = { gr.GR_SIN_WAVE : "Complex Sinusoid",
              gr.GR_CONST_WAVE : "Constant",
              gr.GR_GAUSSIAN : "Gaussian Noise",
              gr.GR_UNIFORM : "Uniform Noise",
              "2tone" : "Two Tone",
              "sweep" : "Sweep" }
#
# GUI-unaware GNU Radio flowgraph. This may be used either with command
# line applications or GUI applications.
#
class top_block(gr.top_block, pubsub):
    """
    GUI-unaware signal-generator flowgraph feeding a UHD USRP sink.

    The block doubles as a pubsub store: assigning to self[KEY] invokes the
    set_* method subscribed for that key, so both command-line and GUI
    front-ends drive the flowgraph through simple item assignment.
    """
    def __init__(self, options, args):
        """Build the flowgraph from parsed command-line *options*."""
        gr.top_block.__init__(self)
        pubsub.__init__(self)
        self._verbose = options.verbose
        #initialize values from options
        self._setup_usrpx(options)
        self[SAMP_RATE_KEY] = options.samp_rate
        self[TX_FREQ_KEY] = options.tx_freq
        self[AMPLITUDE_KEY] = options.amplitude
        self[WAVEFORM_FREQ_KEY] = options.waveform_freq
        self[WAVEFORM_OFFSET_KEY] = options.offset
        self[WAVEFORM2_FREQ_KEY] = options.waveform2_freq
        self[DSP_FREQ_KEY] = 0
        self[RF_FREQ_KEY] = 0
        #subscribe set methods so that self[KEY] = value calls the setter
        self.subscribe(SAMP_RATE_KEY, self.set_samp_rate)
        self.subscribe(GAIN_KEY, self.set_gain)
        self.subscribe(TX_FREQ_KEY, self.set_freq)
        self.subscribe(AMPLITUDE_KEY, self.set_amplitude)
        self.subscribe(WAVEFORM_FREQ_KEY, self.set_waveform_freq)
        self.subscribe(WAVEFORM2_FREQ_KEY, self.set_waveform2_freq)
        self.subscribe(TYPE_KEY, self.set_waveform)
        #force update on pubsub keys: re-assigning each key fires its
        #subscriber with the value captured above
        for key in (SAMP_RATE_KEY, GAIN_KEY, TX_FREQ_KEY,
                    AMPLITUDE_KEY, WAVEFORM_FREQ_KEY,
                    WAVEFORM_OFFSET_KEY, WAVEFORM2_FREQ_KEY):
            self[key] = self[key]
        self[TYPE_KEY] = options.type #set type last: set_waveform wires the flowgraph
    def _setup_usrpx(self, options):
        """Create and configure the UHD USRP sink from *options*."""
        self._u = uhd.usrp_sink(device_addr=options.args, stream_args=uhd.stream_args('fc32'))
        self._u.set_samp_rate(options.samp_rate)
        # Set the subdevice spec
        if(options.spec):
            self._u.set_subdev_spec(options.spec, 0)
        # Set the gain on the usrp from options
        if(options.gain):
            self._u.set_gain(options.gain)
        # Set the antenna
        if(options.antenna):
            self._u.set_antenna(options.antenna, 0)
        # Setup USRP Configuration value (older UHD versions lack
        # get_usrp_info(), hence the blanket except below)
        try:
            usrp_info = self._u.get_usrp_info()
            mboard_id = usrp_info.get("mboard_id")
            mboard_serial = usrp_info.get("mboard_serial")
            if mboard_serial == "":
                mboard_serial = "no serial"
            dboard_subdev_name = usrp_info.get("tx_subdev_name")
            dboard_serial = usrp_info.get("tx_serial")
            if dboard_serial == "":
                dboard_serial = "no serial"
            subdev = self._u.get_subdev_spec()
            antenna = self._u.get_antenna()
            desc_key_str = "Motherboard: %s [%s]\n" % (mboard_id, mboard_serial)
            desc_key_str += "Daughterboard: %s [%s]\n" % (dboard_subdev_name, dboard_serial)
            desc_key_str += "Subdev: %s\n" % subdev
            desc_key_str += "Antenna: %s" % antenna
        except:
            desc_key_str = "USRP configuration output not implemented in this version"
        # Publish read-only accessors; reading self[KEY] calls these.
        self.publish(DESC_KEY, lambda: desc_key_str)
        self.publish(FREQ_RANGE_KEY, self._u.get_freq_range)
        self.publish(GAIN_RANGE_KEY, self._u.get_gain_range)
        self.publish(GAIN_KEY, self._u.get_gain)
        print "UHD Signal Generator"
        print "Version: %s" % uhd.get_version_string()
        print "\nUsing USRP configuration:"
        print desc_key_str + "\n"
        # Direct asynchronous notifications to callback function
        if options.show_async_msg:
            self.async_msgq = gr.msg_queue(0)
            self.async_src = uhd.amsg_source("", self.async_msgq)
            self.async_rcv = gru.msgq_runner(self.async_msgq, self.async_callback)
    def async_callback(self, msg):
        """Print asynchronous UHD notifications (underflow etc.) to stdout."""
        md = self.async_src.msg_to_async_metadata_t(msg)
        print "Channel: %i Time: %f Event: %i" % (md.channel, md.time_spec.get_real_secs(), md.event_code)
    def _set_tx_amplitude(self, ampl):
        """
        Sets the transmit amplitude sent to the USRP
        @param ampl the amplitude or None for automatic

        NOTE(review): relies on AMPL_RANGE_KEY, which is never published in
        this file; presumably a GUI front-end provides it -- verify callers.
        """
        ampl_range = self[AMPL_RANGE_KEY]
        if ampl is None:
            # default to 15% of the available amplitude range
            ampl = (ampl_range[1] - ampl_range[0])*0.15 + ampl_range[0]
        self[AMPLITUDE_KEY] = max(ampl_range[0], min(ampl, ampl_range[1]))
    def set_samp_rate(self, sr):
        """Set hardware sample rate and re-tune the active source(s) to it."""
        self._u.set_samp_rate(sr)
        sr = self._u.get_samp_rate()  # actual rate the hardware settled on
        if self[TYPE_KEY] in (gr.GR_SIN_WAVE, gr.GR_CONST_WAVE):
            self._src.set_sampling_freq(self[SAMP_RATE_KEY])
        elif self[TYPE_KEY] == "2tone":
            self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
            self._src2.set_sampling_freq(self[SAMP_RATE_KEY])
        elif self[TYPE_KEY] == "sweep":
            self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
            self._src2.set_sampling_freq(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
        else:
            return True # Waveform not yet set
        if self._verbose:
            print "Set sample rate to:", sr
        return True
    def set_gain(self, gain):
        """Set TX gain in dB; None picks the mid-point of the gain range."""
        if gain is None:
            g = self[GAIN_RANGE_KEY]
            gain = float(g.start()+g.stop())/2
            if self._verbose:
                print "Using auto-calculated mid-point TX gain"
            # re-assign through pubsub so this setter runs again with a number
            self[GAIN_KEY] = gain
            return
        self._u.set_gain(gain)
        if self._verbose:
            print "Set TX gain to:", gain
    def set_freq(self, target_freq):
        """
        Tune the USRP to *target_freq* Hz (None -> mid-point of range).
        Returns the uhd tune_result, or None on failure.
        """
        if target_freq is None:
            f = self[FREQ_RANGE_KEY]
            target_freq = float(f.start()+f.stop())/2.0
            if self._verbose:
                print "Using auto-calculated mid-point frequency"
            # re-assign through pubsub so this setter runs again with a number
            self[TX_FREQ_KEY] = target_freq
            return
        tr = self._u.set_center_freq(target_freq)
        fs = "%sHz" % (n2s(target_freq),)  # NOTE(review): unused local
        if tr is not None:
            self._freq = target_freq
            self[DSP_FREQ_KEY] = tr.actual_dsp_freq
            self[RF_FREQ_KEY] = tr.actual_rf_freq
            if self._verbose:
                print "Set center frequency to", self._u.get_center_freq()
                print "Tx RF frequency: %sHz" % (n2s(tr.actual_rf_freq),)
                print "Tx DSP frequency: %sHz" % (n2s(tr.actual_dsp_freq),)
        elif self._verbose:
            print "Failed to set freq."
        return tr
    def set_waveform_freq(self, freq):
        """Set the primary waveform frequency on the active source."""
        if self[TYPE_KEY] == gr.GR_SIN_WAVE:
            self._src.set_frequency(freq)
        elif self[TYPE_KEY] == "2tone":
            self._src1.set_frequency(freq)
        elif self[TYPE_KEY] == 'sweep':
            #there is no set sensitivity, redo fg
            #(re-assigning TYPE_KEY rebuilds the flowgraph via set_waveform)
            self[TYPE_KEY] = self[TYPE_KEY]
        return True
    def set_waveform2_freq(self, freq):
        """Set the second tone (2tone) or sweep-rate (sweep) frequency."""
        if freq is None:
            # default: mirror of the first tone
            self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
            return
        if self[TYPE_KEY] == "2tone":
            self._src2.set_frequency(freq)
        elif self[TYPE_KEY] == "sweep":
            self._src1.set_frequency(freq)
        return True
    def set_waveform(self, type):
        """Rebuild the source chain for waveform *type* and connect it to the sink."""
        self.lock()
        self.disconnect_all()  # tear down the old source chain
        if type == gr.GR_SIN_WAVE or type == gr.GR_CONST_WAVE:
            self._src = gr.sig_source_c(self[SAMP_RATE_KEY],      # Sample rate
                                        type,                     # Waveform type
                                        self[WAVEFORM_FREQ_KEY],  # Waveform frequency
                                        self[AMPLITUDE_KEY],      # Waveform amplitude
                                        self[WAVEFORM_OFFSET_KEY])        # Waveform offset
        elif type == gr.GR_GAUSSIAN or type == gr.GR_UNIFORM:
            self._src = gr.noise_source_c(type, self[AMPLITUDE_KEY])
        elif type == "2tone":
            # two sine sources summed; each carries half the amplitude
            self._src1 = gr.sig_source_c(self[SAMP_RATE_KEY],
                                         gr.GR_SIN_WAVE,
                                         self[WAVEFORM_FREQ_KEY],
                                         self[AMPLITUDE_KEY]/2.0,
                                         0)
            if(self[WAVEFORM2_FREQ_KEY] is None):
                self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
            self._src2 = gr.sig_source_c(self[SAMP_RATE_KEY],
                                         gr.GR_SIN_WAVE,
                                         self[WAVEFORM2_FREQ_KEY],
                                         self[AMPLITUDE_KEY]/2.0,
                                         0)
            self._src = gr.add_cc()
            self.connect(self._src1,(self._src,0))
            self.connect(self._src2,(self._src,1))
        elif type == "sweep":
            # rf freq is center frequency
            # waveform_freq is total swept width
            # waveform2_freq is sweep rate
            # will sweep from (rf_freq-waveform_freq/2) to (rf_freq+waveform_freq/2)
            if self[WAVEFORM2_FREQ_KEY] is None:
                self[WAVEFORM2_FREQ_KEY] = 0.1
            # triangle wave drives an FM modulator to produce the sweep
            self._src1 = gr.sig_source_f(self[SAMP_RATE_KEY],
                                         gr.GR_TRI_WAVE,
                                         self[WAVEFORM2_FREQ_KEY],
                                         1.0,
                                         -0.5)
            self._src2 = gr.frequency_modulator_fc(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
            self._src = gr.multiply_const_cc(self[AMPLITUDE_KEY])
            self.connect(self._src1,self._src2,self._src)
        else:
            raise RuntimeError("Unknown waveform type")
        self.connect(self._src, self._u)
        self.unlock()
        if self._verbose:
            print "Set baseband modulation to:", waveforms[type]
            if type == gr.GR_SIN_WAVE:
                print "Modulation frequency: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
                print "Initial phase:", self[WAVEFORM_OFFSET_KEY]
            elif type == "2tone":
                print "Tone 1: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
                print "Tone 2: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
            elif type == "sweep":
                print "Sweeping across %sHz to %sHz" % (n2s(-self[WAVEFORM_FREQ_KEY]/2.0),n2s(self[WAVEFORM_FREQ_KEY]/2.0))
                print "Sweep rate: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
            print "TX amplitude:", self[AMPLITUDE_KEY]
    def set_amplitude(self, amplitude):
        """Set baseband amplitude (0.0-1.0) on the active source chain."""
        if amplitude < 0.0 or amplitude > 1.0:
            if self._verbose:
                print "Amplitude out of range:", amplitude
            return False
        if self[TYPE_KEY] in (gr.GR_SIN_WAVE, gr.GR_CONST_WAVE, gr.GR_GAUSSIAN, gr.GR_UNIFORM):
            self._src.set_amplitude(amplitude)
        elif self[TYPE_KEY] == "2tone":
            # each tone carries half the total amplitude
            self._src1.set_amplitude(amplitude/2.0)
            self._src2.set_amplitude(amplitude/2.0)
        elif self[TYPE_KEY] == "sweep":
            self._src.set_k(amplitude)
        else:
            return True # Waveform not yet set
        if self._verbose:
            print "Set amplitude to:", amplitude
        return True
def get_options():
    """
    Parse command-line options for the signal generator.

    Returns the (options, args) pair from optparse; the mutually exclusive
    waveform flags (--sine/--const/--gaussian/--uniform/--2tone/--sweep)
    all store into options.type, defaulting to a complex sine.
    """
    usage="%prog: [options]"
    parser = OptionParser(option_class=eng_option, usage=usage)
    parser.add_option("-a", "--args", type="string", default="",
                      help="UHD device address args , [default=%default]")
    parser.add_option("", "--spec", type="string", default=None,
                      help="Subdevice of UHD device where appropriate")
    parser.add_option("-A", "--antenna", type="string", default=None,
                      help="select Rx Antenna where appropriate")
    parser.add_option("-s", "--samp-rate", type="eng_float", default=1e6,
                      help="set sample rate (bandwidth) [default=%default]")
    parser.add_option("-g", "--gain", type="eng_float", default=None,
                      help="set gain in dB (default is midpoint)")
    parser.add_option("-f", "--tx-freq", type="eng_float", default=None,
                      help="Set carrier frequency to FREQ [default=mid-point]",
                      metavar="FREQ")
    parser.add_option("-x", "--waveform-freq", type="eng_float", default=0,
                      help="Set baseband waveform frequency to FREQ [default=%default]")
    parser.add_option("-y", "--waveform2-freq", type="eng_float", default=None,
                      help="Set 2nd waveform frequency to FREQ [default=%default]")
    parser.add_option("--sine", dest="type", action="store_const", const=gr.GR_SIN_WAVE,
                      help="Generate a carrier modulated by a complex sine wave",
                      default=gr.GR_SIN_WAVE)
    parser.add_option("--const", dest="type", action="store_const", const=gr.GR_CONST_WAVE,
                      help="Generate a constant carrier")
    parser.add_option("--offset", type="eng_float", default=0,
                      help="Set waveform phase offset to OFFSET [default=%default]")
    parser.add_option("--gaussian", dest="type", action="store_const", const=gr.GR_GAUSSIAN,
                      help="Generate Gaussian random output")
    parser.add_option("--uniform", dest="type", action="store_const", const=gr.GR_UNIFORM,
                      help="Generate Uniform random output")
    parser.add_option("--2tone", dest="type", action="store_const", const="2tone",
                      help="Generate Two Tone signal for IMD testing")
    parser.add_option("--sweep", dest="type", action="store_const", const="sweep",
                      help="Generate a swept sine wave")
    parser.add_option("", "--amplitude", type="eng_float", default=0.15,
                      help="Set output amplitude to AMPL (0.0-1.0) [default=%default]",
                      metavar="AMPL")
    parser.add_option("-v", "--verbose", action="store_true", default=False,
                      help="Use verbose console output [default=%default]")
    parser.add_option("", "--show-async-msg", action="store_true", default=False,
                      help="Show asynchronous message notifications from UHD [default=%default]")
    (options, args) = parser.parse_args()
    return (options, args)
# If this script is executed, the following runs. If it is imported,
# the below does not run.
def test_main():
    """Command-line entry point: build the flowgraph and run until Enter."""
    if gr.enable_realtime_scheduling() != gr.RT_OK:
        print "Note: failed to enable realtime scheduling, continuing"
    # Grab command line options and create top block
    try:
        (options, args) = get_options()
        tb = top_block(options, args)
    except RuntimeError, e:
        print e
        sys.exit(1)
    tb.start()
    raw_input('Press Enter to quit: ')
    tb.stop()
    tb.wait()
# Make sure to create the top block (tb) within a function:
# the code in test_main allows tb to go out of scope on return,
# which will call the destructor on the usrp object and stop transmit.
# What's odd is that grc works fine with tb in __main__,
# perhaps because of the try/except clauses around tb.
if __name__ == "__main__":
    test_main()
| tyc85/nwsdr-3.6.3-dsc | gr-uhd/apps/uhd_siggen_base.py | Python | gpl-3.0 | 16,747 | [
"Gaussian"
] | b0ae5a661c05ae82b6afa49aba75dff2c154cfe414205b08474c012a2abc61bd |
#!/usr/bin/env python2
# Copyright (C) 2015-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# #
# This is an example for an MD simulation of a simple Lennard-Jones #
# fluid with ESPResSo++. #
# #
###########################################################################
"""
We will start with particles at random positions within
the simulation box interacting via a shifted Lennard-Jones type potential
with an interaction cutoff at 2.5.
Newton's equations of motion are integrated with a Velocity-Verlet integrator.
The canonical (NVT) ensemble is realized by using a Langevin thermostat.
In order to prevent explosion due to strongly overlapping volumes of
random particles the system needs to be warmed up first.
Warm-up is accomplished by using a repelling-only LJ interaction
(cutoff=1.12246, shift=0.25) with a force capping at radius 0.6
and initial small LJ epsilon value of 0.1.
During warmup epsilon is gradually increased to its final value 1.0.
After warm-up the system is equilibrated using the full uncapped LJ Potential.
If a system still explodes during warmup or equilibration, warmup time
could be increased by increasing warmup_nloops and the capradius could
be set to another value. Depending on the system (number of particles, density, ...)
it could also be necessary to vary sigma during warmup.
The simulation consists of the following steps:
1. specification of the main simulation parameters
2. setup of the system, random number generator and parallelisation
3. setup of the integrator and simulation ensemble
4. adding the particles
5. setting up interaction potential for the warmup
6. running the warmup loop
7. setting up interaction potential for the equilibration
8. running the equilibration loop
9. writing configuration to a file
"""
import espressopp
########################################################################
# 1. specification of the main simulation parameters #
########################################################################
# --- main simulation parameters (reduced Lennard-Jones units) ---
# number of particles
Npart = 32768
# density of particles
rho = 0.8442
# length of simulation box (chosen so that Npart particles give density rho)
L = pow(Npart/rho, 1.0/3.0)
# cubic simulation box of size L
box = (L, L, L)
# cutoff of the short range potential
r_cutoff = 2.5
# VerletList skin size (also used for domain decomposition)
skin = 0.4
# the temperature of the system
temperature = 1.0
# time step for the velocity verlet integrator
dt = 0.005
# Lennard Jones epsilon during equilibration phase
epsilon = 1.0
# Lennard Jones sigma during warmup and equilibration
sigma = 1.0
# interaction cut-off used during the warm-up phase (2^(1/6) = LJ minimum,
# i.e. a purely repulsive WCA-style potential)
warmup_cutoff = pow(2.0, 1.0/6.0)
# number of warm-up loops
warmup_nloops = 100
# number of integration steps performed in each warm-up loop
warmup_isteps = 200
# total number of integration steps of the warm-up phase
total_warmup_steps = warmup_nloops * warmup_isteps
# initial value for LJ epsilon at beginning of warmup
epsilon_start = 0.1
# final value for LJ epsilon at end of warmup
epsilon_end = 1.0
# increment epsilon by epsilon delta after each warmup_loop
epsilon_delta = (epsilon_end - epsilon_start) / warmup_nloops
# force capping radius
capradius = 0.6
# number of equilibration loops
equil_nloops = 100
# number of integration steps performed in each equilibration loop
equil_isteps = 100
# print ESPResSo++ version and compile info
print espressopp.Version().info()
# print simulation parameters (useful to have them in a log file)
print "Npart = ", Npart
print "rho = ", rho
print "L = ", L
print "box = ", box
print "r_cutoff = ", r_cutoff
print "skin = ", skin
print "temperature = ", temperature
print "dt = ", dt
print "epsilon = ", epsilon
print "sigma = ", sigma
print "warmup_cutoff = ", warmup_cutoff
print "warmup_nloops = ", warmup_nloops
print "warmup_isteps = ", warmup_isteps
print "total_warmup_steps = ", total_warmup_steps
print "epsilon_start = ", epsilon_start
print "epsilon_end = ", epsilon_end
print "epsilon_delta = ", epsilon_delta
print "capradius = ", capradius
print "equil_nloops = ", equil_nloops
print "equil_isteps = ", equil_isteps
########################################################################
# 2. setup of the system, random number generator and parallelisation  #
########################################################################
# create the basic system
system = espressopp.System()
# use the random number generator that is included within the ESPResSo++ package
system.rng = espressopp.esutil.RNG()
# use orthorhombic periodic boundary conditions
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
# set the skin size used for verlet lists and cell sizes
system.skin = skin
# get the number of CPUs to use
NCPUs = espressopp.MPI.COMM_WORLD.size
# calculate a regular 3D grid according to the number of CPUs available
nodeGrid = espressopp.tools.decomp.nodeGrid(NCPUs,box,warmup_cutoff, skin)
# calculate a 3D subgrid to speed up verlet list builds and communication
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, warmup_cutoff, skin)
# create a domain decomposition particle storage with the calculated nodeGrid and cellGrid
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
print "NCPUs = ", NCPUs
print "nodeGrid = ", nodeGrid
print "cellGrid = ", cellGrid
########################################################################
# 3. setup of the integrator and simulation ensemble                   #
########################################################################
# use a velocity Verlet integration scheme
integrator = espressopp.integrator.VelocityVerlet(system)
# set the integration step
integrator.dt = dt
# use a thermostat if the temperature is set (temperature=None would give NVE)
if (temperature != None):
    # create a Langevin thermostat
    thermostat = espressopp.integrator.LangevinThermostat(system)
    # set Langevin friction constant
    thermostat.gamma = 1.0
    # set temperature
    thermostat.temperature = temperature
    # tell the integrator to use this thermostat
    integrator.addExtension(thermostat)
## steps 2. and 3. could be short-cut by the following expression:
## system, integrator = espressopp.standard_system.Default(box, warmup_cutoff, skin, dt, temperature)
########################################################################
# 4. adding the particles                                              #
########################################################################
print "adding ", Npart, " particles to the system ..."
for pid in range(Npart):
    # get a 3D random coordinate within the box
    pos = system.bc.getRandomPos()
    # add a particle with particle id pid and coordinate pos to the system
    # coordinates are automatically folded according to periodic boundary conditions
    # the following default values are set for each particle:
    # (type=0, mass=1.0, velocity=(0,0,0), charge=0.0)
    system.storage.addParticle(pid, pos)
# distribute the particles to parallel CPUs
system.storage.decompose()
########################################################################
# 5. setting up interaction potential for the warmup                   #
########################################################################
# create a verlet list that uses a cutoff radius = warmup_cutoff
# the verlet radius is automatically increased by system.skin (see system setup)
verletlist = espressopp.VerletList(system, warmup_cutoff)
# create a force capped Lennard-Jones potential
# the potential is automatically shifted so that U(r=cutoff) = 0.0
LJpot = espressopp.interaction.LennardJonesCapped(epsilon=epsilon_start, sigma=sigma, cutoff=warmup_cutoff, caprad=capradius, shift='auto')
# create a force capped Lennard-Jones interaction that uses a verlet list
interaction = espressopp.interaction.VerletListLennardJonesCapped(verletlist)
# tell the interaction to use the above defined force capped Lennard-Jones potential
# between 2 particles of type 0
interaction.setPotential(type1=0, type2=0, potential=LJpot)
########################################################################
# 6. running the warmup loop
########################################################################
# make the force capping interaction known to the system
system.addInteraction(interaction)
print "starting warm-up ..."
# print some status information (time, measured temperature, pressure,
# pressure tensor (xy only), kinetic energy, potential energy, total energy, boxsize)
espressopp.tools.analyse.info(system, integrator)
for step in range(warmup_nloops):
    # perform warmup_isteps integration steps
    integrator.run(warmup_isteps)
    # gradually ramp up the LJ epsilon of the capped potential
    # (from epsilon_start towards epsilon_end, see parameter section)
    LJpot.epsilon += epsilon_delta
    # update the type0-type0 interaction to use the new values of LJpot
    interaction.setPotential(type1=0, type2=0, potential=LJpot)
    # print status info
    espressopp.tools.analyse.info(system, integrator)
print "warmup finished"
# remove the force capping interaction from the system
system.removeInteraction(0)
# the equilibration uses a different interaction cutoff therefore the current
# verlet list is not needed any more and would waste only CPU time
verletlist.disconnect()
########################################################################
# 7. setting up interaction potential for the equilibration            #
########################################################################
# create a new verlet list that uses a cutoff radius = r_cutoff
# the verlet radius is automatically increased by system.skin (see system setup)
verletlist = espressopp.VerletList(system, r_cutoff)
# define a Lennard-Jones interaction that uses a verlet list
interaction = espressopp.interaction.VerletListLennardJones(verletlist)
# use a Lennard-Jones potential between 2 particles of type 0
# shift=0.0 means the potential is NOT shifted at the cutoff;
# pass shift='auto' instead to shift it so that U(r=cutoff) = 0.0
potential = interaction.setPotential(type1=0, type2=0,
                                     potential=espressopp.interaction.LennardJones(
                                     epsilon=epsilon, sigma=sigma, cutoff=r_cutoff, shift=0.0))
########################################################################
# 8. running the equilibration loop                                    #
########################################################################
# add the new interaction to the system
system.addInteraction(interaction)
# since the interaction cut-off changed the size of the cells that are used
# to speed up verlet list builds should be adjusted accordingly
system.storage.cellAdjust()
# set all integrator timers to zero again (they were increased during warmup)
integrator.resetTimers()
# set integrator time step to zero again
integrator.step = 0
print "starting equilibration ..."
# print initial status information
espressopp.tools.analyse.info(system, integrator)
for step in range(equil_nloops):
    # perform equilibration_isteps integration steps
    integrator.run(equil_isteps)
    # print status information
    espressopp.tools.analyse.info(system, integrator)
print "equilibration finished"
########################################################################
# 9. writing configuration to file                                     #
########################################################################
# write folded xyz coordinates and particle velocities into a file
# format of xyz file is:
# first line : number of particles
# second line : box_Lx, box_Ly, box_Lz
# all other lines : ParticleID ParticleType x_pos y_pos z_pos x_vel y_vel z_vel
filename = "lennard_jones_fluid_%0i.xyz" % integrator.step
print "writing final configuration file ..."
espressopp.tools.writexyz(filename, system, velocities = True, unfolded = False)
# also write a PDB file which can be used to visualize configuration with VMD
print "writing pdb file ..."
filename = "lennard_jones_fluid_%0i.pdb" % integrator.step
espressopp.tools.pdbwrite(filename, system, molsize=Npart)
print "finished."
| kkreis/espressopp | examples/lennard_jones/lennard_jones.py | Python | gpl-3.0 | 13,497 | [
"ESPResSo",
"VMD"
] | 9f407b1ffe2f8bbf9fb87421adb35c0315485ec9065f566b07b78e6771577073 |
import struct, string, re
__all__ = [ 'resolve' ]
def resolve(code):
    """
    Transform a twocc or fourcc code into a name. Returns a 2-tuple of (cc,
    codec) where both are strings and cc is a string in the form '0xXX' if it's
    a twocc, or 'ABCD' if it's a fourcc. If the given code is not a known
    twocc or fourcc, the return value will be (None, 'Unknown'), unless the
    code is otherwise a printable string in which case it will be returned as
    the codec.
    """
    if isinstance(code, str):
        codec = 'Unknown'
        # Check for twocc
        if re.match(r'^0x[\da-f]{1,4}$', code, re.I):
            # Twocc in hex form
            return code, TWOCC.get(int(code, 16), codec)
        elif code.isdigit() and 0 <= int(code) <= 0xff:
            # Twocc in decimal form
            return hex(int(code)), TWOCC.get(int(code), codec)
        elif len(code) == 2:
            # Twocc given as its raw two-character representation.  latin-1
            # maps code points 0-255 one-to-one onto byte values, which is
            # what struct needs (Python 3 struct.unpack requires bytes).
            code = struct.unpack('H', code.encode('latin-1'))[0]
            return hex(code), TWOCC.get(code, codec)
        elif len(code) != 4 and len([ x for x in code if x not in string.printable ]) == 0:
            # Not a fourcc (wrong length) but printable: fall back to the
            # string itself as the codec name.
            codec = str(code)
        # BUG FIX: the two checks below used to be nested inside the branch
        # above, which made the FOURCC lookup unreachable for 4-character
        # codes -- they always fell through to (None, 'Unknown').
        # Some vendors prefix codes with 'MS'; strip the prefix when the
        # remainder is a known fourcc.
        if code[:2] == 'MS' and code[2:].upper() in FOURCC:
            code = code[2:]
        if code.upper() in FOURCC:
            return code.upper(), str(FOURCC[code.upper()])
        return None, codec
    elif isinstance(code, int):
        return hex(code), TWOCC.get(code, 'Unknown')
    return None, 'Unknown'
TWOCC = {
0x0000: 'Unknown Wave Format',
0x0001: 'PCM',
0x0002: 'Microsoft ADPCM',
0x0003: 'IEEE Float',
0x0004: 'Compaq Computer VSELP',
0x0005: 'IBM CVSD',
0x0006: 'A-Law',
0x0007: 'mu-Law',
0x0008: 'Microsoft DTS',
0x0009: 'Microsoft DRM',
0x0010: 'OKI ADPCM',
0x0011: 'Intel DVI/IMA ADPCM',
0x0012: 'Videologic MediaSpace ADPCM',
0x0013: 'Sierra Semiconductor ADPCM',
0x0014: 'Antex Electronics G.723 ADPCM',
0x0015: 'DSP Solutions DigiSTD',
0x0016: 'DSP Solutions DigiFIX',
0x0017: 'Dialogic OKI ADPCM',
0x0018: 'MediaVision ADPCM',
0x0019: 'Hewlett-Packard CU',
0x0020: 'Yamaha ADPCM',
0x0021: 'Speech Compression Sonarc',
0x0022: 'DSP Group TrueSpeech',
0x0023: 'Echo Speech EchoSC1',
0x0024: 'Audiofile AF36',
0x0025: 'Audio Processing Technology APTX',
0x0026: 'AudioFile AF10',
0x0027: 'Prosody 1612',
0x0028: 'LRC',
0x0030: 'Dolby AC2',
0x0031: 'Microsoft GSM 6.10',
0x0032: 'MSNAudio',
0x0033: 'Antex Electronics ADPCME',
0x0034: 'Control Resources VQLPC',
0x0035: 'DSP Solutions DigiREAL',
0x0036: 'DSP Solutions DigiADPCM',
0x0037: 'Control Resources CR10',
0x0038: 'Natural MicroSystems VBXADPCM',
0x0039: 'Crystal Semiconductor IMA ADPCM',
0x003A: 'EchoSC3',
0x003B: 'Rockwell ADPCM',
0x003C: 'Rockwell Digit LK',
0x003D: 'Xebec',
0x0040: 'Antex Electronics G.721 ADPCM',
0x0041: 'G.728 CELP',
0x0042: 'MSG723',
0x0043: 'IBM AVC ADPCM',
0x0045: 'ITU-T G.726 ADPCM',
0x0050: 'MPEG 1, Layer 1,2',
0x0052: 'RT24',
0x0053: 'PAC',
0x0055: 'MPEG Layer 3',
0x0059: 'Lucent G.723',
0x0060: 'Cirrus',
0x0061: 'ESPCM',
0x0062: 'Voxware',
0x0063: 'Canopus Atrac',
0x0064: 'G.726 ADPCM',
0x0065: 'G.722 ADPCM',
0x0066: 'DSAT',
0x0067: 'DSAT Display',
0x0069: 'Voxware Byte Aligned',
0x0070: 'Voxware AC8',
0x0071: 'Voxware AC10',
0x0072: 'Voxware AC16',
0x0073: 'Voxware AC20',
0x0074: 'Voxware MetaVoice',
0x0075: 'Voxware MetaSound',
0x0076: 'Voxware RT29HW',
0x0077: 'Voxware VR12',
0x0078: 'Voxware VR18',
0x0079: 'Voxware TQ40',
0x0080: 'Softsound',
0x0081: 'Voxware TQ60',
0x0082: 'MSRT24',
0x0083: 'G.729A',
0x0084: 'MVI MV12',
0x0085: 'DF G.726',
0x0086: 'DF GSM610',
0x0088: 'ISIAudio',
0x0089: 'Onlive',
0x0091: 'SBC24',
0x0092: 'Dolby AC3 SPDIF',
0x0093: 'MediaSonic G.723',
0x0094: 'Aculab PLC Prosody 8KBPS',
0x0097: 'ZyXEL ADPCM',
0x0098: 'Philips LPCBB',
0x0099: 'Packed',
0x00A0: 'Malden Electronics PHONYTALK',
0x00FF: 'AAC',
0x0100: 'Rhetorex ADPCM',
0x0101: 'IBM mu-law',
0x0102: 'IBM A-law',
0x0103: 'IBM AVC Adaptive Differential Pulse Code Modulation',
0x0111: 'Vivo G.723',
0x0112: 'Vivo Siren',
0x0123: 'Digital G.723',
0x0125: 'Sanyo LD ADPCM',
0x0130: 'Sipro Lab Telecom ACELP.net',
0x0131: 'Sipro Lab Telecom ACELP.4800',
0x0132: 'Sipro Lab Telecom ACELP.8V3',
0x0133: 'Sipro Lab Telecom ACELP.G.729',
0x0134: 'Sipro Lab Telecom ACELP.G.729A',
0x0135: 'Sipro Lab Telecom ACELP.KELVIN',
0x0140: 'Windows Media Video V8',
0x0150: 'Qualcomm PureVoice',
0x0151: 'Qualcomm HalfRate',
0x0155: 'Ring Zero Systems TUB GSM',
0x0160: 'Windows Media Audio V1 / DivX audio (WMA)',
0x0161: 'Windows Media Audio V7 / V8 / V9',
0x0162: 'Windows Media Audio Professional V9',
0x0163: 'Windows Media Audio Lossless V9',
0x0170: 'UNISYS NAP ADPCM',
0x0171: 'UNISYS NAP ULAW',
0x0172: 'UNISYS NAP ALAW',
0x0173: 'UNISYS NAP 16K',
0x0200: 'Creative Labs ADPCM',
0x0202: 'Creative Labs Fastspeech8',
0x0203: 'Creative Labs Fastspeech10',
0x0210: 'UHER Informatic ADPCM',
0x0215: 'Ulead DV ACM',
0x0216: 'Ulead DV ACM',
0x0220: 'Quarterdeck',
0x0230: 'I-link Worldwide ILINK VC',
0x0240: 'Aureal Semiconductor RAW SPORT',
0x0241: 'ESST AC3',
0x0250: 'Interactive Products HSX',
0x0251: 'Interactive Products RPELP',
0x0260: 'Consistent Software CS2',
0x0270: 'Sony ATRAC3 (SCX, same as MiniDisk LP2)',
0x0300: 'Fujitsu FM Towns Snd',
0x0400: 'BTV Digital',
0x0401: 'Intel Music Coder (IMC)',
0x0402: 'Ligos Indeo Audio',
0x0450: 'QDesign Music',
0x0680: 'VME VMPCM',
0x0681: 'AT&T Labs TPC',
0x0700: 'YMPEG Alpha',
0x08AE: 'ClearJump LiteWave',
0x1000: 'Olivetti GSM',
0x1001: 'Olivetti ADPCM',
0x1002: 'Olivetti CELP',
0x1003: 'Olivetti SBC',
0x1004: 'Olivetti OPR',
0x1100: 'Lernout & Hauspie LH Codec',
0x1101: 'Lernout & Hauspie CELP codec',
0x1102: 'Lernout & Hauspie SBC codec',
0x1103: 'Lernout & Hauspie SBC codec',
0x1104: 'Lernout & Hauspie SBC codec',
0x1400: 'Norris',
0x1401: 'AT&T ISIAudio',
0x1500: 'Soundspace Music Compression',
0x181C: 'VoxWare RT24 speech codec',
0x181E: 'Lucent elemedia AX24000P Music codec',
0x1C07: 'Lucent SX8300P speech codec',
0x1C0C: 'Lucent SX5363S G.723 compliant codec',
0x1F03: 'CUseeMe DigiTalk (ex-Rocwell)',
0x1FC4: 'NCT Soft ALF2CD ACM',
0x2000: 'AC3',
0x2001: 'Dolby DTS (Digital Theater System)',
0x2002: 'RealAudio 1 / 2 14.4',
0x2003: 'RealAudio 1 / 2 28.8',
0x2004: 'RealAudio G2 / 8 Cook (low bitrate)',
0x2005: 'RealAudio 3 / 4 / 5 Music (DNET)',
0x2006: 'RealAudio 10 AAC (RAAC)',
0x2007: 'RealAudio 10 AAC+ (RACP)',
0x3313: 'makeAVIS',
0x4143: 'Divio MPEG-4 AAC audio',
0x434C: 'LEAD Speech',
0x564C: 'LEAD Vorbis',
0x674F: 'Ogg Vorbis (mode 1)',
0x6750: 'Ogg Vorbis (mode 2)',
0x6751: 'Ogg Vorbis (mode 3)',
0x676F: 'Ogg Vorbis (mode 1+)',
0x6770: 'Ogg Vorbis (mode 2+)',
0x6771: 'Ogg Vorbis (mode 3+)',
0x7A21: 'GSM-AMR (CBR, no SID)',
0x7A22: 'GSM-AMR (VBR, including SID)',
0xDFAC: 'DebugMode SonicFoundry Vegas FrameServer ACM Codec',
0xF1AC: 'Free Lossless Audio Codec FLAC',
0xFFFE: 'Extensible wave format',
0xFFFF: 'development'
}
FOURCC = {
'1978': 'A.M.Paredes predictor (LossLess)',
'2VUY': 'Optibase VideoPump 8-bit 4:2:2 Component YCbCr',
'3IV0': 'MPEG4-based codec 3ivx',
'3IV1': '3ivx v1',
'3IV2': '3ivx v2',
'3IVD': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'3IVX': 'MPEG4-based codec 3ivx',
'8BPS': 'Apple QuickTime Planar RGB with Alpha-channel',
'AAS4': 'Autodesk Animator codec (RLE)',
'AASC': 'Autodesk Animator',
'ABYR': 'Kensington ABYR',
'ACTL': 'Streambox ACT-L2',
'ADV1': 'Loronix WaveCodec',
'ADVJ': 'Avid M-JPEG Avid Technology Also known as AVRn',
'AEIK': 'Intel Indeo Video 3.2',
'AEMI': 'Array VideoONE MPEG1-I Capture',
'AFLC': 'Autodesk Animator FLC',
'AFLI': 'Autodesk Animator FLI',
'AHDV': 'CineForm 10-bit Visually Perfect HD',
'AJPG': '22fps JPEG-based codec for digital cameras',
'AMPG': 'Array VideoONE MPEG',
'ANIM': 'Intel RDX (ANIM)',
'AP41': 'AngelPotion Definitive',
'AP42': 'AngelPotion Definitive',
'ASLC': 'AlparySoft Lossless Codec',
'ASV1': 'Asus Video v1',
'ASV2': 'Asus Video v2',
'ASVX': 'Asus Video 2.0 (audio)',
'ATM4': 'Ahead Nero Digital MPEG-4 Codec',
'AUR2': 'Aura 2 Codec - YUV 4:2:2',
'AURA': 'Aura 1 Codec - YUV 4:1:1',
'AV1X': 'Avid 1:1x (Quick Time)',
'AVC1': 'H.264 AVC',
'AVD1': 'Avid DV (Quick Time)',
'AVDJ': 'Avid Meridien JFIF with Alpha-channel',
'AVDN': 'Avid DNxHD (Quick Time)',
'AVDV': 'Avid DV',
'AVI1': 'MainConcept Motion JPEG Codec',
'AVI2': 'MainConcept Motion JPEG Codec',
'AVID': 'Avid Motion JPEG',
'AVIS': 'Wrapper for AviSynth',
'AVMP': 'Avid IMX (Quick Time)',
'AVR ': 'Avid ABVB/NuVista MJPEG with Alpha-channel',
'AVRN': 'Avid Motion JPEG',
'AVUI': 'Avid Meridien Uncompressed with Alpha-channel',
'AVUP': 'Avid 10bit Packed (Quick Time)',
'AYUV': '4:4:4 YUV (AYUV)',
'AZPR': 'Quicktime Apple Video',
'AZRP': 'Quicktime Apple Video',
'BGR ': 'Uncompressed BGR32 8:8:8:8',
'BGR(15)': 'Uncompressed BGR15 5:5:5',
'BGR(16)': 'Uncompressed BGR16 5:6:5',
'BGR(24)': 'Uncompressed BGR24 8:8:8',
'BHIV': 'BeHere iVideo',
'BINK': 'RAD Game Tools Bink Video',
'BIT ': 'BI_BITFIELDS (Raw RGB)',
'BITM': 'Microsoft H.261',
'BLOX': 'Jan Jezabek BLOX MPEG Codec',
'BLZ0': 'DivX for Blizzard Decoder Filter',
'BT20': 'Conexant Prosumer Video',
'BTCV': 'Conexant Composite Video Codec',
'BTVC': 'Conexant Composite Video',
'BW00': 'BergWave (Wavelet)',
'BW10': 'Data Translation Broadway MPEG Capture',
'BXBG': 'BOXX BGR',
'BXRG': 'BOXX RGB',
'BXY2': 'BOXX 10-bit YUV',
'BXYV': 'BOXX YUV',
'CC12': 'Intel YUV12',
'CDV5': 'Canopus SD50/DVHD',
'CDVC': 'Canopus DV',
'CDVH': 'Canopus SD50/DVHD',
'CFCC': 'Digital Processing Systems DPS Perception',
'CFHD': 'CineForm 10-bit Visually Perfect HD',
'CGDI': 'Microsoft Office 97 Camcorder Video',
'CHAM': 'Winnov Caviara Champagne',
'CJPG': 'Creative WebCam JPEG',
'CLJR': 'Cirrus Logic YUV 4 pixels',
'CLLC': 'Canopus LossLess',
'CLPL': 'YV12',
'CMYK': 'Common Data Format in Printing',
'COL0': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'COL1': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'CPLA': 'Weitek 4:2:0 YUV Planar',
'CRAM': 'Microsoft Video 1 (CRAM)',
'CSCD': 'RenderSoft CamStudio lossless Codec',
'CTRX': 'Citrix Scalable Video Codec',
'CUVC': 'Canopus HQ',
'CVID': 'Radius Cinepak',
'CWLT': 'Microsoft Color WLT DIB',
'CYUV': 'Creative Labs YUV',
'CYUY': 'ATI YUV',
'D261': 'H.261',
'D263': 'H.263',
'DAVC': 'Dicas MPEGable H.264/MPEG-4 AVC base profile codec',
'DC25': 'MainConcept ProDV Codec',
'DCAP': 'Pinnacle DV25 Codec',
'DCL1': 'Data Connection Conferencing Codec',
'DCT0': 'WniWni Codec',
'DFSC': 'DebugMode FrameServer VFW Codec',
'DIB ': 'Full Frames (Uncompressed)',
'DIV1': 'FFmpeg-4 V1 (hacked MS MPEG-4 V1)',
'DIV2': 'MS MPEG-4 V2',
'DIV3': 'DivX v3 MPEG-4 Low-Motion',
'DIV4': 'DivX v3 MPEG-4 Fast-Motion',
'DIV5': 'DIV5',
'DIV6': 'DivX MPEG-4',
'DIVX': 'DivX',
'DM4V': 'Dicas MPEGable MPEG-4',
'DMB1': 'Matrox Rainbow Runner hardware MJPEG',
'DMB2': 'Paradigm MJPEG',
'DMK2': 'ViewSonic V36 PDA Video',
'DP02': 'DynaPel MPEG-4',
'DPS0': 'DPS Reality Motion JPEG',
'DPSC': 'DPS PAR Motion JPEG',
'DRWX': 'Pinnacle DV25 Codec',
'DSVD': 'DSVD',
'DTMT': 'Media-100 Codec',
'DTNT': 'Media-100 Codec',
'DUCK': 'Duck True Motion 1.0',
'DV10': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'DV25': 'Matrox DVCPRO codec',
'DV50': 'Matrox DVCPRO50 codec',
'DVAN': 'DVAN',
'DVC ': 'Apple QuickTime DV (DVCPRO NTSC)',
'DVCP': 'Apple QuickTime DV (DVCPRO PAL)',
'DVCS': 'MainConcept DV Codec',
'DVE2': 'InSoft DVE-2 Videoconferencing',
'DVH1': 'Pinnacle DVHD100',
'DVHD': 'DV 1125 lines at 30.00 Hz or 1250 lines at 25.00 Hz',
'DVIS': 'VSYNC DualMoon Iris DV codec',
'DVL ': 'Radius SoftDV 16:9 NTSC',
'DVLP': 'Radius SoftDV 16:9 PAL',
'DVMA': 'Darim Vision DVMPEG',
'DVOR': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'DVPN': 'Apple QuickTime DV (DV NTSC)',
'DVPP': 'Apple QuickTime DV (DV PAL)',
'DVR1': 'TARGA2000 Codec',
'DVRS': 'VSYNC DualMoon Iris DV codec',
'DVSD': 'DV',
'DVSL': 'DV compressed in SD (SDL)',
'DVX1': 'DVX1000SP Video Decoder',
'DVX2': 'DVX2000S Video Decoder',
'DVX3': 'DVX3000S Video Decoder',
'DX50': 'DivX v5',
'DXGM': 'Electronic Arts Game Video codec',
'DXSB': 'DivX Subtitles Codec',
'DXT1': 'Microsoft DirectX Compressed Texture (DXT1)',
'DXT2': 'Microsoft DirectX Compressed Texture (DXT2)',
'DXT3': 'Microsoft DirectX Compressed Texture (DXT3)',
'DXT4': 'Microsoft DirectX Compressed Texture (DXT4)',
'DXT5': 'Microsoft DirectX Compressed Texture (DXT5)',
'DXTC': 'Microsoft DirectX Compressed Texture (DXTC)',
'DXTN': 'Microsoft DirectX Compressed Texture (DXTn)',
'EKQ0': 'Elsa EKQ0',
'ELK0': 'Elsa ELK0',
'EM2V': 'Etymonix MPEG-2 I-frame',
'EQK0': 'Elsa graphics card quick codec',
'ESCP': 'Eidos Escape',
'ETV1': 'eTreppid Video ETV1',
'ETV2': 'eTreppid Video ETV2',
'ETVC': 'eTreppid Video ETVC',
'FFDS': 'FFDShow supported',
'FFV1': 'FFDShow supported',
'FFVH': 'FFVH codec',
'FLIC': 'Autodesk FLI/FLC Animation',
'FLJP': 'D-Vision Field Encoded Motion JPEG',
'FLV1': 'FLV1 codec',
'FMJP': 'D-Vision fieldbased ISO MJPEG',
'FRLE': 'SoftLab-NSK Y16 + Alpha RLE',
'FRWA': 'SoftLab-Nsk Forward Motion JPEG w/ alpha channel',
'FRWD': 'SoftLab-Nsk Forward Motion JPEG',
'FRWT': 'SoftLab-NSK Vision Forward Motion JPEG with Alpha-channel',
'FRWU': 'SoftLab-NSK Vision Forward Uncompressed',
'FVF1': 'Iterated Systems Fractal Video Frame',
'FVFW': 'ff MPEG-4 based on XviD codec',
'GEPJ': 'White Pine (ex Paradigm Matrix) Motion JPEG Codec',
'GJPG': 'Grand Tech GT891x Codec',
'GLCC': 'GigaLink AV Capture codec',
'GLZW': 'Motion LZW',
'GPEG': 'Motion JPEG',
'GPJM': 'Pinnacle ReelTime MJPEG Codec',
'GREY': 'Apparently a duplicate of Y800',
'GWLT': 'Microsoft Greyscale WLT DIB',
'H260': 'H.260',
'H261': 'H.261',
'H262': 'H.262',
'H263': 'H.263',
'H264': 'H.264 AVC',
'H265': 'H.265',
'H266': 'H.266',
'H267': 'H.267',
'H268': 'H.268',
'H269': 'H.269',
'HD10': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'HDX4': 'Jomigo HDX4',
'HFYU': 'Huffman Lossless Codec',
'HMCR': 'Rendition Motion Compensation Format (HMCR)',
'HMRR': 'Rendition Motion Compensation Format (HMRR)',
'I263': 'Intel ITU H.263 Videoconferencing (i263)',
'I420': 'Intel Indeo 4',
'IAN ': 'Intel RDX',
'ICLB': 'InSoft CellB Videoconferencing',
'IDM0': 'IDM Motion Wavelets 2.0',
'IF09': 'Microsoft H.261',
'IGOR': 'Power DVD',
'IJPG': 'Intergraph JPEG',
'ILVC': 'Intel Layered Video',
'ILVR': 'ITU-T H.263+',
'IMC1': 'IMC1',
'IMC2': 'IMC2',
'IMC3': 'IMC3',
'IMC4': 'IMC4',
'IMJG': 'Accom SphereOUS MJPEG with Alpha-channel',
'IPDV': 'I-O Data Device Giga AVI DV Codec',
'IPJ2': 'Image Power JPEG2000',
'IR21': 'Intel Indeo 2.1',
'IRAW': 'Intel YUV Uncompressed',
'IUYV': 'Interlaced version of UYVY (line order 0,2,4 then 1,3,5 etc)',
'IV30': 'Ligos Indeo 3.0',
'IV31': 'Ligos Indeo 3.1',
'IV32': 'Ligos Indeo 3.2',
'IV33': 'Ligos Indeo 3.3',
'IV34': 'Ligos Indeo 3.4',
'IV35': 'Ligos Indeo 3.5',
'IV36': 'Ligos Indeo 3.6',
'IV37': 'Ligos Indeo 3.7',
'IV38': 'Ligos Indeo 3.8',
'IV39': 'Ligos Indeo 3.9',
'IV40': 'Ligos Indeo Interactive 4.0',
'IV41': 'Ligos Indeo Interactive 4.1',
'IV42': 'Ligos Indeo Interactive 4.2',
'IV43': 'Ligos Indeo Interactive 4.3',
'IV44': 'Ligos Indeo Interactive 4.4',
'IV45': 'Ligos Indeo Interactive 4.5',
'IV46': 'Ligos Indeo Interactive 4.6',
'IV47': 'Ligos Indeo Interactive 4.7',
'IV48': 'Ligos Indeo Interactive 4.8',
'IV49': 'Ligos Indeo Interactive 4.9',
'IV50': 'Ligos Indeo Interactive 5.0',
'IY41': 'Interlaced version of Y41P (line order 0,2,4,...,1,3,5...)',
'IYU1': '12 bit format used in mode 2 of the IEEE 1394 Digital Camera 1.04 spec',
'IYU2': '24 bit format used in mode 2 of the IEEE 1394 Digital Camera 1.04 spec',
'IYUV': 'Intel Indeo iYUV 4:2:0',
'JBYR': 'Kensington JBYR',
'JFIF': 'Motion JPEG (FFmpeg)',
'JPEG': 'Still Image JPEG DIB',
'JPG ': 'JPEG compressed',
'JPGL': 'Webcam JPEG Light',
'KMVC': 'Karl Morton\'s Video Codec',
'KPCD': 'Kodak Photo CD',
'L261': 'Lead Technologies H.261',
'L263': 'Lead Technologies H.263',
'LAGS': 'Lagarith LossLess',
'LBYR': 'Creative WebCam codec',
'LCMW': 'Lead Technologies Motion CMW Codec',
'LCW2': 'LEADTools MCMW 9Motion Wavelet)',
'LEAD': 'LEAD Video Codec',
'LGRY': 'Lead Technologies Grayscale Image',
'LJ2K': 'LEADTools JPEG2000',
'LJPG': 'LEAD MJPEG Codec',
'LMP2': 'LEADTools MPEG2',
'LOCO': 'LOCO Lossless Codec',
'LSCR': 'LEAD Screen Capture',
'LSVM': 'Vianet Lighting Strike Vmail (Streaming)',
'LZO1': 'LZO compressed (lossless codec)',
'M261': 'Microsoft H.261',
'M263': 'Microsoft H.263',
'M4CC': 'ESS MPEG4 Divio codec',
'M4S2': 'Microsoft MPEG-4 (M4S2)',
'MC12': 'ATI Motion Compensation Format (MC12)',
'MC24': 'MainConcept Motion JPEG Codec',
'MCAM': 'ATI Motion Compensation Format (MCAM)',
'MCZM': 'Theory MicroCosm Lossless 64bit RGB with Alpha-channel',
'MDVD': 'Alex MicroDVD Video (hacked MS MPEG-4)',
'MDVF': 'Pinnacle DV/DV50/DVHD100',
'MHFY': 'A.M.Paredes mhuffyYUV (LossLess)',
'MJ2C': 'Morgan Multimedia Motion JPEG2000',
'MJPA': 'Pinnacle ReelTime MJPG hardware codec',
'MJPB': 'Motion JPEG codec',
'MJPG': 'Motion JPEG DIB',
'MJPX': 'Pegasus PICVideo Motion JPEG',
'MMES': 'Matrox MPEG-2 I-frame',
'MNVD': 'MindBend MindVid LossLess',
'MP2A': 'MPEG-2 Audio',
'MP2T': 'MPEG-2 Transport Stream',
'MP2V': 'MPEG-2 Video',
'MP41': 'Microsoft MPEG-4 V1 (enhansed H263)',
'MP42': 'Microsoft MPEG-4 (low-motion)',
'MP43': 'Microsoft MPEG-4 (fast-motion)',
'MP4A': 'MPEG-4 Audio',
'MP4S': 'Microsoft MPEG-4 (MP4S)',
'MP4T': 'MPEG-4 Transport Stream',
'MP4V': 'Apple QuickTime MPEG-4 native',
'MPEG': 'MPEG-1',
'MPG1': 'FFmpeg-1',
'MPG2': 'FFmpeg-1',
'MPG3': 'Same as Low motion DivX MPEG-4',
'MPG4': 'Microsoft MPEG-4 Video High Speed Compressor',
'MPGI': 'Sigma Designs MPEG',
'MPNG': 'Motion PNG codec',
'MRCA': 'Martin Regen Codec',
'MRLE': 'Run Length Encoding',
'MSS1': 'Windows Screen Video',
'MSS2': 'Windows Media 9',
'MSUC': 'MSU LossLess',
'MSVC': 'Microsoft Video 1',
'MSZH': 'Lossless codec (ZIP compression)',
'MTGA': 'Motion TGA images (24, 32 bpp)',
'MTX1': 'Matrox MTX1',
'MTX2': 'Matrox MTX2',
'MTX3': 'Matrox MTX3',
'MTX4': 'Matrox MTX4',
'MTX5': 'Matrox MTX5',
'MTX6': 'Matrox MTX6',
'MTX7': 'Matrox MTX7',
'MTX8': 'Matrox MTX8',
'MTX9': 'Matrox MTX9',
'MV12': 'MV12',
'MVI1': 'Motion Pixels MVI',
'MVI2': 'Motion Pixels MVI',
'MWV1': 'Aware Motion Wavelets',
'MYUV': 'Media-100 844/X Uncompressed',
'NAVI': 'nAVI',
'NDIG': 'Ahead Nero Digital MPEG-4 Codec',
'NHVU': 'NVidia Texture Format (GEForce 3)',
'NO16': 'Theory None16 64bit uncompressed RAW',
'NT00': 'NewTek LigtWave HDTV YUV with Alpha-channel',
'NTN1': 'Nogatech Video Compression 1',
'NTN2': 'Nogatech Video Compression 2 (GrabBee hardware coder)',
'NUV1': 'NuppelVideo',
'NV12': '8-bit Y plane followed by an interleaved U/V plane with 2x2 subsampling',
'NV21': 'As NV12 with U and V reversed in the interleaved plane',
'NVDS': 'nVidia Texture Format',
'NVHS': 'NVidia Texture Format (GEForce 3)',
'NVS0': 'nVidia GeForce Texture',
'NVS1': 'nVidia GeForce Texture',
'NVS2': 'nVidia GeForce Texture',
'NVS3': 'nVidia GeForce Texture',
'NVS4': 'nVidia GeForce Texture',
'NVS5': 'nVidia GeForce Texture',
'NVT0': 'nVidia GeForce Texture',
'NVT1': 'nVidia GeForce Texture',
'NVT2': 'nVidia GeForce Texture',
'NVT3': 'nVidia GeForce Texture',
'NVT4': 'nVidia GeForce Texture',
'NVT5': 'nVidia GeForce Texture',
'PDVC': 'I-O Data Device Digital Video Capture DV codec',
'PGVV': 'Radius Video Vision',
'PHMO': 'IBM Photomotion',
'PIM1': 'Pegasus Imaging',
'PIM2': 'Pegasus Imaging',
'PIMJ': 'Pegasus Imaging Lossless JPEG',
'PIXL': 'MiroVideo XL (Motion JPEG)',
'PNG ': 'Apple PNG',
'PNG1': 'Corecodec.org CorePNG Codec',
'PVEZ': 'Horizons Technology PowerEZ',
'PVMM': 'PacketVideo Corporation MPEG-4',
'PVW2': 'Pegasus Imaging Wavelet Compression',
'PVWV': 'Pegasus Imaging Wavelet 2000',
'PXLT': 'Apple Pixlet (Wavelet)',
'Q1.0': 'Q-Team QPEG 1.0 (www.q-team.de)',
'Q1.1': 'Q-Team QPEG 1.1 (www.q-team.de)',
'QDGX': 'Apple QuickDraw GX',
'QPEG': 'Q-Team QPEG 1.0',
'QPEQ': 'Q-Team QPEG 1.1',
'R210': 'BlackMagic YUV (Quick Time)',
'R411': 'Radius DV NTSC YUV',
'R420': 'Radius DV PAL YUV',
'RAVI': 'GroupTRON ReferenceAVI codec (dummy for MPEG compressor)',
'RAV_': 'GroupTRON ReferenceAVI codec (dummy for MPEG compressor)',
'RAW ': 'Full Frames (Uncompressed)',
'RGB ': 'Full Frames (Uncompressed)',
'RGB(15)': 'Uncompressed RGB15 5:5:5',
'RGB(16)': 'Uncompressed RGB16 5:6:5',
'RGB(24)': 'Uncompressed RGB24 8:8:8',
'RGB1': 'Uncompressed RGB332 3:3:2',
'RGBA': 'Raw RGB with alpha',
'RGBO': 'Uncompressed RGB555 5:5:5',
'RGBP': 'Uncompressed RGB565 5:6:5',
'RGBQ': 'Uncompressed RGB555X 5:5:5 BE',
'RGBR': 'Uncompressed RGB565X 5:6:5 BE',
'RGBT': 'Computer Concepts 32-bit support',
'RL4 ': 'RLE 4bpp RGB',
'RL8 ': 'RLE 8bpp RGB',
'RLE ': 'Microsoft Run Length Encoder',
'RLE4': 'Run Length Encoded 4',
'RLE8': 'Run Length Encoded 8',
'RMP4': 'REALmagic MPEG-4 Video Codec',
'ROQV': 'Id RoQ File Video Decoder',
'RPZA': 'Apple Video 16 bit "road pizza"',
'RT21': 'Intel Real Time Video 2.1',
'RTV0': 'NewTek VideoToaster',
'RUD0': 'Rududu video codec',
'RV10': 'RealVideo codec',
'RV13': 'RealVideo codec',
'RV20': 'RealVideo G2',
'RV30': 'RealVideo 8',
'RV40': 'RealVideo 9',
'RVX ': 'Intel RDX (RVX )',
'S263': 'Sorenson Vision H.263',
'S422': 'Tekram VideoCap C210 YUV 4:2:2',
'SAMR': 'Adaptive Multi-Rate (AMR) audio codec',
'SAN3': 'MPEG-4 codec (direct copy of DivX 3.11a)',
'SDCC': 'Sun Communication Digital Camera Codec',
'SEDG': 'Samsung MPEG-4 codec',
'SFMC': 'CrystalNet Surface Fitting Method',
'SHR0': 'BitJazz SheerVideo',
'SHR1': 'BitJazz SheerVideo',
'SHR2': 'BitJazz SheerVideo',
'SHR3': 'BitJazz SheerVideo',
'SHR4': 'BitJazz SheerVideo',
'SHR5': 'BitJazz SheerVideo',
'SHR6': 'BitJazz SheerVideo',
'SHR7': 'BitJazz SheerVideo',
'SJPG': 'CUseeMe Networks Codec',
'SL25': 'SoftLab-NSK DVCPRO',
'SL50': 'SoftLab-NSK DVCPRO50',
'SLDV': 'SoftLab-NSK Forward DV Draw codec',
'SLIF': 'SoftLab-NSK MPEG2 I-frames',
'SLMJ': 'SoftLab-NSK Forward MJPEG',
'SMC ': 'Apple Graphics (SMC) codec (256 color)',
'SMSC': 'Radius SMSC',
'SMSD': 'Radius SMSD',
'SMSV': 'WorldConnect Wavelet Video',
'SNOW': 'SNOW codec',
'SP40': 'SunPlus YUV',
'SP44': 'SunPlus Aiptek MegaCam Codec',
'SP53': 'SunPlus Aiptek MegaCam Codec',
'SP54': 'SunPlus Aiptek MegaCam Codec',
'SP55': 'SunPlus Aiptek MegaCam Codec',
'SP56': 'SunPlus Aiptek MegaCam Codec',
'SP57': 'SunPlus Aiptek MegaCam Codec',
'SP58': 'SunPlus Aiptek MegaCam Codec',
'SPIG': 'Radius Spigot',
'SPLC': 'Splash Studios ACM Audio Codec',
'SPRK': 'Sorenson Spark',
'SQZ2': 'Microsoft VXTreme Video Codec V2',
'STVA': 'ST CMOS Imager Data (Bayer)',
'STVB': 'ST CMOS Imager Data (Nudged Bayer)',
'STVC': 'ST CMOS Imager Data (Bunched)',
'STVX': 'ST CMOS Imager Data (Extended CODEC Data Format)',
'STVY': 'ST CMOS Imager Data (Extended CODEC Data Format with Correction Data)',
'SV10': 'Sorenson Video R1',
'SVQ1': 'Sorenson Video R3',
'SVQ3': 'Sorenson Video 3 (Apple Quicktime 5)',
'SWC1': 'MainConcept Motion JPEG Codec',
'T420': 'Toshiba YUV 4:2:0',
'TGA ': 'Apple TGA (with Alpha-channel)',
'THEO': 'FFVFW Supported Codec',
'TIFF': 'Apple TIFF (with Alpha-channel)',
'TIM2': 'Pinnacle RAL DVI',
'TLMS': 'TeraLogic Motion Intraframe Codec (TLMS)',
'TLST': 'TeraLogic Motion Intraframe Codec (TLST)',
'TM20': 'Duck TrueMotion 2.0',
'TM2A': 'Duck TrueMotion Archiver 2.0',
'TM2X': 'Duck TrueMotion 2X',
'TMIC': 'TeraLogic Motion Intraframe Codec (TMIC)',
'TMOT': 'Horizons Technology TrueMotion S',
'TR20': 'Duck TrueMotion RealTime 2.0',
'TRLE': 'Akula Alpha Pro Custom AVI (LossLess)',
'TSCC': 'TechSmith Screen Capture Codec',
'TV10': 'Tecomac Low-Bit Rate Codec',
'TVJP': 'TrueVision Field Encoded Motion JPEG',
'TVMJ': 'Truevision TARGA MJPEG Hardware Codec',
'TY0N': 'Trident TY0N',
'TY2C': 'Trident TY2C',
'TY2N': 'Trident TY2N',
'U263': 'UB Video StreamForce H.263',
'U<Y ': 'Discreet UC YUV 4:2:2:4 10 bit',
'U<YA': 'Discreet UC YUV 4:2:2:4 10 bit (with Alpha-channel)',
'UCOD': 'eMajix.com ClearVideo',
'ULTI': 'IBM Ultimotion',
'UMP4': 'UB Video MPEG 4',
'UYNV': 'UYVY',
'UYVP': 'YCbCr 4:2:2',
'UYVU': 'SoftLab-NSK Forward YUV codec',
'UYVY': 'UYVY 4:2:2 byte ordering',
'V210': 'Optibase VideoPump 10-bit 4:2:2 Component YCbCr',
'V261': 'Lucent VX2000S',
'V422': '24 bit YUV 4:2:2 Format',
'V655': '16 bit YUV 4:2:2 Format',
'VBLE': 'MarcFD VBLE Lossless Codec',
'VCR1': 'ATI VCR 1.0',
'VCR2': 'ATI VCR 2.0',
'VCR3': 'ATI VCR 3.0',
'VCR4': 'ATI VCR 4.0',
'VCR5': 'ATI VCR 5.0',
'VCR6': 'ATI VCR 6.0',
'VCR7': 'ATI VCR 7.0',
'VCR8': 'ATI VCR 8.0',
'VCR9': 'ATI VCR 9.0',
'VDCT': 'Video Maker Pro DIB',
'VDOM': 'VDOnet VDOWave',
'VDOW': 'VDOnet VDOLive (H.263)',
'VDST': 'VirtualDub remote frameclient ICM driver',
'VDTZ': 'Darim Vison VideoTizer YUV',
'VGPX': 'VGPixel Codec',
'VIDM': 'DivX 5.0 Pro Supported Codec',
'VIDS': 'YUV 4:2:2 CCIR 601 for V422',
'VIFP': 'VIFP',
'VIV1': 'Vivo H.263',
'VIV2': 'Vivo H.263',
'VIVO': 'Vivo H.263 v2.00',
'VIXL': 'Miro Video XL',
'VLV1': 'Videologic VLCAP.DRV',
'VP30': 'On2 VP3.0',
'VP31': 'On2 VP3.1',
'VP40': 'On2 TrueCast VP4',
'VP50': 'On2 TrueCast VP5',
'VP60': 'On2 TrueCast VP6',
'VP61': 'On2 TrueCast VP6.1',
'VP62': 'On2 TrueCast VP6.2',
'VP70': 'On2 TrueMotion VP7',
'VQC1': 'Vector-quantised codec 1',
'VQC2': 'Vector-quantised codec 2',
'VR21': 'BlackMagic YUV (Quick Time)',
'VSSH': 'Vanguard VSS H.264',
'VSSV': 'Vanguard Software Solutions Video Codec',
'VSSW': 'Vanguard VSS H.264',
'VTLP': 'Alaris VideoGramPixel Codec',
'VX1K': 'VX1000S Video Codec',
'VX2K': 'VX2000S Video Codec',
'VXSP': 'VX1000SP Video Codec',
'VYU9': 'ATI Technologies YUV',
'VYUY': 'ATI Packed YUV Data',
'WBVC': 'Winbond W9960',
'WHAM': 'Microsoft Video 1 (WHAM)',
'WINX': 'Winnov Software Compression',
'WJPG': 'AverMedia Winbond JPEG',
'WMV1': 'Windows Media Video V7',
'WMV2': 'Windows Media Video V8',
'WMV3': 'Windows Media Video V9',
'WMVA': 'WMVA codec',
'WMVP': 'Windows Media Video V9',
'WNIX': 'WniWni Codec',
'WNV1': 'Winnov Hardware Compression',
'WNVA': 'Winnov hw compress',
'WRLE': 'Apple QuickTime BMP Codec',
'WRPR': 'VideoTools VideoServer Client Codec',
'WV1F': 'WV1F codec',
'WVLT': 'IllusionHope Wavelet 9/7',
'WVP2': 'WVP2 codec',
'X263': 'Xirlink H.263',
'X264': 'XiWave GNU GPL x264 MPEG-4 Codec',
'XLV0': 'NetXL Video Decoder',
'XMPG': 'Xing MPEG (I-Frame only)',
'XVID': 'XviD MPEG-4',
'XVIX': 'Based on XviD MPEG-4 codec',
'XWV0': 'XiWave Video Codec',
'XWV1': 'XiWave Video Codec',
'XWV2': 'XiWave Video Codec',
'XWV3': 'XiWave Video Codec (Xi-3 Video)',
'XWV4': 'XiWave Video Codec',
'XWV5': 'XiWave Video Codec',
'XWV6': 'XiWave Video Codec',
'XWV7': 'XiWave Video Codec',
'XWV8': 'XiWave Video Codec',
'XWV9': 'XiWave Video Codec',
'XXAN': 'XXAN',
'XYZP': 'Extended PAL format XYZ palette',
'Y211': 'YUV 2:1:1 Packed',
'Y216': 'Pinnacle TARGA CineWave YUV (Quick Time)',
'Y411': 'YUV 4:1:1 Packed',
'Y41B': 'YUV 4:1:1 Planar',
'Y41P': 'PC1 4:1:1',
'Y41T': 'PC1 4:1:1 with transparency',
'Y422': 'Y422',
'Y42B': 'YUV 4:2:2 Planar',
'Y42T': 'PCI 4:2:2 with transparency',
'Y444': 'IYU2',
'Y8 ': 'Grayscale video',
'Y800': 'Simple grayscale video',
'YC12': 'Intel YUV12 Codec',
'YMPG': 'YMPEG Alpha',
'YU12': 'ATI YV12 4:2:0 Planar',
'YU92': 'Intel - YUV',
'YUNV': 'YUNV',
'YUV2': 'Apple Component Video (YUV 4:2:2)',
'YUV8': 'Winnov Caviar YUV8',
'YUV9': 'Intel YUV9',
'YUVP': 'YCbCr 4:2:2',
'YUY2': 'Uncompressed YUV 4:2:2',
'YUYV': 'Canopus YUV',
'YV12': 'YVU12 Planar',
'YV16': 'Elecard YUV 4:2:2 Planar',
'YV92': 'Intel Smart Video Recorder YVU9',
'YVU9': 'Intel YVU9 Planar',
'YVYU': 'YVYU 4:2:2 byte ordering',
'ZLIB': 'ZLIB',
'ZPEG': 'Metheus Video Zipper',
'ZYGO': 'ZyGo Video Codec'
}
# Make lookups fool-proof: ensure every code is also reachable via its
# upper-case form, and register space-padded codes under their stripped
# upper-case form as well.
for key, description in list(FOURCC.items()):
    upper_key = key.upper()
    if upper_key not in FOURCC:
        FOURCC[upper_key] = description
    if key.endswith(' '):
        FOURCC[key.strip().upper()] = description
| freevo/kaa-metadata | src/fourcc.py | Python | gpl-2.0 | 30,686 | [
"CRYSTAL"
] | eb1211acb2de27127f04af405792905f6d762a900e8184d43b73d6af23286ae1 |
from numpy import *
'''
Authors: Kai Liao, Adri Agnello (UCSB and UCLA)
Phil Marshall (Stanford)
Started: Liao, Aug.2014
Description: Convert Adri's Mathematica version into Python.
Given the survey imaging conditions and an object ID in OM10,
paint it in chosen band (g,r,i,z). Current python version
adapted on doubles, must be refactored to work on quads and
systems with any number of point-sources in image-plane.
'''
def bs(n):
    """Approximate Sersic b(n) coefficient, b(n) ~ (2n - 1)/3."""
    numerator = 2.0 * n - 1.0
    return numerator / 3.0
def Sersic(R, n):
    """Unnormalised Sersic profile of index n at scaled radius R."""
    exponent = -bs(n) * R ** (1.0 / n)
    return exp(exponent)
def flase(x, y, flat, pa, n):
    """Flattened Sersic profile at (x, y) for axis ratio `flat`,
    position angle `pa` (radians) and Sersic index `n`."""
    # Coordinates along the major and minor axes of the ellipse.
    major = x * cos(pa) + y * sin(pa)
    minor = -sin(pa) * x + cos(pa) * y
    elliptical_radius = (flat * major ** 2. + flat ** (-1.0) * minor ** 2.) ** 0.5
    return Sersic(elliptical_radius, n)
def G(x, dx):
    """Normalised 1-D Gaussian of width dx evaluated at x."""
    norm = (2 * pi) ** 0.5 * dx
    return exp(-0.5 * x ** 2. / dx ** 2.) / norm
def GG(x, dx):
    """Gaussian evaluated at sqrt(|x|) instead of x (broader tails)."""
    root = abs(x) ** 0.5
    return G(root, dx)
def Gint(x, y, dx, dy):
    """Gaussian PSF interpolated on a 3x3 pixel grid.

    Shares the flux of a separable Gaussian PSF among neighbouring
    pixels with weights 9/16 (centre), 3/32 (the four edge-adjacent
    pixels) and 1/64 (the four corner pixels).

    BUG FIX: the original expression had a missing '*' in
    G(x-1.,dx)G(y+1.,dy) (a SyntaxError, so the module could not even
    be imported) and used an undefined name 'dt' where 'dy' was meant.
    """
    centre = (9. / 16) * G(x, dx) * G(y, dy)
    edges = (3. / 32) * (G(x + 1., dx) * G(y, dy) + G(x - 1., dx) * G(y, dy) +
                         G(x, dx) * G(y + 1., dy) + G(x, dx) * G(y - 1., dy))
    corners = (1. / 64) * (G(x - 1., dx) * G(y - 1., dy) + G(x - 1., dx) * G(y + 1., dy) +
                           G(x + 1., dx) * G(y - 1., dy) + G(x + 1., dx) * G(y + 1., dy))
    return centre + edges + corners
# Gint is useful to interpolate the Gaussian psf on a 3*3 grid, i.e. sharing
# PSF fluxes among neighbouring pixels.
# --- SDSS survey imaging conditions ---------------------------------------
pixscale = 0.4  # pixel scale [arcsec/pixel]
meanIQ = 1.4/2  # image quality: FWHM/2, i.e. half width at half maximum [arcsec]
meanIQ = meanIQ/(log(2.)*2.**0.5) #the log is log_e!  NOTE(review): FWHM->sigma normally uses sqrt(2*ln 2); confirm this factor
meandepth = 20.8 #magnitudes per arcsecond (sky surface brightness)
errdepth = 0.3  # scatter of the survey depth [mag]
#more specific: band fluxes and fluctuations
gmean = 21.9  # g-band sky brightness [mag], converted to counts below
egd = 0.3  # g-band depth scatter [mag]
gsky = pixscale**2.*10.**(9.-0.4*gmean)  # g-band sky counts per pixel
rmean = 20.9  # r-band sky brightness [mag]
erd = 0.3  # r-band depth scatter [mag]
rsky = pixscale**2.*10.**(9.-0.4*rmean)  # r-band sky counts per pixel
imean = 20.2  # i-band sky brightness [mag]
eid = 0.4  # i-band depth scatter [mag]
isky = pixscale**2.*10.**(9.-0.4*imean)  # i-band sky counts per pixel
zmean = 18.9  # z-band sky brightness [mag]
ezd = 0.5  # z-band depth scatter [mag]
zsky = pixscale**2.*10.**(9.-0.4*zmean)  # z-band sky counts per pixel
#psf width distributions
mgIQ = 1.65/(2.*2.**0.5*log(2.))  # mean g-band PSF width [arcsec]
dgIQ = 0.4/(2.*2.**0.5*log(2.))  # scatter of the g-band PSF width [arcsec]
moIQ = 1.4/(2.*2.**0.5*log(2.))  # mean PSF width in the other bands [arcsec]
doIQ = 0.3/(2.*2.**0.5*log(2.)) # scatter of the psf width in the other bands [arcsec]
dr = pixscale**2.*10.**(9.-0.4*meandepth)/5. #five sigma detection of deepest source
expo = (log(10.)*erd/(2.5*(2*pi)**0.5))/dr**2.  # effective exposure normalisation from the r-band depth scatter
dg = (log(10.)*egd/(2.5*(2*pi)**0.5))**0.5/expo**0.5  # g-band pixel noise
di = (log(10.)*eid/(2.5*(2*pi)**0.5))**0.5/expo**0.5  # i-band pixel noise
dz = (log(10.)*ezd/(2.5*(2*pi)**0.5))**0.5/expo**0.5  # z-band pixel noise
| mbaumer/OM10 | om10/imagenew.py | Python | mit | 2,083 | [
"Gaussian"
] | bce5105bcdc95d70e334268a6fde3cb6a38598b5e7a3932b2bdf468efae1e78b |
import sys
sys.path.append('..')
import stile
class DummyDataHandler(stile.DataHandler):
    """A minimal DataHandler serving the bundled example catalogs.

    Provides single-epoch, table-format data for two object types:
    'galaxy' (the source catalog) and 'galaxy lens' (the lens catalog),
    both read from ASCII files with a fixed column layout.
    """
    def __init__(self):
        # Example catalogs shipped alongside this script.
        self.source_file_name = 'example_source_catalog.dat'
        self.lens_file_name = 'example_lens_catalog.dat'
        # Reader used to load the ASCII tables.
        self.read_method = stile.ReadASCIITable
        # Column positions shared by both catalogs.
        self.fields={'id': 0, 'ra': 1, 'dec': 2, 'z': 3, 'g1': 4, 'g2': 5}
        self.output_path='.'
    def listData(self,object_types,epoch,extent,data_format):
        """Return the catalog file name for each requested object type.

        Raises NotImplementedError for object types other than 'galaxy'
        or 'galaxy lens', and ValueError for unsupported
        epoch/extent/format combinations.
        """
        if (epoch=='single' and
            (extent=='field' or extent=='patch' or extent=='tract') and
            data_format=='table'):
            return_list = []
            for object_type in object_types:
                if object_type=="galaxy":
                    return_list.append(self.source_file_name)
                elif object_type=="galaxy lens":
                    return_list.append(self.lens_file_name)
                else:
                    raise NotImplementedError("Can only serve 'galaxy' or 'galaxy lens' "+
                                              "object types")
            return return_list
        else:
            raise ValueError('DummyDataHandler does not contain data of this type: %s %s %s %s'%(
                str(object_types),epoch,extent,data_format))
    def getData(self,id,object_types,epoch,extent,data_format,bin_list=None):
        """Read, format and (optionally) bin the catalog identified by `id`.

        `id` may also be a list of file names, in which case a list of
        formatted arrays is returned, one per (id, object_type) pair.
        """
        # BUG FIX: in Python 3 `str` is itself iterable, so without the
        # isinstance guard a single file-name id would recurse forever.
        if hasattr(id,'__iter__') and not isinstance(id, str):
            return [self.getData(iid,ot,epoch,extent,data_format,bin_list)
                    for iid,ot in zip(id,object_types)]
        if not data_format=='table':
            raise ValueError('Only table data provided by DummyDataHandler')
        if not epoch=='single':
            raise ValueError('Only single-epoch data provided by DummyDataHandler')
        if id==self.lens_file_name or id==self.source_file_name:
            # Load the raw table and attach the named-field layout.
            data = stile.FormatArray(self.read_method(id),fields=self.fields)
            if bin_list:
                # Apply each requested binning scheme in turn.
                for bin in bin_list:
                    data = bin(data)
            return data
        else:
            raise ValueError('Unknown data ID')
| msimet/Stile | examples/dummy.py | Python | bsd-3-clause | 2,110 | [
"Galaxy"
] | daaae61714df28d0dedec3d36c03d447c9435d6ff5a0c980c1cfa1ee7b288d0a |
#!/usr/bin/env python3
# Version 1.1
# Author Alexis Blanchet-Cohen
# Date: 09/06/2014
"""Generate one GATK base-quality score recalibration (BQSR) script per sample.

Each generated shell script runs the standard four-step BQSR workflow:
  1. BaseRecalibrator          -> <sample>_recal_data.table
  2. BaseRecalibrator --BQSR   -> <sample>_post_recal_data.table
  3. AnalyzeCovariates         -> <sample>_recalibration_plots.pdf
  4. PrintReads --BQSR         -> <sample>_recal_reads.bam
"""
import argparse
import glob
import os
import os.path
import pandas
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description="Generates GATK BaseRecalibrator scripts.")
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory. DEFAULT=baserecalibrator", default="baserecalibrator")
parser.add_argument("-i", "--inputDirectory", help="Input directory with BAM files. DEFAULT=../results/bwa", default="../results/bwa")
parser.add_argument("-o", "--outputDirectory", help="Output directory with realigned BAM files. DEFAULT=../results/bwa", default="../results/bwa")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# If not in the main scripts directory, cd to the main scripts directory, if it exists.
util.cdMainScriptsDirectory()
# Process the command line arguments.
inputDirectory = os.path.abspath(args.inputDirectory)
outputDirectory = os.path.abspath(args.outputDirectory)
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
# Read configuration files
config = util.readConfigurationFiles()
header = config.getboolean("server", "PBS_header")
toolsFolder = config.get("server", "toolsFolder")
genome = config.get("project", "genome")
genomeFolder = config.get(genome, "genomeFolder")
genomeFile = config.get(genome, "genomeFile")
xmx = config.get("baserecalibrator", "xmx")
# Known indel resources passed to BaseRecalibrator via --knownSites.
knownSites = [os.path.join(genomeFolder, "1000G_phase1.indels.b37.vcf"),
              os.path.join(genomeFolder, "Mills_and_1000G_gold_standard.indels.b37.vcf")]
# Get samples
samples = util.getsamples(lanes=True)
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
    os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)

def writeGATKCommand(script, analysisType, arguments, logRedirection, scriptName):
    """Write one 'java -jar GenomeAnalysisTK.jar' invocation to *script*.

    arguments:      list of "--option value" strings, one per continuation line.
    logRedirection: "&>" for the first command (truncates the log),
                    "&>>" for subsequent commands (appends).
    """
    script.write("java -Xmx" + xmx + " \\\n")
    script.write("-jar " + os.path.join(toolsFolder, "GenomeAnalysisTK.jar") + " \\\n")
    script.write("--analysis_type " + analysisType + " \\\n")
    for argument in arguments:
        script.write(argument + " \\\n")
    script.write(logRedirection + " " + scriptName + ".log")
    script.write("\n\n")

# Write the scripts
for sample in samples:
    scriptName = "baserecalibrator_" + sample + ".sh"
    inputBam = os.path.join(inputDirectory, sample, sample + "_realigned_reads.bam")
    # Per-sample table/plot names. The previous version wrote a single shared
    # "recal_data.table" (and post table / plots file) for every sample, so
    # concurrently running queue jobs clobbered each other's tables and
    # PrintReads could apply another sample's recalibration.
    recalTable = sample + "_recal_data.table"
    postRecalTable = sample + "_post_recal_data.table"
    plotsFile = sample + "_recalibration_plots.pdf"
    script = open(scriptName, "w")
    if header:
        # Use the baserecalibrator settings for the PBS header
        # (was "indelrealigner", a copy-paste slip from indelrealigner.py).
        util.writeHeader(script, config, "baserecalibrator")
    knownSitesArguments = ["--knownSites " + site for site in knownSites]
    # Pass 1: build the recalibration table from the realigned BAM.
    writeGATKCommand(script, "BaseRecalibrator",
                     ["--reference_sequence " + genomeFile,
                      "--input_file " + inputBam] +
                     knownSitesArguments +
                     ["--out " + recalTable],
                     "&>", scriptName)
    # Pass 2: second pass with --BQSR to measure remaining covariation.
    writeGATKCommand(script, "BaseRecalibrator",
                     ["--reference_sequence " + genomeFile,
                      "--input_file " + inputBam] +
                     knownSitesArguments +
                     ["--BQSR " + recalTable,
                      "--out " + postRecalTable],
                     "&>>", scriptName)
    # Before/after covariate plots.
    writeGATKCommand(script, "AnalyzeCovariates",
                     ["--reference_sequence " + genomeFile,
                      "--beforeReportFile " + recalTable,
                      "--afterReportFile " + postRecalTable,
                      "--plotsReportFile " + plotsFile],
                     "&>>", scriptName)
    # Apply the recalibration and write the final BAM.
    writeGATKCommand(script, "PrintReads",
                     ["--reference_sequence " + genomeFile,
                      "--input_file " + inputBam,
                      "--BQSR " + recalTable,
                      "--out " + os.path.join(outputDirectory, sample, sample + "_recal_reads.bam")],
                     "&>>", scriptName)
    script.close()
if args.submitJobsToQueue.lower() in ("yes", "y"):
    subprocess.call("submitJobs.py", shell=True)
| blancha/abcngspipelines | exomeseq/baserecalibrator.py | Python | gpl-3.0 | 4,830 | [
"BWA"
] | e81ec8132215e8ba121c7802e7f91b312401dad51a1789e8962b760e4a1735ef |
""" Python test discovery, setup and run of test functions. """
import re
import fnmatch
import functools
import py
import inspect
import sys
import pytest
from _pytest.mark import MarkDecorator, MarkerError
from py._code.code import TerminalRepr
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import _pytest._pluggy as pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
# inspect.signature is the modern (3.3+) API; fall back to the deprecated
# getargspec/formatargspec pair on older interpreters.
if hasattr(inspect, 'signature'):
    def _format_args(func):
        """Return the argument signature of *func* as a string, e.g. "(a, b=1)"."""
        return str(inspect.signature(func))
else:
    def _format_args(func):
        """Return the argument signature of *func* as a string, e.g. "(a, b=1)"."""
        return inspect.formatargspec(*inspect.getargspec(func))
def _has_positional_arg(func):
return func.__code__.co_argcount
def filter_traceback(entry):
    """Return True for traceback entries outside pytest's own code.

    Drops the pluggy module file (cutdir1) and anything under the
    _pytest package directory (cutdir2).
    """
    return entry.path != cutdir1 and not entry.path.relto(cutdir2)
def get_real_func(obj):
    """ gets the real function object of the (possibly) wrapped object by
    functools.wraps or functools.partial.
    """
    missing = object()
    # Follow the __wrapped__ chain left by functools.wraps-style decorators.
    while True:
        wrapped = getattr(obj, "__wrapped__", missing)
        if wrapped is missing:
            break
        obj = wrapped
    # A functools.partial carries its target callable in .func.
    if isinstance(obj, functools.partial):
        obj = obj.func
    return obj
def getfslineno(obj):
    """Return a (filepath, lineno) pair for *obj*, unwrapping decorators."""
    # xxx let decorators etc specify a sane ordering
    obj = get_real_func(obj)
    if hasattr(obj, 'place_as'):
        # 'place_as' lets an object report another object's source location.
        obj = obj.place_as
    fslineno = py.code.getfslineno(obj)
    assert isinstance(fslineno[1], int), obj
    return fslineno
def getimfunc(func):
    """Return the plain function underlying a bound/unbound method,
    or *func* itself when it is not a method."""
    for attrname in ("__func__", "im_func"):  # py3 / py2 spellings
        try:
            return getattr(func, attrname)
        except AttributeError:
            pass
    return func
def safe_getattr(object, name, default):
    """ Like getattr but return default upon any Exception.

    Attribute access can potentially fail for 'evil' Python objects.
    See issue214
    """
    try:
        result = getattr(object, name, default)
    except Exception:
        # Properties and __getattr__ hooks may raise arbitrary exceptions;
        # swallow them all and fall back to the default.
        result = default
    return result
class FixtureFunctionMarker:
    """Marker attached to a function by @pytest.fixture / @pytest.yield_fixture;
    records the fixture's scope/params/autouse/yieldctx/ids configuration."""
    def __init__(self, scope, params,
                 autouse=False, yieldctx=False, ids=None):
        self.scope = scope
        self.params = params
        self.autouse = autouse
        self.yieldctx = yieldctx
        self.ids = ids
    def __call__(self, function):
        """Attach this marker to *function*; only plain functions allowed."""
        if isclass(function):
            raise ValueError(
                "class fixtures not supported (may be in the future)")
        function._pytestfixturefunction = self
        return function
def fixture(scope="function", params=None, autouse=False, ids=None):
    """ (return a) decorator to mark a fixture factory function.
    This decorator can be used (with or without parameters) to define
    a fixture function.  The name of the fixture function can later be
    referenced to cause its invocation ahead of running tests: test
    modules or classes can use the pytest.mark.usefixtures(fixturename)
    marker.  Test functions can directly use fixture names as input
    arguments in which case the fixture instance returned from the fixture
    function will be injected.
    :arg scope: the scope for which this fixture is shared, one of
                "function" (default), "class", "module", "session".
    :arg params: an optional list of parameters which will cause multiple
                invocations of the fixture function and all of the tests
                using it.
    :arg autouse: if True, the fixture func is activated for all tests that
                can see it.  If False (the default) then an explicit
                reference is needed to activate the fixture.
    :arg ids: list of string ids each corresponding to the params
       so that they are part of the test id. If no ids are provided
       they will be generated automatically from the params.
    """
    if callable(scope) and params is None and autouse == False:
        # direct decoration: used as bare @fixture (no parentheses), so
        # "scope" is actually the decorated function
        return FixtureFunctionMarker(
                "function", params, autouse)(scope)
    if params is not None and not isinstance(params, (list, tuple)):
        # accept any iterable of params by materializing it once
        params = list(params)
    return FixtureFunctionMarker(scope, params, autouse, ids=ids)
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
    """ (return a) decorator to mark a yield-fixture factory function
    (EXPERIMENTAL).
    This takes the same arguments as :py:func:`pytest.fixture` but
    expects a fixture function to use a ``yield`` instead of a ``return``
    statement to provide a fixture.  See
    http://pytest.org/en/latest/yieldfixture.html for more info.
    """
    if callable(scope) and params is None and autouse == False:
        # direct decoration: used as bare @yield_fixture (no parentheses)
        return FixtureFunctionMarker(
                "function", params, autouse, yieldctx=True)(scope)
    else:
        return FixtureFunctionMarker(scope, params, autouse,
                                     yieldctx=True, ids=ids)
# Plain function-scoped marker, applied elsewhere to fixture factories
# discovered via the legacy "funcarg" prefix naming (hence the name).
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
    """Build a read-only property returning the python object of the
    closest parent node of class ``pytest.<name>``, or None."""
    def get(self):
        node = self.getparent(getattr(pytest, name))
        if node is not None:
            return node.obj
    doc = "python %s object this node was collected from (can be None)." % (
        name.lower(),)
    return property(get, None, None, doc)
def pytest_addoption(parser):
    """Register command-line options and ini settings of the python plugin."""
    group = parser.getgroup("general")
    group.addoption('--fixtures', '--funcargs',
               action="store_true", dest="showfixtures", default=False,
               help="show available fixtures, sorted by plugin appearance")
    parser.addini("usefixtures", type="args", default=[],
           help="list of default fixtures to be used with this project")
    parser.addini("python_files", type="args",
           default=['test_*.py', '*_test.py'],
           help="glob-style file patterns for Python test module discovery")
    parser.addini("python_classes", type="args", default=["Test",],
           help="prefixes or glob names for Python test class discovery")
    parser.addini("python_functions", type="args", default=["test",],
           help="prefixes or glob names for Python test function and "
                "method discovery")
    group.addoption("--import-mode", default="prepend",
           choices=["prepend", "append"], dest="importmode",
           help="prepend/append to sys.path when importing test modules, "
                "default is to prepend.")
def pytest_cmdline_main(config):
    """Handle --fixtures: show available fixtures and exit with status 0."""
    if config.option.showfixtures:
        showfixtures(config)
        return 0
def pytest_generate_tests(metafunc):
    """Apply @pytest.mark.parametrize markers found on the test function."""
    # those alternative spellings are common - raise a specific error to alert
    # the user
    alt_spellings = ['parameterize', 'parametrise', 'parameterise']
    for attr in alt_spellings:
        if hasattr(metafunc.function, attr):
            msg = "{0} has '{1}', spelling should be 'parametrize'"
            raise MarkerError(msg.format(metafunc.function.__name__, attr))
    try:
        markers = metafunc.function.parametrize
    except AttributeError:
        # no parametrize marker on this function
        return
    for marker in markers:
        metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
    """Register the 'parametrize' and 'usefixtures' marker descriptions."""
    config.addinivalue_line("markers",
        "parametrize(argnames, argvalues): call a test function multiple "
        "times passing in different arguments in turn. argvalues generally "
        "needs to be a list of values if argnames specifies only one name "
        "or a list of tuples of values if argnames specifies multiple names. "
        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
        "decorated test function, one with arg1=1 and another with arg1=2."
        "see http://pytest.org/latest/parametrize.html for more info and "
        "examples."
    )
    config.addinivalue_line("markers",
        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
        "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
    )
def pytest_sessionstart(session):
    # One FixtureManager per test session; collection code looks it up here.
    session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
    """Export the python-plugin names into the ``pytest`` namespace."""
    # make pytest.raises.Exception an alias of pytest.fail.Exception
    raises.Exception = pytest.fail.Exception
    return {
        'fixture': fixture,
        'yield_fixture': yield_fixture,
        'raises' : raises,
        'collect': {
        'Module': Module, 'Class': Class, 'Instance': Instance,
        'Function': Function, 'Generator': Generator,
        '_fillfuncargs': fillfixtures}
    }
@fixture(scope="session")
def pytestconfig(request):
    """ the pytest config object with access to command line opts."""
    # session-scoped: the same Config instance is shared by all tests
    return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
    """Default test-call implementation: invoke the python test function.

    Yielded (generator-produced) tests receive their positional args;
    regular tests receive exactly the fixture values they requested.
    """
    testfunction = pyfuncitem.obj
    if pyfuncitem._isyieldedfunction():
        testfunction(*pyfuncitem._args)
    else:
        funcargs = pyfuncitem.funcargs
        testargs = dict((argname, funcargs[argname])
                        for argname in pyfuncitem._fixtureinfo.argnames)
        testfunction(**testargs)
    # Returning True stops later hook implementations from running.
    return True
def pytest_collect_file(path, parent):
    """Collect .py files matching the python_files patterns (or given
    explicitly on the command line) as Module nodes."""
    ext = path.ext
    if ext == ".py":
        if not parent.session.isinitpath(path):
            for pat in parent.config.getini('python_files'):
                if path.fnmatch(pat):
                    break
            else:
                # for-else: no pattern matched and the file was not an
                # explicitly given initial path -> do not collect
                return
        ihook = parent.session.gethookproxy(path)
        return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
    """Default factory hook for module collection nodes."""
    return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
    """Fallback collection of test classes/functions (hookwrapper).

    Runs around the other implementations; if any of them already produced
    a result we bail out, otherwise apply the default naming rules.
    """
    outcome = yield
    res = outcome.get_result()
    if res is not None:
        # another hook implementation collected this object already
        raise StopIteration
    # nothing was collected elsewhere, let's do it here
    if isclass(obj):
        if collector.istestclass(obj, name):
            Class = collector._getcustomclass("Class")
            outcome.force_result(Class(name, parent=collector))
    elif collector.istestfunction(obj, name):
        # mock seems to store unbound methods (issue473), normalize it
        obj = getattr(obj, "__func__", obj)
        if not isfunction(obj):
            collector.warn(code="C2", message=
                "cannot collect %r because it is not a function."
                % name, )
        if getattr(obj, "__test__", True):
            if is_generator(obj):
                res = Generator(name, parent=collector)
            else:
                res = list(collector._genfunctions(name, obj))
            outcome.force_result(res)
def is_generator(func):
    """Return True if *func* is a generator function."""
    try:
        # 32 is the CO_GENERATOR code-object flag
        return py.code.getrawcode(func).co_flags & 32 # generator function
    except AttributeError: # builtin functions have no bytecode
        # assume them to not be generators
        return False
class PyobjContext(object):
    """Convenience accessors for the closest Module/Class/Instance parents."""
    module = pyobj_property("Module")
    cls = pyobj_property("Class")
    instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
    """Mixin for nodes that wrap an underlying python object (``.obj``)."""
    def obj():
        # namespace trick: this temporary function builds a lazily-computed
        # property and is then immediately replaced by it (see below)
        def fget(self):
            try:
                return self._obj
            except AttributeError:
                self._obj = obj = self._getobj()
                return obj
        def fset(self, value):
            self._obj = value
        return property(fget, fset, None, "underlying python object")
    obj = obj()
    def _getobj(self):
        # default: look the name up on the parent's python object
        return getattr(self.parent.obj, self.name)
    def getmodpath(self, stopatmodule=True, includemodule=False):
        """ return python path relative to the containing module. """
        chain = self.listchain()
        chain.reverse()
        parts = []
        for node in chain:
            if isinstance(node, Instance):
                # Instance nodes ("()") do not contribute to the dotted path
                continue
            name = node.name
            if isinstance(node, Module):
                assert name.endswith(".py")
                name = name[:-3]
                if stopatmodule:
                    if includemodule:
                        parts.append(name)
                    break
            parts.append(name)
        parts.reverse()
        s = ".".join(parts)
        # parametrized ids like "mod.func[x]" must not get a dot before "["
        return s.replace(".[", "[")
    def _getfslineno(self):
        return getfslineno(self.obj)
    def reportinfo(self):
        """Return (fspath, lineno, dotted-path) used in test reports."""
        # XXX caching?
        obj = self.obj
        if hasattr(obj, 'compat_co_firstlineno'):
            # nose compatibility
            fspath = sys.modules[obj.__module__].__file__
            if fspath.endswith(".pyc"):
                fspath = fspath[:-1]
            lineno = obj.compat_co_firstlineno
        else:
            fspath, lineno = getfslineno(obj)
        modpath = self.getmodpath()
        assert isinstance(lineno, int)
        return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
    """Base class for collectors that inspect a python object's namespace."""
    def funcnamefilter(self, name):
        """True if *name* matches the python_functions ini patterns."""
        return self._matches_prefix_or_glob_option('python_functions', name)
    def isnosetest(self, obj):
        """ Look for the __test__ attribute, which is applied by the
        @nose.tools.istest decorator
        """
        return safe_getattr(obj, '__test__', False)
    def classnamefilter(self, name):
        """True if *name* matches the python_classes ini patterns."""
        return self._matches_prefix_or_glob_option('python_classes', name)
    def istestfunction(self, obj, name):
        """True if *obj* should be collected as a test function."""
        return (
            (self.funcnamefilter(name) or self.isnosetest(obj))
            and safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
        )
    def istestclass(self, obj, name):
        """True if *obj* should be collected as a test class."""
        return self.classnamefilter(name) or self.isnosetest(obj)
    def _matches_prefix_or_glob_option(self, option_name, name):
        """
        checks if the given name matches the prefix or glob-pattern defined
        in ini configuration.
        """
        for option in self.config.getini(option_name):
            if name.startswith(option):
                return True
            # check that name looks like a glob-string before calling fnmatch
            # because this is called for every name in each collected module,
            # and fnmatch is somewhat expensive to call
            elif ('*' in option or '?' in option or '[' in option) and \
                    fnmatch.fnmatch(name, option):
                return True
        return False
    def collect(self):
        """Collect child items from the wrapped python object's namespace."""
        if not getattr(self.obj, "__test__", True):
            return []
        # NB. we avoid random getattrs and peek in the __dict__ instead
        # (XXX originally introduced from a PyPy need, still true?)
        dicts = [getattr(self.obj, '__dict__', {})]
        for basecls in inspect.getmro(self.obj.__class__):
            dicts.append(basecls.__dict__)
        seen = {}
        l = []
        for dic in dicts:
            for name, obj in dic.items():
                if name in seen:
                    continue
                seen[name] = True
                res = self.makeitem(name, obj)
                if res is None:
                    continue
                if not isinstance(res, list):
                    res = [res]
                l.extend(res)
        # sort by (file, line) so collection order follows source order
        l.sort(key=lambda item: item.reportinfo()[:2])
        return l
    def makeitem(self, name, obj):
        """Build a collection item for *obj* via the makeitem hook."""
        #assert self.ihook.fspath == self.fspath, self
        return self.ihook.pytest_pycollect_makeitem(
            collector=self, name=name, obj=obj)
    def _genfunctions(self, name, funcobj):
        """Yield Function items for *funcobj*, one per parametrized call."""
        module = self.getparent(Module).obj
        clscol = self.getparent(Class)
        cls = clscol and clscol.obj or None
        transfer_markers(funcobj, cls, module)
        fm = self.session._fixturemanager
        fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
        metafunc = Metafunc(funcobj, fixtureinfo, self.config,
                            cls=cls, module=module)
        methods = []
        if hasattr(module, "pytest_generate_tests"):
            methods.append(module.pytest_generate_tests)
        if hasattr(cls, "pytest_generate_tests"):
            methods.append(cls().pytest_generate_tests)
        if methods:
            self.ihook.pytest_generate_tests.call_extra(methods,
                                                        dict(metafunc=metafunc))
        else:
            self.ihook.pytest_generate_tests(metafunc=metafunc)
        Function = self._getcustomclass("Function")
        if not metafunc._calls:
            yield Function(name, parent=self, fixtureinfo=fixtureinfo)
        else:
            # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
            add_funcarg_pseudo_fixture_def(self, metafunc, fm)
            for callspec in metafunc._calls:
                subname = "%s[%s]" %(name, callspec.id)
                yield Function(name=subname, parent=self,
                               callspec=callspec, callobj=funcobj,
                               fixtureinfo=fixtureinfo,
                               keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    """Turn direct parametrization values into artificial FixtureDefs."""
    # this function will transform all collected calls to a functions
    # if they use direct funcargs (i.e. direct parametrization)
    # because we want later test execution to be able to rely on
    # an existing FixtureDef structure for all arguments.
    # XXX we can probably avoid this algorithm  if we modify CallSpec2
    # to directly care for creating the fixturedefs within its methods.
    if not metafunc._calls[0].funcargs:
        return # this function call does not have direct parametrization
    # collect funcargs of all callspecs into a list of values
    arg2params = {}
    arg2scope = {}
    for callspec in metafunc._calls:
        for argname, argvalue in callspec.funcargs.items():
            assert argname not in callspec.params
            callspec.params[argname] = argvalue
            arg2params_list = arg2params.setdefault(argname, [])
            callspec.indices[argname] = len(arg2params_list)
            arg2params_list.append(argvalue)
            if argname not in arg2scope:
                scopenum = callspec._arg2scopenum.get(argname,
                                                      scopenum_function)
                arg2scope[argname] = scopes[scopenum]
        callspec.funcargs.clear()
    # register artificial FixtureDef's so that later at test execution
    # time we can rely on a proper FixtureDef to exist for fixture setup.
    arg2fixturedefs = metafunc._arg2fixturedefs
    for argname, valuelist in arg2params.items():
        # if we have a scope that is higher than function we need
        # to make sure we only ever create an according fixturedef on
        # a per-scope basis. We thus store and cache the fixturedef on the
        # node related to the scope.
        scope = arg2scope[argname]
        node = None
        if scope != "function":
            node = get_scope_node(collector, scope)
            if node is None:
                assert scope == "class" and isinstance(collector, Module)
                # use module-level collector for class-scope (for now)
                node = collector
        if node and argname in node._name2pseudofixturedef:
            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
        else:
            fixturedef = FixtureDef(fixturemanager, '', argname,
                                    get_direct_param_fixture_func,
                                    arg2scope[argname],
                                    valuelist, False, False)
            arg2fixturedefs[argname] = [fixturedef]
            if node is not None:
                node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
    """Fixture function backing direct (non-indirect) parametrization."""
    return request.param
class FuncFixtureInfo:
    """Fixture requirements of one test function.

    argnames: fixture names appearing in the test signature.
    names_closure: transitive closure of all required fixture names.
    name2fixturedefs: mapping of fixture name -> list of FixtureDef.
    """
    def __init__(self, argnames, names_closure, name2fixturedefs):
        self.argnames = argnames
        self.names_closure = names_closure
        self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
    """Copy class- and module-level ``pytestmark`` markers onto *funcobj*."""
    # XXX this should rather be code in the mark plugin or the mark
    # plugin should merge with the python plugin.
    for holder in (cls, mod):
        try:
            pytestmark = holder.pytestmark
        except AttributeError:
            continue
        if isinstance(pytestmark, list):
            for mark in pytestmark:
                if not _marked(funcobj, mark):
                    mark(funcobj)
        else:
            # a single bare marker is allowed as well
            if not _marked(funcobj, pytestmark):
                pytestmark(funcobj)
class Module(pytest.File, PyCollector):
    """ Collector for test classes and functions. """
    def _getobj(self):
        # lazily import the module, caching the result on '_obj'
        return self._memoizedcall('_obj', self._importtestmodule)
    def collect(self):
        # register module-level fixtures before collecting children
        self.session._fixturemanager.parsefactories(self)
        return super(Module, self).collect()
    def _importtestmodule(self):
        """Import the test module, translating import problems into
        CollectError."""
        # we assume we are only called once per module
        importmode = self.config.getoption("--import-mode")
        try:
            mod = self.fspath.pyimport(ensuresyspath=importmode)
        except SyntaxError:
            raise self.CollectError(
                py.code.ExceptionInfo().getrepr(style="short"))
        except self.fspath.ImportMismatchError:
            e = sys.exc_info()[1]
            raise self.CollectError(
                "import file mismatch:\n"
                "imported module %r has this __file__ attribute:\n"
                " %s\n"
                "which is not the same as the test file we want to collect:\n"
                " %s\n"
                "HINT: remove __pycache__ / .pyc files and/or use a "
                "unique basename for your test file modules"
                 % e.args
            )
        #print "imported test module", mod
        self.config.pluginmanager.consider_module(mod)
        return mod
    def setup(self):
        """Run setUpModule/setup_module and register the module teardown."""
        setup_module = xunitsetup(self.obj, "setUpModule")
        if setup_module is None:
            setup_module = xunitsetup(self.obj, "setup_module")
        if setup_module is not None:
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, its probably a pytest style one
            # so we pass the current module object
            if _has_positional_arg(setup_module):
                setup_module(self.obj)
            else:
                setup_module()
        fin = getattr(self.obj, 'tearDownModule', None)
        if fin is None:
            fin = getattr(self.obj, 'teardown_module', None)
        if fin is not None:
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
            # so we pass the current module object
            if _has_positional_arg(fin):
                finalizer = lambda: fin(self.obj)
            else:
                finalizer = fin
            self.addfinalizer(finalizer)
class Class(PyCollector):
    """ Collector for test methods. """
    def collect(self):
        # classes with a custom __init__ cannot be instantiated reliably
        if hasinit(self.obj):
            self.warn("C1", "cannot collect test class %r because it has a "
                "__init__ constructor" % self.obj.__name__)
            return []
        return [self._getcustomclass("Instance")(name="()", parent=self)]
    def setup(self):
        """Run setup_class and register the teardown_class finalizer."""
        setup_class = xunitsetup(self.obj, 'setup_class')
        if setup_class is not None:
            # unwrap staticmethod/classmethod wrappers (py2/py3 spellings)
            setup_class = getattr(setup_class, 'im_func', setup_class)
            setup_class = getattr(setup_class, '__func__', setup_class)
            setup_class(self.obj)
        fin_class = getattr(self.obj, 'teardown_class', None)
        if fin_class is not None:
            fin_class = getattr(fin_class, 'im_func', fin_class)
            fin_class = getattr(fin_class, '__func__', fin_class)
            self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
    """Collector node representing an instance of a test class ("()")."""
    def _getobj(self):
        # instantiate the collected class
        obj = self.parent.obj()
        return obj
    def collect(self):
        # register instance-level fixtures before collecting methods
        self.session._fixturemanager.parsefactories(self)
        return super(Instance, self).collect()
    def newinstance(self):
        """Create and cache a fresh instance of the test class."""
        self.obj = self._getobj()
        return self.obj
class FunctionMixin(PyobjMixin):
    """ mixin for the code common to Function and Generator.
    """
    def setup(self):
        """ perform setup for this test function. """
        if hasattr(self, '_preservedparent'):
            # set by Generator.collect to reuse the generator's parent obj
            obj = self._preservedparent
        elif isinstance(self.parent, Instance):
            # fresh class instance per test method
            obj = self.parent.newinstance()
            self.obj = self._getobj()
        else:
            obj = self.parent.obj
        if inspect.ismethod(self.obj):
            setup_name = 'setup_method'
            teardown_name = 'teardown_method'
        else:
            setup_name = 'setup_function'
            teardown_name = 'teardown_function'
        setup_func_or_method = xunitsetup(obj, setup_name)
        if setup_func_or_method is not None:
            setup_func_or_method(self.obj)
        fin = getattr(obj, teardown_name, None)
        if fin is not None:
            self.addfinalizer(lambda: fin(self.obj))
    def _prunetraceback(self, excinfo):
        """Trim internal frames from the failure traceback."""
        if hasattr(self, '_obj') and not self.config.option.fulltrace:
            code = py.code.Code(get_real_func(self.obj))
            path, firstlineno = code.path, code.firstlineno
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(path=path)
            if ntraceback == traceback:
                #ntraceback = ntraceback.cut(excludepath=cutdir2)
                ntraceback = ntraceback.filter(filter_traceback)
                if not ntraceback:
                    ntraceback = traceback
            excinfo.traceback = ntraceback.filter()
            # issue364: mark all but first and last frames to
            # only show a single-line message for each frame
            if self.config.option.tbstyle == "auto":
                if len(excinfo.traceback) > 2:
                    for entry in excinfo.traceback[1:-1]:
                        entry.set_repr_style('short')
    def _repr_failure_py(self, excinfo, style="long"):
        # pytest.fail(..., pytrace=False) produces a message-only report
        if excinfo.errisinstance(pytest.fail.Exception):
            if not excinfo.value.pytrace:
                return str(excinfo.value)
        return super(FunctionMixin, self)._repr_failure_py(excinfo,
            style=style)
    def repr_failure(self, excinfo, outerr=None):
        """Return the failure representation using the configured tb style."""
        assert outerr is None, "XXX outerr usage is deprecated"
        style = self.config.option.tbstyle
        if style == "auto":
            style = "long"
        return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
    """Collector for legacy yield-based test generators."""
    def collect(self):
        """Run the generator and turn each yielded item into a Function."""
        # test generators are seen as collectors but they also
        # invoke setup/teardown on popular request
        # (induced by the common "test_*" naming shared with normal tests)
        self.session._setupstate.prepare(self)
        # see FunctionMixin.setup and test_setupstate_is_preserved_134
        self._preservedparent = self.parent.obj
        l = []
        seen = {}
        for i, x in enumerate(self.obj()):
            name, call, args = self.getcallargs(x)
            if not callable(call):
                raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
            if name is None:
                name = "[%d]" % i
            else:
                name = "['%s']" % name
            if name in seen:
                raise ValueError("%r generated tests with non-unique name %r" %(self, name))
            seen[name] = True
            l.append(self.Function(name, self, args=args, callobj=call))
        return l
    def getcallargs(self, obj):
        """Split a yielded item into (name, callable, args)."""
        if not isinstance(obj, (tuple, list)):
            obj = (obj,)
        # explict naming: a leading string names the generated test
        if isinstance(obj[0], py.builtin._basestring):
            name = obj[0]
            obj = obj[1:]
        else:
            name = None
        call, args = obj[0], obj[1:]
        return name, call, args
def hasinit(obj):
    """Return True if *obj* defines its own __init__ (not object's)."""
    init = getattr(obj, '__init__', None)
    if init and init != object.__init__:
        return True
def fillfixtures(function):
    """ fill missing funcargs for a test function. """
    try:
        request = function._request
    except AttributeError:
        # XXX this special code path is only expected to execute
        # with the oejskit plugin.  It uses classes with funcargs
        # and we thus have to work a bit to allow this.
        fm = function.session._fixturemanager
        fi = fm.getfixtureinfo(function.parent, function.obj, None)
        function._fixtureinfo = fi
        request = function._request = FixtureRequest(function)
        request._fillfixtures()
        # prune out funcargs for jstests
        newfuncargs = {}
        for name in fi.argnames:
            newfuncargs[name] = function.funcargs[name]
        function.funcargs = newfuncargs
    else:
        request._fillfixtures()
# Sentinel: distinguishes "no value provided" from an explicit None.
_notexists = object()
class CallSpec2(object):
    """One concrete parametrized invocation of a test function.

    Accumulates direct funcargs, indirect params, marker keywords and the
    id fragments contributed by successive parametrize() calls.
    """
    def __init__(self, metafunc):
        self.metafunc = metafunc
        self.funcargs = {}
        self._idlist = []
        self.params = {}
        self._globalid = _notexists
        self._globalid_args = set()
        self._globalparam = _notexists
        self._arg2scopenum = {}  # used for sorting parametrized resources
        self.keywords = {}
        self.indices = {}
    def copy(self, metafunc):
        """Return an independent copy of this callspec."""
        cs = CallSpec2(self.metafunc)
        cs.funcargs.update(self.funcargs)
        cs.params.update(self.params)
        cs.keywords.update(self.keywords)
        cs.indices.update(self.indices)
        cs._arg2scopenum.update(self._arg2scopenum)
        cs._idlist = list(self._idlist)
        cs._globalid = self._globalid
        cs._globalid_args = self._globalid_args
        cs._globalparam = self._globalparam
        return cs
    def _checkargnotcontained(self, arg):
        # an argname may only be parametrized once per callspec
        if arg in self.params or arg in self.funcargs:
            raise ValueError("duplicate %r" %(arg,))
    def getparam(self, name):
        """Return the indirect param for *name*, falling back to the
        global param; raise ValueError when neither exists."""
        try:
            return self.params[name]
        except KeyError:
            if self._globalparam is _notexists:
                raise ValueError(name)
            return self._globalparam
    @property
    def id(self):
        # joined non-empty id fragments, e.g. "2-flag-on"
        return "-".join(map(str, filter(None, self._idlist)))
    def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
                 param_index):
        """Record one parametrize() value set on this callspec."""
        for arg,val in zip(argnames, valset):
            self._checkargnotcontained(arg)
            valtype_for_arg = valtypes[arg]
            # store into self.params or self.funcargs depending on valtype
            getattr(self, valtype_for_arg)[arg] = val
            self.indices[arg] = param_index
            self._arg2scopenum[arg] = scopenum
            if val is _notexists:
                self._emptyparamspecified = True
        self._idlist.append(id)
        self.keywords.update(keywords)
    def setall(self, funcargs, id, param):
        """Record values from the legacy addcall() API."""
        for x in funcargs:
            self._checkargnotcontained(x)
        self.funcargs.update(funcargs)
        if id is not _notexists:
            self._idlist.append(id)
        if param is not _notexists:
            assert self._globalparam is _notexists
            self._globalparam = param
        for arg in funcargs:
            self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
    """ helper class so that Metafunc, Function and FixtureRequest
    don't need to each define the "funcargnames" compatibility attribute.
    """
    @property
    def funcargnames(self):
        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
        # deprecated spelling kept so older plugins keep working
        return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object where the test function is defined in or ``None``.
:ivar module: the module object where the test function is defined in.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
                scope=None):
    """ Add new invocations to the underlying test function using the list
    of argvalues for the given argnames.  Parametrization is performed
    during the collection phase.  If you need to setup expensive resources
    see about setting indirect to do it rather at test setup time.

    :arg argnames: a comma-separated string denoting one or more argument
                   names, or a list/tuple of argument strings.

    :arg argvalues: The list of argvalues determines how often a
        test is invoked with different argument values. If only one
        argname was specified argvalues is a list of simple values.  If N
        argnames were specified, argvalues must be a list of N-tuples,
        where each tuple-element specifies a value for its respective
        argname.

    :arg indirect: The list of argnames or boolean. A list of arguments'
        names (subset of argnames). If True the list contains all names from
        the argnames. Each argvalue corresponding to an argname in this list will
        be passed as request.param to its respective argname fixture
        function so that it can perform more expensive setups during the
        setup phase of a test rather than at collection time.

    :arg ids: list of string ids, or a callable.
        If strings, each is corresponding to the argvalues so that they are
        part of the test id.
        If callable, it should take one argument (a single argvalue) and return
        a string or return None. If None, the automatically generated id for that
        argument will be used.
        If no ids are provided they will be generated automatically from
        the argvalues.

    :arg scope: if specified it denotes the scope of the parameters.
        The scope is used for grouping tests by parameter instances.
        It will also override any fixture-function defined scope, allowing
        to set a dynamic scope using test context or configuration.
    """
    # individual parametrized argument sets can be wrapped in a series
    # of markers in which case we unwrap the values and apply the mark
    # at Function init
    newkeywords = {}
    unwrapped_argvalues = []
    for i, argval in enumerate(argvalues):
        # peel off stacked pytest.mark.* decorators; the innermost
        # positional arg is the actual parameter value
        while isinstance(argval, MarkDecorator):
            newmark = MarkDecorator(argval.markname,
                                    argval.args[:-1], argval.kwargs)
            newmarks = newkeywords.setdefault(i, {})
            newmarks[newmark.markname] = newmark
            argval = argval.args[-1]
        unwrapped_argvalues.append(argval)
    argvalues = unwrapped_argvalues

    if not isinstance(argnames, (tuple, list)):
        # "a,b" string form: split into a list of names
        argnames = [x.strip() for x in argnames.split(",") if x.strip()]
        if len(argnames) == 1:
            # normalize scalar values to 1-tuples for uniform handling
            argvalues = [(val,) for val in argvalues]
    if not argvalues:
        # empty parameter set: collect a placeholder so setup() can skip
        argvalues = [(_notexists,) * len(argnames)]

    if scope is None:
        scope = "function"
    scopenum = scopes.index(scope)
    valtypes = {}
    for arg in argnames:
        if arg not in self.fixturenames:
            raise ValueError("%r uses no fixture %r" %(self.function, arg))

    # decide, per argument name, whether the value goes to funcargs
    # directly or is routed through request.param ("params")
    if indirect is True:
        valtypes = dict.fromkeys(argnames, "params")
    elif indirect is False:
        valtypes = dict.fromkeys(argnames, "funcargs")
    elif isinstance(indirect, (tuple, list)):
        valtypes = dict.fromkeys(argnames, "funcargs")
        for arg in indirect:
            if arg not in argnames:
                raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
                                 self.function, arg))
            valtypes[arg] = "params"
    idfn = None
    if callable(ids):
        # a callable produces ids lazily; fall back to autogeneration
        idfn = ids
        ids = None
    if ids and len(ids) != len(argvalues):
        raise ValueError('%d tests specified with %d ids' %(
                         len(argvalues), len(ids)))
    if not ids:
        ids = idmaker(argnames, argvalues, idfn)
    # cross-product with any previously registered calls
    newcalls = []
    for callspec in self._calls or [CallSpec2(self)]:
        for param_index, valset in enumerate(argvalues):
            assert len(valset) == len(argnames)
            newcallspec = callspec.copy(self)
            newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
                                 newkeywords.get(param_index, {}), scopenum,
                                 param_index)
            newcalls.append(newcallspec)
    self._calls = newcalls
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
    """ (deprecated, use parametrize) Add a new call to the underlying
    test function during the collection phase of a test run.  Note that
    request.addcall() is called during the test collection phase prior and
    independently to actual test execution.  You should only use addcall()
    if you need to specify multiple arguments of a test function.

    :arg funcargs: argument keyword dictionary used when invoking
        the test function.

    :arg id: used for reporting and identification purposes.  If you
        don't supply an `id` an automatic unique id will be generated.

    :arg param: a parameter which will be exposed to a later fixture function
        invocation through the ``request.param`` attribute.
    """
    assert funcargs is None or isinstance(funcargs, dict)
    if funcargs is not None:
        # every supplied funcarg must be a known fixture name
        for name in funcargs:
            if name not in self.fixturenames:
                pytest.fail("funcarg %r not used in this function." % name)
    else:
        funcargs = {}
    if id is None:
        raise ValueError("id=None not allowed")
    if id is _notexists:
        # auto-generate a positional id
        id = len(self._calls)
    id = str(id)
    if id in self._ids:
        raise ValueError("duplicate id %r" % id)
    self._ids.add(id)

    cs = CallSpec2(self)
    cs.setall(funcargs, id, param)
    self._calls.append(cs)
def _idval(val, argname, idx, idfn):
    """Return the id string for a single parametrized value.

    The user-supplied *idfn* gets first shot; on failure or a falsy
    result we fall back to type-based rules, and finally to
    "<argname><idx>".
    """
    if idfn:
        try:
            s = idfn(val)
            if s:
                return s
        except Exception:
            # a broken idfn must not break collection; ignore and fall back
            pass

    # note: bool is checked via the int branch (bool subclasses int)
    if isinstance(val, (float, int, str, bool, NoneType)):
        return str(val)
    elif isinstance(val, REGEX_TYPE):
        return val.pattern
    elif enum is not None and isinstance(val, enum.Enum):
        # enum may be None on pythons without the enum module
        return str(val)
    elif isclass(val) and hasattr(val, '__name__'):
        return val.__name__
    return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
    """Join the per-argument ids of one parametrized value set with dashes."""
    parts = []
    for val, argname in zip(valset, argnames):
        parts.append(_idval(val, argname, idx, idfn))
    return "-".join(parts)
def idmaker(argnames, argvalues, idfn=None):
    """Compute one test id per value set, de-duplicating when needed."""
    ids = []
    for valindex, valset in enumerate(argvalues):
        ids.append(_idvalset(valindex, valset, argnames, idfn))
    if len(set(ids)) < len(ids):
        # user may have provided a bad idfn which means the ids are not
        # unique; prefix every id with its index to disambiguate
        ids = [str(i) + testid for i, testid in enumerate(ids)]
    return ids
def showfixtures(config):
    """Entry point for ``--fixtures``: run a collection-only session and
    print the available fixtures."""
    from _pytest.main import wrap_session
    return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
    """Collect the session and write every visible fixture (name, location,
    docstring) to the terminal; private fixtures only with -v."""
    import _pytest.config
    session.perform_collect()
    curdir = py.path.local()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")

    fm = session._fixturemanager

    # gather (sortkey..., fixturedef) tuples; sorting by baseid length
    # groups plugin fixtures before conftest fixtures
    available = []
    for argname, fixturedefs in fm._arg2fixturedefs.items():
        assert fixturedefs is not None
        if not fixturedefs:
            continue
        # the last fixturedef is the one that would actually be used
        fixturedef = fixturedefs[-1]
        loc = getlocation(fixturedef.func, curdir)
        available.append((len(fixturedef.baseid),
                          fixturedef.func.__module__,
                          curdir.bestrelpath(loc),
                          fixturedef.argname, fixturedef))

    available.sort()
    currentmodule = None
    for baseid, module, bestrel, argname, fixturedef in available:
        if currentmodule != module:
            # print one separator header per defining module, skipping
            # pytest's own internals
            if not module.startswith("_pytest."):
                tw.line()
                tw.sep("-", "fixtures defined from %s" %(module,))
                currentmodule = module
        if verbose <= 0 and argname[0] == "_":
            # hide private fixtures unless -v was given
            continue
        if verbose > 0:
            funcargspec = "%s -- %s" %(argname, bestrel,)
        else:
            funcargspec = argname
        tw.line(funcargspec, green=True)
        loc = getlocation(fixturedef.func, curdir)
        doc = fixturedef.func.__doc__ or ""
        if doc:
            for line in doc.strip().split("\n"):
                tw.line(" " + line.strip())
        else:
            tw.line(" %s: no docstring available" %(loc,),
                red=True)
def getlocation(function, curdir):
    """Return "path:lineno" for *function*, relative to *curdir* when possible."""
    import inspect
    path = py.path.local(inspect.getfile(function))
    lineno = py.builtin._getcode(function).co_firstlineno
    relpath = path.relto(curdir)
    if relpath:
        path = relpath
    return "%s:%d" % (path, lineno + 1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
    """ assert that a code block/function call raises @expected_exception
    and raise a failure exception otherwise.

    This helper produces a ``py.code.ExceptionInfo()`` object.

    If using Python 2.5 or above, you may use this function as a
    context manager::

        >>> with raises(ZeroDivisionError):
        ...    1/0

    Or you can specify a callable by passing a to-be-called lambda::

        >>> raises(ZeroDivisionError, lambda: 1/0)
        <ExceptionInfo ...>

    or you can specify an arbitrary callable with arguments::

        >>> def f(x): return 1/x
        ...
        >>> raises(ZeroDivisionError, f, 0)
        <ExceptionInfo ...>
        >>> raises(ZeroDivisionError, f, x=0)
        <ExceptionInfo ...>

    A third possibility is to use a string to be executed::

        >>> raises(ZeroDivisionError, "f(0)")
        <ExceptionInfo ...>

    Performance note:
    -----------------

    Similar to caught exception objects in Python, explicitly clearing
    local references to returned ``py.code.ExceptionInfo`` objects can
    help the Python interpreter speed up its garbage collection.

    Clearing those references breaks a reference cycle
    (``ExceptionInfo`` --> caught exception --> frame stack raising
    the exception --> current frame stack --> local variables -->
    ``ExceptionInfo``) which makes Python keep all objects referenced
    from that cycle (including all local variables in the current
    frame) alive until the next cyclic garbage collection run. See the
    official Python ``try`` statement documentation for more detailed
    information.
    """
    __tracebackhide__ = True
    if expected_exception is AssertionError:
        # we want to catch a AssertionError
        # replace our subclass with the builtin one
        # see https://github.com/pytest-dev/pytest/issues/176
        from _pytest.assertion.util import BuiltinAssertionError \
            as expected_exception
    msg = ("exceptions must be old-style classes or"
           " derived from BaseException, not %s")
    if isinstance(expected_exception, tuple):
        for exc in expected_exception:
            if not isclass(exc):
                raise TypeError(msg % type(exc))
    elif not isclass(expected_exception):
        raise TypeError(msg % type(expected_exception))

    if not args:
        # context-manager form: "with raises(Exc): ..."
        return RaisesContext(expected_exception)
    elif isinstance(args[0], str):
        # string form: compile and exec the snippet in the caller's scope
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        loc = frame.f_locals.copy()
        loc.update(kwargs)
        #print "raises frame scope: %r" % frame.f_locals
        try:
            code = py.code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't f_globals == f_locals mean something special?
            # this is destroyed here ...
        except expected_exception:
            return py.code.ExceptionInfo()
    else:
        # callable form: invoke with remaining positional/keyword args
        func = args[0]
        try:
            func(*args[1:], **kwargs)
        except expected_exception:
            return py.code.ExceptionInfo()
    # no exception was raised by either the snippet or the callable
    pytest.fail("DID NOT RAISE")
class RaisesContext(object):
    """Context manager returned by ``raises(Exc)``; fails the test unless
    the with-block raises the expected exception."""

    def __init__(self, expected_exception):
        self.expected_exception = expected_exception
        self.excinfo = None

    def __enter__(self):
        # create an uninitialized ExceptionInfo; it is filled in on exit
        # so the caller can inspect it after the with-block
        self.excinfo = object.__new__(py.code.ExceptionInfo)
        return self.excinfo

    def __exit__(self, *tp):
        __tracebackhide__ = True
        if tp[0] is None:
            pytest.fail("DID NOT RAISE")
        if sys.version_info < (2, 7):
            # py26: on __exit__() exc_value often does not contain the
            # exception value.
            # http://bugs.python.org/issue7853
            if not isinstance(tp[1], BaseException):
                exc_type, value, traceback = tp
                tp = exc_type, exc_type(value), traceback
        self.excinfo.__init__(tp)
        # True suppresses the exception only when it matches
        return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
    """ a Function Item is responsible for setting up and executing a
    Python test function.
    """
    # id suffix from a parametrized callspec; None when unparametrized
    _genid = None

    def __init__(self, name, parent, args=None, config=None,
                 callspec=None, callobj=NOTSET, keywords=None, session=None,
                 fixtureinfo=None):
        super(Function, self).__init__(name, parent, config=config,
                                       session=session)
        # _args is only set for (deprecated) yielded test functions
        self._args = args
        if callobj is not NOTSET:
            self.obj = callobj

        # merge keywords from the function object, the callspec and the
        # explicit keyword argument, in that order
        self.keywords.update(self.obj.__dict__)
        if callspec:
            self.callspec = callspec
            self.keywords.update(callspec.keywords)
        if keywords:
            self.keywords.update(keywords)

        if fixtureinfo is None:
            # compute fixture closure lazily when not provided by the
            # collecting Module/Class
            fixtureinfo = self.session._fixturemanager.getfixtureinfo(
                self.parent, self.obj, self.cls,
                funcargs=not self._isyieldedfunction())
        self._fixtureinfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()

    def _initrequest(self):
        # prepare the per-item FixtureRequest and expose callspec state
        self.funcargs = {}
        if self._isyieldedfunction():
            assert not hasattr(self, "callspec"), (
                "yielded functions (deprecated) cannot have funcargs")
        else:
            if hasattr(self, "callspec"):
                callspec = self.callspec
                assert not callspec.funcargs
                self._genid = callspec.id
                if hasattr(callspec, "param"):
                    self.param = callspec.param
        self._request = FixtureRequest(self)

    @property
    def function(self):
        "underlying python 'function' object"
        # im_func unwraps py2 bound methods; on py3 obj is returned as-is
        return getattr(self.obj, 'im_func', self.obj)

    def _getobj(self):
        name = self.name
        i = name.find("[") # parametrization
        if i != -1:
            # strip the "[...]" parametrization suffix to find the attribute
            name = name[:i]
        return getattr(self.parent.obj, name)

    @property
    def _pyfuncitem(self):
        "(compatonly) for code expecting pytest-2.2 style request objects"
        return self

    def _isyieldedfunction(self):
        return getattr(self, "_args", None) is not None

    def runtest(self):
        """ execute the underlying test function. """
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)

    def setup(self):
        # check if parametrization happened with an empty list; if so the
        # collected placeholder call must be skipped, not executed
        try:
            self.callspec._emptyparamspecified
        except AttributeError:
            pass
        else:
            fs, lineno = self._getfslineno()
            pytest.skip("got empty parameter set, function %s at %s:%d" %(
                self.function.__name__, fs, lineno))
        super(Function, self).setup()
        fillfixtures(self)
# map each fixture scope to the FixtureRequest properties that are legal
# within it; broader scopes expose fewer properties (a session-scoped
# request has no module/class/function to report), and each narrower
# scope extends the previous one.
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
    """Decorator factory: build a read-only property that is accessible
    only from request scopes that support it (per ``scope2props``)."""
    def decorate(func):
        scopename = name or func.__name__

        def getter(self):
            # the request's current scope decides which attributes exist
            if func.__name__ in scope2props[self.scope]:
                return func(self)
            raise AttributeError("%s not available in %s-scoped context" % (
                scopename, self.scope))

        return property(getter, None, None, func.__doc__)
    return decorate
class FixtureRequest(FuncargnamesCompatAttr):
    """ A request for a fixture from a test or fixture function.

    A request object gives access to the requesting test context
    and has an optional ``param`` attribute in case
    the fixture is parametrized indirectly.
    """

    def __init__(self, pyfuncitem):
        self._pyfuncitem = pyfuncitem
        #: fixture for which this request is being performed
        self.fixturename = None
        #: Scope string, one of "function", "cls", "module", "session"
        self.scope = "function"
        # cache of already-computed fixture values / fixturedefs by argname
        self._funcargs = {}
        self._fixturedefs = {}
        fixtureinfo = pyfuncitem._fixtureinfo
        # copy so dynamic getfuncargvalue() lookups don't mutate shared state
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
        self._arg2index = {}
        self.fixturenames = fixtureinfo.names_closure
        self._fixturemanager = pyfuncitem.session._fixturemanager

    @property
    def node(self):
        """ underlying collection node (depends on current request scope)"""
        return self._getscopeitem(self.scope)

    def _getnextfixturedef(self, argname):
        fixturedefs = self._arg2fixturedefs.get(argname, None)
        if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfuncargvalue(argname) usage which was naturally
            # not known at parsing/collection time
            fixturedefs = self._fixturemanager.getfixturedefs(
                argname, self._pyfuncitem.parent.nodeid)
            self._arg2fixturedefs[argname] = fixturedefs
        # fixturedefs list is immutable so we maintain a decreasing index
        # (each lookup for the same name walks one definition outward,
        # which is what fixture overriding relies on)
        index = self._arg2index.get(argname, 0) - 1
        if fixturedefs is None or (-index > len(fixturedefs)):
            raise FixtureLookupError(argname, self)
        self._arg2index[argname] = index
        return fixturedefs[index]

    @property
    def config(self):
        """ the pytest config object associated with this request. """
        return self._pyfuncitem.config

    @scopeproperty()
    def function(self):
        """ test function object if the request has a per-function scope. """
        return self._pyfuncitem.obj

    @scopeproperty("class")
    def cls(self):
        """ class (can be None) where the test function was collected. """
        clscol = self._pyfuncitem.getparent(pytest.Class)
        if clscol:
            return clscol.obj

    @property
    def instance(self):
        """ instance (can be None) on which test function was collected. """
        # unittest support hack, see _pytest.unittest.TestCaseFunction
        try:
            return self._pyfuncitem._testcase
        except AttributeError:
            function = getattr(self, "function", None)
            if function is not None:
                return py.builtin._getimself(function)

    @scopeproperty()
    def module(self):
        """ python module object where the test function was collected. """
        return self._pyfuncitem.getparent(pytest.Module).obj

    @scopeproperty()
    def fspath(self):
        """ the file system path of the test module which collected this test. """
        return self._pyfuncitem.fspath

    @property
    def keywords(self):
        """ keywords/markers dictionary for the underlying node. """
        return self.node.keywords

    @property
    def session(self):
        """ pytest session object. """
        return self._pyfuncitem.session

    def addfinalizer(self, finalizer):
        """ add finalizer/teardown function to be called after the
        last test within the requesting test context finished
        execution. """
        # XXX usually this method is shadowed by fixturedef specific ones
        self._addfinalizer(finalizer, scope=self.scope)

    def _addfinalizer(self, finalizer, scope):
        colitem = self._getscopeitem(scope)
        self._pyfuncitem.session._setupstate.addfinalizer(
            finalizer=finalizer, colitem=colitem)

    def applymarker(self, marker):
        """ Apply a marker to a single test function invocation.
        This method is useful if you don't want to have a keyword/marker
        on all function invocations.

        :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
            created by a call to ``pytest.mark.NAME(...)``.
        """
        try:
            self.node.keywords[marker.markname] = marker
        except AttributeError:
            raise ValueError(marker)

    def raiseerror(self, msg):
        """ raise a FixtureLookupError with the given message. """
        raise self._fixturemanager.FixtureLookupError(None, self, msg)

    def _fillfixtures(self):
        # resolve every fixture the item needs that is not yet in funcargs
        item = self._pyfuncitem
        fixturenames = getattr(item, "fixturenames", self.fixturenames)
        for argname in fixturenames:
            if argname not in item.funcargs:
                item.funcargs[argname] = self.getfuncargvalue(argname)

    def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
        """ (deprecated) Return a testing resource managed by ``setup`` &
        ``teardown`` calls.  ``scope`` and ``extrakey`` determine when the
        ``teardown`` function will be called so that subsequent calls to
        ``setup`` would recreate the resource.  With pytest-2.3 you often
        do not need ``cached_setup()`` as you can directly declare a scope
        on a fixture function and register a finalizer through
        ``request.addfinalizer()``.

        :arg teardown: function receiving a previously setup resource.
        :arg setup: a no-argument function creating a resource.
        :arg scope: a string value out of ``function``, ``class``, ``module``
            or ``session`` indicating the caching lifecycle of the resource.
        :arg extrakey: added to internal caching key of (funcargname, scope).
        """
        if not hasattr(self.config, '_setupcache'):
            self.config._setupcache = {} # XXX weakref?
        cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
        cache = self.config._setupcache
        try:
            val = cache[cachekey]
        except KeyError:
            self._check_scope(self.fixturename, self.scope, scope)
            val = setup()
            cache[cachekey] = val
            if teardown is not None:
                def finalizer():
                    # drop the cache entry first so a failing teardown
                    # still forces a fresh setup next time
                    del cache[cachekey]
                    teardown(val)
                self._addfinalizer(finalizer, scope=scope)
        return val

    def getfuncargvalue(self, argname):
        """ Dynamically retrieve a named fixture function argument.

        As of pytest-2.3, it is easier and usually better to access other
        fixture values by stating it as an input argument in the fixture
        function.  If you only can decide about using another fixture at test
        setup time, you may use this function to retrieve it inside a fixture
        function body.
        """
        return self._get_active_fixturedef(argname).cached_result[0]

    def _get_active_fixturedef(self, argname):
        try:
            return self._fixturedefs[argname]
        except KeyError:
            try:
                fixturedef = self._getnextfixturedef(argname)
            except FixtureLookupError:
                if argname == "request":
                    # "request" itself has no real fixturedef; fake one
                    # whose cached_result yields this request object
                    class PseudoFixtureDef:
                        cached_result = (self, [0], None)
                        scope = "function"
                    return PseudoFixtureDef
                raise
        # remove indent to prevent the python3 exception
        # from leaking into the call
        result = self._getfuncargvalue(fixturedef)
        self._funcargs[argname] = result
        self._fixturedefs[argname] = fixturedef
        return fixturedef

    def _get_fixturestack(self):
        # walk _parent_request links (SubRequest chain) outward and return
        # the involved fixturedefs in outermost-first order
        current = self
        l = []
        while 1:
            fixturedef = getattr(current, "_fixturedef", None)
            if fixturedef is None:
                l.reverse()
                return l
            l.append(fixturedef)
            current = current._parent_request

    def _getfuncargvalue(self, fixturedef):
        # prepare a subrequest object before calling fixture function
        # (latter managed by fixturedef)
        argname = fixturedef.argname
        funcitem = self._pyfuncitem
        scope = fixturedef.scope
        try:
            param = funcitem.callspec.getparam(argname)
        except (AttributeError, ValueError):
            param = NOTSET
            param_index = 0
        else:
            # indices might not be set if old-style metafunc.addcall() was used
            param_index = funcitem.callspec.indices.get(argname, 0)
            # if a parametrize invocation set a scope it will override
            # the static scope defined with the fixture function
            paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
            if paramscopenum is not None:
                scope = scopes[paramscopenum]

        subrequest = SubRequest(self, scope, param, param_index, fixturedef)

        # check if a higher-level scoped fixture accesses a lower level one
        subrequest._check_scope(argname, self.scope, scope)

        # clear sys.exc_info before invoking the fixture (python bug?)
        # if its not explicitly cleared it will leak into the call
        exc_clear()
        try:
            # call the fixture function
            val = fixturedef.execute(request=subrequest)
        finally:
            # if fixture function failed it might have registered finalizers
            self.session._setupstate.addfinalizer(fixturedef.finish,
                                                  subrequest.node)
        return val

    def _check_scope(self, argname, invoking_scope, requested_scope):
        if argname == "request":
            return
        if scopemismatch(invoking_scope, requested_scope):
            # try to report something helpful
            lines = self._factorytraceback()
            pytest.fail("ScopeMismatch: You tried to access the %r scoped "
                "fixture %r with a %r scoped request object, "
                "involved factories\n%s" %(
                (requested_scope, argname, invoking_scope, "\n".join(lines))),
                pytrace=False)

    def _factorytraceback(self):
        # one "path:lineno: def name(args)" line per fixture in the stack
        lines = []
        for fixturedef in self._get_fixturestack():
            factory = fixturedef.func
            fs, lineno = getfslineno(factory)
            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
            args = _format_args(factory)
            lines.append("%s:%d: def %s%s" %(
                p, lineno, factory.__name__, args))
        return lines

    def _getscopeitem(self, scope):
        if scope == "function":
            # this might also be a non-function Item despite its attribute name
            return self._pyfuncitem
        node = get_scope_node(self._pyfuncitem, scope)
        if node is None and scope == "class":
            # fallback to function item itself
            node = self._pyfuncitem
        assert node
        return node

    def __repr__(self):
        return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
    """ a sub request for handling getting a fixture from a
    test function/fixture. """

    def __init__(self, request, scope, param, param_index, fixturedef):
        # NOTE: deliberately does not call FixtureRequest.__init__;
        # lookup state and caches are shared with the parent request
        self._parent_request = request
        self.fixturename = fixturedef.argname
        if param is not NOTSET:
            # only set when the fixture is parametrized, so that
            # hasattr(request, "param") stays meaningful
            self.param = param
        self.param_index = param_index
        self.scope = scope
        self._fixturedef = fixturedef
        # finalizers registered through this request attach to the fixturedef
        self.addfinalizer = fixturedef.addfinalizer
        self._pyfuncitem = request._pyfuncitem
        # shared (not copied) with the parent request
        self._funcargs = request._funcargs
        self._fixturedefs = request._fixturedefs
        self._arg2fixturedefs = request._arg2fixturedefs
        self._arg2index = request._arg2index
        self.fixturenames = request.fixturenames
        self._fixturemanager = request._fixturemanager

    def __repr__(self):
        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
    """ A fixture function tries to use a different fixture function
    which has a lower scope (e.g. a Session one calls a function one)
    """
# fixture scopes ordered broadest -> narrowest; the list index is the
# "scopenum" used throughout (0 == session)
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
    """Return True if *newscope* is narrower than *currentscope*."""
    current_rank = scopes.index(currentscope)
    new_rank = scopes.index(newscope)
    return new_rank > current_rank
class FixtureLookupError(LookupError):
    """ could not return a requested Fixture (missing or invalid). """

    def __init__(self, argname, request, msg=None):
        self.argname = argname
        self.request = request
        self.fixturestack = request._get_fixturestack()
        self.msg = msg

    def formatrepr(self):
        """Build a FixtureLookupErrorRepr showing the fixture stack and a
        helpful message (listing available fixtures when none was found)."""
        tblines = []
        addline = tblines.append
        # innermost entry is the test function itself, then the fixtures
        stack = [self.request._pyfuncitem.obj]
        stack.extend(map(lambda x: x.func, self.fixturestack))
        msg = self.msg
        if msg is not None:
            # the last fixture raised an error, let's present
            # it at the requesting side
            stack = stack[:-1]
        # NOTE(review): assumes stack is non-empty so fspath/lineno are
        # bound for the return below — confirm against callers
        for function in stack:
            fspath, lineno = getfslineno(function)
            try:
                lines, _ = inspect.getsourcelines(get_real_func(function))
            except IOError:
                error_msg = "file %s, line %s: source code not available"
                addline(error_msg % (fspath, lineno+1))
            else:
                addline("file %s, line %s" % (fspath, lineno+1))
                # show the source up to and including the "def" line
                for i, line in enumerate(lines):
                    line = line.rstrip()
                    addline(" " + line)
                    if line.lstrip().startswith('def'):
                        break

        if msg is None:
            fm = self.request._fixturemanager
            available = []
            for name, fixturedef in fm._arg2fixturedefs.items():
                parentid = self.request._pyfuncitem.parent.nodeid
                faclist = list(fm._matchfactories(fixturedef, parentid))
                if faclist:
                    available.append(name)
            msg = "fixture %r not found" % (self.argname,)
            msg += "\n available fixtures: %s" %(", ".join(available),)
            msg += "\n use 'py.test --fixtures [testpath]' for help on them."

        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
    """Terminal representation for a FixtureLookupError: the collected
    traceback lines, the error message in red, and the location."""

    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
        self.tblines = tblines
        self.errorstring = errorstring
        self.filename = filename
        self.firstlineno = firstlineno
        self.argname = argname

    def toterminal(self, tw):
        #tw.line("FixtureLookupError: %s" %(self.argname), red=True)
        for tbline in self.tblines:
            tw.line(tbline.rstrip())
        for line in self.errorstring.split("\n"):
            tw.line(" " + line.strip(), red=True)
        tw.line()
        # firstlineno is 0-based; display 1-based
        tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
    """
    pytest fixtures definitions and information is stored and managed
    from this class.

    During collection fm.parsefactories() is called multiple times to parse
    fixture function definitions into FixtureDef objects and internal
    data structures.

    During collection of test functions, metafunc-mechanics instantiate
    a FuncFixtureInfo object which is cached per node/func-name.
    This FuncFixtureInfo object is later retrieved by Function nodes
    which themselves offer a fixturenames attribute.

    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
    relevant for a particular function.  An initial list of fixtures is
    assembled like this:

    - ini-defined usefixtures
    - autouse-marked fixtures along the collection chain up from the function
    - usefixtures markers at module/class/function level
    - test function funcargs

    Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to setup the initial fixtures,
    i. e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.

    Upon the test-setup phases all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
    """

    # pre-2.3 style fixture functions carry this name prefix
    _argprefix = "pytest_funcarg__"
    FixtureLookupError = FixtureLookupError
    FixtureLookupErrorRepr = FixtureLookupErrorRepr

    def __init__(self, session):
        self.session = session
        self.config = session.config
        # argname -> list of FixtureDef, in increasing override priority
        self._arg2fixturedefs = {}
        self._holderobjseen = set()
        self._arg2finish = {}
        # (base nodeid, autouse fixture names) pairs; "" matches everything
        self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
        session.config.pluginmanager.register(self, "funcmanage")

    def getfixtureinfo(self, node, func, cls, funcargs=True):
        """Compute the FuncFixtureInfo (initial names + closure) for *func*
        collected at *node*."""
        if funcargs and not hasattr(node, "nofuncargs"):
            if cls is not None:
                # skip the implicit "self" argument for methods
                startindex = 1
            else:
                startindex = None
            argnames = getfuncargnames(func, startindex)
        else:
            argnames = ()
        usefixtures = getattr(func, "usefixtures", None)
        initialnames = argnames
        if usefixtures is not None:
            # @pytest.mark.usefixtures names come before the funcargs
            initialnames = usefixtures.args + initialnames
        fm = node.session._fixturemanager
        names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
                                                              node)
        return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)

    def pytest_plugin_registered(self, plugin):
        nodeid = None
        try:
            p = py.path.local(plugin.__file__)
        except AttributeError:
            pass
        else:
            # construct the base nodeid which is later used to check
            # what fixtures are visible for particular tests (as denoted
            # by their test id)
            if p.basename.startswith("conftest.py"):
                nodeid = p.dirpath().relto(self.config.rootdir)
                if p.sep != "/":
                    nodeid = nodeid.replace(p.sep, "/")
        self.parsefactories(plugin, nodeid)

    def _getautousenames(self, nodeid):
        """ return a tuple of fixture names to be used. """
        autousenames = []
        for baseid, basenames in self._nodeid_and_autousenames:
            if nodeid.startswith(baseid):
                if baseid:
                    # guard against prefix-only matches such as
                    # "tests/foo" matching "tests/foobar"
                    i = len(baseid)
                    nextchar = nodeid[i:i+1]
                    if nextchar and nextchar not in ":/":
                        continue
                autousenames.extend(basenames)
        # make sure autousenames are sorted by scope, scopenum 0 is session
        autousenames.sort(
            key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
        return autousenames

    def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures , starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return a arg2fixturedefs
        # mapping so that the caller can reuse it and does not have
        # to re-discover fixturedefs again for each fixturename
        # (discovering matching fixtures for a given name/node is expensive)

        parentid = parentnode.nodeid
        fixturenames_closure = self._getautousenames(parentid)
        def merge(otherlist):
            for arg in otherlist:
                if arg not in fixturenames_closure:
                    fixturenames_closure.append(arg)
        merge(fixturenames)
        arg2fixturedefs = {}
        lastlen = -1
        # iterate until no new names are discovered (fixtures may depend
        # on further fixtures)
        while lastlen != len(fixturenames_closure):
            lastlen = len(fixturenames_closure)
            for argname in fixturenames_closure:
                if argname in arg2fixturedefs:
                    continue
                fixturedefs = self.getfixturedefs(argname, parentid)
                if fixturedefs:
                    arg2fixturedefs[argname] = fixturedefs
                    merge(fixturedefs[-1].argnames)
        return fixturenames_closure, arg2fixturedefs

    def pytest_generate_tests(self, metafunc):
        for argname in metafunc.fixturenames:
            faclist = metafunc._arg2fixturedefs.get(argname)
            if faclist:
                fixturedef = faclist[-1]
                if fixturedef.params is not None:
                    func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
                    # skip directly parametrized arguments
                    argnames = func_params[0]
                    if not isinstance(argnames, (tuple, list)):
                        argnames = [x.strip() for x in argnames.split(",") if x.strip()]
                    if argname not in func_params and argname not in argnames:
                        metafunc.parametrize(argname, fixturedef.params,
                                             indirect=True, scope=fixturedef.scope,
                                             ids=fixturedef.ids)
            else:
                continue # will raise FixtureLookupError at setup time

    def pytest_collection_modifyitems(self, items):
        # separate parametrized setups
        items[:] = reorder_items(items)

    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
        """Scan *node_or_obj* for fixture definitions and register them."""
        if nodeid is not NOTSET:
            holderobj = node_or_obj
        else:
            holderobj = node_or_obj.obj
            nodeid = node_or_obj.nodeid
        if holderobj in self._holderobjseen:
            return
        self._holderobjseen.add(holderobj)
        autousenames = []
        for name in dir(holderobj):
            obj = getattr(holderobj, name, None)
            if not callable(obj):
                continue
            # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
            # or are "@pytest.fixture" marked
            marker = getfixturemarker(obj)
            if marker is None:
                if not name.startswith(self._argprefix):
                    continue
                marker = defaultfuncargprefixmarker
                name = name[len(self._argprefix):]
            elif not isinstance(marker, FixtureFunctionMarker):
                # magic globals with __getattr__ might have got us a wrong
                # fixture attribute
                continue
            else:
                assert not name.startswith(self._argprefix)
            fixturedef = FixtureDef(self, nodeid, name, obj,
                                    marker.scope, marker.params,
                                    yieldctx=marker.yieldctx,
                                    unittest=unittest, ids=marker.ids)
            faclist = self._arg2fixturedefs.setdefault(name, [])
            if fixturedef.has_location:
                faclist.append(fixturedef)
            else:
                # fixturedefs with no location are at the front
                # so this inserts the current fixturedef after the
                # existing fixturedefs from external plugins but
                # before the fixturedefs provided in conftests.
                i = len([f for f in faclist if not f.has_location])
                faclist.insert(i, fixturedef)
            if marker.autouse:
                autousenames.append(name)
        if autousenames:
            self._nodeid_and_autousenames.append((nodeid or '', autousenames))

    def getfixturedefs(self, argname, nodeid):
        """Return the tuple of FixtureDefs for *argname* visible at
        *nodeid*, or None if the name is unknown."""
        try:
            fixturedefs = self._arg2fixturedefs[argname]
        except KeyError:
            return None
        else:
            return tuple(self._matchfactories(fixturedefs, nodeid))

    def _matchfactories(self, fixturedefs, nodeid):
        # a fixturedef is visible when its baseid is a prefix of the node id
        for fixturedef in fixturedefs:
            if nodeid.startswith(fixturedef.baseid):
                yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
    """Fail (without a python traceback) showing the fixture function's
    source and its "path:lineno" location."""
    fs, lineno = getfslineno(fixturefunc)
    location = "%s:%s" % (fs, lineno+1)
    source = py.code.Source(fixturefunc)
    pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
                pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
    """Invoke a fixture function and return its value.

    For yield_fixtures (*yieldctx*), advance the generator once to get the
    value and register a finalizer that resumes it for teardown; plain
    fixtures are simply called (and must not be generators).
    """
    if yieldctx:
        if not is_generator(fixturefunc):
            fail_fixturefunc(fixturefunc,
                msg="yield_fixture requires yield statement in function")
        iter = fixturefunc(**kwargs)
        # support both py3 (__next__) and py2 (next) generator protocols
        next = getattr(iter, "__next__", None)
        if next is None:
            next = getattr(iter, "next")
        res = next()
        def teardown():
            # resuming the generator runs the code after its yield;
            # a second yield is an error
            try:
                next()
            except StopIteration:
                pass
            else:
                fail_fixturefunc(fixturefunc,
                    "yield_fixture function has more than one 'yield'")
        request.addfinalizer(teardown)
    else:
        if is_generator(fixturefunc):
            fail_fixturefunc(fixturefunc,
                msg="pytest.fixture functions cannot use ``yield``. "
                    "Instead write and return an inner function/generator "
                    "and let the consumer call and iterate over it.")
        res = fixturefunc(**kwargs)
    return res
class FixtureDef:
    """ A container for a factory definition. """
    def __init__(self, fixturemanager, baseid, argname, func, scope, params,
                 yieldctx, unittest=False, ids=None):
        self._fixturemanager = fixturemanager
        # A baseid of None marks a fixture with no location (e.g. provided
        # by an external plugin); see has_location below.
        self.baseid = baseid or ''
        self.has_location = baseid is not None
        self.func = func
        self.argname = argname
        self.scope = scope
        self.scopenum = scopes.index(scope or "function")
        self.params = params
        # unittest fixture functions are methods: skip the 'self' argument.
        startindex = unittest and 1 or None
        self.argnames = getfuncargnames(func, startindex=startindex)
        self.yieldctx = yieldctx
        self.unittest = unittest
        self.ids = ids
        # Finalizers registered against this fixture, run LIFO by finish().
        self._finalizer = []
    def addfinalizer(self, finalizer):
        # Register a callable to run when this fixture is torn down.
        self._finalizer.append(finalizer)
    def finish(self):
        # Run all finalizers (LIFO) and invalidate the cached value.
        try:
            while self._finalizer:
                func = self._finalizer.pop()
                func()
        finally:
            # even if finalization fails, we invalidate
            # the cached fixture value
            if hasattr(self, "cached_result"):
                del self.cached_result
    def execute(self, request):
        """Return the fixture value for *request*, computing it if not cached
        for the current parametrization."""
        # get required arguments and register our own finish()
        # with their finalization
        kwargs = {}
        for argname in self.argnames:
            fixturedef = request._get_active_fixturedef(argname)
            result, arg_cache_key, exc = fixturedef.cached_result
            request._check_scope(argname, request.scope, fixturedef.scope)
            kwargs[argname] = result
            if argname != "request":
                # tear this fixture down whenever a dependency is torn down
                fixturedef.addfinalizer(self.finish)
        my_cache_key = request.param_index
        cached_result = getattr(self, "cached_result", None)
        if cached_result is not None:
            result, cache_key, err = cached_result
            if my_cache_key == cache_key:
                # cached failures are re-raised, cached values returned
                if err is not None:
                    py.builtin._reraise(*err)
                else:
                    return result
            # we have a previous but differently parametrized fixture instance
            # so we need to tear it down before creating a new one
            self.finish()
            assert not hasattr(self, "cached_result")
        fixturefunc = self.func
        if self.unittest:
            if request.instance is not None:
                # bind the unbound method to the TestCase instance
                fixturefunc = self.func.__get__(request.instance)
        else:
            # the fixture function needs to be bound to the actual
            # request.instance so that code working with "self" behaves
            # as expected.
            if request.instance is not None:
                fixturefunc = getimfunc(self.func)
                if fixturefunc != self.func:
                    fixturefunc = fixturefunc.__get__(request.instance)
        try:
            result = call_fixture_func(fixturefunc, request, kwargs,
                                       self.yieldctx)
        except Exception:
            # cache the failure so repeated requests re-raise it
            self.cached_result = (None, my_cache_key, sys.exc_info())
            raise
        self.cached_result = (result, my_cache_key, None)
        return result
    def __repr__(self):
        return ("<FixtureDef name=%r scope=%r baseid=%r >" %
                (self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
    """ return number of arguments used up by mock arguments (if any) """
    patchings = getattr(function, "patchings", None)
    if not patchings:
        return 0
    mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
    if mock is None:
        # mock is not importable; assume every patching consumes an argument
        return len(patchings)
    return sum(1 for p in patchings
               if not p.attribute_name and p.new is mock.DEFAULT)
def getfuncargnames(function, startindex=None):
    """Return the argument names of *function* that should be filled by
    fixtures: bound 'self', mock-patch arguments, partial-bound arguments
    and defaulted arguments are excluded."""
    # XXX merge with main.py's varnames
    #assert not inspect.isclass(function)
    realfunction = function
    # unwrap functools.wraps-style decorator chains
    while hasattr(realfunction, "__wrapped__"):
        realfunction = realfunction.__wrapped__
    if startindex is None:
        # skip 'self' for bound methods
        startindex = inspect.ismethod(function) and 1 or 0
    if realfunction != function:
        # arguments injected by @mock.patch decorators are not fixture args
        startindex += num_mock_patch_args(function)
        function = realfunction
    if isinstance(function, functools.partial):
        argnames = inspect.getargs(py.code.getrawcode(function.func))[0]
        partial = function
        # drop positional and keyword arguments already fixed by the partial
        argnames = argnames[len(partial.args):]
        if partial.keywords:
            for kw in partial.keywords:
                argnames.remove(kw)
    else:
        argnames = inspect.getargs(py.code.getrawcode(function))[0]
    # func_defaults is the Python 2 spelling, __defaults__ the Python 3 one
    defaults = getattr(function, 'func_defaults',
                       getattr(function, '__defaults__', None)) or ()
    numdefaults = len(defaults)
    if numdefaults:
        # arguments with defaults are not requested as fixtures
        return tuple(argnames[startindex:-numdefaults])
    return tuple(argnames[startindex:])
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
    """Reorder *items* so tests sharing parametrized fixture keys are
    grouped, minimizing high-scope fixture setups and teardowns."""
    argkeys_cache = {}
    for scopenum in range(scopenum_function):
        per_scope = argkeys_cache[scopenum] = {}
        for item in items:
            keys = set(get_parametrized_fixture_keys(item, scopenum))
            if keys:
                per_scope[item] = keys
    return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
    """Recursively group *items* by shared parametrized-fixture keys of the
    given scope; *ignore* holds keys already handled at this scope."""
    # function scope (or trivially small lists) need no reordering
    if scopenum >= scopenum_function or len(items) < 3:
        return items
    items_done = []
    while 1:
        items_before, items_same, items_other, newignore = \
            slice_items(items, ignore, argkeys_cache[scopenum])
        # reorder the leading unmatched part at the next finer scope
        items_before = reorder_items_atscope(
            items_before, ignore, argkeys_cache, scopenum+1)
        if items_same is None:
            # nothing to reorder in this scope
            assert items_other is None
            return items_done + items_before
        items_done.extend(items_before)
        # keep processing the grouped items followed by the remainder
        items = items_same + items_other
        ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
    """Split *items* around the first item that uses a fixture argkey of
    the current scope which has not been seen yet.

    Returns ``(items_before, items_same, items_other, new_ignore)``, or
    ``(items, None, None, None)`` when no slicing key is found.
    """
    if not scoped_argkeys_cache:  # no work to do at this scope
        return items, None, None, None
    it = iter(items)
    # first find a slicing key
    for i, item in enumerate(it):
        argkeys = scoped_argkeys_cache.get(item)
        if argkeys is None:
            continue
        argkeys = argkeys.difference(ignore)
        if not argkeys:
            continue
        # found a slicing key: partition the rest of the list around it
        slicing_argkey = argkeys.pop()
        items_before = items[:i]
        items_same = [item]
        items_other = []
        for item in it:
            argkeys = scoped_argkeys_cache.get(item)
            if argkeys and slicing_argkey in argkeys and \
                    slicing_argkey not in ignore:
                items_same.append(item)
            else:
                items_other.append(item)
        newignore = ignore.copy()
        newignore.add(slicing_argkey)
        return items_before, items_same, items_other, newignore
    return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
    """Yield a key for each parametrized argument of *item* whose fixture
    scope matches *scopenum*."""
    assert scopenum < scopenum_function  # function scope is never keyed
    try:
        cs = item.callspec
    except AttributeError:
        return  # item is not parametrized at all
    # cs.indices.items() yields argnames in arbitrary order, but different
    # items may order their arguments differently anyway, so that is fine.
    for argname, param_index in cs.indices.items():
        if cs._arg2scopenum[argname] != scopenum:
            continue
        if scopenum == 0:    # session
            yield (argname, param_index)
        elif scopenum == 1:  # module
            yield (argname, param_index, item.fspath)
        elif scopenum == 2:  # class
            yield (argname, param_index, item.fspath, item.cls)
def xunitsetup(obj, name):
    """Return attribute *name* of *obj*, or None if the attribute carries
    a pytest fixture marker."""
    meth = getattr(obj, name, None)
    return meth if getfixturemarker(meth) is None else None
def getfixturemarker(obj):
    """Return the fixture marker attached to *obj*, or None.

    Any exception raised during the attribute access is treated as "not a
    fixture" (some proxy objects, e.g. flask's ``request``, raise on any
    attribute lookup); KeyboardInterrupt is always re-raised.
    """
    try:
        marker = getattr(obj, "_pytestfixturefunction", None)
    except KeyboardInterrupt:
        raise
    except Exception:
        marker = None
    return marker
# Maps a fixture scope name to the collector node class used to locate the
# matching ancestor node; "session" has no class and is special-cased in
# get_scope_node below.
scopename2class = {
    'class': Class,
    'module': Module,
    'function': pytest.Item,
}
def get_scope_node(node, scope):
    """Return the ancestor collection node of *node* matching *scope*."""
    cls = scopename2class.get(scope)
    if cls is not None:
        return node.getparent(cls)
    if scope == "session":
        return node.session
    raise ValueError("unknown scope")
| mhils/pytest | _pytest/python.py | Python | mit | 85,260 | [
"VisIt"
] | 8c51beaf7c75b56665ad8a208acde34002aa2ea425cb4c96a468e265ea0c0291 |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Unittests for Bio.Align.Applications interface for MAFFT
This code is part of the Biopython distribution and governed by its
license. Please see the LICENSE file that should have been included
as part of this package.
"""
import sys
import os
import unittest
import subprocess
from Bio import MissingExternalDependencyError
from Bio.Align.Applications import MafftCommandline
# Try to avoid problems when the OS is in another language
os.environ['LANG'] = 'C'

mafft_exe = None
if sys.platform == "win32":
    raise MissingExternalDependencyError("Testing with MAFFT not implemented on Windows yet")
else:
    from Bio._py3k import getoutput
    output = getoutput("mafft -help")
    # "not found" comes from the shell when the binary is missing; a real
    # MAFFT prints its own name in its usage text.
    if "not found" not in output and "MAFFT" in output:
        mafft_exe = "mafft"
if not mafft_exe:
    raise MissingExternalDependencyError(
        "Install MAFFT if you want to use the Bio.Align.Applications wrapper.")
def check_mafft_version(mafft_exe):
    """Run ``mafft --help`` and return (major_version, version_string).

    Raises MissingExternalDependencyError if MAFFT appears broken, is older
    than v6, or its version cannot be determined from the help output.
    """
    child = subprocess.Popen("%s --help" % mafft_exe,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True,
                             shell=(sys.platform!="win32"))
    stdoutdata, stderrdata = child.communicate()
    # MAFFT writes its banner to stderr, so inspect both streams.
    output = stdoutdata + "\n" + stderrdata
    # NOTE(review): return_code is captured but never used.
    return_code = child.returncode
    del child
    if "correctly installed?" in output \
    or "mafft binaries have to be installed" in output:
        raise MissingExternalDependencyError(
            "MAFFT does not seem to be correctly installed.")
    # e.g. "MAFFT version 5.732 (2005/09/14)\n"
    # e.g. " MAFFT v6.717b (2009/12/03)\n"
    for marker in ["MAFFT version", "MAFFT v"]:
        index = output.find(marker)
        if index == -1:
            continue
        version = output[index+len(marker):].strip().split(None, 1)[0]
        major = int(version.split(".", 1)[0])
        if major < 6:
            raise MissingExternalDependencyError("Test requires MAFFT v6 or "
                                                 "later (found %s)." % version)
        return (major, version)
    raise MissingExternalDependencyError("Couldn't determine MAFFT version.")
# Determine the installed MAFFT version up front; this doubles as a check
# that the binary actually runs.
version_major, version_string = check_mafft_version(mafft_exe)
class MafftApplication(unittest.TestCase):
    """Round-trip tests driving a real MAFFT binary via MafftCommandline."""

    def setUp(self):
        # Small FASTA file shipped with the test suite.
        self.infile1 = "Fasta/f002"

    def tearDown(self):
        # The --treeout option leaves a guide tree file beside the input.
        if os.path.isfile("Fasta/f002.tree"):
            os.remove("Fasta/f002.tree")

    def test_Mafft_simple(self):
        """Simple round-trip through app with infile.

        Result passed to stdout.
        """
        # Use a keyword argument at init,
        cmdline = MafftCommandline(mafft_exe, input=self.infile1)
        # repr() must eval back to an equivalent command line
        self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
        stdoutdata, stderrdata = cmdline()
        self.assertTrue(stdoutdata.startswith(">gi|1348912|gb|G26680|G26680"))
        self.assertTrue("Progressive alignment ..." in stderrdata, stderrdata)
        self.assertTrue("$#=0" not in stderrdata)

    def test_Mafft_with_options(self):
        """Simple round-trip through app with infile and options.

        Result passed to stdout.
        """
        cmdline = MafftCommandline(mafft_exe)
        cmdline.set_parameter("input", self.infile1)
        cmdline.set_parameter("maxiterate", 100)
        cmdline.set_parameter("--localpair", True)
        self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
        stdoutdata, stderrdata = cmdline()
        self.assertTrue(stdoutdata.startswith(">gi|1348912|gb|G26680|G26680"))
        self.assertTrue("$#=0" not in stderrdata)

    def test_Mafft_with_Clustalw_output(self):
        """Simple round-trip through app with clustal output"""
        cmdline = MafftCommandline(mafft_exe)
        # Use some properties:
        cmdline.input = self.infile1
        cmdline.clustalout = True
        self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
        stdoutdata, stderrdata = cmdline()
        # e.g. "CLUSTAL format alignment by MAFFT ..."
        # or "CLUSTAL (-like) formatted alignment by MAFFT FFT-NS-2 (v6.240)"
        self.assertTrue(stdoutdata.startswith("CLUSTAL"), stdoutdata)
        self.assertTrue("$#=0" not in stderrdata)

    if version_major >= 7:
        # The --phylipout and --namelength options require MAFFT v7+.

        def test_Mafft_with_PHYLIP_output(self):
            """Simple round-trip through app with PHYLIP output"""
            cmdline = MafftCommandline(mafft_exe, input=self.infile1,
                                       phylipout=True)
            self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
            stdoutdata, stderrdata = cmdline()
            # e.g. " 3 706\n" or " 3 681" but allow some variation in the column count
            self.assertTrue(stdoutdata.startswith(" 3 68") or
                            stdoutdata.startswith(" 3 69") or
                            stdoutdata.startswith(" 3 70"), stdoutdata)
            # PHYLIP truncates record names, so only the leading part remains.
            self.assertTrue("gi|1348912 " in stdoutdata,
                            stdoutdata)
            self.assertTrue("gi|1348912|gb|G26680|G26680" not in stdoutdata,
                            stdoutdata)
            self.assertTrue("$#=0" not in stderrdata)

        def test_Mafft_with_PHYLIP_namelength(self):
            """Check PHYLIP with --namelength"""
            cmdline = MafftCommandline(mafft_exe, input=self.infile1,
                                       phylipout=True, namelength=50)
            self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
            stdoutdata, stderrdata = cmdline()
            # e.g. " 3 706\n" or " 3 681" but allow some variation in the column count
            self.assertTrue(stdoutdata.startswith(" 3 68") or
                            stdoutdata.startswith(" 3 69") or
                            stdoutdata.startswith(" 3 70"), stdoutdata)
            # With a wider name column the full identifier is preserved.
            self.assertTrue("gi|1348912|gb|G26680|G26680" in stdoutdata,
                            stdoutdata)
            self.assertTrue("$#=0" not in stderrdata)

    def test_Mafft_with_complex_command_line(self):
        """Round-trip with complex command line."""
        cmdline = MafftCommandline(mafft_exe)
        cmdline.set_parameter("input", self.infile1)
        cmdline.set_parameter("--localpair", True)
        cmdline.set_parameter("--weighti", 4.2)
        cmdline.set_parameter("retree", 5)
        cmdline.set_parameter("maxiterate", 200)
        cmdline.set_parameter("--nofft", True)
        cmdline.set_parameter("op", 2.04)
        cmdline.set_parameter("--ep", 0.51)
        cmdline.set_parameter("--lop", 0.233)
        cmdline.set_parameter("lep", 0.2)
        cmdline.set_parameter("--reorder", True)
        cmdline.set_parameter("--treeout", True)
        cmdline.set_parameter("nuc", True)
        self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
        # All options must render in canonical order regardless of how they
        # were spelled when set (with or without leading dashes).
        self.assertEqual(str(cmdline), mafft_exe
                         + " --localpair --weighti 4.2 --retree 5 "
                         + "--maxiterate 200 --nofft --op 2.04 --ep 0.51"
                         + " --lop 0.233 --lep 0.2 --reorder --treeout"
                         + " --nuc Fasta/f002")
        stdoutdata, stderrdata = cmdline()
        self.assertTrue(stdoutdata.startswith(">gi|1348912|gb|G26680|G26680"))
        self.assertTrue("$#=0" not in stderrdata)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_Mafft_tool.py | Python | gpl-2.0 | 7,614 | [
"Biopython"
] | 42da166de48abd4da60b403221bf9a698e549412d21429288c0633731df9e143 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************************
espressopp.analysis.MeanSquareInternalDist
******************************************
.. function:: espressopp.analysis.MeanSquareInternalDist(system, chainlength, start_pid)
:param system:
:param chainlength:
:param start_pid:
:type system:
:type chainlength:
:type start_pid:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.ConfigsParticleDecomp import *
from _espressopp import analysis_MeanSquareInternalDist
class MeanSquareInternalDistLocal(ConfigsParticleDecompLocal, analysis_MeanSquareInternalDist):
    """Worker-side part of the mean-square internal distance analysis,
    backed by the C++ analysis_MeanSquareInternalDist object."""
    def __init__(self, system, chainlength, start_pid=0):
        # Only construct the C++ object on ranks that belong to the active
        # PMI CPU group (or everywhere when PMI is inactive).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, analysis_MeanSquareInternalDist, system, chainlength, start_pid)
if pmi.isController:
    class MeanSquareInternalDist(ConfigsParticleDecomp):
        """Controller-side proxy that forwards calls to the Local objects
        on the workers via PMI."""
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.analysis.MeanSquareInternalDistLocal',
            pmiproperty = [ 'print_progress' ]
        )
| govarguz/espressopp | src/analysis/MeanSquareInternalDist.py | Python | gpl-3.0 | 2,007 | [
"ESPResSo"
] | 389345040b2b9cf69b41cabaf0313eab5b31571a6dc377d5e71798357aae96cd |
from __future__ import print_function, division
import os, unittest, numpy as np
from timeit import default_timer as timer
from pyscf.nao import mf as mf_c
from pyscf.nao.m_ao_eval import ao_eval
class KnowValues(unittest.TestCase):

    def test_ao_eval_speed(self):
        """ Test the computation of atomic orbitals in coordinate space """
        dname = os.path.dirname(os.path.abspath(__file__))
        # 'water' test data files must be present next to this test module
        mf = mf_c(verbosity=0, label='water', cd=dname, gen_pb=False, force_gamma=True, Ecut=20)
        g = mf.mesh3d.get_3dgrid()
        t0 = timer()
        oc2v1 = mf.comp_aos_den(g.coords)
        t1 = timer()
        oc2v2 = mf.comp_aos_py(g.coords)
        t2 = timer()
        # report the timing of both implementations
        print(__name__, 't1 t2: ', t1-t0, t2-t1)
        print(abs(oc2v1-oc2v2).sum()/oc2v2.size, (abs(oc2v1-oc2v2).max()))
        # both implementations must agree within tolerance
        self.assertTrue(np.allclose(oc2v1, oc2v2, atol=3.5e-5))
if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0204_ao_eval_speed.py | Python | apache-2.0 | 896 | [
"PySCF"
] | 72c6aba4bf5543bfe1480da212d59c242047c790b0975eec2aa63fb46b6b5fdc |
# NOTE: This example uses the next generation Twilio helper library - for more
# information on how to download and install this version, visit
# https://www.twilio.com/docs/libraries/python
import os
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account = os.environ['TWILIO_ACCOUNT_SID']
token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account, token)

# One binding per delivery channel: SMS and Facebook Messenger.
binding1 = '{"binding_type":"sms","address":"+15555555555"}'
binding2 = '{"binding_type":' + \
    '"facebook-messenger","address":"123456789123"}'

# Send the same body through every matching binding of the Notify service.
notification = client.notify.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .notifications.create(to_binding=[binding1, binding2], body="Hello Bob")

print(notification)
"VisIt"
] | 25b95f89b287480f3a8e3f107f4794f43ddb63007aaf64f6c206c3c2acc93ad3 |
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.exceptions import H2OValueError
from h2o.model.extensions import has_extension
from h2o.model.model_base import ModelBase
# noinspection PyUnresolvedReferences
from h2o.utils.compatibility import * # NOQA
from h2o.utils.shared_utils import _colmean
from h2o.utils.typechecks import assert_is_type
class H2ORegressionModel(ModelBase):
    """Model category class for regression models."""

    def _make_model(self):
        # Factory hook used by the ModelBase machinery to build an empty
        # instance of the right model category.
        return H2ORegressionModel()

    def plot(self, timestep="AUTO", metric="AUTO", save_plot_path=None, **kwargs):
        """
        Plots training set (and validation set if available) scoring history for an H2ORegressionModel. The timestep
        and metric arguments are restricted to what is available in its scoring history.

        :param timestep: A unit of measurement for the x-axis.
        :param metric: A unit of measurement for the y-axis.
        :param save_plot_path: a path to save the plot via using matplotlib function savefig
        :returns: Object that contains the resulting scoring history plot (can be accessed using result.figure()).

        :examples:

        >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
        >>> r = cars[0].runif()
        >>> train = cars[r > .2]
        >>> valid = cars[r <= .2]
        >>> response_col = "economy"
        >>> distribution = "gaussian"
        >>> predictors = ["displacement","power","weight","acceleration","year"]
        >>> gbm = H2OGradientBoostingEstimator(nfolds=3,
        ...                                    distribution=distribution,
        ...                                    fold_assignment="Random")
        >>> gbm.train(x=predictors,
        ...           y=response_col,
        ...           training_frame=train,
        ...           validation_frame=valid)
        >>> gbm.plot(timestep="AUTO", metric="AUTO",)
        """
        if not has_extension(self, 'ScoringHistory'):
            raise H2OValueError("Scoring history plot is not available for this type of model (%s)." % self.algo)
        valid_metrics = self._allowed_metrics('regression')
        if valid_metrics is not None:
            # BUG FIX: the trailing ', "metric ... must be one of %s" % ...'
            # formed a discarded tuple, so the message was dead code;
            # assert_is_type raises its own informative error on mismatch.
            assert_is_type(metric, 'AUTO', *valid_metrics)
        if metric == "AUTO":
            metric = self._default_metric('regression') or 'AUTO'
        self.scoring_history_plot(timestep=timestep, metric=metric, save_plot_path=save_plot_path, **kwargs)
def _mean_var(frame, weights=None):
    """
    Compute the (weighted) mean and variance.

    :param frame: Single column H2OFrame
    :param weights: optional weights column
    :returns: The (weighted) mean and variance
    """
    # NOTE(review): *weights* is accepted but ignored; the returned values
    # are the unweighted column mean and variance.
    return _colmean(frame), frame.var()
def h2o_mean_absolute_error(y_actual, y_predicted, weights=None):
    """
    Mean absolute error regression loss.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :returns: mean absolute error loss (best is 0.0).
    """
    # raises if the two frames have mismatched dimensions
    ModelBase._check_targets(y_actual, y_predicted)
    # NOTE(review): *weights* is accepted but ignored - plain mean of |error|.
    return _colmean((y_predicted - y_actual).abs())
def h2o_mean_squared_error(y_actual, y_predicted, weights=None):
    """
    Mean squared error regression loss

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :returns: mean squared error loss (best is 0.0).
    """
    # raises if the two frames have mismatched dimensions
    ModelBase._check_targets(y_actual, y_predicted)
    # NOTE(review): *weights* is accepted but ignored - plain mean of error^2.
    return _colmean((y_predicted - y_actual) ** 2)
def h2o_median_absolute_error(y_actual, y_predicted):
    """
    Median absolute error regression loss

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :returns: median absolute error loss (best is 0.0)
    """
    ModelBase._check_targets(y_actual, y_predicted)
    residuals = (y_predicted - y_actual).abs()
    return residuals.median()
def h2o_explained_variance_score(y_actual, y_predicted, weights=None):
    """
    Explained variance regression score function.

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights
    :returns: the explained variance score.
    """
    ModelBase._check_targets(y_actual, y_predicted)
    # score = 1 - Var(residuals) / Var(actuals)
    _, numerator = _mean_var(y_actual - y_predicted, weights)
    _, denominator = _mean_var(y_actual, weights)
    if denominator == 0.0:
        return 1. if numerator == 0 else 0.  # 0/0 => 1, otherwise, 0
    return 1 - numerator / denominator
def h2o_r2_score(y_actual, y_predicted, weights=1.):
    """
    R-squared (coefficient of determination) regression score function

    :param y_actual: H2OFrame of actual response.
    :param y_predicted: H2OFrame of predicted response.
    :param weights: (Optional) sample weights; defaults to the scalar 1.0
        (i.e. unweighted), unlike the other metrics here which default to None.
    :returns: R-squared (best is 1.0, lower is worse).
    """
    ModelBase._check_targets(y_actual, y_predicted)
    # weighted residual sum of squares over weighted total sum of squares
    numerator = (weights * (y_actual - y_predicted) ** 2).sum().flatten()
    denominator = (weights * (y_actual - _colmean(y_actual)) ** 2).sum().flatten()
    if denominator == 0.0:
        return 1. if numerator == 0. else 0.  # 0/0 => 1, else 0
    return 1 - numerator / denominator
| h2oai/h2o-3 | h2o-py/h2o/model/regression.py | Python | apache-2.0 | 5,424 | [
"Gaussian"
] | f95d521daf3310ffb1ebbdbb301656f3fc7452d42a63423cd9af81f1cf80561a |
import mysql.connector,string,math
from mysql.connector import errorcode
# Default connection settings for the local "neuron" MySQL database.
# NOTE(review): credentials are hard-coded here; prefer environment
# variables or an untracked config file for real deployments.
DB_CONFIG={
    'user':'zt',
    'password':'123456',
    'host':'localhost',
    'database':'neuron'
}
def get_db_con(conf=DB_CONFIG):
    """Open a MySQL connection using *conf*.

    NOTE(review): on failure an error message is printed and the function
    implicitly returns None, so callers will then fail on attribute access.
    """
    try:
        conn=mysql.connector.connect(**conf)
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exists")
        else:
            print(err)
    else:
        return conn
def create_tables(table_name_base,quantile_points,conn=None):
    """Create all tables for one recording session named *table_name_base*:
    raw spikes, behaviour intervals, per-neuron statistics, and the two
    pairwise correlation tables (quantile columns sized from
    *quantile_points*)."""
    # raw spike table: one row per (neuron id, spike time)
    data_sql=("CREATE TABLE `"+table_name_base+"` ("
        " `nid` int(11) NOT NULL,"
        " `time` float NOT NULL,"
        " PRIMARY KEY (`nid`,`time`)"
        ") ENGINE=MyISAM DEFAULT CHARSET=utf8")
    # behaviour intervals with type, begin/end/duration and gap to the next
    beh_sql=("CREATE TABLE `"+table_name_base+"_beh` ("
        " `id` int(11) NOT NULL AUTO_INCREMENT,"
        " `type` smallint(6) NOT NULL,"
        " `begin` float NOT NULL,"
        " `end` float NOT NULL,"
        " `duration` float NOT NULL,"
        " `rest` float DEFAULT NULL COMMENT 'Time between this end to next begin, i.e. begin(i)+rest(i)=begin(i+1).',"
        " PRIMARY KEY (`id`)"
        ") ENGINE=MyISAM AUTO_INCREMENT=107 DEFAULT CHARSET=utf8")
    # per-neuron summary: spike counts, time span and inter-spike moments
    neuron_sql=("CREATE TABLE `"+table_name_base+"_neuron` ("
        " `nid` int(11) NOT NULL,"
        " `count` int(11) DEFAULT NULL,"
        " `beg_time` float DEFAULT NULL,"
        " `end_time` float DEFAULT NULL,"
        " `duration` float DEFAULT NULL,"
        " `dif_min` float DEFAULT NULL,"
        " `dif_max` float DEFAULT NULL,"
        " `dif_mode` float DEFAULT NULL COMMENT 'most often value',"
        " `dif_mode_count` int(11) DEFAULT NULL,"
        " `dif_mean` double DEFAULT NULL COMMENT 'First moment',"
        " `dif_std` double DEFAULT NULL COMMENT 'Square root of Second central moment.\n(dif_cm2=std^2)',"
        " `dif_cm3` double DEFAULT NULL COMMENT 'Third central moment.\nSkewness=cm3/std^3 (third standardized moment).',"
        " `dif_cm4` double DEFAULT NULL COMMENT 'Fourth central moment.\nKurtosis=cm4/std^4 (fourth standardized moment) WITHOUT \"-3\".\n(kurtosis >= skewness^2 + 1).\n',"
        " PRIMARY KEY (`nid`),"
        " UNIQUE KEY `nid_UNIQUE` (`nid`)"
        ") ENGINE=InnoDB DEFAULT CHARSET=utf8")
    # enough digits to give every quantile point a distinct zero-padded name
    n_dig=int(math.ceil(-math.log10(min(quantile_points))))
    scale=10**n_dig
    q_base_sql=" `q_%0"+str(n_dig)+"d` float NULL, "
    # common shape of both correlation tables; one q_* column per quantile
    col_sql_base=("CREATE TABLE `"+table_name_base+"` ("
        " `first` int(11) NOT NULL,"
        " `second` int(11) NOT NULL,"
        " `count` int(11) DEFAULT NULL,"
        " `zero_count` int(11) DEFAULT NULL,"
        " `min` float DEFAULT NULL,"
        " `max` float DEFAULT NULL,"
        " `mode` float DEFAULT NULL,"
        " `mode_count` int(11) DEFAULT NULL,"
        " `mean` float DEFAULT NULL,"
        " `std` double DEFAULT NULL,"
        " `cm3` double DEFAULT NULL,"
        " `cm4` double DEFAULT NULL,"
        +"\n"+string.join([(q_base_sql%(v*scale)) for v in quantile_points],'\n')+"\n"
        " PRIMARY KEY (`first`,`second`)"
        ") ENGINE=MyISAM DEFAULT CHARSET=utf8")
    col_j_sql=col_sql_base.replace(table_name_base,table_name_base+'_col_j')+" COMMENT='CA (co-active): smallest time interval from an activation of the first neuron to the nearest activation of the second neuron, without jumping any other activation of the first neuron. i.e. the second neuron''s activation is the nearest one AFTER first neuron''s.'"
    col_nj_sql=col_sql_base.replace(table_name_base,table_name_base+'_col_nj')+" COMMENT='CANJ (co-active no jump): smallest time interval from an activation of the first neuron to the nearest activation of the second neuron, without jumping any other activation of the first neuron. i.e. the second neuron''s activation is the nearest one AFTER first neuron''s, and the first activation of the first neuron is also the nearest one BEFORE the second''s.'"
    if conn==None:
        con=get_db_con()
    else:
        con=conn
    try:
        cur=con.cursor()
        print 'creating raw data table'
        cur.execute(data_sql)
        print 'creating beh data table'
        cur.execute(beh_sql)
        print 'creating neuron info table'
        cur.execute(neuron_sql)
        print 'creating correlation (jump) info table'#,col_j_sql
        cur.execute(col_j_sql)
        print 'creating correlation (no-jump) info table'#,col_nj_sql
        cur.execute(col_nj_sql)
    except mysql.connector.Error as err:
        con.rollback()
        print 'Error:',err
    else:
        con.commit()
    finally:
        # only close connections we opened ourselves
        if conn==None:
            con.close()
def insert_template(data,table,conn=None):
    """Bulk-insert every tuple of *data* into *table*, committing at the end.

    NOTE(review): the table name is spliced directly into the SQL text, so
    it must come from trusted code; the row values themselves are passed as
    parameterized placeholders.
    """
    length=len(data[0])
    insert_sql="INSERT INTO "+table+" VALUES(%s"+",%s"*(length-1)+")"
    print insert_sql
    if conn==None:
        con=get_db_con()
    else:
        con=conn
    cursor=con.cursor()
    count=0
    for t in data:
        #print t
        cursor.execute(insert_sql,t)
        count+=1
        # progress report every 10000 rows
        if count%10000==0:
            print count,'pieces processed'
    print count,'pieces processed'
    con.commit()
    # only close connections we opened ourselves
    if conn==None:
        con.close()
def import_to_db(file_name,func_read,table_name):
    """Read *file_name* with *func_read* and insert the resulting rows into
    *table_name*; returns the parsed data for further processing."""
    print 'reading',file_name
    data=func_read(file_name)
    print 'finish reading :',len(data),'in all'
    print 'inserting',table_name
    insert_template(data,table_name);
    print 'finish inserting'
    return data
def init_neuron(table_name_base,conn=None):
    """Populate `<base>_neuron` with per-neuron spike counts and time spans
    derived from the raw data table."""
    #put initial values to the neuron table using the data in raw data table.
    sql=("insert into `"+table_name_base+"_neuron`(nid,count,beg_time,end_time,duration) "
        "select nid,count(*),min(time),max(time), max(time)-min(time) from `"+table_name_base+"` group by nid order by nid")
    if conn==None:
        con=get_db_con();
    else:
        con=conn
    print sql
    cur=con.cursor()
    # cur.execute("select nid,count(*),min(time),max(time), max(time)-min(time) from `"+table_name_base+"` group by nid order by nid")
    # for line in cur:
    #     print line
    try:
        cur.execute(sql);
    except mysql.connector.Error as err:
        con.rollback()
        print 'Error:',err
    else:
        con.commit();
    # only close connections we opened ourselves
    if conn==None:
        con.close()
def update_neuron_dif(data,table_name_base,conn=None):
    """Write per-neuron inter-spike-interval statistics into `<base>_neuron`.

    NOTE(review): rows of *data* are assumed to be ordered by neuron id
    starting at 1 - the id is derived from the iteration counter, not from
    the data itself.
    """
    update_sql=("update `"+table_name_base+"_neuron` set "
        "dif_min=%s,dif_max=%s,dif_mode=%s,dif_mode_count=%s,"
        "dif_mean=%s,dif_std=%s,dif_cm3=%s,dif_cm4=%s "
        "where nid=%s");
    print update_sql
    if conn==None:
        con=get_db_con();
    else:
        con=conn
    cursor=con.cursor();
    nid=0;
    try:
        for t in data:
            nid+=1
            cursor.execute(update_sql,
                (t['min'],t['max'],t['mode'],t['mode_count'],
                 t['mean'],t['std'],t['cm3'],t['cm4'],nid))
            print nid,'neuron updated.'
    except mysql.connector.Error as err:
        con.rollback()
        print 'Error:',err
    else:
        con.commit();
    # only close connections we opened ourselves
    if conn==None:
        con.close()
def insert_dif(data_mat,table_name_base,noJump,conn=None):
    """Insert the pairwise correlation matrix *data_mat* into
    `<base>_col_nj` when *noJump* is true, else `<base>_col_j`."""
    table_name=table_name_base+('_col_nj' if noJump else '_col_j')
    n=len(data_mat)
    # one placeholder per statistic plus one per quantile column
    length=len(data_mat[0][0])-1+len(data_mat[0][0]['quantile'])
    insert_sql=("insert into `"+table_name+"` values(%s,%s,%s"+",%s"*(length-1)+")")
    # print insert_sql
    if conn==None:
        con=get_db_con();
    else:
        con=conn
    cur=con.cursor()
    try:
        for i in range(n):
            for j in range(n):
                t=data_mat[i][j]
                # first/second neuron ids are 1-based in the database
                v=[i+1,j+1,t['count'],t['zero_count'],t['min'],t['max'],t['mode'],t['mode_count'],
                   t['mean'],t['std'],t['cm3'],t['cm4']]
                v.extend(x for x in t['quantile'])
                cur.execute(insert_sql,v);
    except mysql.connector.Error as err:
        con.rollback()
        print 'Error:',err
    else:
        con.commit();
    # only close connections we opened ourselves
    if conn==None:
        con.close()
if __name__=='__main__':
    # Example driver; the table creation / initialisation calls are left
    # commented out so the module stays safe to import.
    basic_table_name='r108_122911'
    #create_tables(basic_table_name)
    #after inserting some dummy data, run:
    #init_neuron(basic_table_name)
| yanxiangtianji/Neuron | dataInit/db_base.py | Python | gpl-2.0 | 7,237 | [
"NEURON"
] | 69e7cf12372acc08cd69ae172274ed71cec2b082890a4631819bf1c58f134528 |
'''
Functions over spatial regions of images.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = ['map_window', 'map_outer_window_stats', 'map_class_ids',
'map_classes']
import itertools
import numpy as np
import spectral as spy
from .algorithms import GaussianStats, iterator_ij
def get_window_bounds(nrows, ncols, height, width, i, j):
    '''Returns boundaries of an image window centered on a specified pixel.

    Arguments:

        `nrows` (int): Total number of rows in the image.

        `ncols` (int): Total number of columns in the image.

        `height` (int): Height of the desired window (in pixels).

        `width` (int): Width of the desired window (in pixels).

        `i` (int): Row index of the pixel.

        `j` (int): Column index of the pixel.

    Return value:

        A 4-tuple of ints of the form
        (row_start, row_stop, col_start, col_stop).

    The window always has shape (`height`, `width`). Near the image border,
    where there are too few pixels between the specified pixel and the edge,
    the window is shifted to stay fully inside the image, so the pixel may
    be off-center within it.

    For an alternate function that clips window pixels near the border of the
    image, see `get_window_bounds_clipped`.
    '''
    if height > nrows or width > ncols:
        raise ValueError('Window size is too large for image dimensions.')

    # Clamp the top-left corner so the full window fits inside the image.
    rmin = max(0, min(i - height // 2, nrows - height))
    cmin = max(0, min(j - width // 2, ncols - width))
    return (rmin, rmin + height, cmin, cmin + width)
def get_window_bounds_clipped(nrows, ncols, height, width, i, j):
    '''Return (row_start, row_stop, col_start, col_stop) for an image window
    centered on pixel (`i`, `j`), clipped to the image bounds.

    Unlike `get_window_bounds`, a window overlapping the image border is
    truncated, so the returned region may be smaller than
    (`height`, `width`).
    '''
    if height > nrows or width > ncols:
        raise ValueError('Window size is too large for image dimensions.')
    # Nominal (unclipped) bounds, then clamp each edge to the image.
    top = i - height // 2
    left = j - width // 2
    rmin = max(0, top)
    rmax = min(nrows, top + height)
    cmin = max(0, left)
    cmax = min(ncols, left + width)
    return (rmin, rmax, cmin, cmax)
def map_window(func, image, window, rslice=(None,), cslice=(None,),
               border='shift', dtype=None):
    '''Apply `func` over a rolling spatial window of `image`.

    Arguments:

        `func` (callable):

            Called as ``func(X, (i, j))``, where `X` is the window of image
            data (shape ``window + (N,)`` for an N-band image; possibly
            smaller near the border when `border` is "clip") and ``(i, j)``
            is the row/column of the current pixel *within* the window.

        `image` (`SpyFile` or np.ndarray):

            The image over which to apply `func`.

        `window` (int or 2-tuple of ints):

            Window size in pixels: a single value for a square window or a
            (height, width) tuple.

        `rslice`, `cslice` (tuple):

            `slice` parameters selecting the rows/columns at which `func`
            is evaluated (all rows/columns by default).

        `border` (string, default "shift"):

            "shift" keeps the window dimensions fixed and shifts the window
            inside the image near the border; "clip" truncates the window
            at the border instead.

        `dtype` (np.dtype):

            Optional dtype for the output.

    Return value:

        An np.ndarray whose leading shape follows the row/column selections
        and whose trailing shape follows the output of `func`.

    Examples:
    ---------
    To produce a new image that is a 3x3 pixel average of the input image:

    >>> f = lambda X, ij: np.mean(X.reshape((-1, X.shape[-1])), axis=0)
    >>> image_3x3 = map_window(f, image, 3)
    '''
    if isinstance(window, (list, tuple)):
        (height, width) = window[:]
    else:
        (height, width) = (window, window)

    # Select the border-handling policy.
    bounds_for = {'shift': get_window_bounds,
                  'clip': get_window_bounds_clipped}
    if border not in bounds_for:
        raise ValueError('Unrecognized border option.')
    get_window = bounds_for[border]

    (nrows, ncols) = image.shape[:2]

    # Row/Col indices at which to apply the windowed function
    rows = list(range(*slice(*rslice).indices(nrows)))
    cols = list(range(*slice(*cslice).indices(ncols)))

    def evaluate(i, j):
        (r0, r1, c0, c1) = get_window(nrows, ncols, height, width, i, j)
        return func(image[r0:r1, c0:c1], (i - r0, j - c0)).astype(dtype)

    return np.array([[evaluate(i, j) for j in cols]
                     for i in rows]).astype(dtype)
def map_outer_window_stats(func, image, inner, outer, dim_out=1, cov=None,
                           dtype=None, rslice=(None,), cslice=(None,)):
    '''Map a function accepting `GaussianStats` over a rolling spatial window.

    For each selected pixel, Gaussian statistics are computed over the pixels
    of the `outer` window *excluding* those in the `inner` window, and
    ``func(stats, pixel)`` is evaluated.

    Arguments:

        `func` (callable):

            Called as ``func(stats, pixel)``, where `stats` is a
            `GaussianStats` computed from the outer (minus inner) window and
            `pixel` is the associated pixel spectrum.

        `image` (`SpyFile` or np.ndarray):

            The image over which to apply `func`.

        `inner`, `outer` (int or 2-tuple of ints):

            Window sizes in pixels; a single int gives a square window,
            otherwise a (height, width) tuple.

        `dim_out` (int):

            Dimensionality of the output of `func` for a single pixel.

        `cov` (ndarray):

            Optional fixed covariance; when given, only the background mean
            is recomputed per window.

        `dtype` (np.dtype):

            Optional dtype for the output.

        `rslice`, `cslice` (tuple):

            `slice` parameters selecting the rows/columns at which `func`
            is evaluated.

    Return value:

        An np.ndarray of the mapped results.

    Example (RX anomaly detector with a 3x3 inner and 17x17 outer window):

        >>> def mahalanobis(bg, x):
        ...     return (x - bg.mean).dot(bg.inv_cov).dot(x - bg.mean)
        ...
        >>> rx_scores = map_outer_window_stats(mahalanobis, image, 3, 17)
    '''
    stats_mapper = WindowedGaussianBackgroundMapper(inner, outer, func, cov,
                                                    dim_out, dtype)
    return stats_mapper(image, rslice, cslice)
class WindowedGaussianBackgroundMapper(object):
    '''A class for producing window statistics with an inner exclusion window.
    '''
    def __init__(self, inner, outer, function=None, cov=None, dim_out=None,
                 dtype=None):
        '''Creates a detector with the given inner/outer window.

        Arguments:

            `inner` (integer or 2-tuple of integers):

                Width and height of inner window, in pixels.

            `outer` (integer or 2-tuple of integers):

                Width and height of outer window, in pixels. Dimensions must
                be greater than inner window.

            `function` (callable object):

                A callable object that will be applied to each pixel when the
                __call__ method is called for this object. The __call__ method
                of `function` must accept two arguments:

                    - A `GaussianStats` object.
                    - An ndarray representing the pixel for which the stats
                      were computed.

            `cov` (ndarray):

                An optional covariance to use. If this parameter is given,
                `cov` will be used for all RX calculations (background
                covariance will not be recomputed in each window). Only the
                background mean will be recomputed in each window.

            `dim_out` (int):

                The dimensionality of the output of `function` when called on
                a pixel spectrum. If this value is not specified, `function`
                will be checked to see if it has a `dim_out` member. If it
                does not, `dim_out` will be assumed to be 1.

            `dtype`:

                Optional dtype for the output array. If not specified,
                np.float32 is used.
        '''
        if isinstance(inner, (list, tuple)):
            self.inner = inner[:]
        else:
            self.inner = (inner, inner)
        if isinstance(outer, (list, tuple)):
            self.outer = outer[:]
        else:
            self.outer = (outer, outer)
        self.callable = function
        self.cov = cov
        self.dim_out = dim_out
        # Hook allowing a pre-built mask-creator to be injected before use.
        self.create_mask = None
        if dtype is not None:
            self.dtype = dtype
        else:
            self.dtype = np.float32

    def __call__(self, image, rslice=(None,), cslice=(None,)):
        '''Applies the object's callable function to the image data.

        Arguments:

            `image` (numpy.ndarray):

                An image with shape (R, C, B).

            `rslice` (tuple):

                Tuple of `slice` parameters specifying at which rows the
                function should be applied. If not provided, `func` is
                applied to all rows.

            `cslice` (tuple):

                Tuple of `slice` parameters specifying at which columns the
                function should be applied. If not provided, `func` is
                applied to all columns.

        Returns numpy.ndarray:

            An array whose elements correspond to the outputs from the
            object's callable function.
        '''
        (R, C, B) = image.shape
        (row_border, col_border) = [x // 2 for x in self.outer]

        if self.dim_out is not None:
            dim_out = self.dim_out
        elif hasattr(self.callable, 'dim_out') and \
          self.callable.dim_out is not None:
            dim_out = self.callable.dim_out
        else:
            dim_out = 1

        # Row/Col indices at which to apply the windowed function
        rvals = list(range(*slice(*rslice).indices(R)))
        cvals = list(range(*slice(*cslice).indices(C)))
        nrows_out = len(rvals)
        ncols_out = len(cvals)

        # Bugfix: with fewer than 10 output rows the original modulus
        # (nrows_out // 10) was zero and raised ZeroDivisionError.
        report_interval = max(1, nrows_out // 10)

        if dim_out > 1:
            x = np.ones((nrows_out, ncols_out, dim_out),
                        dtype=np.float32) * -1.0
        else:
            x = np.ones((nrows_out, ncols_out), dtype=self.dtype) * -1.0

        npixels = self.outer[0] * self.outer[1] - self.inner[0] * self.inner[1]
        if self.cov is None and npixels < B:
            raise ValueError('Window size provides too few samples for ' \
                             'image data dimensionality.')

        if self.create_mask is not None:
            create_mask = self.create_mask
        else:
            create_mask = inner_outer_window_mask_creator(image.shape,
                                                          self.inner,
                                                          self.outer)

        # Precompute the background-pixel indices for a window fully in the
        # image interior; they are the same for all interior pixels.
        interior_mask = create_mask(R // 2, C // 2, True)[2].ravel()
        interior_indices = np.argwhere(interior_mask == 0).squeeze()

        (i_interior_start, i_interior_stop) = (row_border, R - row_border)
        (j_interior_start, j_interior_stop) = (col_border, C - col_border)

        status = spy._status
        status.display_percentage('Processing image: ')
        if self.cov is not None:
            # Since we already have the covariance, just use np.mean to get
            # means of the inner window and outer (including the inner), then
            # use those to calculate the mean of the outer window alone.
            background = GaussianStats(cov=self.cov)
            for i in range(nrows_out):
                for j in range(ncols_out):
                    (inner, outer) = create_mask(rvals[i], cvals[j], False)
                    N_in = (inner[1] - inner[0]) * (inner[3] - inner[2])
                    N_tot = (outer[1] - outer[0]) * (outer[3] - outer[2])
                    mean_out = np.mean(image[outer[0]: outer[1],
                                             outer[2]: outer[3]].reshape(-1, B),
                                       axis=0)
                    # Bugfix: the inner mean must be taken over the *inner*
                    # window bounds. The original reused the outer bounds
                    # here, which made the exclusion window a no-op.
                    mean_in = np.mean(image[inner[0]: inner[1],
                                            inner[2]: inner[3]].reshape(-1, B),
                                      axis=0)
                    # Mean of the annulus: subtract the inner-window
                    # contribution from the full outer-window mean.
                    mean = mean_out * (float(N_tot) / (N_tot - N_in)) - \
                      mean_in * (float(N_in) / (N_tot - N_in))
                    background.mean = mean
                    x[i, j] = self.callable(background,
                                            image[rvals[i], cvals[j]])
                if i % report_interval == 0:
                    status.update_percentage(100. * i / nrows_out)
        else:
            # Need to calculate both the mean and covariance for the outer
            # window (without the inner).
            (h, w) = self.outer[:]
            for i in range(nrows_out):
                ii = rvals[i] - h // 2
                for j in range(ncols_out):
                    jj = cvals[j] - w // 2
                    if i_interior_start <= rvals[i] < i_interior_stop and \
                      j_interior_start <= cvals[j] < j_interior_stop:
                        # Interior pixel: reuse the precomputed indices.
                        X = image[ii : ii + h, jj : jj + w, :]
                        indices = interior_indices
                    else:
                        # Border pixel: window is shifted, so recompute.
                        (inner, (i0, i1, j0, j1), mask) = \
                          create_mask(rvals[i], cvals[j], True)
                        indices = np.argwhere(mask.ravel() == 0).squeeze()
                        X = image[i0 : i1, j0 : j1, :]
                    X = np.take(X.reshape((-1, B)), indices, axis=0)
                    mean = np.mean(X, axis=0)
                    cov = np.cov(X, rowvar=False)
                    background = GaussianStats(mean, cov)
                    x[i, j] = self.callable(background,
                                            image[rvals[i], cvals[j]])
                if i % report_interval == 0:
                    status.update_percentage(100. * i / nrows_out)
        status.end_percentage()
        return x
def inner_outer_window_mask_creator(image_shape, inner, outer):
    '''Returns a function to give inner/outer windows.

    Arguments:

        `image_shape` (tuple of integers):

            Specifies the dimensions of the image for which windows are to be
            produced. Only the first two dimensions (rows, columns) are used.

        `inner` (int or 2-tuple of integers):

            Height and width of the inner window, in pixels.

        `outer` (int or 2-tuple of integers):

            Height and width of the outer window, in pixels.

    Return value:

        A function that accepts the following arguments:

            `i` (int):

                Row index of pixel for which to generate the mask

            `j` (int):

                Column index of pixel for which to generate the mask

            `gen_mask` (bool, default False):

                A boolean flag indicating whether to return a boolean mask of
                shape (outer height, outer width), indicating which pixels in
                the window should be used for background statistics
                calculations.

        If `gen_mask` is False, the return value is a 2-tuple of 4-tuples,
        where the 2-tuples specify the start/stop row/col indices for the
        inner and outer windows, respectively. Each of the 4-tuples is of the
        form (row_start, row_stop, col_start, col_stop).

        If `gen_mask` is True, a third element is added to the tuple, which
        is the boolean mask for the inner/outer window.
    '''
    (R, C) = image_shape[:2]
    if isinstance(inner, (list, tuple)):
        (hi, wi) = inner[:]
    else:
        (hi, wi) = (inner, inner)
    if isinstance(outer, (list, tuple)):
        (ho, wo) = outer[:]
    else:
        (ho, wo) = (outer, outer)

    if wi > wo or hi > ho:
        raise ValueError('Inner window dimensions must be smaller than outer.')

    (ai, bi) = (hi // 2, wi // 2)
    (ao, bo) = (ho // 2, wo // 2)

    def create_mask(i, j, gen_mask=False):
        # Inner window, shifted to lie fully within the image.
        inner_imin = i - ai
        inner_imax = inner_imin + hi
        if inner_imin < 0:
            inner_imax = hi
            inner_imin = 0
        elif inner_imax > R:
            inner_imax = R
            inner_imin = R - hi

        inner_jmin = j - bi
        inner_jmax = inner_jmin + wi
        if inner_jmin < 0:
            inner_jmax = wi
            inner_jmin = 0
        elif inner_jmax > C:
            inner_jmax = C
            inner_jmin = C - wi

        # Outer window, shifted to lie fully within the image.
        outer_imin = i - ao
        outer_imax = outer_imin + ho
        if outer_imin < 0:
            outer_imax = ho
            outer_imin = 0
        elif outer_imax > R:
            outer_imax = R
            outer_imin = R - ho

        outer_jmin = j - bo
        outer_jmax = outer_jmin + wo
        if outer_jmin < 0:
            outer_jmax = wo
            outer_jmin = 0
        elif outer_jmax > C:
            outer_jmax = C
            outer_jmin = C - wo

        inner = (inner_imin, inner_imax, inner_jmin, inner_jmax)
        outer = (outer_imin, outer_imax, outer_jmin, outer_jmax)

        if not gen_mask:
            return (inner, outer)

        # Mask is True over the inner (excluded) region of the outer window.
        # Bugfix: use the builtin `bool` -- the deprecated `np.bool` alias
        # was removed in NumPy 1.24 and raises AttributeError there.
        mask = np.zeros((ho, wo), dtype=bool)
        mask[inner_imin - outer_imin : inner_imax - outer_imin,
             inner_jmin - outer_jmin : inner_jmax - outer_jmin] = True
        return (inner, outer, mask)
    return create_mask
def map_class_ids(src_class_image, dest_class_image, unlabeled=None):
    '''Create a mapping between class labels in two classification images.

    Running a classification algorithm (particularly an unsupervised one)
    multiple times on the same image can yield similar results but with
    different class labels (indices) for the same classes. This function
    produces a mapping of class indices from one classification image to
    another by finding class indices that share the most pixels between the
    two classification images.

    Arguments:

        `src_class_image` (ndarray):

            An MxN integer array of class indices. The indices in this array
            will be mapped to indices in `dest_class_image`.

        `dest_class_image` (ndarray):

            An MxN integer array of class indices.

        `unlabeled` (int or array of ints):

            If this argument is provided, all pixels (in both images) with
            these values will be ignored when counting coincident pixels to
            determine the mapping. If mapping a classification image to a
            ground truth image that has a labeled background value, set
            `unlabeled` to that value.

    Return Value:

        A dictionary whose keys are class indices from `src_class_image` and
        whose values are class indices from `dest_class_image`.

    .. seealso::

        :func:`map_classes`
    '''
    src_ids = list(set(src_class_image.ravel()))
    dest_ids = list(set(dest_class_image.ravel()))
    cmap = {}
    if unlabeled is not None:
        # Accept a scalar (python or numpy integer) or a sequence of labels.
        # `np.isscalar` also covers numpy integer types, which are not
        # instances of the builtin `int`.
        if np.isscalar(unlabeled):
            unlabeled = [unlabeled]
        for i in unlabeled:
            if i in src_ids:
                src_ids.remove(i)
                cmap[i] = i
            if i in dest_ids:
                dest_ids.remove(i)
    else:
        unlabeled = []
    N_src = len(src_ids)
    N_dest = len(dest_ids)

    # Matrix of coincidence counts between classes in src and dest.
    # Bugfix: use a wide integer dtype -- the original np.uint16 silently
    # overflowed when two classes shared more than 65535 pixels (any image
    # larger than 256x256 could corrupt the mapping).
    matches = np.zeros((N_src, N_dest), np.int64)
    for i in range(N_src):
        src_is_i = (src_class_image == src_ids[i])
        for j in range(N_dest):
            matches[i, j] = np.sum(np.logical_and(src_is_i,
                                   dest_class_image == dest_ids[j]))

    # Greedily assign the pair of classes with the highest overlap, then
    # remove that row/column and repeat.
    unmapped = set(src_ids)
    dest_available = set(dest_ids)
    while len(unmapped) > 0:
        (i, j) = tuple(np.argwhere(matches == np.max(matches))[0])
        mmax = matches[i, j]
        if mmax == 0:
            # Nothing left to map. Pick unused indices from dest_class_image
            for (old, new) in zip(sorted(unmapped), sorted(dest_available)):
                cmap[old] = new
                unmapped.remove(old)
                dest_available.remove(new)
            for old in unmapped:
                # The list of target classes has been exhausted. Pick the
                # smallest dest value that isn't already used.
                def next_id():
                    for ii in itertools.count():
                        if ii not in unlabeled and ii not in cmap.values():
                            return ii
                cmap[old] = next_id()
            break
        cmap[src_ids[i]] = dest_ids[j]
        unmapped.remove(src_ids[i])
        dest_available.remove(dest_ids[j])
        matches[i, :] = 0
        matches[:, j] = 0
    return cmap
def map_classes(class_image, class_id_map, allow_unmapped=False):
    '''Modifies class indices according to a class index mapping.

    Arguments:

        `class_image`: (ndarray):

            An MxN array of integer class indices.

        `class_id_map`: (dict):

            A dict whose keys are indices from `class_image` and whose values
            are new values for the corresponding indices. This value is
            usually the output of :func:`map_class_ids`.

        `allow_unmapped` (bool, default False):

            If False, a :class:`ValueError` is raised when `class_image`
            contains an index with no key in `class_id_map`. If True, such
            indices pass through to the output unmodified.

    Return Value:

        An integer-valued ndarray with same shape as `class_image`.

    .. seealso::

        :func:`map_class_ids`
    '''
    present_ids = set(class_image.ravel())
    if not allow_unmapped \
      and not set(class_id_map.keys()).issuperset(present_ids):
        raise ValueError('`src` has class values with no mapping key')
    # Work on a copy; select pixels from the *original* image so earlier
    # remappings cannot be remapped a second time.
    mapped = np.array(class_image)
    for (src_id, dest_id) in class_id_map.items():
        mapped[class_image == src_id] = dest_id
    return mapped
def expand_binary_mask_for_window(mask, height, width):
    '''Returns a new mask including a window around each pixel in source mask.

    Arguments:

        `mask` (2D ndarray):

            An ndarray whose non-zero elements define a mask.

        `height` (int):

            Height of the window.

        `width` (int):

            Width of the window.

    Returns a new mask of ones and zeros with the same shape as `mask`: for
    each non-zero element of `mask`, the full `height` x `width` window about
    that pixel (clipped at the image border) is set to one.
    '''
    expanded = np.zeros_like(mask)
    (nrows, ncols) = mask.shape
    for (i, j) in iterator_ij(mask):
        window = get_window_bounds_clipped(nrows, ncols, height, width, i, j)
        expanded[window[0]:window[1], window[2]:window[3]] = 1
    return expanded
| spectralpython/spectral | spectral/algorithms/spatial.py | Python | gpl-2.0 | 27,113 | [
"Gaussian"
] | 536240e1d590505021cbe3b77c0a9dba541026009efd1be0bd16d61ad8b2e0fc |
# #-------------------------------------------------------------------------------
# # Cloud-COPASI
# # Copyright (c) 2013 Edward Kent.
# # All rights reserved. This program and the accompanying materials
# # are made available under the terms of the GNU Public License v3.0
# # which accompanies this distribution, and is available at
# # http://www.gnu.org/licenses/gpl.html
# #-------------------------------------------------------------------------------
#
# import os, glob, sys, importlib
#
# #Get a list of the subpackages in the module path
# #Must contain plugin.py
# def get_subpackages(path):
# directory =path[0]
# def is_plugin_package(d):
# d = os.path.join(directory, d)
# return os.path.isdir(d) and glob.glob(os.path.join(d, '__init__.py*')) and glob.glob(os.path.join(d, 'plugin.py*'))
#
# return filter(is_plugin_package, os.listdir(directory))
#
#
# #Go through the list of packages and get the task_type tuple
# def get_task_types(subpackages):
# output = []
# for package in subpackages:
# module = importlib.import_module(__package__ + '.' + package + '.plugin')
# task_type = module.internal_type
# output.append(task_type)
# return output
# subpackages = get_subpackages(__path__)
#
# task_types = get_task_types(subpackages)
#
# #Method for loading a plugin and returning the TaskPlugin class (not instance)
# def get_class(name):
# module = importlib.import_module(__package__ + '.' + name + '.plugin')
# plugin = getattr(module, 'TaskPlugin')
# return plugin | edkent/cloud-copasi | cloud_copasi/web_interface/task_plugins/__init__.py | Python | gpl-3.0 | 1,570 | [
"COPASI"
] | f472e9b878ad2cd8fc1391fb876b61935db339a3a42d77bfb7010569d12db7e8 |
# Copyright (c) 2017, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
from math import sqrt
from time import time
max_exp = 50
min_exp =-100.
def abs2(x):
    '''Return the squared magnitude |x|^2 of a (possibly complex) value.'''
    return x.real ** 2 + x.imag ** 2
def lorentzian(x, x0, g):
    '''Normalized Lorentzian centered at `x0` with half-width `g`.

    Integrates to 1 over the real line; peak value is 1/(pi*g).
    '''
    peak = 1. / (np.pi * g)
    return peak * (g ** 2) / ((x - x0) ** 2 + g ** 2)
def gaussian(x, x0, s):
    '''Normalized Gaussian centered at `x0` with standard deviation `s`.

    `x` may be a scalar or an ndarray. The exponent is clipped below at the
    module-level `min_exp` to avoid underflow for points far from the center.
    '''
    height = 1. / (np.sqrt(2. * np.pi) * s)
    argument = -0.5 * ((x - x0) / s) ** 2
    # Avoiding underflow errors... np.maximum (unlike the in-place np.place
    # used previously) also works when `x` is a plain scalar.
    argument = np.maximum(argument, min_exp)
    return height * np.exp(argument)
class YamboDipolesDB():
    """
    Class to read the dipole databases from the ``ndb.dip*`` files.

    Can be used to, for example, plot the imaginary part of the dielectric
    function, which corresponds to the optical absorption.
    """
    def __init__(self,lattice,save='SAVE',filename='ndb.dip_iR_and_P',dip_type='iR',field_dir=[1,0,0],field_dir3=[0,0,1]):
        self.lattice  = lattice
        self.filename = "%s/%s"%(save,filename)

        #read dipoles
        try:
            database = Dataset(self.filename, 'r')
        except:
            raise IOError("Error opening %s in YamboDipolesDB"%self.filename)

        self.nq_ibz, self.nq_ibz, self.nk_ibz, self.nk_bz = database.variables['HEAD_R_LATT'][:].astype(int)
        self.spin = database.variables['SPIN_VARS'][1].astype(int)

        # indexv is the maximum partially occupied band
        # indexc is the minimum partially empty band
        self.min_band, self.max_band, self.indexv, self.indexc = database.variables['PARS'][:4].astype(int)
        database.close()

        # determine the number of bands
        self.nbands  = self.max_band-self.min_band+1
        self.nbandsv = self.indexv-self.min_band+1
        self.nbandsc = self.max_band-self.indexc+1

        #read the database
        self.dipoles = self.readDB(dip_type)

        #expand the dipoles to the full brillouin zone
        #bugfix: forward the field directions given to the constructor
        #(they were previously accepted but silently ignored)
        self.expandDipoles(self.dipoles,field_dir=field_dir,field_dir3=field_dir3)

    def normalize(self,electrons):
        """
        Use the electrons to normalize the dipole matrix elements
        (divide each element by the corresponding transition energy).
        """
        eiv = electrons.eigenvalues
        nkpoints, nbands = eiv.shape
        for nk in xrange(nkpoints):
            eivk = eiv[nk]
            #create eigenvalues differences arrays
            norm = np.array([ [ec-ev for ev in eivk] for ec in eivk ])
            #normalize, zeroing out degenerate (zero-energy) transitions
            for i,j in product(xrange(nbands),repeat=2):
                if norm[i,j] == 0:
                    self.dipoles[nk,:,i,j] = 0
                else:
                    self.dipoles[nk,:,i,j] = self.dipoles[nk,:,i,j]/norm[i,j]

    def readDB(self,dip_type):
        """
        Read the dipole matrix elements from the per-k-point fragments.

        The returned dipole matrix has the following indexes:
            [nkpoints, 3, nbands conduction, nbands valence]
        """
        self.dip_type = dip_type
        dipoles = np.zeros([self.nk_ibz,3,self.nbandsc,self.nbandsv],dtype=np.complex64)

        #check dipole db format
        filename = "%s_fragment_1"%(self.filename)
        db = Dataset(filename)
        tag1 = 'DIP_iR_k_0001_spin_0001'
        tag2 = 'DIP_iR_k_0001_xyz_0001_spin_0001'
        if tag1 in db.variables.keys():
            dipoles_format = 1
        elif tag2 in db.variables.keys():
            dipoles_format = 2
        else:
            #bugfix: previously `dipoles_format` was left unbound here and
            #the code crashed later with an unrelated NameError
            db.close()
            raise ValueError("Unknown dipole database format in %s"%filename)
        db.close()

        for nk in range(self.nk_ibz):
            #open database for each k-point
            filename = "%s_fragment_%d"%(self.filename,nk+1)
            db = Dataset(filename)
            if dipoles_format == 1:
                dip = db.variables['DIP_%s_k_%04d_spin_%04d'%(dip_type,nk+1,1)][:].view(dtype=np.complex64)[:,:,:,0]
                for i in xrange(3):
                    dipoles[nk,i] = dip[:,:,i].T
            elif dipoles_format == 2:
                for i in xrange(3):
                    dip = db.variables['DIP_%s_k_%04d_xyz_%04d_spin_%04d'%(dip_type,nk+1,i+1,1)][:]
                    dipoles[nk,i] = dip[0].T+dip[1].T*1j
            #close database
            db.close()

        return dipoles

    def expandDipoles(self,dipoles=None,field_dir=[1,0,0],field_dir3=[0,0,1]):
        """
        Expand dipoles from the IBZ to the full BZ using the lattice
        symmetry operations.
        """
        if dipoles is None:
            dipoles = self.dipoles

        #check if we need to expand the dipoles to the full BZ
        lattice = self.lattice
        kpts = lattice.car_kpoints
        nks  = lattice.kpoints_indexes
        nss  = lattice.symmetry_indexes

        #normalize the fields
        field_dir  = np.array(field_dir)
        field_dir  = field_dir/np.linalg.norm(field_dir)
        field_dir3 = np.array(field_dir3)
        field_dir3 = field_dir3/np.linalg.norm(field_dir3)

        #calculate polarization directions
        field_dirx = field_dir
        field_diry = np.cross(field_dir3,field_dirx)
        field_dirz = field_dir3

        #get band indexes
        nkpoints = len(nks)
        indexv = self.min_band-1
        indexc = self.indexc-1
        nbands = self.min_band+self.nbands-1

        #Note that P is Hermitian and iR anti-hermitian.
        if self.dip_type == 'P':
            factor =  1.0
        else:
            factor = -1.0

        #save dipoles in the ibz
        self.dipoles_ibz = dipoles
        #get dipoles in the full Brillouin zone
        self.dipoles = np.zeros([nkpoints,3,nbands,nbands],dtype=np.complex64)
        for nk_fbz,nk_ibz,ns in zip(xrange(nkpoints),nks,nss):
            #if time reversal we conjugate
            if lattice.time_rev_list[ns]:
                dip = np.conjugate(dipoles[nk_ibz,:,:,:])
            else:
                dip = dipoles[nk_ibz,:,:,:]

            #get symmetry operation
            sym = lattice.sym_car[ns].T
            #get projection operation
            pro = np.array([field_dirx,field_diry,field_dirz])
            #transformation
            tra = np.dot(pro,sym)

            for c,v in product(xrange(self.nbandsc),xrange(self.nbandsv)):
                #rotate dipoles
                self.dipoles[nk_fbz,:,indexc+c,indexv+v] = np.dot(tra,dip[:,c,v])

            #make hermitian
            for c,v in product(xrange(self.nbandsc),xrange(self.nbandsv)):
                self.dipoles[nk_fbz,:,indexv+v,indexc+c] = factor*np.conjugate(self.dipoles[nk_fbz,:,indexc+c,indexv+v])

        self.field_dirx = field_dirx
        self.field_diry = field_diry
        self.field_dirz = field_dirz

        return dipoles, kpts

    def plot(self,ax,kpoint=0,dir=0,func=abs2):
        """Plot func(dipoles) for one k-point/direction on matplotlib axes `ax`."""
        return ax.matshow(func(self.dipoles[kpoint,dir]))

    def ip_eps2(self,electrons,pol=1,ntot_dip=-1,GWshift=0.,broad=0.1,broadtype='l',nbnds=[-1,-1],emin=0.,emax=10.,esteps=500):
        """
        Compute independent-particle absorption (by Fulvio Paleari)

        electrons -> electrons YamboElectronsDB
        pol       -> polarization index (or list of indices) summed over
        GWshift   -> rigid GW shift in eV
        broad     -> broadening of peaks
        broadtype -> 'l' is lorentzian, 'g' is gaussian
        nbnds     -> number of [valence, conduction] bands included starting from Fermi level. Default means all are included
        emin,emax,esteps -> frequency range for the plot
        """
        #get eigenvalues and weights of electrons
        eiv = electrons.eigenvalues
        weights = electrons.weights
        nv = electrons.nbandsv
        nc = electrons.nbandsc

        #get dipoles
        dipoles = self.dipoles_ibz

        #get frequencies and im
        freq = np.linspace(emin,emax,esteps)
        eps2 = np.zeros([len(freq)])

        #Cut bands to the maximum number used for the dipoles
        if ntot_dip>0:
            eiv = eiv[:,:ntot_dip]
            nc = ntot_dip-nv

        #Print band gap values and apply GW_shift
        eiv = electrons.energy_gaps(GWshift)

        #Check bands to include in the calculation
        #bugfix: copy first -- mutating the argument corrupted the shared
        #mutable default list across successive calls
        nbnds = list(nbnds)
        if nbnds[0]<0: nbnds[0]=nv
        if nbnds[1]<0: nbnds[1]=nc
        iv = nv-nbnds[0] #first valence
        lc = nv+nbnds[1] #last conduction

        #choose broadening
        if "l" in broadtype:
            broadening = lorentzian
        else:
            broadening = gaussian

        #bugfix: the original referenced an undefined name `pols`
        #(NameError on every call); derive it from the `pol` argument
        pols = np.atleast_1d(pol)
        na = np.newaxis

        #calculate epsilon
        for c,v in product(range(nv,lc),range(iv,nv)):
            #get electron-hole energy and dipoles
            ecv  = eiv[:,c]-eiv[:,v]
            dip2 = np.sum(abs2(dipoles[:,pols,c-nv,v]),axis=1)

            #make dimensions match
            dip2a = dip2[na,:]
            ecva  = ecv[na,:]
            freqa = freq[:,na]
            wa    = weights[na,:]

            #calculate the lorentzians
            broadw = broadening(freqa,ecva,broad)

            #scale broadening with dipoles and weights
            epsk = wa*dip2a*broadw

            #integrate over kpoints
            eps2 += np.sum(epsk,axis=1)

        return freq, eps2

    def __str__(self):
        #bugfix: `self.expand` was never set anywhere in this class, so the
        #original raised AttributeError; since expandDipoles() is always
        #called by the constructor, default to showing the expanded info
        expanded = getattr(self,'expand',True)
        s = ""
        s += "\nkpoints:\n"
        s += "nk_ibz : %d\n"%self.nk_ibz
        if expanded: s += "nk_bz : %d\n"%self.nk_bz
        s += "\nnumber of bands:\n"
        s += "nbands : %d\n" % self.nbands
        s += "nbandsv: %d\n" % self.nbandsv
        s += "nbandsc: %d\n" % self.nbandsc
        s += "indexv : %d\n" % (self.min_band-1)
        s += "indexc : %d\n" % (self.indexc-1)
        if expanded:
            s += "field_dirx: %10.6lf %10.6lf %10.6lf\n"%tuple(self.field_dirx)
            s += "field_diry: %10.6lf %10.6lf %10.6lf\n"%tuple(self.field_diry)
            s += "field_dirz: %10.6lf %10.6lf %10.6lf\n"%tuple(self.field_dirz)
        return s
if __name__ == "__main__":
    # NOTE(review): `DipolesDB` is not defined in this module and
    # `get_databases` is not a method of `YamboDipolesDB` -- this demo block
    # looks stale (YamboDipolesDB also requires a `lattice` argument).
    # Verify before relying on it.
    ddb = DipolesDB()
    ddb.get_databases()
    print ddb
| alexandremorlet/yambopy | yambopy/dbs/dipolesdb.py | Python | bsd-3-clause | 9,838 | [
"Gaussian"
] | 97f0d80cb303bdde3b1a6fc5d91db27d5b313d112884b24f56f95b67a26e176b |
###############################################################################
# Copyright 2016 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "EC-CAS diags" package.
#
# "EC-CAS diags" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "EC-CAS diags" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "EC-CAS diags". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
# Interface for reading Transcom3 input data.
# Helper method - read a raw binary record
def read_array (f, axes, name):
  '''Read one big-endian Fortran-style unformatted record from `f`.

  The record layout is: a 4-byte length header (in bytes), a float64
  payload, and the length repeated as a 4-byte trailer. The payload is
  reshaped to the lengths of `axes` and wrapped in a pygeode `Var` named
  `name`.
  '''
  from pygeode.var import Var
  import numpy as np
  # Python 3 compatibility: `reduce` is no longer a builtin.
  from functools import reduce
  dimensions = [len(a) for a in axes]
  # Input is in big-endian?
  i4 = np.dtype('>i4')
  r8 = np.dtype('>f8')
  # Record header holds the payload size in bytes; divide by 8 to get the
  # number of float64 values. Use integer division so `size` stays an int
  # under Python 3 ('/ 8' would yield a float, which np.fromfile rejects).
  size = np.fromfile(file=f,dtype=i4,count=1)[0] // 8
  assert size == reduce(int.__mul__,dimensions,1)
  data = np.fromfile(file=f,dtype=r8,count=size)
  # The trailer must repeat the header (sanity check on record framing).
  size2 = np.fromfile(file=f,dtype=i4,count=1)[0] // 8
  assert size2 == size
  data = data.reshape(dimensions)
  return Var(axes, values=data, name=name)
class Transcom3(object):
  """
  Input fluxes for the TransCom3 protocol.

  Reads the TransCom3 "input.new.dat" file (optionally compressed as
  "input.new.dat.Z") of big-endian Fortran records and exposes the flux
  fields as pygeode variables.
  """

  @staticmethod
  def open_file (filename):
    '''Open a TransCom3 input file and return a pygeode Dataset of its fields.

    Accepts either the raw "input.new.dat" file or the compressed
    "input.new.dat.Z" (decompressed on the fly via the external
    "uncompress" tool).
    '''
    import numpy as np
    # NOTE(review): the `netcdf` import appears unused in this method --
    # verify before removing.
    from pygeode.formats import netcdf
    from pygeode.axis import Lat, Lon, NamedAxis
    from pygeode.timeaxis import ModelTime365
    from pygeode.dataset import Dataset
    from subprocess import Popen, PIPE
    if filename.endswith("input.new.dat.Z"):
      # Stream the decompressed bytes from the external "uncompress" tool.
      infile = Popen(["uncompress", "-c", filename], bufsize=-1, stdout=PIPE).stdout
    elif filename.endswith("input.new.dat"):
      infile = open(filename,mode='rb')
    else:
      raise ValueError("Unrecognized file '%s'"%filename)
    # 0.5-degree global grid (cell centers).
    lon = Lon(np.linspace(-179.75,179.75,720))
    lat = Lat(np.linspace(-89.75,89.75,360))
    # 11 land regions and 11 ocean regions of the TransCom3 protocol.
    landregion = NamedAxis(np.arange(1,12), name='landregion')
    oceanregion = NamedAxis(np.arange(1,12), name='oceanregion')
    month = ModelTime365(month=np.arange(1,13), units='days')
    # Records must be read sequentially, in the order they appear in
    # the file.
    ff90 = read_array(infile, (lat,lon), 'ff90')
    ff95 = read_array(infile, (lat,lon), 'ff95')
    nep = read_array(infile, (month,lat,lon), 'nep')
    ocean= read_array(infile, (month,lat,lon), 'ocean')
    landunit = read_array(infile, (landregion,lat,lon), 'landunit')
    oceanunit = read_array(infile, (month,oceanregion,lat,lon), 'oceanunit')
    sf6 = read_array(infile, (landregion,lat,lon), 'sf6')
    return Dataset([ff90,ff95,nep,ocean,landunit,oceanunit,sf6])

  @staticmethod
  def decode (data):
    '''Extract per-region CO2 flux variables from an opened TransCom3 dataset.

    Splits the 'landunit' and 'oceanunit' fields into one variable per
    region index, tagged with flux units and species attributes.
    '''
    from pygeode.dataset import Dataset
    # Set up outputs
    # Right now, only need landunit & oceanunit.
    outdata = []
    if 'landunit' in data:
      for i in range(1,len(data.landregion)+1):
        var = data.landunit.squeeze(landregion=i)
        var.name = "CO2_landunit_%02d_flux"%i
        outdata.append(var)
    if 'oceanunit' in data:
      for i in range(1,len(data.oceanregion)+1):
        var = data.oceanunit.squeeze(oceanregion=i)
        var.name = "CO2_oceanunit_%02d_flux"%i
        outdata.append(var)
    # Set the units and species attributes on every output variable.
    for var in outdata:
      var.atts['units'] = 'kg(C) m-2 s-1'
      var.atts['specie'] = 'CO2'
    return outdata
# Add this interface to the table.
from . import table
table['transcom3-input'] = Transcom3
| neishm/EC-CAS-diags | eccas_diags/interfaces/transcom3_input.py | Python | lgpl-3.0 | 3,785 | [
"NetCDF"
] | 75d85d82bf3aeefe1d051e6ae708a5ec34e0ed93b4cbfa2ef1d1b9349d133c91 |
# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.errors import AnsibleError
from ansible.utils.unicode import to_str
class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=None):
        """Ship a local patch file to the target and apply it via the 'patch' module."""
        task_vars = task_vars if task_vars is not None else dict()
        result = super(ActionModule, self).run(tmp, task_vars)

        args = self._task.args
        src = args.get('src', None)
        remote_src = boolean(args.get('remote_src', 'no'))
        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

        # 'src' is mandatory.
        if src is None:
            result['failed'] = True
            result['msg'] = "src is required"
            return result

        # Source already lives on the target host: run the module unmodified.
        if remote_src:
            result.update(self._execute_module(task_vars=task_vars))
            return result

        # Resolve the local patch file through the usual 'files' search path.
        try:
            src = self._find_needle('files', src)
        except AnsibleError as e:
            result['failed'] = True
            result['msg'] = to_str(e)
            return result

        # Make sure we have a remote temp dir, then copy the patch file there.
        if tmp is None or "-tmp-" not in tmp:
            tmp = self._make_tmp_path(remote_user)
            self._cleanup_remote_tmp = True

        tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src))
        self._transfer_file(src, tmp_src)
        self._fixup_perms(tmp, remote_user, recursive=True)

        # Re-point 'src' at the uploaded copy and hand off to the real module.
        module_args = self._task.args.copy()
        module_args.update(dict(src=tmp_src))

        result.update(self._execute_module('patch', module_args=module_args, task_vars=task_vars))
        self._remove_tmp_path(tmp)
        return result
| Censio/ansible-dev | lib/ansible/plugins/action/patch.py | Python | gpl-3.0 | 2,651 | [
"Brian"
] | 222c92cdf43861bd2bb099ade2aa0c54e1c0558f451e8662e53ab5dbe94d3c7c |
#!/usr/bin/env python
# This work was funded by Roche and generously donated to the free
# and open source cheminformatics community.
## Copyright (c) 2012 Andrew Dalke Scientific AB
## Andrew Dalke <dalke@dalkescientific.com>
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
##
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
##
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""MCS - find a Maximum Common Substructure
This software finds the maximum common substructure of a set of
structures and reports it as a SMARTS string.
The SMARTS string depends on the desired match properties. For
example, if ring atoms are only allowed to match ring atoms then an
aliphatic ring carbon in the query is converted to the SMARTS "[C;R]",
and the double-bond ring bond converted to "=;@" while the respective
chain-only version are "[C;!R]" and "=;!@".
"""
# The simplified algorithm description is:
#
# best_substructure = None
# pick one structure as the query, and other as the targets
# for each substructure in the query graph:
# convert it to a SMARTS string based on the desired match properties
# if the SMARTS pattern exists in all of the targets:
# then this is a common substructure
# keep track of the maximum such common structure,
#
# The algorithm will usually take a long time. There are several
# ways to speed it up.
#
# == Bond elimination ==
#
# As the first step, remove bonds which obviously cannot be part of the
# MCS.
#
# This requires atom and bond type information, which I store as SMARTS
# patterns. A bond can only be in the MCS if its canonical bond type is
# present in all of the structures. A bond type is string made of the
# SMARTS for one atom, the SMARTS for the bond, and the SMARTS for the
# other atom. The canonical bond type is the lexographically smaller of
# the two possible bond types for a bond.
#
# The atom and bond SMARTS depend on the type comparison used.
#
# The "ring-matches-ring-only" option adds an "@" or "!@" to the bond
# SMARTS, so that the canonical bondtype for "C-C" becomes [#6]-@[#6] or
# [#6]-!@[#6] if the bond is in a ring or not in a ring, and if atoms
# are compared by element and bonds are compared by bondtype. (This
# option does not add "R" or "!R" to the atom SMARTS because there
# should be a single bond in the MCS of c1ccccc1O and CO.)
#
# The result of all of this atom and bond typing is a "TypedMolecule"
# for each input structure.
#
# I then find which canonical bondtypes are present in all of the
# structures. I convert each TypedMolecule into a
# FragmentedTypedMolecule which has the same atom information but only
# those bonds whose bondtypes are in all of the structures. This can
# break a structure into multiple, disconnected fragments, hence the
# name.
#
# (BTW, I would like to use the fragmented molecules as the targets
# because I think the SMARTS match would go faster, but the RDKit SMARTS
# matcher doesn't like them. I think it's because the new molecule
# hasn't been sanitized and the underlying data structure the ring
# information doesn't exist. Instead, I use the input structures for the
# SMARTS match.)
#
# == Use the structure with the smallest largest fragment as the query ==
# == and sort the targets by the smallest largest fragment ==
#
# I pick one of the FragmentedTypedMolecule instances as the source of
# substructure enumeration. Which one?
#
# My heuristic is to use the one with the smallest largest fragment.
# Hopefully it produces the least number of subgraphs, but that's also
# related to the number of rings, so a large linear graph will product
# fewer subgraphs than a small fused ring system. I don't know how to
# quantify that.
#
# For each of the fragmented structures, I find the number of atoms in
# the fragment with the most atoms, and I find the number of bonds in
# the fragment with the most bonds. These might not be the same
# fragment.
#
# I sort the input structures by the number of bonds in the largest
# fragment, with ties broken first on the number of atoms, and then on
# the input order. The smallest such structure is the query structure,
# and the remaining are the targets.
#
# == Use a breadth-first search and a priority queue to ==
# == enumerate the fragment subgraphs ==
#
# I extract each of the fragments from the FragmentedTypedMolecule into
# a TypedFragment, which I use to make an EnumerationMolecule. An
# enumeration molecule contains a pair of directed edges for each atom,
# which simplifies the enumeration algorithm.
#
# The enumeration algorithm is based around growing a seed. A seed
# contains the current subgraph atoms and bonds as well as an exclusion
# set of bonds which cannot be used for future grown. The initial seed
# is the first bond in the fragment, which may potentially grow to use
# the entire fragment. The second seed is the second bond in the
# fragment, which is excluded from using the first bond in future
# growth. The third seed starts from the third bond, which may not use
# the first or second bonds during growth, and so on.
#
#
# A seed can grow along bonds connected to an atom in the seed but which
# aren't already in the seed and aren't in the set of excluded bonds for
# the seed. If there are no such bonds then subgraph enumeration ends
# for this fragment. Given N bonds there are 2**N-1 possible ways to
# grow, which is just the powerset of the available bonds, excluding the
# no-growth case.
#
# This breadth-first growth takes into account all possibilties of using
# the available N bonds so all of those bonds are added to the exclusion
# set of the newly expanded subgraphs.
#
# For performance reasons, the bonds used for growth are separated into
# 'internal' bonds, which connect two atoms already in the subgraph, and
# 'external' bonds, which lead outwards to an atom not already in the
# subgraph.
#
# Each seed growth can add from 0 to N new atoms and bonds. The goal is
# to maximize the subgraph size so the seeds are stored in a priority
# queue, ranked so the seed with the most bonds is processed first. This
# turns the enumeration into something more like a depth-first search.
#
#
# == Prune seeds which aren't found in all of the structures ==
#
# At each stage of seed growth I check that the new seed exists in all
# of the original structures. (Well, all except the one which I
# enumerate over in the first place; by definition that one will match.)
# If it doesn't match then there's no reason to include this seed or any
# larger seeds made from it.
#
# The check is easy; I turn the subgraph into its corresponding SMARTS
# string and use RDKit's normal SMARTS matcher to test for a match.
#
# There are three ways to generate a SMARTS string: 1) arbitrary, 2)
# canonical, 3) hybrid.
#
# I have not tested #1. During most of the development I assumed that
# SMARTS matches across a few hundred structures would be slow, so that
# the best solution is to generate a *canonical* SMARTS and cache the
# match information.
#
# Well, it turns out that my canonical SMARTS match code takes up most
# of the MCS run-time. If I drop the canonicalization step then the
# code averages about 5-10% faster. This isn't the same as #1 - I still
# do the initial atom assignment based on its neighborhood, which is
# like a circular fingerprint of size 2 and *usually* gives a consistent
# SMARTS pattern, which I can then cache.
#
# However, there are times when the non-canonical SMARTS code is slower.
# Obviously one is if there are a lot of structures, and another if is
# there is a lot of symmetry. I'm still working on characterizing this.
#
#
# == Maximize atoms? or bonds? ==
#
# The above algorithm enumerates all subgraphs of the query and
# identifies those subgraphs which are common to all input structures.
#
# It's trivial then to keep track of the current "best" subgraph, which
# can defined as having the subgraph with the most atoms, or the most
# bonds. Both of those options are implemented.
#
# It would not be hard to keep track of all other subgraphs which are
# the same size.
#
# == complete_ring_only implementation ==
#
# The "complete ring only" option is implemented by first enabling the
# "ring-matches-ring-only" option, as otherwise it doesn't make sense.
#
# Second, in order to be a "best" subgraph, all bonds in the subgraph
# which are ring bonds in the original molecule must also be in a ring
# in the subgraph. This is handled as a post-processing step.
#
# (Note: some possible optimizations, like removing ring bonds from
# structure fragments which are not in a ring, are not yet implemented.)
#
#
# == Prune seeds which have no potential for growing large enough ==
#
# Given a seed, its set of edges available for growth, and the set of
# excluded bonds, figure out the maximum possible growth for the seed.
# If this maximum possible is less than the current best subgraph then
# prune.
#
# This requires a graph search, currently done in Python, which is a bit
# expensive. To speed things up, I precompute some edge information.
# That is, if I know that a given bond is a chain bond (not in a ring)
# then I can calculate the maximum number of atoms and bonds for seed
# growth along that bond, in either direction. However, precomputation
# doesn't take into account the excluded bonds, so after a while the
# predicted value is too high.
#
# Again, I'm still working on characterizing this, and an implementation
# in C++ would have different tradeoffs.
import sys
from rdkit import Chem
import copy
import itertools
import heapq
heappush = heapq.heappush
heappop = heapq.heappop
from itertools import chain, combinations
import collections
from collections import defaultdict
import time
__all__ = ["FindMCS"]
### A place to set global options
# (Is this really useful?)
class Default(object):
    # Default option values for the MCS search.
    timeout = None                      # seconds before giving up; None means no limit
    maximize = "bonds"                  # maximize "atoms" or "bonds" in the MCS
    atom_compare = "elements"           # one of "any", "elements", "isotopes"
    bond_compare = "bondtypes"          # one of "any", "bondtypes"
    match_valences = False              # if True, atoms must also match on valence
    ring_matches_ring_only = False      # if True, ring bonds only match ring bonds
    complete_rings_only = False         # if True, MCS ring bonds must close rings
####### Atom type and bond type information #####
# Lookup up the atomic symbol given its atomic number
_get_symbol = Chem.GetPeriodicTable().GetElementSymbol
# Lookup table to get the SMARTS for an atom given its element
# This uses the '#<n>' notation for atoms which may be aromatic.
# Eg, '#6' for carbon, instead of 'C,c'.
# Use the standard element symbol for atoms which can't be aromatic.
class _AtomSmartsNoAromaticity(dict):
    """Lazy atomic-number -> SMARTS-symbol table.

    Missing entries are resolved through the periodic table on first access
    and cached; elements needing '#<n>' notation are pre-seeded just below.
    """
    def __missing__(self, eleno):
        # Look up the plain element symbol and memoize it in the dict.
        value = _get_symbol(eleno)
        self[eleno] = value
        return value
_atom_smarts_no_aromaticity = _AtomSmartsNoAromaticity()
# Initialize to the ones which need special treatment
# RDKit supports b, c, n, o, p, s, se, and te.
# Daylight and OpenSMILES don't 'te' but do support 'as'
# For better portability, I use the '#' notation for all of them.
# H is also here because they need to always appear as [#1]
# ([H] in SMARTS means "an atom with an H", not "an H")
for eleno in (1, 5, 6, 7, 8, 15, 16, 33, 34, 52):
_atom_smarts_no_aromaticity[eleno] = "#" + str(eleno)
assert _atom_smarts_no_aromaticity[6] == "#6"
assert _atom_smarts_no_aromaticity[2] == "He"
# Match any atom
def _atom_typer_any(atoms):
return ["*"] * len(atoms)
# Match atom by atomic element; usually by symbol
def _atom_typer_elements(atoms):
    """SMARTS term per atom based on its atomic number.

    The lazy _atom_smarts_no_aromaticity table yields '#<n>' notation for
    elements that can be aromatic and the plain symbol for the rest.
    """
    return [_atom_smarts_no_aromaticity[a.GetAtomicNum()] for a in atoms]
# Match atom by isotope number.
def _atom_typer_isotopes(atoms):
return ["%d*" % atom.GetIsotope() for atom in atoms]
# Match any bond
def _bond_typer_any(bonds):
return ["~"] * len(bonds)
# Match bonds based on bond type, including aromaticity
def _bond_typer_bondtypes(bonds):
# Aromaticity matches are important
bond_smarts_types = []
for bond in bonds:
bond_term = bond.GetSmarts()
if not bond_term:
# The SMILES "", means "single or aromatic" as SMARTS.
# Figure out which one.
if bond.GetIsAromatic():
bond_term = ':'
else:
bond_term = '-'
bond_smarts_types.append(bond_term)
return bond_smarts_types
_atom_typers = {
"any": _atom_typer_any,
"elements": _atom_typer_elements,
"isotopes": _atom_typer_isotopes,
}
_bond_typers = {
"any": _bond_typer_any,
"bondtypes": _bond_typer_bondtypes,
}
### Different ways of storing atom/bond information about the input structures ###
# A TypedMolecule contains the input molecule, unmodified, along with
# atom type, and bond type information; both as SMARTS fragments. The
# "canonical_bondtypes" uniquely charactizes a bond; two bonds will
# match if and only if their canonical bondtypes match. (Meaning:
# bonds must be of equivalent type, and must go between atoms of
# equivalent types.)
class _TypedMolecule(object):
    """Input molecule plus per-atom/per-bond SMARTS type strings.

    Two bonds can match in the MCS iff their canonical bondtype strings are
    equal, so canonical_bondtypes fully characterizes this molecule's
    matchable bonds.
    """
    def __init__(self, rdmol, rdmol_atoms, rdmol_bonds, atom_smarts_types,
                 bond_smarts_types, canonical_bondtypes):
        # rdmol: the unmodified RDKit molecule.
        self.rdmol = rdmol
        # These exist as a performance hack. It's faster to store the
        # atoms and bond as a Python list than to do GetAtoms() and
        # GetBonds() again. The stage 2 TypedMolecule does not use
        # these.
        self.rdmol_atoms = rdmol_atoms
        self.rdmol_bonds = rdmol_bonds
        # List of SMARTS to use for each atom and bond
        self.atom_smarts_types = atom_smarts_types
        self.bond_smarts_types = bond_smarts_types
        # List of canonical bondtype strings
        self.canonical_bondtypes = canonical_bondtypes
    # Question: Do I also want the original_rdmol_indices? With
    # the normal SMARTS I can always do the substructure match
    # again to find the indices, but perhaps this will be needed
    # when atom class patterns are fully implemented.
# Start with a set of TypedMolecules. Find the canonical_bondtypes
# which only exist in all them, then fragment each TypedMolecule to
# produce a FragmentedTypedMolecule containing the same atom
# information but containing only bonds with those
# canonical_bondtypes.
class _FragmentedTypedMolecule(object):
    """A typed molecule reduced to the bonds whose canonical bondtype occurs
    in every input structure; removing bonds may split it into fragments.

    orig_atoms/orig_bonds reference the corresponding objects in the
    original (unfragmented) molecule.
    """
    def __init__(self, rdmol, rdmol_atoms, orig_atoms, orig_bonds,
                 atom_smarts_types, bond_smarts_types, canonical_bondtypes):
        # rdmol: new (possibly disconnected) molecule with only kept bonds.
        self.rdmol = rdmol
        self.rdmol_atoms = rdmol_atoms
        self.orig_atoms = orig_atoms
        self.orig_bonds = orig_bonds
        # List of SMARTS to use for each atom and bond
        self.atom_smarts_types = atom_smarts_types
        self.bond_smarts_types = bond_smarts_types
        # List of canonical bondtype strings
        self.canonical_bondtypes = canonical_bondtypes
# A FragmentedTypedMolecule can contain multiple fragments. Once I've
# picked the FragmentedTypedMolecule to use for enumeration, I extract
# each of the fragments as the basis for an EnumerationMolecule.
class TypedFragment(object):
    """One connected fragment extracted from a _FragmentedTypedMolecule,
    carrying its own SMARTS type lists plus back-references to the
    original molecule's atoms and bonds.
    """
    def __init__(self, rdmol,
                 orig_atoms, orig_bonds,
                 atom_smarts_types, bond_smarts_types, canonical_bondtypes):
        self.rdmol = rdmol
        self.orig_atoms = orig_atoms
        self.orig_bonds = orig_bonds
        self.atom_smarts_types = atom_smarts_types
        self.bond_smarts_types = bond_smarts_types
        self.canonical_bondtypes = canonical_bondtypes
# The two possible bond types are
# atom1_smarts + bond smarts + atom2_smarts
# atom2_smarts + bond smarts + atom1_smarts
# The canonical bond type is the lexically smaller of these two.
def _get_canonical_bondtypes(rdmol, bonds, atom_smarts_types, bond_smarts_types):
canonical_bondtypes = []
for bond, bond_smarts in zip(bonds, bond_smarts_types):
atom1_smarts = atom_smarts_types[bond.GetBeginAtomIdx()]
atom2_smarts = atom_smarts_types[bond.GetEndAtomIdx()]
if atom1_smarts > atom2_smarts:
atom1_smarts, atom2_smarts = atom2_smarts, atom1_smarts
canonical_bondtypes.append("[%s]%s[%s]" % (atom1_smarts, bond_smarts, atom2_smarts))
return canonical_bondtypes
# Create a TypedMolecule using the element-based typing scheme
# TODO: refactor this. It doesn't seem right to pass boolean flags.
def _get_typed_molecule(rdmol, atom_typer, bond_typer, match_valences = Default.match_valences,
                        ring_matches_ring_only = Default.ring_matches_ring_only):
    """Build a _TypedMolecule for rdmol using the given atom/bond typers.

    match_valences appends a 'v<n>' valence term to each atom SMARTS;
    ring_matches_ring_only appends '@' (ring) or '!@' (chain) to each bond
    SMARTS.  Terms containing ',' (an OR list) get a ';' separator so the
    added constraint applies to the whole expression.
    """
    atoms = list(rdmol.GetAtoms())
    atom_smarts_types = atom_typer(atoms)
    # Get the valence information, if requested
    if match_valences:
        new_atom_smarts_types = []
        for (atom, atom_smarts_type) in zip(atoms, atom_smarts_types):
            # Total valence = implicit + explicit.
            valence = atom.GetImplicitValence() + atom.GetExplicitValence()
            valence_str = "v%d" % valence
            if "," in atom_smarts_type:
                # OR expression: ';' binds the valence to all alternatives.
                atom_smarts_type += ";" + valence_str
            else:
                atom_smarts_type += valence_str
            new_atom_smarts_types.append(atom_smarts_type)
        atom_smarts_types = new_atom_smarts_types
    # Store and reuse the bond information because I use it twice.
    # In a performance test, the times went from 2.0 to 1.4 seconds by doing this.
    bonds = list(rdmol.GetBonds())
    bond_smarts_types = bond_typer(bonds)
    if ring_matches_ring_only:
        new_bond_smarts_types = []
        for bond, bond_smarts in zip(bonds, bond_smarts_types):
            if bond.IsInRing():
                if bond_smarts == ":":
                    # No need to do anything; it has to be in a ring
                    pass
                else:
                    if "," in bond_smarts:
                        bond_smarts += ";@"
                    else:
                        bond_smarts += "@"
            else:
                if "," in bond_smarts:
                    bond_smarts += ";!@"
                else:
                    bond_smarts += "!@"
            new_bond_smarts_types.append(bond_smarts)
        bond_smarts_types = new_bond_smarts_types
    canonical_bondtypes = _get_canonical_bondtypes(rdmol, bonds, atom_smarts_types, bond_smarts_types)
    return _TypedMolecule(rdmol, atoms, bonds, atom_smarts_types, bond_smarts_types, canonical_bondtypes)
def _convert_input_to_typed_molecules(mols, atom_typer, bond_typer, match_valences, ring_matches_ring_only):
    """Type every input molecule; returns a list of _TypedMolecule.

    (The original enumerate() index 'molno' was unused, so the loop is now
    a plain comprehension over the molecules.)
    """
    return [_get_typed_molecule(rdmol, atom_typer, bond_typer,
                                match_valences=match_valences,
                                ring_matches_ring_only=ring_matches_ring_only)
            for rdmol in mols]
def _check_atom_classes(molno, num_atoms, atom_classes):
if num_atoms != len(atom_classes):
raise ValueError("mols[%d]: len(atom_classes) must be the same as the number of atoms" % (molno,))
for atom_class in atom_classes:
if not isinstance(atom_class, int):
raise ValueError("mols[%d]: atom_class elements must be integers" % (molno,))
if not (1 <= atom_class < 1000):
raise ValueError("mols[%d]: atom_class elements must be in the range 1 <= value < 1000" %
(molno,))
#############################################
# This section deals with finding the canonical bondtype counts and
# making new TypedMolecule instances where the atoms contain only the
# bond types which are in all of the structures.
# In the future I would like to keep track of the bond types which are
# in the current subgraph. If any subgraph bond type count is ever
# larger than the maximum counts computed across the whole set, then
# prune. But so far I don't have a test set which drives the need for
# that.
# Return a dictionary mapping iterator item to occurence count
def _get_counts(it):
d = defaultdict(int)
for item in it:
d[item] += 1
return dict(d)
# Merge two count dictionaries, returning the smallest count for any
# entry which is in both.
def _intersect_counts(counts1, counts2):
d = {}
for k, v1 in counts1.iteritems():
if k in counts2:
v = min(v1, counts2[k])
d[k] = v
return d
# Figure out which canonical bonds SMARTS occur in every molecule
def _get_canonical_bondtype_counts(typed_mols):
    """Counts of the canonical bondtypes common to *every* typed molecule.

    Returns a dict mapping bondtype -> minimum occurrence count over all
    molecules; any bondtype missing from even one molecule is dropped.
    """
    # Seed with the first molecule, then intersect in the rest one by one.
    counts = _get_counts(typed_mols[0].canonical_bondtypes)
    for typed_mol in typed_mols[1:]:
        counts = _intersect_counts(counts, _get_counts(typed_mol.canonical_bondtypes))
    return counts
# If I know which bondtypes exist in all of the structures, I can
# remove all bonds which aren't in all structures. RDKit's Molecule
# class doesn't let me edit in-place, so I end up making a new one
# which doesn't have unsupported bond types.
def _remove_unknown_bondtypes(typed_mol, supported_canonical_bondtypes):
    """Return a _FragmentedTypedMolecule keeping only supported bonds.

    Builds a new RDKit molecule with all of typed_mol's atoms but only the
    bonds whose canonical bondtype is in supported_canonical_bondtypes.
    Dropping bonds may split the molecule into disconnected fragments.
    (RDKit molecules can't be edited in place, hence the EditableMol copy.)
    """
    emol = Chem.EditableMol(Chem.Mol())
    # Copy all of the atoms, even those which don't have any bonds.
    for atom in typed_mol.rdmol_atoms:
        emol.AddAtom(atom)
    # Copy over all the bonds with a supported bond type.
    # Make sure to update the bond SMARTS and canonical bondtype lists.
    orig_bonds = []
    new_bond_smarts_types = []
    new_canonical_bondtypes = []
    for bond, bond_smarts, canonical_bondtype in zip(typed_mol.rdmol_bonds, typed_mol.bond_smarts_types,
                                                     typed_mol.canonical_bondtypes):
        if canonical_bondtype in supported_canonical_bondtypes:
            orig_bonds.append(bond)
            new_bond_smarts_types.append(bond_smarts)
            new_canonical_bondtypes.append(canonical_bondtype)
            emol.AddBond(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx(), bond.GetBondType())
    new_mol = emol.GetMol()
    return _FragmentedTypedMolecule(new_mol, list(new_mol.GetAtoms()),
                                    typed_mol.rdmol_atoms, orig_bonds,
                                    typed_mol.atom_smarts_types, new_bond_smarts_types,
                                    new_canonical_bondtypes)
# The molecule at this point has been (potentially) fragmented by
# removing bonds with unsupported bond types. The MCS cannot contain
# more atoms than the fragment of a given molecule with the most
# atoms, and the same for bonds. Find those upper limits. Note that
# the fragment with the most atoms is not necessarily the one with the
# most bonds.
def _find_upper_fragment_size_limits(rdmol, atoms):
    """Return (max atoms, max bonds) over the fragments of rdmol.

    The MCS can't have more atoms (or bonds) than the largest fragment of
    any single molecule; note the two maxima may come from *different*
    fragments.  Bonds are counted twice (once per endpoint) then halved.
    """
    max_num_atoms = max_twice_num_bonds = 0
    for atom_indices in Chem.GetMolFrags(rdmol):
        num_atoms = len(atom_indices)
        if num_atoms > max_num_atoms:
            max_num_atoms = num_atoms
        # Every bond is connected to two atoms, so this is the
        # simplest way to count the number of bonds in the fragment.
        twice_num_bonds = 0
        for atom_index in atom_indices:
            # XXX Why is there no 'atom.GetNumBonds()'?
            twice_num_bonds += sum(1 for bond in atoms[atom_index].GetBonds())
        if twice_num_bonds > max_twice_num_bonds:
            max_twice_num_bonds = twice_num_bonds
    return max_num_atoms, max_twice_num_bonds // 2
####### Convert the selected TypedMolecule into an EnumerationMolecule
# I convert one of the typed fragment molecules (specifically, the one
# with the smallest largest fragment score) into a list of
# EnumerationMolecule instances. Each fragment from the typed molecule
# gets turned into an EnumerationMolecule.
# An EnumerationMolecule contains the data I need to enumerate all of
# its subgraphs.
# An EnumerationMolecule contains a list of 'Atom's and list of 'Bond's.
# Atom and Bond indices are offsets into those respective lists.
# An Atom has a list of "bond_indices", which are offsets into the bonds.
# A Bond has a 2-element list of "atom_indices", which are offsets into the atoms.
EnumerationMolecule = collections.namedtuple("Molecule", "rdmol atoms bonds directed_edges")
Atom = collections.namedtuple("Atom", "real_atom atom_smarts bond_indices is_in_ring")
Bond = collections.namedtuple("Bond", "real_bond bond_smarts canonical_bondtype atom_indices is_in_ring")
# A Bond is linked to by two 'DirectedEdge's; one for each direction.
# The DirectedEdge.bond_index references the actual RDKit bond instance.
# 'end_atom_index' is the index of the destination atom of the directed edge
# This is used in a 'directed_edges' dictionary so that
# [edge.end_atom_index for edge in directed_edges[atom_index]]
# is the list of all atom indices connected to 'atom_index'
DirectedEdge = collections.namedtuple("DirectedEdge",
"bond_index end_atom_index")
# A Subgraph is a list of atom and bond indices in an EnumerationMolecule
Subgraph = collections.namedtuple("Subgraph", "atom_indices bond_indices")
def _get_typed_fragment(typed_mol, atom_indices):
    """Extract the connected fragment given by atom_indices as a TypedFragment.

    Builds a new RDKit molecule containing only those atoms and the bonds
    between them, remapping atom indices to 0..len(atom_indices)-1 while
    carrying along the SMARTS type lists and original atom/bond references.
    """
    rdmol = typed_mol.rdmol
    rdmol_atoms = typed_mol.rdmol_atoms
    # I need to make a new RDKit Molecule containing only the fragment.
    # XXX Why is that? Do I use the molecule for more than the number of atoms and bonds?
    # Copy over the atoms
    emol = Chem.EditableMol(Chem.Mol())
    atom_smarts_types = []
    atom_map = {}  # old atom index -> new (fragment-local) atom index
    for i, atom_index in enumerate(atom_indices):
        atom = rdmol_atoms[atom_index]
        emol.AddAtom(atom)
        atom_smarts_types.append(typed_mol.atom_smarts_types[atom_index])
        atom_map[atom_index] = i
    # Copy over the bonds.
    orig_bonds = []
    bond_smarts_types = []
    new_canonical_bondtypes = []
    for bond, orig_bond, bond_smarts, canonical_bondtype in zip(
            rdmol.GetBonds(), typed_mol.orig_bonds,
            typed_mol.bond_smarts_types, typed_mol.canonical_bondtypes):
        begin_atom_idx = bond.GetBeginAtomIdx()
        end_atom_idx = bond.GetEndAtomIdx()
        # count == 2: both endpoints in the fragment; 0: neither; 1: impossible
        # for a proper connected fragment (would mean a bond crosses fragments).
        count = (begin_atom_idx in atom_map) + (end_atom_idx in atom_map)
        # Double check that I have a proper fragment
        if count == 2:
            bond_smarts_types.append(bond_smarts)
            new_canonical_bondtypes.append(canonical_bondtype)
            emol.AddBond(atom_map[begin_atom_idx], atom_map[end_atom_idx], bond.GetBondType())
            orig_bonds.append(orig_bond)
        elif count == 1:
            raise AssertionError("connected/disconnected atoms?")
    return TypedFragment(emol.GetMol(),
                         [typed_mol.orig_atoms[atom_index] for atom_index in atom_indices],
                         orig_bonds,
                         atom_smarts_types, bond_smarts_types, new_canonical_bondtypes)
def _fragmented_mol_to_enumeration_mols(typed_mol, minNumAtoms=2):
    """Convert each large-enough fragment into an EnumerationMolecule.

    Fragments smaller than minNumAtoms are skipped.  Each atom gets its
    bracketed SMARTS term and bond index list; each bond gets a pair of
    DirectedEdge entries (one per direction) for the subgraph enumerator.
    Results are sorted largest-fragment-first.
    """
    if minNumAtoms < 2:
        raise ValueError("minNumAtoms must be at least 2")
    fragments = []
    for atom_indices in Chem.GetMolFrags(typed_mol.rdmol):
        # No need to even look at fragments which are too small.
        if len(atom_indices) < minNumAtoms:
            continue
        # Convert a fragment from the TypedMolecule into a new
        # TypedMolecule containing only that fragment.
        # You might think I could merge 'get_typed_fragment()' with
        # the code to generate the EnumerationMolecule. You're
        # probably right. This code reflects history. My original code
        # didn't break the typed molecule down to its fragments.
        typed_fragment = _get_typed_fragment(typed_mol, atom_indices)
        rdmol = typed_fragment.rdmol
        atoms = []
        for atom, orig_atom, atom_smarts_type in zip(rdmol.GetAtoms(), typed_fragment.orig_atoms,
                                                     typed_fragment.atom_smarts_types):
            bond_indices = [bond.GetIdx() for bond in atom.GetBonds()]
            #assert atom.GetSymbol() == orig_atom.GetSymbol()
            # Wrap the type term in brackets to make a valid SMARTS atom.
            atom_smarts = '[' + atom_smarts_type + ']'
            atoms.append(Atom(atom, atom_smarts, bond_indices, orig_atom.IsInRing()))
        directed_edges = collections.defaultdict(list)
        bonds = []
        for bond_index, (bond, orig_bond, bond_smarts, canonical_bondtype) in enumerate(
                zip(rdmol.GetBonds(), typed_fragment.orig_bonds,
                    typed_fragment.bond_smarts_types, typed_fragment.canonical_bondtypes)):
            atom_indices = [bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]
            bonds.append(Bond(bond, bond_smarts, canonical_bondtype, atom_indices, orig_bond.IsInRing()))
            # One directed edge in each direction, so neighbors of an atom
            # can be enumerated from either endpoint.
            directed_edges[atom_indices[0]].append(DirectedEdge(bond_index, atom_indices[1]))
            directed_edges[atom_indices[1]].append(DirectedEdge(bond_index, atom_indices[0]))
        fragment = EnumerationMolecule(rdmol, atoms, bonds, dict(directed_edges))
        fragments.append(fragment)
    # Optimistically try the largest fragments first
    fragments.sort(key = lambda fragment: len(fragment.atoms), reverse=True)
    return fragments
####### Canonical SMARTS generation using Weininger, Weininger, and Weininger's CANGEN
# CANGEN "combines two separate algorithms, CANON and GENES. The
# first stage, CANON, labels a molecualr structure with canonical
# labels. ... Each atom is given a numerical label on the basis of its
# topology. In the second stage, GENES generates the unique SMILES
# ... . [It] selects the starting atom and makes branching decisions
# by referring to the canonical labels as needed."
# CANON is based on the fundamental theorem of arithmetic, that is,
# the unique prime factorization theorem. Which means I need about as
# many primes as I have atoms.
# I could have a fixed list of a few thousand primes but I don't like
# having a fixed upper limit to my molecule size. I modified the code
# Georg Schoelly posted at http://stackoverflow.com/a/568618/64618 .
# This is one of many ways to generate an infinite sequence of primes.
def gen_primes():
    """Yield the primes 2, 3, 5, 7, ... indefinitely (incremental sieve).

    'composites' maps each upcoming composite number to the list of primes
    known to divide it; any candidate absent from the map is prime.
    Adapted from Georg Schoelly's version at
    http://stackoverflow.com/a/568618/64618 .
    """
    composites = defaultdict(list)
    candidate = 2
    while True:
        factors = composites.pop(candidate, None)
        if factors is None:
            # No recorded factors: candidate is prime. Its first multiple
            # not yet marked is its square.
            yield candidate
            composites[candidate * candidate].append(candidate)
        else:
            # Push each factor forward to the next multiple it divides.
            for p in factors:
                composites[p + candidate].append(p)
        candidate += 1
_prime_stream = gen_primes()
# Code later on uses _primes[n] and if that fails, calls _get_nth_prime(n)
_primes = []
def _get_nth_prime(n):
# Keep appending new primes from the stream until I have enough.
current_size = len(_primes)
while current_size <= n:
_primes.append(next(_prime_stream))
current_size += 1
return _primes[n]
# Prime it with more values then will likely occur
_get_nth_prime(1000)
###
# The CANON algorithm is documented as:
# (1) Set atomic vector to initial invariants. Go to step 3.
# (2) Set vector to product of primes corresponding to neighbors' ranks.
# (3) Sort vector, maintaining stability over previous ranks.
# (4) Rank atomic vector.
# (5) If not invariants partitioning, go to step 2.
# (6) On first pass, save partitioning as symmetry classes [not used here]
# (7) If highest rank is smaller than number of nodes, break ties, go to step 2
# (8) ... else done.
# I track the atom information as a list of CangenNode instances.
class CangenNode(object):
    """Per-atom record used by the CANON ranking and SMARTS generation.

    Using __slots__ improves get_initial_cangen_nodes performance by over
    10% and dropped the overall time (in one benchmark) from 0.75 to 0.73 s.
    """
    __slots__ = ["index", "atom_smarts", "value", "neighbors", "rank", "outgoing_edges"]
    def __init__(self, index, atom_smarts):
        # Position of this atom within the subgraph.
        self.index = index
        # Atom SMARTS text, emitted verbatim into the generated pattern.
        self.atom_smarts = atom_smarts
        # Mutable state used and updated by the canonicalization passes.
        self.value = 0
        self.rank = 0
        # Adjacent CangenNodes, and the OutgoingEdges leaving this atom.
        self.neighbors = []
        self.outgoing_edges = []
# The outgoing edge information is used to generate the SMARTS output.
# The index numbers are offsets in the subgraph, not in the original molecule.
# Fields: from_atom_index - subgraph index of the source atom;
#   bond_index - bond offset in the EnumerationMolecule;
#   bond_smarts - SMARTS text for the bond;
#   other_node_idx / other_node - subgraph index and CangenNode of the
#   atom at the far end of the bond.
OutgoingEdge = collections.namedtuple("OutgoingEdge",
                                      "from_atom_index bond_index bond_smarts other_node_idx other_node")
# Convert a Subgraph of a given EnumerationMolecule into a list of
# CangenNodes. This contains the more specialized information I need
# for canonicalization and for SMARTS generation.
def get_initial_cangen_nodes(subgraph, enumeration_mol, atom_assignment, do_initial_assignment=True):
    """Build the CangenNode list for 'subgraph' of 'enumeration_mol'.

    atom_assignment maps an invariant-label string to a consistent integer
    (it is indexed with [], so it must create-or-reuse on lookup).
    When do_initial_assignment is true, each node.value is set to that
    integer invariant (step 1 of CANON); otherwise values are left at 0.
    Returns the list of CangenNodes, ordered by subgraph atom position.
    """
    # The subgraph contains a set of atom and bond indices in the enumeration_mol.
    # The CangenNode corresponds to an atom in the subgraph, plus relations
    # to other atoms in the subgraph.
    # I need to convert from offsets in molecule space to offset in subgraph space.
    # Map from enumeration mol atom indices to subgraph/CangenNode list indices
    atom_map = {}
    cangen_nodes = []
    atoms = enumeration_mol.atoms
    canonical_labels = []
    for i, atom_index in enumerate(subgraph.atom_indices):
        atom_map[atom_index] = i
        cangen_nodes.append(CangenNode(i, atoms[atom_index].atom_smarts))
        canonical_labels.append([])
    # Build the neighbor and directed edge lists
    for bond_index in subgraph.bond_indices:
        bond = enumeration_mol.bonds[bond_index]
        from_atom_index, to_atom_index = bond.atom_indices
        # Translate molecule-space atom indices into subgraph space.
        from_subgraph_atom_index = atom_map[from_atom_index]
        to_subgraph_atom_index = atom_map[to_atom_index]
        from_node = cangen_nodes[from_subgraph_atom_index]
        to_node = cangen_nodes[to_subgraph_atom_index]
        from_node.neighbors.append(to_node)
        to_node.neighbors.append(from_node)
        # Each endpoint collects the canonical bond type for its invariant.
        canonical_bondtype = bond.canonical_bondtype
        canonical_labels[from_subgraph_atom_index].append(canonical_bondtype)
        canonical_labels[to_subgraph_atom_index].append(canonical_bondtype)
        # Record the edge in both directions for SMARTS generation.
        from_node.outgoing_edges.append(
            OutgoingEdge(from_subgraph_atom_index, bond_index, bond.bond_smarts,
                         to_subgraph_atom_index, to_node))
        to_node.outgoing_edges.append(
            OutgoingEdge(to_subgraph_atom_index, bond_index, bond.bond_smarts,
                         from_subgraph_atom_index, from_node))
    if do_initial_assignment:
        # Do the initial graph invariant assignment. (Step 1 of the CANON algorithm)
        # These are consistent only inside of the given 'atom_assignment' lookup.
        for atom_index, node, canonical_label in zip(subgraph.atom_indices, cangen_nodes, canonical_labels):
            # The initial invariant is the sorted canonical bond labels
            # plus the atom smarts, separated by newline characters.
            #
            # This is equivalent to a circular fingerprint of width 2, and
            # gives more unique information than the Weininger method.
            canonical_label.sort()
            canonical_label.append(atoms[atom_index].atom_smarts)
            label = "\n".join(canonical_label)
            # The downside of using a string is that I need to turn it
            # into a number which is consistent across all of the SMARTS I
            # generate as part of the MCS search. Use a lookup table for
            # that which creates a new number if the label wasn't seen
            # before, or uses the old one if it was.
            node.value = atom_assignment[label]
    return cangen_nodes
# Rank a sorted list (by value) of CangenNodes
def rerank(cangen_nodes):
    """Assign dense 1-based ranks to nodes already sorted by .value.

    Nodes with equal values share a rank.  The first rank is 1, in line
    with the Weininger paper (required by the tie-breaking step of canon).
    """
    current_rank = 0
    previous_value = -1
    for node in cangen_nodes:
        if node.value != previous_value:
            current_rank += 1
            previous_value = node.value
        node.rank = current_rank
# Given a start/end range in the CangenNodes, sorted by value,
# find the start/end for subranges with identical values
def find_duplicates(cangen_nodes, start, end):
    """Return [(run_start, run_end), ...] for each run of repeated values.

    Ranges are half-open and only runs of length >= 2 are reported.
    The nodes in [start:end) must already be sorted by .value.
    """
    runs = []
    last_value = -1
    run_length = 0
    run_start = start
    for position in range(start, end):
        value = cangen_nodes[position].value
        if value == last_value:
            run_length += 1
        else:
            if run_length > 1:
                # Close out the previous run of duplicates.
                runs.append( (run_start, position) )
            run_length = 1
            last_value = value
            run_start = position
    if run_length > 1:
        # The final elements formed a run of duplicates.
        runs.append( (run_start, end) )
    return runs
#@profile
def canon(cangen_nodes):
    """Assign canonical ranks to the nodes (the Weininger CANON stage).

    Precondition: each node.value holds its initial invariant.  The ranks
    are iteratively refined using products of neighbor primes, with ties
    broken per the Weininger et al. CANGEN procedure.  Only .value and
    .rank are modified; the list is re-sorted by subgraph atom index
    before returning.
    """
    # Precondition: node.value is set to the initial invariant
    # (1) Set atomic vector to initial invariants (assumed on input)
    # Do the initial ranking
    cangen_nodes.sort(key = lambda node: node.value)
    rerank(cangen_nodes)
    # Keep refining the sort order until it's unambiguous
    master_sort_order = cangen_nodes[:]
    # Find the start/end range for each stretch of duplicates
    duplicates = find_duplicates(cangen_nodes, 0, len(cangen_nodes))
    PRIMES = _primes # micro-optimization; make this a local name lookup
    while duplicates:
        # (2) Set vector to product of primes corresponding to neighbor's ranks
        for node in cangen_nodes:
            try:
                node.value = PRIMES[node.rank]
            except IndexError:
                # Cache miss: extend the global prime list on demand.
                node.value = _get_nth_prime(node.rank)
        for node in cangen_nodes:
            # Apply the fundamental theorem of arithmetic; compute the
            # product of the neighbors' primes
            p = 1
            for neighbor in node.neighbors:
                p *= neighbor.value
            node.value = p
        # (3) Sort vector, maintaining stability over previous ranks
        # (I maintain stability by refining ranges in the
        # master_sort_order based on the new ranking)
        cangen_nodes.sort(key = lambda node: node.value)
        # (4) rank atomic vector
        rerank(cangen_nodes)
        # See if any of the duplicates have been resolved.
        new_duplicates = []
        unchanged = True # This is buggy? Need to check the entire state XXX
        for (start, end) in duplicates:
            # Special case when there's only two elements to store.
            # This optimization sped up cangen by about 8% because I
            # don't go through the sort machinery
            if start+2 == end:
                node1, node2 = master_sort_order[start], master_sort_order[end-1]
                if node1.value > node2.value:
                    master_sort_order[start] = node2
                    master_sort_order[end-1] = node1
            else:
                subset = master_sort_order[start:end]
                subset.sort(key = lambda node: node.value)
                master_sort_order[start:end] = subset
            subset_duplicates = find_duplicates(master_sort_order, start, end)
            new_duplicates.extend(subset_duplicates)
            if unchanged:
                # Have we distinguished any of the duplicates?
                if not (len(subset_duplicates) == 1 and subset_duplicates[0] == (start, end)):
                    unchanged = False
        # (8) ... else done
        # Yippee! No duplicates left. Everything has a unique value.
        if not new_duplicates:
            break
        # (5) If not invariant partitioning, go to step 2
        if not unchanged:
            duplicates = new_duplicates
            continue
        duplicates = new_duplicates
        # (6) On first pass, save partitioning as symmetry classes
        pass # I don't need this information
        # (7) If highest rank is smaller than number of nodes, break ties, go to step 2
        # I follow the Weininger algorithm and use 2*rank or 2*rank-1.
        # This requires that the first rank is 1, not 0.
        for node in cangen_nodes:
            node.value = node.rank * 2
        # The choice of tie is arbitrary. Weininger breaks the first tie.
        # I break the last tie because it's faster in Python to delete
        # from the end than the beginning.
        start, end = duplicates[-1]
        # NOTE(review): 'duplicates' ranges were computed over
        # master_sort_order, but this indexes cangen_nodes -- confirm the
        # two orderings agree here (possibly intended master_sort_order).
        cangen_nodes[start].value -= 1
        if end == start+2:
            # There were only two nodes with the same value. Now there
            # are none. Remove information about that duplicate.
            del duplicates[-1]
        else:
            # The first N-1 values are still duplicates.
            duplicates[-1] = (start+1, end)
    rerank(cangen_nodes)
    # Restore to the original order (ordered by subgraph atom index)
    # because the bond information used during SMARTS generation
    # references atoms by that order.
    cangen_nodes.sort(key=lambda node: node.index)
def get_closure_label(bond_smarts, closure):
    """Return the bond SMARTS followed by the ring-closure number.

    Closures 0-9 use a single digit; larger ones use the SMILES
    '%nn' two-digit notation.
    """
    if closure >= 10:
        return "%s%%%02d" % (bond_smarts, closure)
    return bond_smarts + str(closure)
# Precompute the initial pool of ring-closure numbers (1-100), kept as a
# heap so the smallest free closure number is always handed out first.
# Portability fix: wrap range() in list() -- on Python 3 range() is a lazy
# sequence and heapq.heapify() requires a mutable list (identical result
# on Python 2, where range() already returns a list).
_available_closures = list(range(1, 101))
heapq.heapify(_available_closures)
# The Weininger paper calls this 'GENES'; I call it "generate_smarts."
# I use a different algorithm than GENES. It still uses two
# passes. The first pass identifies the closure bonds using a
# depth-first search. The second pass builds the SMILES string.
def generate_smarts(cangen_nodes):
    """Build the SMARTS string for the subgraph described by cangen_nodes.

    Atom and bond text comes from .atom_smarts/.bond_smarts; traversal
    priority comes from the nodes' .rank values (lowest rank first).
    Side effect: each node's outgoing_edges list is sorted by rank.
    Returns the SMARTS string.
    """
    # Pick (one of) the lowest-ranked atoms as the traversal root.
    start_index = 0
    best_rank = cangen_nodes[0].rank
    for i, node in enumerate(cangen_nodes):
        if node.rank < best_rank:
            best_rank = node.rank
            start_index = i
        # Lower-ranked neighbors are visited first in both passes.
        node.outgoing_edges.sort(key=lambda edge: edge.other_node.rank)
    visited_atoms = [0] * len(cangen_nodes)
    closure_bonds = set()
    ## First, find the closure bonds using a DFS
    stack = []
    atom_idx = start_index
    stack.extend(reversed(cangen_nodes[atom_idx].outgoing_edges))
    visited_atoms[atom_idx] = True
    while stack:
        edge = stack.pop()
        if visited_atoms[edge.other_node_idx]:
            # Both endpoints already seen: this bond closes a ring.
            closure_bonds.add(edge.bond_index)
        else:
            visited_atoms[edge.other_node_idx] = 1
            for next_edge in reversed(cangen_nodes[edge.other_node_idx].outgoing_edges):
                if next_edge.other_node_idx == edge.from_atom_index:
                    # Don't worry about going back along the same route
                    continue
                stack.append(next_edge)
    # Working copy of the ring-closure number pool (a heap).
    available_closures = _available_closures[:]
    # Maps a closure bond index -> the closure number currently open for it.
    unclosed_closures = {}
    # I've identified the closure bonds.
    # Use a stack machine to traverse the graph and build the SMARTS.
    # The instruction contains one of 4 instructions, with associated data
    #  0: add the atom's SMARTS and put its connections on the machine
    #  1: add the bond's SMARTS and put the other atom on the machine
    #  3: add a ')' to the SMARTS
    #  4: add a '(' and the bond SMARTS
    smiles_terms = []
    stack = [(0, (start_index, -1))]
    while stack:
        action, data = stack.pop()
        if action == 0:
            # Add an atom.
            # The 'while 1:' emulates a goto for the special case
            # where the atom is connected to only one other atom. I
            # don't need to use the stack machinery for that case, and
            # can speed up this function by about 10%.
            while 1:
                # Look at the bonds starting from this atom
                num_neighbors = 0
                atom_idx, prev_bond_idx = data
                smiles_terms.append(cangen_nodes[atom_idx].atom_smarts)
                outgoing_edges = cangen_nodes[atom_idx].outgoing_edges
                for outgoing_edge in outgoing_edges:
                    bond_idx = outgoing_edge.bond_index
                    # Is this a ring closure bond?
                    if bond_idx in closure_bonds:
                        # Have we already seen it before?
                        if bond_idx not in unclosed_closures:
                            # This is new. Add as a ring closure.
                            closure = heappop(available_closures)
                            smiles_terms.append(get_closure_label(outgoing_edge.bond_smarts, closure))
                            unclosed_closures[bond_idx] = closure
                        else:
                            # Second time seeing it: emit the matching
                            # closure digits and recycle the number.
                            closure = unclosed_closures[bond_idx]
                            smiles_terms.append(get_closure_label(outgoing_edge.bond_smarts, closure))
                            heappush(available_closures, closure)
                            del unclosed_closures[bond_idx]
                    else:
                        # This is a new outgoing bond.
                        if bond_idx == prev_bond_idx:
                            # Don't go backwards along the bond I just came in on
                            continue
                        if num_neighbors == 0:
                            # This is the first bond. There's a good chance that
                            # it's the only bond.
                            data = (outgoing_edge.other_node_idx, bond_idx)
                            bond_smarts = outgoing_edge.bond_smarts
                        else:
                            # There are multiple bonds. Can't shortcut.
                            if num_neighbors == 1:
                                # Capture the information for the first bond
                                # This direction doesn't need the (branch) characters.
                                stack.append((0, data))
                                stack.append((1, bond_smarts))
                            # Add information for this bond
                            stack.append((3, None))
                            stack.append((0, (outgoing_edge.other_node_idx, bond_idx)))
                            stack.append((4, outgoing_edge.bond_smarts))
                        num_neighbors += 1
                if num_neighbors != 1:
                    # If there's only one item then goto action==0 again.
                    break
                smiles_terms.append(bond_smarts)
        elif action == 1:
            # Process a bond which does not need '()'s
            smiles_terms.append(data) # 'data' is bond_smarts
            continue
        elif action == 3:
            smiles_terms.append(')')
        elif action == 4:
            smiles_terms.append('(' + data) # 'data' is bond_smarts
        else:
            raise AssertionError
    return "".join(smiles_terms)
# Full canonicalization is about 5% slower unless there are well over 100
# structures in the data set, which is not expected to be common.
# The canon() step stays commented out until there's a better solution
# (e.g., adapt based on the input size).
def make_canonical_smarts(subgraph, enumeration_mol, atom_assignment):
    """Generate a (semi-)canonical SMARTS for the subgraph.

    Uses the initial invariant assignment only; see the note above about
    why full canonicalization is disabled.
    """
    nodes = get_initial_cangen_nodes(subgraph, enumeration_mol, atom_assignment, True)
    #canon(nodes)
    return generate_smarts(nodes)
## def make_semicanonical_smarts(subgraph, enumeration_mol, atom_assignment):
## cangen_nodes = get_initial_cangen_nodes(subgraph, enumeration_mol, atom_assignment, True)
## # There's still some order because of the canonical bond typing, but it isn't perfect
## #canon(cangen_nodes)
## smarts = generate_smarts(cangen_nodes)
## return smarts
def make_arbitrary_smarts(subgraph, enumeration_mol, atom_assignment):
    """Generate a SMARTS for the subgraph using an arbitrary atom order.

    No invariant assignment or canonicalization; each node's value is
    simply its position in the subgraph.
    """
    nodes = get_initial_cangen_nodes(subgraph, enumeration_mol, atom_assignment, False)
    # Use an arbitrary (index) order.
    for position, node in enumerate(nodes):
        node.value = position
    return generate_smarts(nodes)
############## Subgraph enumeration ##################
# A 'seed' is a subgraph containing a subset of the atoms and bonds in
# the graph. The idea is to try all of the ways in which to grow the
# seed to make a new seed which contains the original seed.
# There are two ways to grow a seed:
# - add a bond which is not in the seed but where both of its
# atoms are in the seed
# - add a bond which is not in the seed but where one of its
# atoms is in the seed (and the other is not)
# The algorithm takes the seed, and finds all of both categories of
# bonds. If there are N total such bonds then there are 2**N-1
# possible new seeds which contain the original seed. This is simply
# the powerset of the possible bonds, excepting the case with no
# bonds.
# Generate all 2**N-1 new seeds. Place the new seeds back in the
# priority queue to check for additional growth.
# I place the seeds in priority queue, sorted by score (typically the
# number of atoms) to preferentially search larger structures first. A
# simple stack or deque wouldn't work because the new seeds have
# between 1 to N-1 new atoms and bonds.
# Some useful preamble code
# Taken from the Python documentation
def _powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    # Taken from the itertools recipes in the Python documentation:
    # concatenate the combinations of every size from 0 to len(items).
    items = list(iterable)
    by_size = (combinations(items, size) for size in range(len(items) + 1))
    return chain.from_iterable(by_size)
# Same as the above except the empty term is not returned
def _nonempty_powerset(iterable):
    """nonempty_powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)

    Like _powerset() but the leading empty tuple is skipped.
    """
    s = list(iterable)
    it = chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
    # Discard the first (empty) combination.  Portability fix: the
    # built-in next(it) works on Python 2.6+ and Python 3, unlike the
    # Python-2-only it.next() method.
    next(it)
    return it
# Call this to get a new unique value. Used to break ties in the
# priority queue.
# Portability fix: the original was itertools.count().next, a bound
# method that exists only on Python 2.  Wrapping the counter in a closure
# keeps the same call-it-and-get-the-next-int interface on both versions.
def _make_tiebreaker():
    """Return a zero-argument function yielding 0, 1, 2, ... on each call."""
    counter = itertools.count()
    def tiebreaker():
        return next(counter)
    return tiebreaker
tiebreaker = _make_tiebreaker()
### The enumeration code
# Given a set of atoms, find all of the ways to leave those atoms.
# There are two possibilities:
# 1) bonds; which connect two atoms which are already in 'atom_indices'
# 2) directed edges; which go to atoms that aren't in 'atom_indices'
# and which aren't already in visited_bond_indices. These are external
# to the subgraph.
# The return is a 2-element tuple containing:
# (the list of bonds from (1), the list of directed edges from (2))
def find_extensions(atom_indices, visited_bond_indices, directed_edges):
    """Classify the unvisited edges leaving the current subgraph atoms.

    Returns a 2-tuple: (list of bond indices connecting two atoms that are
    both already in atom_indices, list of DirectedEdges leading to atoms
    outside the subgraph).  Edges whose bond is in visited_bond_indices
    are skipped entirely.
    """
    internal_bond_set = set()
    external_edges = []
    for atom_index in atom_indices:
        for edge in directed_edges[atom_index]:
            # Skip outgoing edges which have already been evaluated.
            if edge.bond_index in visited_bond_indices:
                continue
            if edge.end_atom_index in atom_indices:
                # Case 1: connects two subgraph atoms (seen from each end,
                # hence the set to deduplicate).
                internal_bond_set.add(edge.bond_index)
            else:
                # Case 2: leads to a new (external) atom.
                external_edges.append(edge)
    return list(internal_bond_set), external_edges
# Given the 2-element tuple (internal_bonds, external_edges),
# construct all of the ways to combine them to generate a new subgraph
# from the old one. This is done via a powerset.
# This generates a four-element tuple containing:
#  - the set of newly added atom indices (or None)
#  - the new subgraph
#  - upper bounds on the atoms and bonds still reachable beyond it
def all_subgraph_extensions(enumeration_mol, subgraph, visited_bond_indices, internal_bonds, external_edges):
    """Generate every one-step growth of 'subgraph'.

    Yields (new_atoms_or_None, Subgraph, num_possible_atoms,
    num_possible_bonds) for each non-empty subset combination of the
    internal bonds and external edges.
    """
    #print "Subgraph", len(subgraph.atom_indices), len(subgraph.bond_indices), "X", enumeration_mol.rdmol.GetNumAtoms()
    #print "subgraph atoms", subgraph.atom_indices
    #print "subgraph bonds", subgraph.bond_indices
    #print "internal", internal_bonds, "external", external_edges
    # only internal bonds
    if not external_edges:
        #assert internal_bonds, "Must have at least one internal bond"
        it = _nonempty_powerset(internal_bonds)
        for internal_bond in it:
            # Make the new subgraphs
            bond_indices = set(subgraph.bond_indices)
            bond_indices.update(internal_bond)
            # Internal bonds add no atoms, so nothing new is reachable.
            yield None, Subgraph(subgraph.atom_indices, frozenset(bond_indices)), 0, 0
        return
    # only external edges
    if not internal_bonds:
        it = _nonempty_powerset(external_edges)
        # Bonds that must not be counted when sizing the remaining growth.
        exclude_bonds = set(chain(visited_bond_indices, (edge.bond_index for edge in external_edges)))
        for external_ext in it:
            new_atoms = frozenset(ext.end_atom_index for ext in external_ext)
            atom_indices = frozenset(chain(subgraph.atom_indices, new_atoms))
            bond_indices = frozenset(chain(subgraph.bond_indices,
                                           (ext.bond_index for ext in external_ext)))
            num_possible_atoms, num_possible_bonds = find_extension_size(
                enumeration_mol, new_atoms, exclude_bonds, external_ext)
            #num_possible_atoms = len(enumeration_mol.atoms) - len(atom_indices)
            #num_possible_bonds = len(enumeration_mol.bonds) - len(bond_indices)
            yield new_atoms, Subgraph(atom_indices, bond_indices), num_possible_atoms, num_possible_bonds
        return
    # Both internal bonds and external edges
    internal_powerset = list(_powerset(internal_bonds))
    external_powerset = _powerset(external_edges)
    exclude_bonds = set(chain(visited_bond_indices, (edge.bond_index for edge in external_edges)))
    for external_ext in external_powerset:
        if not external_ext:
            # No external extensions. Must have at least one internal bond.
            # ([1:] skips the empty subset of the internal powerset.)
            for internal_bond in internal_powerset[1:]:
                bond_indices = set(subgraph.bond_indices)
                bond_indices.update(internal_bond)
                yield None, Subgraph(subgraph.atom_indices, bond_indices), 0, 0
        else:
            new_atoms = frozenset(ext.end_atom_index for ext in external_ext)
            atom_indices = frozenset(chain(subgraph.atom_indices, new_atoms))
            # no_go_bond_indices = set(chain(visited_bond_indices, extern
            bond_indices = frozenset(chain(subgraph.bond_indices,
                                           (ext.bond_index for ext in external_ext)))
            # The bound is computed once per external subset; it is valid
            # for every internal-bond combination below.
            num_possible_atoms, num_possible_bonds = find_extension_size(
                enumeration_mol, atom_indices, exclude_bonds, external_ext)
            #num_possible_atoms = len(enumeration_mol.atoms) - len(atom_indices)
            for internal_bond in internal_powerset:
                bond_indices2 = frozenset(chain(bond_indices, internal_bond))
                #num_possible_bonds = len(enumeration_mol.bonds) - len(bond_indices2)
                yield new_atoms, Subgraph(atom_indices, bond_indices2), num_possible_atoms, num_possible_bonds
def find_extension_size(enumeration_mol, known_atoms, exclude_bonds, directed_edges):
    """Count the atoms and bonds reachable beyond the given directed edges.

    Flood-fills outward from each edge's end atom, never crossing a bond
    in exclude_bonds and never re-counting an atom in known_atoms.
    Returns (num_remaining_atoms, num_remaining_bonds).
    """
    seen_atoms = set(known_atoms)
    seen_bonds = set(exclude_bonds)
    extra_atoms = extra_bonds = 0
    for edge in directed_edges:
        # Simple depth-first search from this edge's far endpoint.
        pending = [edge.end_atom_index]
        while pending:
            atom_index = pending.pop()
            for next_edge in enumeration_mol.directed_edges[atom_index]:
                bond_index = next_edge.bond_index
                if bond_index in seen_bonds:
                    continue
                seen_bonds.add(bond_index)
                extra_bonds += 1
                neighbor = next_edge.end_atom_index
                if neighbor in seen_atoms:
                    continue
                seen_atoms.add(neighbor)
                extra_atoms += 1
                pending.append(neighbor)
    return extra_atoms, extra_bonds
# Check if a SMARTS is in all targets.
# Uses a dictionary-style API, but please only use matcher[smarts]
# Caches all previous results.
class CachingTargetsMatcher(dict):
    """Dict-like cache mapping a SMARTS string -> "matches every target".

    The first lookup of a SMARTS runs the substructure searches via
    __missing__; later lookups hit the cached boolean.
    """
    def __init__(self, targets):
        # The molecules every SMARTS must match.
        self.targets = targets
        # Bug fix: the original called super(dict, self).__init__(), which
        # resolves the MRO *past* dict and skips dict's own initializer;
        # naming this class is the correct Python 2 form.
        super(CachingTargetsMatcher, self).__init__()
    def __missing__(self, smarts):
        # Called on a cache miss; compute, memoize, and return the answer.
        pat = Chem.MolFromSmarts(smarts)
        if pat is None:
            raise AssertionError("Bad SMARTS: %r" % (smarts,))
        for target in self.targets:
            if not target.HasSubstructMatch(pat):
                # Does not match. No need to continue processing
                self[smarts] = False
                return False
        # TODO: should I move the mismatch structure forward
        # so that it's tested earlier next time?
        # Matches everything
        self[smarts] = True
        return True
##### Different maximization algorithms ######
def prune_maximize_bonds(subgraph, mol, num_remaining_atoms, num_remaining_bonds, best_sizes):
    """Return True when this branch can never beat best_sizes (bonds first).

    A branch is prunable if its maximum possible bond count is below the
    best, or ties it without strictly improving the atom count.
    """
    best_num_atoms, best_num_bonds = best_sizes
    # Upper bound on how many bonds this search direction could still reach.
    max_bonds = len(subgraph.bond_indices) + num_remaining_bonds
    if max_bonds < best_num_bonds:
        return True
    if max_bonds == best_num_bonds:
        # Bond counts tie, so the atom count must strictly improve.
        max_atoms = len(subgraph.atom_indices) + num_remaining_atoms
        if max_atoms <= best_num_atoms:
            return True
    return False
def prune_maximize_atoms(subgraph, mol, num_remaining_atoms, num_remaining_bonds, best_sizes):
    """Return True when this branch can never beat best_sizes (atoms first).

    A branch is prunable if its maximum possible atom count is below the
    best, or ties it without strictly improving the bond count.
    """
    best_num_atoms, best_num_bonds = best_sizes
    # Upper bound on how many atoms this search direction could still reach.
    max_atoms = len(subgraph.atom_indices) + num_remaining_atoms
    if max_atoms < best_num_atoms:
        return True
    if max_atoms == best_num_atoms:
        # Atom counts tie, so the bond count must strictly improve.
        max_bonds = len(subgraph.bond_indices) + num_remaining_bonds
        if max_bonds <= best_num_bonds:
            return True
    return False
##### Callback handlers for storing the "best" information #####
class _SingleBest(object):
    """Track the single best (num_atoms, num_bonds, smarts) seen so far."""
    def __init__(self):
        # -1/-1 and a None SMARTS mean "nothing found yet"; any real
        # match beats them.
        self.best_num_atoms = -1
        self.best_num_bonds = -1
        self.best_smarts = None
        self.sizes = (-1, -1)
    def _new_best(self, num_atoms, num_bonds, smarts):
        # Record a new best match and return its (atoms, bonds) sizes.
        self.best_num_atoms = num_atoms
        self.best_num_bonds = num_bonds
        self.best_smarts = smarts
        self.sizes = (num_atoms, num_bonds)
        return self.sizes
    def get_result(self, completed):
        """Package the current best as an MCSResult."""
        return MCSResult(self.best_num_atoms, self.best_num_bonds,
                         self.best_smarts, completed)
class MCSResult(object):
    """MCS Search results
    Attributes are:
      numAtoms - the number of atoms in the MCS
      numBonds - the number of bonds in the MCS
      smarts - the SMARTS pattern which defines the MCS
      completed - True if the MCS search went to completion. Otherwise False.

    An MCSResult is falsy when no MCS was found (smarts is None).
    """
    def __init__(self, numAtoms, numBonds, smarts, completed):
        self.numAtoms = numAtoms
        self.numBonds = numBonds
        self.smarts = smarts
        self.completed = completed
    def __nonzero__(self):
        # Python 2 truth hook: a result is true when a SMARTS was found.
        return self.smarts is not None
    # Bug fix / portability: Python 3 uses __bool__, not __nonzero__.
    # Without this alias an "empty" MCSResult would be truthy on Python 3.
    __bool__ = __nonzero__
    def __repr__(self):
        return "MCSResult(numAtoms=%d, numBonds=%d, smarts=%r, completed=%d)" % (
            self.numAtoms, self.numBonds, self.smarts, self.completed)
    def __str__(self):
        msg = "MCS %r has %d atoms and %d bonds" % (self.smarts, self.numAtoms, self.numBonds)
        if not self.completed:
            msg += " (timed out)"
        return msg
class SingleBestAtoms(_SingleBest):
    """Keep the match with the most atoms; bond count breaks ties."""
    def add_new_match(self, subgraph, mol, smarts):
        """Record this match if it beats the current best; return best sizes."""
        best_atoms, best_bonds = self.sizes
        num_atoms = len(subgraph.atom_indices)
        if num_atoms < best_atoms:
            return self.sizes
        num_bonds = len(subgraph.bond_indices)
        if num_atoms == best_atoms and num_bonds <= best_bonds:
            # No improvement in atoms or (on a tie) in bonds.
            return self.sizes
        return self._new_best(num_atoms, num_bonds, smarts)
class SingleBestBonds(_SingleBest):
    """Keep the match with the most bonds; atom count breaks ties."""
    def add_new_match(self, subgraph, mol, smarts):
        """Record this match if it beats the current best; return best sizes."""
        best_atoms, best_bonds = self.sizes
        num_bonds = len(subgraph.bond_indices)
        if num_bonds < best_bonds:
            return self.sizes
        num_atoms = len(subgraph.atom_indices)
        if num_bonds == best_bonds and num_atoms <= best_atoms:
            # No improvement in bonds or (on a tie) in atoms.
            return self.sizes
        return self._new_best(num_atoms, num_bonds, smarts)
### Check if there are any ring atoms; used in --complete-rings-only
# This is (yet) another depth-first graph search algorithm
def check_complete_rings_only(smarts, subgraph, enumeration_mol):
    """Check that every ring bond in the subgraph lies on a complete ring.

    Returns True when acceptable, False when a ring bond cannot be closed.
    NOTE(review): when the outer loop exhausts all start bonds the function
    falls off the end and returns None (falsy) -- callers appear to use it
    only in boolean context, so that behaves like False.
    """
    #print "check", smarts, len(subgraph.atom_indices), len(subgraph.bond_indices)
    atoms = enumeration_mol.atoms  # (not used below)
    bonds = enumeration_mol.bonds
    # First, are any of bonds in the subgraph ring bonds in the original structure?
    ring_bonds = []
    for bond_index in subgraph.bond_indices:
        bond = bonds[bond_index]
        if bond.is_in_ring:
            ring_bonds.append(bond_index)
    #print len(ring_bonds), "ring bonds"
    if not ring_bonds:
        # No need to check .. this is an acceptable structure
        return True
    if len(ring_bonds) <= 2:
        # No need to check .. there are no rings of size 2
        return False
    # Otherwise there's more work. Need to ensure that
    # all ring atoms are still in a ring in the subgraph.
    confirmed_ring_bonds = set()
    subgraph_ring_bond_indices = set(ring_bonds)
    for bond_index in ring_bonds:
        #print "start with", bond_index, "in?", bond_index in confirmed_ring_bonds
        if bond_index in confirmed_ring_bonds:
            continue
        # Start a new search, starting from this bond
        from_atom_index, to_atom_index = bonds[bond_index].atom_indices
        # Map from atom index to depth in the bond stack
        atom_depth = {from_atom_index: 0,
                      to_atom_index: 1}
        bond_stack = [bond_index]
        backtrack_stack = []
        prev_bond_index = bond_index
        current_atom_index = to_atom_index
        while 1:
            # Dive downwards, ever downwards
            next_bond_index = next_atom_index = None
            this_is_a_ring = False
            for outgoing_edge in enumeration_mol.directed_edges[current_atom_index]:
                if outgoing_edge.bond_index == prev_bond_index:
                    # Don't loop back
                    continue
                if outgoing_edge.bond_index not in subgraph_ring_bond_indices:
                    # Only advance along ring edges which are in the subgraph
                    continue
                if outgoing_edge.end_atom_index in atom_depth:
                    #print "We have a ring"
                    # It's a ring! Mark everything as being in a ring
                    confirmed_ring_bonds.update(bond_stack[atom_depth[outgoing_edge.end_atom_index]:])
                    confirmed_ring_bonds.add(outgoing_edge.bond_index)
                    if len(confirmed_ring_bonds) == len(ring_bonds):
                        #print "Success!"
                        return True
                    this_is_a_ring = True
                    continue
                # New atom. Need to explore it.
                #print "we have a new bond", outgoing_edge.bond_index, "to atom", outgoing_edge.end_atom_index
                if next_bond_index is None:
                    # This will be the immediate next bond to search in the DFS
                    next_bond_index = outgoing_edge.bond_index
                    next_atom_index = outgoing_edge.end_atom_index
                else:
                    # Otherwise, backtrack and examine the other bonds
                    backtrack_stack.append(
                        (len(bond_stack), outgoing_edge.bond_index, outgoing_edge.end_atom_index) )
            if next_bond_index is None:
                # Could not find a path to take. Might be because we looped back.
                if this_is_a_ring:
                    #assert prev_bond_index in confirmed_ring_bonds, (prev_bond_index, confirmed_ring_bonds)
                    # We did! That means we can backtrack
                    while backtrack_stack:
                        old_size, prev_bond_index, current_atom_index = backtrack_stack.pop()
                        # NOTE(review): 'bond_index' here is the outer
                        # loop's start bond, constant during this search;
                        # possibly 'prev_bond_index' was intended -- confirm.
                        if bond_index not in confirmed_ring_bonds:
                            # Need to explore this path.
                            # Back up and start the search from here
                            del bond_stack[old_size:]
                            break
                    else:
                        # No more backtracking. We fail. Try next bond?
                        # (If it had been successful then the
                        #   len(confirmed_ring_bonds) == len(ring_bonds)
                        #  would have returned True)
                        break
                else:
                    # Didn't find a ring, nowhere to advance
                    return False
            else:
                # Continue deeper
                bond_stack.append(next_bond_index)
                atom_depth[next_atom_index] = len(bond_stack)
                prev_bond_index = next_bond_index
                current_atom_index = next_atom_index
        # If we reached here then try the next bond
        #print "Try again"
class SingleBestAtomsCompleteRingsOnly(_SingleBest):
    """Like SingleBestAtoms, but a candidate is accepted only when its
    ring bonds form complete rings in the subgraph."""
    def add_new_match(self, subgraph, mol, smarts):
        """Record this match if it beats the best and has complete rings."""
        best_atoms, best_bonds = self.sizes
        num_atoms = len(subgraph.atom_indices)
        if num_atoms < best_atoms:
            return self.sizes
        num_bonds = len(subgraph.bond_indices)
        if num_atoms == best_atoms and num_bonds <= best_bonds:
            return self.sizes
        # Bigger than the best so far; accept only if rings are complete.
        if check_complete_rings_only(smarts, subgraph, mol):
            return self._new_best(num_atoms, num_bonds, smarts)
        return self.sizes
class SingleBestBondsCompleteRingsOnly(_SingleBest):
    """Like SingleBestBonds, but a candidate is accepted only when its
    ring bonds form complete rings in the subgraph."""
    def add_new_match(self, subgraph, mol, smarts):
        """Record this match if it beats the best and has complete rings."""
        best_atoms, best_bonds = self.sizes
        num_bonds = len(subgraph.bond_indices)
        if num_bonds < best_bonds:
            return self.sizes
        num_atoms = len(subgraph.atom_indices)
        if num_bonds == best_bonds and num_atoms <= best_atoms:
            return self.sizes
        # Bigger than the best so far; accept only if rings are complete.
        if check_complete_rings_only(smarts, subgraph, mol):
            return self._new_best(num_atoms, num_bonds, smarts)
        return self.sizes
# Dispatch table: (maximize-mode, complete-rings-only?) ->
# (prune function for the search, class that accumulates the best match).
_maximize_options = {
    ("atoms", False): (prune_maximize_atoms, SingleBestAtoms),
    ("atoms", True): (prune_maximize_atoms, SingleBestAtomsCompleteRingsOnly),
    ("bonds", False): (prune_maximize_bonds, SingleBestBonds),
    ("bonds", True): (prune_maximize_bonds, SingleBestBondsCompleteRingsOnly),
    }
###### The engine of the entire system. Enumerate subgraphs and see if they match. #####
def _enumerate_subgraphs(enumeration_mols, prune, atom_assignment, matches_all_targets, hits, timeout):
    """Core MCS search: enumerate connected subgraphs of the fragment
    molecules (best-first, most-bonds-first) and record, in ``hits``,
    every subgraph whose SMARTS matches all of the target molecules.

    :param enumeration_mols: fragment molecules to enumerate subgraphs of
    :param prune: callable deciding whether a subgraph (given the remaining
        atom/bond budget and the best sizes seen so far) cannot improve the
        current best match and should be abandoned
    :param atom_assignment: maps atom types to unique integer ids (used
        when generating SMARTS)
    :param matches_all_targets: mapping-like cache; ``m[smarts]`` is True
        iff the SMARTS matches every target molecule
    :param hits: collector; ``add_new_match`` records a match and returns
        the updated best (atom, bond) sizes
    :param timeout: wall-clock limit in seconds, or None for no limit
    :return: True if the search ran to completion, False if it timed out.
    """
    if timeout is None:
        end_time = None
    else:
        end_time = time.time() + timeout
    seeds = []
    # best_sizes tracks the sizes of the best match found so far; it is
    # threaded through prune() and updated by hits.add_new_match().
    best_sizes = (0, 0)
    # Do a quick check for the not uncommon case where one of the input fragments
    # is the largest substructure or one off from the largest.
    for mol in enumeration_mols:
        atom_range = range(len(mol.atoms))
        bond_set = set(range(len(mol.bonds)))
        subgraph = Subgraph(atom_range, bond_set)
        if not prune(subgraph, mol, 0, 0, best_sizes):
            # Micro-optimization: the largest fragment SMARTS doesn't
            # need to be canonicalized because there will only ever be
            # one match. It's also unlikely that the other largest
            # fragments need canonicalization.
            smarts = make_arbitrary_smarts(subgraph, mol, atom_assignment)
            if matches_all_targets[smarts]:
                best_sizes = hits.add_new_match(subgraph, mol, smarts)
    # Seed the priority queue with every single-bond subgraph.
    for mol in enumeration_mols:
        directed_edges = mol.directed_edges
        # Using 20001 random ChEMBL pairs, timeout=15.0 seconds
        # 1202.6s with original order
        # 1051.9s sorting by (bond.is_in_ring, bond_index)
        # 1009.7s sorting by (bond.is_in_ring + atom1.is_in_ring + atom2.is_in_ring)
        # 1055.2s sorting by (if bond.is_in_ring: 2; else: -(atom1.is_in_ring + atom2.is_in_ring))
        # 1037.4s sorting by (atom1.is_in_ring + atom2.is_in_ring)
        sorted_bonds = list(enumerate(mol.bonds))
        # NOTE(review): the tuple-parameter unpacking below is
        # Python-2-only syntax; a Python 3 port must unpack manually.
        def get_bond_ring_score((bond_index, bond), atoms=mol.atoms):
            a1, a2 = bond.atom_indices
            return bond.is_in_ring + atoms[a1].is_in_ring + atoms[a2].is_in_ring
        sorted_bonds.sort(key = get_bond_ring_score)
        visited_bond_indices = set()
        num_remaining_atoms = len(mol.atoms)-2
        num_remaining_bonds = len(mol.bonds)
        for bond_index, bond in sorted_bonds: #enumerate(mol.bonds): #
            #print "bond_index", bond_index, len(mol.bonds)
            visited_bond_indices.add(bond_index)
            num_remaining_bonds -= 1
            subgraph = Subgraph(bond.atom_indices, frozenset([bond_index]))
            # I lie about the remaining atom/bond sizes here.
            if prune(subgraph, mol, num_remaining_atoms, num_remaining_bonds, best_sizes):
                continue
            # bond.canonical_bondtype doesn't necessarily give the same
            # SMARTS as make_canonical_smarts, but that doesn't matter.
            # 1) I know it's canonical, 2) it's faster, and 3) there is
            # no place else which generates single-bond canonical SMARTS.
            #smarts = make_canonical_smarts(subgraph, mol, atom_assignment)
            smarts = bond.canonical_bondtype
            if matches_all_targets[smarts]:
                best_sizes = hits.add_new_match(subgraph, mol, smarts)
            else:
                # Every single-bond SMARTS was pre-screened against the
                # targets, so a miss here indicates an internal error.
                raise AssertionError("This should never happen: %r" % (smarts,))
                continue
            a1, a2 = bond.atom_indices
            # Edges leaving this bond's two atoms that lead somewhere new.
            outgoing_edges = [e for e in (directed_edges[a1] + directed_edges[a2])
                                  if e.end_atom_index not in bond.atom_indices and e.bond_index not in visited_bond_indices]
            empty_internal = frozenset()
            if not outgoing_edges:
                pass
            else:
                # The priority is the number of bonds in the subgraph, ordered so
                # that the subgraph with the most bonds comes first. Since heapq
                # puts the smallest value first, I reverse the number. The initial
                # subgraphs have 1 bond, so the initial score is -1.
                heappush(seeds, (-1, tiebreaker(), subgraph,
                                 visited_bond_indices.copy(), empty_internal, outgoing_edges,
                                 mol, directed_edges))
    # I made so many subtle mistakes where I used 'subgraph' instead
    # of 'new_subgraph' in the following section that I finally
    # decided to get rid of 'subgraph' and use 'old_subgraph' instead.
    del subgraph
    # Best-first search: repeatedly pop the seed with the most bonds and
    # try every way of extending it by one shell of edges.
    while seeds:
        if end_time:
            if time.time() >= end_time:
                return False
        #print "There are", len(seeds), "seeds", seeds[0][:2]
        score, _, old_subgraph, visited_bond_indices, internal_bonds, external_edges, mol, directed_edges = heappop(seeds)
        new_visited_bond_indices = visited_bond_indices.copy()
        new_visited_bond_indices.update(internal_bonds)
##        for edge in external_edges:
##            assert edge.bond_index not in new_visited_bond_indices
        new_visited_bond_indices.update(edge.bond_index for edge in external_edges)
        for new_atoms, new_subgraph, num_remaining_atoms, num_remaining_bonds in \
                all_subgraph_extensions(mol, old_subgraph, visited_bond_indices, internal_bonds, external_edges):
            if prune(new_subgraph, mol, num_remaining_atoms, num_remaining_bonds, best_sizes):
                #print "PRUNE", make_canonical_smarts(new_subgraph, mol, atom_assignment)
                continue
            smarts = make_canonical_smarts(new_subgraph, mol, atom_assignment)
            if matches_all_targets[smarts]:
                #print "YES", smarts
                best_sizes = hits.add_new_match(new_subgraph, mol, smarts)
            else:
                # No superset of a non-matching subgraph can match, so
                # this branch of the search is abandoned.
                #print "NO", smarts
                continue
            if not new_atoms:
                continue
            new_internal_bonds, new_external_edges = find_extensions(
                new_atoms, new_visited_bond_indices, directed_edges)
            if new_internal_bonds or new_external_edges:
                # Rank so the subgraph with the highest number of bonds comes first
                heappush(seeds, (-len(new_subgraph.bond_indices), tiebreaker(), new_subgraph,
                                 new_visited_bond_indices, new_internal_bonds, new_external_edges,
                                 mol, directed_edges))
    return True
# Assign a unique identifier to every unique key
class Uniquer(dict):
    """Map each distinct key to a unique, sequentially assigned integer id.

    The first time a key is looked up it is assigned the next id
    (starting at 0); subsequent lookups return the same id.
    """
    def __init__(self):
        from functools import partial
        # ``partial(next, iterator)`` is a callable that works on both
        # Python 2 and Python 3.  The original ``itertools.count().next``
        # relied on the Python-2-only bound ``.next`` method.
        self.counter = partial(next, itertools.count())
    def __missing__(self, key):
        # Called by dict lookup for unseen keys: assign and remember an id.
        self[key] = count = self.counter()
        return count
def EnumerationMCS(enumeration_mols, targets, maximize = Default.maximize,
                   complete_rings_only = Default.complete_rings_only,
                   timeout = Default.timeout):
    """Run the subgraph-enumeration MCS search over ``enumeration_mols``,
    keeping only substructures which match every molecule in ``targets``.
    Returns the collector's result object (an MCSResult)."""
    atom_assignment = Uniquer()
    matches_all_targets = CachingTargetsMatcher(list(targets))
    # Look up the prune strategy and hit collector for this option pair.
    option_key = (maximize, bool(complete_rings_only))
    if option_key not in _maximize_options:
        raise ValueError("Unknown 'maximize' option %r" % (maximize,))
    prune, hits_class = _maximize_options[option_key]
    hits = hits_class()
    completed = _enumerate_subgraphs(enumeration_mols, prune, atom_assignment,
                                     matches_all_targets, hits, timeout)
    return hits.get_result(completed)
########## Main driver for the MCS code
def FindMCS(mols, minNumAtoms=2,
            maximize = Default.maximize,
            atomCompare = Default.atom_compare,
            bondCompare = Default.bond_compare,
            matchValences = Default.match_valences,
            ringMatchesRingOnly = False,
            completeRingsOnly = False,
            timeout=Default.timeout,
            ):
    """Find the maximum common substructure of a set of molecules
    In the simplest case, pass in a list of molecules and get back
    an MCSResult object which describes the MCS:
    >>> from rdkit import Chem
    >>> mols = [Chem.MolFromSmiles("C#CCP"), Chem.MolFromSmiles("C=CCO")]
    >>> from rdkit.Chem import MCS
    >>> MCS.FindMCS(mols)
    MCSResult(numAtoms=2, numBonds=1, smarts='[#6]-[#6]', completed=1)
    The SMARTS '[#6]-[#6]' matches the largest common substructure of
    the input structures. It has 2 atoms and 1 bond. If there is no
    MCS which is at least `minNumAtoms` in size then the result will set
    numAtoms and numBonds to -1 and set smarts to None.
    By default, two atoms match if they are the same element and two
    bonds match if they have the same bond type. Specify `atomCompare`
    and `bondCompare` to use different comparison functions, as in:
    >>> MCS.FindMCS(mols, atomCompare="any")
    MCSResult(numAtoms=3, numBonds=2, smarts='[*]-[*]-[*]', completed=1)
    >>> MCS.FindMCS(mols, bondCompare="any")
    MCSResult(numAtoms=3, numBonds=2, smarts='[#6]~[#6]~[#6]', completed=1)
    An atomCompare of "any" says that any atom matches any other atom,
    "elements" compares by element type, and "isotopes" matches based on
    the isotope label. Isotope labels can be used to implement user-defined
    atom types. A bondCompare of "any" says that any bond matches any
    other bond, and "bondtypes" says bonds are equivalent if and only if
    they have the same bond type.
    A substructure has both atoms and bonds. The default `maximize`
    setting of "atoms" finds a common substructure with the most number
    of atoms. Use maximize="bonds" to maximize the number of bonds.
    Maximizing the number of bonds tends to maximize the number of rings,
    although two small rings may have fewer bonds than one large ring.
    You might not want a 3-valent nitrogen to match one which is 5-valent.
    The default `matchValences` value of False ignores valence information.
    When True, the atomCompare setting is modified to also require that
    the two atoms have the same valency.
    >>> MCS.FindMCS(mols, matchValences=True)
    MCSResult(numAtoms=2, numBonds=1, smarts='[#6v4]-[#6v4]', completed=1)
    It can be strange to see a linear carbon chain match a carbon ring,
    which is what the `ringMatchesRingOnly` default of False does. If
    you set it to True then ring bonds will only match ring bonds.
    >>> mols = [Chem.MolFromSmiles("C1CCC1CCC"), Chem.MolFromSmiles("C1CCCCCC1")]
    >>> MCS.FindMCS(mols)
    MCSResult(numAtoms=7, numBonds=6, smarts='[#6]-[#6]-[#6]-[#6]-[#6]-[#6]-[#6]', completed=1)
    >>> MCS.FindMCS(mols, ringMatchesRingOnly=True)
    MCSResult(numAtoms=4, numBonds=3, smarts='[#6](-@[#6])-@[#6]-@[#6]', completed=1)
    You can further restrict things and require that partial rings
    (as in this case) are not allowed. That is, if an atom is part of
    the MCS and the atom is in a ring of the entire molecule then
    that atom is also in a ring of the MCS. Set `completeRingsOnly`
    to True to toggle this requirement and also sets ringMatchesRingOnly
    to True.
    >>> mols = [Chem.MolFromSmiles("CCC1CC2C1CN2"), Chem.MolFromSmiles("C1CC2C1CC2")]
    >>> MCS.FindMCS(mols)
    MCSResult(numAtoms=6, numBonds=6, smarts='[#6]-1-[#6]-[#6](-[#6])-[#6]-1-[#6]', completed=1)
    >>> MCS.FindMCS(mols, ringMatchesRingOnly=True)
    MCSResult(numAtoms=5, numBonds=5, smarts='[#6]-@1-@[#6]-@[#6]-@[#6]-@1-@[#6]', completed=1)
    >>> MCS.FindMCS(mols, completeRingsOnly=True)
    MCSResult(numAtoms=4, numBonds=4, smarts='[#6]-@1-@[#6]-@[#6]-@[#6]-@1', completed=1)
    The MCS algorithm will exhaustively search for a maximum common substructure.
    Typically this takes a fraction of a second, but for some comparisons this
    can take minutes or longer. Use the `timeout` parameter to stop the search
    after the given number of seconds (wall-clock seconds, not CPU seconds) and
    return the best match found in that time. If timeout is reached then the
    `completed` property of the MCSResult will be 0 instead of 1.
    >>> mols = [Chem.MolFromSmiles("Nc1ccccc1"*100), Chem.MolFromSmiles("Nc1ccccccccc1"*100)]
    >>> MCS.FindMCS(mols, timeout=0.1)
    MCSResult(numAtoms=16, numBonds=15, smarts='[#7]-[#6](:[#6](-[#7]-[#6](:[#6](
    -[#7]-[#6]):[#6]):[#6]:[#6]:[#6]):[#6]):[#6]:[#6]:[#6]', completed=0)
    (The MCS after 50 seconds contained 511 atoms.)
    """
    # Validate the simple arguments before doing any real work.
    if minNumAtoms < 2:
        raise ValueError("minNumAtoms must be at least 2")
    if timeout is not None:
        if timeout <= 0.0:
            raise ValueError("timeout must be None or a positive value")
    if not mols:
        # Previously an empty input list crashed later with an opaque
        # IndexError; report the problem explicitly instead.
        raise ValueError("mols must contain at least one molecule")
    if completeRingsOnly:
        ringMatchesRingOnly = True
    try:
        atom_typer = _atom_typers[atomCompare]
    except KeyError:
        raise ValueError("Unknown atomCompare option %r" % (atomCompare,))
    try:
        bond_typer = _bond_typers[bondCompare]
    except KeyError:
        raise ValueError("Unknown bondCompare option %r" % (bondCompare,))
    # Make copies of all of the molecules so I can edit without worrying about the original
    typed_mols = _convert_input_to_typed_molecules(mols, atom_typer, bond_typer,
                                                   match_valences = matchValences,
                                                   ring_matches_ring_only = ringMatchesRingOnly)
    bondtype_counts = _get_canonical_bondtype_counts(typed_mols)
    # Drop bonds whose type does not occur in every molecule; they can
    # never be part of the MCS.
    fragmented_mols = [_remove_unknown_bondtypes(typed_mol, bondtype_counts) for typed_mol in typed_mols]
    sizes = []
    max_num_atoms = fragmented_mols[0].rdmol.GetNumAtoms()
    max_num_bonds = fragmented_mols[0].rdmol.GetNumBonds()
    for tiebreaker, (typed_mol, fragmented_mol) in enumerate(zip(typed_mols, fragmented_mols)):
        num_atoms, num_bonds = _find_upper_fragment_size_limits(fragmented_mol.rdmol,
                                                                fragmented_mol.rdmol_atoms)
        if num_atoms < minNumAtoms:
            # Short-cut exit: this molecule has no fragment which is
            # large enough, so no MCS of the required size can exist.
            return MCSResult(-1, -1, None, True)
        if num_atoms < max_num_atoms:
            max_num_atoms = num_atoms
        if num_bonds < max_num_bonds:
            max_num_bonds = num_bonds
        sizes.append( (num_bonds, num_atoms, tiebreaker, typed_mol, fragmented_mol) )
    # (An ``if sizes is None`` test used to sit here; it was dead code --
    # ``sizes`` is always a list, and the short-cut exit above already
    # handles the too-small-fragment case.)
    assert min(size[1] for size in sizes) >= minNumAtoms
    # Sort so the molecule with the smallest largest fragment (by bonds) comes first.
    # Break ties with the smallest number of atoms.
    # Break secondary ties by position.
    sizes.sort()
    #print "Using", Chem.MolToSmiles(sizes[0][4].rdmol)
    # Use the first as the query, the rest as the targets
    query_fragments = _fragmented_mol_to_enumeration_mols(sizes[0][4], minNumAtoms)
    targets = [size[3].rdmol for size in sizes[1:]]
    mcs_result = EnumerationMCS(query_fragments, targets, maximize=maximize,
                                complete_rings_only=completeRingsOnly, timeout=timeout)
    return mcs_result
if __name__ == "__main__":
    # Small smoke test / usage example: MCS of phenol vs. a pyridinol.
    mol1 = Chem.MolFromSmiles("c1ccccc1O")
    mol2 = Chem.MolFromSmiles("c1ccncc1O")
    x = FindMCS([mol1, mol2])
    # Use the function form of print so this block parses on both
    # Python 2 and Python 3 (the old ``print x`` statement is Py2-only).
    print(x)
    print(repr(x))
| rdkit/rdkit-orig | rdkit/Chem/MCS.py | Python | bsd-3-clause | 83,945 | [
"RDKit",
"VisIt"
] | 179682eb65ba162349fdc8542c538cd9dcbb36984ce34dc2ddda1852ffcd4b13 |
"""
python -c "import doctest, cyth; print(doctest.testmod(cyth.cyth_helpers))"
TODO: Change this file to cyth_manglers? Functions which mangle names?
"""
from __future__ import absolute_import, division, print_function
from os.path import splitext, split, join, relpath
import utool
import os
import astor
rrr = utool.inject_reload_function(__name__, 'cyth_helpers')
def get_py_module_name(py_fpath):
    """Convert a ``.py`` file path into its dotted module name, computed
    relative to the current working directory."""
    rel_path = relpath(py_fpath, os.getcwd())
    base, extension = splitext(rel_path)
    assert extension == '.py', 'bad input'
    # Accept either path separator so this works on all platforms.
    return base.replace('/', '.').replace('\\', '.')
def get_cyth_name(py_name):
    """Mangle the last component of a dotted name into its cyth form.

    >>> py_name = 'vtool.keypoint'
    >>> cy_name = get_cyth_name(py_name)
    >>> print(cy_name)
    vtool._keypoint_cyth
    """
    # Only the final component is mangled so enclosing packages are unaffected.
    parts = py_name.split('.')
    mangled_leaf = '_%s_cyth' % (parts[-1],)
    return '.'.join(parts[:-1] + [mangled_leaf])
def get_cyth_path(py_fpath):
    """Return the path of the generated ``.pyx`` file for a ``.py`` file.

    >>> py_fpath = '/foo/vtool/vtool/keypoint.py'
    >>> cy_fpath = get_cyth_path(py_fpath)
    >>> print(cy_fpath)
    /foo/vtool/vtool/_keypoint_cyth.pyx
    """
    directory, filename = split(py_fpath)
    base, extension = splitext(filename)
    assert extension == '.py', 'not a python file'
    return join(directory, get_cyth_name(base) + '.pyx')
def get_c_path(cy_fpath):
    """Return the path of the generated ``.c`` file for a ``.pyx`` file.

    >>> cy_fpath = '/foo/vtool/vtool/_linalg_cyth.pyx'
    >>> c_fpath = get_c_path(cy_fpath)
    >>> print(c_fpath)
    /foo/vtool/vtool/_linalg_cyth.c
    """
    # NOTE: the old doctest printed the *input* path and expected the
    # wrong value; it never actually exercised this function.
    dpath, fname = split(cy_fpath)
    name, ext = splitext(fname)
    assert ext == '.pyx', 'not a cython file'
    c_fpath = join(dpath, name + '.c')
    return c_fpath
def get_cyth_bench_path(py_fpath):
    """Return the path of the benchmark script for a ``.py`` file.

    >>> py_fpath = '/foo/vtool/vtool/keypoint.py'
    >>> cy_fpath = get_cyth_bench_path(py_fpath)
    >>> print(cy_fpath)
    /foo/vtool/vtool/_keypoint_cyth_bench.py
    """
    directory, filename = split(py_fpath)
    base, extension = splitext(filename)
    assert extension == '.py', 'not a python file'
    bench_fname = get_cyth_name(base) + '_bench.py'
    return utool.unixpath(join(directory, bench_fname))
def get_cyth_pxd_path(py_fpath):
    """Return the path of the generated ``.pxd`` definition file for a
    ``.py`` file.

    >>> py_fpath = '/foo/vtool/vtool/keypoint.py'
    >>> cy_fpath = get_cyth_pxd_path(py_fpath)
    >>> print(cy_fpath)
    /foo/vtool/vtool/_keypoint_cyth.pxd
    """
    directory, filename = split(py_fpath)
    base, extension = splitext(filename)
    assert extension == '.py', 'not a python file'
    return utool.unixpath(join(directory, get_cyth_name(base) + '.pxd'))
def get_cyth_safe_funcname(pyth_funcname):
    """Return the cyth-mangled version of a function name (``_cyth`` suffix)."""
    return '%s_cyth' % (pyth_funcname,)
def ast_to_sourcecode(node):
    """Regenerate python source text from an AST ``node`` using astor,
    with a four-space indent."""
    indent = ' ' * 4
    source_gen = astor.codegen.SourceGenerator(indent)
    source_gen.visit(node)
    return ''.join(source_gen.result)
| aweinstock314/cyth | cyth/cyth_helpers.py | Python | apache-2.0 | 2,759 | [
"VisIt"
] | 8b07ee7a98e3d97cbcd2bbbd10960470fd3ac191321211621c675397940519c5 |
# Natural Language Toolkit: Corpus Reader Utilities
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import os
import sys
import bisect
import re
import tempfile
try: import cPickle as pickle
except ImportError: import pickle
from itertools import islice
# Use the c version of ElementTree, which is faster, if possible:
try: from xml.etree import cElementTree as ElementTree
except ImportError: from xml.etree import ElementTree
from nltk.tokenize import wordpunct_tokenize
from nltk.internals import slice_bounds
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.data import SeekableUnicodeStreamReader
from nltk.sourcedstring import SourcedStringStream
from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation, py25
######################################################################
#{ Corpus View
######################################################################
class StreamBackedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file, which acts like a sequence of tokens:
    it can be accessed by index, iterated over, etc.  However, the
    tokens are only constructed as-needed -- the entire corpus is
    never stored in memory at once.
    The constructor to ``StreamBackedCorpusView`` takes two arguments:
    a corpus fileid (specified as a string or as a ``PathPointer``);
    and a block reader.  A "block reader" is a function that reads
    zero or more tokens from a stream, and returns them as a list.  A
    very simple example of a block reader is:
        >>> def simple_block_reader(stream):
        ...     return stream.readline().split()
    This simple block reader reads a single line at a time, and
    returns a single token (consisting of a string) for each
    whitespace-separated substring on the line.
    When deciding how to define the block reader for a given
    corpus, careful consideration should be given to the size of
    blocks handled by the block reader.  Smaller block sizes will
    increase the memory requirements of the corpus view's internal
    data structures (by 2 integers per block).  On the other hand,
    larger block sizes may decrease performance for random access to
    the corpus.  (But note that larger block sizes will *not*
    decrease performance for iteration.)
    Internally, ``CorpusView`` maintains a partial mapping from token
    index to file position, with one entry per block.  When a token
    with a given index *i* is requested, the ``CorpusView`` constructs
    it as follows:
      1. First, it searches the toknum/filepos mapping for the token
         index closest to (but less than or equal to) *i*.
      2. Then, starting at the file position corresponding to that
         index, it reads one block at a time using the block reader
         until it reaches the requested token.
    The toknum/filepos mapping is created lazily: it is initially
    empty, but every time a new block is read, the block's
    initial token is added to the mapping.  (Thus, the toknum/filepos
    map has one entry per block.)
    In order to increase efficiency for random access patterns that
    have high degrees of locality, the corpus view may cache one or
    more blocks.
    :note: Each ``CorpusView`` object internally maintains an open file
        object for its underlying corpus file.  This file should be
        automatically closed when the ``CorpusView`` is garbage collected,
        but if you wish to close it manually, use the ``close()``
        method.  If you access a ``CorpusView``'s items after it has been
        closed, the file object will be automatically re-opened.
    :warning: If the contents of the file are modified during the
        lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
        is undefined.
    :warning: If a unicode encoding is specified when constructing a
        ``CorpusView``, then the block reader may only call
        ``stream.seek()`` with offsets that have been returned by
        ``stream.tell()``; in particular, calling ``stream.seek()`` with
        relative offsets, or with offsets based on string lengths, may
        lead to incorrect behavior.
    :ivar _block_reader: The function used to read
        a single block from the underlying file stream.
    :ivar _toknum: A list containing the token index of each block
        that has been processed.  In particular, ``_toknum[i]`` is the
        token index of the first token in block ``i``.  Together
        with ``_filepos``, this forms a partial mapping between token
        indices and file positions.
    :ivar _filepos: A list containing the file position of each block
        that has been processed.  In particular, ``_toknum[i]`` is the
        file position of the first character in block ``i``.  Together
        with ``_toknum``, this forms a partial mapping between token
        indices and file positions.
    :ivar _stream: The stream used to access the underlying corpus file.
    :ivar _len: The total number of tokens in the corpus, if known;
        or None, if the number of tokens is not yet known.
    :ivar _eofpos: The character position of the last character in the
        file.  This is calculated when the corpus view is initialized,
        and is used to decide when the end of file has been reached.
    :ivar _cache: A cache of the most recently read block.  It
        is encoded as a tuple (start_toknum, end_toknum, tokens), where
        start_toknum is the token index of the first token in the block;
        end_toknum is the token index of the first token not in the
        block; and tokens is a list of the tokens in the block.
    """
    def __init__(self, fileid, block_reader=None, startpos=0,
                 encoding=None, source=None):
        """
        Create a new corpus view, based on the file ``fileid``, and
        read with ``block_reader``.  See the class documentation
        for more information.
        :param fileid: The path to the file that is read by this
            corpus view.  ``fileid`` can either be a string or a
            ``PathPointer``.
        :param startpos: The file position at which the view will
            start reading.  This can be used to skip over preface
            sections.
        :param encoding: The unicode encoding that should be used to
            read the file's contents.  If no encoding is specified,
            then the file's contents will be read as a non-unicode
            string (i.e., a str).
        :param source: If specified, then use an ``SourcedStringStream``
            to annotate all strings read from the file with
            information about their start offset, end ofset,
            and docid.  The value of ``source`` will be used as the docid.
        """
        # A block_reader argument overrides the (abstract) read_block
        # method on this instance only.
        if block_reader:
            self.read_block = block_reader
        # Initialize our toknum/filepos mapping.
        self._toknum = [0]
        self._filepos = [startpos]
        self._encoding = encoding
        self._source = source
        # We don't know our length (number of tokens) yet.
        self._len = None
        self._fileid = fileid
        self._stream = None
        self._current_toknum = None
        """This variable is set to the index of the next token that
           will be read, immediately before ``self.read_block()`` is
           called.  This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current token number."""
        self._current_blocknum = None
        """This variable is set to the index of the next block that
           will be read, immediately before ``self.read_block()`` is
           called.  This is provided for the benefit of the block
           reader, which under rare circumstances may need to know
           the current block number."""
        # Find the length of the file.
        try:
            if isinstance(self._fileid, PathPointer):
                self._eofpos = self._fileid.file_size()
            else:
                self._eofpos = os.stat(self._fileid).st_size
        except Exception as exc:
            raise ValueError('Unable to open or access %r -- %s' %
                             (fileid, exc))
        # Maintain a cache of the most recently read block, to
        # increase efficiency of random access.
        self._cache = (-1, -1, None)
    fileid = property(lambda self: self._fileid, doc="""
        The fileid of the file that is accessed by this view.
        :type: str or PathPointer""")
    def read_block(self, stream):
        """
        Read a block from the input stream.
        :return: a block of tokens from the input stream
        :rtype: list(any)
        :param stream: an input stream
        :type stream: stream
        """
        raise NotImplementedError('Abstract Method')
    def _open(self):
        """
        Open the file stream associated with this corpus view.  This
        will be called performed if any value is read from the view
        while its file stream is closed.
        """
        if isinstance(self._fileid, PathPointer):
            self._stream = self._fileid.open(self._encoding)
        elif self._encoding:
            self._stream = SeekableUnicodeStreamReader(
                open(self._fileid, 'rb'), self._encoding)
        else:
            self._stream = open(self._fileid, 'rb')
        # Optionally wrap the stream so every string read carries
        # source-offset annotations.
        if self._source is not None:
            self._stream = SourcedStringStream(self._stream, self._source)
    def close(self):
        """
        Close the file stream associated with this corpus view.  This
        can be useful if you are worried about running out of file
        handles (although the stream should automatically be closed
        upon garbage collection of the corpus view).  If the corpus
        view is accessed after it is closed, it will be automatically
        re-opened.
        """
        if self._stream is not None:
            self._stream.close()
        self._stream = None
    def __len__(self):
        if self._len is None:
            # iterate_from() sets self._len when it reaches the end
            # of the file:
            for tok in self.iterate_from(self._toknum[-1]): pass
        return self._len
    def __getitem__(self, i):
        if isinstance(i, slice):
            start, stop = slice_bounds(self, i)
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= start and stop <= self._cache[1]:
                return self._cache[2][start-offset:stop-offset]
            # Construct & return the result.
            return LazySubsequence(self, start, stop)
        else:
            # Handle negative indices
            if i < 0: i += len(self)
            if i < 0: raise IndexError('index out of range')
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= i < self._cache[1]:
                return self._cache[2][i-offset]
            # Use iterate_from to extract it.
            # NOTE(review): ``.next()`` is Python-2-only; Python 3 would
            # use ``next(...)``.
            try:
                return self.iterate_from(i).next()
            except StopIteration:
                raise IndexError('index out of range')
    # If we wanted to be thread-safe, then this method would need to
    # do some locking.
    def iterate_from(self, start_tok):
        # Start by feeding from the cache, if possible.
        if self._cache[0] <= start_tok < self._cache[1]:
            for tok in self._cache[2][start_tok-self._cache[0]:]:
                yield tok
                start_tok += 1
        # Decide where in the file we should start.  If `start` is in
        # our mapping, then we can jump straight to the correct block;
        # otherwise, start at the last block we've processed.
        if start_tok < self._toknum[-1]:
            block_index = bisect.bisect_right(self._toknum, start_tok)-1
            toknum = self._toknum[block_index]
            filepos = self._filepos[block_index]
        else:
            block_index = len(self._toknum)-1
            toknum = self._toknum[-1]
            filepos = self._filepos[-1]
        # Open the stream, if it's not open already.
        if self._stream is None:
            self._open()
        # Each iteration through this loop, we read a single block
        # from the stream.
        while filepos < self._eofpos:
            # Read the next block.
            self._stream.seek(filepos)
            self._current_toknum = toknum
            self._current_blocknum = block_index
            tokens = self.read_block(self._stream)
            assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
                'block reader %s() should return list or tuple.' %
                self.read_block.__name__)
            num_toks = len(tokens)
            new_filepos = self._stream.tell()
            assert new_filepos > filepos, (
                'block reader %s() should consume at least 1 byte (filepos=%d)' %
                (self.read_block.__name__, filepos))
            # Update our cache.
            self._cache = (toknum, toknum+num_toks, list(tokens))
            # Update our mapping.
            assert toknum <= self._toknum[-1]
            if num_toks > 0:
                block_index += 1
                if toknum == self._toknum[-1]:
                    # This is a newly discovered block: extend the
                    # toknum/filepos mapping.
                    assert new_filepos > self._filepos[-1] # monotonic!
                    self._filepos.append(new_filepos)
                    self._toknum.append(toknum+num_toks)
                else:
                    # Check for consistency:
                    assert new_filepos == self._filepos[block_index], (
                        'inconsistent block reader (num chars read)')
                    assert toknum+num_toks == self._toknum[block_index], (
                        'inconsistent block reader (num tokens returned)')
            # If we reached the end of the file, then update self._len
            if new_filepos == self._eofpos:
                self._len = toknum + num_toks
            # Generate the tokens in this block (but skip any tokens
            # before start_tok).  Note that between yields, our state
            # may be modified.
            for tok in tokens[max(0, start_tok-toknum):]:
                yield tok
            # If we're at the end of the file, then we're done.
            assert new_filepos <= self._eofpos
            if new_filepos == self._eofpos:
                break
            # Update our indices
            toknum += num_toks
            filepos = new_filepos
        # If we reach this point, then we should know our length.
        assert self._len is not None
    # Use concat for these, so we can use a ConcatenatedCorpusView
    # when possible.
    def __add__(self, other):
        return concat([self, other])
    def __radd__(self, other):
        return concat([other, self])
    def __mul__(self, count):
        return concat([self] * count)
    def __rmul__(self, count):
        return concat([self] * count)
class ConcatenatedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file that joins together one or more
    ``StreamBackedCorpusViews<StreamBackedCorpusView>``.  At most
    one file handle is left open at any time.
    """
    def __init__(self, corpus_views):
        self._pieces = corpus_views
        """A list of the corpus subviews that make up this
        concatenation."""
        self._offsets = [0]
        """A list of offsets, indicating the index at which each
        subview begins.  In particular::
            offsets[i] = sum([len(p) for p in pieces[:i]])"""
        self._open_piece = None
        """The most recently accessed corpus subview (or None).
        Before a new subview is accessed, this subview will be closed."""
    def __len__(self):
        # _offsets holds one leading 0 plus one entry per piece already
        # measured; if it is not longer than _pieces, some piece lengths
        # are still unknown and we must iterate to fill them in.
        if len(self._offsets) <= len(self._pieces):
            # Iterate to the end of the corpus.
            for tok in self.iterate_from(self._offsets[-1]): pass
        return self._offsets[-1]
    def close(self):
        """Close the file streams of all subviews."""
        for piece in self._pieces:
            piece.close()
    def iterate_from(self, start_tok):
        # Find the piece containing start_tok: the rightmost recorded
        # offset that is <= start_tok.
        piecenum = bisect.bisect_right(self._offsets, start_tok)-1
        while piecenum < len(self._pieces):
            offset = self._offsets[piecenum]
            piece = self._pieces[piecenum]
            # If we've got another piece open, close it first.
            if self._open_piece is not piece:
                if self._open_piece is not None:
                    self._open_piece.close()
                self._open_piece = piece
            # Get everything we can from this piece.
            for tok in piece.iterate_from(max(0, start_tok-offset)):
                yield tok
            # Update the offset table.
            if piecenum+1 == len(self._offsets):
                self._offsets.append(self._offsets[-1] + len(piece))
            # Move on to the next piece.
            piecenum += 1
def concat(docs):
    """
    Concatenate together the contents of multiple documents from a
    single corpus, using an appropriate concatenation function.  This
    utility function is used by corpus readers when the user requests
    more than one document at a time.
    :raise ValueError: If ``docs`` is empty, or if no concatenation
        strategy is known for the documents' type(s).
    """
    if len(docs) == 1:
        return docs[0]
    if len(docs) == 0:
        raise ValueError('concat() expects at least one object!')
    types = set([d.__class__ for d in docs])
    # If they're all strings, use string concatenation.
    if types.issubset([str, unicode, basestring]):
        return reduce((lambda a,b:a+b), docs, '')
    # If they're all corpus views, then use ConcatenatedCorpusView.
    for typ in types:
        if not issubclass(typ, (StreamBackedCorpusView,
                                ConcatenatedCorpusView)):
            break
    else:
        return ConcatenatedCorpusView(docs)
    # If they're all lazy sequences, use a lazy concatenation
    for typ in types:
        if not issubclass(typ, AbstractLazySequence):
            break
    else:
        return LazyConcatenation(docs)
    # Otherwise, see what we can do:
    if len(types) == 1:
        typ = list(types)[0]
        if issubclass(typ, list):
            return reduce((lambda a,b:a+b), docs, [])
        if issubclass(typ, tuple):
            return reduce((lambda a,b:a+b), docs, ())
        # Test an *instance* rather than the class: iselement() checks
        # for element-instance traits, so passing the class (as the old
        # code did) never recognized actual Element documents.  All docs
        # share one class at this point, so docs[0] is representative.
        if ElementTree.iselement(docs[0]):
            xmltree = ElementTree.Element('documents')
            for doc in docs: xmltree.append(doc)
            return xmltree
    # No method found!
    raise ValueError("Don't know how to concatenate types: %r" % types)
######################################################################
#{ Corpus View for Pickled Sequences
######################################################################
class PickleCorpusView(StreamBackedCorpusView):
    """
    A stream backed corpus view for corpus files that consist of
    sequences of serialized Python objects (serialized using
    ``pickle.dump``).  One use case for this class is to store the
    result of running feature detection on a corpus to disk.  This can
    be useful when performing feature detection is expensive (so we
    don't want to repeat it); but the corpus is too large to store in
    memory.  The following example illustrates this technique:
    .. doctest::
        :options: +SKIP
        >>> from nltk.corpus.reader.util import PickleCorpusView
        >>> from nltk.util import LazyMap
        >>> feature_corpus = LazyMap(detect_features, corpus)
        >>> PickleCorpusView.write(feature_corpus, some_fileid)
        >>> pcv = PickleCorpusView(some_fileid)
    """
    # Number of pickled objects to unpickle per block.
    BLOCK_SIZE = 100
    # Pickle protocol: -1 selects the highest protocol available.
    PROTOCOL = -1
    def __init__(self, fileid, delete_on_gc=False):
        """
        Create a new corpus view that reads the pickle corpus
        ``fileid``.
        :param delete_on_gc: If true, then ``fileid`` will be deleted
            whenever this object gets garbage-collected.
        """
        self._delete_on_gc = delete_on_gc
        StreamBackedCorpusView.__init__(self, fileid)
    def read_block(self, stream):
        """Unpickle up to ``BLOCK_SIZE`` objects from ``stream`` and
        return them as a list.
        SECURITY NOTE: ``pickle.load`` can execute arbitrary code while
        deserializing -- only use this view on trusted corpus files.
        """
        result = []
        for i in range(self.BLOCK_SIZE):
            try: result.append(pickle.load(stream))
            except EOFError: break
        return result
    def __del__(self):
        """
        If ``delete_on_gc`` was set to true when this
        ``PickleCorpusView`` was created, then delete the corpus view's
        fileid.  (This method is called whenever a
        ``PickledCorpusView`` is garbage-collected.
        """
        # Supply a default of False: if __init__ raised before setting
        # _delete_on_gc, the old no-default getattr() would itself raise
        # AttributeError during garbage collection.
        if getattr(self, '_delete_on_gc', False):
            if os.path.exists(self._fileid):
                try: os.remove(self._fileid)
                except (OSError, IOError): pass
        self.__dict__.clear() # make the garbage collector's job easier
    @classmethod
    def write(cls, sequence, output_file):
        """Pickle each item of ``sequence`` to ``output_file`` (a path
        string or writable binary file object)."""
        if isinstance(output_file, basestring):
            output_file = open(output_file, 'wb')
        for item in sequence:
            pickle.dump(item, output_file, cls.PROTOCOL)
    @classmethod
    def cache_to_tempfile(cls, sequence, delete_on_gc=True):
        """
        Write the given sequence to a temporary file as a pickle
        corpus; and then return a ``PickleCorpusView`` view for that
        temporary corpus file.
        :param delete_on_gc: If true, then the temporary file will be
            deleted whenever this object gets garbage-collected.
        """
        try:
            fd, output_file_name = tempfile.mkstemp('.pcv', 'nltk-')
            output_file = os.fdopen(fd, 'wb')
            cls.write(sequence, output_file)
            output_file.close()
            return PickleCorpusView(output_file_name, delete_on_gc)
        except (OSError, IOError) as e:
            raise ValueError('Error while creating temp file: %s' % e)
######################################################################
#{ Block Readers
######################################################################
def read_whitespace_block(stream):
    """Read up to 20 lines from ``stream`` and return their
    whitespace-separated tokens as a flat list."""
    lines = [stream.readline() for _ in range(20)]
    return [token for line in lines for token in line.split()]
def read_wordpunct_block(stream):
    """Read up to 20 lines from ``stream``, tokenizing each with
    ``wordpunct_tokenize``, and return the tokens as one flat list."""
    tokens = []
    for _ in range(20): # Read 20 lines at a time.
        tokens.extend(wordpunct_tokenize(stream.readline()))
    return tokens
def read_line_block(stream):
    """Read up to 20 lines from ``stream``, stripping trailing newlines.
    Stops early (returning what has been gathered) at end of file."""
    collected = []
    for _ in range(20):
        line = stream.readline()
        if not line:
            return collected
        collected.append(line.rstrip('\n'))
    return collected
def read_blankline_block(stream):
    """Read one blank-line-delimited paragraph from ``stream``.

    Returns a one-element list holding the paragraph text (trailing
    newline included), or an empty list at end of file.  Blank lines
    before any content are skipped."""
    para = ''
    while True:
        line = stream.readline()
        if not line:
            # End of file: emit whatever has accumulated.
            return [para] if para else []
        if not line.strip():
            # A blank line ends a non-empty paragraph; leading blank
            # lines are simply skipped.
            if para:
                return [para]
        else:
            para += line
def read_alignedsent_block(stream):
    """
    Read one aligned-sentence block from ``stream``.

    Lines starting with '=' and blank lines are skipped.  Other lines
    are accumulated until a line matching ``\\d+-\\d+`` (the alignment
    line, e.g. ``0-0 1-1``) closes the block.  Returns a one-element
    list with the block text, or an empty list at end of file.
    """
    s = ''
    while True:
        line = stream.readline()
        # End of file.  This must be tested *before* indexing line[0]:
        # readline() returns '' at EOF, and ''[0] raises IndexError --
        # the original ordering crashed on files not ending with an
        # alignment line.
        if not line:
            if s: return [s]
            else: return []
        # Separator or blank line: skip it.
        if line[0] == '=' or line[0] == '\n' or line[:2] == '\r\n':
            continue
        # Content line: accumulate, and close the block once the
        # alignment line has been read.
        s += line
        if re.match(r'^\d+-\d+', line) is not None:
            return [s]
def read_regexp_block(stream, start_re, end_re=None):
    """
    Read a sequence of tokens from a stream, where tokens begin with
    lines that match ``start_re``.  If ``end_re`` is specified, then
    tokens end with lines that match ``end_re``; otherwise, tokens end
    whenever the next line matching ``start_re`` or EOF is found.
    """
    # Skip ahead to the first line matching the start pattern.
    while True:
        line = stream.readline()
        if not line:
            return []  # Reached EOF before any token started.
        if re.match(start_re, line):
            break
    collected = [line]
    # Collect lines until the token ends: at EOF, at an end_re line
    # (which is not itself part of the token), or just before the next
    # start_re line (which is rewound for the next call).
    while True:
        pos_before = stream.tell()
        line = stream.readline()
        if not line:
            return [''.join(collected)]
        if end_re is not None and re.match(end_re, line):
            return [''.join(collected)]
        if end_re is None and re.match(start_re, line):
            stream.seek(pos_before)
            return [''.join(collected)]
        collected.append(line)
def read_sexpr_block(stream, block_size=16384, comment_char=None):
    """
    Read a sequence of s-expressions from the stream, and leave the
    stream's file position at the end the last complete s-expression
    read. This function will always return at least one s-expression,
    unless there are no more s-expressions in the file.
    If the file ends in in the middle of an s-expression, then that
    incomplete s-expression is returned when the end of the file is
    reached.
    :param block_size: The default block size for reading. If an
        s-expression is longer than one block, then more than one
        block will be read.
    :param comment_char: A character that marks comments. Any lines
        that begin with this character will be stripped out.
        (If spaces or tabs precede the comment character, then the
        line will not be stripped.)
    :return: A list of s-expression strings.  Comments, if stripped,
        are replaced by runs of spaces so that character offsets into
        the block remain valid.
    """
    # Remember where this block started so we can seek back to the end
    # of the last *complete* s-expression once parsing is done.
    start = stream.tell()
    block = stream.read(block_size)
    encoding = getattr(stream, 'encoding', None)
    assert encoding is not None or isinstance(block, str)
    if encoding not in (None, 'utf-8'):
        import warnings
        warnings.warn('Parsing may fail, depending on the properties '
                      'of the %s encoding!' % encoding)
        # (e.g., the utf-16 encoding does not work because it insists
        # on adding BOMs to the beginning of encoded strings.)
    if comment_char:
        # (?m) makes ^/$ match per-line, so each comment line is
        # replaced individually by _sub_space below.
        COMMENT = re.compile('(?m)^%s.*$' % re.escape(comment_char))
    while True:
        try:
            # If we're stripping comments, then make sure our block ends
            # on a line boundary; and then replace any comments with
            # space characters. (We can't just strip them out -- that
            # would make our offset wrong.)
            if comment_char:
                block += stream.readline()
                block = re.sub(COMMENT, _sub_space, block)
            # Read the block.
            tokens, offset = _parse_sexpr_block(block)
            # Skip whitespace
            offset = re.compile(r'\s*').search(block, offset).end()
            # Move to the end position.
            if encoding is None:
                stream.seek(start+offset)
            else:
                # Encoded streams seek in bytes, so re-encode the
                # consumed prefix to find its byte length.
                stream.seek(start+len(block[:offset].encode(encoding)))
            # Return the list of tokens we processed
            return tokens
        except ValueError as e:
            if e.args[0] == 'Block too small':
                # The block ended inside an s-expression: extend it with
                # another chunk and retry.
                next_block = stream.read(block_size)
                if next_block:
                    block += next_block
                    continue
                else:
                    # The file ended mid-sexpr -- return what we got.
                    return [block.strip()]
            else: raise
def _sub_space(m):
"""Helper function: given a regexp match, return a string of
spaces that's the same length as the matched string."""
return ' '*(m.end()-m.start())
def _parse_sexpr_block(block):
tokens = []
start = end = 0
while end < len(block):
m = re.compile(r'\S').search(block, end)
if not m:
return tokens, end
start = m.start()
# Case 1: sexpr is not parenthesized.
if m.group() != '(':
m2 = re.compile(r'[\s(]').search(block, start)
if m2:
end = m2.start()
else:
if tokens: return tokens, end
raise ValueError('Block too small')
# Case 2: parenthesized sexpr.
else:
nesting = 0
for m in re.compile(r'[()]').finditer(block, start):
if m.group()=='(': nesting += 1
else: nesting -= 1
if nesting == 0:
end = m.end()
break
else:
if tokens: return tokens, end
raise ValueError('Block too small')
tokens.append(block[start:end])
return tokens, end
######################################################################
#{ Finding Corpus Items
######################################################################
def find_corpus_fileids(root, regexp):
    """Return the sorted list of fileids under ``root`` whose relative
    path fully matches ``regexp``.

    ``root`` may point either into a zipfile or at a filesystem
    directory; symlinked subdirectories are followed where the Python
    version supports it."""
    if not isinstance(root, PathPointer):
        raise TypeError('find_corpus_fileids: expected a PathPointer')
    regexp += '$'
    if isinstance(root, ZipFilePathPointer):
        # Zipfile: scan the archive's namelist, dropping directory
        # entries (which end in '/').
        candidates = (name[len(root.entry):]
                      for name in root.zipfile.namelist()
                      if not name.endswith('/'))
        return sorted(name for name in candidates if re.match(regexp, name))
    elif isinstance(root, FileSystemPathPointer):
        # Directory: walk all subdirectories and match relative paths.
        walk_opts = {} if py25() else {'followlinks': True}  # py25 lacks followlinks
        found = []
        for dirname, subdirs, fileids in os.walk(root.path, **walk_opts):
            prefix = ''.join('%s/' % p for p in _path_from(root.path, dirname))
            found.extend(prefix + fileid for fileid in fileids
                         if re.match(regexp, prefix + fileid))
            # Never descend into svn bookkeeping directories:
            if '.svn' in subdirs:
                subdirs.remove('.svn')
        return sorted(found)
    else:
        raise AssertionError("Don't know how to handle %r" % root)
def _path_from(parent, child):
if os.path.split(parent)[1] == '':
parent = os.path.split(parent)[0]
path = []
while parent != child:
child, dirname = os.path.split(child)
path.insert(0, dirname)
assert os.path.split(child)[0] != child
return path
######################################################################
#{ Paragraph structure in Treebank files
######################################################################
def tagged_treebank_para_block_reader(stream):
    """Read the next paragraph from a tagged Treebank file, where
    paragraphs are separated by lines of six or more '=' characters.
    Returns a one-element list with the paragraph text, or an empty
    list at end of file."""
    para = ''
    while True:
        line = stream.readline()
        if re.match(r'======+\s*$', line):
            # Separator: emit the paragraph unless it is empty (e.g.
            # consecutive separators), in which case keep reading.
            if para.strip():
                return [para]
        elif line == '':
            # End of file.
            return [para] if para.strip() else []
        else:
            para += line
| abad623/verbalucce | verbalucce/nltk/corpus/reader/util.py | Python | apache-2.0 | 31,153 | [
"VisIt"
] | 47652b67b5a70ab8735ab1cd1967aac295a7ebff77c6cd4614365a0848485812 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2022 GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
'''
Module to test :openquake.hmtk.faults.mfd.characterisric.Characteristic class
'''
import unittest
import numpy as np
from openquake.hazardlib.scalerel import WC1994
from openquake.hmtk.faults.mfd.characteristic import Characteristic
# Convenience alias: numpy's "almost equal" array assertion, used throughout.
aaae = np.testing.assert_array_almost_equal
class TestSimpleCharacteristic(unittest.TestCase):
    '''
    Basic test suite for the simple characteristic-earthquake estimator
    :class openquake.hmtk.faults.mfd.characteristic.Characteristic
    '''

    def setUp(self):
        '''
        '''
        self.model = Characteristic()
        self.config = {'MFD_spacing': 0.1,
                       'Model_Weight': 1.0,
                       'Maximum_Magnitude': None,
                       'Maximum_Uncertainty': None,
                       'Lower_Bound': -2.,
                       'Upper_Bound': 2.,
                       'Sigma': None}
        self.msr = WC1994()

    def test_model_setup(self):
        '''
        Simple test to ensure model sets up correctly
        '''
        self.model.setUp(self.config)
        # After setUp() the instance attributes should mirror the config:
        expected_attributes = {'bin_width': 0.1,
                               'lower_bound': -2.0,
                               'mfd_model': 'Characteristic',
                               'mfd_weight': 1.0,
                               'mmax': None,
                               'mmax_sigma': None,
                               'mmin': None,
                               'occurrence_rate': None,
                               'sigma': None,
                               'upper_bound': 2.0}
        self.assertDictEqual(self.model.__dict__, expected_attributes)

    def test_get_mmax(self):
        '''
        Tests the function to get Mmax
        Values come from WC1994 (tested in openquake.hazardlib) - only
        functionality is tested for here!
        '''
        # Case 1: Mmax and its uncertainty given explicitly in the config
        self.config['Maximum_Magnitude'] = 8.0
        self.config['Maximum_Magnitude_Uncertainty'] = 0.2
        self.model = Characteristic()
        self.model.setUp(self.config)
        self.model.get_mmax(self.config, self.msr, 0., 8500.)
        self.assertAlmostEqual(self.model.mmax, 8.0)
        self.assertAlmostEqual(self.model.mmax_sigma, 0.2)
        # Case 2: neither given - fall back to the scaling relation
        self.config['Maximum_Magnitude'] = None
        self.config['Maximum_Magnitude_Uncertainty'] = None
        self.model = Characteristic()
        self.model.setUp(self.config)
        self.model.get_mmax(self.config, self.msr, 0., 8500.)
        self.assertAlmostEqual(self.model.mmax, 7.9880073)
        self.assertAlmostEqual(self.model.mmax_sigma, 0.23)

    def test_get_mfd(self):
        '''
        Tests the calculation of activity rates for the simple
        characteristic earthquake distribution.
        '''
        # Case 1: fault of area 8500 km**2 (Mmax ~ 8.0) slipping at
        # 5 mm/yr, with a Gaussian truncated at [-2, 2] standard
        # deviations and sigma = 0.12
        self.config = {'MFD_spacing': 0.1,
                       'Model_Weight': 1.0,
                       'Maximum_Magnitude': None,
                       'Maximum_Uncertainty': None,
                       'Lower_Bound': -2.,
                       'Upper_Bound': 2.,
                       'Sigma': 0.12}
        self.model = Characteristic()
        self.model.setUp(self.config)
        self.model.get_mmax(self.config, self.msr, 0., 8500.)
        _, _, _ = self.model.get_mfd(5.0, 8500.)
        aaae(self.model.occurrence_rate,
             np.array([4.20932867e-05, 2.10890168e-04, 3.80422666e-04,
                       3.56294331e-04, 1.73223702e-04, 2.14781079e-05]))
        total_rate = np.sum(self.model.occurrence_rate)
        # Case 2: the same fault with no aleatory variability
        self.config['Sigma'] = None
        self.model.setUp(self.config)
        self.model.get_mmax(self.config, self.msr, 0., 8500.)
        _, _, _ = self.model.get_mfd(5.0, 8500.)
        aaae(0.0011844, self.model.occurrence_rate)
        # The summed rates of the truncated Gaussian model must equal
        # the rate of the variance-free model
        aaae(total_rate, self.model.occurrence_rate, 3)
| gem/oq-engine | openquake/hmtk/tests/faults/mfd/test_characteristic.py | Python | agpl-3.0 | 6,255 | [
"Gaussian"
] | b99bd4632e6197006edf01c2b7119cedd287aa20dd322a911979f5dae16f0ca9 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Collection of utility functions that can be used throughout
the photometry package.
.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
"""
import numpy as np
from astropy.io import fits
from bottleneck import move_median, nanmedian, nanmean, allnan, nanargmin, nanargmax
import logging
import tqdm
from scipy.special import erf
from scipy.stats import binned_statistic
import configparser
import json
import os.path
import glob
import re
import itertools
from functools import lru_cache
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import concurrent.futures
from threading import Lock
from collections import defaultdict
# Constants:
mad_to_sigma = 1.482602218505602 #: Constant for converting from MAD to SIGMA. Constant is 1/norm.ppf(3/4)
#--------------------------------------------------------------------------------------------------
@lru_cache(maxsize=1)
def load_settings():
	"""
	Load settings.

	The result is cached, so the settings file is only parsed once
	per process.

	Returns:
		:class:`configparser.ConfigParser`:
	"""
	parser = configparser.ConfigParser()
	ini_path = os.path.join(os.path.dirname(__file__), 'data', 'settings.ini')
	parser.read(ini_path)
	return parser
#--------------------------------------------------------------------------------------------------
@lru_cache(maxsize=10)
def load_sector_settings(sector=None):
	"""
	Load the per-sector settings from the packaged ``data/sectors.json``.

	Parameters:
		sector (int, optional): If given, return only the settings
			dict of that sector; otherwise return the full settings
			structure.

	Returns:
		dict: Sector settings.
	"""
	json_path = os.path.join(os.path.dirname(__file__), 'data', 'sectors.json')
	with open(json_path, 'r') as fid:
		settings = json.load(fid)
	if sector is None:
		return settings
	return settings['sectors'][str(sector)]
#--------------------------------------------------------------------------------------------------
@lru_cache(maxsize=32)
def find_ffi_files(rootdir, sector=None, camera=None, ccd=None):
	"""
	Search directory recursively for TESS FFI images in FITS format.

	The function is cached: the first call on a given ``rootdir`` scans
	the disk, and later calls with the same arguments reuse that result,
	so changes on disk after the first call are not picked up.

	Parameters:
		rootdir (str): Directory to search recursively for TESS FFI images.
		sector (int or None, optional): Only return files from the given sector.
			If ``None``, files from all sectors are returned.
		camera (int or None, optional): Only return files from the given camera number (1-4).
			If ``None``, files from all cameras are returned.
		ccd (int or None, optional): Only return files from the given CCD number (1-4).
			If ``None``, files from all CCDs are returned.

	Returns:
		list: Sorted list of full paths to the FFI FITS files found,
			ordered by filename (i.e. primarily by time).
	"""
	logger = logging.getLogger(__name__)
	# Build the filename pattern, substituting wildcards for any
	# constraint that was not provided:
	sector_part = r'\d{4}' if sector is None else f'{sector:04d}'
	camera_part = r'\d' if camera is None else str(camera)
	ccd_part = r'\d' if ccd is None else str(ccd)
	filename_pattern = (r'^tess\d+-s(?P<sector>' + sector_part
		+ ')-(?P<camera>' + camera_part + r')-(?P<ccd>' + ccd_part
		+ r')-\d{4}-[xsab]_ffic\.fits(\.gz)?$')
	logger.debug("Searching for FFIs in '%s' using pattern '%s'", rootdir, filename_pattern)
	regexp = re.compile(filename_pattern)
	# Recursive search, keeping every file whose basename matches:
	found = [os.path.join(root, fname)
		for root, _, fnames in os.walk(rootdir, followlinks=True)
		for fname in fnames if regexp.match(fname)]
	# Sort by basename, i.e. primarily by timestamp:
	found.sort(key=os.path.basename)
	return found
#--------------------------------------------------------------------------------------------------
@lru_cache(maxsize=10)
def _find_tpf_files(rootdir, sector=None, cadence=None):
logger = logging.getLogger(__name__)
# Create the filename pattern to search for:
sector_str = r'\d{4}' if sector is None else f'{sector:04d}'
suffix = {None: '(fast-)?tp', 120: 'tp', 20: 'fast-tp'}[cadence]
re_pattern = r'^tess\d+-s(?P<sector>' + sector_str + r')-(?P<starid>\d+)-\d{4}-[xsab]_' + suffix + r'\.fits(\.gz)?$'
regexps = [re.compile(re_pattern)]
logger.debug("Searching for TPFs in '%s' using pattern '%s'", rootdir, re_pattern)
# Pattern used for TESS Alert data:
if cadence is None or cadence == 120:
sector_str = r'\d{2}' if sector is None else f'{sector:02d}'
re_pattern2 = r'^hlsp_tess-data-alerts_tess_phot_(?P<starid>\d+)-s(?P<sector>' + sector_str + r')_tess_v\d+_tp\.fits(\.gz)?$'
regexps.append(re.compile(re_pattern2))
logger.debug("Searching for TPFs in '%s' using pattern '%s'", rootdir, re_pattern2)
# Do a recursive search in the directory, finding all files that match the pattern:
filedict = defaultdict(list)
for root, dirnames, filenames in os.walk(rootdir, followlinks=True):
for filename in filenames:
for regex in regexps:
m = regex.match(filename)
if m:
starid = int(m.group('starid'))
filedict[starid].append(os.path.join(root, filename))
break
# Ensure that each list is sorted by itself. We do this once here
# so we don't have to do it each time a specific starid is requested:
for key in filedict.keys():
filedict[key].sort(key=lambda x: os.path.basename(x))
return filedict
#--------------------------------------------------------------------------------------------------
def find_tpf_files(rootdir, starid=None, sector=None, camera=None, ccd=None, cadence=None,
	findmax=None):
	"""
	Search directory recursively for TESS Target Pixel Files.

	The underlying directory scan is cached: the first call on a given
	``rootdir`` reads the disk, and later calls reuse the result, so
	changes on disk after the first call are not picked up.

	Parameters:
		rootdir (str): Directory to search recursively for TESS TPF files.
		starid (int, optional): Only return files from the given TIC number.
		sector (int, optional): Only return files from the given sector.
		camera (int or None, optional): Only return files from the given camera number (1-4).
		ccd (int, optional): Only return files from the given CCD number (1-4).
		cadence (int, optional): Only return files from the given cadence (20 or 120).
		findmax (int, optional): Maximum number of files to return.
			If ``None``, return all files.

	Note:
		Filtering on camera and/or ccd requires reading each file's
		header to determine where it came from, which can significantly
		slow down the query.

	Returns:
		list: Full paths to TPF FITS files found, sorted by filename
			(i.e. primarily by time).
	"""
	if cadence not in (None, 20, 120):
		raise ValueError("Invalid cadence. Must be either 20 or 120.")
	# Cached directory scan:
	filedict = _find_tpf_files(rootdir, sector=sector, cadence=cadence)
	if starid is not None:
		files = filedict.get(starid, [])
	else:
		# No particular star requested: flatten the dict to a single
		# list sorted by basename.
		files = sorted(itertools.chain.from_iterable(filedict.values()),
			key=os.path.basename)
	# Expensive filter: camera/ccd require opening each file's header.
	# Order is preserved, so no re-sorting is needed afterwards.
	if camera is not None or ccd is not None:
		filtered = []
		for fpath in files:
			if camera is not None and fits.getval(fpath, 'CAMERA', ext=0) != camera:
				continue
			if ccd is not None and fits.getval(fpath, 'CCD', ext=0) != ccd:
				continue
			filtered.append(fpath)
			# Stop early once enough files have been found:
			if findmax is not None and len(filtered) >= findmax:
				break
		files = filtered
	# Final safeguard against returning more than requested:
	if findmax is not None:
		files = files[:findmax]
	return files
#--------------------------------------------------------------------------------------------------
@lru_cache(maxsize=32)
def find_hdf5_files(rootdir, sector=None, camera=None, ccd=None):
	"""
	Search the input directory for HDF5 files matching constraints.

	Parameters:
		rootdir (str): Directory to search for HDF5 files.
		sector (int, list or None, optional): Only return files from the given sectors.
			If ``None``, files from all sectors are returned.
		camera (int, list or None, optional): Only return files from the given camera.
			If ``None``, files from all cameras are returned.
		ccd (int, list or None, optional): Only return files from the given ccd.
			If ``None``, files from all ccds are returned.

	Returns:
		list: List of paths to HDF5 files matching constraints.
	"""
	# Normalize each constraint into a tuple of allowed values:
	sectors = to_tuple(sector, (None,))
	cameras = to_tuple(camera, (1, 2, 3, 4))
	ccds = to_tuple(ccd, (1, 2, 3, 4))
	found = []
	for sec, cam, chip in itertools.product(sectors, cameras, ccds):
		sec_str = '???' if sec is None else f'{sec:03d}'
		pattern = f'sector{sec_str:s}_camera{cam:d}_ccd{chip:d}.hdf5'
		found += glob.glob(os.path.join(rootdir, pattern))
	return found
#--------------------------------------------------------------------------------------------------
@lru_cache(maxsize=32)
def find_catalog_files(rootdir, sector=None, camera=None, ccd=None):
	"""
	Search the input directory for CATALOG (sqlite) files matching constraints.

	Parameters:
		rootdir (str): Directory to search for CATALOG files.
		sector (int, list or None, optional): Only return files from the given sectors.
			If ``None``, files from all sectors are returned.
		camera (int, list or None, optional): Only return files from the given camera.
			If ``None``, files from all cameras are returned.
		ccd (int, list or None, optional): Only return files from the given ccd.
			If ``None``, files from all ccds are returned.

	Returns:
		list: List of paths to CATALOG files matching constraints.
	"""
	# Normalize each constraint into a tuple of allowed values:
	sectors = to_tuple(sector, (None,))
	cameras = to_tuple(camera, (1, 2, 3, 4))
	ccds = to_tuple(ccd, (1, 2, 3, 4))
	found = []
	for sec, cam, chip in itertools.product(sectors, cameras, ccds):
		sec_str = '???' if sec is None else f'{sec:03d}'
		pattern = f'catalog_sector{sec_str:s}_camera{cam:d}_ccd{chip:d}.sqlite'
		found += glob.glob(os.path.join(rootdir, pattern))
	return found
#--------------------------------------------------------------------------------------------------
def load_ffi_fits(path, return_header=False, return_uncert=False):
	"""
	Load FFI FITS file.

	Calibration columns and rows are trimmed from the image when the
	file is recognized as a raw TESS FFI.

	Parameters:
		path (str): Path to FITS file.
		return_header (bool, optional): Return FITS headers as well. Default is ``False``.
		return_uncert (bool, optional): Return uncertainty image as well. Default is ``False``.

	Returns:
		numpy.ndarray: Full Frame Image.
		dict: If ``return_header`` is enabled, a dict of the FITS headers.
		numpy.ndarray: If ``return_uncert`` is enabled, the uncertainty image.
	"""
	with fits.open(path, mode='readonly') as hdu:
		hdr = hdu[0].header
		# Raw TESS FFIs carry the image in extension 1, padded with
		# collateral calibration rows/columns which we trim off:
		is_raw_tess_ffi = (hdr.get('TELESCOP') == 'TESS'
			and hdu[1].header.get('NAXIS1') == 2136
			and hdu[1].header.get('NAXIS2') == 2078)
		if is_raw_tess_ffi:
			img = np.asarray(hdu[1].data[0:2048, 44:2092], dtype='float32')
			headers = dict(hdu[0].header)
			headers.update(dict(hdu[1].header))
			if return_uncert:
				imgerr = np.asarray(hdu[2].data[0:2048, 44:2092], dtype='float32')
		else:
			img = np.asarray(hdu[0].data, dtype='float32')
			headers = dict(hdu[0].header)
			if return_uncert:
				imgerr = np.asarray(hdu[1].data, dtype='float32')
	if return_uncert and return_header:
		return img, headers, imgerr
	elif return_uncert:
		return img, imgerr
	elif return_header:
		return img, headers
	else:
		return img
#--------------------------------------------------------------------------------------------------
def to_tuple(inp, default=None):
	"""
	Convert iterable or single values to tuple.

	This function is used for converting inputs, particularly for
	preparing input to functions cached with :func:`functools.lru_cache`,
	to ensure inputs are hashable.

	Parameters:
		inp: Input to convert to tuple.
		default: If ``inp`` is ``None`` return this instead.

	Returns:
		tuple: ``inp`` converted to tuple (or ``default`` when ``inp``
			is ``None``).
	"""
	if inp is None:
		return default
	if isinstance(inp, tuple):
		return inp
	if isinstance(inp, (list, set, frozenset, np.ndarray)):
		return tuple(inp)
	if isinstance(inp, (int, float, bool, str)):
		return (inp,)
	# Generalization over the original implementation: any other
	# iterable (e.g. a generator or dict view) is also materialized
	# into a tuple, so the result is always hashable as promised.
	try:
		return tuple(inp)
	except TypeError:
		# Not iterable -- treat it like the scalar types above.
		return (inp,)
#--------------------------------------------------------------------------------------------------
def _move_median_central_1d(x, width_points):
	"""Centered moving median of the 1D array ``x`` with a window of
	``width_points`` samples; edge windows shrink symmetrically."""
	filtered = move_median(x, width_points, min_count=1)
	# move_median is trailing-edge; roll the result to center the window:
	filtered = np.roll(filtered, -width_points//2 + 1)
	# Recompute the edges, where the centered window is truncated:
	for k in range(width_points//2 + 1):
		filtered[k] = nanmedian(x[:(k + 2)])
		filtered[-(k + 1)] = nanmedian(x[-(k + 2):])
	return filtered
#--------------------------------------------------------------------------------------------------
def move_median_central(x, width_points, axis=0):
return np.apply_along_axis(_move_median_central_1d, axis, x, width_points)
#--------------------------------------------------------------------------------------------------
def add_proper_motion(ra, dec, pm_ra, pm_dec, bjd, epoch=2000.0):
	"""
	Project coordinates (ra,dec) with proper motions to new epoch.

	Parameters:
		ra (float): Right ascension (degrees).
		dec (float): Declination (degrees).
		pm_ra (float): Proper motion in RA (mas/year).
		pm_dec (float): Proper motion in Declination (mas/year).
		bjd (float): Julian date to calculate coordinates for.
		epoch (float, optional): Epoch of ``ra`` and ``dec``. Default=2000.

	Returns:
		(float, float): RA and Declination at the specified date.
	"""
	# Years elapsed between the catalog epoch and the requested BJD:
	years = (bjd - 2451544.5)/365.25 + 2000.0 - epoch
	# Declination drifts linearly; convert mas/year -> degrees/year:
	dec_rate = pm_dec/3600000.0
	dec_now = dec + years*dec_rate
	# Unproject the RA rate using the mean declination between the
	# catalog epoch and the present one:
	mean_dec = dec + years*dec_rate/2.0
	ra_rate = pm_ra/np.cos(mean_dec*np.pi/180.0)/3600000.0
	ra_now = ra + years*ra_rate
	return ra_now, dec_now
#--------------------------------------------------------------------------------------------------
def integratedGaussian(x, y, flux, x_0, y_0, sigma=1):
	"""
	Evaluate a 2D symmetrical Gaussian integrated in pixels.

	Parameters:
		x (numpy.ndarray): x coordinates at which to evaluate the PSF.
		y (numpy.ndarray): y coordinates at which to evaluate the PSF.
		flux (float): Integrated value.
		x_0 (float): Centroid position.
		y_0 (float): Centroid position.
		sigma (float, optional): Standard deviation of Gaussian. Default=1.

	Returns:
		numpy array: 2D Gaussian integrated pixel values at (x,y).

	Note:
		Inspired by
		https://github.com/astropy/photutils/blob/master/photutils/psf/models.py
	"""
	# The 2D integral over a unit pixel separates into two 1D integrals,
	# each expressible through the error function:
	denom = np.sqrt(2)*sigma
	gx = erf((x - x_0 + 0.5)/denom) - erf((x - x_0 - 0.5)/denom)
	gy = erf((y - y_0 + 0.5)/denom) - erf((y - y_0 - 0.5)/denom)
	return flux/4*(gx*gy)
#--------------------------------------------------------------------------------------------------
def mag2flux(mag, zp=20.451):
	"""
	Convert from magnitude to flux using scaling relation from
	aperture photometry. This is an estimate.

	The default scaling is based on TASOC Data Release 5 from sectors 1-5.

	Parameters:
		mag (ndarray): Magnitude in TESS band.
		zp (float): Zero-point to use in scaling. Default is estimated from
			TASOC Data Release 5 from TESS sectors 1-5.

	Returns:
		ndarray: Corresponding flux value, clipped below at zero.
	"""
	flux = 10**(-0.4*(mag - zp))
	return np.clip(flux, 0, None)
#--------------------------------------------------------------------------------------------------
def sphere_distance(ra1, dec1, ra2, dec2):
	"""
	Calculate the great circle distance between two points using the
	(numerically stable) Vincenty formulae.

	Parameters:
		ra1 (float or ndarray): Longitude of first point in degrees.
		dec1 (float or ndarray): Lattitude of first point in degrees.
		ra2 (float or ndarray): Longitude of second point in degrees.
		dec2 (float or ndarray): Lattitude of second point in degrees.

	Returns:
		ndarray: Distance between points in degrees.

	Note:
		https://en.wikipedia.org/wiki/Great-circle_distance
	"""
	# Work in radians throughout:
	lam1, lam2 = np.deg2rad(ra1), np.deg2rad(ra2)
	phi1, phi2 = np.deg2rad(dec1), np.deg2rad(dec2)
	dlam = lam2 - lam1
	# Vincenty formula: atan2 of the chord terms keeps full precision
	# for both very small and near-antipodal separations.
	numerator = np.sqrt((np.cos(phi2)*np.sin(dlam))**2
		+ (np.cos(phi1)*np.sin(phi2) - np.sin(phi1)*np.cos(phi2)*np.cos(dlam))**2)
	denominator = np.sin(phi1)*np.sin(phi2) + np.cos(phi1)*np.cos(phi2)*np.cos(dlam)
	return np.rad2deg(np.arctan2(numerator, denominator))
#--------------------------------------------------------------------------------------------------
def radec_to_cartesian(radec):
	"""
	Convert spherical coordinates as (ra, dec) pairs to cartesian coordinates (x,y,z).

	Parameters:
		radec (ndarray): Array with ra-dec pairs in degrees.

	Returns:
		ndarray: (x,y,z) unit vectors corresponding to input coordinates.
	"""
	radec = np.atleast_2d(radec)
	phi = np.radians(radec[:,0])              # azimuth angle
	theta = np.pi/2 - np.radians(radec[:,1])  # polar angle from +z
	sin_theta = np.sin(theta)
	xyz = np.empty((radec.shape[0], 3), dtype='float64')
	xyz[:,0] = sin_theta*np.cos(phi)
	xyz[:,1] = sin_theta*np.sin(phi)
	xyz[:,2] = np.cos(theta)
	return xyz
#--------------------------------------------------------------------------------------------------
def cartesian_to_radec(xyz):
	"""
	Convert cartesian coordinates (x,y,z) to spherical coordinates in ra-dec form.

	Parameters:
		xyz (ndarray): Array of (x,y,z) unit vectors.

	Returns:
		ndarray: ra-dec coordinates in degrees corresponding to input coordinates.
	"""
	xyz = np.atleast_2d(xyz)
	radec = np.empty((xyz.shape[0], 2), dtype='float64')
	radec[:,1] = np.pi/2 - np.arccos(xyz[:,2])
	radec[:,0] = np.arctan2(xyz[:,1], xyz[:,0])
	# Wrap right ascension into the range [0, 2*pi):
	negative = radec[:,0] < 0
	radec[negative,0] = 2*np.pi - np.abs(radec[negative,0])
	too_big = radec[:,0] > 2*np.pi
	radec[too_big,0] -= 2*np.pi
	return np.degrees(radec)
#--------------------------------------------------------------------------------------------------
def rms_timescale(time, flux, timescale=3600/86400):
	"""
	Compute robust RMS on specified timescale. Using MAD scaled to RMS.

	Parameters:
		time (ndarray): Timestamps in days.
		flux (ndarray): Flux to calculate RMS for.
		timescale (float, optional): Timescale to bin timeseries before
			calculating RMS. Default=1 hour.

	Returns:
		float: Robust RMS on specified timescale.

	.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
	"""
	time = np.asarray(time)
	flux = np.asarray(flux)
	if len(flux) == 0 or allnan(flux):
		return np.nan
	if len(time) == 0 or allnan(time):
		raise ValueError("Invalid time-vector specified. No valid timestamps.")
	time_min = np.nanmin(time)
	time_max = np.nanmax(time)
	if not np.isfinite(time_min) or not np.isfinite(time_max) or time_max - time_min <= 0:
		raise ValueError("Invalid time-vector specified")
	# Bin edges separated by the requested timescale, closed off with
	# the final timestamp:
	edges = np.append(np.arange(time_min, time_max, timescale), time_max)
	# Bin the (finite) flux values down to the timescale:
	good = np.isfinite(flux)
	flux_bin, _, _ = binned_statistic(time[good], flux[good], nanmean, bins=edges)
	# Robust RMS: median absolute deviation scaled to 1-sigma.
	return mad_to_sigma * nanmedian(np.abs(flux_bin - nanmedian(flux_bin)))
#--------------------------------------------------------------------------------------------------
def find_nearest(array, value):
	"""
	Return the index of the element of ``array`` closest to ``value``.

	Parameters:
		array (ndarray): Array to search.
		value: Value to search array for.

	Returns:
		int: Index of ``array`` closest to ``value``.

	Raises:
		ValueError: If ``value`` is NaN.

	.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
	"""
	if np.isnan(value):
		raise ValueError("Invalid search value")
	# Plus/minus infinity map straight onto the extreme elements:
	if np.isinf(value):
		return nanargmax(array) if value > 0 else nanargmin(array)
	return nanargmin(np.abs(array - value))
#--------------------------------------------------------------------------------------------------
def download_file(url, destination, desc=None, timeout=60,
		position_holders=None, position_lock=None, showprogress=None):
	"""
	Download file from URL and place into specified destination.

	Parameters:
		url (str): URL to file to be downloaded.
		destination (str): Path where to save file.
		desc (str, optional): Description to write next to progress-bar.
		timeout (float): Time to wait for server response in seconds. Default=60.
		position_holders (list, optional): Shared list of booleans used by
			``download_parallel`` so each worker claims its own progress-bar row.
			Leave as ``None`` for standalone use.
		position_lock (Lock, optional): Lock guarding ``position_holders``;
			required whenever ``position_holders`` is given.
		showprogress (bool): Force showing the progress bar. If ``None``, the
			progressbar is shown based on the logging level and output type.

	Raises:
		Exception: Any failure is logged and re-raised after the (possibly
			partial) destination file has been removed.

	.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
	"""
	logger = logging.getLogger(__name__)
	tqdm_settings = {
		'unit': 'B',
		'unit_scale': True,
		'position': None,
		'leave': True,
		'disable': None if logger.isEnabledFor(logging.INFO) else True,
		'desc': desc
	}
	if showprogress is not None:
		tqdm_settings['disable'] = not showprogress
	# Running under download_parallel: claim the first free progress-bar row.
	if position_holders is not None:
		tqdm_settings['leave'] = False
		position_lock.acquire()
		tqdm_settings['position'] = position_holders.index(False)
		position_holders[tqdm_settings['position']] = True
		position_lock.release()
	# Strategy for retrying failing requests several times
	# with a small increasing sleep in between:
	retry_strategy = Retry(
		total=3,
		backoff_factor=1,
		status_forcelist=[413, 429, 500, 502, 503, 504],
		allowed_methods=['HEAD', 'GET'],
	)
	adapter = HTTPAdapter(max_retries=retry_strategy)
	try:
		with requests.Session() as http:
			http.mount("https://", adapter)
			http.mount("http://", adapter)
			# Start stream from URL and throw an error for bad status codes:
			response = http.get(url, stream=True, allow_redirects=True, timeout=timeout)
			response.raise_for_status()
			# Content-Length may be missing (chunked responses); handle both.
			total_size = response.headers.get('content-length', None)
			if total_size is not None:
				total_size = int(total_size)
			block_size = 1024
			with open(destination, 'wb') as handle:
				with tqdm.tqdm(total=total_size, **tqdm_settings) as pbar:
					for block in response.iter_content(block_size):
						handle.write(block)
						pbar.update(len(block))
			# Sanity check: size on disk must match the advertised length.
			if total_size is not None and os.path.getsize(destination) != total_size:
				raise RuntimeError("File not downloaded correctly")
	except: # noqa: E722, pragma: no cover
		# Never leave a partial file behind:
		logger.exception("Could not download file")
		if os.path.exists(destination):
			os.remove(destination)
		raise
	finally:
		# Pause before returning to give progress bar time to write.
		if position_holders is not None:
			position_lock.acquire()
			position_holders[tqdm_settings['position']] = False
			position_lock.release()
#--------------------------------------------------------------------------------------------------
def download_parallel(urls, workers=4, timeout=60, showprogress=None):
	"""
	Download several files in parallel using multiple threads.

	Parameters:
		urls (iterable): List of files to download. Each element should consist of a list or tuple,
			containing two elements: The URL to download, and the path to the destination where the
			file should be saved.
		workers (int, optional): Number of threads to use for downloading. Default=4.
		timeout (float): Time to wait for server response in seconds. Default=60.
		showprogress (bool, optional): Forwarded to ``download_file``.

	Raises:
		RuntimeError: If one or more downloads failed; message lists the failing URLs.

	.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
	"""
	# Don't overcomplicate things for a single file:
	if len(urls) == 1:
		download_file(urls[0][0], urls[0][1], timeout=timeout, showprogress=showprogress)
		return
	workers = min(workers, len(urls))
	# One shared progress-bar slot per worker, protected by a lock:
	position_holders = [False] * workers
	plock = Lock()
	# Adapter so executor.map-style submission can pass a single (url, dest) pair:
	def _wrapper(arg):
		download_file(arg[0], arg[1],
			timeout=timeout,
			showprogress=showprogress,
			position_holders=position_holders,
			position_lock=plock)
	errors = []
	with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
		# Start the load operations and mark each future with its URL
		future_to_url = {executor.submit(_wrapper, url): url for url in urls}
		for future in concurrent.futures.as_completed(future_to_url):
			url = future_to_url[future]
			try:
				future.result()
			except: # noqa: E722, pragma: no cover
				# Collect failures; report them all at once below.
				errors.append(url[0])
	if errors:
		raise RuntimeError("Errors encountered during download of the following URLs:\n%s" % '\n'.join(errors))
#--------------------------------------------------------------------------------------------------
class TqdmLoggingHandler(logging.Handler):
	"""Logging handler that writes records via ``tqdm.tqdm.write`` so that
	log lines do not corrupt any active tqdm progress bars."""

	def __init__(self, *args, **kwargs):
		super().__init__(*args, **kwargs)

	def emit(self, record):
		# Format and print through tqdm; fall back to the standard
		# logging error handling on any unexpected failure.
		try:
			msg = self.format(record)
			tqdm.tqdm.write(msg)
			self.flush()
		except (KeyboardInterrupt, SystemExit): # pragma: no cover
			raise
		except: # noqa: E722, pragma: no cover
			self.handleError(record)
#--------------------------------------------------------------------------------------------------
class ListHandler(logging.Handler):
	"""
	A logging.Handler that collects formatted messages in a plain list object.

	The stdlib logging.QueueHandler/logging.QueueListener pair cannot be used
	for this purpose because the QueueListener consumes records on a private
	thread rather than the main thread.

	.. warning::
		This handler is not thread-safe. Do not use it in threaded environments.
	"""

	def __init__(self, *args, message_queue, **kwargs):
		"""Keep a reference to the target list; everything else goes to the superclass."""
		super().__init__(*args, **kwargs)
		self.message_queue = message_queue

	def emit(self, record):
		"""Format *record* and append it to the list, stripped of trailing newlines."""
		formatted = self.format(record)
		self.message_queue.append(formatted.rstrip('\n'))
#--------------------------------------------------------------------------------------------------
class LoggerWriter(object):
	"""
	Minimal file-like adapter that forwards written text to a logger.

	Intended for use with :py:func:`contextlib.redirect_stdout` or
	:py:func:`contextlib.redirect_stderr` so that code relying on plain
	:py:func:`print` calls ends up writing to the given logger instead.

	.. code-block:: python
		:linenos:

		logger = logging.getLogger(__name__)
		with contextlib.redirect_stdout(LoggerWriter(logger, logging.INFO)):
			print("This goes into the logger instead of STDOUT")

	.. warning::
		This object is not thread-safe. Do not use it in threaded environments.

	.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
	"""

	def __init__(self, logger, level=logging.INFO):
		self.logger = logger
		self.level = level

	def write(self, message):
		# print() also emits bare newlines/whitespace; those are dropped.
		if message.strip():
			self.logger.log(self.level, message)

	def flush(self):
		# Nothing is buffered; present only to satisfy the file protocol.
		pass
#--------------------------------------------------------------------------------------------------
def sqlite_drop_column(conn, table, col):
	"""
	Drop table column from SQLite table.

	Since SQLite does not have functionality for dropping/deleting columns
	in existing tables, this function can provide this functionality.
	This is done by temporarily copying the entire table, so this can be
	quite an expensive operation. Indexes on the table are dropped and
	re-created afterwards; an index that references the dropped column
	causes a RuntimeError instead.

	Note: the rebuilt table is created with ``CREATE TABLE ... AS SELECT``,
	which does not preserve column types, constraints or defaults.

	Parameters:
		conn (:class:`sqlite3.Connection`): Connection to SQLite database.
		table (str): Table to drop column from.
		col (str): Column to be dropped from table.

	Raises:
		ValueError: If ``col`` does not exist in ``table``.
		RuntimeError: If ``col`` is referenced by an index on ``table``.

	.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
	"""
	# Get a list of columns in the existing table:
	cursor = conn.cursor()
	cursor.execute(f"PRAGMA table_info({table:s});")
	columns = [row[1] for row in cursor.fetchall()]
	if col not in columns:
		raise ValueError(f"Column '{col:s}' not found in table '{table:s}'")
	columns.remove(col)
	columns = ','.join(columns)
	# Get list of indexes associated with the table:
	cursor.execute("SELECT name,sql FROM sqlite_master WHERE type='index' AND tbl_name=?;", [table])
	index = cursor.fetchall()
	index_names = [row[0] for row in index]
	index_sql = [row[1] for row in index]
	# Refuse to continue if any index references the column to be removed:
	regex_index = re.compile(r'^CREATE( UNIQUE)? INDEX (.+) ON ' + re.escape(table) + r'\s*\((.+)\).*$', re.IGNORECASE)
	for sql in index_sql:
		m = regex_index.match(sql)
		if not m:
			raise RuntimeError("COULD NOT UNDERSTAND SQL") # pragma: no cover
		index_columns = [i.strip() for i in m.group(3).split(',')]
		if col in index_columns:
			raise RuntimeError("Column is used in INDEX %s." % m.group(2))
	# Store the current foreign_key setting:
	cursor.execute("PRAGMA foreign_keys;")
	current_foreign_keys = cursor.fetchone()[0]
	# BUGFIX: "PRAGMA foreign_keys" is a no-op inside a transaction, so it
	# must be changed *before* BEGIN and restored after COMMIT/ROLLBACK:
	cursor.execute("PRAGMA foreign_keys=off;")
	try:
		# Start a transaction:
		cursor.execute('BEGIN TRANSACTION;')
		try:
			# Drop all indexes associated with table:
			for name in index_names:
				cursor.execute(f"DROP INDEX {name:s};")
			# Rebuild the table without the dropped column:
			cursor.execute(f"ALTER TABLE {table:s} RENAME TO {table:s}_backup;")
			cursor.execute(f"CREATE TABLE {table:s} AS SELECT {columns:s} FROM {table:s}_backup;")
			cursor.execute(f"DROP TABLE {table:s}_backup;")
			# Recreate all indexes associated with table:
			for sql in index_sql:
				cursor.execute(sql)
			conn.commit()
		except: # noqa: E722, pragma: no cover
			conn.rollback()
			raise
	finally:
		cursor.execute(f"PRAGMA foreign_keys={current_foreign_keys};")
| tasoc/photometry | photometry/utilities.py | Python | gpl-3.0 | 30,338 | [
"Gaussian"
] | 0954e1105112461fd2b9b7d82beffb7011fb2eec2596a933d909f9eacbc820d3 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2014 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import contextlib
import gtk
import mock
from stoqlib.gui.dialogs.personmergedialog import PersonMergeDialog
from stoqlib.gui.test.uitestutils import GUITest
class TestPersonMergeDialog(GUITest):
    """GUI tests for PersonMergeDialog: searching for duplicate persons by
    name/phone/street and merging the selected duplicates.

    NOTE(review): this module uses Python 2-era APIs (``contextlib.nested``,
    pygtk ``gtk``, ``assertEquals``); it predates Python 3.
    """

    def _create_data(self, name, phone=None, street=None):
        # Create one client, optionally with a phone number and an address.
        client = self.create_client(name=name)
        if phone:
            client.person.phone_number = phone
        if street:
            address = self.create_address(person=client.person)
            address.street = street

    def _create_sample_data(self):
        # A mix of exact duplicates, same-first-name records and
        # same-street records exercised by the search tests below.
        self._create_data(name=u'Juca Pinga', phone=u'33710001')
        self._create_data(name=u'Juca da Silva Pinga')
        self._create_data(name=u'Juca', phone=u'33710001')
        self._create_data(name=u'Juca Antônio')
        self._create_data(name=u'José Pinga', street=u'Rua Dos Bobos')
        self._create_data(name=u'Jose Cuervo Pinga', phone=u'33710002',
                          street=u'Av. Dos bobos')
        self._create_data(name=u'José Cuervo Pinga', phone=u'33710003')

    def test_create(self):
        # Dialog construction matches the stored UI snapshot.
        dialog = PersonMergeDialog(self.store)
        self.check_editor(dialog, 'dialog-person-merge-dialog')

    @mock.patch('stoqlib.gui.dialogs.personmergedialog.ProgressDialog')
    def test_search(self, ProgressDialog):
        # Clicking search pops up the progress dialog exactly once.
        dialog = PersonMergeDialog(self.store)
        self.click(dialog.search_button)
        ProgressDialog.assert_called_once_with('Searching duplicates',
                                               pulse=False)

    @mock.patch('stoqlib.gui.dialogs.personmergedialog.ProgressDialog')
    def test_search_same_name(self, ProgressDialog):
        self._create_sample_data()
        dialog = PersonMergeDialog(self.store)
        # First, only the exact name
        dialog.model.same_phone = False
        dialog.model.same_street = False
        self.click(dialog.search_button)
        names = set(d.name for d in dialog.dup_tree)
        self.assertEquals(names, set([u'José Cuervo Pinga', u'Jose Cuervo Pinga']))

    @mock.patch('stoqlib.gui.dialogs.personmergedialog.ProgressDialog')
    def test_search_first_name_phone(self, ProgressDialog):
        self._create_sample_data()
        dialog = PersonMergeDialog(self.store)
        dialog.model.method = dialog.model.FIRST_NAME
        # First, only the first name and phone
        dialog.model.same_phone = True
        dialog.model.same_street = False
        self.click(dialog.search_button)
        names = set(d.name for d in dialog.dup_tree)
        self.assertEquals(names, set([u'Juca Pinga', 'Juca']))

    @mock.patch('stoqlib.gui.dialogs.personmergedialog.ProgressDialog')
    def test_search_first_last_name_address(self, ProgressDialog):
        self._create_sample_data()
        dialog = PersonMergeDialog(self.store)
        dialog.model.method = dialog.model.FIRST_LAST_NAME
        # First, only the first name and phone
        dialog.model.same_phone = False
        dialog.model.same_street = True
        self.click(dialog.search_button)
        names = set(d.name for d in dialog.dup_tree)
        self.assertEquals(names, set([u'José Pinga', 'Jose Cuervo Pinga']))

    @mock.patch('stoqlib.gui.dialogs.personmergedialog.ProgressDialog')
    @mock.patch('stoqlib.gui.dialogs.personmergedialog.yesno')
    def test_merge(self, yesno, ProgressDialog):
        self._create_sample_data()
        dialog = PersonMergeDialog(self.store)
        dialog.model.same_phone = False
        dialog.model.same_street = False
        self.click(dialog.search_button)
        # Mark every child row for merging; the parent row is the target.
        for row in dialog.dup_tree:
            if not row.parent:
                root = row
            else:
                # Figure out how to mimic the user clicking the row
                row.merge = True
        self.assertEquals(len(root.get_to_merge()), 2)
        dialog.dup_tree.select(root)
        # Merge runs in its own store; redirect it to the test store.
        with contextlib.nested(
                mock.patch('stoq.gui.inventory.api.new_store'),
                mock.patch.object(self.store, 'commit'),
                mock.patch.object(self.store, 'close')) as ctx:
            new_store = ctx[0]
            new_store.return_value = self.store
            self.click(dialog.merge_button)
        yesno.assert_called_once_with(
            'This will merge 2 persons into 1. Are you sure?', gtk.RESPONSE_NO,
            'Merge', "Don't merge")
        # If we search again, there should be no duplicates
        self.click(dialog.search_button)
        self.assertEquals(len(dialog.dup_tree), 0)
| tiagocardosos/stoq | stoqlib/gui/test/test_personmergedialog.py | Python | gpl-2.0 | 5,377 | [
"VisIt"
] | 805483a433c1983742f0207469efb180dd4218d62a027df9e1ff98a02b2ac1d7 |
# Copyright 2016 by Raytheon BBN Technologies Corp. All Rights Reserved.
"""
Add synchronization primitives
Mark off the start and end of each concurrent block for each channel
with a Barrier() message. These are points at which all channels should be brought
in sync with each other.
Later (evenblocks.py) we'll calculate the per channel length of the sequence segment up
to the barrier, and insert pauses (Id pulses of proper length) where necessary to keep
all sequences in sync.
Note that we can only do that where we can determine the length.
Where the length is indeterminate (say, the control flow depends on the result of a
quantum measurement), we must do a Sync (send a message saying this channel is done) and a wait
(wait for the global return message saying all channels are done). Note that this takes more time,
so we prefer the sleeps.
"""
import ast
from copy import deepcopy
import pyqgl2.ast_util
from pyqgl2.ast_qgl2 import is_concur, is_seq
from pyqgl2.ast_util import NodeError
from pyqgl2.ast_util import ast2str, copy_all_loc, expr2ast
from pyqgl2.find_channels import find_all_channels
# Global counter supplying a unique id for the next Barrier() message;
# incremented by 2 per concur block (start barrier + end barrier).
BARRIER_CTR = 0
class SynchronizeBlocks(ast.NodeTransformer):
    """
    Add a Barrier to the start and end of each seq block within each concur block

    Note that before processing is done, we add an empty seq block for
    each channel for which there is not a seq block in a given concur
    block, so that we can add Barriers for that channel, to keep that channel lined
    up with the others.

    For example, if we had the following trivial program:

        with concur:
            with seq:
                X90(QBIT_1)
            with seq:
                Y90(QBIT_2)
        with concur:
            with seq:
                X90(QBIT_2)
            with seq:
                Y90(QBIT_3)

    In the first concur block, only QBIT_1 and QBIT_2 are "busy", but
    QBIT_3 will need to wait for the others to complete so it can start
    in sync with QBIT_2 when it is time.
    And if those blocks were of indeterminate length, we'd be using SYNC
    and WAIT. Currently that WAIT needs all channels to report in, so we
    need QBIT_3 to do the SYNC as well.
    Similarly, in the second concur
    block, only QBIT_2 and QBIT_3 are "busy", but QBIT_1 will need to
    process any SYNC/WAIT as well. (since the second block is the final
    block in this program, QBIT_1 does not really need to synchronize
    with the other channels, since no other operations follow, but
    if the program continued then this synchronization would be
    important)

    Therefore, this will expand to:

        with concur:
            with seq:
                Barrier()
                X90(QBIT_1)
                Barrier()
            with seq:
                Barrier()
                Y90(QBIT_2)
                Barrier()
            with seq: # for QBIT_3
                Barrier()
                Barrier()
        with concur:
            with seq: # For QBIT_1
                Barrier()
                Barrier()
            with seq:
                Barrier()
                X90(QBIT_2)
                Barrier()
            with seq:
                Barrier()
                Y90(QBIT_3)
                Barrier()

    Later, those Barrier() messages will become Id or Sync and Wait pulses.
    """

    def __init__(self, node):
        # The set of all channels observed in the input AST
        #
        self.all_channels = find_all_channels(node)
        # Template AST for an argument-less Barrier() call.
        self.blank_barrier_ast = expr2ast('Barrier()')

    def visit_With(self, node):
        # Only concur blocks get barrier insertion; descend into anything else.
        if is_concur(node):
            return self.concur_wait(node)
        else:
            return self.generic_visit(node)

    def concur_wait(self, node):
        """
        Synchronize the start of each seq block within a concur block,

        Add seq blocks for any "missing" channels so we can
        add a Barrier instruction for each of them as well
        """
        global BARRIER_CTR
        # This method will be destructive, unless we make a new
        # copy of the AST tree first
        #
        node = deepcopy(node)
        seen_channels = set()
        # Channels in this with_concur
        concur_channels = find_all_channels(node)
        # For creating the Barriers, we want QGL1 scoped variables that will be real channel instances.
        # We basically have that already.
        real_chans = set()
        for chan in concur_channels:
            real_chans.add(chan)
        # Allocate a matched (start, end) pair of barrier ids for this block.
        start_barrier = BARRIER_CTR
        end_barrier = start_barrier + 1
        BARRIER_CTR += 2
        for stmnt in node.body:
            if not is_seq(stmnt):
                NodeError.error_msg(stmnt,
                        'non-seq block inside concur block?')
                return node
            seq_channels = find_all_channels(stmnt)
            if seq_channels.intersection(seen_channels):
                NodeError.error_msg(stmnt,
                        'seq blocks have overlapping channels')
                return node
            seen_channels = seen_channels.union(seq_channels)
            chan_name = ','.join(seq_channels)
            # mark stmnt with chan_name or seq_channels in another way
            if hasattr(stmnt, 'qgl_chan_list'):
                oldChanSet = set(stmnt.qgl_chan_list)
                newChanSet = seq_channels
                oldMissing = newChanSet - oldChanSet
                oldExtra = oldChanSet - newChanSet
                if len(oldMissing) > 0:
                    NodeError.diag_msg(stmnt, 'marked chan list %s was missing %s' % (str(oldChanSet), str(oldMissing)))
                if len(oldExtra) > 0:
                    NodeError.diag_msg(stmnt, 'marked chan list %s had extra %s' % (str(oldChanSet), str(oldExtra)))
            NodeError.diag_msg(stmnt, 'Marking chan list %s' % (str(seq_channels)))
            stmnt.qgl_chan_list = list(seq_channels)
            new_seq_body = list()

            # Helper to ensure the string we feed to AST doesn't put quotes around
            # our Qubit variable names
            def appendChans(bString, chans):
                bString += '['
                first = True
                for chan in chans:
                    if first:
                        bString += str(chan)
                        first = False
                    else:
                        bString += "," + str(chan)
                bString += ']'
                return bString

            # Add global ctr, chanlist=concur_channels
            # FIXME: Hold concur_channels as a string? List?
            bstring = 'Barrier("%s", ' % str(start_barrier)
            bstring = appendChans(bstring, list(real_chans))
            bstring += ')\n'
            barrier_ast = expr2ast(bstring)
            # barrier_ast = expr2ast('Barrier(%s, %s)\n' % (str(start_barrier), list(real_chans)))
            copy_all_loc(barrier_ast, node)
            barrier_ast.channels = concur_channels
            # print("*****Start barrier: %s" % pyqgl2.ast_util.ast2str(barrier_ast))
            new_seq_body.append(barrier_ast)
            new_seq_body += stmnt.body
            bstring = 'Barrier("%s", ' % str(end_barrier)
            bstring = appendChans(bstring, list(real_chans))
            bstring += ')\n'
            end_barrier_ast = expr2ast(bstring)
            #end_barrier_ast = expr2ast('Barrier(%s, %s)\n' % (str(end_barrier), list(real_chans)))
            copy_all_loc(end_barrier_ast, node)
            # Add global ctr, chanlist=concur_channels
            end_barrier_ast.channels = concur_channels
            # print('End AST: %s' % ast2str(end_barrier_ast))
            new_seq_body.append(end_barrier_ast)
            stmnt.body = new_seq_body
        # FIXME: In new thinking, is the proper unseen set the global one,
        # Or only those local to this with concur. I think only local
        # NOTE(review): the loop below reuses 'stmnt' and 'appendChans' after
        # the for-loop above ends; this relies on node.body being non-empty.
        for unseen_chan in concur_channels - seen_channels:
            #print('DIAG %s' % ast2str(stmnt))
            NodeError.diag_msg(stmnt,
                    'channels unreferenced in concur: %s' % str(unseen_chan))
            bstring = 'with seq:\n Barrier("%s", ' % str(start_barrier)
            bstring = appendChans(bstring, list(real_chans))
            bstring += ')\n Barrier("%s",' % str(end_barrier)
            bstring = appendChans(bstring, list(real_chans))
            bstring += ')\n'
            empty_seq_ast = expr2ast(bstring)
            # print('Empty AST: %s' % ast2str(empty_seq_ast))
            # empty_seq_ast = expr2ast(
            #     'with seq:\n Barrier(%s, %s)\n Barrier(%s, %s)' % (str(start_barrier), list(real_chans), str(end_barrier), list(real_chans)))
            # Mark empty_seq_ast with unseen_chan
            empty_seq_ast.qgl_chan_list = [unseen_chan]
            copy_all_loc(empty_seq_ast, node)
            node.body.append(empty_seq_ast)
        return node
if __name__ == '__main__':
    # Ad-hoc manual test harness: parse a small QGL2 program, run the
    # synchronizer over it and print the before/after ASTs.
    def test_code(code_text):
        tree = ast.parse(code_text, mode='exec')
        sync = SynchronizeBlocks(tree)
        new = sync.visit(deepcopy(tree))
        print('ORIG\n%s\n=>\n%s' % (ast2str(tree), ast2str(new)))

    def t1():
        # NOTE(review): the indentation inside this program string was lost in
        # transit and has been reconstructed to the only parseable layout.
        code = """
with concur:
    with seq:
        X90(QBIT_1)
    with seq:
        Y90(QBIT_2)
with concur:
    with seq:
        X90(QBIT_2)
    with seq:
        Y90(QBIT_3)
with concur:
    with seq:
        X90(QBIT_4)
"""
        test_code(code)

    def main():
        t1()

    main()
| BBN-Q/pyqgl2 | src/python/attic/sync.py | Python | apache-2.0 | 9,551 | [
"VisIt"
] | ec4f5191d8e05161c3c7b2577450cbe83150990f5bef7cd00e72a485ea8eb2c3 |
"""Helpers for tests."""
from pkg_resources import resource_filename
from collections import namedtuple
from datetime import datetime
from pycds import get_schema_name, Contact, Network, Station, History, Variable
# Fixture helpers
# The following generators abstract behavior common to many fixtures in this
# test suite. The behaviour pattern is:
#
# def behaviour(session, ...)
# setup(session, ...)
# yield session
# teardown(session, ...)
#
# Two examples of this pattern are
#
# setup = add database objects to session
# teardown = remove database objects from session
#
# and
#
# setup = create views in session
# teardown = drop views in session
#
# To use such a generator correctly, i.e., so that the teardown after the
# yield is also performed, a fixture must first yield the result of
# `next(behaviour)`, then call `next(behaviour)` again. This can be done
# in two ways:
#
# g = behaviour(...)
# yield next(g)
# next(g)
#
# or, shorter and clearer:
#
# for sesh in behaviour(...):
# yield sesh
#
# The shorter method is preferred.
def add_then_delete_objs(sesh, sa_objects):
    """Add objects to session, yield session, then remove the objects again
    in reverse order. For correct usage (the generator must be advanced
    twice), see the fixture-helper notes at the top of this module.

    Args:
        sesh (sqlalchemy.orm.session.Session): database session
        sa_objects: List of SQLAlchemy ORM objects to be added to database
            for setup and removed on teardown. Order within list is respected
            for setup and teardown, so that dependencies are respected.

    Returns:
        yields sesh after setup
    """
    # Setup: insert in declaration order, flushing after each object.
    for obj in sa_objects:
        sesh.add(obj)
        sesh.flush()
    yield sesh
    # Teardown: remove in reverse order so dependents are deleted first.
    for obj in reversed(sa_objects):
        sesh.delete(obj)
        sesh.flush()
def create_then_drop_views(sesh, views):
    """Create views in session, yield session, then drop the views again in
    reverse order. For correct usage, see the fixture-helper notes at the
    top of this module.

    Args:
        sesh (sqlalchemy.orm.session.Session): database session
        views: List of views created in database on setup and dropped
            on teardown. Order within list is respected for setup and
            teardown, so that dependencies can be respected.

    Returns:
        yields sesh after setup
    """
    # Setup: create views in declaration order.
    for v in views:
        sesh.execute(v.create())
    yield sesh
    # Teardown: drop in reverse order so dependent views go first.
    for v in reversed(views):
        sesh.execute(v.drop())
# Data insertion helpers
def with_schema_name(sesh, schema_name, action):
    """Execute ``action(sesh)`` with the search path set to ``schema_name``,
    restoring the previously active search path afterwards.
    """
    previous = sesh.execute("SHOW search_path").scalar()
    sesh.execute(f"SET search_path TO {schema_name}")
    action(sesh)
    # Put the original search path back so later statements are unaffected.
    sesh.execute(f"SET search_path TO {previous}")
# Shorthand for defining various database objects.
# Each *Test* tuple mirrors the field list of the corresponding PyCDS ORM
# model and is expanded via ._asdict() into that model's constructor.
TestContact = namedtuple("TestContact", "name title organization email phone")
TestNetwork = namedtuple("TestNetwork", "name long_name color")
TestStation = namedtuple("TestStation", "native_id network histories")
TestHistory = namedtuple(
    "TestHistory", "station_name elevation sdate edate province country freq"
)
TestVariable = namedtuple(
    "TestVariable",
    "name unit standard_name cell_method precision description display_name "
    "short_name network",
)
def insert_test_data(sesh, schema_name=get_schema_name()):
    """Insert a small-ish set of test data (networks, contacts, histories,
    stations and variables) into the tables of the named schema."""

    def action(sesh):
        moti = Network(
            **TestNetwork(
                "MOTI",
                "Ministry of Transportation and Infrastructure",
                "000000",
            )._asdict()
        )
        # NOTE(review): this second network is constructed with the same
        # MOTI name/long_name as the first, yet it is bound to ``moe`` and
        # used as the MOE network below -- this looks like a copy-paste
        # error; confirm the intended values before changing.
        moe = Network(
            **TestNetwork(
                "MOTI",
                "Ministry of Transportation and Infrastructure",
                "000000",
            )._asdict()
        )
        sesh.add_all([moti, moe])
        simon = Contact(
            **TestContact(
                "Simon",
                "Avalanche Guy",
                "MOTI",
                "simn@moti.bc.gov.ca",
                "250-555-1212",
            )._asdict()
        )
        simon.networks = [moti]
        ted = Contact(
            **TestContact(
                "Ted",
                "Air Quailty Guy",
                "MOE",
                "ted@moti.bc.gov.ca",
                "250-555-2121",
            )._asdict()
        )
        ted.networks = [moe]
        sesh.add_all([simon, ted])
        # Station observation histories; the two Prince Rupert entries model
        # a station whose record was split (the second has no end date).
        histories = [
            TestHistory(
                "Brandywine",
                496,
                datetime(2001, 1, 22, 13),
                datetime(2011, 4, 6, 11),
                "BC",
                "Canada",
                "1-hourly",
            ),
            TestHistory(
                "Stewart",
                15,
                datetime(2004, 1, 22, 13),
                datetime(2011, 4, 6, 11),
                "BC",
                "Canada",
                "1-hourly",
            ),
            TestHistory(
                "Cayoosh Summit",
                1350,
                datetime(1997, 1, 22, 13),
                datetime(2011, 4, 6, 11),
                "BC",
                "Canada",
                "1-hourly",
            ),
            TestHistory(
                "Boston Bar RCMP Station",
                180,
                datetime(1999, 1, 22, 13),
                datetime(2002, 4, 6, 11),
                "BC",
                "Canada",
                "1-hourly",
            ),
            TestHistory(
                "Prince Rupert",
                35,
                datetime(1990, 1, 22, 13),
                datetime(1996, 4, 6, 11),
                "BC",
                "Canada",
                "1-hourly",
            ),
            TestHistory(
                "Prince Rupert",
                36,
                datetime(1997, 1, 22, 13),
                None,
                "BC",
                "Canada",
                "1-hourly",
            ),
        ]
        histories = [History(**hist._asdict()) for hist in histories]
        sesh.add_all(histories)
        # Stations, each linked to one (or, for M106037, two) histories.
        stations = [
            TestStation("11091", moti, [histories[0]]),
            TestStation("51129", moti, [histories[1]]),
            TestStation("26224", moti, [histories[2]]),
            TestStation("E238240", moe, [histories[3]]),
            TestStation("M106037", moe, histories[4:6]),
        ]
        for station in stations:
            sesh.add(Station(**station._asdict()))
        variables = [
            TestVariable(
                "air-temperature",
                "degC",
                "air_temperature",
                "time: point",
                None,
                "Instantaneous air temperature",
                "Temperature (Point)",
                "",
                moti,
            ),
            TestVariable(
                "average-direction",
                "km/h",
                "wind_from_direction",
                "time: mean",
                None,
                "Hourly average wind direction",
                "Wind Direction (Mean)",
                "",
                moti,
            ),
            TestVariable(
                "dew-point",
                "degC",
                "dew_point_temperature",
                "time: point",
                None,
                "",
                "Dew Point Temperature (Mean)",
                "",
                moti,
            ),
            TestVariable(
                "BAR_PRESS_HOUR",
                "millibar",
                "air_pressure",
                "time:point",
                None,
                "Instantaneous air pressure",
                "Air Pressure (Point)",
                "",
                moe,
            ),
        ]
        for variable in variables:
            sesh.add(Variable(**variable._asdict()))

    with_schema_name(sesh, schema_name, action)
def insert_crmp_data(sesh, schema_name=get_schema_name()):
    """Load the packaged CRMP database dump and execute it against the
    tables in the named schema.
    """
    def action(s):
        sql_path = resource_filename("pycds", "data/crmp_subset_data.sql")
        with open(sql_path, "r") as fh:
            s.execute(fh.read())

    with_schema_name(sesh, schema_name, action)
| pacificclimate/pycds | tests/helpers.py | Python | gpl-3.0 | 8,352 | [
"MOE"
] | 49fa469f505d860051c22b5b8e93c20ff030b91c0f03f51a87765b79db3881c2 |
# $Id: test_MurckoScaffold.py 3672 2010-06-14 17:10:00Z landrgr1 $
#
# Created by Peter Gedeck, June 2008
#
from collections import namedtuple
import doctest
import unittest
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from rdkit.Chem.Scaffolds.MurckoScaffold import (GetScaffoldForMol, _pyGetScaffoldForMol,
MurckoScaffoldSmilesFromSmiles,
MurckoScaffoldSmiles, MakeScaffoldGeneric)
# Pair of an input SMILES string and its expected Murcko scaffold SMILES.
TestMolecule = namedtuple('TestMolecule', 'smiles,scaffold')
def load_tests(loader, tests, ignore):
  """ Add the Doctests from the module """
  doc_suite = doctest.DocTestSuite(MurckoScaffold, optionflags=doctest.ELLIPSIS)
  tests.addTests(doc_suite)
  return tests
class TestCase(unittest.TestCase):
def test1MurckoScaffold(self):
  # Test the functionality on a smaller test set:
  # canonicalize both computed and expected scaffolds before comparing.
  for testMol in self.testMolecules:
    mol = Chem.MolFromSmiles(testMol.smiles)
    calcScaffold = Chem.MolToSmiles(GetScaffoldForMol(mol))
    actualScaffold = Chem.MolToSmiles(Chem.MolFromSmiles(testMol.scaffold))
    self.assertEqual(calcScaffold, actualScaffold)
def test2MurckoScaffold(self):
  # Test the functionality on a larger test set:
  # same canonical-SMILES comparison as test1, over testMolecules2.
  for testMol in self.testMolecules2:
    mol = Chem.MolFromSmiles(testMol.smiles)
    calcScaffold = Chem.MolToSmiles(GetScaffoldForMol(mol))
    actualScaffold = Chem.MolToSmiles(Chem.MolFromSmiles(testMol.scaffold))
    self.assertEqual(calcScaffold, actualScaffold)
def test_ReferenceImplementation(self):
  # Check that the C++ implementation is equivalent to the Python reference implementation
  for testMol in self.testMolecules:
    mol = Chem.MolFromSmiles(testMol.smiles)
    calcScaffold1 = Chem.MolToSmiles(GetScaffoldForMol(mol))
    calcScaffold2 = Chem.MolToSmiles(_pyGetScaffoldForMol(mol))
    self.assertEqual(calcScaffold1, calcScaffold2)
def test_MurckScaffoldSmilesFromSmiles(self):
  # SMILES-in/SMILES-out convenience wrapper; acyclic input yields ''.
  self.assertEqual(
    MurckoScaffoldSmilesFromSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1'), 'c1ccc(Oc2ccccn2)cc1')
  self.assertEqual(MurckoScaffoldSmilesFromSmiles('CCCC'), '')
def test_MurckoScaffoldSmiles(self):
  # Accepts either a SMILES string or a pre-built mol; raises if given neither.
  self.assertEqual(MurckoScaffoldSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1'), 'c1ccc(Oc2ccccn2)cc1')
  self.assertEqual(
    MurckoScaffoldSmiles(mol=Chem.MolFromSmiles('Cc1cc(Oc2nccc(CCC)c2)ccc1')),
    'c1ccc(Oc2ccccn2)cc1')
  self.assertRaises(ValueError, MurckoScaffoldSmiles, smiles=None, mol=None)
def test_MakeScaffoldGeneric(self):
  # MakeScaffoldGeneric replaces all atoms by carbons and all bonds by single bonds.
  def testSmiles(smiles):
    # Round-trip helper: SMILES -> mol -> generic scaffold -> canonical SMILES.
    return Chem.MolToSmiles(MakeScaffoldGeneric(Chem.MolFromSmiles(smiles)))

  self.assertEqual(testSmiles('c1ccccc1'), 'C1CCCCC1')
  self.assertEqual(testSmiles('c1cccnc1'), 'C1CCCCC1')
  # Examples associated with sf.net issue 246
  self.assertEqual(testSmiles('c1[nH]ccc1'), 'C1CCCC1')
  self.assertEqual(testSmiles('C1[NH2+]C1'), 'C1CC1')
  self.assertEqual(testSmiles('C1[C@](Cl)(F)O1'), 'CC1(C)CC1')
testMolecules = [
TestMolecule('CC1CCC1', 'C1CCC1'),
TestMolecule('NCNCC2CC2C1CC1O', 'C1CC1C1CC1'),
# Spiro
TestMolecule('OC2C(C)C21C(N)C1C', 'C2CC12CC1'),
# Carbonyl outside scaffold
TestMolecule('C1CC1C(=O)OC', 'C1CC1'),
# Double bond outside scaffold
TestMolecule('C1CC1C=C', 'C1CC1'),
# Double bond in scaffold
TestMolecule('C1CC1C=CC1CC1C=CNNCO', 'C1CC1C=CC1CC1'),
TestMolecule('CC1CC1C(N)C1C(N)C1', 'C1CC1CC1CC1'),
# Double bond in linker
TestMolecule('C1CC1C(C(C)C)=NC1CC1', 'C1CC1C=NC1CC1'),
# S=O group in scaffold
TestMolecule('C1CC1S(=O)C1CC1C=CNNCO', 'C1CC1S(=O)C1CC1'),
# S=O group outside scaffold
TestMolecule('O=SCNC1CC1S(=O)C1CC1C=CNNCO', 'C1CC1S(=O)C1CC1'),
# SO2 group in scaffold
TestMolecule('C1CC1S(=O)(=O)C1CC1C=CNNCO', 'C1CC1S(=O)(=O)C1CC1'),
# SO2 group outside scaffold
TestMolecule('O=S(CNCNC)(=O)CNC1CC1S(=O)(=O)C1CC1C=CNNCO', 'C1CC1S(=O)(=O)C1CC1'),
# Hydroxamide
TestMolecule('C1CC1C=NO', 'C1CC1'),
# Cyano group
TestMolecule('C1CC1C#N', 'C1CC1'),
# Acetylene group
TestMolecule('C1CC1C#CNC', 'C1CC1'),
TestMolecule('O=C1N(C)C(=O)N1C#CNC', 'O=C1NC(=O)N1'),
TestMolecule('[O-][N+](=O)c1cc(ccc1Cl)NS(=O)(=O)Cc2ccccc2', 'c1ccccc1NS(=O)(=O)Cc2ccccc2'),
# N-Substituted pyrrol
TestMolecule('Cn1cccc1', 'c1ccc[nH]1'),
# Explicit hydrogens are removed
TestMolecule('C1CC1[CH](C)C1CC1', 'C1CC1CC1CC1'),
]
testMolecules2 = [
TestMolecule('CCOc1ccccc1N(S(C)(=O)=O)CC(NC1CCCCC1)=O', 'O=C(NC1CCCCC1)CNc1ccccc1'),
TestMolecule('c1ccc(-c2c(C)n(-c3c(C(O)=O)cccc3)c(C)nc2=O)cc1',
'O=c1c(cn(cn1)-c1ccccc1)-c1ccccc1'),
TestMolecule('Cc1ccc(Cl)c2c1NC(=O)C2=C1NC(=S)NC1=O', 'c1cc2c(cc1)C(=C1C(NC(N1)=S)=O)C(=O)N2'),
TestMolecule('CNC(=O)CCc1[nH]c2c(c1Sc1ccccc1)cccc2', 'c1cc(Sc2c3c([nH]c2)cccc3)ccc1'),
TestMolecule('CC(=O)OCC(=O)C1(O)CCC2C1(C)CC(=O)C1C3(C)CCC(=O)C=C3CCC21',
'O=C1C=C2CCC3C4CCCC4CC(=O)C3C2CC1'),
TestMolecule('CC(C)CC(Nc1nc(Cl)ccc1[N+]([O-])=O)C(O)=O', 'c1ccncc1'),
TestMolecule('COc1ccc(C(Nc2ccc(S(N3C(C)CCCC3)(=O)=O)cc2)=O)c(OC)c1OC',
'O=C(Nc1ccc(S(=O)(=O)N2CCCCC2)cc1)c1ccccc1'),
TestMolecule('CC(C)CCNc1nc(N)c([N+](=O)[O-])c(NCCO)n1', 'c1cncnc1'),
TestMolecule('c1ccc(Oc2c(NC(COC(c3c(C)noc3C)=O)=O)cccc2)cc1',
'O=C(COC(=O)c1cnoc1)Nc1ccccc1Oc1ccccc1'),
TestMolecule('COC(CCCCC1SCC(NC(OC)=O)C1NC(OC)=O)=O', 'C1CCCS1'),
TestMolecule('CSc1ccc(-c2c(C#N)c(N)nc3n(-c4ccccc4)nc(C)c32)cc1',
'c1ccc(cc1)-c1c2c(n(nc2)-c2ccccc2)ncc1'),
TestMolecule('O=C1Cc2ccccc2Sc2c1cc(Cl)cc2', 'O=C1Cc2ccccc2Sc2ccccc21'),
TestMolecule('COC(c1n(CC(N(C)c2ccccc2)=O)c2ccsc2c1)=O', 'O=C(Cn1c2ccsc2cc1)Nc1ccccc1'),
TestMolecule('N=C1C(=Cc2coc3ccccc3c2=O)C(=O)N=C2SC(c3ccncc3)=NN12',
'N=C1C(=Cc2coc3ccccc3c2=O)C(=O)N=C2SC(c3ccncc3)=NN12'),
TestMolecule('CCOC(c1ccc(NC(CCc2c(C)nc3ncnn3c2C)=O)cc1)=O', 'O=C(Nc1ccccc1)CCc1cnc2n(ncn2)c1'),
TestMolecule('COC(=O)C1=C(C)NC(C)=C(C(OC)=O)C1c1oc(-c2c(Cl)c(Cl)ccc2)cc1',
'c1ccc(-c2oc(C3C=CNC=C3)cc2)cc1'),
TestMolecule('CCN(S(c1cc(NC(COC(CCc2nc3ccccc3s2)=O)=O)ccc1)(=O)=O)CC',
'c1cc(NC(COC(=O)CCc2nc3c(s2)cccc3)=O)ccc1'),
TestMolecule('CCOC(c1cc(OC(c2ccccc2)=O)n(-c2ccccc2)n1)=O', 'O=C(Oc1n(ncc1)-c1ccccc1)c1ccccc1'),
TestMolecule('CCOC(=O)c1nc2c(c(NCc3ccccc3F)n1)cccc2', 'c1ccc(CNc2ncnc3c2cccc3)cc1'),
TestMolecule('Cc1nc(C)n(CC(N2CCCC(C(c3c(C)cc(Cl)cc3)=O)C2)=O)n1',
'c1ccc(cc1)C(=O)C1CCCN(C(=O)Cn2cncn2)C1'),
TestMolecule('COc1cc(NC(=O)c2nnn(CCc3ccccc3)c2N)c(OC)cc1', 'O=C(c1nnn(c1)CCc1ccccc1)Nc1ccccc1'),
TestMolecule('Cc1cc(C(=O)CN2C(=O)c3ccccc3C2=O)c(C)n1Cc1cccs1',
'O=C(CN1C(c2c(cccc2)C1=O)=O)c1cn(Cc2cccs2)cc1'),
TestMolecule('c1cnc2c(c1)cccc2S(N1CCC(C(=O)N2CCN(c3ccc(Cl)cc3)CC2)CC1)(=O)=O',
'c1ccc(cc1)N1CCN(C(=O)C2CCN(S(=O)(=O)c3c4ncccc4ccc3)CC2)CC1'),
TestMolecule('CCOC(c1c(C)[nH]c(C(NNC(c2ccc(C(C)(C)C)cc2)=O)=O)c1C)=O',
'c1ccc(cc1)C(NNC(c1ccc[nH]1)=O)=O'),
TestMolecule('CCOC(c1cc(C(C)C)sc1NC(=O)COC(CCS(c1ccccc1)(=O)=O)=O)=O',
'c1ccc(S(CCC(=O)OCC(Nc2cccs2)=O)(=O)=O)cc1'),
TestMolecule('CCC1CCCCN1CCCNC(=O)Cn1nc(-c2ccccc2)ccc1=O',
'O=C(NCCCN1CCCCC1)Cn1nc(ccc1=O)-c1ccccc1'),
TestMolecule('CCc1cc(OCCn2nc(C(O)=O)c3ccccc3c2=O)ccc1', 'O=c1n(CCOc2ccccc2)ncc2ccccc21'),
TestMolecule('Fc1ccc(CN2CCN3C(CCC3)C2C2CCCCC2)cc1F', 'c1ccc(cc1)CN1CCN2CCCC2C1C1CCCCC1'),
TestMolecule('O=[N+]([O-])c1cc(-c2nnc(N3CCOCC3)c3ccccc23)ccc1N1CCOCC1',
'c1cc2c(nnc(c2cc1)N1CCOCC1)-c1ccc(cc1)N1CCOCC1'),
TestMolecule('Cc1ccnc(NC(=O)COc2ccc3oc4c(c3c2)CCCC4)c1',
'O=C(COc1ccc2oc3c(c2c1)CCCC3)Nc1ccccn1'),
TestMolecule('Cc1cc(=O)oc(C)c1C(=O)NCCCN1CCN(c2ccc(F)cc2)CC1',
'c1ccc(N2CCN(CCCNC(c3ccc(oc3)=O)=O)CC2)cc1'),
TestMolecule('Cc1cc(C(=O)CSc2nc(=O)cc(N)[nH]2)c(C)n1-c1cccc(F)c1',
'O=C(CSc1nc(cc[nH]1)=O)c1cn(cc1)-c1ccccc1'),
TestMolecule('CCN(S(c1cccc(C(=O)N2CCCCC2)c1)(=O)=O)CC', 'O=C(N1CCCCC1)c1ccccc1'),
TestMolecule('CNC(=S)N1CCC(NC(=O)C23CC4CC(C2)CC(C3)C4)CC1',
'O=C(NC1CCNCC1)C12CC3CC(C1)CC(C3)C2'),
TestMolecule('Cc1cc2c(cc1)N=C(C)C(N=O)=C(C)N2', 'c1cc2NC=CC=Nc2cc1'),
TestMolecule('COc1ccc(Sc2cc(C(F)(F)F)nc(-c3ncccc3)n2)cc1', 'c1ccc(cc1)Sc1nc(ncc1)-c1ncccc1'),
TestMolecule('c1coc(CNC(Cn2cc(C(c3ccccc3)=O)c3c2cccc3)=O)c1',
'c1coc(CNC(Cn2cc(C(c3ccccc3)=O)c3c2cccc3)=O)c1'),
TestMolecule('O=C(NCc1ccc(Cl)cc1)c1noc(-c2ccco2)c1', 'O=C(c1noc(c1)-c1ccco1)NCc1ccccc1'),
TestMolecule('CN(C)c1ccc(C(c2n(CCOC(=O)Nc3ccc(Cl)cc3)nnn2)N2CCOCC2)cc1',
'O=C(Nc1ccccc1)OCCn1nnnc1C(c1ccccc1)N1CCOCC1'),
TestMolecule('NC(=NOC(=O)c1cc(Cn2cc(C(F)(F)F)ccc2=O)ccc1)c1ccccc1',
'c1ccc(C=NOC(c2cc(Cn3ccccc3=O)ccc2)=O)cc1'),
TestMolecule('CCc1nnc(NC(=O)Cc2c(-c3ccc(C)cc3)nc(C)s2)s1', 'O=C(Cc1c(-c2ccccc2)ncs1)Nc1nncs1'),
TestMolecule('COCCCNC(=O)CN1C(=O)N(Cc2ccccc2Cl)CC1', 'O=C1NCCN1Cc1ccccc1'),
TestMolecule('Cc1cc([N+]([O-])=O)nn1CC(=O)NCCCn1ccnc1', 'O=C(Cn1nccc1)NCCCn1ccnc1'),
TestMolecule('c1cc(F)c(N2CCN(C(=O)c3ccc(S(NCC4OCCC4)(=O)=O)cc3)CC2)cc1',
'c1ccc(cc1)N1CCN(C(c2ccc(cc2)S(=O)(=O)NCC2OCCC2)=O)CC1'),
TestMolecule('CC(NCc1cccnc1)=C1C(=O)NC(=O)N(c2ccc(C)cc2)C1=O',
'c1cc(ccc1)N1C(=O)NC(C(=CNCc2cccnc2)C1=O)=O'),
TestMolecule('Cc1ccn(C)c(=N)c1', 'N=c1[nH]cccc1'),
TestMolecule('Cc1cc(C)nc(N2CCC(CNC(=O)CCc3ccccc3)CC2)n1',
'O=C(CCc1ccccc1)NCC1CCN(c2ncccn2)CC1'),
TestMolecule('CCOC1=CC(=CNNC(CCCC(NC2CCCCC2)=O)=O)C=CC1=O',
'C1=CC(C=CC1=O)=CNNC(=O)CCCC(=O)NC1CCCCC1'),
TestMolecule('CC(=O)N1CCN(c2ccc([N+]([O-])=O)cc2)CC1', 'c1ccc(cc1)N1CCNCC1'),
TestMolecule('CS(N(CC(=O)N1CCCCC1)Cc1ccc(Cl)cc1)(=O)=O', 'O=C(N1CCCCC1)CNCc1ccccc1'),
TestMolecule('c1coc(C(=O)N2CCN(C(COc3cc(C(NCc4ccccc4)=O)ccc3)=O)CC2)c1',
'c1coc(C(=O)N2CCN(C(COc3cc(C(NCc4ccccc4)=O)ccc3)=O)CC2)c1'),
TestMolecule('Cc1cccc2sc(NNC(=O)C3=COCCO3)nc12', 'O=C(NNc1nc2ccccc2s1)C1=COCCO1'),
TestMolecule('c1ccc2c(c1)N(C)C1(C=Nc3c(cc(N4CCOCC4)c4ccccc34)O1)C2(C)C',
'C1=Nc2c(cc(c3ccccc23)N2CCOCC2)OC11Nc2ccccc2C1'),
TestMolecule('COc1cccc(C2N(CCN3CCOCC3)C(=O)C(O)=C2C(=O)c2sc(C)nc2C)c1',
'O=C(C1=CC(=O)N(C1c1ccccc1)CCN1CCOCC1)c1scnc1'),
TestMolecule('COc1cc(OC)c(NC(CSc2nc3c(c(=O)n2-c2ccc(F)cc2)SCC3)=O)cc1',
'c1ccc(cc1)NC(=O)CSc1n(c(=O)c2c(n1)CCS2)-c1ccccc1'),
TestMolecule('Cc1ccccc1CN1c2ccccc2C2(C1=O)OCCCO2', 'O=C1C2(OCCCO2)c2c(N1Cc1ccccc1)cccc2'),
TestMolecule('O=C(N1C2(OCC1)CCN(c1ncc(C(F)(F)F)cc1Cl)CC2)c1ccccc1',
'O=C(c1ccccc1)N1C2(OCC1)CCN(c1ccccn1)CC2'),
TestMolecule('CC=CC=CC(=O)Nc1nccs1', 'c1ncsc1'),
TestMolecule('CC(C)(C)c1ccc(C(c2c[nH]c(C(NCc3cccnc3)=O)c2)=O)cc1',
'c1ccc(cc1)C(=O)c1c[nH]c(c1)C(=O)NCc1cccnc1'),
TestMolecule('CCC(=O)Nc1c(C)nn(-c2cc(C)c(C)cc2)c1C', 'c1ccc(cc1)-n1nccc1'),
TestMolecule('Cc1ccc(SCCC(=O)NCCSCc2c(C)cccc2)cc1', 'O=C(NCCSCc1ccccc1)CCSc1ccccc1'),
TestMolecule('CC1=NN(Cc2ccccc2)C(=O)C1=Cc1ccc(N(C)C)cc1', 'O=C1C(C=NN1Cc1ccccc1)=Cc1ccccc1'),
TestMolecule('COCC(=O)Nc1ccc(S(NCCc2ccccc2)(=O)=O)cc1', 'c1ccc(CCNS(=O)(=O)c2ccccc2)cc1'),
TestMolecule('CCOC(=O)N(C)c1ccc(C(O)(C(F)(F)F)C(F)(F)F)cc1', 'c1ccccc1'),
TestMolecule('Fc1ccc(COC2=C(C(O)=O)CCNC2=O)cc1F', 'O=C1NCCC=C1OCc1ccccc1'),
TestMolecule('O=C1N2C(Nc3ccccc31)CCCCC2', 'O=C1N2C(Nc3ccccc31)CCCCC2'),
TestMolecule('Cl.COc1ccc(-c2nc3n(ccc4ccccc43)c2CN2CCOCC2)cc1OC',
'c1cccc(c1)-c1nc2c3c(ccn2c1CN1CCOCC1)cccc3'),
TestMolecule('ClCc1oc(-c2ccccc2)nn1', 'c1oc(nn1)-c1ccccc1'),
TestMolecule('Cl.Cc1ccc(OCC(O)Cn2c(=N)n(CCN3CCCCC3)c3ccccc32)cc1',
'N=c1n(CCCOc2ccccc2)c2ccccc2n1CCN1CCCCC1'),
TestMolecule('COc1ccc(C(=O)C=C(C)Nc2ccc3c(c2)OCO3)cc1', 'O=C(C=CNc1ccc2c(c1)OCO2)c1ccccc1'),
TestMolecule('c1csc(CN(C(c2ccc(F)cc2)C(NC2CCCCC2)=O)C(=O)CN2S(=O)(=O)c3ccccc3C2=O)c1',
'c1cc(CN(C(=O)CN2S(=O)(c3ccccc3C2=O)=O)C(C(=O)NC2CCCCC2)c2ccccc2)sc1'),
TestMolecule('c1csc(S(NCCSc2n(-c3ccccc3)nnn2)(=O)=O)c1',
'c1csc(S(NCCSc2n(-c3ccccc3)nnn2)(=O)=O)c1'),
TestMolecule('Cc1cccc(C=NNC(=O)Cn2c(N)nnn2)n1', 'O=C(Cn1cnnn1)NN=Cc1ccccn1'),
TestMolecule('CCOC(C1(Cc2ccc(Cl)cc2)CCN(C(c2cc(C)nc(C)n2)=O)CC1)=O',
'O=C(N1CCC(CC1)Cc1ccccc1)c1ccncn1'),
TestMolecule('c1ccc(C(N(CC2OCCC2)C(Cn2nnc3ccccc23)=O)C(NCc2ccc(F)cc2)=O)cc1',
'O=C(N(C(c1ccccc1)C(=O)NCc1ccccc1)CC1OCCC1)Cn1nnc2c1cccc2'),
TestMolecule('O=C1CSC(c2ccncc2)N1Cc1occc1', 'O=C1CSC(c2ccncc2)N1Cc1occc1'),
TestMolecule('COc1c(OCc2ccccc2)c(Br)cc(C=NNC(=O)Cn2nc([N+]([O-])=O)cc2C)c1',
'O=C(Cn1nccc1)NN=Cc1ccc(cc1)OCc1ccccc1'),
TestMolecule('Cc1c(Cn2nnc(-c3cc(C(=O)O)ccc3)n2)cccc1', 'c1cccc(-c2nn(nn2)Cc2ccccc2)c1'),
TestMolecule('O=C(c1ccc2snnc2c1)N1CCCC1', 'O=C(c1ccc2snnc2c1)N1CCCC1'),
TestMolecule('c1ccc(CC(NN2C(=O)C(=Cc3c(C(O)=O)cccc3)SC2=S)=O)cc1',
'O=C1C(=Cc2ccccc2)SC(=S)N1NC(Cc1ccccc1)=O'),
TestMolecule('Cc1ccccc1OCC(=O)NN=Cc1ccncc1', 'O=C(COc1ccccc1)NN=Cc1ccncc1'),
TestMolecule('O=C(C=Cc1ccccc1)NC(=S)Nc1ccc(CN2CCOCC2)cc1',
'O=C(C=Cc1ccccc1)NC(=S)Nc1ccc(CN2CCOCC2)cc1'),
TestMolecule('COc1ccc(NC(=S)N(Cc2cnccc2)Cc2c(=O)[nH]c3c(c2)cc(OC)c(OC)c3)cc1',
'O=c1c(CN(C(=S)Nc2ccccc2)Cc2cnccc2)cc2ccccc2[nH]1'),
TestMolecule('Nc1ccc2nc3c([nH]c(=O)n(C4CCCCC4)c3=O)nc2c1',
'c1ccc2nc3[nH]c(n(c(c3nc2c1)=O)C1CCCCC1)=O'),
TestMolecule('Cc1cc(NC(=O)c2ccc(S(Nc3ccccc3)(=O)=O)cc2)no1',
'c1cc(no1)NC(=O)c1ccc(S(=O)(=O)Nc2ccccc2)cc1'),
TestMolecule('Nn1c(Cc2c3c(cccc3)ccc2)nnc1SCc1ccccc1',
'c1ccc(CSc2nnc([nH]2)Cc2c3c(cccc3)ccc2)cc1'),
TestMolecule('Cc1[nH]nc(Nc2cc(C)ccc2)c1[N+](=O)[O-]', 'c1ccc(cc1)Nc1n[nH]cc1'),
TestMolecule('CC1Cn2c(nc3n(C)c(=O)[nH]c(=O)c23)O1', 'O=c1[nH]c2nc3n(c2c([nH]1)=O)CCO3'),
TestMolecule('c1csc(C(OCC(NC23CC4CC(C2)CC(C3)C4)=O)=O)c1',
'c1csc(C(OCC(NC23CC4CC(C2)CC(C3)C4)=O)=O)c1'),
TestMolecule('c1ccc(S(NC2=NC(=O)C(=Cc3cnccc3)S2)(=O)=O)cc1',
'c1ccc(S(NC2=NC(=O)C(=Cc3cnccc3)S2)(=O)=O)cc1'),
TestMolecule('CCCn1c(N2CCN(C)CC2)nc2n(C)c(=O)[nH]c(=O)c12',
'O=c1[nH]c([nH]c2nc([nH]c12)N1CCNCC1)=O'),
TestMolecule('CCn1c(SCC(Nc2cc(S(N3CCOCC3)(=O)=O)ccc2OC)=O)nnc1-c1ccncc1',
'c1cc(S(=O)(=O)N2CCOCC2)cc(NC(=O)CSc2nnc(-c3ccncc3)[nH]2)c1'),
TestMolecule('C#CCNC(=O)C1=CC(c2ccc(Br)cc2)CC(OCc2ccc(CO)cc2)O1',
'c1cccc(c1)C1C=COC(OCc2ccccc2)C1'),
TestMolecule('CCc1c(SCC(=O)Nc2cc(C)on2)nc2ccc(C)cc2c1', 'O=C(Nc1ccon1)CSc1ccc2c(cccc2)n1'),
TestMolecule('CCOCCCN(C(C(NC1CCCC1)=O)c1cccc(OC)c1OC)C(c1ccco1)=O',
'c1cc(ccc1)C(NC(c1occc1)=O)C(=O)NC1CCCC1'),
TestMolecule('Cc1ccc(C(=O)NC(=S)NNS(c2ccccc2)(=O)=O)cc1',
'c1cccc(c1)C(NC(=S)NNS(=O)(=O)c1ccccc1)=O'),
TestMolecule('COc1ccc(CC(N)=NOC(=O)c2sccc2)cc1', 'O=C(ON=CCc1ccccc1)c1sccc1'),
TestMolecule('c1ccc(C(O)=C2C(c3ncccc3)N(CC(OC)OC)C(=O)C2=O)cc1',
'c1cc(C=C2C(=O)C(=O)NC2c2ncccc2)ccc1'),
TestMolecule('COC(=O)CSc1nc(C)cc(Oc2ccccc2)n1', 'c1ccc(Oc2ccncn2)cc1'),
TestMolecule('COc1ccc(Cn2c(C)ccc2C)cc1', 'c1ccc(cc1)Cn1cccc1'),
TestMolecule('COc1cccc(N2CCN(C3CC(=O)N(c4ccc(C)c(Cl)c4)C3=O)CC2)c1',
'O=C1N(c2ccccc2)C(=O)C(C1)N1CCN(c2ccccc2)CC1'),
TestMolecule('COc1cccc(OC)c1OCCN(C)C.OC(=O)C(O)=O', 'c1ccccc1'),
TestMolecule('C1CCC(NC(=O)c2ccc(S(N3CCCC3)(=O)=O)cc2)C1',
'C1CCC(NC(=O)c2ccc(S(N3CCCC3)(=O)=O)cc2)C1'),
TestMolecule('CCCN(C(=O)Cn1ncc2c(=O)oc3c(c12)cccc3)c1cc(C)ccc1',
'O=C(Cn1ncc2c(oc3c(cccc3)c12)=O)Nc1ccccc1'),
TestMolecule('CNC(NC(CSc1nnc(C(F)(F)F)n1C)=O)=O', 'n1nc[nH]c1'),
TestMolecule('CCOCCCN1C(=O)CC(C(NCCc2ccc(C)cc2)=O)C1', 'O=C1NCC(C1)C(NCCc1ccccc1)=O'),
TestMolecule('COc1c([N+](=O)[O-])cc(CSc2n[nH]c(C)n2)cc1', 'c1ccc(CSc2nc[nH]n2)cc1'),
TestMolecule('CN(C)CC(=O)c1ccc(-c2ccccc2)cc1', 'c1cccc(c1)-c1ccccc1'),
TestMolecule('CC1(O)C(=O)c2c(cccc2)N(c2ccccc2)C1=O', 'O=C1CC(=O)N(c2c1cccc2)c1ccccc1'),
TestMolecule('CN(S(c1ccccc1)(=O)=O)CC(=O)NCCc1ccccc1', 'c1ccc(CCNC(=O)CNS(=O)(=O)c2ccccc2)cc1'),
TestMolecule('CCNc1ccccc1C(=O)O', 'c1ccccc1'),
TestMolecule('CC1(C)C(CSc2nc3ccccc3[nH]2)C1(Cl)Cl', 'c1ccc2c(nc([nH]2)SCC2CC2)c1'),
TestMolecule('CC(C)c1ccc(OCC(=O)NC(=S)Nc2c3cccc4c3c(cc2)CC4)cc1',
'O=C(NC(=S)Nc1c2cccc3c2c(cc1)CC3)COc1ccccc1'),
TestMolecule('CN(C)c1ccc(NC(CN2CCC(C(c3ccc(F)cc3)=O)CC2)=O)cc1',
'c1cccc(c1)NC(CN1CCC(CC1)C(=O)c1ccccc1)=O'),
TestMolecule('CCCCN(C)C(=O)Cc1c(OC)ccc2cc(Br)ccc21', 'c1c2ccccc2ccc1'),
TestMolecule('Cc1ccc(NC(CSc2sc(NC(CN3CCOCC3)=O)nn2)=O)cc1',
'O=C(Nc1ccccc1)CSc1sc(nn1)NC(=O)CN1CCOCC1'),
TestMolecule('COCCNC(=S)NNc1cccc(C(=O)O)c1', 'c1ccccc1'),
TestMolecule('O=C(CNc1ccccc1)NN=Cc1ccc2c(c1)OCCO2', 'O=C(CNc1ccccc1)NN=Cc1ccc2c(c1)OCCO2'),
TestMolecule('COc1cc2ccccc2cc1C(=O)NCC(c1sccc1)N(C)C', 'O=C(NCCc1sccc1)c1cc2c(cc1)cccc2'),
TestMolecule('COc1ccc(C(N(C)C)CNC(=O)CCOc2ccccc2)cc1', 'O=C(NCCc1ccccc1)CCOc1ccccc1'),
TestMolecule('Cl.CCN(CC)CCCN1C(=O)CSC1c1ccc([N+]([O-])=O)cc1', 'O=C1CSC(c2ccccc2)N1'),
TestMolecule('CCC(Nc1ccc(OC)cc1OC)=C1C(=O)NC(=O)NC1=O', 'c1cc(NC=C2C(=O)NC(=O)NC2=O)ccc1'),
TestMolecule('c1coc(-c2cc(C(F)(F)F)nc(NCc3ccc(F)cc3)n2)c1', 'c1ccc(CNc2nccc(n2)-c2occc2)cc1'),
TestMolecule('CCOC(Nc1sc(C)c(C)c1C(OCC)=O)=O', 'c1ccsc1'),
TestMolecule('O=CN1CCN(C(C(=O)NC2CCCCC2)c2cc3c(cc2[N+]([O-])=O)OCO3)CC1',
'O=C(C(N1CCNCC1)c1ccc2c(c1)OCO2)NC1CCCCC1'),
TestMolecule('COc1cc(C2N(c3ccc(Br)cc3)C(=O)c3n[nH]c(C)c32)ccc1O',
'O=C1c2n[nH]cc2C(N1c1ccccc1)c1ccccc1'),
TestMolecule('c1cc(NC(=O)c2ccccc2[N+]([O-])=O)c(N2CCOCC2)cc1',
'O=C(Nc1c(cccc1)N1CCOCC1)c1ccccc1'),
TestMolecule('N#Cc1cc2c(nc1SCC(=O)N1CCCCC1)CCCCC2', 'O=C(N1CCCCC1)CSc1ccc2c(n1)CCCCC2'),
TestMolecule('CCN(CC)c1ccc(CN(C(=O)c2cc(OC)c(OC)c(OC)c2)C2CCS(=O)(=O)C2)cc1',
'O=S1(=O)CCC(N(Cc2ccccc2)C(=O)c2ccccc2)C1'),
TestMolecule('COc1cc(NC(=S)N2CCN(Cc3ccccc3)CC2)cc(OC)c1', 'S=C(N1CCN(CC1)Cc1ccccc1)Nc1ccccc1'),
TestMolecule('CC(=O)C(=CNc1ccc(OCc2ccccc2)cc1)c1ccccc1', 'c1cccc(c1)COc1ccc(NC=Cc2ccccc2)cc1'),
TestMolecule('CC(C)C(C(NC(C)C(N)=O)=O)NC(C1CCCN1C(OC(C)(C)C)=O)=O', 'C1CCNC1'),
TestMolecule('CCOc1ccc(N2CC(C(=O)Nc3cccc(S(NC4=NCCC4)(=O)=O)c3)CC2=O)cc1',
'c1cccc(c1)N1CC(C(=O)Nc2cccc(S(=O)(=O)NC3=NCCC3)c2)CC1=O'),
TestMolecule('O=C(NCc1ccccc1Cl)CSc1ccc(-c2cccs2)nn1', 'O=C(NCc1ccccc1)CSc1ccc(nn1)-c1sccc1'),
TestMolecule('COc1ccc(OC)c(N=c2ssnc2Cl)c1', 'c1cccc(c1)N=c1ssnc1'),
TestMolecule('CC(=O)C1=C(C)NC(=O)CC1c1c(Cl)cccc1', 'O=C1CC(C=CN1)c1ccccc1'),
TestMolecule('CCC(=O)N=C(N)Nc1nc(C)c2cc(C)c(C)cc2n1', 'c1cc2c(cc1)ncnc2'),
TestMolecule('Cc1ccccc1C(OC1OC(=O)C(Cl)=C1Nc1ccc(C(O)=O)cc1)=O',
'O=C(OC1OC(C=C1Nc1ccccc1)=O)c1ccccc1'),
TestMolecule('CCOc1cc(CN2CCC(CO)(Cc3cccc(C(F)(F)F)c3)CC2)ccc1OC',
'c1ccc(cc1)CC1CCN(Cc2ccccc2)CC1'),
TestMolecule('Cc1cc2c([nH]c(=O)c(CCNC(c3cccs3)=O)c2)cc1C',
'O=C(NCCc1cc2ccccc2[nH]c1=O)c1cccs1'),
TestMolecule('Cc1ccc(Nc2cc(=O)[nH]c(=O)[nH]2)cc1C', 'c1cccc(c1)Nc1cc([nH]c([nH]1)=O)=O'),
TestMolecule('Cc1cc(OCC(=O)NC2CCS(=O)(=O)C2)c2c(oc(=O)c3c2CCC3)c1',
'O=C(NC1CCS(=O)(C1)=O)COc1c2c(ccc1)oc(c1c2CCC1)=O'),
TestMolecule('CCc1sc(NC(CCC(NCCc2ccc(OC)c(OC)c2)=O)=O)nn1',
'c1cc(ccc1)CCNC(=O)CCC(=O)Nc1scnn1'),
TestMolecule('N#CC1=C(SCc2ccccc2)NC(=O)CC1c1ccc(O)cc1', 'O=C1NC(=CC(C1)c1ccccc1)SCc1ccccc1'),
TestMolecule('O=C(NCCN1CCOCC1)c1csc2c1CCCC2', 'O=C(NCCN1CCOCC1)c1csc2c1CCCC2'),
TestMolecule('CCCCC(=O)Nc1cc(OC)c(NC(C2CCCCC2)=O)cc1OC', 'O=C(Nc1ccccc1)C1CCCCC1'),
TestMolecule('Cc1ccc(C(C(C)OC(C2CC(=O)N(C3CCCCC3)C2)=O)=O)cc1',
'c1cc(C(=O)COC(C2CC(=O)N(C2)C2CCCCC2)=O)ccc1'),
TestMolecule('Cc1ccc(S(C(C#N)c2c(N3CCCC3)nc3ccccc3n2)(=O)=O)cc1C',
'c1ccc(cc1)S(=O)(=O)Cc1c(nc2ccccc2n1)N1CCCC1'),
TestMolecule('CC1(C)OC(=O)C(=Cc2[nH]ccc2)C(=O)O1', 'O=C1OCOC(=O)C1=Cc1[nH]ccc1'),
TestMolecule('Cc1cc(C)cc(Oc2nc3n(cccc3C)c(=O)c2C=C(C#N)C(=O)NC2CCS(=O)(=O)C2)c1',
'c1ccc(cc1)Oc1c(c(=O)n2ccccc2n1)C=CC(=O)NC1CCS(=O)(=O)C1'),
TestMolecule('COc1cc(NC(=O)NCc2c(C)onc2-c2ccccc2)ccc1', 'O=C(NCc1conc1-c1ccccc1)Nc1ccccc1'),
TestMolecule('c1ccc(C(Oc2cc3c(cc2)C(=O)CO3)=O)cc1', 'c1ccc(C(Oc2cc3c(cc2)C(=O)CO3)=O)cc1'),
TestMolecule('CCN1C(=O)C2C(c3cccs3)N3C4C(=O)N(CC)C(=O)C4C(c4cccs4)N3C2C1=O',
'c1cc(sc1)C1C2C(NC(=O)C2N2N1C1C(=O)NC(=O)C1C2c1cccs1)=O'),
TestMolecule('Cc1cc(C(N2CCCC(C(c3cc(F)ccc3F)=O)C2)=O)c(C)o1',
'O=C(N1CCCC(C(=O)c2ccccc2)C1)c1cocc1'),
TestMolecule('COc1cc(C=NO)ccc1Oc1c([N+]([O-])=O)cc([N+]([O-])=O)cc1', 'c1cccc(Oc2ccccc2)c1'),
TestMolecule('Cc1ccc(N(Cc2c(=O)[nH]c3ccc(C)cc3c2)C(c2cccs2)=O)cc1',
'O=C(N(c1ccccc1)Cc1c([nH]c2c(cccc2)c1)=O)c1cccs1'),
TestMolecule('COc1ccc(C(=O)Nn2c(C)nnc2-n2c(C)cc(C)n2)cc1OC', 'O=C(c1ccccc1)Nn1cnnc1-n1nccc1'),
TestMolecule('Cc1c(NC(=O)c2c(C)c(Cl)c(C)nc2Cl)cccc1', 'O=C(c1cccnc1)Nc1ccccc1'),
TestMolecule('c1ccc(CNC(CC(C(=O)NCc2ccccc2)c2nc(=O)c3ccccc3[nH]2)=O)cc1',
'c1ccc(CNC(CC(C(=O)NCc2ccccc2)c2nc(=O)c3ccccc3[nH]2)=O)cc1'),
TestMolecule('CNc1n(-c2ccccc2)ncc1[N+](=O)[O-]', 'c1n(ncc1)-c1ccccc1'),
TestMolecule('CC1SC2(NC1=O)C1CC3CC(C1)CC2C3', 'O=C1CSC2(N1)C1CC3CC(C1)CC2C3'),
TestMolecule('CCc1ccccc1NC(=S)N(C(C)c1occc1)CCOC', 'S=C(NCc1occc1)Nc1ccccc1'),
TestMolecule('CCC(C)NC(=O)C1CCCN(S(c2ccc(-n3cnnn3)cc2)(=O)=O)C1',
'C1CCN(CC1)S(=O)(=O)c1ccc(cc1)-n1nnnc1'),
TestMolecule('COc1c2c(ccc1)C1CC(C)(O2)N(Cc2ccccc2)C(=O)N1', 'O=C1NC2CC(Oc3ccccc32)N1Cc1ccccc1'),
TestMolecule('COc1ccc(C2NC(=O)c3c(cccc3)O2)c(OC)c1OC', 'O=C1NC(Oc2c1cccc2)c1ccccc1'),
TestMolecule('O=C(NNC=C1C=Nc2ccccc21)c1ccn(Cc2c(Cl)cc(Cl)cc2)n1',
'O=C(NNC=C1c2c(cccc2)N=C1)c1nn(cc1)Cc1ccccc1'),
TestMolecule('c1ccc(NS(c2ccc(OCC(=O)NCc3cnccc3)cc2)(=O)=O)cc1',
'c1ccc(NS(c2ccc(OCC(=O)NCc3cnccc3)cc2)(=O)=O)cc1'),
TestMolecule('COC1=CC(=O)C(=C2NNC(C(F)(F)F)=C2c2cc3ccccc3o2)C=C1',
'O=C1C=CC=CC1=C1NNC=C1c1cc2ccccc2o1'),
TestMolecule('CCOC(=O)c1c(C(COC(C=Cc2ccc(Cl)cc2)=O)=O)c(C)[nH]c1C',
'c1ccc(C=CC(OCC(=O)c2cc[nH]c2)=O)cc1'),
TestMolecule('Cc1nc2ncnn2c(N2CCN(c3nnnn3-c3ccccc3)CC2)c1',
'c1nc2ncnn2c(c1)N1CCN(c2nnnn2-c2ccccc2)CC1'),
TestMolecule('CC(C)Oc1ccc(C(=O)Nc2ccc(NC(c3ccco3)=O)c(Cl)c2)cc1',
'O=C(Nc1ccc(cc1)NC(=O)c1ccccc1)c1occc1'),
TestMolecule('CC(c1ccccc1)NC(C(NCC1OCCC1)=O)=O', 'O=C(NCc1ccccc1)C(=O)NCC1OCCC1'),
TestMolecule('CCCCOc1ccc(NC(=O)CCSc2nccn2C)cc1', 'O=C(Nc1ccccc1)CCSc1ncc[nH]1'),
TestMolecule('O=C(OCc1ncccc1)c1oc(COc2c(Cl)cccc2)cc1', 'O=C(OCc1ncccc1)c1ccc(o1)COc1ccccc1'),
TestMolecule('COc1ccc(C=NNC(=O)OC(C)(C)C)cc1OC', 'c1ccccc1'),
TestMolecule('CC1CCCCC1NC(COC(c1ccc(S(NCc2ccco2)(=O)=O)cc1)=O)=O',
'c1coc(c1)CNS(=O)(=O)c1ccc(cc1)C(=O)OCC(=O)NC1CCCCC1'),
TestMolecule('Nn1c(SCC(=O)Nc2cccc(F)c2)nnc1C1CCCCC1', 'O=C(CSc1[nH]c(nn1)C1CCCCC1)Nc1ccccc1'),
TestMolecule('Cc1n[nH]c(NC2CCCCC2)nc1=O', 'O=c1cn[nH]c(n1)NC1CCCCC1'),
TestMolecule('CCCCCCCCC(=O)NC(C(Cl)(Cl)Cl)NC(=S)N1CCOCC1', 'C1NCCOC1'),
TestMolecule('CCCc1ccc(Oc2coc3cc(OCC(Nc4c(C)cccc4)=O)ccc3c2=O)cc1',
'c1cccc(c1)Oc1c(c2ccc(cc2oc1)OCC(=O)Nc1ccccc1)=O'),
TestMolecule('Cc1ccc(C(=O)NN=C2CCSC2)cc1[N+]([O-])=O', 'O=C(NN=C1CCSC1)c1ccccc1'),
TestMolecule('N#CC1=C2SCN(c3ccc(F)cc3)CN2C(=O)CC1c1cc(F)ccc1',
'O=C1N2CN(c3ccccc3)CSC2=CC(c2ccccc2)C1'),
TestMolecule('c1ccc(CN2C(=O)CC(Nc3cc4c(cc3)cccc4)C2=O)cc1',
'c1ccc(CN2C(=O)CC(Nc3cc4c(cc3)cccc4)C2=O)cc1'),
TestMolecule('COc1ccc(NC(C)=O)cc1NC(=O)CN1CCN(CC(=O)Nc2ccc(Cl)cc2)CC1',
'O=C(Nc1ccccc1)CN1CCN(CC1)CC(=O)Nc1ccccc1'),
TestMolecule('Clc1c(Cl)c(C2NC(=O)CCC2[N+]([O-])=O)ccc1', 'O=C1NC(CCC1)c1ccccc1'),
TestMolecule('CCN(C(=O)CSc1n(-c2ccccc2)c(-c2ccccc2)nn1)CC', 'c1ccc(cc1)-n1cnnc1-c1ccccc1'),
TestMolecule('CC(=O)CCCCn1cnc2n(C)c(=O)n(C)c(=O)c12', 'O=c1[nH]c(c2c(nc[nH]2)[nH]1)=O'),
TestMolecule('CC1=NN(c2ccccc2)C(=N)C1=NNc1ccc(Cl)cc1', 'N=C1C(=NNc2ccccc2)C=NN1c1ccccc1'),
TestMolecule('CCc1ccc(OCC(=O)N(CC)CC)cc1', 'c1ccccc1'),
TestMolecule('CN(CC(=O)N1CCCCC1)S(c1ccc(Cl)cc1)(=O)=O', 'O=C(CNS(=O)(=O)c1ccccc1)N1CCCCC1'),
TestMolecule('CSc1ncc(C=C2C(=O)NC(=O)N(c3ccc(C)cc3)C2=O)cn1',
'c1ccc(N2C(NC(=O)C(=Cc3cncnc3)C2=O)=O)cc1'),
TestMolecule('COCCNC(=S)Nc1c(Cc2ccccc2)cccc1', 'c1ccc(Cc2ccccc2)cc1'),
TestMolecule('COc1cc(C(=O)Nc2nnc(C(C)(C)C)s2)c([N+]([O-])=O)cc1OC', 'O=C(Nc1nncs1)c1ccccc1'),
TestMolecule('CCOC(=O)c1ccc(NC(=O)c2cc(OC)c(OC(C)C)cc2)cc1', 'O=C(Nc1ccccc1)c1ccccc1'),
TestMolecule('COc1ccc(C(=O)C=C2Sc3cc4c(cc3N2C)OCO4)cc1', 'O=C(C=C1Sc2cc3c(cc2N1)OCO3)c1ccccc1'),
TestMolecule('CCCC1=NN(c2sc3c(n2)cccc3)C(=O)C1=CNCCCN(CC)CC', 'C=C1C=NN(C1=O)c1sc2ccccc2n1'),
TestMolecule('COc1ccc(C(COC(CN2C(=O)NC(C)(C)C2=O)=O)=O)cc1OC',
'c1ccc(C(=O)COC(=O)CN2C(=O)CNC2=O)cc1'),
TestMolecule('O=C(Oc1ccc(Br)cc1)C1CC(=O)N(c2ccc(F)cc2)C1',
'O=C(C1CC(N(C1)c1ccccc1)=O)Oc1ccccc1'),
TestMolecule('O=c1nc(-c2ccccn2)[nH]c(C(F)(F)F)c1Br', 'O=c1cc[nH]c(-c2ncccc2)n1'),
TestMolecule('CCOC(c1oc2ccccc2c1NC(CN1CCN(C)CC1)=O)=O', 'O=C(CN1CCNCC1)Nc1coc2ccccc21'),
TestMolecule('CSc1nsc(NN=Cc2ccc3c(c2)OCO3)c1C#N', 'c1cc(sn1)NN=Cc1ccc2OCOc2c1'),
TestMolecule('CC(C)(C)NC(NC(CSc1nc(C)c(C)c(C)n1)=O)=O', 'c1cncnc1'),
TestMolecule('Cc1cccnc1CN1CCN(Cc2onc(C(c3ccccc3)c3ccccc3)n2)CC1',
'c1cccnc1CN1CCN(CC1)Cc1onc(n1)C(c1ccccc1)c1ccccc1'),
TestMolecule('COc1ccc(Nc2oc3cc(=O)ccc-3cc2C(=O)Nc2ncccc2)cc1OC',
'c1ccc(cc1)Nc1oc2-c(ccc(c2)=O)cc1C(Nc1ncccc1)=O'),
TestMolecule('c1cc(C)c(OCC(NS(c2ccc(C)cc2)(=O)=O)=O)cc1', 'O=C(COc1ccccc1)NS(=O)(=O)c1ccccc1'),
TestMolecule('CCOc1ccc(-c2scc(CSc3sc(N)nn3)n2)cc1OC', 'c1cccc(c1)-c1nc(cs1)CSc1scnn1'),
TestMolecule('c1ccc(C(=O)COC(=O)CN2C(=O)C3C4CC(C3C2=O)C=C4)cc1',
'c1ccc(C(=O)COC(=O)CN2C(=O)C3C4CC(C3C2=O)C=C4)cc1'),
TestMolecule('Cc1occc1C(=O)NC(C)c1ccc2c(c1)OCO2', 'O=C(NCc1ccc2c(c1)OCO2)c1ccoc1'),
TestMolecule('CCn1c(SCC(=O)Nc2c(Cl)nccc2)nnc1-c1ccccc1',
'O=C(Nc1cnccc1)CSc1[nH]c(nn1)-c1ccccc1'),
TestMolecule('CCC(C)N(C)C1CCN(C(=S)Nc2cc(OC)ccc2)CC1', 'S=C(Nc1ccccc1)N1CCCCC1'),
TestMolecule('Brc1oc(C(=O)N2CC(=O)Nc3c(cc(Br)cc3)C2c2ccccc2)cc1',
'O=C(N1CC(Nc2ccccc2C1c1ccccc1)=O)c1occc1'),
TestMolecule('CN(C(=O)CCSc1nc(-c2cc3c(cc2)OCO3)cc(C(F)(F)F)n1)Cc1ccccc1',
'O=C(NCc1ccccc1)CCSc1nc(ccn1)-c1cc2c(cc1)OCO2'),
TestMolecule('[Br-].COc1c(OC)c(OC)cc(-c2nc3c[n+](CC(=O)c4ccccc4)ccc3n2C)c1',
'O=C(C[n+]1cc2nc([nH]c2cc1)-c1ccccc1)c1ccccc1'),
TestMolecule('CCOC(CSc1n(-c2c(OC)cccc2)c(CNC(Cc2ccccc2)=O)nn1)=O',
'O=C(Cc1ccccc1)NCc1n(cnn1)-c1ccccc1'),
TestMolecule('CS(N(Cc1ccccc1)c1ccc(C(Nc2c(Sc3ccccc3)cccc2)=O)cc1)(=O)=O',
'O=C(c1ccc(NCc2ccccc2)cc1)Nc1c(cccc1)Sc1ccccc1'),
TestMolecule('Cc1nc(C2N(C(=O)c3cn(C)c4c(c3=O)cccc4)CCc3c4c([nH]c32)cccc4)ccc1',
'O=C(c1c[nH]c2c(cccc2)c1=O)N1C(c2ncccc2)c2[nH]c3ccccc3c2CC1'),
TestMolecule('CCCCc1nc(N2CCOCC2)c(C#N)c2c1CCCC2', 'c1nc(cc2c1CCCC2)N1CCOCC1'),
TestMolecule('O=C(NN=Cc1cc([N+]([O-])=O)ccc1Cl)c1nccnc1', 'O=C(NN=Cc1ccccc1)c1nccnc1'),
TestMolecule('COc1ccc(-n2c(SCC(=O)c3ccc4c(c3)OCCO4)nnn2)cc1',
'O=C(c1ccc2c(c1)OCCO2)CSc1n(nnn1)-c1ccccc1'),
TestMolecule('COc1c(C=CC(=O)Nc2cc(S(NC3=NCCCCC3)(=O)=O)ccc2)cccc1',
'O=C(Nc1cc(ccc1)S(=O)(=O)NC1=NCCCCC1)C=Cc1ccccc1'),
TestMolecule('Cc1nn(-c2ccc(F)cc2)c(Cl)c1C=C(CC(=O)O)c1sc2ccccc2n1',
'c1cc2sc(nc2cc1)C=Cc1cn(nc1)-c1ccccc1'),
TestMolecule('COc1c(OC)c(OC)cc(C2N(c3ccccc3)OC3C2C(=O)N(Cc2ccccc2)C3=O)c1',
'c1cccc(c1)CN1C(=O)C2C(N(OC2C1=O)c1ccccc1)c1ccccc1'),
TestMolecule('COCCNC(=S)Nc1cc(OC)c(NC(=O)c2ccco2)cc1OC', 'O=C(Nc1ccccc1)c1occc1'),
TestMolecule('N#Cc1c(SCC(=O)c2cc3c(oc2=O)cccc3)nc(-c2ccccc2)cc1',
'O=C(c1cc2c(cccc2)oc1=O)CSc1cccc(n1)-c1ccccc1'),
TestMolecule('O=C(N1CCCC1)c1nc2ccccn2c1CN1CCCC(OCc2ccccc2)C1',
'O=C(N1CCCC1)c1nc2ccccn2c1CN1CCCC(OCc2ccccc2)C1'),
TestMolecule('Brc1cccc(OCCSc2ncccn2)c1', 'c1cccc(c1)OCCSc1ncccn1'),
TestMolecule('CC(C)(C)NC(=O)C12CCC(C)(C1(C)C)c1nc3ccccc3nc12', 'c1cccc2nc3C4CC(CC4)c3nc12'),
TestMolecule('[I-].CC(C)C1C(OCC(O)C[N+]2(C)CCCCC2)CC(C)CC1', 'C1CC[NH+](CC1)CCCOC1CCCCC1'),
TestMolecule('Cc1ccccc1NS(=O)(=O)c1ccc(OCC(=O)N2CCCCC2)cc1',
'c1cc(ccc1)NS(=O)(=O)c1ccc(cc1)OCC(=O)N1CCCCC1'),
TestMolecule('Cc1cc(NC(=O)CSc2nc3c(c(=O)n2-c2ccc(Br)cc2)SCC3)no1',
'O=C(CSc1nc2c(c(n1-c1ccccc1)=O)SCC2)Nc1ccon1'),
TestMolecule('Cc1ccccc1C(NC(C(C)C)C(OCC(c1[nH]ccc1)=O)=O)=O',
'c1cc([nH]c1)C(COC(CNC(=O)c1ccccc1)=O)=O'),
TestMolecule('Cc1ccnc(NS(c2ccc(NS(C)(=O)=O)cc2)(=O)=O)n1', 'c1ccc(S(=O)(=O)Nc2ncccn2)cc1'),
TestMolecule('Cn1c(-c2ccc(Cl)cc2)cnc1NCc1cc2c(cc1[N+]([O-])=O)OCO2.OC(=O)C(O)=O',
'c1cc(ccc1)-c1[nH]c(nc1)NCc1cc2c(cc1)OCO2'),
TestMolecule('CC1Cc2ccccc2N1C(=O)CON=Cc1ccc(OC(F)F)cc1', 'O=C(CON=Cc1ccccc1)N1CCc2c1cccc2'),
TestMolecule('C=C1C(=O)OC2C(O)C(C)=CC(=O)C=C(C)CC(OC(C(C)=CC)=O)C12',
'C=C1C2CCC=CC(C=CCC2OC1=O)=O'),
TestMolecule('O=C1C2N(CSC2)c2c(cc(C(F)(F)F)cc2)N1Cc1cccc(F)c1',
'O=C1C2N(CSC2)c2ccccc2N1Cc1ccccc1'),
TestMolecule('Cc1ccc(OCC(=O)Nc2c[nH]c(=O)[nH]c2=O)cc1C',
'O=C(COc1ccccc1)Nc1c[nH]c([nH]c1=O)=O'),
TestMolecule('Cn1c(CN2CCOCC2)nc2cc(NC(=O)c3ccccc3Cl)ccc12',
'O=C(c1ccccc1)Nc1ccc2[nH]c(nc2c1)CN1CCOCC1'),
TestMolecule('O=c1oc2ccc(O)cc2c(CN2CCN(CC=Cc3ccccc3)CC2)c1',
'O=c1oc2ccccc2c(c1)CN1CCN(CC1)CC=Cc1ccccc1'),
TestMolecule('Cn1c(Cc2ccccc2)nnc1SCCC(=O)Nc1ccccc1', 'O=C(CCSc1nnc([nH]1)Cc1ccccc1)Nc1ccccc1'),
TestMolecule('c1cc2nc(CC(=O)c3cc([N+]([O-])=O)ccc3)[nH]c2cc1',
'O=C(Cc1nc2ccccc2[nH]1)c1ccccc1'),
TestMolecule('c1cc2cc(C(=O)N3CCN(c4ccc(N5CCOCC5)nn4)CC3)c(=O)oc2cc1',
'c1cc2cc(C(=O)N3CCN(c4ccc(N5CCOCC5)nn4)CC3)c(=O)oc2cc1'),
TestMolecule('COc1ccccc1-n1c(=S)[nH]nc1CCn1nc(C)c(Br)c1C', 'S=c1[nH]nc(n1-c1ccccc1)CCn1cccn1'),
TestMolecule('CCC(=O)NC(=S)Nc1ccc(N2CCOCC2)cc1', 'c1cccc(c1)N1CCOCC1'),
TestMolecule('CCCCCC(=O)N1CCN(CCNC=C2C(=O)CC(c3ccc(OC)c(OC)c3)CC2=O)CC1',
'c1ccc(cc1)C1CC(=O)C(C(=O)C1)=CNCCN1CCNCC1'),
TestMolecule('CN1CCN(C(=O)CN(S(C)(=O)=O)Cc2ccc(Cl)cc2)CC1', 'O=C(CNCc1ccccc1)N1CCNCC1'),
TestMolecule('COc1cc(OC)cc(C(=O)NCc2cccnc2)c1', 'O=C(NCc1cccnc1)c1ccccc1'),
TestMolecule('c1cncc(NC(=O)C2CCCN(S(c3cccc4c3nsn4)(=O)=O)C2)c1',
'c1cncc(NC(=O)C2CCCN(S(c3cccc4c3nsn4)(=O)=O)C2)c1'),
TestMolecule('CC(NC1=NN(C(C)=O)C(C)(c2cccs2)S1)=O', 'c1cc(sc1)C1SC=NN1'),
TestMolecule('CCCC(=O)Nc1ccc(-c2nc3cc(C)c(C)cc3o2)cc1', 'c1cccc(c1)-c1nc2ccccc2o1'),
TestMolecule('Cc1c(C)n(CC(O)CN2CCOCC2)c2ccccc12.OC(=O)C(O)=O', 'c1cn(c2ccccc12)CCCN1CCOCC1'),
TestMolecule('Cc1occc1-c1n(CCc2ccccc2)c(SCC(=O)Nc2sccn2)nn1',
'O=C(Nc1sccn1)CSc1n(c(nn1)-c1cocc1)CCc1ccccc1'),
TestMolecule('Cc1oc(-c2cc(F)ccc2)nc1CN1C(CCc2ncccc2)CCCC1',
'c1ccc(cc1)-c1nc(co1)CN1C(CCCC1)CCc1ncccc1'),
TestMolecule('COc1c(OC)c(C(O)=O)c(C=NNC(c2cc(NC(c3ccc(F)cc3)=O)ccc2)=O)cc1',
'O=C(Nc1cc(ccc1)C(=O)NN=Cc1ccccc1)c1ccccc1'),
TestMolecule('CCn1c(Cc2ccccc2)nnc1SCC(=O)Nc1ccc(S(N)(=O)=O)cc1',
'O=C(CSc1[nH]c(nn1)Cc1ccccc1)Nc1ccccc1'),
TestMolecule('CCn1c(COc2nn(-c3ccccc3)c(=O)cc2)nnc1SCc1ccc(OC)cc1',
'O=c1ccc(nn1-c1ccccc1)OCc1[nH]c(nn1)SCc1ccccc1'),
TestMolecule('CC1=NC(=O)C(=C2CC(O)(C(F)(F)F)ON2)C(C)=C1', 'O=C1C(=C2NOCC2)C=CC=N1'),
TestMolecule('COc1ccc(NC(=S)Nc2ccccc2C(F)(F)F)cc1', 'S=C(Nc1ccccc1)Nc1ccccc1'),
TestMolecule('CCCc1cc(=O)nc(SCC(=O)c2cc(C)n(CCOC)c2C)[nH]1',
'O=C(c1c[nH]cc1)CSc1[nH]ccc(=O)n1'),
TestMolecule('CC(=O)Nc1ccc2c(c1)C(C)(C)C(C)N2C', 'c1ccc2c(c1)NCC2'),
TestMolecule('CCN1CCN(C(c2ccc(OCC(Nc3ccc(F)cc3)=O)c(OC)c2)=O)CC1',
'c1cc(ccc1)NC(=O)COc1ccc(C(N2CCNCC2)=O)cc1'),
TestMolecule('CCCCN1C2CCCC1CC(NC(=O)c1ccc(OC)c(OC)c1)C2', 'O=C(NC1CC2NC(CCC2)C1)c1ccccc1'),
TestMolecule('c1ccc(N(CC(=O)N2CCOCC2)S(c2ccccc2)(=O)=O)cc1',
'c1ccc(N(CC(=O)N2CCOCC2)S(c2ccccc2)(=O)=O)cc1'),
TestMolecule('CCn1c(C)nc2cc(C(=O)NN=Cc3ccc(OC)c(O)c3)ccc12',
'O=C(NN=Cc1ccccc1)c1ccc2[nH]cnc2c1'),
TestMolecule('[Cl-].NC(=O)CN1C=CC(=C[NH+]=O)C=C1', 'C=C1C=CNC=C1'),
TestMolecule('Cn1cnnc1SC1C(NS(c2ccccc2)(=O)=O)c2c3c(ccc2)cccc31',
'O=S(=O)(NC1C(Sc2[nH]cnn2)c2cccc3c2c1ccc3)c1ccccc1'),
TestMolecule('COc1ccc(Nc2nc(NCc3ccco3)nc(NN=Cc3ccccc3F)n2)cc1',
'c1ccc(Nc2nc(nc(n2)NN=Cc2ccccc2)NCc2ccco2)cc1'),
TestMolecule('CC1=CC(=O)C(=C2C=C(c3ccccc3[N+]([O-])=O)NN2)C=C1',
'O=C1C(=C2NNC(=C2)c2ccccc2)C=CC=C1'),
TestMolecule('COc1ccc(CC2[N+]([O-])(C)CCc3cc(OC)c(O)cc32)cc1O',
'c1ccc(cc1)CC1c2c(cccc2)CC[NH2+]1'),
TestMolecule('Cl.NC(N)=Nc1nc(=O)c2cc(Br)ccc2[nH]1', 'O=c1nc[nH]c2ccccc21'),
TestMolecule('CC(=O)N1CCC(=NNc2ccc(S(=O)(=O)N3CCOCC3)cc2[N+]([O-])=O)CC1',
'c1cc(ccc1NN=C1CCNCC1)S(=O)(=O)N1CCOCC1'),
TestMolecule('Cc1cc(S(N(Cc2ccc(F)cc2)CC2OCCC2)(=O)=O)ccc1-n1cnnn1',
'c1cc(ccc1)CN(CC1OCCC1)S(c1ccc(cc1)-n1cnnn1)(=O)=O'),
TestMolecule('CC1(C)OCc2c(c3c(sc4c(NCCCO)ncnc43)nc2-c2ccco2)C1',
'c1ncnc2c1sc1nc(c3c(c12)CCOC3)-c1ccco1'),
TestMolecule('COc1ccc(CCNC(=O)CSc2n(-c3ccc(OC)c(OC)c3)nnn2)cc1OC',
'O=C(CSc1n(-c2ccccc2)nnn1)NCCc1ccccc1'),
TestMolecule('CC(C)(CC(O)=O)CC(NCc1c(Cl)cccc1Sc1ccc(Cl)cc1)=O', 'c1ccc(Sc2ccccc2)cc1'),
TestMolecule('COc1ccc(-c2cc(CCCC(=O)NCCc3cc(OC)ccc3OC)no2)cc1',
'O=C(NCCc1ccccc1)CCCc1noc(c1)-c1ccccc1'),
TestMolecule('Cc1ccc(-c2ncns2)cc1', 'c1ccc(cc1)-c1sncn1'),
TestMolecule('C(O)CCn1c(=O)c2c(nc1C=Cc1ccc([N+]([O-])=O)o1)cccc2',
'O=c1[nH]c(C=Cc2ccco2)nc2c1cccc2'),
TestMolecule('COC(CC(O)CC(O)C(C)OCc1ccccc1)OC', 'c1ccccc1'),
TestMolecule('Cl.CCCC(N1CCN(C(=O)c2occc2)CC1)c1n(C(C)(C)C)nnn1',
'O=C(N1CCN(Cc2nnn[nH]2)CC1)c1ccco1'),
TestMolecule('O=C(NC(CO)c1ccccc1)c1occc1', 'O=C(NCc1ccccc1)c1occc1'),
TestMolecule('O=C(Nc1ccc(N2CCOCC2)cc1)c1c(Cl)cc(F)c(F)c1', 'O=C(Nc1ccc(N2CCOCC2)cc1)c1ccccc1'),
TestMolecule('CCc1sc(N2C(=O)c3ccc(Oc4ccc([N+]([O-])=O)cc4)cc3C2=O)nn1',
'O=C1N(C(=O)c2cc(Oc3ccccc3)ccc21)c1scnn1'),
TestMolecule('CC(C)Cc1ccc(C(C)C(=O)O)cc1', 'c1ccccc1'),
TestMolecule('Cl.N=c1sccn1CC(=O)Nc1cc(S(N2CCCC2)(=O)=O)ccc1Cl',
'N=c1n(CC(=O)Nc2cccc(S(=O)(N3CCCC3)=O)c2)ccs1'),
TestMolecule('c1ccc(-c2ccc(C(=O)OC3CC4OC(=O)CC4C3CO)cc2)cc1',
'c1ccc(cc1)-c1ccc(C(=O)OC2CC3CC(=O)OC3C2)cc1'),
TestMolecule('CN(CCC#N)CC(=O)Nc1ccc(S(N)(=O)=O)cc1', 'c1ccccc1'),
TestMolecule('Cc1nc(-c2ccc([N+]([O-])=O)cc2)sc1C(=O)O', 'c1cc(-c2sccn2)ccc1'),
TestMolecule('c1coc(C(=O)N2CCN(C(Cn3nnc(-c4ccc(NC(c5ccc(F)cc5)=O)cc4)n3)=O)CC2)c1',
'O=C(N1CCN(C(=O)Cn2nc(nn2)-c2ccc(NC(=O)c3ccccc3)cc2)CC1)c1ccco1'),
TestMolecule('Cc1onc(-c2c(Cl)cccc2Cl)c1C(N)=S', 'c1ccc(cc1)-c1nocc1'),
TestMolecule('CCOC(=O)c1cnc2ccccc2c1NCCO', 'c1cnc2ccccc2c1'),
TestMolecule('Cc1ccc(C)c(NC(=O)Cn2nnc(-c3ccc(N4CCOCC4)cc3)n2)c1',
'O=C(Cn1nnc(n1)-c1ccc(cc1)N1CCOCC1)Nc1ccccc1'),
TestMolecule('CC(C)(C)c1cc(C(=O)NNc2ccc(OC(F)(F)F)cc2)n(Cc2ccccc2)n1',
'O=C(NNc1ccccc1)c1ccnn1Cc1ccccc1'),
TestMolecule('CCCCCOC(=O)C1=C(C)N=C2N(NN=N2)C1c1ccc(OC)c(OC)c1OC',
'c1cccc(c1)C1N2NN=NC2=NC=C1'),
TestMolecule('Cc1cc2cc(CNC(=O)C3CC3)ccc2n1C', 'O=C(NCc1ccc2c(cc[nH]2)c1)C1CC1'),
TestMolecule('Cc1ccccc1C(NC(CC(C)C)C(Nc1cc(S(N(C)C)(=O)=O)ccc1)=O)=O',
'c1ccc(cc1)NC(CNC(=O)c1ccccc1)=O'),
TestMolecule('COCCCNC(=S)N1CCC(NC(=O)c2ccco2)CC1', 'O=C(NC1CCNCC1)c1ccco1'),
TestMolecule('Cn1c(C=Cc2oc([N+]([O-])=O)cc2)nc2ccccc2c1=O', 'O=c1[nH]c(C=Cc2occc2)nc2ccccc12'),
TestMolecule('c1cc2nc(SCc3cc(=O)n4ccsc4n3)n(CCCO)c(=O)c2cc1',
'c1ccc2nc(SCc3cc(=O)n4ccsc4n3)[nH]c(=O)c2c1'),
TestMolecule('c1ccc2c(c1)cccc2NC(=O)CC1SC(NCC2OCCC2)=NC1=O',
'c1ccc2c(c1)cccc2NC(=O)CC1SC(NCC2OCCC2)=NC1=O'),
]
if __name__ == '__main__': # pragma: no cover
unittest.main()
| ptosco/rdkit | rdkit/Chem/Scaffolds/UnitTestMurckoScaffold.py | Python | bsd-3-clause | 37,500 | [
"RDKit"
] | 0fb03c1b9faf14d4119ca03e01d0a8f4b1cf84deb667096ac13ebd6fd2e5b08b |
from pymol.cgo import *
from pymol import cmd
from pymol.vfont import plain
#this is a plugin version of the axes_cyl scripts by Dr. Robert L. Campbell (in fact I only added
#the __init__ function and englobed the resto of the code in a main function)
#As a very minor change I replaced the "ORIGIN" label for the origin with an "O"
def main():
# create the axes object, draw axes with cylinders coloured red, green,
#blue for X, Y and Z
obj = [
CYLINDER, 0., 0., 0., 10., 0., 0., 0.2, 1.0, 1.0, 1.0, 1.0, 0.0, 0.,
CYLINDER, 0., 0., 0., 0., 10., 0., 0.2, 1.0, 1.0, 1.0, 0., 1.0, 0.,
CYLINDER, 0., 0., 0., 0., 0., 10., 0.2, 1.0, 1.0, 1.0, 0., 0.0, 1.0,
]
# add labels to axes object
cyl_text(obj,plain,[-5.,-5.,-1],'O',0.20,axes=[[3.0,0.0,0.0],[0.0,3.0,0.0],[0.0,0.0,3.0]])
cyl_text(obj,plain,[10.,0.,0.],'X',0.20,axes=[[3.0,0.0,0.0],[0.0,3.0,0.0],[0.0,0.0,3.0]])
cyl_text(obj,plain,[0.,10.,0.],'Y',0.20,axes=[[3.0,0.0,0.0],[0.0,3.0,0.0],[0.0,0.0,3.0]])
cyl_text(obj,plain,[0.,0.,10.],'Z',0.20,axes=[[3.0,0.0,0.0],[0.0,3.0,0.0],[0.0,0.0,3.0]])
# then we load it into PyMOL
cmd.load_cgo(obj,'axes')
def __init__(self):
self.menuBar.addmenuitem('Plugin', 'command',
'Showaxes',
label = 'Showaxes',
command = lambda s=self : main())
| weitzner/Dotfiles | pymol_scripts/showaxes.py | Python | mit | 1,326 | [
"PyMOL"
] | 1d885526bed17422256829b063e42932a8b8b076b2791ea7ffa6987a346bc9da |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.rejection_resample()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import resampling
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class RejectionResampleTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(initial_known=[True, False])))
def testDistribution(self, initial_known):
classes = np.random.randint(5, size=(10000,)) # Uniformly sampled
target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
initial_dist = [0.2] * 5 if initial_known else None
classes = math_ops.cast(classes, dtypes.int64) # needed for Windows build.
dataset = dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
200, seed=21).map(lambda c: (c, string_ops.as_string(c))).repeat()
get_next = self.getNext(
dataset.apply(
resampling.rejection_resample(
target_dist=target_dist,
initial_dist=initial_dist,
class_func=lambda c, _: c,
seed=27)))
returned = []
while len(returned) < 2000:
returned.append(self.evaluate(get_next()))
returned_classes, returned_classes_and_data = zip(*returned)
_, returned_data = zip(*returned_classes_and_data)
self.assertAllEqual([compat.as_bytes(str(c))
for c in returned_classes], returned_data)
total_returned = len(returned_classes)
class_counts = np.array([
len([True for v in returned_classes if v == c])
for c in range(5)])
returned_dist = class_counts / total_returned
self.assertAllClose(target_dist, returned_dist, atol=1e-2)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(only_initial_dist=[True, False])))
def testEdgeCasesSampleFromInitialDataset(self, only_initial_dist):
init_dist = [0.5, 0.5]
target_dist = [0.5, 0.5] if only_initial_dist else [0.0, 1.0]
num_classes = len(init_dist)
# We don't need many samples to test that this works.
num_samples = 100
data_np = np.random.choice(num_classes, num_samples, p=init_dist)
dataset = dataset_ops.Dataset.from_tensor_slices(data_np)
# Reshape distribution.
dataset = dataset.apply(
resampling.rejection_resample(
class_func=lambda x: x,
target_dist=target_dist,
initial_dist=init_dist))
get_next = self.getNext(dataset)
returned = []
with self.assertRaises(errors.OutOfRangeError):
while True:
returned.append(self.evaluate(get_next()))
@combinations.generate(test_base.default_test_combinations())
def testRandomClasses(self):
init_dist = [0.25, 0.25, 0.25, 0.25]
target_dist = [0.0, 0.0, 0.0, 1.0]
num_classes = len(init_dist)
# We don't need many samples to test a dirac-delta target distribution.
num_samples = 100
data_np = np.random.choice(num_classes, num_samples, p=init_dist)
dataset = dataset_ops.Dataset.from_tensor_slices(data_np)
# Apply a random mapping that preserves the data distribution.
def _remap_fn(_):
return math_ops.cast(random_ops.random_uniform([1]) * num_classes,
dtypes.int32)[0]
dataset = dataset.map(_remap_fn)
# Reshape distribution.
dataset = dataset.apply(
resampling.rejection_resample(
class_func=lambda x: x,
target_dist=target_dist,
initial_dist=init_dist))
get_next = self.getNext(dataset)
returned = []
with self.assertRaises(errors.OutOfRangeError):
while True:
returned.append(self.evaluate(get_next()))
classes, _ = zip(*returned)
bincount = np.bincount(
np.array(classes),
minlength=num_classes).astype(np.float32) / len(classes)
self.assertAllClose(target_dist, bincount, atol=1e-2)
@combinations.generate(test_base.default_test_combinations())
def testExhaustion(self):
init_dist = [0.5, 0.5]
target_dist = [0.9, 0.1]
dataset = dataset_ops.Dataset.range(10000)
resampler = resampling.rejection_resample(
class_func=lambda x: x % 2,
target_dist=target_dist,
initial_dist=init_dist)
dataset = dataset.apply(resampler)
get_next = self.getNext(dataset)
returned = []
with self.assertRaises(errors.OutOfRangeError):
while True:
returned.append(self.evaluate(get_next()))
classes, _ = zip(*returned)
bincount = np.bincount(
np.array(classes),
minlength=len(init_dist)).astype(np.float32) / len(classes)
self.assertAllClose(target_dist, bincount, atol=1e-2)
@parameterized.parameters(
("float32", "float64"),
("float64", "float32"),
("float64", "float64"),
("float64", None),
)
def testOtherDtypes(self, target_dtype, init_dtype):
target_dist = np.array([0.5, 0.5], dtype=target_dtype)
if init_dtype is None:
init_dist = None
else:
init_dist = np.array([0.5, 0.5], dtype=init_dtype)
dataset = dataset_ops.Dataset.range(10)
resampler = resampling.rejection_resample(
class_func=lambda x: x % 2,
target_dist=target_dist,
initial_dist=init_dist)
dataset = dataset.apply(resampler)
get_next = self.getNext(dataset)
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
| sarvex/tensorflow | tensorflow/python/data/experimental/kernel_tests/rejection_resample_test.py | Python | apache-2.0 | 6,755 | [
"DIRAC"
] | 4b07cbd8fa430beffb105f86a871ce5bd205d79db74ece9bed693b5c9a458c1b |
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2020 Gerome Fournier <jef(at)foutaise.org>
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"],
["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
print(table.draw())
print()
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
| Mme | | Lou |
| Louise | 28 | |
| Bourgeau | | Loue |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
from __future__ import division
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'MIT'
__version__ = '1.6.4'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
frinkelpi:
- preserve empty lines
"""
import sys
import unicodedata
# define a text wrapping function to wrap some text
# to a specific width:
# - use cjkwrap if available (better CJK support)
# - fallback to textwrap otherwise
try:
import cjkwrap
def textwrapper(txt, width):
return cjkwrap.wrap(txt, width)
except ImportError:
try:
import textwrap
def textwrapper(txt, width):
return textwrap.wrap(txt, width)
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
# define a function to calculate the rendering width of a unicode character
# - use wcwidth if available
# - fallback to unicodedata information otherwise
try:
import wcwidth
def uchar_width(c):
"""Return the rendering width of a unicode character
"""
return max(0, wcwidth.wcwidth(c))
except ImportError:
def uchar_width(c):
"""Return the rendering width of a unicode character
"""
if unicodedata.east_asian_width(c) in 'WF':
return 2
elif unicodedata.combining(c):
return 0
else:
return 1
from functools import reduce
if sys.version_info >= (3, 0):
unicode_type = str
bytes_type = bytes
else:
unicode_type = unicode
bytes_type = str
def obj2unicode(obj):
"""Return a unicode representation of a python object
"""
if isinstance(obj, unicode_type):
return obj
elif isinstance(obj, bytes_type):
try:
return unicode_type(obj, 'utf-8')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
return unicode_type(obj, 'utf-8', 'replace')
else:
return unicode_type(obj)
def len(iterable):
"""Redefining len here so it will be able to work with non-ASCII characters
"""
if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):
return sum([uchar_width(c) for c in obj2unicode(iterable)])
else:
return iterable.__len__()
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size
"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
class FallbackToText(Exception):
"""Used for failed conversion to float"""
pass
class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combination of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
self._hline_string = None
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
"""
if type(x) == int:
return str(x)
else:
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
elif f != f: # NaN
fn = cls._fmt_text
elif f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
if __name__ == '__main__':
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\nXavier\nHuon", 32, "Xav'"],
["Mr\nBaptiste\nClement", 1, "Baby"],
["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
print(table.draw())
print()
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
| foutaise/texttable | texttable.py | Python | mit | 22,617 | [
"Brian"
] | ebf3cab5d06a1949736aea297c446d63ab19874fe08e9708086d684778b71357 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 41:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
Fmat = Fmat_original[0:41,:]
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:7]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Plush-Toy-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=1)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True',numbers_alpha=2)
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,130.3,0,1.2])
grid('True')
show()
| tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/single_feature/results/test10_cross_validate_objects_1200ms_scaled_method_v_force.py | Python | mit | 4,558 | [
"Mayavi"
] | 7e95536d7ef4748bd1e566225cfbb87133fb5e31e6915563f55c7cb4bca6956b |
#!/usr/bin/env python
from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
def version():
with open('VERSION') as f:
return f.read().strip()
reqs = [line.strip() for line in open('requirements.txt') if not line.startswith('#')]
setup(
name='gutils',
version=version(),
description='A set of Python utilities for reading, merging, and post '
'processing Teledyne Webb Slocum Glider data.',
long_description=readme(),
author='Kyle Wilcox',
author_email='kyle@axiomdatascience.com',
install_requires=reqs,
url='https://github.com/SECOORA/GUTILS',
packages=find_packages(),
entry_points={
'console_scripts': [
'gutils_create_nc = gutils.nc:main_create',
'gutils_check_nc = gutils.nc:main_check',
'gutils_binary_to_ascii_watch = gutils.watch.binary:main_to_ascii',
'gutils_ascii_to_netcdf_watch = gutils.watch.ascii:main_to_netcdf',
'gutils_netcdf_to_ftp_watch = gutils.watch.netcdf:main_to_ftp',
'gutils_netcdf_to_erddap_watch = gutils.watch.netcdf:main_to_erddap',
]
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Scientific/Engineering'
],
)
| SECOORA/GUTILS | setup.py | Python | mit | 1,583 | [
"NetCDF"
] | 974adbad5b90843cb7cc2c5f1e747ca6a46588274ebdf9e28471a8156d120366 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.