text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# by Alexander Nedovizin
import bpy
from bpy.props import BoolProperty, IntProperty, StringProperty, FloatProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.utils.nodes_mixins.sv_animatable_nodes import SvAnimatableNode
from sverchok.data_structure import updateNode
from sverchok.data_structure import handle_read, handle_write
from random import uniform
from copy import deepcopy
from cmath import exp
class SvNeuroElman:
    """ A set of functions for working with teachable neuron """
    # All network state (layer sizes, weight matrices, hyper-parameters) is
    # kept in an external ``prop`` dict that every method receives, so the
    # class itself is stateless.  Expected keys: 'InA'/'InB'/'InC' (layer
    # sizes), 'wA'/'wB' (weight matrices), 'k_learning', 'k_lambda',
    # 'epsilon', 'trashold', 'cycles'.

    def init_w(self, number, ext, treshold):
        # Build a ``number`` x ``ext`` weight matrix filled with uniform
        # random values in [-treshold, treshold].
        out = []
        for _ in range(number):
            tmp = [uniform(-treshold, treshold) for _ in range(ext)]
            out.append(tmp)
        return out

    def sigmoida(self, signal):
        # tanh-like activation: (e^x - e^-x) / (e^x + e^-x + 1e-8).
        # ``cmath.exp`` returns a complex number, hence the ``.real``.
        result = (exp(signal).real - exp(-signal).real) / (exp(signal).real + exp(-signal).real + 1e-8)
        return result

    def neuro(self, list_in, etalon, maxim, is_learning, prop):
        """ The function calculates the output values depending on the input """
        # Scale the inputs down by ``maxim`` so activations stay in range.
        _list_in = [signal_a/maxim for signal_a in list_in]
        out_a = self.layer_a(_list_in, prop)
        out_b = self.layer_b(out_a, prop)
        out_c = self.layer_c(out_b, prop)
        if is_learning:
            len_etalon = len(etalon)
            if len_etalon < prop['InC']:
                # Pad the target vector with zeros up to the output size.
                d = prop['InC'] - len_etalon
                etalon = etalon + [0] * d
            _etalon = list(map(lambda x: x / maxim, etalon))
            self.learning(out_a, out_b, out_c, _etalon, maxim, prop)
        # Scale the outputs back to the caller's value range.
        _out_c = list(map(lambda x: x * maxim, out_c))
        return _out_c

    def layer_a(self, list_in, prop):
        # Input layer: copy the signals and pad with constant 1s (bias-like
        # units) up to the configured layer size 'InA'.
        out_a = deepcopy(list_in)
        len_outa = len(out_a)
        if len_outa < prop['InA']:
            ext_list_in = prop['InA'] - len_outa
            out_a.extend([1] * ext_list_in)
        return out_a

    def layer_b(self, outA, prop):
        # Hidden layer: weighted sums over 'wA' followed by the activation.
        out_b = [0] * prop['InB']
        for idx_a, weights_a in enumerate(prop['wA']):
            for idx_b, wa in enumerate(weights_a):
                signal_a = wa * outA[idx_a]
                out_b[idx_b] += signal_a
        _out_b = [self.sigmoida(signal_b) for signal_b in out_b]
        return _out_b

    def layer_c(self, outB, prop):
        # Output layer: weighted sums over 'wB'; note there is no activation
        # here — the raw sums are returned.
        out_c = [0] * prop['InC']
        for idx_b, weights_b in enumerate(prop['wB']):
            for idx_c, wb in enumerate(weights_b):
                signal_b = wb * outB[idx_b]
                out_c[idx_c] += signal_b
        return out_c

    # **********************
    # Elementary pieces of the back-propagation rule.

    @staticmethod
    def sigma(ej, f_vj):
        # Local gradient: error term times activation-derivative factor.
        return ej * f_vj

    @staticmethod
    def f_vj_sigmoida(a, yj):
        # Sigmoid-derivative-like factor scaled by 1/a (``a`` is used as the
        # layer size by the callers; a == 0 falls back to a factor of 1).
        if a == 0:
            b = 1
        else:
            b = 1 / a
        return b * yj * (1 - yj)

    @staticmethod
    def func_ej_last(dj, yj):
        # Error of an output-layer unit: target minus actual.
        return dj - yj

    @staticmethod
    def func_ej_inner(e_sigma_k, wkj):
        # Error propagated to an inner unit through weight wkj.
        return e_sigma_k * wkj

    @staticmethod
    def delta_wji(sigma_j, yi, prop):
        # Weight update step: learning rate * local gradient * input signal.
        return prop['k_learning'] * sigma_j * yi

    @staticmethod
    def func_w(w, dw, prop):
        # Apply weight decay ('k_lambda') and add the update.
        return (1 - prop['k_lambda']) * w + dw

    def learning(self, out_a, out_b, out_c, etalon, maxim, prop):
        # One training pass: adjusts prop['wA'] / prop['wB'] toward ``etalon``.
        # Works on copies of the weights; the dict is only updated at the end.
        weights_a = deepcopy(prop['wA'])
        weights_b = deepcopy(prop['wB'])
        _out_a = deepcopy(out_a)
        for idx, native_signal_a in enumerate(out_a):
            processed_signal_a = deepcopy(native_signal_a)
            _out_b = deepcopy(out_b)
            _out_c = deepcopy(out_c)
            for _ in range(prop['cycles']):
                in_b = [0] * prop['InB']
                in_a = [0] * prop['InA']
                # Output layer: accumulate error terms and update 'wB'.
                for idc, signal_c in enumerate(_out_c):
                    c_ = self.sigmoida(signal_c)
                    e_c = self.func_ej_last(etalon[idc], signal_c)
                    f_vc = self.f_vj_sigmoida(prop['InC'], c_)
                    sigma_c = self.sigma(e_c, f_vc)
                    for idb, signal_b in enumerate(_out_b):
                        dwji = self.delta_wji(sigma_c, signal_b, prop)
                        weights_b[idb][idc] = self.func_w(weights_b[idb][idc], dwji, prop)
                        # NOTE(review): classic back-propagation accumulates
                        # sigma * weight here, not sigma * delta-weight —
                        # confirm this is intentional before changing.
                        in_b[idb] += sigma_c * dwji
                # Hidden layer: propagate the accumulated error, update 'wA'.
                for idb, signal_b in enumerate(_out_b):
                    f_vb = self.f_vj_sigmoida(prop['InB'], signal_b)
                    sigma_b = self.sigma(in_b[idb], f_vb)
                    for ida, signal_a in enumerate(out_a):
                        dwji = self.delta_wji(sigma_b, signal_a, prop)
                        weights_a[ida][idb] = self.func_w(weights_a[ida][idb], dwji, prop)
                        in_a[ida] += sigma_b * dwji
                # Nudge the tracked input signal and stop once it has
                # stabilised (or diverged by more than half of ``maxim``).
                processed_signal_a -= prop['epsilon'] * processed_signal_a * (maxim - processed_signal_a)
                absdx = abs(native_signal_a - processed_signal_a)
                if absdx <= prop['trashold'] or absdx > abs(maxim / 2):
                    break
                _out_a[idx] = processed_signal_a
                # Recompute the layer outputs with the perturbed input.
                # NOTE(review): layer_c is fed the *original* out_b here, not
                # the freshly computed _out_b — possibly intended to be
                # _out_b; verify before changing.
                _out_b = self.layer_b(_out_a, prop)
                _out_c = self.layer_c(out_b, prop)
        prop['wA'] = weights_a
        prop['wB'] = weights_b
class SvNeuroElman1LNode(bpy.types.Node, SverchCustomTreeNode, SvAnimatableNode):
    '''
    Triggers: Neuro Elman 1 Layer
    Tooltip: Join ETALON data - after animation learning - disconnect ETALON
    '''
    bl_idname = 'SvNeuroElman1LNode'
    bl_label = '*Neuro Elman 1 Layer'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_NEURO'

    # Plain class attribute (not a bpy property); re-bound in process() to
    # the SvNeuroElman instance stored in the per-node handle.
    elman = None

    # User-tunable hyper-parameters of the network.
    k_learning: FloatProperty(name='k_learning', default=0.1, update=updateNode, description="Learning rate")
    gisterezis: FloatProperty(name='gisterezis', default=0.1, min=0.0, update=updateNode,
                              description="Sets the threshold of values inside the learning algorithm (in plans)")
    maximum: FloatProperty(name='maximum', default=3.0, update=updateNode,
                           description="The maximum value of the input and output layer")
    menushka: BoolProperty(name='menushka', default=False, description="Extra options")
    epsilon: FloatProperty(name='epsilon', default=1.0, update=updateNode,
                           description="The coefficient participates in the learning assessment function")
    treshold: FloatProperty(name='treshold', default=0.01, update=updateNode,
                            description="Participates in learning assessment")
    k_lambda: FloatProperty(name='k_lambda', default=0.0001, max=0.1, update=updateNode,
                            description="Weight change step during training")
    cycles: IntProperty(name='cycles', default=3, min=1, update=updateNode, description="Internal Learning Loops")
    lA: IntProperty(name='lA', default=1, min=0, update=updateNode,
                    description="Input layer (must match the number of elements in the input)")
    lB: IntProperty(name='lB', default=5, min=0, update=updateNode,
                    description="Inner layer (more nodes - more accurate calculations)")
    lC: IntProperty(name='lC', default=1, min=0, update=updateNode,
                    description="Output layer (must match the number of elements in the output)")

    def sv_init(self, context):
        # Sockets: input data, optional learning target (etalon), result out.
        self.inputs.new('SvStringsSocket', "data")
        self.inputs.new('SvStringsSocket', "etalon")
        self.outputs.new('SvStringsSocket', "result")

    def draw_buttons(self, context, layout):
        # Node UI: layer sizes, maximum, a Reset operator button, and an
        # optional "extend sets" panel with the remaining hyper-parameters.
        self.draw_animatable_buttons(layout, icon_only=True)
        # Handle store key: node name + tree name, must match process().
        handle_name = self.name + self.id_data.name
        col_top = layout.column(align=True)
        row = col_top.row(align=True)
        row.prop(self, "lA", text="A layer")
        row = col_top.row(align=True)
        row.prop(self, "lB", text="B layer")
        row = col_top.row(align=True)
        row.prop(self, "lC", text="C layer")
        layout.prop(self, "maximum", text="maximum")
        # The Reset button dispatches to SvNeuroOps with typ == 1.
        op_start = layout.operator('node.sverchok_neuro', text='Reset')
        op_start.typ = 1
        op_start.handle_name = handle_name
        layout.prop(self, "menushka", text="extend sets:")
        if self.menushka:
            layout.prop(self, "k_learning", text="koeff learning")
            layout.prop(self, "gisterezis", text="gisterezis")
            layout.prop(self, "cycles", text="cycles")
            col = layout.column(align=True)
            col.prop(self, "epsilon", text="epsilon")
            col = layout.column(align=True)
            col.prop(self, "k_lambda", text="lambda")
            col = layout.column(align=True)
            col.prop(self, "treshold", text="treshold")

    def process(self):
        # Restore (or lazily create) the network state from the handle
        # store; handle_read returns a (found, data)-style pair.
        handle_name = self.name + self.id_data.name
        handle = handle_read(handle_name)
        props = handle[1]
        if not handle[0]:
            # First run for this node: build default properties.  Weights
            # ('wA'/'wB') stay empty here and are initialised below when the
            # layer-size check fires.
            elman = SvNeuroElman()
            props = {'InA': 2,
                     'InB': 5,
                     'InC': 1,
                     'wA': [],
                     'wB': [],
                     'gister': 0.01,
                     'k_learning': 0.1,
                     'epsilon': 1.3,
                     'cycles': 3,
                     'trashold': 0.01,
                     'k_lambda': 0.0001,
                     'Elman': elman,
                     }
        self.elman = props['Elman']
        result = []
        if self.outputs['result'].is_linked and self.inputs['data'].is_linked:
            # Learning mode is enabled only while the etalon socket is wired.
            if self.inputs['etalon'].is_linked:
                input_etalon = self.inputs['etalon'].sv_get()
                is_learning = True
            else:
                input_etalon = [[0]]
                is_learning = False
            # Re-initialise the weights whenever the UI layer sizes changed.
            # InA is lA + 1 because a bias element is prepended to the data.
            if (props['InA'] != self.lA + 1) or props['InB'] != self.lB or props['InC'] != self.lC:
                props['InA'] = self.lA + 1
                props['InB'] = self.lB
                props['InC'] = self.lC
                props['wA'] = self.elman.init_w(props['InA'], props['InB'], props['trashold'])
                props['wB'] = self.elman.init_w(props['InB'], props['InC'], props['trashold'])
            # Sync the remaining hyper-parameters from the UI every update.
            props['gister'] = self.gisterezis
            props['k_learning'] = self.k_learning
            props['epsilon'] = self.epsilon
            props['k_lambda'] = self.k_lambda
            props['cycles'] = self.cycles
            props['trashold'] = self.treshold
            input_data = self.inputs['data'].sv_get()
            # Normalise both inputs to lists of lists.
            if type(input_etalon[0]) not in [list, tuple]:
                input_etalon = [input_etalon]
            if type(input_data[0]) not in [list, tuple]:
                input_data = [input_data]
            for idx, data in enumerate(input_data):
                # Reuse the last etalon when there are fewer etalons than
                # data items.
                let = len(input_etalon) - 1
                eta = input_etalon[min(idx, let)]
                # Prepend the constant bias input (matches the lA + 1 above).
                data2 = [1.0] + data
                if type(eta) not in [list, tuple]:
                    eta = [eta]
                result.append(self.elman.neuro(data2, eta, self.maximum, is_learning, props))
        else:
            result = [[]]
        # Persist the (possibly updated) state and publish the result.
        handle_write(handle_name, props)
        self.outputs['result'].sv_set(result)
# *********************************
class SvNeuroOps(bpy.types.Operator):
    """ Resetting weights """
    bl_idname = "node.sverchok_neuro"
    bl_label = "Sverchok Neuro operators"
    bl_options = {'REGISTER', 'UNDO'}
    typ: IntProperty(name='typ', default=0)
    handle_name: StringProperty(name='handle')

    def execute(self, context):
        # Only typ == 1 ("reset weights") is currently implemented; any
        # other value is a no-op.
        if self.typ != 1:
            return {'FINISHED'}
        handle = handle_read(self.handle_name)
        prop = handle[1]
        if handle[0]:
            # Re-randomise both weight matrices from the stored network
            # state, then persist the refreshed state.
            elman = prop['Elman']
            for key, n_rows, n_cols in (('wA', 'InA', 'InB'),
                                        ('wB', 'InB', 'InC')):
                prop[key] = elman.init_w(prop[n_rows], prop[n_cols], prop['trashold'])
            handle_write(self.handle_name, prop)
        return {'FINISHED'}
def register():
    """Register the operator and the node class with Blender."""
    # The operator is registered first so the node's Reset button can
    # reference it as soon as the node class exists.
    for node_cls in (SvNeuroOps, SvNeuroElman1LNode):
        bpy.utils.register_class(node_cls)
def unregister():
    """Unregister the classes in the reverse order of register()."""
    for node_cls in (SvNeuroElman1LNode, SvNeuroOps):
        bpy.utils.unregister_class(node_cls)
| nortikin/sverchok | nodes/logic/neuro_elman.py | Python | gpl-3.0 | 12,827 | [
"NEURON"
] | 25d6f9f9ebc0a683c429dc0041e3d728afac5120c2a4e01d1d97082dce88a275 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Introduction: This script is used to fetch pairwise core genomes between each strain pair.
# It is better to put this script in ITEP directory.
# Created by galaxy on 2016/10/21 15:11
import os
import sys
import shutil
from collections import defaultdict
from itertools import combinations
def replace(cur_dir):
    """Strip the ITEP cluster-run prefix from every file name under ``cur_dir``.

    Relies on the module-level ``maxbit`` value to build the prefix that is
    removed; files that do not contain the prefix are left untouched.
    """
    prefix = "all_I_2.0_c_{0}_m_maxbit_".format(maxbit)
    for parent, _dirnames, filenames in os.walk(cur_dir):
        for name in filenames:
            if prefix not in name:
                continue
            renamed = name.replace(prefix, "")
            os.rename(os.path.join(parent, name), os.path.join(parent, renamed))
# ---------------------------------------------------------------------------
# Script body: read the strain table, then for every unordered pair of
# strains extract their pairwise core-genome gene clusters via the ITEP
# command-line tools.
# ---------------------------------------------------------------------------
my_path = os.getcwd()
maxbit = 0.4  # maxbit cutoff embedded in the ITEP cluster-run identifier
# source_file = os.path.join(my_path, 'SourceMe.sh')
# os.system('source {0}'.format(source_file))
strain_information_file = os.path.join(my_path, 'strain_info.txt')
strain_dict = defaultdict()  # strain name -> [value of the third column]
strain_list = []             # strain names in file order
with open(strain_information_file, 'r') as f1:
    # Skip the header line; columns are tab-separated, with the strain name
    # in column 2 and its associated attribute in column 3.
    for each_line in f1.readlines()[1:]:
        a_list = each_line.strip().split('\t')
        strain_dict[a_list[1]] = [a_list[2]]
        strain_list.append(a_list[1])
# fetch_strain_pair = []
# for i in strain_list:
#     for j in strain_list:
#         if i != j:
#             fetch_strain_pair.append((i, j))
# All unordered strain pairs: each pair is processed exactly once.
fetch_strain_pair = list(combinations(strain_list, 2))
# Recreate the output directory from scratch on every run.
strain_pair_dir = os.path.join(my_path, 'all_strain_pairs_{0}'.format(maxbit))
if not os.path.exists(strain_pair_dir):
    os.makedirs(strain_pair_dir)
else:
    shutil.rmtree(strain_pair_dir)
    os.makedirs(strain_pair_dir)
tmp_organisms = os.path.join(my_path, 'tmp_organisms.txt')
# NOTE(review): replace_file is assigned but never used below.
replace_file = os.path.join(my_path, 'replace.py')
for each_pair in fetch_strain_pair:
    the_strain = each_pair[0]
    other_strain = each_pair[1]
    # if strain_dict[the_strain][2] != strain_dict[other_strain][2]:
    # if strain_dict[the_strain][1] != strain_dict[other_strain][1]:
    # Short name: the last whitespace-separated token of the full name.
    the_strain_name = the_strain.split(' ')[-1]
    other_strain_name = other_strain.split(' ')[-1]
    strain_gene_dir = os.path.join(strain_pair_dir, '{0}_{1}'.format(the_strain_name, other_strain_name))
    # Skip pairs whose output directory already exists (resume support).
    if not os.path.exists(strain_gene_dir):
        # Write a two-line organism file consumed by the ITEP pipeline.
        the_strain_line = '{0}\t{1}\n'.format(
            the_strain, strain_dict[the_strain][0])
        other_strain_line = '{0}\t{1}\n'.format(
            other_strain, strain_dict[other_strain][0])
        with open(tmp_organisms, 'w') as f2:
            result_line = the_strain_line + other_strain_line
            f2.write(result_line)
        # Pipe the ITEP tools together to dump the cluster fastas for this
        # pair.  NOTE(review): os.system() passes this through the shell, so
        # paths containing spaces or shell metacharacters would break (or be
        # unsafe); subprocess with a list would be more robust.
        cmd = 'cat {0} | python {1}/src/db_findClustersByOrganismList.py -a -u all_I_2.0_c_{3}_m_maxbit | python ' \
              '{1}/src/db_getClusterGeneInformation.py|grep -F -f {0} |python ' \
              '{1}/src/getClusterFastas.py -n {2}'.format(
                  tmp_organisms, my_path, strain_gene_dir, maxbit)
        os.system(cmd)
        # Strip the cluster-run prefix from the generated file names.
        replace(strain_gene_dir)
        os.remove(tmp_organisms)
# Drop the maxbit suffix from the final output directory name.
os.system('mv all_strain_pairs_{0} all_strain_pairs'.format(maxbit))
| cvn001/RecentHGT | src/fetch_pairwise_genome.py | Python | mit | 3,100 | [
"Galaxy"
] | f7af669743e22b20d62719a34017f181e726a7aba1d0bf063c9652a59acb398c |
#!/usr/bin/env python
##############################################################################
#
# Usage example for the procedure PPXF, which
# implements the Penalized Pixel-Fitting (pPXF) method by
# Cappellari M., & Emsellem E., 2004, PASP, 116, 138.
# The example also shows how to include a library of templates
# and how to mask gas emission lines if present.
#
# MODIFICATION HISTORY:
# V1.0.0: Written by Michele Cappellari, Leiden 11 November 2003
# V1.1.0: Log rebin the galaxy spectrum. Show how to correct the velocity
# for the difference in starting wavelength of galaxy and templates.
# MC, Vicenza, 28 December 2004
# V1.1.1: Included explanation of correction for instrumental resolution.
# After feedback from David Valls-Gabaud. MC, Venezia, 27 June 2005
# V2.0.0: Included example routine to determine the goodPixels vector
# by masking known gas emission lines. MC, Oxford, 30 October 2008
# V2.0.1: Included instructions for high-redshift usage. Thanks to Paul Westoby
# for useful feedback on this issue. MC, Oxford, 27 November 2008
# V2.0.2: Included example for obtaining the best-fitting redshift.
# MC, Oxford, 14 April 2009
# V2.1.0: Bug fix: Force PSF_GAUSSIAN to produce a Gaussian with an odd
# number of elements centered on the middle one. Many thanks to
# Harald Kuntschner, Eric Emsellem, Anne-Marie Weijmans and
# Richard McDermid for reporting problems with small offsets
# in systemic velocity. MC, Oxford, 15 February 2010
# V2.1.1: Added normalization of galaxy spectrum to avoid numerical
# instabilities. After feedback from Andrea Cardullo.
# MC, Oxford, 17 March 2010
# V2.2.0: Perform templates convolution in linear wavelength.
# This is useful for spectra with large wavelength range.
# MC, Oxford, 25 March 2010
# V2.2.1: Updated for Coyote Graphics. MC, Oxford, 11 October 2011
# V2.2.2: Renamed PPXF_KINEMATICS_EXAMPLE_SAURON to avoid conflict with the
# new PPXF_KINEMATICS_EXAMPLE_SDSS. Removed DETERMINE_GOOPIXELS which was
# made a separate routine. MC, Oxford, 12 January 2012
# V3.0.0: Translated from IDL into Python. MC, Oxford, 6 December 2013
# V3.0.1: Support both Python 2.6/2.7 and Python 3.x. MC, Oxford, 25 May 2014
# V3.0.2: Explicitly sort template files as glob() output may not be sorted.
# Thanks to Marina Trevisan for reporting problems under Linux.
# MC, Sydney, 4 February 2015
# V3.0.3: Use redshift in determine_goodpixels. MC, Oxford, 5 May 2015
# V3.0.4: Support both Pyfits and Astropy to read FITS files.
# MC, Oxford, 22 October 2015
#
##############################################################################
from __future__ import print_function
try:
import pyfits
except:
from astropy.io import fits as pyfits
from scipy import ndimage
import numpy as np
from time import clock
import glob
from ppxf import ppxf
import ppxf_util as util
def ppxf_kinematics_example_sauron():
    """Run the pPXF kinematics example on the SAURON spectrum of NGC 4550.

    Reads the galaxy spectrum and the Vazdekis (1999) SSP template library
    from the ``spectra/`` directory, log-rebins everything to a common
    velocity scale, matches the instrumental resolutions, then fits
    V, sigma, h3, h4 with pPXF and plots the best fit.  Takes no arguments;
    the fitted errors and timing are printed to stdout.
    """
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for elapsed-time measurement.
    from time import perf_counter

    # --- Read the galaxy spectrum and define its wavelength range ---------
    spectra_dir = 'spectra/'  # renamed from `dir` to avoid shadowing the builtin
    hdu = pyfits.open(spectra_dir + 'NGC4550_SAURON.fits')
    gal_lin = hdu[0].data
    h1 = hdu[0].header
    lamRange1 = h1['CRVAL1'] + np.array([0., h1['CDELT1']*(h1['NAXIS1']-1)])
    FWHM_gal = 4.2  # SAURON has an instrumental resolution FWHM of 4.2A.

    # If the galaxy is at a significant redshift (z > 0.03), pPXF would need
    # a large starting velocity (V > 1e4 km/s), which can make it stop.
    # Instead, bring the spectrum roughly to the rest frame beforehand: a
    # redshift is a linear shift of the log-rebinned spectrum, so it is
    # enough to adjust the wavelength range and resolution:
    #
    # z = 1.23                      # initial estimate of the galaxy redshift
    # lamRange1 = lamRange1/(1+z)   # approximate restframe wavelength range
    # FWHM_gal = FWHM_gal/(1+z)     # adjust resolution in Angstrom

    galaxy, logLam1, velscale = util.log_rebin(lamRange1, gal_lin)
    galaxy = galaxy/np.median(galaxy)  # normalize to avoid numerical issues
    noise = galaxy*0 + 0.0049          # assume constant noise per pixel here

    # --- Load the stellar template library --------------------------------
    # Vazdekis (1999, ApJ, 513, 224) single stellar population models.
    # glob() output may be unsorted, so sort explicitly for reproducibility.
    vazdekis = glob.glob(spectra_dir + 'Rbi1.30z*.fits')
    vazdekis.sort()
    FWHM_tem = 1.8  # Vazdekis spectra have a resolution FWHM of 1.8A.

    # Log-rebin one template at the galaxy's velocity scale to size the
    # array that will hold all template spectra.
    hdu = pyfits.open(vazdekis[0])
    ssp = hdu[0].data
    h2 = hdu[0].header
    lamRange2 = h2['CRVAL1'] + np.array([0., h2['CDELT1']*(h2['NAXIS1']-1)])
    sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp, velscale=velscale)
    templates = np.empty((sspNew.size, len(vazdekis)))

    # Convolve every template with the quadratic difference between the
    # SAURON and Vazdekis instrumental resolutions (rigorously valid when
    # both spectral profiles are well approximated by Gaussians), then
    # log-rebin and store each template as a column.
    FWHM_dif = np.sqrt(FWHM_gal**2 - FWHM_tem**2)
    sigma = FWHM_dif/2.355/h2['CDELT1']  # sigma difference in pixels
    for j in range(len(vazdekis)):
        hdu = pyfits.open(vazdekis[j])
        ssp = hdu[0].data
        ssp = ndimage.gaussian_filter1d(ssp, sigma)
        sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp, velscale=velscale)
        templates[:, j] = sspNew/np.median(sspNew)  # normalize templates

    # Galaxy and templates start at different wavelengths; the artificial
    # velocity shift dv is removed via the VSYST keyword below, so all
    # velocities are measured with respect to dv.  This assumes the redshift
    # is negligible (otherwise de-redshift first, see above).
    c = 299792.458
    dv = (logLam2[0]-logLam1[0])*c  # km/s
    vel = 450.                      # initial estimate of galaxy velocity, km/s
    z = np.exp(vel/c) - 1           # velocity/redshift relation used by pPXF
    goodPixels = util.determine_goodpixels(logLam1, lamRange2, z)

    # --- The actual fit; gas emission lines are masked via GOODPIXELS -----
    start = [vel, 180.]  # (km/s), starting guess for [V, sigma]
    t = perf_counter()
    pp = ppxf(templates, galaxy, noise, velscale, start,
              goodpixels=goodPixels, plot=True, moments=4,
              degree=4, vsyst=dv)
    print("Formal errors:")
    print(" dV dsigma dh3 dh4")
    print("".join("%8.2g" % f for f in pp.error*np.sqrt(pp.chi2)))
    print('Elapsed time in PPXF: %.2f s' % (perf_counter() - t))

    # If the galaxy was de-redshifted with the "z = 1.23..." lines above,
    # the best-fitting redshift is given by (equation 2 of Cappellari et
    # al. 2009, ApJ, 704, L34; http://adsabs.harvard.edu/abs/2009ApJ...704L..34C):
    #
    # print('Best-fitting redshift z:', (z + 1)*(1 + sol[0]/c) - 1)
#------------------------------------------------------------------------------

if __name__ == '__main__':
    # Run the example when the file is executed directly as a script.
    ppxf_kinematics_example_sauron()
| moustakas/impy | lib/ppxf/ppxf_kinematics_example_sauron.py | Python | gpl-2.0 | 8,518 | [
"Galaxy",
"Gaussian"
] | f337d7ef1da2d1cdfe5c328243bfb52abb9063695b0d2825823e34d3e3411f6a |
#!/usr/bin/env python
import datetime
import shutil
from copy import deepcopy
from math import log, exp
try:
from math import inf
except ImportError:
inf = float("inf")
from argparse import ArgumentParser
import GetOrganelleLib
from GetOrganelleLib.seq_parser import *
from GetOrganelleLib.pipe_control_func import *
import time
import random
import subprocess
import sys
import os
PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0]
import platform
SYSTEM_NAME = ""
if platform.system() == "Linux":
SYSTEM_NAME = "linux"
elif platform.system() == "Darwin":
SYSTEM_NAME = "macOS"
else:
sys.stdout.write("Error: currently GetOrganelle is not supported for " + platform.system() + "! ")
exit()
GO_LIB_PATH = os.path.split(GetOrganelleLib.__file__)[0]
GO_DEP_PATH = os.path.realpath(os.path.join(GO_LIB_PATH, "..", "GetOrganelleDep", SYSTEM_NAME))
UTILITY_PATH = os.path.join(PATH_OF_THIS_SCRIPT, "Utilities")
_GO_PATH = GO_PATH
_LBL_DB_PATH = LBL_DB_PATH
_SEQ_DB_PATH = SEQ_DB_PATH
MAJOR_VERSION, MINOR_VERSION = sys.version_info[:2]
if MAJOR_VERSION == 2 and MINOR_VERSION >= 7:
PYTHON_VERSION = "2.7+"
elif MAJOR_VERSION == 3 and MINOR_VERSION >= 5:
PYTHON_VERSION = "3.5+"
else:
sys.stdout.write("Python version have to be 2.7+ or 3.5+")
sys.exit(0)
MAX_RATIO_RL_WS = 0.75
AUTO_MIN_WS = 49
AUTO_MIN_WS_ANIMAL_MT = 41
AUTO_MIN_WS_PLANT_MT = 55
GLOBAL_MIN_WS = 29
BASE_COV_SAMPLING_PERCENT = 0.06
GUESSING_FQ_GZIP_COMPRESSING_RATIO = 3.58
GUESSING_FQ_SEQ_INFLATE_TO_FILE = 3.22
SUPPORTED_ORGANELLE_TYPES = ["embplant_pt", "embplant_mt", "embplant_nr", "other_pt", "animal_mt", "fungus_mt", "fungus_nr"]
ORGANELLE_EXPECTED_GRAPH_SIZES = {"embplant_pt": 130000,
"embplant_mt": 390000,
"embplant_nr": 13000,
"fungus_nr": 13000,
"other_pt": 39000,
"animal_mt": 13000,
"fungus_mt": 65000}
READ_LINE_TO_INF = int(HEAD_MAXIMUM_LINES/4)
def get_options(description, version):
version = version
usage = "\n### Embryophyta plant plastome, 2*(1G raw data, 150 bp) reads\n" + str(os.path.basename(__file__)) + \
" -1 sample_1.fq -2 sample_2.fq -s cp_seed.fasta -o plastome_output " \
" -R 15 -k 21,45,65,85,105 -F embplant_pt\n" \
"### Embryophyta plant mitogenome\n" + str(os.path.basename(__file__)) + \
" -1 sample_1.fq -2 sample_2.fq -s mt_seed.fasta -o mitogenome_output " \
" -R 30 -k 21,45,65,85,105 -F embplant_mt"
parser = ArgumentParser(usage=usage, description=description, add_help=False)
# simple help mode
if "-h" in sys.argv:
parser.add_argument("-1", dest="fq_file_1", help="Input file with forward paired-end reads (*.fq/.gz/.tar.gz).")
parser.add_argument("-2", dest="fq_file_2", help="Input file with reverse paired-end reads (*.fq/.gz/.tar.gz).")
parser.add_argument("-u", dest="unpaired_fq_files", help="Input file(s) with unpaired (single-end) reads. ")
parser.add_argument("-o", dest="output_base", help="Output directory.")
parser.add_argument("-s", dest="seed_file", help="Input fasta format file as initial seed. "
"Default: " + os.path.join(SEQ_DB_PATH, "*.fasta"))
parser.add_argument("-w", dest="word_size", help="Word size (W) for extension. Default: auto-estimated")
parser.add_argument("-R", dest="max_rounds", help="Maximum extension rounds (suggested: >=2). "
"Default: 15 (embplant_pt)")
parser.add_argument("-F", dest="organelle_type",
help="Target organelle genome type(s): "
"embplant_pt/other_pt/embplant_mt/embplant_nr/animal_mt/fungus_mt/fungus_nr/anonym/"
"embplant_pt,embplant_mt/other_pt,embplant_mt,fungus_mt")
parser.add_argument("--max-reads", type=float,
help="Maximum number of reads to be used per file. "
"Default: 1.5E7 (-F embplant_pt/embplant_nr/fungus_mt/fungus_nr); "
"7.5E7 (-F embplant_mt/other_pt/anonym); 3E8 (-F animal_mt)")
parser.add_argument("--fast", dest="fast_strategy",
help="=\"-R 10 -t 4 -J 5 -M 7 --max-n-words 3E7 --larger-auto-ws "
"--disentangle-time-limit 360\"")
parser.add_argument("-k", dest="spades_kmer", default="21,55,85,115",
help="SPAdes kmer settings. Default: %(default)s")
parser.add_argument("-t", dest="threads", type=int, default=1,
help="Maximum threads to use. Default: %(default)s")
parser.add_argument("-P", dest="pre_grouped", default=int(2E5), help="Pre-grouping value. Default: %(default)s")
parser.add_argument("-v", "--version", action="version",
version="GetOrganelle v{version}".format(version=version))
parser.add_argument("-h", dest="simple_help", default=False, action="store_true",
help="print brief introduction for frequently-used options.")
parser.add_argument("--help", dest="verbose_help", default=False, action="store_true",
help="print verbose introduction for all options.")
parser.print_help()
sys.stdout.write("\n")
exit()
else:
# verbose help mode
# group 1
group_inout = parser.add_argument_group("IN-OUT OPTIONS", "Options on inputs and outputs")
# group_inout = OptionGroup(parser, "IN-OUT OPTIONS", "Options on inputs and outputs")
group_inout.add_argument("-1", dest="fq_file_1",
help="Input file with forward paired-end reads (format: fastq/fastq.gz/fastq.tar.gz).")
group_inout.add_argument("-2", dest="fq_file_2",
help="Input file with reverse paired-end reads (format: fastq/fastq.gz/fastq.tar.gz).")
group_inout.add_argument("-u", dest="unpaired_fq_files",
help="Input file(s) with unpaired (single-end) reads (format: fastq/fastq.gz/fastq.tar.gz). "
"files could be comma-separated lists such as 'seq1.fq,seq2.fq'.")
group_inout.add_argument("-o", dest="output_base",
help="Output directory. Overwriting files if directory exists.")
group_inout.add_argument("-s", dest="seed_file", default=None,
help="Seed sequence(s). Input fasta format file as initial seed. "
"A seed sequence in GetOrganelle is only used for identifying initial "
"organelle reads. The assembly process is purely de novo. "
"Should be a list of files split by comma(s) on a multi-organelle mode, "
"with the same list length to organelle_type (followed by '-F'). "
"Default: '" + os.path.join(SEQ_DB_PATH, "*.fasta") + "' "
"(* depends on the value followed with flag '-F')")
group_inout.add_argument("-a", dest="anti_seed",
help="Anti-seed(s). Not suggested unless what you really know what you are doing. "
"Input fasta format file as anti-seed, where the extension process "
"stop. Typically serves as excluding plastid reads when extending mitochondrial "
"reads, or the other way around. You should be cautious about using this option, "
"because if the anti-seed includes some word in the target but not in the seed, "
"the result would have gaps. For example, use the embplant_mt and embplant_pt "
"from the same plant-species as seed and anti-seed.")
group_inout.add_argument("--max-reads", dest="maximum_n_reads", type=float, default=1.5E7,
help="Hard bound for maximum number of reads to be used per file. "
"A input larger than " + str(
READ_LINE_TO_INF) + " will be treated as infinity (INF). "
"Default: 1.5E7 (-F embplant_pt/embplant_nr/fungus_mt/fungus_nr); "
"7.5E7 (-F embplant_mt/other_pt/anonym); 3E8 (-F animal_mt)")
group_inout.add_argument("--reduce-reads-for-coverage", dest="reduce_reads_for_cov", type=float, default=500,
help="Soft bound for maximum number of reads to be used according to "
"target-hitting base coverage. "
"If the estimated target-hitting base coverage is too high and "
"over this VALUE, GetOrganelle automatically reduce the number of reads to "
"generate a final assembly with base coverage close to this VALUE. "
"This design could greatly save computational resources in many situations. "
"A mean base coverage over 500 is extremely sufficient for most cases. "
"This VALUE must be larger than 10. Set this VALUE to inf to disable reducing. "
"Default: %(default)s.")
group_inout.add_argument("--max-ignore-percent", dest="maximum_ignore_percent", type=float, default=0.01,
help="The maximum percent of bases to be ignore in extension, due to low quality. "
"Default: %(default)s")
group_inout.add_argument("--phred-offset", dest="phred_offset", default=-1, type=int,
help="Phred offset for spades-hammer. Default: GetOrganelle-autodetect")
group_inout.add_argument("--min-quality-score", dest="min_quality_score", type=int, default=1,
help="Minimum quality score in extension. This value would be automatically decreased "
"to prevent ignoring too much raw data (see --max-ignore-percent)."
"Default: %(default)s ('\"' in Phred+33; 'A' in Phred+64/Solexa+64)")
group_inout.add_argument("--prefix", dest="prefix", default="",
help="Add extra prefix to resulting files under the output directory.")
group_inout.add_argument("--out-per-round", dest="fg_out_per_round", action="store_true", default=False,
help="Enable output per round. Choose to save memory but cost more time per round.")
group_inout.add_argument("--zip-files", dest="zip_files", action="store_true", default=False,
help="Choose to compress fq/sam files using gzip.")
group_inout.add_argument("--keep-temp", dest="keep_temp_files", action="store_true", default=False,
help="Choose to keep the running temp/index files.")
group_inout.add_argument("--config-dir", dest="get_organelle_path", default=None,
help="The directory where the configuration file and default databases were placed. "
"The default value also can be changed by adding 'export GETORG_PATH=your_favor' "
"to the shell script (e.g. ~/.bash_profile or ~/.bashrc) "
"Default: " + GO_PATH)
# group 2
group_scheme = parser.add_argument_group("SCHEME OPTIONS", "Options on running schemes.")
group_scheme.add_argument("-F", dest="organelle_type",
help="This flag should be followed with embplant_pt (embryophyta plant plastome), "
"other_pt (non-embryophyta plant plastome), embplant_mt "
"(plant mitogenome), embplant_nr (plant nuclear ribosomal RNA), animal_mt "
"(animal mitogenome), fungus_mt (fungus mitogenome), "
"fungus_nr (fungus nuclear ribosomal RNA)"
"or embplant_mt,other_pt,fungus_mt "
"(the combination of any of above organelle genomes split by comma(s), "
"which might be computationally more intensive than separate runs), "
"or anonym (uncertain organelle genome type). "
"The anonym should be used with customized seed and label databases "
"('-s' and '--genes'). "
"For easy usage and compatibility of old versions, following redirection "
"would be automatically fulfilled without warning:\t"
"\nplant_cp->embplant_pt; plant_pt->embplant_pt; "
"\nplant_mt->embplant_mt; plant_nr->embplant_nr")
group_scheme.add_argument("--fast", dest="fast_strategy", default=False, action="store_true",
help="=\"-R 10 -t 4 -J 5 -M 7 --max-n-words 3E7 --larger-auto-ws "
"--disentangle-time-limit 360\" "
"This option is suggested for homogeneously and highly covered data (very fine data). "
"You can overwrite the value of a specific option listed above by adding "
"that option along with the \"--fast\" flag. "
"You could try GetOrganelle with this option for a list of samples and run a second "
"time without this option for the rest with incomplete results. ")
group_scheme.add_argument("--memory-save", dest="memory_save", default=False, action="store_true",
help="=\"--out-per-round -P 0 --remove-duplicates 0\" "
"You can overwrite the value of a specific option listed above by adding "
"that option along with the \"--memory-save\" flag. A larger '-R' value is suggested "
"when \"--memory-save\" is chosen.")
group_scheme.add_argument("--memory-unlimited", dest="memory_unlimited", default=False, action="store_true",
help="=\"-P 1E7 --index-in-memory --remove-duplicates 2E8 "
"--min-quality-score -5 --max-ignore-percent 0\" "
"You can overwrite the value of a specific option listed above by adding "
"that option along with the \"--memory-unlimited\" flag. ")
# group 3
group_extending = parser.add_argument_group("EXTENDING OPTIONS",
"Options on the performance of extending process")
group_extending.add_argument("-w", dest="word_size", type=float,
help="Word size (W) for pre-grouping (if not assigned by '--pre-w') and extending "
"process. This script would try to guess (auto-estimate) a proper W "
"using an empirical function based on average read length, reads quality, "
"target genome coverage, and other variables that might influence the extending "
"process. You could assign the ratio (1>input>0) of W to "
"read_length, based on which this script would estimate the W for you; "
"or assign an absolute W value (read length>input>=35). Default: auto-estimated.")
group_extending.add_argument("--pre-w", dest="pregroup_word_size", type=float,
help="Word size (W) for pre-grouping. Used to reproduce result when word size is "
"a certain value during pregrouping process and later changed during reads "
"extending process. Similar to word size. Default: the same to word size.")
group_extending.add_argument("-R", "--max-rounds", dest="max_rounds", type=int, # default=inf,
help="Maximum number of extending rounds (suggested: >=2). "
"Default: 15 (-F embplant_pt), 30 (-F embplant_mt/other_pt), "
"10 (-F embplant_nr/animal_mt/fungus_mt/fungus_nr), inf (-P 0).")
group_extending.add_argument("--max-n-words", dest="maximum_n_words", type=float, default=4E8,
help="Maximum number of words to be used in total."
"Default: 4E8 (-F embplant_pt), 2E8 (-F embplant_nr/fungus_mt/fungus_nr/animal_mt), "
"2E9 (-F embplant_mt/other_pt)")
group_extending.add_argument("-J", dest="jump_step", type=int, default=3,
help="The length of step for checking words in reads during extending process "
"(integer >= 1). When you have reads of high quality, the larger the number is, "
"the faster the extension will be, "
"the more risk of missing reads in low coverage area. "
"Choose 1 to choose the slowest but safest extension strategy. Default: %(default)s")
group_extending.add_argument("-M", dest="mesh_size", type=int, default=2,
help="(Beta parameter) "
"The length of step for building words from seeds during extending process "
"(integer >= 1). When you have reads of high quality, the larger the number is, "
"the faster the extension will be, "
"the more risk of missing reads in low coverage area. "
"Another usage of this mesh size is to choose a larger mesh size coupled with a "
"smaller word size, which makes smaller word size feasible when memory is limited."
"Choose 1 to choose the slowest but safest extension strategy. Default: %(default)s")
group_extending.add_argument("--bowtie2-options", dest="bowtie2_options", default="--very-fast -t",
help="Bowtie2 options, such as '--ma 3 --mp 5,2 --very-fast -t'. Default: %(default)s.")
group_extending.add_argument("--larger-auto-ws", dest="larger_auto_ws", default=False, action="store_true",
help="By using this flag, the empirical function for estimating W would tend to "
"produce a relative larger W, which would speed up the matching in extending, "
"reduce the memory cost in extending, but increase the risk of broken final "
"graph. Suggested when the data is good with high and homogenous coverage.")
mixed_organelles = ("other_pt", "embplant_mt", "fungus_mt")
group_extending.add_argument("--target-genome-size", dest="target_genome_size", default='130000', type=str,
help="Hypothetical value(s) of target genome size. This is only used for estimating "
"word size when no '-w word_size' is given. "
"Should be a list of INTEGER numbers split by comma(s) on a multi-organelle mode, "
"with the same list length to organelle_type (followed by '-F'). "
"Default: " +
" or ".join(
[str(
ORGANELLE_EXPECTED_GRAPH_SIZES[this_type]) + " (-F " + this_type + ")"
for this_type in SUPPORTED_ORGANELLE_TYPES]) + " or " +
",".join([str(ORGANELLE_EXPECTED_GRAPH_SIZES[this_type])
for this_type in mixed_organelles]) +
" (-F " + ",".join(mixed_organelles) + ")")
group_extending.add_argument("--max-extending-len", dest="max_extending_len", type=str,
help="Maximum extending length(s) derived from the seed(s). "
"A single value could be a non-negative number, or inf (infinite) "
"or auto (automatic estimation). "
"This is designed for properly stopping the extending from getting too long and "
"saving computational resources. However, empirically, a maximum extending length "
"value larger than 6000 would not be helpful for saving computational resources. "
"This value would not be precise in controlling output size, especially "
"when pre-group (followed by '-P') is turn on."
"In the auto mode, the maximum extending length is estimated based on the sizes of "
"the gap regions that not covered in the seed sequences. A sequence of a closely "
"related species would be preferred for estimating a better maximum extending "
"length value. If you are using limited loci, e.g. rbcL gene as the seed for "
"assembling the whole plastome (with extending length ca. 75000 >> 6000), "
"you should set maximum extending length to inf. "
"Should be a list of numbers/auto/inf split by comma(s) on a multi-organelle mode, "
"with the same list length to organelle_type (followed by '-F'). "
"Default: inf. ")
# group 4
group_assembly = parser.add_argument_group("ASSEMBLY OPTIONS", "These options are about the assembly and "
"graph disentangling")
group_assembly.add_argument("-k", dest="spades_kmer", default="21,55,85,115",
help="SPAdes kmer settings. Use the same format as in SPAdes. illegal kmer values "
"would be automatically discarded by GetOrganelle. "
"Default: %(default)s")
group_assembly.add_argument("--spades-options", dest="other_spades_options", default="",
help="Other SPAdes options. Use double quotation marks to include all "
"the arguments and parameters.")
group_assembly.add_argument("--no-spades", dest="run_spades", action="store_false", default=True,
help="Disable SPAdes.")
group_assembly.add_argument("--ignore-k", dest="ignore_kmer_res", default=40, type=int,
help="A kmer threshold below which, no slimming/disentangling would be executed"
" on the result. Default: %(default)s")
group_assembly.add_argument("--genes", dest="genes_fasta",
help="Followed with a customized database (a fasta file or the base name of a "
"blast database) containing or made of ONE set of protein coding genes "
"and ribosomal RNAs extracted from ONE reference genome that you want to assemble. "
"Should be a list of databases split by comma(s) on a multi-organelle mode, "
"with the same list length to organelle_type (followed by '-F'). "
"This is optional for any organelle mentioned in '-F' but required for 'anonym'. "
"By default, certain database(s) in " + str(LBL_DB_PATH) + " would be used "
"contingent on the organelle types chosen (-F). "
"The default value become invalid when '--genes' or '--ex-genes' is used.")
group_assembly.add_argument("--ex-genes", dest="exclude_genes",
help="This is optional and Not suggested, since non-target contigs could contribute "
"information for better downstream coverage-based clustering. "
"Followed with a customized database (a fasta file or the base name of a "
"blast database) containing or made of protein coding genes "
"and ribosomal RNAs extracted from reference genome(s) that you want to exclude. "
"Could be a list of databases split by comma(s) but "
"NOT required to have the same list length to organelle_type (followed by '-F'). "
"The default value will become invalid when '--genes' or '--ex-genes' is used.")
group_assembly.add_argument("--disentangle-df", dest="disentangle_depth_factor", default=10.0, type=float,
help="Depth factor for differentiate genome type of contigs. "
"The genome type of contigs are determined by blast. "
"Default: %(default)s")
group_assembly.add_argument("--contamination-depth", dest="contamination_depth", default=3., type=float,
help="Depth factor for confirming contamination in parallel contigs. Default: %(default)s")
group_assembly.add_argument("--contamination-similarity", dest="contamination_similarity", default=0.9,
type=float,
help="Similarity threshold for confirming contaminating contigs. Default: %(default)s")
group_assembly.add_argument("--no-degenerate", dest="degenerate", default=True, action="store_false",
help="Disable making consensus from parallel contig based on nucleotide degenerate table.")
group_assembly.add_argument("--degenerate-depth", dest="degenerate_depth", default=1.5, type=float,
help="Depth factor for confirming parallel contigs. Default: %(default)s")
group_assembly.add_argument("--degenerate-similarity", dest="degenerate_similarity", default=0.98, type=float,
help="Similarity threshold for confirming parallel contigs. Default: %(default)s")
group_assembly.add_argument("--disentangle-time-limit", dest="disentangle_time_limit", default=1800, type=int,
help="Time limit (second) for each try of disentangling a graph file as a circular "
"genome. Disentangling a graph as contigs is not limited. Default: %(default)s")
group_assembly.add_argument("--expected-max-size", dest="expected_max_size", default='250000', type=str,
help="Expected maximum target genome size(s) for disentangling. "
"Should be a list of INTEGER numbers split by comma(s) on a multi-organelle mode, "
"with the same list length to organelle_type (followed by '-F'). "
"Default: 250000 (-F embplant_pt/fungus_mt), "
"25000 (-F embplant_nr/animal_mt/fungus_nr), 1000000 (-F embplant_mt/other_pt),"
"1000000,1000000,250000 (-F other_pt,embplant_mt,fungus_mt)")
group_assembly.add_argument("--expected-min-size", dest="expected_min_size", default=10000, type=str,
help="Expected minimum target genome size(s) for disentangling. "
"Should be a list of INTEGER numbers split by comma(s) on a multi-organelle mode, "
"with the same list length to organelle_type (followed by '-F'). "
"Default: %(default)s for all.")
group_assembly.add_argument("--reverse-lsc", dest="reverse_lsc", default=False, action="store_true",
help="For '-F embplant_pt' with complete circular result, "
"by default, the direction of the starting contig (usually "
"the LSC region) is determined as the direction with less ORFs. Choose this option "
"to reverse the direction of the starting contig when result is circular. "
"Actually, both directions are biologically equivalent to each other. The "
"reordering of the direction is only for easier downstream analysis.")
group_assembly.add_argument("--max-paths-num", dest="max_paths_num", default=1000, type=int,
help="Repeats would dramatically increase the number of potential isomers (paths). "
"This option was used to export a certain amount of paths out of all possible paths "
"per assembly graph. Default: %(default)s")
# group 5
group_computational = parser.add_argument_group("ADDITIONAL OPTIONS", "")
group_computational.add_argument("-t", dest="threads", type=int, default=1,
help="Maximum threads to use.")
group_computational.add_argument("-P", dest="pre_grouped", type=float, default=2E5,
help="The maximum number (integer) of high-covered reads to be pre-grouped "
"before extending process. pre_grouping is suggested when the whole genome "
"coverage is shallow but the organ genome coverage is deep. "
"The default value is 2E5. "
"For personal computer with 8G memory, we suggest no more than 3E5. "
"A larger number (ex. 6E5) would run faster but exhaust memory "
"in the first few minutes. Choose 0 to disable this process.")
group_computational.add_argument("--which-blast", dest="which_blast", default="",
help="Assign the path to BLAST binary files if not added to the path. "
"Default: try \"" + os.path.realpath(GO_DEP_PATH) +
"/ncbi-blast\" first, then $PATH")
group_computational.add_argument("--which-bowtie2", dest="which_bowtie2", default="",
help="Assign the path to Bowtie2 binary files if not added to the path. "
"Default: try \"" + os.path.realpath(GO_DEP_PATH) +
"/bowtie2\" first, then $PATH")
group_computational.add_argument("--which-spades", dest="which_spades", default="",
help="Assign the path to SPAdes binary files if not added to the path. "
"Default: try \"" + os.path.realpath(GO_DEP_PATH) +
"/SPAdes\" first, then $PATH")
group_computational.add_argument("--which-bandage", dest="which_bandage", default="",
help="Assign the path to bandage binary file if not added to the path. "
"Default: try $PATH")
group_computational.add_argument("--continue", dest="script_resume", default=False, action="store_true",
help="Several check points based on files produced, rather than on the log file, "
"so keep in mind that this script will NOT detect the difference "
"between this input parameters and the previous ones.")
group_computational.add_argument("--overwrite", dest="script_overwrite", default=False, action="store_true",
help="Overwrite previous file if existed. ")
group_computational.add_argument("--index-in-memory", dest="index_in_memory", action="store_true",
default=False,
help="Keep index in memory. Choose save index in memory than in disk.")
group_computational.add_argument("--remove-duplicates", dest="rm_duplicates", default=1E7, type=float,
help="By default this script use unique reads to extend. Choose the number of "
"duplicates (integer) to be saved in memory. A larger number (ex. 2E7) would "
"run faster but exhaust memory in the first few minutes. "
"Choose 0 to disable this process. "
"Note that whether choose or not will not disable "
"the calling of replicate reads. Default: %(default)s.")
group_computational.add_argument("--flush-step", dest="echo_step", default=54321,
help="Flush step (INTEGER OR INF) for presenting progress. "
"For running in the background, you could set this to inf, "
"which would disable this. Default: %(default)s")
group_computational.add_argument("--random-seed", dest="random_seed", default=12345, type=int,
help="Default: %(default)s")
group_computational.add_argument("--verbose", dest="verbose_log", action="store_true", default=False,
help="Verbose output. Choose to enable verbose running log_handler.")
parser.add_argument("-v", "--version", action="version",
version="GetOrganelle v{version}".format(version=version))
parser.add_argument("-h", dest="simple_help", default=False, action="store_true",
help="print brief introduction for frequently-used options.")
parser.add_argument("--help", dest="verbose_help", default=False, action="store_true",
help="print verbose introduction for all options.")
if "--help" in sys.argv:
parser.print_help()
exit()
# if "--help" in sys.argv:
# parser.add_option_group(group_inout)
# parser.add_option_group(group_scheme)
# parser.add_option_group(group_extending)
# parser.add_option_group(group_assembly)
# parser.add_option_group(group_computational)
#
# elif "-h" in sys.argv:
# for not_often_used in ("-a", "--max-ignore-percent", "--reduce-reads-for-coverage", "--phred-offset",
# "--min-quality-score", "--prefix", "--out-per-round", "--zip-files", "--keep-temp",
# "--config-dir",
# "--memory-save", "--memory-unlimited", "--pre-w", "--max-n-words",
# "-J", "-M", "--bowtie2-options",
# "--larger-auto-ws", "--target-genome-size", "--spades-options", "--no-spades",
# "--ignore-k", "--genes", "--ex-genes", "--disentangle-df",
# "--contamination-depth", "--contamination-similarity", "--no-degenerate",
# "--degenerate-depth", "--degenerate-similarity", "--disentangle-time-limit",
# "--expected-max-size", "--expected-min-size", "--reverse-lsc", "--max-paths-num",
# "--which-blast", "--which-bowtie2", "--which-spades", "--which-bandage",
# "--continue", "--overwrite", "--index-in-memory",
# "--remove-duplicates", "--flush-step", "--verbose"):
# parser.remove_option(not_often_used)
#
# else:
# parser.add_option_group(group_inout)
# parser.add_option_group(group_scheme)
# parser.add_option_group(group_extending)
# parser.add_option_group(group_assembly)
# parser.add_option_group(group_computational)
# redirect organelle types before parsing arguments
redirect_organelle_types = {"plant_cp": "embplant_pt",
"plant_pt": "embplant_pt",
"plant_mt": "embplant_mt",
"plant_nr": "embplant_nr"}
for go_arg, candidate_arg in enumerate(sys.argv):
if go_arg > 1 and sys.argv[go_arg - 1] in {"-F", "-E"}:
if candidate_arg in redirect_organelle_types:
sys.argv[go_arg] = redirect_organelle_types[candidate_arg]
elif "," in candidate_arg:
new_arg = []
for sub_arg in candidate_arg.split(","):
if sub_arg in redirect_organelle_types:
new_arg.append(redirect_organelle_types[sub_arg])
else:
new_arg.append(sub_arg)
sys.argv[go_arg] = ",".join(new_arg)
#
try:
options = parser.parse_args()
except Exception as e:
sys.stderr.write("\n############################################################################\n" + str(e))
sys.stderr.write("\n\"-h\" for more usage\n")
exit()
else:
# if pos_args:
# sys.stderr.write("\n############################################################################"
# "\nUnrecognized options: " + "\", \"".join(pos_args) + "\n")
# exit()
if not ((options.fq_file_1 and options.fq_file_2) or options.unpaired_fq_files):
sys.stderr.write("\n############################################################################"
"\nERROR: Insufficient arguments!\n")
sys.stderr.write("Missing/Illegal input reads file(s) (followed after '-1&-2' and/or '-u')!\n")
exit()
if not options.output_base:
sys.stderr.write("\n############################################################################"
"\nERROR: Insufficient arguments!\n")
sys.stderr.write("Missing option: output directory (followed after '-o')!\n")
exit()
if not options.organelle_type:
sys.stderr.write("\n############################################################################"
"\nERROR: Insufficient arguments!\n")
sys.stderr.write("Missing option: organelle type (followed after '-F')!\n")
exit()
else:
options.organelle_type = options.organelle_type.split(",")
if int(bool(options.fq_file_1)) + int(bool(options.fq_file_2)) == 1:
sys.stderr.write("\n############################################################################"
"\nERROR: unbalanced paired reads!\n\n")
exit()
global _GO_PATH, _LBL_DB_PATH, _SEQ_DB_PATH
if options.get_organelle_path:
_GO_PATH = os.path.expanduser(options.get_organelle_path)
if os.path.isdir(_GO_PATH):
_LBL_DB_PATH = os.path.join(_GO_PATH, LBL_NAME)
_SEQ_DB_PATH = os.path.join(_GO_PATH, SEQ_NAME)
else:
sys.stderr.write("\n############################################################################"
"\nERROR: path " + _GO_PATH + " invalid!\n")
exit()
def _check_default_db(this_sub_organelle, extra_type=""):
if not ((os.path.isfile(os.path.join(_LBL_DB_PATH, this_sub_organelle + ".fasta")) or options.genes_fasta)
and
(os.path.isfile(os.path.join(_SEQ_DB_PATH, this_sub_organelle + ".fasta")) or options.seed_file)):
sys.stderr.write("\n############################################################################"
"\nERROR: default " + this_sub_organelle + "," * int(bool(extra_type)) + extra_type +
" database not added yet!\n"
"\nInstall it by: get_organelle_config.py -a " + this_sub_organelle +
"," * int(bool(extra_type)) + extra_type +
"\nor\nInstall all types by: get_organelle_config.py -a all\n")
exit()
for sub_organelle_t in options.organelle_type:
if sub_organelle_t not in {"embplant_pt", "other_pt", "embplant_mt", "embplant_nr", "animal_mt",
"fungus_mt", "fungus_nr", "anonym"}:
sys.stderr.write("\n############################################################################"
"\nERROR: \"-F\" MUST be one of 'embplant_pt', 'other_pt', 'embplant_mt', "
"'embplant_nr', 'animal_mt', 'fungus_mt', 'fungus_nr', 'anonym', "
"or a combination of above split by comma(s)!\n\n")
exit()
elif sub_organelle_t == "anonym":
if not options.seed_file or not options.genes_fasta:
sys.stderr.write("\n############################################################################"
"\nERROR: \"-s\" and \"--genes\" must be specified when \"-F anonym\"!\n\n")
exit()
else:
if sub_organelle_t in ("embplant_pt", "embplant_mt"):
for go_t, check_sub in enumerate(["embplant_pt", "embplant_mt"]):
_check_default_db(check_sub, ["embplant_pt", "embplant_mt"][not go_t])
else:
_check_default_db(sub_organelle_t)
organelle_type_len = len(options.organelle_type)
if not options.seed_file:
use_default_seed = True
options.seed_file = [os.path.join(_SEQ_DB_PATH, sub_o + ".fasta") for sub_o in options.organelle_type]
else:
use_default_seed = False
options.seed_file = str(options.seed_file).split(",")
if len(options.seed_file) != organelle_type_len:
sys.stderr.write("\n############################################################################"
"\nERROR: -F is followed with " + str(organelle_type_len) + " organelle types, " +
"while -s is followed with " + str(len(options.seed_file)) + " file(s)!\n")
exit()
for check_file in [options.fq_file_1, options.fq_file_2, options.anti_seed] + options.seed_file:
if check_file:
if not os.path.exists(check_file):
sys.stderr.write("\n############################################################################"
"\nERROR: " + check_file + " not found!\n\n")
exit()
if os.path.getsize(check_file) == 0:
sys.stderr.write("\n############################################################################"
"\nERROR: " + check_file + " is empty!\n\n")
exit()
if options.unpaired_fq_files:
options.unpaired_fq_files = options.unpaired_fq_files.split(",")
for fastq_file in options.unpaired_fq_files:
if not os.path.exists(fastq_file):
sys.stderr.write("\n############################################################################"
"\nERROR: " + fastq_file + " not found!\n\n")
exit()
else:
options.unpaired_fq_files = []
if options.jump_step < 1:
sys.stderr.write("\n############################################################################"
"\nERROR: Jump step MUST be an integer that >= 1\n")
exit()
if options.mesh_size < 1:
sys.stderr.write("\n############################################################################"
"\nERROR: Mesh size MUST be an integer that >= 1\n")
exit()
if options.fq_file_1 == options.fq_file_2 and options.fq_file_1:
sys.stderr.write("\n############################################################################"
"\nERROR: 1st fastq file is the same with 2nd fastq file!\n")
exit()
if options.memory_save and options.memory_unlimited:
sys.stderr.write("\n############################################################################"
"\nERROR: \"--memory-save\" and \"--memory-unlimited\" are not compatible!\n")
assert options.threads > 0
if options.reduce_reads_for_cov < 10:
sys.stderr.write("\n############################################################################"
"\nERROR: value after \"--reduce-reads-for-coverage\" must be larger than 10!\n")
exit()
if options.echo_step == "inf":
options.echo_step = inf
elif type(options.echo_step) == int:
pass
elif type(options.echo_step) == str:
try:
options.echo_step = int(float(options.echo_step))
except ValueError:
sys.stderr.write("\n############################################################################"
"\n--flush-step should be followed by positive integer or inf!\n")
exit()
assert options.echo_step > 0
assert options.max_paths_num > 0
assert options.phred_offset in (-1, 64, 33)
assert options.script_resume + options.script_overwrite < 2, "'--overwrite' conflicts with '--continue'"
options.prefix = os.path.basename(options.prefix)
if os.path.isdir(options.output_base):
if options.script_resume:
previous_attributes = LogInfo(options.output_base, options.prefix).__dict__
else:
if options.script_overwrite:
try:
shutil.rmtree(options.output_base)
except OSError as e:
sys.stderr.write(
"\n############################################################################"
"\nRemoving existed " + options.output_base + " failed! "
"\nPlease manually remove it or use a new output directory!\n")
os.mkdir(options.output_base)
else:
sys.stderr.write("\n############################################################################"
"\n" + options.output_base + " existed! "
"\nPlease use a new output directory, or use '--continue'/'--overwrite'\n")
exit()
previous_attributes = {}
else:
options.script_resume = False
os.mkdir(options.output_base)
previous_attributes = {}
# if options.script_resume and os.path.isdir(options.output_base):
# previous_attributes = LogInfo(options.output_base, options.prefix).__dict__
# else:
# previous_attributes = {}
# options.script_resume = False
# if not os.path.isdir(options.output_base):
# os.mkdir(options.output_base)
# options.script_resume = False
log_handler = simple_log(logging.getLogger(), options.output_base, options.prefix + "get_org.")
log_handler.info("")
log_handler.info(description)
log_handler.info("Python " + str(sys.version).replace("\n", " "))
log_handler.info("PLATFORM: " + " ".join(platform.uname()))
# log versions of dependencies
lib_versions_info = []
lib_not_available = []
lib_versions_info.append("GetOrganelleLib " + GetOrganelleLib.__version__)
try:
import numpy
except ImportError:
lib_not_available.append("numpy")
else:
lib_versions_info.append("numpy " + numpy.__version__)
try:
import sympy
except ImportError:
lib_not_available.append("sympy")
else:
lib_versions_info.append("sympy " + sympy.__version__)
try:
import scipy
except ImportError:
lib_not_available.append("scipy")
else:
lib_versions_info.append("scipy " + scipy.__version__)
try:
import psutil
except ImportError:
pass
else:
lib_versions_info.append("psutil " + psutil.__version__)
log_handler.info("PYTHON LIBS: " + "; ".join(lib_versions_info))
options.which_bowtie2 = detect_bowtie2_path(options.which_bowtie2, GO_DEP_PATH)
if options.run_spades:
options.which_spades = detect_spades_path(options.which_spades, GO_DEP_PATH)
options.which_blast = detect_blast_path(options.which_blast, GO_DEP_PATH)
dep_versions_info = []
dep_versions_info.append(detect_bowtie2_version(options.which_bowtie2))
if options.run_spades:
dep_versions_info.append(detect_spades_version(options.which_spades))
dep_versions_info.append(detect_blast_version(options.which_blast))
if executable(os.path.join(options.which_bandage, "Bandage -v")):
dep_versions_info.append(detect_bandage_version(options.which_bandage))
log_handler.info("DEPENDENCIES: " + "; ".join(dep_versions_info))
# log database
log_handler.info("GETORG_PATH=" + _GO_PATH)
existing_seed_db, existing_label_db = get_current_db_versions(db_type="both", seq_db_path=_SEQ_DB_PATH,
lbl_db_path=_LBL_DB_PATH, silent=True)
if use_default_seed:
log_seed_types = deepcopy(options.organelle_type)
if "embplant_pt" in log_seed_types and "embplant_mt" not in log_seed_types:
log_seed_types.append("embplant_mt")
if "embplant_mt" in log_seed_types and "embplant_pt" not in log_seed_types:
log_seed_types.append("embplant_pt")
log_handler.info("SEED DB: " + single_line_db_versions(existing_seed_db, log_seed_types))
if not options.genes_fasta:
log_label_types = deepcopy(options.organelle_type)
if "embplant_pt" in log_label_types and "embplant_mt" not in log_label_types:
log_label_types.append("embplant_mt")
if "embplant_mt" in log_label_types and "embplant_pt" not in log_label_types:
log_label_types.append("embplant_pt")
log_handler.info("LABEL DB: " + single_line_db_versions(existing_label_db, log_label_types))
# working directory
log_handler.info("WORKING DIR: " + os.getcwd())
log_handler.info(" ".join(["\"" + arg + "\"" if " " in arg else arg for arg in sys.argv]) + "\n")
# if options.run_spades:
# space is forbidden for both spades and blast
for fq_file in [options.fq_file_1, options.fq_file_2] * int(bool(options.fq_file_1 and options.fq_file_2))\
+ options.unpaired_fq_files:
assert is_valid_path(os.path.basename(fq_file)), \
"Invalid characters (e.g. space, non-ascii) for SPAdes in file name: " + os.path.basename(fq_file)
for fq_file in [options.output_base, options.prefix]:
assert is_valid_path(os.path.realpath(fq_file)), \
"Invalid characters (e.g. space, non-ascii) for SPAdes in path: " + os.path.realpath(fq_file)
log_handler = timed_log(log_handler, options.output_base, options.prefix + "get_org.")
if options.word_size is None:
pass
elif 0 < options.word_size < 1:
pass
elif options.word_size >= GLOBAL_MIN_WS:
options.word_size = int(options.word_size)
else:
log_handler.error("Illegal word size (\"-w\") value!")
exit()
if options.pregroup_word_size:
if 0 < options.pregroup_word_size < 1:
pass
elif options.pregroup_word_size >= GLOBAL_MIN_WS:
options.pregroup_word_size = int(options.pregroup_word_size)
else:
log_handler.error("Illegal word size (\"--pre-w\") value!")
exit()
if options.fast_strategy:
if "-R" not in sys.argv and "--max-rounds" not in sys.argv:
options.max_rounds = 10
if "-t" not in sys.argv:
options.threads = 4
if "-J" not in sys.argv:
options.jump_step = 5
if "-M" not in sys.argv:
options.mesh_size = 7
if "--max-n-words" not in sys.argv:
options.maximum_n_words = 3E7
options.larger_auto_ws = True
if "--disentangle-time-limit" not in sys.argv:
options.disentangle_time_limit = 360
if options.memory_save:
if "-P" not in sys.argv:
options.pre_grouped = 0
if "--remove-duplicates" not in sys.argv:
options.rm_duplicates = 0
if options.memory_unlimited:
if "-P" not in sys.argv:
options.pre_grouped = 1E7
if "--remove-duplicates" not in sys.argv:
options.rm_duplicates = 2E8
if "--min-quality-score" not in sys.argv:
options.min_quality_score = -5
if "--max-ignore-percent" not in sys.argv:
options.maximum_ignore_percent = 0
# using the default
if "--max-reads" not in sys.argv:
if "embplant_mt" in options.organelle_type or "anonym" in options.organelle_type:
options.maximum_n_reads *= 5
elif "animal_mt" in options.organelle_type:
options.maximum_n_reads *= 20
if options.maximum_n_reads > READ_LINE_TO_INF:
options.maximum_n_reads = inf
else:
options.maximum_n_reads = int(options.maximum_n_reads)
if "--max-n-words" not in sys.argv:
if "embplant_mt" in options.organelle_type or "anonym" in options.organelle_type:
options.maximum_n_words *= 5
elif "embplant_nr" in options.organelle_type or "fungus_mt" in options.organelle_type or\
"fungus_nr" in options.organelle_type:
options.maximum_n_words /= 2
elif "animal_mt" in options.organelle_type:
options.maximum_n_words /= 2
if "--genes" not in sys.argv:
options.genes_fasta = [] # None] * organelle_type_len
else:
temp_val_len = len(str(options.genes_fasta).split(","))
if temp_val_len != organelle_type_len:
log_handler.error("-F is followed with " + str(organelle_type_len) + " organelle types, " +
"while --genes is followed with " + str(temp_val_len) + " value(s)!\n")
exit()
temp_vals = []
for sub_genes in str(options.genes_fasta).split(","):
# if sub_genes == "":
# temp_vals.append(sub_genes)
if not os.path.exists(sub_genes):
log_handler.error(sub_genes + " not found!")
exit()
else:
temp_vals.append(sub_genes)
options.genes_fasta = temp_vals
if "--ex-genes" not in sys.argv:
options.exclude_genes = []
else:
temp_vals = []
for sub_genes in str(options.exclude_genes).split(","):
if not (os.path.exists(sub_genes) or os.path.exists(remove_db_postfix(sub_genes) + ".nhr")):
log_handler.error(sub_genes + " not found!")
exit()
else:
temp_vals.append(sub_genes)
options.exclude_genes = temp_vals
if "--target-genome-size" not in sys.argv:
raw_default_value = int(str(options.target_genome_size))
options.target_genome_size = []
for go_t, sub_organelle_t in enumerate(options.organelle_type):
if sub_organelle_t == "embplant_mt":
options.target_genome_size.append(int(raw_default_value * 3))
elif sub_organelle_t == "fungus_mt":
options.target_genome_size.append(int(raw_default_value / 2))
elif sub_organelle_t in ("embplant_nr", "animal_mt", "fungus_nr"):
options.target_genome_size.append(int(raw_default_value / 10))
elif sub_organelle_t == "anonym":
ref_seqs = read_fasta(options.genes_fasta[go_t])[1]
options.target_genome_size.append(2 * sum([len(this_seq) for this_seq in ref_seqs]))
log_handler.info(
"Setting '--target-genome-size " + ",".join([str(t_s) for t_s in options.target_genome_size]) +
"' for estimating the word size value for anonym type.")
else:
options.target_genome_size.append(raw_default_value)
else:
temp_val_len = len(str(options.target_genome_size).split(","))
if temp_val_len != organelle_type_len:
log_handler.error("-F is followed with " + str(organelle_type_len) + " organelle types, " +
"while --target-genome-size is followed with " + str(temp_val_len) + " value(s)!\n")
exit()
try:
options.target_genome_size = [int(sub_size) for sub_size in str(options.target_genome_size).split(",")]
except ValueError:
log_handler.error("Invalid --target-genome-size value(s): " + str(options.target_genome_size))
exit()
if "--expected-max-size" not in sys.argv:
raw_default_value = int(str(options.expected_max_size))
options.expected_max_size = []
for got_t, sub_organelle_t in enumerate(options.organelle_type):
if sub_organelle_t == "embplant_pt":
options.expected_max_size.append(raw_default_value)
elif sub_organelle_t in ("embplant_mt", "other_pt"):
options.expected_max_size.append(int(raw_default_value * 4))
elif sub_organelle_t == "fungus_mt":
options.expected_max_size.append(raw_default_value)
elif sub_organelle_t in ("embplant_nr", "fungus_nr", "animal_mt"):
options.expected_max_size.append(int(raw_default_value / 10))
elif sub_organelle_t == "anonym":
ref_seqs = read_fasta(options.genes_fasta[got_t])[1]
options.expected_max_size.append(10 * sum([len(this_seq) for this_seq in ref_seqs]))
log_handler.info(
"Setting '--expected-max-size " + ",".join([str(t_s) for t_s in options.expected_max_size]) +
"' for estimating the word size value for anonym type.")
else:
temp_val_len = len(str(options.expected_max_size).split(","))
if temp_val_len != organelle_type_len:
log_handler.error("-F is followed with " + str(organelle_type_len) + " organelle types, " +
"while --expected-max-size is followed with " + str(temp_val_len) + " value(s)!\n")
exit()
try:
options.expected_max_size = [int(sub_size) for sub_size in str(options.expected_max_size).split(",")]
except ValueError:
log_handler.error("Invalid --expected-max-size value(s): " + str(options.expected_max_size))
exit()
if "--expected-min-size" not in sys.argv:
raw_default_value = int(str(options.expected_min_size))
options.expected_min_size = []
for sub_organelle_t in options.organelle_type:
options.expected_min_size.append(raw_default_value)
else:
temp_val_len = len(str(options.expected_min_size).split(","))
if temp_val_len != organelle_type_len:
log_handler.error("-F is followed with " + str(organelle_type_len) + " organelle types, " +
"while --expected-min-size is followed with " + str(temp_val_len) + " value(s)!\n")
exit()
try:
options.expected_min_size = [int(sub_size) for sub_size in str(options.expected_min_size).split(",")]
except ValueError:
log_handler.error("Invalid --expected-min-size value(s): " + str(options.expected_min_size))
exit()
if "--max-extending-len" not in sys.argv:
options.max_extending_len = [] # -1 means auto
for go_t, seed_f in enumerate(options.seed_file):
# using auto as the default when using default seed files
# if os.path.realpath(seed_f) == os.path.join(_SEQ_DB_PATH, options.organelle_type[go_t] + ".fasta"):
# options.max_extending_len.append(-1)
# else:
# options.max_extending_len.append(inf)
options.max_extending_len.append(inf)
else:
temp_val_len = len(str(options.max_extending_len).split(","))
if temp_val_len != organelle_type_len:
log_handler.error("-F is followed with " + str(organelle_type_len) + " organelle types, " +
"while --max-extending-len is followed with " + str(temp_val_len) + " value(s)!\n")
exit()
try:
options.max_extending_len = [-1 if sub_size == "auto" else float(sub_size)
for sub_size in str(options.max_extending_len).split(",")]
except ValueError:
log_handler.error("Invalid --max-extending-len value(s): " + str(options.max_extending_len))
exit()
for sub_organelle_t in options.organelle_type:
if sub_organelle_t in ("fungus_mt", "animal_mt", "anonym"):
global MAX_RATIO_RL_WS
MAX_RATIO_RL_WS = 0.8
break
if not executable(os.path.join(options.which_bowtie2, "bowtie2")):
log_handler.error(os.path.join(options.which_bowtie2, "bowtie2") + " not accessible!")
exit()
if not executable(os.path.join(options.which_bowtie2, "bowtie2-build") + " --large-index"):
log_handler.error(os.path.join(options.which_bowtie2, "bowtie2-build") + " not accessible!")
exit()
# if not executable(os.path.join(options.which_bowtie2, "bowtie2-build-l")):
# log_handler.error(os.path.join(options.which_bowtie2, "bowtie2-build-l") + " not accessible!")
# exit()
run_slim = False
run_disentangle = False
if options.run_spades:
if options.which_spades:
if not executable(os.path.join(options.which_spades, "spades.py -h")):
raise Exception("spades.py not found/executable in " + options.which_spades + "!")
else:
run_slim = True
run_disentangle = True
else:
options.which_spades = ""
if not executable("spades.py -h"):
log_handler.error("spades.py not found in the PATH. "
"Adding SPAdes binary dir to the PATH or using \"--which-spades\" to fix this. "
"Now only get the reads and skip assembly.")
options.run_spades = False
else:
run_slim = True
run_disentangle = True
if not executable(os.path.join(options.which_blast, "blastn")):
log_handler.error(os.path.join(options.which_blast, "blastn") +
" not accessible! Slimming/Disentangling disabled!!\n")
run_slim = False
run_disentangle = False
if options.genes_fasta and not executable(os.path.join(options.which_blast, "makeblastdb")):
log_handler.error(os.path.join(options.which_blast, "makeblastdb") +
" not accessible! Slimming/Disentangling disabled!!\n")
run_slim = False
run_disentangle = False
if lib_not_available:
log_handler.error("/".join(lib_not_available) + " not available! Disentangling disabled!!\n")
run_disentangle = False
options.rm_duplicates = int(options.rm_duplicates)
options.pre_grouped = int(options.pre_grouped)
if not options.rm_duplicates and options.pre_grouped:
log_handler.warning("removing duplicates was inactive, so that the pre-grouping was disabled.")
options.pre_grouped = False
if options.max_rounds and options.max_rounds < 1:
log_handler.warning("illegal maximum rounds! Set to infinite")
options.max_rounds = inf
if not options.max_rounds:
if not options.pre_grouped:
options.max_rounds = inf
else:
options.max_rounds = 1
for sub_organelle_t in options.organelle_type:
if sub_organelle_t in {"embplant_mt", "other_pt"}:
options.max_rounds = max(options.max_rounds, 30)
elif sub_organelle_t in {"embplant_nr", "animal_mt", "fungus_mt", "fungus_nr"}:
options.max_rounds = max(options.max_rounds, 10)
elif sub_organelle_t == "embplant_pt":
options.max_rounds = max(options.max_rounds, 15)
random.seed(options.random_seed)
try:
import numpy as np
except ImportError:
pass
else:
np.random.seed(options.random_seed)
return options, log_handler, previous_attributes, run_slim, run_disentangle
def estimate_maximum_n_reads_using_mapping(
        twice_max_coverage, check_dir, original_fq_list, reads_paired,
        maximum_n_reads_hard_bound, seed_files, organelle_types, in_customs, ex_customs, target_genome_sizes,
        keep_temp, resume, other_spades_opts,
        which_blast, which_spades, which_bowtie2, threads, random_seed, verbose_log, log_handler):
    """Estimate how many reads per input file are needed to cap organelle base coverage.

    Iteratively "tastes" a growing prefix of the input fastq files (check_num_line
    reads per round, multiplied by increase_checking_reads_by on each retry), maps
    that prefix to each seed with bowtie2, optionally pre-assembles the mapped reads
    to refine the base-coverage estimate, and extrapolates the full-data coverage.
    When the extrapolated coverage of the lowest-coverage target exceeds
    twice_max_coverage, the per-file read counts are reduced proportionally.

    :param twice_max_coverage: coverage ceiling used to decide whether/how much to reduce reads
    :param check_dir: scratch directory for the tasting rounds (removed unless keep_temp)
    :return: list of read counts, one per file in original_fq_list; each entry is a
        positive integer or inf (inf meaning "use all reads").
    """
    from GetOrganelleLib.sam_parser import MapRecords, get_cover_range
    # locate slim_graph.py: bundled utility dir first, then this script's dir, then $PATH
    if executable(os.path.join(UTILITY_PATH, "slim_graph.py -h")):
        which_slim = UTILITY_PATH
    elif executable(os.path.join(PATH_OF_THIS_SCRIPT, "slim_graph.py -h")):
        which_slim = PATH_OF_THIS_SCRIPT
    elif executable("slim_graph.py -h"):
        which_slim = ""
    else:
        which_slim = None
    # default: no reduction beyond the hard bound
    result_n_reads = [maximum_n_reads_hard_bound] * len(original_fq_list)
    data_maximum_n_reads = inf
    if not os.path.exists(check_dir):
        os.mkdir(check_dir)
    # number of reads tasted per file in the first round
    check_num_line = 100000
    # growth factor of the tasted prefix between rounds
    increase_checking_reads_by = 5
    # minimum organelle base coverage considered reliable for extrapolation
    min_valid_cov_to_estimate = 5.0
    # once this fraction of the data has been tasted, stop enlarging the prefix
    maximum_percent_worth_estimating = 0.1
    previous_file_sizes = [0] * len(original_fq_list)
    no_more_new_reads = [False] * len(original_fq_list)
    estimated_maximum_n_reads_list = [inf] * len(original_fq_list)
    # approximate uncompressed sizes (gz sizes are inflated by a guessed ratio)
    original_fq_sizes = [os.path.getsize(raw_fq) * GUESSING_FQ_GZIP_COMPRESSING_RATIO
                         if raw_fq.endswith(".gz") else os.path.getsize(raw_fq)
                         for raw_fq in original_fq_list]
    # make paired equal size estimation if compressed
    if reads_paired and original_fq_list[0].endswith(".gz") and original_fq_list[1].endswith(".gz") and \
            abs(log(float(original_fq_sizes[0])/original_fq_sizes[1])) < log(1.3):
        original_fq_sizes[0] = original_fq_sizes[1] = (original_fq_sizes[0] + original_fq_sizes[1]) /2.
    # if the original data sizes is too small, no need to reduce
    max_organelle_base_percent = 0.2
    for go_t, organelle_type in enumerate(organelle_types):
        # temporary treat: compatible with previous
        if organelle_type in ORGANELLE_EXPECTED_GRAPH_SIZES:
            min_file_size = ORGANELLE_EXPECTED_GRAPH_SIZES[organelle_type] * twice_max_coverage \
                            / max_organelle_base_percent * GUESSING_FQ_SEQ_INFLATE_TO_FILE
        else:
            min_file_size = target_genome_sizes[go_t] * twice_max_coverage \
                            / max_organelle_base_percent * GUESSING_FQ_SEQ_INFLATE_TO_FILE
        if sum(original_fq_sizes) < min_file_size:
            # data too small for any organelle target: return the defaults unchanged
            if not keep_temp:
                try:
                    shutil.rmtree(check_dir)
                except OSError:
                    log_handler.warning("Removing temporary directory " + check_dir + " failed.")
            return result_n_reads
    #
    count_round = 1
    while count_round == 1 or check_num_line < min(maximum_n_reads_hard_bound, data_maximum_n_reads):
        if check_num_line > READ_LINE_TO_INF:
            # prefix grew beyond the representable bound: treat counts as unlimited
            return [inf] * len(original_fq_list)
        log_handler.info("Tasting " + "+".join([str(check_num_line)] * len(original_fq_list)) + " reads ...")
        this_check_dir = os.path.join(check_dir, str(count_round))
        if not os.path.exists(this_check_dir):
            os.mkdir(this_check_dir)
        check_fq_files = []
        check_percents = []
        # extract the first check_num_line reads (4 lines each) of every input file
        for f_id, r_file in enumerate(original_fq_list):
            check_fq = os.path.join(this_check_dir, "check_" + str(f_id + 1))
            if not (os.path.exists(check_fq) and resume):
                if r_file.endswith(".gz"):
                    unzip(r_file, check_fq, 4 * check_num_line, verbose_log, log_handler if verbose_log else None)
                else:
                    os.system("head -n " + str(int(4 * check_num_line)) + " " + r_file + " > " + check_fq + ".temp")
                    os.rename(check_fq + ".temp", check_fq)
            check_f_size = os.path.getsize(check_fq)
            if check_f_size == 0:
                raise ValueError("Empty file" + check_fq + "\n"
                                 "Please check the legality and integrity of your input reads!\n")
            if check_f_size == previous_file_sizes[f_id]:
                # prefix stopped growing: the whole file has been consumed
                no_more_new_reads[f_id] = True
                check_percents.append(1)
                tmp_line = 0
                with open(check_fq) as counter:
                    for foo in counter:
                        tmp_line += 1
                # fastq: 4 lines per read
                estimated_maximum_n_reads_list[f_id] = int(tmp_line / 4)
            else:
                # extrapolate the total read count from the tasted fraction of the file size
                check_percents.append(min(float(check_f_size) / original_fq_sizes[f_id], 1))
                estimated_maximum_n_reads_list[f_id] = int(check_num_line / check_percents[-1])
            check_fq_files.append(check_fq)
        count_round += 1
        data_maximum_n_reads = max(estimated_maximum_n_reads_list)
        go_next_run = False
        base_cov_of_all_organelles = []
        # map the tasted reads to each seed and estimate that organelle's base coverage
        for go_t, seed_f in enumerate(seed_files):
            organelle_type = organelle_types[go_t]
            # rebuild a sanitized seed copy unless a complete bowtie2 large index already exists
            if sum([os.path.exists(remove_db_postfix(seed_f) + ".index" + postfix)
                    for postfix in
                    (".1.bt2l", ".2.bt2l", ".3.bt2l", ".4.bt2l", ".rev.1.bt2l", ".rev.2.bt2l")]) != 6:
                new_seed_file = os.path.join(this_check_dir, os.path.basename(seed_f))
                check_fasta_seq_names(seed_f, new_seed_file)
                seed_f = new_seed_file
            bowtie_out_base = os.path.join(this_check_dir, organelle_type + ".check")
            mapped_fq = bowtie_out_base + ".fq"
            mapped_sam = bowtie_out_base + ".sam"
            map_with_bowtie2(
                seed_file=seed_f, original_fq_files=check_fq_files, bowtie_out=bowtie_out_base, resume=resume,
                threads=threads, random_seed=random_seed, generate_fq=True, target_echo_name="seed",
                log_handler=log_handler if verbose_log else None, verbose_log=verbose_log, silent=not verbose_log,
                which_bowtie2=which_bowtie2)
            seed_fq_size = os.path.getsize(mapped_fq)
            if not seed_fq_size:
                # no reads mapped to this seed at all
                if sum(no_more_new_reads) == len(no_more_new_reads):
                    if log_handler:
                        log_handler.error("No " + str(organelle_type) + " seed reads found!")
                        log_handler.error("Please check your raw data or change your " + str(organelle_type) + " seed!")
                else:
                    data_size_checked = [check_percents[go_f] * fq_size
                                         for go_f, fq_size in enumerate(original_fq_sizes)]
                    data_checked_percent = sum(data_size_checked) / float(sum(original_fq_sizes))
                    if data_checked_percent > maximum_percent_worth_estimating:
                        # tasted enough: record zero coverage and stop enlarging
                        base_cov_of_all_organelles.append(0.)
                        break
                    else:
                        # another run with more reads
                        go_next_run = True
                        break
            # per-position coverage over all mapped references
            mapping_records = MapRecords(mapped_sam)
            mapping_records.update_coverages()
            coverage_info = mapping_records.coverages
            coverages_2 = [pos for ref in coverage_info for pos in coverage_info[ref] if pos > 0]
            base_cov_values = get_cover_range(coverages_2, guessing_percent=BASE_COV_SAMPLING_PERCENT)
            mean_read_len, max_read_len, all_read_nums = \
                get_read_len_mean_max_count(mapped_fq, maximum_n_reads_hard_bound)
            # refine the estimate via a quick pre-assembly when SPAdes and bowtie2 are available
            if executable(os.path.join(which_spades, "spades.py -h")) and \
                    executable(os.path.join(which_bowtie2, "bowtie2")):
                try:
                    this_in = "" if not in_customs else in_customs[go_t]
                    this_ex = "" if not ex_customs else ex_customs[go_t]
                    base_cov_values = pre_assembly_mapped_reads_for_base_cov(
                        original_fq_files=check_fq_files, mapped_fq_file=mapped_fq, seed_fs_file=seed_f,
                        mean_read_len=mean_read_len, organelle_type=organelle_type,
                        in_custom=this_in, ex_custom=this_ex, threads=threads, resume=resume,
                        other_spades_opts=other_spades_opts,
                        which_spades=which_spades, which_slim=which_slim, which_blast=which_blast,
                        log_handler=log_handler if verbose_log else None, verbose_log=verbose_log)
                except NotImplementedError:
                    # pre-assembly failed entirely: keep the mapping-based estimate
                    pass
            if base_cov_values[1] < min_valid_cov_to_estimate:
                # coverage too low to trust: either give up on reduction or taste more reads
                data_size_checked = [check_percents[go_f] * fq_size
                                     for go_f, fq_size in enumerate(original_fq_sizes)]
                data_checked_percent = sum(data_size_checked) / float(sum(original_fq_sizes))
                if data_checked_percent > maximum_percent_worth_estimating:
                    base_cov_of_all_organelles.append(0.)
                    break
                else:
                    # another run with more reads
                    go_next_run = True
                    break
            else:
                base_cov_of_all_organelles.append(base_cov_values[1])
        if go_next_run:
            check_num_line *= increase_checking_reads_by
            continue
        # extrapolate the tasted coverage of the worst-covered organelle to the full data
        data_all_size = sum(original_fq_sizes)
        data_size_checked = [check_percents[go_f] * fq_size for go_f, fq_size in enumerate(original_fq_sizes)]
        data_checked_percent = sum(data_size_checked) / float(data_all_size)
        the_check_base_cov = min(base_cov_of_all_organelles)
        the_real_base_cov = the_check_base_cov / data_checked_percent
        if the_real_base_cov > twice_max_coverage:
            # scale read counts down so the projected coverage matches the ceiling
            reduce_ratio = twice_max_coverage / the_real_base_cov
            result_n_reads = [min(maximum_n_reads_hard_bound, math.ceil(real_num * reduce_ratio))
                              if real_num * reduce_ratio <= READ_LINE_TO_INF else inf
                              for real_num in estimated_maximum_n_reads_list]
        else:
            result_n_reads = [maximum_n_reads_hard_bound] * len(original_fq_list)
        break
    if not keep_temp:
        try:
            shutil.rmtree(check_dir)
        except OSError:
            log_handler.warning("Removing temporary directory " + check_dir + " failed.")
    return result_n_reads
def combination_res_log(all_choices_num, chosen_num):
    """Return the natural log of the binomial coefficient C(all_choices_num, chosen_num).

    Sums log((n - k + 1) / k) term by term (in the same descending order as before)
    instead of computing factorials, which avoids overflow for large inputs.
    """
    return sum(log(all_choices_num - ch_n + 1) - log(ch_n)
               for ch_n in range(chosen_num, 0, -1))
def trans_word_cov(word_cov, base_cov, mean_base_error_rate, read_length):
    """Inflate a target word coverage to compensate for sequencing errors.

    Estimates the fraction of words corrupted by base-call errors (binomial model
    over 1..~10x expected error sites per read) and raises word_cov so that the
    effective error-free word coverage still meets the target. The gain is damped
    (** 0.34) once the result would exceed half of the base coverage.
    """
    if mean_base_error_rate == 0.:
        return word_cov
    # accumulate the probability-weighted fraction of words rendered wrong by errors
    err_word_fraction = 0
    for err_sites in range(1, int(min(read_length * mean_base_error_rate * 10, read_length))):
        log_prob = combination_res_log(read_length, err_sites) \
                   + err_sites * log(mean_base_error_rate) \
                   + (read_length - err_sites) * log(1 - mean_base_error_rate)
        err_word_fraction += (1 - 2 ** (-err_sites)) * exp(log_prob)
    # if word size < read_len/2, wrong words percent decreases
    gain = word_cov / (1 - err_word_fraction) - word_cov
    half_base_cov = 0.5 * base_cov
    if word_cov > half_base_cov:
        return word_cov + gain ** 0.34
    if word_cov + gain > half_base_cov:
        return half_base_cov + (word_cov + gain - half_base_cov) ** 0.34
    return word_cov + gain
def estimate_word_size(base_cov, base_cov_deviation, read_length, target_size, mean_error_rate=0.015, log_handler=None,
                       max_discontinuous_prob=0.01, min_word_size=AUTO_MIN_WS, max_effective_word_cov=60,
                       wc_bc_ratio_constant=0.35, organelle_type=""):
    """Empirically estimate the extension word size ('-w') from coverage and read length.

    :param base_cov: estimated organelle base coverage
    :param base_cov_deviation: unused here (see note below)
    :param read_length: mean read length
    :param target_size: expected organelle genome size (used in the continuity bound)
    :param mean_error_rate: assumed per-base sequencing error rate
    :param log_handler: optional logger for the low-data warning
    :param max_discontinuous_prob: accepted probability of an assembly gap
    :param min_word_size: lower bound on the returned word size
    :param max_effective_word_cov: upper bound on the effective word coverage
    :param wc_bc_ratio_constant: empirical word-cov/base-cov ratio
    :param organelle_type: name used in the warning message only
    :return: estimated word size as an int, capped at read_length * MAX_RATIO_RL_WS
    """
    # base_cov_deviation cannot be well estimated and thus excluded from the estimation
    echo_problem = False
    # G: genome size, N: Number of reads from data, L: read length,
    # ## Poisson distribution
    # mean read cov = N/(G-L+1)
    # expected # reads starting within any specific interval of C consecutive nucleotides = (N/(G-L+1))*C
    # P(no read starts in the interval) = e^(-C*N/(G-L+1))
    # P(>=1 reads start in the interval) = 1-e^(-C*N/(G-L+1))
    # P(the interval is not continuous) = 1-(1-e^(-N/(G-L+1)))^C
    #
    # 1. The higher the base coverage is, the larger the word size should be. # to exclude unnecessary contigs.
    # 2. The longer the read length is, the larger the word size should be
    # 3. The higher the error rate is, the smaller the word size should be
    # empirical functions:
    word_cov = min(max_effective_word_cov, base_cov * wc_bc_ratio_constant)
    # min_word_cov = log(-1/(max_discontinuous_prob**(1/target_size) - 1))
    # numerically search the smallest word coverage keeping the discontinuity probability acceptable
    min_word_cov = 5
    while 1 - (1 - math.e ** (-min_word_cov)) ** target_size > max_discontinuous_prob:
        min_word_cov += 0.05
    # print(min_word_cov)
    #
    # cap min_word_cov when the data cannot supply it at the minimum word size
    wc_bc_ratio_max = 1 - (min_word_size - 1) / read_length
    if base_cov * wc_bc_ratio_max < min_word_cov:
        min_word_cov = base_cov * wc_bc_ratio_max
        echo_problem = True
    word_cov = max(min_word_cov, word_cov)
    # compensate for sequencing errors (half the mean error rate is used empirically)
    word_cov = trans_word_cov(word_cov, base_cov, mean_error_rate / 2., read_length)
    # 1. relationship between kmer coverage and base coverage, k_cov = base_cov * (read_len - k_len + 1) / read_len
    estimated_word_size = int(read_length * (1 - word_cov / base_cov)) + 1
    # print(estimated_word_size)
    estimated_word_size = min(int(read_length * MAX_RATIO_RL_WS), max(min_word_size, estimated_word_size))
    if echo_problem:
        if log_handler:
            log_handler.warning("Guessing that you are using too few data for assembling " + organelle_type + "!")
            log_handler.warning("GetOrganelle is still trying ...")
        else:
            sys.stdout.write("Guessing that you are using too few data for assembling " + organelle_type + "!\n")
            sys.stdout.write("GetOrganelle is still trying ...\n")
    return int(round(estimated_word_size, 0))
def calculate_word_size_according_to_ratio(word_size_ratio, mean_read_len, log_handler):
    """Convert a '-w' value that may be a ratio (<1) into an absolute word size.

    :param word_size_ratio: either a fraction (0-1) of the mean read length,
        or an absolute word size (>=1)
    :param mean_read_len: mean read length of the input data
    :param log_handler: logger used to report the chosen/clamped value
    :return: the absolute word size (int when derived from a ratio)
    """
    if word_size_ratio < 1:
        # interpret the value as a fraction of the mean read length
        new_word_size = int(round(word_size_ratio * mean_read_len, 0))
        if new_word_size < GLOBAL_MIN_WS:
            new_word_size = GLOBAL_MIN_WS
            # bug fix: report the offending ratio, not the already-clamped word size
            log_handler.warning("Too small ratio " + str(word_size_ratio) + ", setting '-w " + str(GLOBAL_MIN_WS) + "'")
        else:
            log_handler.info("Setting '-w " + str(new_word_size) + "'")
        return new_word_size
    else:
        # interpret the value as an absolute word size, capped at 90% of the mean read length
        max_ws = int(round(mean_read_len * 0.9))
        if word_size_ratio > max_ws:
            word_size_ratio = max_ws
            log_handler.warning("Too large word size for mean read length " + str(mean_read_len) +
                                ", setting '-w " + str(word_size_ratio) + "'")
        return word_size_ratio
def extend_with_constant_words(baits_pool, raw_fq_files, word_size, output, jump_step=3):
    """Filter fastq reads, keeping records that share at least one word with baits_pool.

    Slides across each read's sequence line in steps of jump_step and keeps the
    4-line record when any length-word_size slice is found in baits_pool. Results
    are written to `output + ".Temp"` first and renamed to `output` on completion.
    """
    with open(output + ".Temp", "w") as out_handler:
        for fq_f in raw_fq_files:
            with open(fq_f) as in_handler:
                head_line = in_handler.readline()
                while head_line:
                    # read the remaining three lines of the fastq record up front
                    seq_line = in_handler.readline()
                    plus_line = in_handler.readline()
                    qual_line = in_handler.readline()
                    seq_len = len(seq_line.strip())
                    hit = any(seq_line[shift: shift + word_size] in baits_pool
                              for shift in range(0, seq_len, jump_step))
                    if hit:
                        out_handler.writelines((head_line, seq_line, plus_line, qual_line))
                    head_line = in_handler.readline()
    os.rename(output + ".Temp", output)
def pre_assembly_mapped_reads_for_base_cov(
        original_fq_files, mapped_fq_file, seed_fs_file, mean_read_len, organelle_type, in_custom, ex_custom,
        threads, resume, other_spades_opts, which_spades, which_slim, which_blast,
        log_handler=None, verbose_log=False, keep_temp=False):
    """Pre-assemble seed-mapped reads with SPAdes to estimate organelle base coverage.

    Runs a single-kmer SPAdes assembly of the mapped reads, slims the resulting
    graph with slim_graph.py (when available) to keep target contigs, and converts
    the graph's kmer coverages into base coverages. If the first assembly fails,
    it retries once after recruiting more reads via word matching against the
    already-mapped reads and the seed.

    :return: a list of coverage values (as shown by the resume fallback, three
        entries; they come from get_graph_coverages_range_simple — callers use index 1)
    :raises NotImplementedError: when the (retried) assembly or its output is unusable
    """
    from GetOrganelleLib.assembly_parser import get_graph_coverages_range_simple
    # draft kmer: an odd value close to half the mean read length, capped at 45
    draft_kmer = min(45, int(mean_read_len / 2) * 2 - 3)
    # first-attempt workspace and graph paths
    this_modified_dir = os.path.realpath(mapped_fq_file) + ".spades"
    this_original_graph = os.path.join(this_modified_dir, "assembly_graph.fastg")
    this_modified_base = "assembly_graph.fastg.modified"
    this_modified_graph = this_original_graph + ".modified.fastg"
    # retry workspace (assembly from the enlarged read set)
    more_fq_file = os.path.realpath(mapped_fq_file) + ".more.fq"
    more_modified_dir = more_fq_file + ".spades"
    more_original_graph = os.path.join(more_modified_dir, "assembly_graph.fastg")
    more_modified_base = "assembly_graph.fastg.modified"
    more_modified_graph = more_original_graph + ".modified.fastg"
    # choose slimming databases: user-supplied custom dbs take precedence over the bundled label db
    if in_custom or ex_custom:
        include_priority_db = in_custom
        exclude_db = ex_custom
    else:
        include_priority_db = os.path.join(_LBL_DB_PATH, organelle_type + ".fasta")
        exclude_db = ""
    db_command = ""
    if include_priority_db:
        db_command += " --include-priority " + include_priority_db
    if exclude_db:
        db_command += " --exclude " + exclude_db
    if resume and (os.path.exists(this_modified_graph) or os.path.exists(more_modified_graph)):
        # resume shortcut: reuse a previously slimmed graph if one is non-empty
        if os.path.exists(more_modified_graph) and os.path.getsize(more_modified_graph) > 0:
            kmer_cov_values = get_graph_coverages_range_simple(read_fasta(more_modified_graph))
            # convert kmer coverage to base coverage: base = kmer * L / (L - k + 1)
            base_cov_values = [this_word_cov * mean_read_len / (mean_read_len - draft_kmer + 1)
                               for this_word_cov in kmer_cov_values]
        elif os.path.exists(this_modified_graph) and os.path.getsize(this_modified_graph) > 0:
            kmer_cov_values = get_graph_coverages_range_simple(read_fasta(this_modified_graph))
            base_cov_values = [this_word_cov * mean_read_len / (mean_read_len - draft_kmer + 1)
                               for this_word_cov in kmer_cov_values]
        else:
            base_cov_values = [0.0, 0.0, 0.0]
    else:
        try:
            # log_handler.info(" ...")
            # first attempt: assemble only the seed-mapped reads
            this_command = os.path.join(which_spades, "spades.py") + " -t " + str(threads) + \
                           " -s " + mapped_fq_file + " " + other_spades_opts + \
                           " -k " + str(draft_kmer) + " --only-assembler -o " + this_modified_dir
            pre_assembly = subprocess.Popen(this_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            if verbose_log and log_handler:
                log_handler.info(this_command)
            output = monitor_spades_log(pre_assembly, log_handler, sensitively_stop=True, silent=True)
            if not os.path.exists(this_original_graph) or os.path.getsize(this_original_graph) == 0:
                # trigger the retry path below
                raise OSError("original graph")
            if "== Error ==" in output:
                if verbose_log and log_handler:
                    log_handler.error('\n' + output.strip())
                raise NotImplementedError
            if which_slim is None:
                # no slimmer available: use the raw graph as-is
                shutil.copy(this_original_graph, this_modified_graph)
            else:
                which_bl_str = " --which-blast " + which_blast if which_blast else ""
                slim_command = os.path.join(which_slim, "slim_graph.py") + \
                               " --verbose " * int(bool(verbose_log)) + which_bl_str + \
                               " --log -t " + str(threads) + " --wrapper " + this_original_graph + \
                               " -o " + this_modified_dir + " --out-base " + this_modified_base + \
                               " " + db_command + " --keep-temp " * int(bool(keep_temp))
                do_slim = subprocess.Popen(slim_command,
                                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
                if verbose_log and log_handler:
                    log_handler.info(slim_command)
                output, err = do_slim.communicate()
                if not os.path.exists(this_modified_graph):
                    # slimming failed: fall back to the unslimmed graph
                    if log_handler:
                        log_handler.error("slimming the pre-assembled graph failed.")
                        if verbose_log and log_handler:
                            log_handler.error("\n" + output.decode("utf8").strip())
                    shutil.copy(this_original_graph, this_modified_graph)
                elif os.path.getsize(this_modified_graph) == 0:
                    # slimmed away everything: retry with more reads
                    raise OSError("modified graph")
            kmer_cov_values = get_graph_coverages_range_simple(read_fasta(this_modified_graph))
            base_cov_values = [this_word_cov * mean_read_len / (mean_read_len - draft_kmer + 1)
                               for this_word_cov in kmer_cov_values]
        except OSError:
            # if os.path.exists(mapped_fq_file + ".spades"):
            #     shutil.rmtree(mapped_fq_file + ".spades")
            # using words to recruit more reads for word size estimation
            # gathering_word_size = min(auto_min_word_size, 2 * int(mean_read_len * auto_min_word_size/100.) - 1)
            if log_handler:
                log_handler.info("Retrying with more reads ..")
            gathering_word_size = 25
            if resume and os.path.exists(more_fq_file):
                pass
            else:
                # recruit extra reads sharing words with the mapped reads or the seed
                theses_words = chop_seqs(
                    fq_simple_generator(mapped_fq_file), word_size=gathering_word_size)
                theses_words |= chop_seqs(
                    read_fasta(seed_fs_file)[1], word_size=gathering_word_size)
                extend_with_constant_words(
                    theses_words, original_fq_files, word_size=gathering_word_size, output=more_fq_file)
            more_command = os.path.join(which_spades, "spades.py") + " -t " + str(threads) + " -s " + \
                           more_fq_file + " " + other_spades_opts + " -k " + str(draft_kmer) + \
                           " --only-assembler -o " + this_modified_dir
            pre_assembly = subprocess.Popen(
                more_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
            if verbose_log and log_handler:
                log_handler.info(more_command)
            output = monitor_spades_log(pre_assembly, log_handler, sensitively_stop=True)
            if not os.path.exists(more_original_graph) or os.path.getsize(more_original_graph) == 0:
                if verbose_log and log_handler:
                    log_handler.error(more_original_graph + " not found/valid!")
                raise NotImplementedError
            elif "== Error ==" in output:
                if verbose_log and log_handler:
                    log_handler.error('\n' + output.strip())
                raise NotImplementedError
            else:
                # slim the retried graph (requires blastn); otherwise keep it unslimmed
                if which_slim is None or not executable(os.path.join(which_blast, "blastn")):
                    shutil.copy(more_original_graph, more_modified_graph)
                else:
                    which_bl_str = " --which-blast " + which_blast if which_blast else ""
                    slim_command = os.path.join(which_slim, "slim_graph.py") + \
                                   " --verbose " * int(bool(verbose_log)) + which_bl_str + \
                                   " --log -t " + str(threads) + " --wrapper " + more_original_graph + \
                                   " -o " + more_modified_dir + " --out-base " + more_modified_base + \
                                   " " + db_command + " --keep-temp " * int(bool(keep_temp))
                    do_slim = subprocess.Popen(slim_command,
                                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
                    if verbose_log and log_handler:
                        log_handler.info(slim_command)
                    output, err = do_slim.communicate()
                    if not os.path.exists(more_modified_graph):
                        if log_handler:
                            log_handler.error("slimming the pre-assembled graph failed.")
                            if verbose_log and log_handler:
                                log_handler.error("\n" + output.decode("utf8").strip())
                        shutil.copy(more_original_graph, more_modified_graph)
                    elif os.path.getsize(more_modified_graph) == 0:
                        if log_handler:
                            log_handler.warning("No target found in the pre-assembled graph/seed. "
                                                "GetOrganelle is still trying ..")
                        shutil.copy(more_original_graph, more_modified_graph)
            kmer_cov_values = get_graph_coverages_range_simple(read_fasta(more_modified_graph))
            base_cov_values = [this_word_cov * mean_read_len / (mean_read_len - draft_kmer + 1)
                               for this_word_cov in kmer_cov_values]
    if not keep_temp and os.path.exists(this_modified_dir):
        shutil.rmtree(this_modified_dir)
    return base_cov_values
def check_parameters(word_size, original_fq_files, seed_fs_files, seed_fq_files, seed_sam_files,
                     organelle_types, in_custom_list, ex_custom_list, mean_error_rate, target_genome_sizes,
                     max_extending_len, mean_read_len, max_read_len, low_quality_pattern,
                     all_read_nums, reduce_reads_for_cov,
                     log_handler, other_spades_opts, which_spades, which_blast, which_bowtie2,
                     wc_bc_ratio_constant=0.35, larger_auto_ws=False,
                     threads=1, resume=False, random_seed=12345, verbose_log=False, zip_files=False):
    """Finalize automatically-estimated run parameters before read extension.

    For each target organelle, estimates the organelle-hitting base-coverage from the
    seed-mapping sam file (optionally refined by pre-assembling the mapped reads with
    SPAdes + slim_graph.py), then derives the word size (-w) and --max-extending-len
    values that were left for automatic estimation, and scales down all_read_nums when
    the minimum estimated coverage exceeds reduce_reads_for_cov.

    :return: (word_size, keep_seq_parts, base_coverages_by_organelles, max_extending_len, all_read_nums)
    """
    from GetOrganelleLib.sam_parser import MapRecords, get_cover_range, mapping_gap_info_from_coverage_dict
    from itertools import combinations
    if word_size is None or -1 in max_extending_len:
        log_handler.info("The automatically-estimated parameter(s) do not ensure the best choice(s).")
        log_handler.info("If the result graph is not a circular organelle genome, ")
        log_handler.info("    you could adjust the value(s) of "
                         "'-w'" +
                         "/'--max-extending-len'" * int(bool(-1 in max_extending_len)) +
                         "/'-R' for another new run.")
    # remember which max_extending_len entries were requested as automatic (-1)
    auto_max_extending_len = [m_e_l == -1 for m_e_l in max_extending_len]
    if "animal_mt" in organelle_types:
        auto_min_word_size = AUTO_MIN_WS_ANIMAL_MT
    elif "embplant_mt" not in organelle_types:
        auto_min_word_size = AUTO_MIN_WS
    else:
        auto_min_word_size = AUTO_MIN_WS_PLANT_MT
    # locate slim_graph.py (used to slim pre-assembled graphs); None disables slimming
    if executable(os.path.join(UTILITY_PATH, "slim_graph.py -h")):
        which_slim = UTILITY_PATH
    elif executable(os.path.join(PATH_OF_THIS_SCRIPT, "slim_graph.py -h")):
        which_slim = PATH_OF_THIS_SCRIPT
    elif executable("slim_graph.py -h"):
        which_slim = ""
    else:
        which_slim = None
    base_coverages_by_organelles = []
    # estimate the hitting base-coverage of each target organelle from its seed-mapping sam
    for go_t, this_sam_f in enumerate(seed_sam_files):
        gathering_word_size = None
        mapping_records = MapRecords(this_sam_f)
        mapping_records.update_coverages()
        coverage_info = mapping_records.coverages
        # multiple ref ?
        coverages_2 = [pos for ref in coverage_info for pos in coverage_info[ref] if pos > 2]
        if not coverages_2:
            # fall back to any covered position before giving up
            coverages_2 = [pos for ref in coverage_info for pos in coverage_info[ref] if pos > 0]
            if not coverages_2:
                if log_handler:
                    log_handler.error("No " + organelle_types[go_t] + " seed reads found!")
                    log_handler.error("Please check your raw data or change your " + organelle_types[go_t] + " seed!")
                exit()
        # top BASE_COV_SAMPLING_PERCENT from mapped reads
        base_cov_values = get_cover_range(coverages_2, guessing_percent=BASE_COV_SAMPLING_PERCENT)
        # log_handler.info(
        #     "Estimated " + organelle_types[go_t] + "-hitting base-coverage = " + "%.2f" % base_cov_values[1])
        # # + "~".join(["%.2f" % base_c for base_c in base_cov_values]))
        this_modified_dir = seed_fq_files[go_t] + ".spades"
        this_modified_graph = os.path.join(this_modified_dir, "assembly_graph.fastg.modified.fastg")
        # if base_cov_values[0] < 100 and set(organelle_types) != {"embplant_pt"}:
        # if word_size is None:
        if word_size is None or max_extending_len[go_t] == -1:
            # refine the coverage estimate by pre-assembling the mapped reads, when tools allow
            if executable(os.path.join(which_spades, "spades.py -h")) and \
                    executable(os.path.join(which_bowtie2, "bowtie2")):
                log_handler.info("Pre-assembling mapped reads ...")
                try:
                    this_in = "" if not in_custom_list else in_custom_list[go_t]
                    this_ex = "" if not ex_custom_list else ex_custom_list[go_t]
                    base_cov_values = pre_assembly_mapped_reads_for_base_cov(
                        original_fq_files=original_fq_files, mapped_fq_file=seed_fq_files[go_t],
                        seed_fs_file=seed_fs_files[go_t], mean_read_len=mean_read_len,
                        # TODO check in_customs lengths
                        organelle_type=organelle_types[go_t], in_custom=this_in, ex_custom=this_ex,
                        threads=threads, resume=resume, log_handler=log_handler, verbose_log=verbose_log,
                        other_spades_opts=other_spades_opts,
                        which_spades=which_spades, which_slim=which_slim, which_blast=which_blast)
                except NotImplementedError:
                    # pre-assembly failure is non-fatal; keep the sam-based estimate
                    if max_extending_len[go_t] == -1:
                        log_handler.warning(
                            "Pre-assembling failed. The estimations for " + organelle_types[go_t] + "-hitting "
                            "base-coverage, -w, --max-extending-len may be misleading.")
                    else:
                        log_handler.warning(
                            "Pre-assembling failed. "
                            "The estimations for " + organelle_types[go_t] + "-hitting base-coverage "
                            "and word size may be misleading.")
                    pass
                else:
                    log_handler.info("Pre-assembling mapped reads finished.")
            else:
                log_handler.warning(
                    "No pre-assembling due to insufficient dependencies! "
                    "The estimations for " + organelle_types[go_t] +
                    "-hitting base-coverage and word size may be misleading.")
        base_coverages_by_organelles.append((base_cov_values[1], (base_cov_values[2] - base_cov_values[0]) / 2))
        log_handler.info(
            "Estimated " + organelle_types[go_t] + "-hitting base-coverage = " + "%.2f" % base_cov_values[1])
        #
        # derive --max-extending-len when requested as automatic (-1)
        if executable(os.path.join(which_spades, "spades.py -h")) and \
                executable(os.path.join(which_bowtie2, "bowtie2")):
            if max_extending_len[go_t] == -1:  # auto
                best_seed, gap_percent, largest_gap_lens = mapping_gap_info_from_coverage_dict(coverage_info)
                log_handler.info("Closest " + organelle_types[go_t] + " seed sequence: " + str(best_seed))
                # redo quick-mapping with the closest seed
                if os.path.exists(this_modified_graph):
                    simulated_fq_f = os.path.join(seed_fq_files[go_t] + ".spades",
                                                  "get_org.assembly_graph.simulated.fq")
                    simulate_fq_simple(from_fasta_file=this_modified_graph,
                                       out_dir=seed_fq_files[go_t] + ".spades",
                                       out_name="get_org.assembly_graph.simulated.fq",
                                       sim_read_jump_size=7, resume=resume)
                    closest_seed_f = os.path.join(seed_fq_files[go_t] + ".spades", "get_org.closest_seed.fasta")
                    seed_seq_list = SequenceList(seed_fs_files[go_t])
                    # extract the closest seed record into its own fasta (write-then-rename for safety)
                    for seq_record in seed_seq_list:
                        if seq_record.label.startswith(best_seed):
                            with open(closest_seed_f + ".Temp", "w") as out_closest:
                                out_closest.write(seq_record.fasta_str() + "\n")
                            os.rename(closest_seed_f + ".Temp", closest_seed_f)
                            break
                    bowtie_out_base = os.path.join(seed_fq_files[go_t] + ".spades", "get_org.map_to_closest")
                    map_with_bowtie2(seed_file=closest_seed_f, original_fq_files=[simulated_fq_f],
                                     bowtie_out=bowtie_out_base, resume=resume, threads=threads,
                                     random_seed=random_seed, target_echo_name=organelle_types[go_t],
                                     log_handler=log_handler, generate_fq=False, silent=verbose_log,
                                     which_bowtie2=which_bowtie2, bowtie2_mode="--very-fast-local")
                    mapping_records = MapRecords(bowtie_out_base + ".sam")
                    mapping_records.update_coverages()
                    coverage_info = mapping_records.coverages
                    best_seed, gap_percent, largest_gap_lens = mapping_gap_info_from_coverage_dict(coverage_info)
                    # if not keep_temp:
                    #     os.remove(simulated_fq_f)
                    if zip_files:
                        zip_file(source=bowtie_out_base + ".sam", target=bowtie_out_base + ".sam.tar.gz",
                                 verbose_log=verbose_log, log_handler=log_handler, remove_source=True)
                        zip_file(source=simulated_fq_f, target=simulated_fq_f + ".tar.gz",
                                 verbose_log=verbose_log, log_handler=log_handler, remove_source=True)
                log_handler.info("Unmapped percentage " + "%1.4f" % gap_percent + " and unmapped lengths " +
                                 " ".join([str(g_l) for g_l in largest_gap_lens[:5]]) + " ..")
                cov_dev_percent = (base_cov_values[2] - base_cov_values[0]) / 2 / base_cov_values[1]
                # if organelle_types[go_t] == "animal_mt":
                #     # empirical function
                #     max_extending_len[go_t] = largest_gap_lens[0] / 2. * (1 + gap_percent ** 2) * (1 + cov_dev_percent ** 2)
                #     max_extending_len[go_t] = min(int(math.ceil(max_extending_len[go_t] / 100)) * 100, 15000)
                # else:
                if len(coverage_info[best_seed]) < target_genome_sizes[go_t] / 10. or gap_percent > 0.4:
                    max_extending_len[go_t] = inf
                else:
                    if largest_gap_lens:
                        # empirical function
                        max_extending_len[go_t] = largest_gap_lens[0] / 2. * (1 + gap_percent ** 0.5) \
                                                  * (1 + cov_dev_percent ** 0.5)
                    else:
                        max_extending_len[go_t] = 1
                    # if more.fq was used,
                    # previous empirical formula is not estimating the gap based on the initial mapped fq
                    # so the gap could be actually larger by 2 * (max_read_len - gathering_word_size)
                    if gathering_word_size is not None:
                        max_extending_len[go_t] += 2 * (max_read_len - gathering_word_size)
                    max_extending_len[go_t] = int(math.ceil(max_extending_len[go_t]/100)) * 100
        else:
            # dependencies missing: no way to estimate a bound, extend without limit
            max_extending_len[go_t] = inf
    # check the divergence of coverages of different organelle genomes
    for estimated_a, estimated_b in combinations(base_coverages_by_organelles, 2):
        if abs(log(estimated_a[0]) - log(estimated_b[0])) > log(10):
            log_handler.warning("Multi-organelle mode (with the same data size and word size) is not suggested "
                                "for organelles with divergent base-coverages.")
            log_handler.warning("Please try to get different organelles in separate runs, "
                                "or to use other seeds to get a better estimation of coverage values.")
            break
    # check the base coverage to ensure not using too much data
    this_minimum_base_cov = min([value_set[0] for value_set in base_coverages_by_organelles])
    if this_minimum_base_cov > reduce_reads_for_cov:
        reduce_ratio = reduce_reads_for_cov / this_minimum_base_cov
        for go_r_n, read_num in enumerate(all_read_nums):
            all_read_nums[go_r_n] = int(read_num * reduce_ratio)
        log_handler.info("Reads reduced to = " + "+".join([str(sub_num) for sub_num in all_read_nums]))
        for go_t, (t_base_cov, t_base_sd) in enumerate(base_coverages_by_organelles):
            base_coverages_by_organelles[go_t] = t_base_cov * reduce_ratio, t_base_sd * reduce_ratio
            log_handler.info("Adjusting expected " + organelle_types[go_t] + " base coverage to " +
                             "%.2f" % (t_base_cov * reduce_ratio))
    if word_size is None:
        # automatic word size: estimate per organelle, keep the smallest
        all_ws_values = []
        for go_type, (this_base_cov, cov_dev) in enumerate(base_coverages_by_organelles):
            if larger_auto_ws:
                word_size = estimate_word_size(
                    base_cov=this_base_cov, base_cov_deviation=cov_dev,
                    read_length=mean_read_len, target_size=target_genome_sizes[go_type],
                    max_discontinuous_prob=0.05, min_word_size=69, mean_error_rate=mean_error_rate,
                    log_handler=log_handler, wc_bc_ratio_constant=wc_bc_ratio_constant - 0.03,
                    organelle_type=organelle_types[go_type])
            else:
                word_size = estimate_word_size(
                    base_cov=this_base_cov, base_cov_deviation=cov_dev,
                    read_length=mean_read_len, target_size=target_genome_sizes[go_type],
                    max_discontinuous_prob=0.01, min_word_size=auto_min_word_size,
                    mean_error_rate=mean_error_rate, log_handler=log_handler,
                    wc_bc_ratio_constant=wc_bc_ratio_constant, organelle_type=organelle_types[go_type])
            all_ws_values.append(word_size)
        word_size = min(all_ws_values)
        log_handler.info("Estimated word size(s): " + ",".join([str(here_w) for here_w in all_ws_values]))
        log_handler.info("Setting '-w " + str(word_size) + "'")
    elif float(str(word_size)) < 1:
        # a fractional word size is interpreted as a ratio of the mean read length
        new_word_size = int(round(word_size * mean_read_len, 0))
        if new_word_size < GLOBAL_MIN_WS:
            word_size = GLOBAL_MIN_WS
            # NOTE(review): word_size was overwritten just above, so this message reports
            # GLOBAL_MIN_WS rather than the original too-small ratio — confirm intent
            log_handler.warning("Too small ratio " + str(word_size) + ", setting '-w " + str(GLOBAL_MIN_WS) + "'")
        else:
            word_size = new_word_size
            log_handler.info("Setting '-w " + str(word_size) + "'")
    all_infinite = True
    # when every automatic bound is already large, treat extension as unlimited everywhere
    for go_t, max_ex_len in enumerate(max_extending_len):
        if not auto_max_extending_len[go_t]:  # not user defined
            all_infinite = False
            break
        # if organelle_types[go_t] == "animal_mt":
        #     if max_extending_len[go_t] < 15000:
        #         all_infinite = False
        #         break
        # else:
        if max_extending_len[go_t] < 6000:  # empirically not efficient for max_extending_len > 6000
            all_infinite = False
            break
    if all_infinite:
        for go_t in range(len(max_extending_len)):
            max_extending_len[go_t] = inf
    log_handler.info(
        "Setting '--max-extending-len " + ",".join([str(max_ex_l) for max_ex_l in max_extending_len]) + "'")
    # keep split (low-quality-trimmed) sub-sequences only when the word size leaves room within reads
    if float(word_size) / max_read_len <= 0.5 and len(low_quality_pattern) > 2:
        keep_seq_parts = True
    else:
        keep_seq_parts = False
    return word_size, keep_seq_parts, base_coverages_by_organelles, max_extending_len, all_read_nums
def check_kmers(kmer_str, word_s, max_r_len, log_handler):
    """Validate a user-supplied comma-separated kmer list for SPAdes.

    Returns the cleaned, sorted kmer string (values outside [21, min(max_r_len, 127)]
    are silently dropped), or None when no kmer string was given.
    Raises ValueError for non-integer or even kmer values.
    """
    if not kmer_str:
        return None
    try:
        parsed_kmers = [int(chunk) for chunk in kmer_str.split(",")]
    except ValueError:
        raise ValueError("Invalid kmer value string: " + kmer_str)
    # SPAdes only accepts odd kmer sizes
    for single_kmer in parsed_kmers:
        if single_kmer % 2 == 0:
            raise ValueError("Invalid kmer value: " + str(single_kmer) + "! kmer values must be odd numbers!")
    # drop values outside the legal SPAdes range
    upper_limit = min(max_r_len, 127)
    legal_kmers = sorted(k_value for k_value in parsed_kmers if 21 <= k_value <= upper_limit)
    spades_kmer = ",".join(str(k_value) for k_value in legal_kmers)
    log_handler.info("Setting '-k " + spades_kmer + "'")
    return spades_kmer
# Optional dependency: psutil enables memory-usage reporting in log messages.
try:
    import psutil
except ImportError:
    # psutil missing: downstream code checks "if this_process" and skips memory logging
    this_process = None
else:
    # handle to the current process, used to query RSS for log output
    this_process = psutil.Process(os.getpid())
def write_fq_results(original_fq_files, accepted_contig_id, out_file_name, temp2_clusters_dir, fq_info_in_memory,
                     all_read_limits, echo_step, verbose, index_in_memory, log_handler, extra_accepted_lines=None):
    """Extract accepted reads from the original fastq files into out_file_name_<i>.fq files.

    :param original_fq_files: list of input fastq paths; outputs follow the same order.
    :param accepted_contig_id: cluster indices whose member reads should be written.
    :param temp2_clusters_dir: on-disk cluster index path (used when index_in_memory is False).
    :param fq_info_in_memory: in-memory index; fq_info_in_memory[1] maps cluster id -> fastq line numbers.
    :param all_read_limits: per-file caps on the number of reads to scan.
    :param extra_accepted_lines: extra fastq line numbers (e.g. initially mapped reads) to keep;
                                 defaults to an empty set.
    """
    # fix: avoid the shared-mutable-default pitfall of "extra_accepted_lines=set()"
    if extra_accepted_lines is None:
        extra_accepted_lines = set()
    if verbose:
        if echo_step != inf:
            sys.stdout.write(' ' * 100 + '\b' * 100)
            sys.stdout.flush()
        log_handler.info("Producing output ...")
        log_handler.info("reading indices ...")
    accepted_lines = []
    if index_in_memory:
        # read cluster indices
        for this_index in accepted_contig_id:
            accepted_lines += fq_info_in_memory[1][this_index]
        # produce the pair-end output
        accepted_lines = set(accepted_lines)
    else:
        # read cluster indices from disk; line i of the index file lists cluster i's fastq lines
        temp2_indices_file_in = open(temp2_clusters_dir, 'r')
        this_index = 0
        for line in temp2_indices_file_in:
            if this_index in accepted_contig_id:
                accepted_lines += [int(x) for x in line.strip().split('\t')]
            this_index += 1
        # fix: close the index file instead of leaking the handle
        temp2_indices_file_in.close()
        accepted_lines = set(accepted_lines)
    # add initial mapped read ids
    for line_id in extra_accepted_lines:
        accepted_lines.add(line_id)
    # write by line
    if verbose:
        log_handler.info("writing fastq lines ...")
    post_reading = [open(fq_file, 'r') for fq_file in original_fq_files]
    files_out = [open(out_file_name + '_' + str(i + 1) + '.temp', 'w') for i in range(len(original_fq_files))]
    line_count = 0
    for i in range(len(original_fq_files)):
        count_r = 0
        line = post_reading[i].readline()
        while line:
            count_r += 1
            if line_count in accepted_lines:
                # accepted read: copy the whole 4-line fastq record
                files_out[i].write(line)
                for j in range(3):
                    files_out[i].write(post_reading[i].readline())
                    line_count += 1
                line = post_reading[i].readline()
                line_count += 1
            else:
                # rejected read: skip the whole 4-line record
                for j in range(4):
                    line = post_reading[i].readline()
                    line_count += 1
            if count_r >= all_read_limits[i]:
                break
        files_out[i].close()
        post_reading[i].close()
    del accepted_lines
    for i in range(len(original_fq_files)):
        # finalize via rename so a half-written .fq is never left behind
        os.rename(out_file_name + '_' + str(i + 1) + '.temp', out_file_name + '_' + str(i + 1) + '.fq')
    if verbose:
        log_handler.info("writing fastq lines finished.")
def make_read_index(original_fq_files, direction_according_to_user_input, all_read_limits, rm_duplicates, output_base,
                    word_size, anti_lines, pre_grouped, index_in_memory, anti_seed, keep_seq_parts,
                    low_quality, echo_step, resume, log_handler):
    """Scan the input fastq files and build the (deduplicated) read index used for extension.

    Each unique read is stored together with its reverse complement, either in memory
    or in <output_base>/temp.indices.1 when index_in_memory is False; line_clusters
    groups the fastq line numbers of identical reads. Reads matching anti_lines
    (anti-seed hits) and reads shorter than word_size are dropped; when the
    low_quality pattern is active, reads may be split into sub-sequence parts.

    :return: (forward_reverse_reads, line_clusters, len_indices, keep_seq_parts)
    """
    # read original reads
    # line_cluster (list) ~ forward_reverse_reads
    line_clusters = []
    seq_duplicates = {}
    forward_reverse_reads = []
    line_count = 0
    this_index = 0
    do_split_low_quality = len(low_quality) > 2
    #
    # NOTE(review): name_to_line is created and deleted below but never filled in this function
    name_to_line = {}
    #
    # index files are written to the "_"-prefixed path first, then renamed when complete
    temp1_contig_dir = [os.path.join(output_base, k + 'temp.indices.1') for k in ("_", "")]
    temp2_clusters_dir = [os.path.join(output_base, k + 'temp.indices.2') for k in ("_", "")]
    cancel_seq_parts = True
    # resume: reuse previously dumped index files when they both exist
    if resume and os.path.exists(temp1_contig_dir[1]) and os.path.exists(temp2_clusters_dir[1]):
        if pre_grouped or index_in_memory:
            log_handler.info("Reading existed indices for fastq ...")
            #
            if keep_seq_parts:
                forward_reverse_reads = [x.strip().split("\t") for x in open(temp1_contig_dir[1], 'r')]
                cancel_seq_parts = True if max([len(x) for x in forward_reverse_reads]) == 1 else False
            else:
                forward_reverse_reads = [x.strip() for x in open(temp1_contig_dir[1], 'r')]
            #
            line_clusters = [[int(x) for x in y.split('\t')] for y in open(temp2_clusters_dir[1], 'r')]
            if rm_duplicates:
                line_count = sum([len(x) for x in line_clusters]) * 4
            # log
            len_indices = len(line_clusters)
            if this_process:
                memory_usage = "Mem " + str(round(this_process.memory_info().rss / 1024.0 / 1024 / 1024, 3)) + " G, "
            else:
                memory_usage = ''
            if rm_duplicates:
                log_handler.info(memory_usage + str(len_indices) + " unique reads in all " +
                                 str(line_count // 4) + " reads")
            else:
                log_handler.info(memory_usage + str(len_indices) + " reads")
        else:
            log_handler.info("indices for fastq existed!")
            len_indices = len([x for x in open(temp2_clusters_dir[1], 'r')])
    else:
        if not index_in_memory:
            temp1_contig_out = open(temp1_contig_dir[0], 'w')
        # lengths = []
        use_user_direction = False
        # scan each fastq file, recording every unique read and its reverse complement
        for id_file, file_name in enumerate(original_fq_files):
            file_in = open(file_name, "r")
            count_this_read_n = 0
            line = file_in.readline()
            # if anti seed input, name & direction should be recognized
            if anti_seed:
                while line and count_this_read_n < all_read_limits[id_file]:
                    if line.startswith("@"):
                        count_this_read_n += 1
                        # parsing name & direction
                        if use_user_direction:
                            this_name = line[1:].strip()
                            direction = direction_according_to_user_input[id_file]
                        else:
                            # try common fastq header styles; fall back to user-given directions
                            try:
                                if ' ' in line:
                                    this_head = line[1:].split(' ')
                                    this_name, direction = this_head[0], int(this_head[1][0])
                                elif '#' in line:
                                    this_head = line[1:].split('#')
                                    this_name, direction = this_head[0], int(this_head[1].strip("/")[0])
                                elif line[-3] == "/" and line[-2].isdigit():  # 2019-04-22 added
                                    this_name, direction = line[1:-3], int(line[-2])
                                elif line[1:].strip().isdigit():
                                    log_handler.info("Using user-defined read directions. ")
                                    use_user_direction = True
                                    this_name = line[1:].strip()
                                    direction = direction_according_to_user_input[id_file]
                                else:
                                    log_handler.info('Unrecognized head: ' + file_name + ': ' + str(line.strip()))
                                    log_handler.info("Using user-defined read directions. ")
                                    use_user_direction = True
                                    this_name = line[1:].strip()
                                    direction = direction_according_to_user_input[id_file]
                            except (ValueError, IndexError):
                                log_handler.info('Unrecognized head: ' + file_name + ': ' + str(line.strip()))
                                log_handler.info("Using user-defined read directions. ")
                                use_user_direction = True
                                this_name = line[1:].strip()
                                direction = direction_according_to_user_input[id_file]
                        # skip reads that belong to the anti-seed
                        if (this_name, direction) in anti_lines:
                            line_count += 4
                            for i in range(4):
                                line = file_in.readline()
                            continue
                        this_seq = file_in.readline().strip()
                        # drop nonsense reads
                        if len(this_seq) < word_size:
                            line_count += 4
                            for i in range(3):
                                line = file_in.readline()
                            continue
                        file_in.readline()
                        quality_str = file_in.readline()
                        if do_split_low_quality:
                            # this_seq becomes a list of sub-sequence parts
                            this_seq = split_seq_by_quality_pattern(this_seq, quality_str, low_quality, word_size)
                            # drop nonsense reads
                            if not this_seq:
                                line_count += 4
                                line = file_in.readline()
                                continue
                            if keep_seq_parts:
                                if cancel_seq_parts and len(this_seq) > 1:
                                    cancel_seq_parts = False
                                this_c_seq = complementary_seqs(this_seq)
                                # lengths.extend([len(seq_part) for seq_part in this_seq])
                            else:
                                this_seq = this_seq[0]
                                this_c_seq = complementary_seq(this_seq)
                                # lengths.append(len(this_seq))
                        else:
                            this_c_seq = complementary_seq(this_seq)
                            # lengths.append(len(this_seq))
                        if rm_duplicates:
                            # identical (or reverse-complement-identical) reads share one cluster
                            if this_seq in seq_duplicates:
                                line_clusters[seq_duplicates[this_seq]].append(line_count)
                            elif this_c_seq in seq_duplicates:
                                line_clusters[seq_duplicates[this_c_seq]].append(line_count)
                            else:
                                if index_in_memory:
                                    forward_reverse_reads.append(this_seq)
                                    forward_reverse_reads.append(this_c_seq)
                                else:
                                    if do_split_low_quality and keep_seq_parts:
                                        temp1_contig_out.write(
                                            "\t".join(this_seq) + '\n' + "\t".join(this_c_seq) + '\n')
                                    else:
                                        temp1_contig_out.write(this_seq + '\n' + this_c_seq + '\n')
                                seq_duplicates[this_seq] = this_index
                                line_clusters.append([line_count])
                                this_index += 1
                            # cap memory: stop tracking duplicates past the threshold
                            if len(seq_duplicates) > rm_duplicates:
                                seq_duplicates = {}
                        else:
                            line_clusters.append([line_count])
                            if index_in_memory:
                                forward_reverse_reads.append(this_seq)
                                forward_reverse_reads.append(this_c_seq)
                            else:
                                if do_split_low_quality and keep_seq_parts:
                                    temp1_contig_out.write("\t".join(this_seq) + '\n' + "\t".join(this_c_seq) + '\n')
                                else:
                                    temp1_contig_out.write(this_seq + '\n' + this_c_seq + '\n')
                    else:
                        log_handler.error("Illegal fq format in line " + str(line_count) + ' ' + str(line))
                        exit()
                    if echo_step != inf and line_count % echo_step == 0:
                        to_print = str("%s" % datetime.datetime.now())[:23].replace('.', ',') + " - INFO: " + str(
                            (line_count + 4) // 4) + " reads"
                        sys.stdout.write(to_print + '\b' * len(to_print))
                        sys.stdout.flush()
                    line_count += 4
                    line = file_in.readline()
            else:
                # no anti-seed: same scan without header name/direction parsing
                while line and count_this_read_n < all_read_limits[id_file]:
                    if line.startswith("@"):
                        count_this_read_n += 1
                        this_seq = file_in.readline().strip()
                        # drop nonsense reads
                        if len(this_seq) < word_size:
                            line_count += 4
                            for i in range(3):
                                line = file_in.readline()
                            continue
                        file_in.readline()
                        quality_str = file_in.readline()
                        if do_split_low_quality:
                            this_seq = split_seq_by_quality_pattern(this_seq, quality_str, low_quality, word_size)
                            # drop nonsense reads
                            if not this_seq:
                                line_count += 4
                                line = file_in.readline()
                                continue
                            if keep_seq_parts:
                                if cancel_seq_parts and len(this_seq) > 1:
                                    cancel_seq_parts = False
                                this_c_seq = complementary_seqs(this_seq)
                                # lengths.extend([len(seq_part) for seq_part in this_seq])
                            else:
                                this_seq = this_seq[0]
                                this_c_seq = complementary_seq(this_seq)
                                # lengths.append(len(this_seq))
                        else:
                            this_c_seq = complementary_seq(this_seq)
                            # lengths.append(len(this_seq))
                        if rm_duplicates:
                            if this_seq in seq_duplicates:
                                line_clusters[seq_duplicates[this_seq]].append(line_count)
                            elif this_c_seq in seq_duplicates:
                                line_clusters[seq_duplicates[this_c_seq]].append(line_count)
                            else:
                                if index_in_memory:
                                    forward_reverse_reads.append(this_seq)
                                    forward_reverse_reads.append(this_c_seq)
                                else:
                                    if do_split_low_quality and keep_seq_parts:
                                        temp1_contig_out.write(
                                            "\t".join(this_seq) + '\n' + "\t".join(this_c_seq) + '\n')
                                    else:
                                        temp1_contig_out.write(this_seq + '\n' + this_c_seq + '\n')
                                seq_duplicates[this_seq] = this_index
                                line_clusters.append([line_count])
                                this_index += 1
                            if len(seq_duplicates) > rm_duplicates:
                                seq_duplicates = {}
                        else:
                            line_clusters.append([line_count])
                            if index_in_memory:
                                forward_reverse_reads.append(this_seq)
                                forward_reverse_reads.append(this_c_seq)
                            else:
                                if do_split_low_quality and keep_seq_parts:
                                    temp1_contig_out.write("\t".join(this_seq) + '\n' + "\t".join(this_c_seq) + '\n')
                                else:
                                    temp1_contig_out.write(this_seq + '\n' + this_c_seq + '\n')
                    else:
                        log_handler.error("Illegal fq format in line " + str(line_count) + ' ' + str(line))
                        exit()
                    if echo_step != inf and line_count % echo_step == 0:
                        to_print = str("%s" % datetime.datetime.now())[:23].replace('.', ',') + " - INFO: " + str(
                            (line_count + 4) // 4) + " reads"
                        sys.stdout.write(to_print + '\b' * len(to_print))
                        sys.stdout.flush()
                    line_count += 4
                    line = file_in.readline()
            # read one more line so the "if line" check below reflects remaining content — TODO confirm intent
            line = file_in.readline()
            file_in.close()
            if line:
                log_handler.info("For " + file_name + ", only top " + str(int(all_read_limits[id_file])) +
                                 " reads are used in downstream analysis.")
        if not index_in_memory:
            temp1_contig_out.close()
            os.rename(temp1_contig_dir[0], temp1_contig_dir[1])
        if this_process:
            memory_usage = "Mem " + str(round(this_process.memory_info().rss / 1024.0 / 1024 / 1024, 3)) + " G, "
        else:
            memory_usage = ''
        del name_to_line
        if not index_in_memory:
            # dump line clusters
            len_indices = len(line_clusters)
            temp2_indices_file_out = open(temp2_clusters_dir[0], 'w')
            for this_index in range(len_indices):
                temp2_indices_file_out.write('\t'.join([str(x) for x in line_clusters[this_index]]))
                temp2_indices_file_out.write('\n')
            temp2_indices_file_out.close()
            os.rename(temp2_clusters_dir[0], temp2_clusters_dir[1])
        del seq_duplicates
        len_indices = len(line_clusters)
        if rm_duplicates:
            if len_indices == 0 and line_count // 4 > 0:
                log_handler.error("No qualified reads found!")
                log_handler.error("Word size (" + str(word_size) + ") CANNOT be larger than your "
                                  "post-trimmed maximum read length!")
                exit()
            log_handler.info(memory_usage + str(len_indices) + " candidates in all " + str(line_count // 4) + " reads")
        else:
            # del lengths
            log_handler.info(memory_usage + str(len_indices) + " reads")
    if keep_seq_parts and cancel_seq_parts:
        # no read was actually split: collapse one-element part-lists back to plain strings
        keep_seq_parts = False
        for go_to, all_seq_parts in enumerate(forward_reverse_reads):
            forward_reverse_reads[go_to] = all_seq_parts[0]
    return forward_reverse_reads, line_clusters, len_indices, keep_seq_parts
def pre_grouping(fastq_indices_in_memory, dupli_threshold, out_base, index_in_memory, preg_word_size, log_handler):
    """Group duplicate-containing reads that share words of length preg_word_size.

    :param fastq_indices_in_memory: (forward_and_reverse_reads, line_clusters, len_indices, keep_seq_parts)
        as returned by make_read_index.
    :param dupli_threshold: maximum number of duplicated reads used for grouping.
    :param out_base: directory holding 'temp.indices.1' when the index is on disk.
    :param index_in_memory: whether read sequences are held in memory or read from disk.
    :param preg_word_size: word size used for the pre-grouping word match.
    :return: (groups_of_duplicate_lines, lines_with_duplicates, group_id_to_read_counts)
    """
    forward_and_reverse_reads, line_clusters, len_indices, keep_seq_parts = fastq_indices_in_memory
    log_handler.info("Pre-grouping reads ...")
    log_handler.info("Setting '--pre-w " + str(preg_word_size) + "'")
    lines_with_duplicates = {}
    count_dupli = 0
    for j in range(len(line_clusters)):
        if len(line_clusters[j]) >= 2:
            if count_dupli < dupli_threshold:
                # placeholder value; every key is re-assigned a real group id below
                lines_with_duplicates[j] = int
            count_dupli += 1
    if this_process:
        memory_usage = "Mem " + str(round(this_process.memory_info().rss / 1024.0 / 1024 / 1024, 3)) + " G, "
    else:
        memory_usage = ''
    log_handler.info(memory_usage + str(len(lines_with_duplicates)) + "/" + str(count_dupli) + " used/duplicated")
    groups_of_duplicate_lines = {}
    count_groups = 0
    these_words = {}
    if index_in_memory:
        def generate_forward_and_reverse(here_unique_id):
            return forward_and_reverse_reads[2 * here_unique_id], forward_and_reverse_reads[2 * here_unique_id + 1]
    else:
        # variable outside the function
        here_go_to = [0]
        temp_seq_file = open(os.path.join(out_base, 'temp.indices.1'))
        if keep_seq_parts:
            def generate_forward_and_reverse(here_unique_id):
                forward_seq_line = temp_seq_file.readline()
                reverse_seq_line = temp_seq_file.readline()
                # skip those reads that are not unique/represented by others
                while here_go_to[0] < 2 * here_unique_id:
                    forward_seq_line = temp_seq_file.readline()
                    reverse_seq_line = temp_seq_file.readline()
                    here_go_to[0] += 2
                here_go_to[0] += 2
                return forward_seq_line.strip().split("\t"), reverse_seq_line.strip().split("\t")
        else:
            def generate_forward_and_reverse(here_unique_id):
                forward_seq_line = temp_seq_file.readline()
                reverse_seq_line = temp_seq_file.readline()
                # skip those reads that are not unique/represented by others
                while here_go_to[0] < 2 * here_unique_id:
                    forward_seq_line = temp_seq_file.readline()
                    reverse_seq_line = temp_seq_file.readline()
                    here_go_to[0] += 2
                here_go_to[0] += 2
                return forward_seq_line.strip(), reverse_seq_line.strip()
    for this_unique_read_id in sorted(lines_with_duplicates):
        this_seq, this_c_seq = generate_forward_and_reverse(this_unique_read_id)
        these_group_id = set()
        this_words = []
        if keep_seq_parts:
            for this_seq_part, this_c_seq_part in zip(this_seq, this_c_seq):
                seq_len = len(this_seq_part)
                temp_length = seq_len - preg_word_size
                for i in range(0, temp_length + 1):
                    forward = this_seq_part[i:i + preg_word_size]
                    reverse = this_c_seq_part[temp_length - i:seq_len - i]
                    if forward in these_words:
                        these_group_id.add(these_words[forward])
                    else:
                        this_words.append(forward)
                        this_words.append(reverse)
        else:
            seq_len = len(this_seq)
            temp_length = seq_len - preg_word_size
            for i in range(0, temp_length + 1):
                forward = this_seq[i:i + preg_word_size]
                reverse = this_c_seq[temp_length - i:seq_len - i]
                if forward in these_words:
                    these_group_id.add(these_words[forward])
                else:
                    this_words.append(forward)
                    this_words.append(reverse)
        len_groups = len(these_group_id)
        # create a new group
        if len_groups == 0:
            new_group_id = count_groups
            groups_of_duplicate_lines[new_group_id] = [{this_unique_read_id}, set(this_words)]
            for this_word in this_words:
                these_words[this_word] = new_group_id
            lines_with_duplicates[this_unique_read_id] = new_group_id
            count_groups += 1
        # belongs to one group
        elif len_groups == 1:
            this_group_id = these_group_id.pop()
            groups_of_duplicate_lines[this_group_id][0].add(this_unique_read_id)
            for this_word in this_words:
                groups_of_duplicate_lines[this_group_id][1].add(this_word)
                these_words[this_word] = this_group_id
            lines_with_duplicates[this_unique_read_id] = this_group_id
        # connect different groups
        else:
            these_group_id = list(these_group_id)
            these_group_id.sort()
            this_group_to_keep = these_group_id[0]
            # for related group to merge
            for to_merge in range(1, len_groups):
                this_group_to_merge = these_group_id[to_merge]
                lines_to_merge, words_to_merge = groups_of_duplicate_lines[this_group_to_merge]
                for line_to_merge in lines_to_merge:
                    groups_of_duplicate_lines[this_group_to_keep][0].add(line_to_merge)
                    lines_with_duplicates[line_to_merge] = this_group_to_keep
                for word_to_merge in words_to_merge:
                    groups_of_duplicate_lines[this_group_to_keep][1].add(word_to_merge)
                    these_words[word_to_merge] = this_group_to_keep
                del groups_of_duplicate_lines[this_group_to_merge]
            # for the remain group to grow
            for this_word in this_words:
                groups_of_duplicate_lines[this_group_to_keep][1].add(this_word)
                these_words[this_word] = this_group_to_keep
            groups_of_duplicate_lines[this_group_to_keep][0].add(this_unique_read_id)
            lines_with_duplicates[this_unique_read_id] = this_group_to_keep
    if not index_in_memory:
        # fix: close the on-disk sequence index instead of leaking the file handle
        temp_seq_file.close()
    # keep only the line-id sets; the word sets are no longer needed
    for del_words in groups_of_duplicate_lines:
        groups_of_duplicate_lines[del_words] = groups_of_duplicate_lines[del_words][0]
    count_del_single = 0
    # drop trivial single-read groups
    for del_words in list(groups_of_duplicate_lines):
        if len(groups_of_duplicate_lines[del_words]) == 1:
            del_line = groups_of_duplicate_lines[del_words].pop()
            del lines_with_duplicates[del_line]
            del groups_of_duplicate_lines[del_words]
            count_del_single += 1
    if this_process:
        memory_usage = "Mem " + str(round(this_process.memory_info().rss / 1024.0 / 1024 / 1024, 3)) + " G, "
    else:
        memory_usage = ''
    del these_words
    group_id_to_read_counts = {}
    for cal_copy_group_id in groups_of_duplicate_lines:
        group_id_to_read_counts[cal_copy_group_id] = sum([len(line_clusters[line_id])
                                                          for line_id in groups_of_duplicate_lines[cal_copy_group_id]])
    log_handler.info(memory_usage + str(len(groups_of_duplicate_lines)) + " groups made.")
    return groups_of_duplicate_lines, lines_with_duplicates, group_id_to_read_counts
class RoundLimitException(Exception):
    """Raised when read extension reaches the configured maximum number of rounds."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        value_repr = repr(self.value)
        return value_repr
class WordsLimitException(Exception):
    """Raised when the accumulated accepted-word count exceeds its allowed maximum."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        value_repr = repr(self.value)
        return value_repr
class NoMoreReads(Exception):
    """Raised when an extension round accepts no new reads, i.e. the search converged."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        value_repr = repr(self.value)
        return value_repr
def extending_no_lim(word_size, seed_file, original_fq_files, len_indices, pre_grouped,
                     groups_of_duplicate_lines, lines_with_duplicates, fq_info_in_memory, output_base,
                     max_rounds, min_rounds, fg_out_per_round, jump_step, mesh_size, verbose, resume,
                     all_read_limits, maximum_n_words, keep_seq_parts, low_qual_pattern, echo_step,
                     log_handler):
    """Iteratively extend the accepted-read set from seed k-mers, with no extension-distance limit.

    Seeds `accepted_words` with k-mers (length `word_size`) chopped from `seed_file`, then
    repeatedly scans all indexed reads; a read sharing any word with `accepted_words` is
    accepted, and all of its words (forward and reverse-complement) are added back.
    Rounds repeat until no new words are found (NoMoreReads), `max_rounds` is hit
    (RoundLimitException), or `maximum_n_words` is exceeded (WordsLimitException) —
    termination is driven entirely by these exceptions.

    Returns the set of accepted read ids (`accepted_rd_id`).

    NOTE: relies on module-level helpers not visible here (chop_seqs, chop_seq_list,
    fq_simple_generator, write_fq_results, this_process, inf, RoundLimitException).
    """
    # adding initial word
    log_handler.info("Adding initial words ...")
    if keep_seq_parts:
        # each "read" is a list of sub-sequences (low-quality regions split out)
        accepted_words = chop_seq_list(
            fq_simple_generator(seed_file[0], split_pattern=low_qual_pattern, min_sub_seq=word_size),
            word_size)
        for go_type in range(1, len(seed_file)):
            chop_seq_list(
                fq_simple_generator(seed_file[go_type], split_pattern=low_qual_pattern, min_sub_seq=word_size),
                word_size, previous_words=accepted_words)
    else:
        accepted_words = chop_seqs(fq_simple_generator(seed_file[0]), word_size)
        for go_type in range(1, len(seed_file)):
            chop_seqs(fq_simple_generator(seed_file[go_type]), word_size, previous_words=accepted_words)
    log_handler.info("AW " + str(len(accepted_words)))
    accepted_rd_id = set()
    accepted_rd_id_this_round = set()
    # work on copies so the caller's dedup bookkeeping is not mutated
    g_duplicate_lines = deepcopy(groups_of_duplicate_lines)
    l_with_duplicates = deepcopy(lines_with_duplicates)
    line_to_accept = set()
    round_count = 1
    initial_aw_count = len(accepted_words)
    prev_aw_count = initial_aw_count
    accumulated_num_words = initial_aw_count
    # check the words limit ~1000 times per round, not per read
    check_times = 1000
    check_step = max(int(len_indices / check_times), 1)
    if fg_out_per_round:
        round_dir = os.path.join(output_base, "intermediate_reads")
        if not os.path.exists(round_dir):
            os.mkdir(round_dir)
    # this_process is presumably a module-level psutil.Process (None when psutil missing) — TODO confirm
    if not this_process and verbose:
        log_handler.warning("Package psutil is not installed, so that memory usage will not be logged\n"
                            "Don't worry. This will not affect the result.")
    try:
        def summarise_round(acc_words, acc_contig_id_this_round, pre_aw, r_count, acc_num_words, unique_id):
            """Log round stats, optionally rebuild words from per-round fq output, and raise to terminate."""
            len_aw = len(acc_words)
            len_al = len(acc_contig_id_this_round)
            # for check words limit; memory control
            acc_num_words += len_aw - pre_aw
            if this_process:
                inside_memory_usage = " Mem " + str(round(this_process.memory_info().rss / 1024.0 / 1024 / 1024, 3))
            else:
                inside_memory_usage = ''
            if fg_out_per_round:
                write_fq_results(original_fq_files, acc_contig_id_this_round,
                                 os.path.join(round_dir, "Round." + str(r_count)),
                                 os.path.join(output_base, 'temp.indices.2'), fq_info_in_memory, all_read_limits,
                                 echo_step, verbose, bool(fq_info_in_memory), log_handler)
                # clear former accepted words from memory
                del acc_words
                # then add new accepted words into memory
                if keep_seq_parts:
                    acc_words = chop_seq_list(
                        fq_simple_generator(
                            [os.path.join(round_dir, "Round." + str(r_count) + '_' + str(x + 1) + '.fq') for x in
                             range(len(original_fq_files))],
                            split_pattern=low_qual_pattern, min_sub_seq=word_size),
                        word_size, mesh_size)
                else:
                    acc_words = chop_seqs(
                        fq_simple_generator(
                            [os.path.join(round_dir, "Round." + str(r_count) + '_' + str(x + 1) + '.fq') for x in
                             range(len(original_fq_files))]),
                        word_size, mesh_size)
                acc_contig_id_this_round = set()
            log_handler.info("Round " + str(r_count) + ': ' + str(unique_id + 1) + '/' + str(len_indices) + " AI " + str(
                len_al) + " AW " + str(len_aw) + inside_memory_usage)
            #
            # no new words this round -> converged
            if len_aw == pre_aw:
                raise NoMoreReads('')
            pre_aw = len(acc_words)
            #
            if r_count == max_rounds:
                raise RoundLimitException(r_count)
            r_count += 1
            return acc_words, acc_contig_id_this_round, pre_aw, r_count, acc_num_words

        def echo_to_screen():
            """Print transient progress to stdout (overwritten in place with backspaces)."""
            inside_this_print = str("%s" % datetime.datetime.now())[:23].replace('.', ',') + " - INFO: Round " \
                                + str(round_count) + ': ' + str(unique_read_id + 1) + '/' + str(len_indices) + \
                                " AI " + str(len(accepted_rd_id_this_round)) + " AW " + str(len(accepted_words))
            sys.stdout.write(inside_this_print + '\b' * len(inside_this_print))
            sys.stdout.flush()

        def check_words_limit(inside_max_n_words):
            """Raise WordsLimitException when the projected word total exceeds the limit."""
            if accumulated_num_words + len(accepted_words) - prev_aw_count > inside_max_n_words:
                if this_process:
                    inside_memory_usage = " Mem " + str(
                        round(this_process.memory_info().rss / 1024.0 / 1024 / 1024, 3))
                else:
                    inside_memory_usage = ''
                log_handler.info("Round " + str(round_count) + ': ' + str(unique_read_id + 1) + '/' +
                                 str(len_indices) + " AI " + str(len(accepted_rd_id_this_round)) +
                                 " AW " + str(len(accepted_words)) + inside_memory_usage)
                raise WordsLimitException("")
        # core extending code
        # here efficiency is more important than code conciseness,
        # so there are four similar structure with minor differences
        reads_generator = tuple()
        while True:
            # if verbose:
            #     log_handler.info("Round " + str(round_count) + ": Start ...")
            if fq_info_in_memory:
                reads_generator = (this_read for this_read in fq_info_in_memory[0])
            else:
                if keep_seq_parts:
                    reads_generator = (this_read.strip().split("\t") for this_read in
                                       open(os.path.join(output_base, 'temp.indices.1'), 'r'))
                else:
                    reads_generator = (this_read.strip() for this_read in
                                       open(os.path.join(output_base, 'temp.indices.1'), 'r'))
            unique_read_id = 0
            if keep_seq_parts:
                if pre_grouped and g_duplicate_lines:
                    # branch 1/4: split sub-sequences + duplicate-group shortcuts
                    for unique_read_id in range(len_indices):
                        this_seq = next(reads_generator)
                        this_c_seq = next(reads_generator)
                        if unique_read_id not in accepted_rd_id:
                            if unique_read_id in line_to_accept:
                                # accepted transitively via a duplicate group
                                accepted_rd_id.add(unique_read_id)
                                accepted_rd_id_this_round.add(unique_read_id)
                                line_to_accept.remove(unique_read_id)
                                for this_seq_part, this_c_seq_part in zip(this_seq, this_c_seq):
                                    seq_len = len(this_seq_part)
                                    temp_length = seq_len - word_size
                                    for i in range(0, temp_length + 1, mesh_size):
                                        # add forward
                                        accepted_words.add(this_seq_part[i:i + word_size])
                                        # add reverse
                                        accepted_words.add(this_c_seq_part[temp_length - i:seq_len - i])
                            else:
                                accepted = False
                                for this_seq_part in this_seq:
                                    seq_len = len(this_seq_part)
                                    temp_length = seq_len - word_size
                                    for i in range(0, (temp_length + 1) // 2, jump_step):
                                        # from first kmer to the middle
                                        if this_seq_part[i:i + word_size] in accepted_words:
                                            accepted = True
                                            break
                                        # from last kmer to the middle
                                        if this_seq_part[temp_length - i:seq_len - i] in accepted_words:
                                            accepted = True
                                            break
                                    if accepted:
                                        break
                                if accepted:
                                    for this_seq_part, this_c_seq_part in zip(this_seq, this_c_seq):
                                        seq_len = len(this_seq_part)
                                        temp_length = seq_len - word_size
                                        for i in range(0, temp_length + 1, mesh_size):
                                            # add forward
                                            accepted_words.add(this_seq_part[i:i + word_size])
                                            # add reverse
                                            accepted_words.add(this_c_seq_part[temp_length - i:seq_len - i])
                                    accepted_rd_id.add(unique_read_id)
                                    accepted_rd_id_this_round.add(unique_read_id)
                                    if unique_read_id in l_with_duplicates:
                                        # mark every duplicate of this read as accepted-to-be
                                        which_group = l_with_duplicates[unique_read_id]
                                        for id_to_accept in g_duplicate_lines[which_group]:
                                            line_to_accept.add(id_to_accept)
                                            del l_with_duplicates[id_to_accept]
                                        line_to_accept.remove(unique_read_id)
                                        del g_duplicate_lines[which_group]
                        if echo_step != inf and unique_read_id % echo_step == 0:
                            echo_to_screen()
                        if unique_read_id % check_step == 0:
                            check_words_limit(maximum_n_words)
                    accepted_words, accepted_rd_id_this_round, prev_aw_count, round_count, accumulated_num_words \
                        = summarise_round(accepted_words, accepted_rd_id_this_round, prev_aw_count,
                                          round_count,
                                          accumulated_num_words, unique_read_id)
                else:
                    # branch 2/4: split sub-sequences, no duplicate grouping
                    for unique_read_id in range(len_indices):
                        this_seq = next(reads_generator)
                        this_c_seq = next(reads_generator)
                        if unique_read_id not in accepted_rd_id:
                            accepted = False
                            for this_seq_part in this_seq:
                                seq_len = len(this_seq_part)
                                temp_length = seq_len - word_size
                                for i in range(0, (temp_length + 1) // 2, jump_step):
                                    # from first kmer to the middle
                                    if this_seq_part[i:i + word_size] in accepted_words:
                                        accepted = True
                                        break
                                    # from last kmer to the middle
                                    if this_seq_part[temp_length - i:seq_len - i] in accepted_words:
                                        accepted = True
                                        break
                                if accepted:
                                    break
                            if accepted:
                                for this_seq_part, this_c_seq_part in zip(this_seq, this_c_seq):
                                    seq_len = len(this_seq_part)
                                    temp_length = seq_len - word_size
                                    for i in range(0, temp_length + 1, mesh_size):
                                        accepted_words.add(this_seq_part[i:i + word_size])
                                        accepted_words.add(this_c_seq_part[temp_length - i:seq_len - i])
                                accepted_rd_id.add(unique_read_id)
                                accepted_rd_id_this_round.add(unique_read_id)
                        if echo_step != inf and unique_read_id % echo_step == 0:
                            echo_to_screen()
                        if unique_read_id % check_step == 0:
                            check_words_limit(maximum_n_words)
                    accepted_words, accepted_rd_id_this_round, prev_aw_count, round_count, accumulated_num_words \
                        = summarise_round(accepted_words, accepted_rd_id_this_round, prev_aw_count,
                                          round_count,
                                          accumulated_num_words, unique_read_id)
            else:
                if pre_grouped and g_duplicate_lines:
                    # branch 3/4: whole sequences + duplicate-group shortcuts
                    for unique_read_id in range(len_indices):
                        this_seq = next(reads_generator)
                        this_c_seq = next(reads_generator)
                        if unique_read_id not in accepted_rd_id:
                            seq_len = len(this_seq)
                            temp_length = seq_len - word_size
                            if unique_read_id in line_to_accept:
                                accepted_rd_id.add(unique_read_id)
                                accepted_rd_id_this_round.add(unique_read_id)
                                line_to_accept.remove(unique_read_id)
                                for i in range(0, temp_length + 1, mesh_size):
                                    # add forward
                                    accepted_words.add(this_seq[i:i + word_size])
                                    # add reverse
                                    accepted_words.add(this_c_seq[temp_length - i:seq_len - i])
                            else:
                                accepted = False
                                for i in range(0, (temp_length + 1) // 2, jump_step):
                                    # from first kmer to the middle
                                    if this_seq[i:i + word_size] in accepted_words:
                                        accepted = True
                                        break
                                    # from last kmer to the middle
                                    if this_seq[temp_length - i:seq_len - i] in accepted_words:
                                        accepted = True
                                        break
                                if accepted:
                                    for i in range(0, temp_length + 1, mesh_size):
                                        # add forward
                                        accepted_words.add(this_seq[i:i + word_size])
                                        # add reverse
                                        accepted_words.add(this_c_seq[temp_length - i:seq_len - i])
                                    accepted_rd_id.add(unique_read_id)
                                    accepted_rd_id_this_round.add(unique_read_id)
                                    if unique_read_id in l_with_duplicates:
                                        which_group = l_with_duplicates[unique_read_id]
                                        for id_to_accept in g_duplicate_lines[which_group]:
                                            line_to_accept.add(id_to_accept)
                                            del l_with_duplicates[id_to_accept]
                                        line_to_accept.remove(unique_read_id)
                                        del g_duplicate_lines[which_group]
                        if echo_step != inf and unique_read_id % echo_step == 0:
                            echo_to_screen()
                        if unique_read_id % check_step == 0:
                            check_words_limit(maximum_n_words)
                    accepted_words, accepted_rd_id_this_round, prev_aw_count, round_count, accumulated_num_words \
                        = summarise_round(accepted_words, accepted_rd_id_this_round, prev_aw_count,
                                          round_count,
                                          accumulated_num_words, unique_read_id)
                else:
                    # branch 4/4: whole sequences, no duplicate grouping
                    for unique_read_id in range(len_indices):
                        this_seq = next(reads_generator)
                        this_c_seq = next(reads_generator)
                        if unique_read_id not in accepted_rd_id:
                            accepted = False
                            seq_len = len(this_seq)
                            temp_length = seq_len - word_size
                            for i in range(0, (temp_length + 1) // 2, jump_step):
                                # from first kmer to the middle
                                if this_seq[i:i + word_size] in accepted_words:
                                    accepted = True
                                    break
                                # from last kmer to the middle
                                if this_seq[temp_length - i:seq_len - i] in accepted_words:
                                    accepted = True
                                    break
                            if accepted:
                                for i in range(0, temp_length + 1, mesh_size):
                                    accepted_words.add(this_seq[i:i + word_size])
                                    accepted_words.add(this_c_seq[temp_length - i:seq_len - i])
                                accepted_rd_id.add(unique_read_id)
                                accepted_rd_id_this_round.add(unique_read_id)
                        if echo_step != inf and unique_read_id % echo_step == 0:
                            echo_to_screen()
                        if unique_read_id % check_step == 0:
                            check_words_limit(maximum_n_words)
                    accepted_words, accepted_rd_id_this_round, prev_aw_count, round_count, accumulated_num_words \
                        = summarise_round(accepted_words, accepted_rd_id_this_round, prev_aw_count,
                                          round_count,
                                          accumulated_num_words, unique_read_id)
            reads_generator.close()
    except KeyboardInterrupt:
        reads_generator.close()
        if echo_step != inf:
            sys.stdout.write(' ' * 100 + '\b' * 100)
            sys.stdout.flush()
        log_handler.info(
            "Round " + str(round_count) + ': ' + str(unique_read_id + 1) + '/' + str(len_indices) + " AI " + str(
                len(accepted_rd_id_this_round)) + " AW " + str(len(accepted_words)))
        log_handler.info("KeyboardInterrupt")
    except NoMoreReads:
        reads_generator.close()
        if round_count < min_rounds:
            log_handler.info("No more reads found and terminated ...")
            log_handler.warning("Terminated at an insufficient number of rounds. "
                                "Try decrease '-w' if failed in the end.")
        else:
            log_handler.info("No more reads found and terminated ...")
    except WordsLimitException:
        reads_generator.close()
        if round_count <= min_rounds:
            log_handler.info("Hit the words limit and terminated ...")
            log_handler.warning("Terminated at an insufficient number of rounds, see '--max-n-words'/'--max-extending-len' for more.")
        else:
            log_handler.info("Hit the words limit and terminated ...")
    except RoundLimitException as r_lim:
        reads_generator.close()
        log_handler.info("Hit the round limit " + str(r_lim) + " and terminated ...")
    # release the large word set before returning; only the read ids are needed
    del reads_generator
    accepted_words = set()
    accepted_rd_id_this_round = set()
    del l_with_duplicates
    return accepted_rd_id
def extending_with_lim(word_size, seed_file, original_fq_files, len_indices, pre_grouped,
                       groups_of_duplicate_lines, lines_with_duplicates, group_id_to_read_counts, fq_info_in_memory,
                       output_base, max_rounds, min_rounds, fg_out_per_round, jump_step, mesh_size, verbose, resume,
                       all_read_limits, extending_dist_limit, maximum_n_words, keep_seq_parts, low_qual_pattern,
                       mean_read_len, mean_base_cov,
                       echo_step, log_handler):
    """Iteratively extend the accepted-read set from seed k-mers, tracking remaining extension distance.

    Like extending_no_lim, but `accepted_words` is a dict mapping each word to the remaining
    extension budget (initialized from `extending_dist_limit`); a read is only propagated when
    its matched word still has budget beyond `mean_read_len`, and newly added words inherit a
    budget reduced by their offset from the matched position. Rounds end via NoMoreReads,
    RoundLimitException (max_rounds) or WordsLimitException (maximum_n_words).

    Returns the set of accepted read ids (`accepted_rd_id`).

    NOTE: relies on module-level helpers not visible here (chop_seqs_as_empty_dict,
    chop_seq_list_as_empty_dict, fq_simple_generator, write_fq_results, this_process, inf,
    RoundLimitException).
    """
    # adding initial word
    log_handler.info("Adding initial words ...")
    if keep_seq_parts:
        accepted_words = chop_seq_list_as_empty_dict(
            seq_iter=fq_simple_generator(seed_file[0], split_pattern=low_qual_pattern, min_sub_seq=word_size),
            word_size=word_size, val_len=extending_dist_limit[0])
        for go_type in range(1, len(seed_file)):
            chop_seq_list_as_empty_dict(
                seq_iter=fq_simple_generator(seed_file[go_type], split_pattern=low_qual_pattern,
                                             min_sub_seq=word_size),
                word_size=word_size, val_len=extending_dist_limit[go_type], previous_words=accepted_words)
    else:
        accepted_words = chop_seqs_as_empty_dict(
            seq_iter=fq_simple_generator(seed_file[0]),
            word_size=word_size, val_len=extending_dist_limit[0])
        for go_type in range(1, len(seed_file)):
            chop_seqs_as_empty_dict(
                seq_iter=fq_simple_generator(seed_file[go_type]),
                word_size=word_size, val_len=extending_dist_limit[go_type], previous_words=accepted_words)
    log_handler.info("AW " + str(len(accepted_words)))
    accepted_rd_id = set()
    accepted_rd_id_this_round = set()
    # work on copies so the caller's dedup bookkeeping is not mutated
    g_duplicate_lines = deepcopy(groups_of_duplicate_lines)
    l_with_duplicates = deepcopy(lines_with_duplicates)
    # here a dict (read id -> remaining budget), unlike the set used in extending_no_lim
    line_to_accept = {}
    round_count = 1
    initial_aw_count = len(accepted_words)
    prev_aw_count = initial_aw_count
    accumulated_num_words = initial_aw_count
    # check the words limit ~1000 times per round, not per read
    check_times = 1000
    check_step = max(int(len_indices / check_times), 1)
    if fg_out_per_round:
        round_dir = os.path.join(output_base, "intermediate_reads")
        if not os.path.exists(round_dir):
            os.mkdir(round_dir)
    # fix: warn only when psutil is NOT available (this_process is None),
    # matching both the message text and extending_no_lim
    if not this_process and verbose:
        log_handler.warning("Package psutil is not installed, so that memory usage will not be logged\n"
                            "Don't worry. This will not affect the result.")
    try:
        def summarise_round(acc_words, acc_contig_id_this_round, pre_aw, r_count, acc_num_words, unique_id):
            """Log round stats, optionally write per-round fq output, and raise to terminate."""
            len_aw = len(acc_words)
            len_al = len(acc_contig_id_this_round)
            # for check words limit; memory control
            acc_num_words += len_aw - pre_aw
            if this_process:
                inside_memory_usage = " Mem " + str(round(this_process.memory_info().rss / 1024.0 / 1024 / 1024, 3))
            else:
                inside_memory_usage = ''
            if fg_out_per_round:
                write_fq_results(original_fq_files, acc_contig_id_this_round,
                                 os.path.join(round_dir, "Round." + str(r_count)),
                                 os.path.join(output_base, 'temp.indices.2'), fq_info_in_memory, all_read_limits,
                                 echo_step, verbose, bool(fq_info_in_memory), log_handler)
                acc_contig_id_this_round = set()
            log_handler.info("Round " + str(r_count) + ': ' + str(unique_id + 1) + '/' + str(len_indices) + " AI " + str(
                len_al) + " AW " + str(len_aw) + inside_memory_usage)
            # cost too much time
            # acc_words = {in_k: in_v for in_k, in_v in acc_words.items() if in_v < extending_dist_limit}
            #
            # no new words this round -> converged
            if len_aw == pre_aw:
                raise NoMoreReads('')
            pre_aw = len(acc_words)
            #
            if r_count == max_rounds:
                raise RoundLimitException(r_count)
            r_count += 1
            return acc_words, acc_contig_id_this_round, pre_aw, r_count, acc_num_words

        def echo_to_screen():
            """Print transient progress to stdout (overwritten in place with backspaces)."""
            inside_this_print = str("%s" % datetime.datetime.now())[:23].replace('.', ',') + " - INFO: Round " \
                                + str(round_count) + ': ' + str(unique_read_id + 1) + '/' + str(len_indices) + \
                                " AI " + str(len(accepted_rd_id_this_round)) + " AW " + str(len(accepted_words))
            sys.stdout.write(inside_this_print + '\b' * len(inside_this_print))
            sys.stdout.flush()

        def check_words_limit(inside_max_n_words):
            """Raise WordsLimitException when the projected word total exceeds the limit."""
            if accumulated_num_words + len(accepted_words) - prev_aw_count > inside_max_n_words:
                if this_process:
                    inside_memory_usage = " Mem " + str(
                        round(this_process.memory_info().rss / 1024.0 / 1024 / 1024, 3))
                else:
                    inside_memory_usage = ''
                log_handler.info("Round " + str(round_count) + ': ' + str(unique_read_id + 1) + '/' +
                                 str(len_indices) + " AI " + str(len(accepted_rd_id_this_round)) +
                                 " AW " + str(len(accepted_words)) + inside_memory_usage)
                raise WordsLimitException("")
        # core extending code
        # here efficiency is more important than code conciseness,
        # so there are four similar structure with minor differences
        while True:
            if verbose:
                log_handler.info("Round " + str(round_count) + ": Start ...")
            if fq_info_in_memory:
                reads_generator = (this_read for this_read in fq_info_in_memory[0])
            else:
                if keep_seq_parts:
                    reads_generator = (this_read.strip().split("\t") for this_read in
                                       open(os.path.join(output_base, 'temp.indices.1'), 'r'))
                else:
                    reads_generator = (this_read.strip() for this_read in
                                       open(os.path.join(output_base, 'temp.indices.1'), 'r'))
            unique_read_id = 0
            if keep_seq_parts:
                if pre_grouped and g_duplicate_lines:
                    # branch 1/4: split sub-sequences + duplicate-group shortcuts
                    for unique_read_id in range(len_indices):
                        this_seq = next(reads_generator)
                        this_c_seq = next(reads_generator)
                        if unique_read_id not in accepted_rd_id:
                            if unique_read_id in line_to_accept:
                                accepted_rd_id.add(unique_read_id)
                                accepted_rd_id_this_round.add(unique_read_id)
                                group_left = line_to_accept.pop(unique_read_id)
                                for this_seq_part, this_c_seq_part in zip(this_seq, this_c_seq):
                                    seq_len = len(this_seq_part)
                                    temp_length = seq_len - word_size
                                    for i in range(0, temp_length + 1, mesh_size):
                                        # add forward & reverse
                                        this_w = this_seq_part[i:i + word_size]
                                        accepted_words[this_w] \
                                            = accepted_words[this_c_seq_part[temp_length - i:seq_len - i]] \
                                            = max(group_left, accepted_words.get(this_w, 0))
                            else:
                                accepted = False
                                accept_go_to_word = 0
                                part_accumulated_go_to = 0
                                accept_dist = 0
                                for this_seq_part in this_seq:
                                    seq_len = len(this_seq_part)
                                    temp_length = seq_len - word_size
                                    for i in range(0, (temp_length + 1) // 2, jump_step):
                                        # from first kmer to the middle
                                        this_w = this_seq_part[i:i + word_size]
                                        if this_w in accepted_words:
                                            accepted = True
                                            accept_go_to_word = part_accumulated_go_to + i
                                            accept_dist = accepted_words[this_w]
                                            break
                                        # from last kmer to the middle
                                        this_w = this_seq_part[temp_length - i:seq_len - i]
                                        if this_w in accepted_words:
                                            accepted = True
                                            accept_go_to_word = part_accumulated_go_to + temp_length - i
                                            accept_dist = accepted_words[this_w]
                                            break
                                    if accepted:
                                        break
                                    part_accumulated_go_to += seq_len
                                if accepted:
                                    # only propagate words while extension budget remains
                                    if accept_dist - mean_read_len > 0:
                                        part_accumulated_go_to = 0
                                        for this_seq_part, this_c_seq_part in zip(this_seq, this_c_seq):
                                            seq_len = len(this_seq_part)
                                            temp_length = seq_len - word_size
                                            for i in range(0, temp_length + 1, mesh_size):
                                                this_dist = accept_dist - \
                                                            abs(accept_go_to_word - (part_accumulated_go_to + i))
                                                this_w = this_seq_part[i:i + word_size]
                                                # add forward & reverse
                                                accepted_words[this_w] \
                                                    = accepted_words[this_c_seq_part[temp_length - i:seq_len - i]] \
                                                    = max(this_dist, accepted_words.get(this_w, 0))
                                            part_accumulated_go_to += seq_len
                                    accepted_rd_id.add(unique_read_id)
                                    accepted_rd_id_this_round.add(unique_read_id)
                                    if unique_read_id in l_with_duplicates:
                                        which_group = l_with_duplicates[unique_read_id]
                                        # N_reads = (contig_len - read_len) * (base_cov / read_len)
                                        expected_contig_len = \
                                            group_id_to_read_counts[which_group] * mean_read_len / mean_base_cov + \
                                            mean_read_len
                                        group_left = accept_dist - (expected_contig_len - word_size + 1)
                                        if group_left < 0:
                                            for id_to_accept in g_duplicate_lines[which_group]:
                                                line_to_accept[id_to_accept] = group_left
                                                del l_with_duplicates[id_to_accept]
                                            del line_to_accept[unique_read_id]
                                            del g_duplicate_lines[which_group]
                                        else:
                                            g_duplicate_lines[which_group].remove(unique_read_id)
                                            del l_with_duplicates[unique_read_id]
                        if echo_step != inf and unique_read_id % echo_step == 0:
                            echo_to_screen()
                        if unique_read_id % check_step == 0:
                            check_words_limit(maximum_n_words)
                    accepted_words, accepted_rd_id_this_round, prev_aw_count, round_count, accumulated_num_words \
                        = summarise_round(accepted_words, accepted_rd_id_this_round, prev_aw_count,
                                          round_count, accumulated_num_words, unique_read_id)
                else:
                    # branch 2/4: split sub-sequences, no duplicate grouping
                    for unique_read_id in range(len_indices):
                        this_seq = next(reads_generator)
                        this_c_seq = next(reads_generator)
                        if unique_read_id not in accepted_rd_id:
                            accepted = False
                            accept_go_to_word = 0
                            part_accumulated_go_to = 0
                            accept_dist = 0
                            for this_seq_part in this_seq:
                                seq_len = len(this_seq_part)
                                temp_length = seq_len - word_size
                                for i in range(0, (temp_length + 1) // 2, jump_step):
                                    # from first kmer to the middle
                                    this_w = this_seq_part[i:i + word_size]
                                    if this_w in accepted_words:
                                        accepted = True
                                        accept_go_to_word = part_accumulated_go_to + i
                                        accept_dist = accepted_words[this_w]
                                        break
                                    # from last kmer to the middle
                                    this_w = this_seq_part[temp_length - i:seq_len - i]
                                    if this_w in accepted_words:
                                        accepted = True
                                        accept_go_to_word = part_accumulated_go_to + temp_length - i
                                        accept_dist = accepted_words[this_w]
                                        break
                                if accepted:
                                    break
                                part_accumulated_go_to += seq_len
                            if accepted:
                                if accept_dist - mean_read_len > 0:
                                    part_accumulated_go_to = 0
                                    for this_seq_part, this_c_seq_part in zip(this_seq, this_c_seq):
                                        seq_len = len(this_seq_part)
                                        temp_length = seq_len - word_size
                                        for i in range(0, temp_length + 1, mesh_size):
                                            this_dist = accept_dist - \
                                                        abs(accept_go_to_word - (part_accumulated_go_to + i))
                                            # if this_dist < extending_dist_limit:
                                            this_w = this_seq_part[i:i + word_size]
                                            accepted_words[this_w] \
                                                = accepted_words[this_c_seq_part[temp_length - i:seq_len - i]] \
                                                = max(this_dist, accepted_words.get(this_w, 0))
                                        part_accumulated_go_to += seq_len
                                accepted_rd_id.add(unique_read_id)
                                accepted_rd_id_this_round.add(unique_read_id)
                        if echo_step != inf and unique_read_id % echo_step == 0:
                            echo_to_screen()
                        if unique_read_id % check_step == 0:
                            check_words_limit(maximum_n_words)
                    accepted_words, accepted_rd_id_this_round, prev_aw_count, round_count, accumulated_num_words \
                        = summarise_round(accepted_words, accepted_rd_id_this_round, prev_aw_count,
                                          round_count,
                                          accumulated_num_words, unique_read_id)
            else:
                if pre_grouped and g_duplicate_lines:
                    # branch 3/4: whole sequences + duplicate-group shortcuts
                    for unique_read_id in range(len_indices):
                        this_seq = next(reads_generator)
                        this_c_seq = next(reads_generator)
                        if unique_read_id not in accepted_rd_id:
                            seq_len = len(this_seq)
                            temp_length = seq_len - word_size
                            if unique_read_id in line_to_accept:
                                accepted_rd_id.add(unique_read_id)
                                accepted_rd_id_this_round.add(unique_read_id)
                                group_left = line_to_accept.pop(unique_read_id)
                                for i in range(0, temp_length + 1, mesh_size):
                                    # add forward & reverse
                                    this_w = this_seq[i:i + word_size]
                                    accepted_words[this_w] \
                                        = accepted_words[this_c_seq[temp_length - i:seq_len - i]] \
                                        = max(group_left, accepted_words.get(this_w, 0))
                            else:
                                accepted = False
                                accept_go_to_word = 0
                                accept_dist = 0
                                for i in range(0, (temp_length + 1) // 2, jump_step):
                                    # from first kmer to the middle
                                    this_w = this_seq[i:i + word_size]
                                    if this_w in accepted_words:
                                        accepted = True
                                        accept_go_to_word = i
                                        accept_dist = accepted_words[this_w]
                                        break
                                    # from last kmer to the middle
                                    this_w = this_seq[temp_length - i:seq_len - i]
                                    if this_w in accepted_words:
                                        accepted = True
                                        accept_go_to_word = temp_length - i
                                        accept_dist = accepted_words[this_w]
                                        break
                                if accepted:
                                    if accept_dist - mean_read_len > 0:
                                        for i in range(0, temp_length + 1, mesh_size):
                                            this_dist = accept_dist - abs(accept_go_to_word - i)
                                            # if this_dist < extending_dist_limit:
                                            # add forward & reverse
                                            this_w = this_seq[i:i + word_size]
                                            accepted_words[this_w] \
                                                = accepted_words[this_c_seq[temp_length - i:seq_len - i]] \
                                                = max(this_dist, accepted_words.get(this_w, 0))
                                    accepted_rd_id.add(unique_read_id)
                                    accepted_rd_id_this_round.add(unique_read_id)
                                    if unique_read_id in l_with_duplicates:
                                        which_group = l_with_duplicates[unique_read_id]
                                        # using unique reads
                                        expected_contig_len = \
                                            group_id_to_read_counts[which_group] * mean_read_len / mean_base_cov + \
                                            mean_read_len
                                        # print(group_id_to_read_counts[which_group], expected_contig_len)
                                        group_left = accept_dist - (expected_contig_len - word_size + 1)
                                        if group_left < 0:
                                            for id_to_accept in g_duplicate_lines[which_group]:
                                                line_to_accept[id_to_accept] = group_left
                                                del l_with_duplicates[id_to_accept]
                                            del line_to_accept[unique_read_id]
                                            del g_duplicate_lines[which_group]
                                        else:
                                            g_duplicate_lines[which_group].remove(unique_read_id)
                                            del l_with_duplicates[unique_read_id]
                        if echo_step != inf and unique_read_id % echo_step == 0:
                            echo_to_screen()
                        if unique_read_id % check_step == 0:
                            check_words_limit(maximum_n_words)
                    accepted_words, accepted_rd_id_this_round, prev_aw_count, round_count, accumulated_num_words \
                        = summarise_round(accepted_words, accepted_rd_id_this_round, prev_aw_count,
                                          round_count,
                                          accumulated_num_words, unique_read_id)
                else:
                    # branch 4/4: whole sequences, no duplicate grouping
                    for unique_read_id in range(len_indices):
                        this_seq = next(reads_generator)
                        this_c_seq = next(reads_generator)
                        if unique_read_id not in accepted_rd_id:
                            accepted = False
                            accept_go_to_word = 0
                            accept_dist = 0
                            seq_len = len(this_seq)
                            temp_length = seq_len - word_size
                            for i in range(0, (temp_length + 1) // 2, jump_step):
                                # from first kmer to the middle
                                this_w = this_seq[i:i + word_size]
                                if this_w in accepted_words:
                                    accepted = True
                                    accept_go_to_word = i
                                    accept_dist = accepted_words[this_w]
                                    break
                                # from last kmer to the middle
                                this_w = this_seq[temp_length - i:seq_len - i]
                                if this_w in accepted_words:
                                    accepted = True
                                    # fix: record the matched position, as every parallel branch does;
                                    # it was left at 0, which miscomputed this_dist below
                                    accept_go_to_word = temp_length - i
                                    accept_dist = accepted_words[this_w]
                                    break
                            if accepted:
                                if accept_dist - mean_read_len > 0:
                                    for i in range(0, temp_length + 1, mesh_size):
                                        this_dist = accept_dist - abs(accept_go_to_word - i)
                                        # if this_dist < extending_dist_limit:
                                        this_w = this_seq[i:i + word_size]
                                        accepted_words[this_w] \
                                            = accepted_words[this_c_seq[temp_length - i:seq_len - i]] \
                                            = max(this_dist, accepted_words.get(this_w, 0))
                                accepted_rd_id.add(unique_read_id)
                                accepted_rd_id_this_round.add(unique_read_id)
                        if echo_step != inf and unique_read_id % echo_step == 0:
                            echo_to_screen()
                        if unique_read_id % check_step == 0:
                            check_words_limit(maximum_n_words)
                    accepted_words, accepted_rd_id_this_round, prev_aw_count, round_count, accumulated_num_words \
                        = summarise_round(accepted_words, accepted_rd_id_this_round, prev_aw_count,
                                          round_count,
                                          accumulated_num_words, unique_read_id)
            reads_generator.close()
    except KeyboardInterrupt:
        reads_generator.close()
        if echo_step != inf:
            sys.stdout.write(' ' * 100 + '\b' * 100)
            sys.stdout.flush()
        log_handler.info(
            "Round " + str(round_count) + ': ' + str(unique_read_id + 1) + '/' + str(len_indices) + " AI " + str(
                len(accepted_rd_id_this_round)) + " AW " + str(len(accepted_words)))
        log_handler.info("KeyboardInterrupt")
    except NoMoreReads:
        reads_generator.close()
        if round_count < min_rounds:
            log_handler.info("No more reads found and terminated ...")
            log_handler.warning("Terminated at an insufficient number of rounds. "
                                "Try decrease '-w' if failed in the end.")
        else:
            log_handler.info("No more reads found and terminated ...")
    except WordsLimitException:
        reads_generator.close()
        if round_count <= min_rounds:
            log_handler.info("Hit the words limit and terminated ...")
            log_handler.warning("Terminated at an insufficient number of rounds, see '--max-n-words'/'--max-extending-len' for more.")
        else:
            log_handler.info("Hit the words limit and terminated ...")
    except RoundLimitException as r_lim:
        reads_generator.close()
        log_handler.info("Hit the round limit " + str(r_lim) + " and terminated ...")
    # release the large word dict before returning; only the read ids are needed
    del reads_generator
    accepted_words = set()
    accepted_rd_id_this_round = set()
    del l_with_duplicates
    return accepted_rd_id
def get_anti_with_fas(word_size, anti_words, anti_input, original_fq_files, log_handler):
    """Scan the original FASTQ files and collect (name, direction) pairs of reads matching anti-seed words.

    A read matches when any forward or reverse-complement k-mer of length `word_size`
    is in `anti_words`. Returns the set of matching (read_name, direction) tuples.
    When `anti_input` is falsy, every file is opened but not scanned and an empty set
    is returned.

    NOTE: relies on `complementary_seq`, defined elsewhere in this module.
    """
    anti_lines = set()
    pre_reading_handler = [open(fq_file, 'r') for fq_file in original_fq_files]
    line_count = 0

    def add_to_anti_lines(here_head):
        """Parse a fq header into (name, direction) and record it; exits on unparsable heads."""
        try:
            if ' ' in here_head:
                # Illumina CASAVA 1.8+ style: "name 1:N:0:..."
                here_head_split = here_head.split(' ')
                this_name, direction = here_head_split[0], int(here_head_split[1][0])
            elif '#' in here_head:
                here_head_split = here_head.split('#')
                this_name, direction = here_head_split[0], int(here_head_split[1].strip("/")[0])
            elif here_head[-2] == "/" and here_head[-1].isdigit():  # 2019-04-22 added
                # old-style "name/1" suffix
                this_name, direction = here_head[:-2], int(here_head[-1])
            else:
                this_name, direction = here_head, 1
        except (ValueError, IndexError):
            log_handler.error('Unrecognized fq format in ' + str(line_count))
            exit()
        else:
            anti_lines.add((this_name, direction))

    for file_in in pre_reading_handler:
        line = file_in.readline()
        if anti_input:
            while line:
                if line.startswith("@"):
                    this_head = line[1:].strip()
                    this_seq = file_in.readline().strip()
                    # drop illegal reads
                    seq_len = len(this_seq)
                    if seq_len < word_size:
                        # reads shorter than word_size are presumably blacklisted outright
                        # by adding them to anti_lines — TODO confirm intent
                        line_count += 4
                        for i in range(3):
                            line = file_in.readline()
                        add_to_anti_lines(this_head)
                        continue
                    this_c_seq = complementary_seq(this_seq)
                    temp_length = seq_len - word_size
                    for i in range(0, temp_length + 1):
                        if this_seq[i:i + word_size] in anti_words:
                            add_to_anti_lines(this_head)
                            break
                        if this_c_seq[i:i + word_size] in anti_words:
                            add_to_anti_lines(this_head)
                            break
                else:
                    log_handler.error("Illegal fq format in line " + str(line_count) + ' ' + str(line))
                    exit()
                # NOTE(review): only +2 is added per 4-line fq record on this path, so the
                # line numbers reported in error messages look approximate — verify
                line_count += 1
                # skip the "+" and quality lines; the third readline is the next record's head
                for i in range(3):
                    line = file_in.readline()
                line_count += 1
        file_in.close()
    return anti_lines
def making_seed_reads_using_mapping(seed_file, original_fq_files,
                                    out_base, resume, verbose_log, threads, random_seed, organelle_type, prefix,
                                    keep_temp, bowtie2_other_options, log_handler, which_bowtie2=""):
    """Map raw reads to the seed sequence with bowtie2 and write the mapped reads as the seed fq.

    Creates `<out_base>/<prefix>seed/`, sanitizes the seed fasta names when no prebuilt
    bowtie2 index is found next to the seed, runs the mapping, optionally removes the
    index files, and exits with an error when no (or too few) seed reads were produced.

    Returns (total_seed_fq, total_seed_sam, seed_file) — seed_file possibly replaced by
    the sanitized copy inside the seed directory.

    NOTE: relies on `remove_db_postfix`, `check_fasta_seq_names` and `map_with_bowtie2`,
    defined elsewhere in this module.
    """
    seed_dir = os.path.join(out_base, prefix + "seed")
    if not os.path.exists(seed_dir):
        os.mkdir(seed_dir)
    # only rewrite the seed when a complete large-index (.bt2l) set is not already present
    if sum([os.path.exists(remove_db_postfix(seed_file) + ".index" + postfix)
            for postfix in
            (".1.bt2l", ".2.bt2l", ".3.bt2l", ".4.bt2l", ".rev.1.bt2l", ".rev.2.bt2l")]) != 6:
        new_seed_file = os.path.join(seed_dir, os.path.basename(seed_file))
        check_fasta_seq_names(seed_file, new_seed_file, log_handler)
        seed_file = new_seed_file
    bowtie_out_base = os.path.join(seed_dir, prefix + organelle_type + ".initial")
    total_seed_fq = bowtie_out_base + ".fq"
    total_seed_sam = bowtie_out_base + ".sam"
    seed_index_base = seed_file + '.index'
    map_with_bowtie2(seed_file=seed_file, original_fq_files=original_fq_files,
                     bowtie_out=bowtie_out_base, resume=resume, threads=threads, random_seed=random_seed,
                     generate_fq=True, target_echo_name="seed", log_handler=log_handler, verbose_log=verbose_log,
                     which_bowtie2=which_bowtie2, bowtie2_mode="", bowtie2_other_options=bowtie2_other_options)
    if not keep_temp:
        for seed_index_file in [x for x in os.listdir(seed_dir) if x.startswith(os.path.basename(seed_index_base))]:
            os.remove(os.path.join(seed_dir, seed_index_file))
    seed_fq_size = os.path.getsize(total_seed_fq)
    if not seed_fq_size:
        # NOTE(review): when log_handler is falsy this branch neither logs nor exits,
        # and the info call below would fail — presumably log_handler is always given; verify
        if log_handler:
            log_handler.error("No " + str(organelle_type) + " seed reads found!")
            log_handler.error("Please check your raw data or change your " + str(organelle_type) + " seed!")
            exit()
    log_handler.info("Seed reads made: " + total_seed_fq + " (" + str(int(seed_fq_size)) + " bytes)")
    # 10000 bytes is a heuristic lower bound for a usable seed fq
    if seed_fq_size < 10000:
        log_handler.error("Too few seed reads found! "
                          "Please change your seed file (-s) or "
                          "increase your data input (--max-reads/--reduce-reads-for-coverage)!")
        exit()
    return total_seed_fq, total_seed_sam, seed_file
def get_anti_lines_using_mapping(anti_seed, seed_sam_files, original_fq_files,
                                 out_base, resume, verbose_log, threads,
                                 random_seed, prefix, keep_temp, bowtie2_other_options, log_handler, which_bowtie2=""):
    """Map raw reads to the anti-seed with bowtie2 and return read heads that hit it but not the seed.

    When `anti_seed` is given, sanitizes it into the seed directory, maps the original fq
    files against it, and returns the SAM heads of anti-seed hits minus the heads already
    hitting the seed (`seed_sam_files`). Returns an empty set when no anti-seed is given.

    NOTE: relies on `check_fasta_seq_names` and `map_with_bowtie2`, defined elsewhere
    in this module; the SAM parser is imported locally to avoid paying for it when unused
    — TODO confirm that is the reason for the function-level import.
    """
    from GetOrganelleLib.sam_parser import get_heads_from_sam_fast
    seed_dir = os.path.join(out_base, prefix + "seed")
    if not os.path.exists(seed_dir):
        os.mkdir(seed_dir)
    if anti_seed:
        new_anti_seed = os.path.join(seed_dir, os.path.basename(anti_seed))
        check_fasta_seq_names(anti_seed, new_anti_seed, log_handler)
        anti_seed = new_anti_seed
    else:
        anti_seed = ""
    anti_index_base = anti_seed + '.index'
    bowtie_out_base = os.path.join(out_base, prefix + "anti_seed_bowtie")
    # [temp sam, final sam] — only the final one is parsed below
    anti_seed_sam = [os.path.join(out_base, x + prefix + "anti_seed_bowtie.sam") for x in ("temp.", "")]
    if anti_seed:
        map_with_bowtie2(seed_file=anti_seed, original_fq_files=original_fq_files,
                         bowtie_out=bowtie_out_base, resume=resume, threads=threads, random_seed=random_seed,
                         log_handler=log_handler, target_echo_name="anti-seed", generate_fq=False, verbose_log=verbose_log,
                         which_bowtie2=which_bowtie2, bowtie2_mode="", bowtie2_other_options=bowtie2_other_options)
        log_handler.info("Parsing bowtie2 result ...")
        # anti-lines are anti-seed hits that are NOT also seed hits
        anti_lines = get_heads_from_sam_fast(anti_seed_sam[1]) - get_heads_from_sam_fast(*seed_sam_files)
        log_handler.info("Parsing bowtie2 result finished ...")
    else:
        anti_lines = set()
    if not keep_temp:
        for anti_index_file in [x for x in os.listdir(seed_dir) if x.startswith(os.path.basename(anti_index_base))]:
            os.remove(os.path.join(seed_dir, anti_index_file))
    return anti_lines
def assembly_with_spades(spades_kmer, spades_out_put, parameters, out_base, prefix, original_fq_files, reads_paired,
                         which_spades, verbose_log, resume, threads, log_handler):
    """Build and run the SPAdes command on the extended reads, then triage its outcome.

    Returns True when SPAdes produced something usable (possibly only for a
    subset of kmers, logged as warnings) and False when assembling failed
    outright. Empty input fastq files are skipped because SPAdes rejects them.
    """
    # honor a user-supplied '-k' inside `parameters`; otherwise use our kmer list
    if '-k' in parameters or not spades_kmer:
        kmer = ''
    else:
        kmer = '-k ' + spades_kmer
    if resume and os.path.exists(spades_out_put):
        # resume a previous SPAdes run in place instead of rebuilding the command
        spades_command = os.path.join(which_spades, "spades.py") + " --continue -o " + spades_out_put
    else:
        spades_out_command = '-o ' + spades_out_put
        if reads_paired['input'] and reads_paired['pair_out']:
            all_unpaired = []
            # spades does not accept empty files
            if os.path.getsize(os.path.join(out_base, prefix + "extended_1_unpaired.fq")):
                all_unpaired.append(os.path.join(out_base, prefix + "extended_1_unpaired.fq"))
            if os.path.getsize(os.path.join(out_base, prefix + "extended_2_unpaired.fq")):
                all_unpaired.append(os.path.join(out_base, prefix + "extended_2_unpaired.fq"))
            # any extra single-end libraries beyond the first read pair
            for iter_unpaired in range(len(original_fq_files) - 2):
                if os.path.getsize(str(os.path.join(out_base, prefix + "extended_" + str(iter_unpaired + 3) + ".fq"))):
                    all_unpaired.append(
                        str(os.path.join(out_base, prefix + "extended_" + str(iter_unpaired + 3) + ".fq")))
            if os.path.getsize(os.path.join(out_base, prefix + "extended_1_paired.fq")):
                spades_command = ' '.join(
                    [os.path.join(which_spades, "spades.py"), '-t', str(threads), parameters, '-1',
                     os.path.join(out_base, prefix + "extended_1_paired.fq"), '-2',
                     os.path.join(out_base, prefix + "extended_2_paired.fq")] +
                    ['--s' + str(i + 1) + ' ' + out_f for i, out_f in enumerate(all_unpaired)] +
                    [kmer, spades_out_command]).strip()
            else:
                # log_handler.warning("No paired reads found for the target!?")
                spades_command = ' '.join(
                    [os.path.join(which_spades, "spades.py"), '-t', str(threads), parameters] +
                    ['--s' + str(i + 1) + ' ' + out_f for i, out_f in enumerate(all_unpaired)] +
                    [kmer, spades_out_command]).strip()
        else:
            # single-end only input: feed every non-empty extended file as --sN
            all_unpaired = []
            for iter_unpaired in range(len(original_fq_files)):
                if os.path.getsize(str(os.path.join(out_base, prefix + "extended_" + str(iter_unpaired + 1) + ".fq"))):
                    all_unpaired.append(
                        str(os.path.join(out_base, prefix + "extended_" + str(iter_unpaired + 1) + ".fq")))
            spades_command = ' '.join(
                [os.path.join(which_spades, "spades.py"), '-t', str(threads), parameters] +
                ['--s' + str(i + 1) + ' ' + out_f for i, out_f in enumerate(all_unpaired)] +
                [kmer, spades_out_command]).strip()
    log_handler.info(spades_command)
    spades_running = subprocess.Popen(spades_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    # output, err = spades_running.communicate()
    # monitor_spades_log streams/collects SPAdes output while it runs
    output = monitor_spades_log(spades_running, log_handler)
    if not os.path.exists(spades_out_put):
        log_handler.error("Assembling failed.")
        return False
    elif "== Error ==" in output or "terminated by segmentation fault" in output:
        # check when other kmer assembly results were produced
        real_kmer_values = sorted([int(kmer_d[1:])
                                   for kmer_d in os.listdir(spades_out_put)
                                   if os.path.isdir(os.path.join(spades_out_put, kmer_d))
                                   and kmer_d.startswith("K")])
        real_kmer_values = [str(k_val) for k_val in real_kmer_values]
        temp_res = False
        failed_at_k = None
        # a kmer round counts as usable only if its assembly_graph.fastg exists
        for kmer_val in real_kmer_values:
            this_k_path = os.path.join(spades_out_put, "K" + kmer_val)
            if os.path.exists(os.path.join(this_k_path, "assembly_graph.fastg")):
                temp_res = True
            else:
                failed_at_k = kmer_val
        if temp_res:
            if failed_at_k:
                # partial success: drop the failed kmer and continue with the rest
                log_handler.warning("SPAdes failed for '-k " + failed_at_k + "'!")
                log_handler.warning("If you need result based on kmer=" + failed_at_k + " urgently, "
                                    "please check " + os.path.join(spades_out_put, "spades.log"))
                del real_kmer_values[real_kmer_values.index(failed_at_k)]
                log_handler.warning("GetOrganelle would continue to process results based on "
                                    "kmer=" + ",".join(real_kmer_values) + ".")
                # os.system("cp " + os.path.join(spades_out_put, "K" + real_kmer_values[-1], "assembly_graph.fastg")
                #           + " " + spades_out_put)
                log_handler.info('Assembling finished with warnings.\n')
                return True
            else:
                log_handler.warning("SPAdes failed with unknown errors!")
                log_handler.warning("If you need to know more details, please check " +
                                    os.path.join(spades_out_put, "spades.log") + " and contact SPAdes developers.")
                log_handler.warning("GetOrganelle would continue to process results based on "
                                    "kmer=" + ",".join(real_kmer_values) + ".")
                # os.system("cp " + os.path.join(spades_out_put, "K" + real_kmer_values[-1], "assembly_graph.fastg")
                #           + " " + spades_out_put)
                log_handler.info("Assembling finished with warnings.\n")
                return True
        else:
            if "mmap(2) failed" in output:
                # https://github.com/ablab/spades/issues/91
                log_handler.error("Guessing your output directory is inside a VirtualBox shared folder!")
                log_handler.error("Assembling failed.")
            else:
                log_handler.error("Assembling failed.")
            return False
    elif not os.path.exists(os.path.join(spades_out_put, "assembly_graph.fastg")):
        # SPAdes exited without error markers but also without the final graph
        if verbose_log:
            log_handler.info(output)
        log_handler.warning("Assembling exited halfway.\n")
        return True
    else:
        spades_log = output.split("\n")
        if verbose_log:
            log_handler.info(output)
        # surface the insert-size estimate line from the SPAdes log, if present
        for line in spades_log:
            line = line.strip()
            if line.count(":") > 2 and "Insert size = " in line and \
                    line.split()[0].replace(":", "").replace(".", "").isdigit():
                try:
                    log_handler.info(line.split(" ")[-1].split(", read length =")[0].strip())
                except IndexError:
                    pass
        log_handler.info('Assembling finished.\n')
        return True
def slim_spades_result(organelle_types, in_custom, ex_custom, spades_output, ignore_kmer_res, max_slim_extending_len,
                       verbose_log, log_handler, threads, which_blast="", resume=False, keep_temp=False):
    """Run slim_graph.py on each usable kmer assembly graph under *spades_output*.

    Builds include/exclude label databases (custom ones win over the built-in
    per-organelle fasta databases), then slims every K*/assembly_graph.fastg
    whose kmer exceeds *ignore_kmer_res* (reset to -1 when all kmers are small).

    Returns:
        (slim_stat_list, ignore_kmer_res) where slim_stat_list holds one
        (status, out_fastg_or_None) pair per processed kmer dir:
        0 = slimmed OK, 1 = failed/skipped, 2 = no target contigs found.

    Raises:
        Exception: if slim_graph.py cannot be located.
    """
    # locate slim_graph.py: bundled utility path, script dir, then PATH
    if executable(os.path.join(UTILITY_PATH, "slim_graph.py -h")):
        which_slim = UTILITY_PATH
    elif executable(os.path.join(PATH_OF_THIS_SCRIPT, "slim_graph.py -h")):
        which_slim = PATH_OF_THIS_SCRIPT
    elif executable("slim_graph.py -h"):
        which_slim = ""
    else:
        raise Exception("slim_graph.py not found!")
    slim_stat_list = []
    if not executable(os.path.join(which_blast, "blastn")):
        if log_handler:
            log_handler.warning(
                os.path.join(which_blast, "blastn") + " not accessible! Skip slimming assembly result ...")
        slim_stat_list.append((1, None))
        # bug fix: was `return slim_stat_list` — every other exit returns a
        # two-tuple, so callers unpacking two values would crash here
        return slim_stat_list, ignore_kmer_res
    if not executable(os.path.join(which_blast, "makeblastdb")):
        if log_handler:
            log_handler.warning(
                os.path.join(which_blast, "makeblastdb") + " not accessible! Skip slimming assembly result ...")
        slim_stat_list.append((1, None))
        # bug fix: same return-arity inconsistency as above
        return slim_stat_list, ignore_kmer_res
    include_priority_db = []
    exclude_db = []
    if in_custom or ex_custom:
        include_priority_db = in_custom
        exclude_db = ex_custom
    else:
        # for pt/mt single-organelle runs, include both plastid and mito DBs so
        # the other organelle's contigs can be labeled (and de-prioritized)
        if organelle_types == ["embplant_pt"]:
            include_priority_db = [os.path.join(_LBL_DB_PATH, "embplant_pt.fasta"),
                                   os.path.join(_LBL_DB_PATH, "embplant_mt.fasta")]
            max_slim_extending_len = \
                max_slim_extending_len if max_slim_extending_len else MAX_SLIM_EXTENDING_LENS[organelle_types[0]]
        elif organelle_types == ["embplant_mt"]:
            include_priority_db = [os.path.join(_LBL_DB_PATH, "embplant_mt.fasta"),
                                   os.path.join(_LBL_DB_PATH, "embplant_pt.fasta")]
            max_slim_extending_len = \
                max_slim_extending_len if max_slim_extending_len else MAX_SLIM_EXTENDING_LENS[organelle_types[0]]
        else:
            include_priority_db = [os.path.join(_LBL_DB_PATH, sub_organelle_t + ".fasta")
                                   for sub_organelle_t in organelle_types]
            if max_slim_extending_len is None:
                max_slim_extending_len = max([MAX_SLIM_EXTENDING_LENS[sub_organelle_t]
                                              for sub_organelle_t in organelle_types])
    kmer_values = sorted([int(kmer_d[1:])
                          for kmer_d in os.listdir(spades_output)
                          if os.path.isdir(os.path.join(spades_output, kmer_d))
                          and kmer_d.startswith("K")
                          and os.path.exists(os.path.join(spades_output, kmer_d, "assembly_graph.fastg"))],
                         reverse=True)
    if not kmer_values:
        return [], ignore_kmer_res  # to avoid "ValueError: max() arg is an empty sequence"
    if max(kmer_values) <= ignore_kmer_res:
        # consistency fix: guard the optional log_handler like everywhere else
        if log_handler:
            log_handler.info("Small kmer values, resetting \"--ignore-k -1\"")
        ignore_kmer_res = -1
    kmer_dirs = [os.path.join(spades_output, "K" + str(kmer_val))
                 for kmer_val in kmer_values if kmer_val > ignore_kmer_res]
    in_ex_info = generate_in_ex_info_name(include_indices=include_priority_db, exclude_indices=exclude_db)
    for kmer_dir in kmer_dirs:
        graph_file = os.path.join(kmer_dir, "assembly_graph.fastg")
        this_fastg_file_out = os.path.join(kmer_dir, "assembly_graph.fastg" + in_ex_info + ".fastg")
        if resume:
            if os.path.exists(this_fastg_file_out):
                if log_handler:
                    log_handler.info("Slimming " + graph_file + " ... skipped.")
                slim_stat_list.append((0, this_fastg_file_out))
                continue
        run_command = ""
        if include_priority_db:
            run_command += " --include-priority " + ",".join(include_priority_db)
        if exclude_db:
            run_command += " --exclude " + ",".join(exclude_db)
        which_bl_str = " --which-blast " + which_blast if which_blast else ""
        run_command = os.path.join(which_slim, "slim_graph.py") + " --verbose " * int(bool(verbose_log)) + \
                      " --log --wrapper -t " + str(threads) + " --keep-temp " * int(bool(keep_temp)) + \
                      (" --max-slim-extending-len " +
                       str(max_slim_extending_len) + " ") * int(bool(max_slim_extending_len)) + \
                      which_bl_str + " " + graph_file + run_command  # \
        slim_spades = subprocess.Popen(run_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
        if verbose_log and log_handler:
            log_handler.info(run_command)
        output, err = slim_spades.communicate()
        out_text = output.decode("utf8")  # decode once instead of per check
        output_file_list = [os.path.join(kmer_dir, x) for x in os.listdir(kmer_dir) if x.count(".fastg") == 2]
        if " failed" in out_text or "- ERROR:" in out_text:
            if log_handler:
                if verbose_log:
                    log_handler.error(out_text)
                log_handler.error("Slimming " + graph_file + " failed. "
                                  "Please check " + os.path.join(kmer_dir, "slim.log.txt") + " for details. ")
            slim_stat_list.append((1, None))
        elif output_file_list and os.path.getsize(output_file_list[0]) == 0:
            if log_handler:
                log_handler.warning("Slimming " + graph_file + " finished with no target organelle contigs found!")
            slim_stat_list.append((2, None))
        elif output_file_list:
            if log_handler:
                if verbose_log:
                    log_handler.info(out_text)
                log_handler.info("Slimming " + graph_file + " finished!")
            slim_stat_list.append((0, this_fastg_file_out))
        else:
            slim_stat_list.append((1, None))
    return slim_stat_list, ignore_kmer_res
def separate_fq_by_pair(out_base, prefix, verbose_log, log_handler):
    """Split the two extended fastq files into paired and unpaired read files.

    Reads `extended_1.fq`/`extended_2.fq` under *out_base* and writes
    `extended_{1,2}_paired.fq` plus `extended_{1,2}_unpaired.fq`. Warns when no
    paired reads came out. Always returns True.
    """
    log_handler.info("Separating extended fastq file ... ")
    def _ext_path(tag):
        # path helper for the per-run extended fastq naming scheme
        return os.path.join(out_base, prefix + "extended_" + tag + ".fq")
    paired_out_1 = _ext_path("1_paired")
    paired_out_2 = _ext_path("2_paired")
    get_paired_and_unpaired_reads(input_fq_1=_ext_path("1"),
                                  input_fq_2=_ext_path("2"),
                                  output_p_1=paired_out_1,
                                  output_p_2=paired_out_2,
                                  output_u_1=_ext_path("1_unpaired"),
                                  output_u_2=_ext_path("2_unpaired"))
    both_empty = not os.path.getsize(paired_out_1) and not os.path.getsize(paired_out_2)
    if both_empty:
        log_handler.warning("No paired reads found?!")
    return True
def extract_organelle_genome(out_base, spades_output, ignore_kmer_res, slim_out_fg, organelle_prefix,
                             organelle_type, blast_db, read_len_for_log, verbose, log_handler, basic_prefix,
                             expected_maximum_size, expected_minimum_size, do_spades_scaffolding, options):
    """Disentangle the slimmed assembly graphs into final organelle sequences.

    Tries each kmer's slimmed graph (largest kmer first) in up to three rounds:
      1. as a strictly circular genome;
      2. if that fails and *do_spades_scaffolding* is set, as a circular genome
         assisted by SPAdes scaffolds (largest kmer only);
      3. as an acyclic graph exported as scaffold(s) with relaxed thresholds.
    Writes path sequence fasta files, GFA graphs and (when Bandage is available)
    graph images next to *organelle_prefix*. On total failure, logs instructions
    for manual inspection in Bandage.

    Returns True if any round exported a result, else False.
    """
    from GetOrganelleLib.assembly_parser import ProcessingGraphFailed, Assembly
    def disentangle_assembly(fastg_file, tab_file, output, weight_factor, log_dis, time_limit, type_factor=3.,
                             mode="embplant_pt", blast_db_base="embplant_pt", contamination_depth=3.,
                             contamination_similarity=0.95, degenerate=True,
                             degenerate_depth=1.5, degenerate_similarity=0.98,
                             expected_max_size=inf, expected_min_size=0, hard_cov_threshold=10.,
                             min_sigma_factor=0.1, here_only_max_c=True, with_spades_scaffolds=False,
                             here_acyclic_allowed=False,
                             here_verbose=False, timeout_flag_str="'--disentangle-time-limit'", temp_graph=None):
        """Run one disentangling attempt, bounded by *time_limit* seconds."""
        # set_time_limit raises TimeoutError (flagged with timeout_flag_str)
        # when disentangle_inside exceeds time_limit
        @set_time_limit(time_limit, flag_str=timeout_flag_str)
        def disentangle_inside(fastg_f, tab_f, o_p, w_f, log_in, type_f=3., mode_in="embplant_pt",
                               in_db_n="embplant_pt", c_d=3., c_s=0.95,
                               deg=True, deg_dep=1.5, deg_sim=0.98, hard_c_t=10., min_s_f=0.1, max_c_in=True,
                               max_s=inf, min_s=0, with_spades_scaffolds_in=False,
                               acyclic_allowed_in=False, verbose_in=False, in_temp_graph=None):
            """Parse the graph, find the target subgraph, and write all outputs."""
            image_produced = False
            # the kmer directory name ("K127", ...) tags every output file
            this_K = os.path.split(os.path.split(fastg_f)[0])[-1]
            o_p += "." + this_K
            if with_spades_scaffolds_in:
                log_in.info("Scaffolding disconnected contigs using SPAdes scaffolds ... ")
                log_in.warning("Assembly based on scaffolding may not be as accurate as "
                               "the ones directly exported from the assembly graph.")
            if acyclic_allowed_in:
                log_in.info("Disentangling " + fastg_f + " as a/an " + in_db_n + "-insufficient graph ... ")
            else:
                log_in.info("Disentangling " + fastg_f + " as a circular genome ... ")
            input_graph = Assembly(fastg_f)
            if with_spades_scaffolds_in:
                # bridge disconnected contigs with gap nodes from SPAdes scaffolds;
                # abort this round if no new connections were added
                if not input_graph.add_gap_nodes_with_spades_res(os.path.join(spades_output, "scaffolds.fasta"),
                                                                 os.path.join(spades_output, "scaffolds.paths"),
                                                                 # min_cov=options.min_depth, max_cov=options.max_depth,
                                                                 log_handler=log_handler):
                    raise ProcessingGraphFailed("No new connections.")
                else:
                    if in_temp_graph:
                        # keep an intermediate copy of the scaffolded graph
                        if in_temp_graph.endswith(".gfa"):
                            this_tmp_graph = in_temp_graph[:-4] + ".scaffolds.gfa"
                        else:
                            this_tmp_graph = in_temp_graph + ".scaffolds.gfa"
                        input_graph.write_to_gfa(this_tmp_graph)
            target_results = input_graph.find_target_graph(tab_f,
                                                           mode=mode_in, database_name=in_db_n, type_factor=type_f,
                                                           log_hard_cov_threshold=hard_c_t,
                                                           contamination_depth=c_d,
                                                           contamination_similarity=c_s,
                                                           degenerate=deg, degenerate_depth=deg_dep,
                                                           degenerate_similarity=deg_sim,
                                                           expected_max_size=max_s, expected_min_size=min_s,
                                                           only_keep_max_cov=max_c_in,
                                                           min_sigma_factor=min_s_f,
                                                           weight_factor=w_f,
                                                           broken_graph_allowed=acyclic_allowed_in,
                                                           read_len_for_log=read_len_for_log,
                                                           kmer_for_log=int(this_K[1:]),
                                                           log_handler=log_in, verbose=verbose_in,
                                                           temp_graph=in_temp_graph)
            if not target_results:
                raise ProcessingGraphFailed("No target graph detected!")
            if len(target_results) > 1:
                log_in.warning(str(len(target_results)) + " sets of graph detected!")
            # log_in.info("Slimming and disentangling graph finished!")
            log_in.info("Writing output ...")
            ambiguous_base_used = False
            if acyclic_allowed_in:
                # acyclic branch: export each result graph's paths as scaffolds,
                # tracking per-path completeness for the final status report
                contig_num = set()
                still_complete = []
                for go_res, res in enumerate(target_results):
                    go_res += 1
                    broken_graph = res["graph"]
                    count_path = 0
                    these_paths = broken_graph.get_all_paths(mode=mode_in, log_handler=log_in)
                    # reducing paths
                    if len(these_paths) > options.max_paths_num:
                        log_in.warning("Only exporting " + str(options.max_paths_num) + " out of all " +
                                       str(len(these_paths)) + " possible paths. (see '--max-paths-num' to change it.)")
                        these_paths = these_paths[:options.max_paths_num]
                    # exporting paths, reporting results
                    for this_paths, other_tag in these_paths:
                        count_path += 1
                        all_contig_str = []
                        contig_num.add(len(this_paths))
                        contigs_are_circular = []
                        for go_contig, this_p_part in enumerate(this_paths):
                            this_contig = broken_graph.export_path(this_p_part)
                            if DEGENERATE_BASES & set(this_contig.seq):
                                ambiguous_base_used = True
                            if this_contig.label.endswith("(circular)"):
                                contigs_are_circular.append(True)
                            else:
                                contigs_are_circular.append(False)
                            # a single circular contig is written as-is; otherwise
                            # contigs get numbered scaffold headers
                            if len(this_paths) == 1 and contigs_are_circular[-1]:
                                all_contig_str.append(this_contig.fasta_str())
                            else:
                                all_contig_str.append(">scaffold_" + str(go_contig + 1) + "--" + this_contig.label +
                                                      "\n" + this_contig.seq + "\n")
                        if len(all_contig_str) == 1 and set(contigs_are_circular) == {True}:
                            # NOTE(review): membership test on the list only matches an
                            # element exactly equal to "GAP" — looks like it was meant to
                            # search within the fasta strings; verify intended behavior
                            if "GAP" in all_contig_str:
                                still_complete.append("nearly-complete")
                            else:
                                still_complete.append("complete")
                                # print ir stat
                                if count_path == 1 and in_db_n == "embplant_pt":
                                    detect_seq = broken_graph.export_path(this_paths[0]).seq
                                    ir_stats = detect_plastome_architecture(detect_seq, 1000)
                                    log_in.info("Detecting large repeats (>1000 bp) in PATH1 with " + ir_stats[-1] +
                                                ", Total:LSC:SSC:Repeat(bp) = " + str(len(detect_seq)) + ":" +
                                                ":".join([str(len_val) for len_val in ir_stats[:3]]))
                        else:
                            still_complete.append("incomplete")
                        if still_complete[-1] == "complete":
                            out_n = o_p + ".complete.graph" + str(go_res) + "." + \
                                    str(count_path) + other_tag + ".path_sequence.fasta"
                            log_in.info("Writing PATH" + str(count_path) + " of complete " + mode_in + " to " + out_n)
                        elif still_complete[-1] == "nearly-complete":
                            out_n = o_p + ".nearly-complete.graph" + str(go_res) + "." + \
                                    str(count_path) + other_tag + ".path_sequence.fasta"
                            log_in.info(
                                "Writing PATH" + str(count_path) + " of nearly-complete " + mode_in + " to " + out_n)
                        else:
                            out_n = o_p + ".scaffolds.graph" + str(go_res) + other_tag + "." + \
                                    str(count_path) + ".path_sequence.fasta"
                            log_in.info(
                                "Writing PATH" + str(count_path) + " of " + mode_in + " scaffold(s) to " + out_n)
                        open(out_n, "w").write("\n".join(all_contig_str))
                    # write the GFA (and image when fully complete) per result graph
                    if set(still_complete[-len(these_paths):]) == {"complete"}:
                        this_out_base = o_p + ".complete.graph" + str(go_res) + ".selected_graph."
                        log_in.info("Writing GRAPH to " + this_out_base + "gfa")
                        broken_graph.write_to_gfa(this_out_base + "gfa")
                        image_produced = draw_assembly_graph_using_bandage(
                            input_graph_file=this_out_base + "gfa",
                            output_image_file=this_out_base + "png",
                            assembly_graph_ob=broken_graph,
                            log_handler=log_handler, verbose_log=verbose_in, which_bandage=options.which_bandage)
                    elif set(still_complete[-len(these_paths):]) == {"nearly-complete"}:
                        this_out_base = o_p + ".nearly-complete.graph" + str(go_res) + ".selected_graph."
                        log_in.info("Writing GRAPH to " + this_out_base + "gfa")
                        broken_graph.write_to_gfa(this_out_base + "gfa")
                        image_produced = draw_assembly_graph_using_bandage(
                            input_graph_file=this_out_base + "gfa",
                            output_image_file=this_out_base + "png",
                            assembly_graph_ob=broken_graph,
                            log_handler=log_handler, verbose_log=verbose_in, which_bandage=options.which_bandage)
                    else:
                        this_out_base = o_p + ".contigs.graph" + str(go_res) + ".selected_graph."
                        log_in.info("Writing GRAPH to " + this_out_base + "gfa")
                        broken_graph.write_to_gfa(this_out_base + "gfa")
                        # image_produced = draw_assembly_graph_using_bandage(
                        #     input_graph_file=this_out_base + "gfa",
                        #     output_image_file=this_out_base + "png",
                        #     assembly_graph_ob=broken_graph,
                        #     log_handler=log_handler, verbose_log=verbose_in, which_bandage=options.which_bandage)
                if set(still_complete) == {"complete"}:
                    log_in.info("Result status of " + mode_in + ": circular genome")
                elif set(still_complete) == {"nearly-complete"}:
                    log_in.info("Result status of " + mode_in + ": circular genome with gaps")
                else:
                    log_in.info("Result status of " + mode_in + ": " +
                                ",".join(sorted([str(c_n) for c_n in contig_num])) + " scaffold(s)")
            else:
                # circular branch: every exported path must be a full circle
                status_str = "complete"
                for go_res, res in enumerate(target_results):
                    go_res += 1
                    idealized_graph = res["graph"]
                    count_path = 0
                    these_paths = idealized_graph.get_all_circular_paths(
                        mode=mode_in, log_handler=log_in, reverse_start_direction_for_pt=options.reverse_lsc)
                    # reducing paths
                    if len(these_paths) > options.max_paths_num:
                        log_in.warning("Only exporting " + str(options.max_paths_num) + " out of all " +
                                       str(len(these_paths)) + " possible paths. (see '--max-paths-num' to change it.)")
                        these_paths = these_paths[:options.max_paths_num]
                    # exporting paths, reporting results
                    for this_path, other_tag in these_paths:
                        count_path += 1
                        this_seq_obj = idealized_graph.export_path(this_path)
                        # degenerate (ambiguous) bases downgrade the status
                        if DEGENERATE_BASES & set(this_seq_obj.seq):
                            ambiguous_base_used = True
                            status_str = "nearly-complete"
                        out_n = o_p + "." + status_str + ".graph" + str(go_res) + "." + str(
                            count_path) + other_tag + ".path_sequence.fasta"
                        open(out_n, "w").write(this_seq_obj.fasta_str())
                        # print ir stat
                        if count_path == 1 and in_db_n == "embplant_pt" and not ambiguous_base_used:
                            detect_seq = this_seq_obj.seq
                            ir_stats = detect_plastome_architecture(detect_seq, 1000)
                            log_in.info("Detecting large repeats (>1000 bp) in PATH1 with " + ir_stats[-1] +
                                        ", Total:LSC:SSC:Repeat(bp) = " + str(len(detect_seq)) + ":" +
                                        ":".join([str(len_val) for len_val in ir_stats[:3]]))
                        log_in.info(
                            "Writing PATH" + str(count_path) + " of " + status_str + " " + mode_in + " to " + out_n)
                    temp_base_out = o_p + "." + status_str + ".graph" + str(go_res) + ".selected_graph."
                    log_in.info("Writing GRAPH to " + temp_base_out + "gfa")
                    idealized_graph.write_to_gfa(temp_base_out + "gfa")
                    image_produced = draw_assembly_graph_using_bandage(
                        input_graph_file=temp_base_out + "gfa", output_image_file=temp_base_out + "png",
                        assembly_graph_ob=idealized_graph, log_handler=log_handler, verbose_log=verbose_in,
                        which_bandage=options.which_bandage)
                if ambiguous_base_used:
                    log_in.info("Result status of " + mode_in + ": circular genome with gaps")
                else:
                    log_in.info("Result status of " + mode_in + ": circular genome")
            if ambiguous_base_used:
                log_in.warning("Ambiguous base(s) used!")
            # copy the source graphs next to the results for manual inspection
            o_p_extended = os.path.join(os.path.split(o_p)[0], basic_prefix + "extended_" + this_K + ".")
            os.system("cp " + os.path.join(os.path.split(fastg_f)[0], "assembly_graph.fastg") + " " +
                      o_p_extended + "assembly_graph.fastg")
            os.system("cp " + fastg_f + " " + o_p_extended + os.path.basename(fastg_f))
            os.system("cp " + tab_f + " " + o_p_extended + os.path.basename(tab_f))
            if not acyclic_allowed_in:
                if image_produced:
                    log_in.info("Please check the produced assembly image"
                                " or manually visualize " + o_p_extended + os.path.basename(fastg_f) +
                                " using Bandage to confirm the final result.")
                else:
                    log_in.info("Please visualize " + o_p_extended + os.path.basename(fastg_f) +
                                " using Bandage to confirm the final result.")
            log_in.info("Writing output finished.")
        disentangle_inside(fastg_f=fastg_file, tab_f=tab_file, o_p=output, w_f=weight_factor, log_in=log_dis,
                           type_f=type_factor, mode_in=mode, in_db_n=blast_db_base,
                           c_d=contamination_depth, c_s=contamination_similarity,
                           deg=degenerate, deg_dep=degenerate_depth, deg_sim=degenerate_similarity,
                           hard_c_t=hard_cov_threshold, min_s_f=min_sigma_factor, max_c_in=here_only_max_c,
                           max_s=expected_max_size, min_s=expected_min_size,
                           with_spades_scaffolds_in=with_spades_scaffolds,
                           acyclic_allowed_in=here_acyclic_allowed, verbose_in=here_verbose, in_temp_graph=temp_graph)
    # start
    # collect usable kmer dirs (largest kmer first), dropping ignored kmers
    kmer_values = sorted([int(kmer_d[1:])
                          for kmer_d in os.listdir(spades_output)
                          if os.path.isdir(os.path.join(spades_output, kmer_d))
                          and kmer_d.startswith("K")
                          and os.path.exists(os.path.join(spades_output, kmer_d, "assembly_graph.fastg"))],
                         reverse=True)
    kmer_values = [kmer_val for kmer_val in kmer_values if kmer_val > ignore_kmer_res]
    kmer_dirs = [os.path.join(spades_output, "K" + str(kmer_val)) for kmer_val in kmer_values]
    timeout_flag = "'--disentangle-time-limit'"
    export_succeeded = False
    path_prefix = os.path.join(out_base, organelle_prefix)
    graph_temp_file = path_prefix + ".temp.gfa" if options.keep_temp_files else None
    # round 1: try each kmer as a strictly circular genome
    for go_k, kmer_dir in enumerate(kmer_dirs):
        out_fastg = slim_out_fg[go_k]
        if out_fastg and os.path.getsize(out_fastg):
            try:
                """disentangle"""
                out_csv = out_fastg[:-5] + "csv"
                # if it is the first round (the largest kmer), copy the slimmed result to the main spades output
                # if go_k == 0:
                #     main_spades_folder = os.path.split(kmer_dir)[0]
                #     os.system("cp " + out_fastg + " " + main_spades_folder)
                #     os.system("cp " + out_csv + " " + main_spades_folder)
                disentangle_assembly(fastg_file=out_fastg, blast_db_base=blast_db,
                                     mode=organelle_type, tab_file=out_csv, output=path_prefix,
                                     weight_factor=100, hard_cov_threshold=options.disentangle_depth_factor,
                                     contamination_depth=options.contamination_depth,
                                     contamination_similarity=options.contamination_similarity,
                                     degenerate=options.degenerate, degenerate_depth=options.degenerate_depth,
                                     degenerate_similarity=options.degenerate_similarity,
                                     expected_max_size=expected_maximum_size,
                                     expected_min_size=expected_minimum_size,
                                     here_only_max_c=True,
                                     here_acyclic_allowed=False, here_verbose=verbose, log_dis=log_handler,
                                     time_limit=options.disentangle_time_limit, timeout_flag_str=timeout_flag,
                                     temp_graph=graph_temp_file)
            except ImportError as e:
                log_handler.error("Disentangling failed: " + str(e))
                return False
            except AttributeError as e:
                if verbose:
                    raise e
            except RuntimeError as e:
                if verbose:
                    log_handler.exception("")
                log_handler.info("Disentangling failed: RuntimeError: " + str(e).strip())
            except TimeoutError:
                log_handler.info("Disentangling timeout. (see " + timeout_flag + " for more)")
            except ProcessingGraphFailed as e:
                log_handler.info("Disentangling failed: " + str(e).strip())
            except Exception as e:
                log_handler.exception("")
                sys.exit()
            else:
                # for/try/else: success at this kmer ends the loop
                export_succeeded = True
                break
    # round 2: retry the largest kmer with SPAdes-scaffold assistance
    if not export_succeeded and do_spades_scaffolding:
        largest_k_graph_f_exist = bool(slim_out_fg[0])
        if kmer_dirs and largest_k_graph_f_exist:
            out_fastg = slim_out_fg[0]
            if out_fastg and os.path.getsize(out_fastg):
                try:
                    """disentangle"""
                    out_csv = out_fastg[:-5] + "csv"
                    disentangle_assembly(fastg_file=out_fastg, blast_db_base=blast_db,
                                         mode=organelle_type, tab_file=out_csv, output=path_prefix,
                                         weight_factor=100, hard_cov_threshold=options.disentangle_depth_factor,
                                         contamination_depth=options.contamination_depth,
                                         contamination_similarity=options.contamination_similarity,
                                         degenerate=options.degenerate, degenerate_depth=options.degenerate_depth,
                                         degenerate_similarity=options.degenerate_similarity,
                                         expected_max_size=expected_maximum_size,
                                         expected_min_size=expected_minimum_size,
                                         here_only_max_c=True, with_spades_scaffolds=True,
                                         here_acyclic_allowed=False, here_verbose=verbose, log_dis=log_handler,
                                         time_limit=options.disentangle_time_limit, timeout_flag_str=timeout_flag,
                                         temp_graph=graph_temp_file)
                except FileNotFoundError:
                    log_handler.warning("scaffolds.fasta and/or scaffolds.paths not found!")
                except ImportError as e:
                    log_handler.error("Disentangling failed: " + str(e))
                    return False
                except AttributeError as e:
                    if verbose:
                        raise e
                except RuntimeError as e:
                    if verbose:
                        log_handler.exception("")
                    log_handler.info("Disentangling failed: RuntimeError: " + str(e).strip())
                except TimeoutError:
                    log_handler.info("Disentangling timeout. (see " + timeout_flag + " for more)")
                except ProcessingGraphFailed as e:
                    log_handler.info("Disentangling failed: " + str(e).strip())
                except Exception as e:
                    log_handler.exception("")
                    sys.exit()
                else:
                    export_succeeded = True
    # round 3: accept an acyclic result exported as scaffolds
    if not export_succeeded:
        largest_k_graph_f_exist = bool(slim_out_fg[0])
        if kmer_dirs and largest_k_graph_f_exist:
            for go_k, kmer_dir in enumerate(kmer_dirs):
                out_fastg = slim_out_fg[go_k]
                if out_fastg and os.path.getsize(out_fastg):
                    try:
                        """disentangle the graph as scaffold(s)"""
                        out_fastg_list = sorted([os.path.join(kmer_dir, x)
                                                 for x in os.listdir(kmer_dir) if x.count(".fastg") == 2])
                        if out_fastg_list:
                            out_fastg = out_fastg_list[0]
                            out_csv = out_fastg[:-5] + "csv"
                            # note the relaxed depth factor (*0.8) and fixed 1h time limit here
                            disentangle_assembly(fastg_file=out_fastg, blast_db_base=blast_db,
                                                 mode=organelle_type, tab_file=out_csv,
                                                 output=path_prefix, weight_factor=100, here_verbose=verbose,
                                                 log_dis=log_handler,
                                                 hard_cov_threshold=options.disentangle_depth_factor * 0.8,
                                                 contamination_depth=options.contamination_depth,
                                                 contamination_similarity=options.contamination_similarity,
                                                 degenerate=options.degenerate,
                                                 degenerate_depth=options.degenerate_depth,
                                                 degenerate_similarity=options.degenerate_similarity,
                                                 expected_max_size=expected_maximum_size,
                                                 expected_min_size=expected_minimum_size,
                                                 here_only_max_c=True, here_acyclic_allowed=True,
                                                 time_limit=3600, timeout_flag_str=timeout_flag,
                                                 temp_graph=graph_temp_file)
                    except (ImportError, AttributeError) as e:
                        log_handler.error("Disentangling failed: " + str(e))
                        break
                    except RuntimeError as e:
                        if verbose:
                            log_handler.exception("")
                        log_handler.info("Disentangling failed: RuntimeError: " + str(e).strip())
                    except TimeoutError:
                        log_handler.info("Disentangling timeout. (see " + timeout_flag + " for more)")
                    except ProcessingGraphFailed as e:
                        log_handler.info("Disentangling failed: " + str(e).strip())
                    except Exception as e:
                        raise e
                    else:
                        # acyclic export worked: point the user at manual confirmation
                        export_succeeded = True
                        out_csv = out_fastg[:-5] + "csv"
                        log_handler.info("Please ...")
                        log_handler.info("load the graph file '" + os.path.basename(out_fastg) +
                                         "' in " + ",".join(["K" + str(k_val) for k_val in kmer_values]))
                        log_handler.info("load the CSV file '" + os.path.basename(out_csv) +
                                         "' in " + ",".join(["K" + str(k_val) for k_val in kmer_values]))
                        log_handler.info("visualize and confirm the incomplete result in Bandage.")
                        # log.info("-------------------------------------------------------")
                        log_handler.info("If the result is nearly complete, ")
                        log_handler.info("you can also adjust the arguments according to "
                                         "https://github.com/Kinggerm/GetOrganelle/wiki/FAQ#what-should-i-do-with-incomplete-resultbroken-assembly-graph")
                        log_handler.info("If you have questions for us, "
                                         "please provide us with the get_org.log.txt file "
                                         "and the post-slimming graph in the format you like!")
                        # log.info("-------------------------------------------------------")
                        break
            if not export_succeeded:
                # all rounds failed although slimmed graphs exist
                out_fastg = slim_out_fg[0]
                out_csv = out_fastg[:-5] + "csv"
                log_handler.info("Please ...")
                log_handler.info("load the graph file '" + os.path.basename(out_fastg) + ",assembly_graph.fastg" +
                                 "' in " + ",".join(["K" + str(k_val) for k_val in kmer_values]))
                log_handler.info("load the CSV file '" + os.path.basename(out_csv) +
                                 "' in " + ",".join(["K" + str(k_val) for k_val in kmer_values]))
                log_handler.info("visualize and export your result in Bandage.")
                log_handler.info("If you have questions for us, please provide us with the get_org.log.txt file "
                                 "and the post-slimming graph in the format you like!")
        else:
            # slim failed with unknown error
            log_handler.info("Please ...")
            log_handler.info("load the graph file: " + os.path.join(spades_output, 'assembly_graph.fastg'))
            log_handler.info("visualize and export your result in Bandage.")
            log_handler.info("If you have questions for us, please provide us with the get_org.log.txt file "
                             "and the post-slimming graph in the format you like!")
    return export_succeeded
def main():
time0 = time.time()
from GetOrganelleLib.versions import get_versions
title = "GetOrganelle v" + str(get_versions()) + \
"\n" \
"\nget_organelle_from_reads.py assembles organelle genomes from genome skimming data." \
"\nFind updates in https://github.com/Kinggerm/GetOrganelle and see README.md for more information." \
"\n"
options, log_handler, previous_attributes, run_slim, run_disentangle = \
get_options(description=title, version=get_versions())
resume = options.script_resume
verb_log = options.verbose_log
out_base = options.output_base
echo_step = options.echo_step
reads_files_to_drop = []
# global word_size
word_size = None
mean_read_len = None
mean_error_rate = None
# all_bases = None
low_quality_pattern = None
max_read_len = None
# max_extending_lens
max_extending_lens = {inf}
slim_extending_len = None
phred_offset = options.phred_offset
try:
if options.fq_file_1 and options.fq_file_2:
reads_paired = {'input': True, 'pair_out': bool}
original_fq_files = [options.fq_file_1, options.fq_file_2] + \
[fastq_file for fastq_file in options.unpaired_fq_files]
direction_according_to_user_input = [1, 2] + [1] * len(options.unpaired_fq_files)
else:
reads_paired = {'input': False, 'pair_out': False}
original_fq_files = [fastq_file for fastq_file in options.unpaired_fq_files]
direction_according_to_user_input = [1] * len(options.unpaired_fq_files)
all_read_nums = [options.maximum_n_reads for foo in original_fq_files]
other_spd_options = options.other_spades_options.split(' ')
if '-o' in other_spd_options:
which_out = other_spd_options.index('-o')
spades_output = other_spd_options[which_out + 1]
del other_spd_options[which_out: which_out + 2]
else:
spades_output = os.path.join(out_base, options.prefix + "extended_spades")
if "--phred-offset" in other_spd_options:
log_handler.warning("--spades-options '--phred-offset' was deprecated in GetOrganelle. ")
which_po = other_spd_options.index("--phred-offset")
del other_spd_options[which_po: which_po + 2]
other_spd_options = ' '.join(other_spd_options)
""" get reads """
extended_files_exist = max(
min([os.path.exists(
str(os.path.join(out_base, options.prefix + "extended")) + '_' + str(i + 1) + '_unpaired.fq')
for i in range(2)] +
[os.path.exists(str(os.path.join(out_base, options.prefix + "extended")) + '_' + str(i + 1) + '.fq')
for i in range(2, len(original_fq_files))]),
min([os.path.exists(str(os.path.join(out_base, options.prefix + "extended")) + '_' + str(i + 1) + '.fq')
for i in range(len(original_fq_files))]))
extended_fq_gz_exist = max(
min([os.path.exists(
str(os.path.join(out_base, options.prefix + "extended")) + '_' + str(i + 1) + '_unpaired.fq.tar.gz')
for i in range(2)] +
[os.path.exists(str(os.path.join(out_base, options.prefix + "extended")) + '_' + str(i + 1) + '.fq.tar.gz')
for i in range(2, len(original_fq_files))]),
min([os.path.exists(str(os.path.join(out_base, options.prefix + "extended")) + '_' + str(i + 1) + '.fq.tar.gz')
for i in range(len(original_fq_files))]))
if resume:
if "max_read_len" in previous_attributes and "mean_read_len" in previous_attributes and \
"phred_offset" in previous_attributes:
try:
max_read_len = int(previous_attributes["max_read_len"])
mean_read_len = float(previous_attributes["mean_read_len"])
phred_offset = int(previous_attributes["phred_offset"])
except ValueError:
resume = False
else:
resume = False
if not resume and verb_log:
log_handler.info("Previous attributes: max/mean read lengths/phred offset not found. "
"Restart a new run.\n")
try:
word_size = int(previous_attributes["w"])
except (KeyError, ValueError):
if extended_files_exist or extended_fq_gz_exist:
if verb_log:
log_handler.info("Previous attributes: word size not found. Restart a new run.\n")
resume = False
else:
pass
if not (resume and (extended_files_exist or (extended_fq_gz_exist and phred_offset != -1))):
anti_seed = options.anti_seed
pre_grp = options.pre_grouped
in_memory = options.index_in_memory
log_handler.info("Pre-reading fastq ...")
# using mapping to estimate maximum_n_reads when options.reduce_reads_for_cov != inf.
all_read_nums = None
if resume:
try:
all_read_nums = [int(sub_num) for sub_num in previous_attributes["num_reads_1"].split("+")]
except (KeyError, ValueError):
resume = False
else:
try:
low_quality_pattern = "[" + previous_attributes["trim_chars"] + "]"
mean_error_rate = float(previous_attributes["mean_error_rate"])
except (KeyError, ValueError):
low_quality_pattern = "[]"
mean_error_rate = None
# all_bases = mean_read_len * sum(all_read_nums)
if all_read_nums is None:
if options.reduce_reads_for_cov != inf:
log_handler.info(
"Estimating reads to use ... "
"(to use all reads, set '--reduce-reads-for-coverage inf --max-reads inf')")
all_read_nums = estimate_maximum_n_reads_using_mapping(
twice_max_coverage=options.reduce_reads_for_cov * 2, check_dir=os.path.join(out_base, "check"),
original_fq_list=original_fq_files, reads_paired=reads_paired["input"],
maximum_n_reads_hard_bound=options.maximum_n_reads,
seed_files=options.seed_file, organelle_types=options.organelle_type,
in_customs=options.genes_fasta, ex_customs=options.exclude_genes,
target_genome_sizes=options.target_genome_size,
keep_temp=options.keep_temp_files, resume=options.script_resume,
other_spades_opts=other_spd_options,
which_blast=options.which_blast, which_spades=options.which_spades,
which_bowtie2=options.which_bowtie2, threads=options.threads,
random_seed=options.random_seed, verbose_log=options.verbose_log, log_handler=log_handler)
log_handler.info("Estimating reads to use finished.")
else:
all_read_nums = [options.maximum_n_reads] * len(original_fq_files)
if original_fq_files:
for file_id, read_file in enumerate(original_fq_files):
# unzip fq files if needed
if read_file.endswith(".gz") or read_file.endswith(".zip"):
target_fq = os.path.join(out_base, str(file_id + 1) + "-" +
os.path.basename(read_file)) + ".fastq"
if not (os.path.exists(target_fq) and resume):
unzip(read_file, target_fq, 4 * all_read_nums[file_id],
options.verbose_log, log_handler)
else:
target_fq = os.path.join(out_base, str(file_id + 1) + "-" +
os.path.basename(read_file))
if os.path.realpath(target_fq) == os.path.realpath(os.path.join(os.getcwd(), read_file)):
log_handler.error("Do not put original reads file(s) in the output directory!")
exit()
if not (os.path.exists(target_fq) and resume):
if all_read_nums[file_id] > READ_LINE_TO_INF:
os.system("cp " + read_file + " " + target_fq + ".Temp")
os.system("mv " + target_fq + ".Temp " + target_fq)
else:
os.system("head -n " + str(int(4 * all_read_nums[file_id])) + " " +
read_file + " > " + target_fq + ".Temp")
os.system("mv " + target_fq + ".Temp " + target_fq)
if os.path.getsize(target_fq) == 0:
raise ValueError("Empty file " + target_fq)
original_fq_files[file_id] = target_fq
reads_files_to_drop.append(target_fq)
if not resume:
sampling_reads_for_quality = 10000
# pre-reading fastq
log_handler.info("Counting read qualities ...")
low_quality_pattern, mean_error_rate, phred_offset = \
get_read_quality_info(original_fq_files, sampling_reads_for_quality, options.min_quality_score,
log_handler, maximum_ignore_percent=options.maximum_ignore_percent)
log_handler.info("Counting read lengths ...")
mean_read_len, max_read_len, all_read_nums = get_read_len_mean_max_count(original_fq_files,
options.maximum_n_reads)
log_handler.info("Mean = " + str(round(mean_read_len, 1)) + " bp, maximum = " +
str(max_read_len) + " bp.")
log_handler.info("Reads used = " + "+".join([str(sub_num) for sub_num in all_read_nums]))
log_handler.info("Pre-reading fastq finished.\n")
else:
log_handler.info("Pre-reading fastq skipped.\n")
# reading seeds
log_handler.info("Making seed reads ...")
seed_fq_files = []
seed_sam_files = []
seed_fs_files = []
for go_t, seed_f in enumerate(options.seed_file):
seed_fq, seed_sam, new_seed_f = making_seed_reads_using_mapping(
seed_file=seed_f,
original_fq_files=original_fq_files,
out_base=out_base, resume=resume, verbose_log=verb_log, threads=options.threads,
random_seed=options.random_seed, organelle_type=options.organelle_type[go_t],
prefix=options.prefix, keep_temp=options.keep_temp_files,
bowtie2_other_options=options.bowtie2_options, which_bowtie2=options.which_bowtie2,
log_handler=log_handler)
seed_fq_files.append(seed_fq)
seed_sam_files.append(seed_sam)
seed_fs_files.append(new_seed_f)
anti_lines = get_anti_lines_using_mapping(
anti_seed=anti_seed, seed_sam_files=seed_sam_files,
original_fq_files=original_fq_files, out_base=out_base, resume=resume,
verbose_log=verb_log, threads=options.threads,
random_seed=options.random_seed, prefix=options.prefix,
keep_temp=options.keep_temp_files, bowtie2_other_options=options.bowtie2_options,
which_bowtie2=options.which_bowtie2, log_handler=log_handler)
log_handler.info("Making seed reads finished.\n")
log_handler.info("Checking seed reads and parameters ...")
if not resume or options.word_size:
word_size = options.word_size
word_size, keep_seq_parts, mean_base_cov_values, max_extending_lens, all_read_limits = \
check_parameters(word_size=word_size,
original_fq_files=original_fq_files,
seed_fs_files=seed_fs_files,
seed_fq_files=seed_fq_files, seed_sam_files=seed_sam_files,
organelle_types=options.organelle_type,
in_custom_list=options.genes_fasta,
ex_custom_list=options.exclude_genes,
mean_error_rate=mean_error_rate,
target_genome_sizes=options.target_genome_size,
max_extending_len=options.max_extending_len, mean_read_len=mean_read_len,
max_read_len=max_read_len, low_quality_pattern=low_quality_pattern,
all_read_nums=all_read_nums, reduce_reads_for_cov=options.reduce_reads_for_cov,
log_handler=log_handler,
other_spades_opts=other_spd_options,
which_spades=options.which_spades,
which_blast=options.which_blast, which_bowtie2=options.which_bowtie2,
wc_bc_ratio_constant=0.35, larger_auto_ws=options.larger_auto_ws,
threads=options.threads, random_seed=options.random_seed,
resume=resume, verbose_log=verb_log, zip_files=options.zip_files)
log_handler.info("Checking seed reads and parameters finished.\n")
# make read index
log_handler.info("Making read index ...")
fq_info_in_memory = make_read_index(original_fq_files, direction_according_to_user_input,
all_read_limits, options.rm_duplicates, out_base, word_size,
anti_lines, pre_grp, in_memory, anti_seed,
keep_seq_parts=keep_seq_parts, low_quality=low_quality_pattern,
resume=resume, echo_step=echo_step, log_handler=log_handler)
len_indices = fq_info_in_memory[2]
keep_seq_parts = fq_info_in_memory[3]
if keep_seq_parts:
log_handler.info("Reads are stored as fragments.")
# pre-grouping if asked
if pre_grp:
preg_word_size = word_size if not options.pregroup_word_size else options.pregroup_word_size
groups_of_lines, lines_with_dup, group_id_to_read_counts = \
pre_grouping(fastq_indices_in_memory=fq_info_in_memory, dupli_threshold=pre_grp, out_base=out_base,
preg_word_size=preg_word_size, index_in_memory=in_memory, log_handler=log_handler)
else:
groups_of_lines = lines_with_dup = group_id_to_read_counts = None
if not in_memory:
fq_info_in_memory = None
log_handler.info("Making read index finished.\n")
# extending process
log_handler.info("Extending ...")
if set(max_extending_lens) == {inf}:
accepted_rd_id = extending_no_lim(word_size=word_size, seed_file=seed_fq_files,
original_fq_files=original_fq_files, len_indices=len_indices,
pre_grouped=pre_grp, groups_of_duplicate_lines=groups_of_lines,
lines_with_duplicates=lines_with_dup,
fq_info_in_memory=fq_info_in_memory, output_base=out_base,
max_rounds=options.max_rounds,
min_rounds=1, fg_out_per_round=options.fg_out_per_round,
jump_step=options.jump_step, mesh_size=options.mesh_size,
verbose=verb_log, resume=resume,
all_read_limits=all_read_limits,
maximum_n_words=options.maximum_n_words,
keep_seq_parts=keep_seq_parts, low_qual_pattern=low_quality_pattern,
echo_step=echo_step, log_handler=log_handler)
else:
accepted_rd_id = extending_with_lim(word_size=word_size, seed_file=seed_fq_files,
original_fq_files=original_fq_files, len_indices=len_indices,
pre_grouped=pre_grp, groups_of_duplicate_lines=groups_of_lines,
lines_with_duplicates=lines_with_dup,
group_id_to_read_counts=group_id_to_read_counts,
fq_info_in_memory=fq_info_in_memory, output_base=out_base,
max_rounds=options.max_rounds,
extending_dist_limit=max_extending_lens,
min_rounds=1, fg_out_per_round=options.fg_out_per_round,
jump_step=options.jump_step, mesh_size=options.mesh_size,
verbose=verb_log, resume=resume,
all_read_limits=all_read_limits,
maximum_n_words=options.maximum_n_words,
keep_seq_parts=keep_seq_parts,
low_qual_pattern=low_quality_pattern,
mean_read_len=mean_read_len,
mean_base_cov=min([cov_v[0] for cov_v in mean_base_cov_values]),
echo_step=echo_step, log_handler=log_handler)
mapped_read_ids = set()
write_fq_results(original_fq_files, accepted_rd_id,
os.path.join(out_base, options.prefix + "extended"),
os.path.join(out_base, 'temp.indices.2'),
fq_info_in_memory, all_read_limits,
echo_step, verb_log, in_memory, log_handler, mapped_read_ids)
del accepted_rd_id, fq_info_in_memory, groups_of_lines, \
anti_lines, lines_with_dup
if not options.keep_temp_files:
try:
os.remove(os.path.join(out_base, 'temp.indices.1'))
os.remove(os.path.join(out_base, 'temp.indices.2'))
except OSError:
pass
log_handler.info("Extending finished.\n")
else:
log_handler.info("Extending ... skipped.\n")
if reads_files_to_drop and not options.keep_temp_files:
for rm_read_file in reads_files_to_drop:
os.remove(rm_read_file)
if reads_paired['input']:
if not (resume and (min([os.path.exists(x) for x in
(os.path.join(out_base, options.prefix + "extended_" + y + "_" + z + "paired.fq")
for y in ('1', '2') for z in ('', 'un'))]) or extended_fq_gz_exist)):
resume = False
reads_paired['pair_out'] = separate_fq_by_pair(out_base, options.prefix, verb_log, log_handler)
if reads_paired['pair_out'] and not options.keep_temp_files:
os.remove(os.path.join(out_base, options.prefix + "extended_1.fq"))
os.remove(os.path.join(out_base, options.prefix + "extended_2.fq"))
else:
log_handler.info("Separating extended fastq file ... skipped.\n")
""" assembly """
is_assembled = False
if options.run_spades:
if not (resume and os.path.exists(os.path.join(spades_output, 'assembly_graph.fastg'))):
if extended_fq_gz_exist and not extended_files_exist:
files_to_unzip = [os.path.join(out_base, candidate)
for candidate in os.listdir(out_base) if candidate.endswith(".fq.tar.gz")]
for file_to_u in files_to_unzip:
unzip(source=file_to_u, target=file_to_u[:-7], line_limit=inf)
options.spades_kmer = check_kmers(options.spades_kmer, word_size, max_read_len, log_handler)
log_handler.info("Assembling using SPAdes ...")
if not executable("pigz -h"):
log_handler.warning("Compression after read correction will be skipped for lack of 'pigz'")
if "--disable-gzip-output" not in other_spd_options:
other_spd_options += " --disable-gzip-output"
if phred_offset in (33, 64):
other_spd_options += " --phred-offset %i" % phred_offset
is_assembled = assembly_with_spades(options.spades_kmer, spades_output, other_spd_options, out_base,
options.prefix, original_fq_files, reads_paired,
which_spades=options.which_spades, verbose_log=options.verbose_log,
resume=resume, threads=options.threads, log_handler=log_handler)
else:
is_assembled = True
log_handler.info("Assembling using SPAdes ... skipped.\n")
if options.zip_files:
files_to_zip = [os.path.join(out_base, candidate)
for candidate in os.listdir(out_base) if candidate.endswith(".fq")]
files_to_zip.extend([os.path.join(out_base, "seed", candidate)
for candidate in os.listdir(os.path.join(out_base, "seed"))
if candidate.endswith(".fq") or candidate.endswith(".sam")])
if files_to_zip:
log_handler.info("Compressing files ...")
for file_to_z in files_to_zip:
zip_file(source=file_to_z, target=file_to_z + ".tar.gz", remove_source=True)
log_handler.info("Compressing files finished.\n")
""" export organelle """
if is_assembled and run_slim:
slim_stat_list, ignore_k = slim_spades_result(
organelle_types=options.organelle_type, in_custom=options.genes_fasta, ex_custom=options.exclude_genes,
spades_output=spades_output, ignore_kmer_res=options.ignore_kmer_res,
max_slim_extending_len=slim_extending_len,
verbose_log=options.verbose_log, log_handler=log_handler, threads=options.threads,
which_blast=options.which_blast, resume=options.script_resume, keep_temp=options.keep_temp_files)
slim_stat_codes = [s_code for s_code, fastg_out in slim_stat_list]
slim_fastg_file = [fastg_out for s_code, fastg_out in slim_stat_list]
options.ignore_kmer_res = ignore_k
if set(slim_stat_codes) == {2}:
log_handler.warning("No sequence hit our LabelDatabase!")
log_handler.warning("This might due to unreasonable seed/parameter choices or a bug.")
log_handler.info("Please open an issue at https://github.com/Kinggerm/GetOrganelle/issues "
"with the get_org.log.txt file.\n")
elif 0 in slim_stat_codes:
log_handler.info("Slimming assembly graphs finished.\n")
if run_disentangle:
organelle_type_prefix = []
duplicated_o_types = {o_type: 1
for o_type in options.organelle_type
if options.organelle_type.count(o_type) > 1}
for here_type in options.organelle_type:
if here_type in duplicated_o_types:
organelle_type_prefix.append(here_type + "-" + str(duplicated_o_types[here_type]))
duplicated_o_types[here_type] += 1
else:
organelle_type_prefix.append(here_type)
for go_t, sub_organelle_type in enumerate(options.organelle_type):
og_prefix = options.prefix + organelle_type_prefix[go_t]
graph_existed = bool([gfa_f for gfa_f in os.listdir(out_base)
if gfa_f.startswith(og_prefix) and gfa_f.endswith(".selected_graph.gfa")])
fasta_existed = bool([fas_f for fas_f in os.listdir(out_base)
if fas_f.startswith(og_prefix) and fas_f.endswith(".path_sequence.fasta")])
if resume and graph_existed and fasta_existed:
log_handler.info("Extracting " + sub_organelle_type + " from the assemblies ... skipped.\n")
else:
# log_handler.info("Parsing assembly graph and outputting ...")
log_handler.info("Extracting " + sub_organelle_type + " from the assemblies ...")
if options.genes_fasta:
db_base_name = remove_db_postfix(os.path.basename(options.genes_fasta[go_t]))
else:
db_base_name = sub_organelle_type
ext_res = extract_organelle_genome(out_base=out_base, spades_output=spades_output,
ignore_kmer_res=options.ignore_kmer_res,
slim_out_fg=slim_fastg_file, organelle_prefix=og_prefix,
organelle_type=sub_organelle_type,
blast_db=db_base_name,
read_len_for_log=mean_read_len,
verbose=options.verbose_log, log_handler=log_handler,
basic_prefix=options.prefix,
expected_minimum_size=options.expected_min_size[go_t],
expected_maximum_size=options.expected_max_size[go_t],
options=options,
do_spades_scaffolding=reads_paired["input"])
if ext_res:
log_handler.info("Extracting " + sub_organelle_type + " from the assemblies finished.\n")
else:
log_handler.info("Extracting " + sub_organelle_type + " from the assemblies failed.\n")
else:
log_handler.error("No valid assembly graph found!")
log_handler = simple_log(log_handler, out_base, prefix=options.prefix + "get_org.")
log_handler.info("\nTotal cost " + "%.2f" % (time.time() - time0) + " s")
log_handler.info("Thank you!")
# except SystemExit:
# pass
except:
log_handler.exception("")
log_handler = simple_log(log_handler, out_base, prefix=options.prefix + "get_org.")
log_handler.info("\nTotal cost " + "%.2f" % (time.time() - time0) + " s")
log_handler.info("For trouble-shooting, please ")
log_handler.info("Firstly, check https://github.com/Kinggerm/GetOrganelle/wiki/FAQ")
log_handler.info("Secondly, check if there are open/closed issues related at "
"https://github.com/Kinggerm/GetOrganelle/issues")
log_handler.info("If your problem was still not solved, "
"\n please open an issue at https://github.com/Kinggerm/GetOrganelle/issues"
"\n please provide the get_org.log.txt "
"and the assembly graph (can be *.png to protect your data privacy) if possible!")
logging.shutdown()
if __name__ == '__main__':
main()
"""Copyright 2016 Jianjun Jin"""
| Kinggerm/GetOrganelle | get_organelle_from_reads.py | Python | gpl-3.0 | 262,598 | [
"BLAST"
] | 9743c060a350421e38eb96137b5de8a6e4927d15fc8cf0300ef47891bc1c956e |
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example takes awhile to execute. It uses a genetic algorithm to fit an RBF network to the iris data set.
You can see the output from the example here. As you can see, it took 58 iterations to train to 0.05.
You can see that it is able to classify many of the iris species correctly, but not all.
This example uses one-of-n encoding for the iris species. Equilateral could have also been used.
Generaton #1, Score=0.199843346838, stagnant=0
Generaton #2, Score=0.199843346838, stagnant=0
Generaton #3, Score=0.193606061977, stagnant=1
Generaton #4, Score=0.182932591913, stagnant=0
Generaton #5, Score=0.165157776619, stagnant=0
Generaton #6, Score=0.15796529294, stagnant=0
Generaton #7, Score=0.157826592807, stagnant=0
Generaton #8, Score=0.149478480898, stagnant=1
Generaton #9, Score=0.142609733514, stagnant=0
Generaton #10, Score=0.141267076301, stagnant=0
Generaton #11, Score=0.13387570015, stagnant=0
Generaton #12, Score=0.131977908763, stagnant=0
Generaton #13, Score=0.126539359115, stagnant=0
Generaton #14, Score=0.122389808687, stagnant=0
Generaton #15, Score=0.121392668139, stagnant=0
Generaton #16, Score=0.11318352856, stagnant=1
Generaton #17, Score=0.111552631929, stagnant=0
Generaton #18, Score=0.104332331742, stagnant=0
Generaton #19, Score=0.103101332438, stagnant=0
Generaton #20, Score=0.100584671844, stagnant=0
Generaton #21, Score=0.0974004283988, stagnant=0
Generaton #22, Score=0.094533902446, stagnant=0
Generaton #23, Score=0.0910003821609, stagnant=0
Generaton #24, Score=0.0910003821609, stagnant=0
Generaton #25, Score=0.0905620576106, stagnant=1
Generaton #26, Score=0.0866654176526, stagnant=2
Generaton #27, Score=0.0826733880209, stagnant=0
Generaton #28, Score=0.0816455270936, stagnant=0
Generaton #29, Score=0.0799649368276, stagnant=0
Generaton #30, Score=0.0797301141794, stagnant=0
Generaton #31, Score=0.0774793573792, stagnant=1
Generaton #32, Score=0.0767527501314, stagnant=0
Generaton #33, Score=0.0764559059563, stagnant=1
Generaton #34, Score=0.0749918540669, stagnant=2
Generaton #35, Score=0.0723100319898, stagnant=0
Generaton #36, Score=0.071279017377, stagnant=0
Generaton #37, Score=0.0692806352376, stagnant=0
Generaton #38, Score=0.0687199631007, stagnant=0
Generaton #39, Score=0.0671800095714, stagnant=1
Generaton #40, Score=0.0651154796387, stagnant=0
Generaton #41, Score=0.0640848760543, stagnant=0
Generaton #42, Score=0.062768548122, stagnant=0
Generaton #43, Score=0.0623897612924, stagnant=0
Generaton #44, Score=0.0613174410677, stagnant=1
Generaton #45, Score=0.0600323016682, stagnant=0
Generaton #46, Score=0.0590140769361, stagnant=0
Generaton #47, Score=0.0579662753868, stagnant=0
Generaton #48, Score=0.0563771595186, stagnant=0
Generaton #49, Score=0.0557091224927, stagnant=0
Generaton #50, Score=0.0557091224927, stagnant=1
Generaton #51, Score=0.0556228207268, stagnant=2
Generaton #52, Score=0.0547559332724, stagnant=3
Generaton #53, Score=0.0547559332724, stagnant=4
Generaton #54, Score=0.0544944263627, stagnant=5
Generaton #55, Score=0.0539352236468, stagnant=6
Generaton #56, Score=0.0535581096618, stagnant=7
Generaton #57, Score=0.0527253713172, stagnant=8
Generaton #58, Score=0.0525153691128, stagnant=9
[ 0.22222222 0.625 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.16666667 0.41666667 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.11111111 0.5 0.05084746 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.08333333 0.45833333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.66666667 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.79166667 0.11864407 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.08333333 0.58333333 0.06779661 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.58333333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.02777778 0.375 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.16666667 0.45833333 0.08474576 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.70833333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.58333333 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.41666667 0.06779661 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0. 0.41666667 0.01694915 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.41666667 0.83333333 0.03389831 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.38888889 1. 0.08474576 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.79166667 0.05084746 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.625 0.06779661 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.38888889 0.75 0.11864407 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.75 0.08474576 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.58333333 0.11864407 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.70833333 0.08474576 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.08333333 0.66666667 0. 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.54166667 0.11864407 0.16666667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.58333333 0.15254237 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.41666667 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.58333333 0.10169492 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.25 0.625 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.25 0.58333333 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.11111111 0.5 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.45833333 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.30555556 0.58333333 0.08474576 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.25 0.875 0.08474576 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.33333333 0.91666667 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.16666667 0.45833333 0.08474576 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.5 0.03389831 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.33333333 0.625 0.05084746 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.16666667 0.45833333 0.08474576 0. ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.02777778 0.41666667 0.05084746 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.58333333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.625 0.05084746 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.05555556 0.125 0.05084746 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.02777778 0.5 0.05084746 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.625 0.10169492 0.20833333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.75 0.15254237 0.125 ] -> Iris-setosa, Ideal: Iris-setosa
[ 0.13888889 0.41666667 0.06779661 0.08333333] -> Iris-setosa, Ideal: Iris-setosa
[ 0.22222222 0.75 0.10169492 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.08333333 0.5 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.27777778 0.70833333 0.08474576 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.19444444 0.54166667 0.06779661 0.04166667] -> Iris-setosa, Ideal: Iris-setosa
[ 0.75 0.5 0.62711864 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.58333333 0.5 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.72222222 0.45833333 0.66101695 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.125 0.50847458 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.61111111 0.33333333 0.61016949 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.33333333 0.59322034 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.55555556 0.54166667 0.62711864 0.625 ] -> Iris-virginica, Ideal: Iris-versicolor
[ 0.16666667 0.16666667 0.38983051 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.63888889 0.375 0.61016949 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.25 0.29166667 0.49152542 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.19444444 0. 0.42372881 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.44444444 0.41666667 0.54237288 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.47222222 0.08333333 0.50847458 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.5 0.375 0.62711864 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.375 0.44067797 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.66666667 0.45833333 0.57627119 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.41666667 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.41666667 0.29166667 0.52542373 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.52777778 0.08333333 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.20833333 0.49152542 0.41666667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.44444444 0.5 0.6440678 0.70833333] -> Iris-virginica, Ideal: Iris-versicolor
[ 0.5 0.33333333 0.50847458 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.55555556 0.20833333 0.66101695 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.5 0.33333333 0.62711864 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.58333333 0.375 0.55932203 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.63888889 0.41666667 0.57627119 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.69444444 0.33333333 0.6440678 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.66666667 0.41666667 0.6779661 0.66666667] -> Iris-virginica, Ideal: Iris-versicolor
[ 0.47222222 0.375 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.25 0.42372881 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.16666667 0.47457627 0.41666667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.16666667 0.45762712 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.41666667 0.29166667 0.49152542 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.47222222 0.29166667 0.69491525 0.625 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.30555556 0.41666667 0.59322034 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.47222222 0.58333333 0.59322034 0.625 ] -> Iris-virginica, Ideal: Iris-versicolor
[ 0.66666667 0.45833333 0.62711864 0.58333333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.55555556 0.125 0.57627119 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.41666667 0.52542373 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.20833333 0.50847458 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.33333333 0.25 0.57627119 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.5 0.41666667 0.61016949 0.54166667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.41666667 0.25 0.50847458 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.19444444 0.125 0.38983051 0.375 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.36111111 0.29166667 0.54237288 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.41666667 0.54237288 0.45833333] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.375 0.54237288 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.52777778 0.375 0.55932203 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.22222222 0.20833333 0.33898305 0.41666667] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.38888889 0.33333333 0.52542373 0.5 ] -> Iris-versicolor, Ideal: Iris-versicolor
[ 0.55555556 0.54166667 0.84745763 1. ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.41666667 0.29166667 0.69491525 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.77777778 0.41666667 0.83050847 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.375 0.77966102 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.61111111 0.41666667 0.81355932 0.875 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.91666667 0.41666667 0.94915254 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.16666667 0.20833333 0.59322034 0.66666667] -> Iris-versicolor, Ideal: Iris-virginica
[ 0.83333333 0.375 0.89830508 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.20833333 0.81355932 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.80555556 0.66666667 0.86440678 1. ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.61111111 0.5 0.69491525 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.29166667 0.72881356 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.69444444 0.41666667 0.76271186 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.38888889 0.20833333 0.6779661 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.41666667 0.33333333 0.69491525 0.95833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.5 0.72881356 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.61111111 0.41666667 0.76271186 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.94444444 0.75 0.96610169 0.875 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.94444444 0.25 1. 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.47222222 0.08333333 0.6779661 0.58333333] -> Iris-versicolor, Ideal: Iris-virginica
[ 0.72222222 0.5 0.79661017 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.36111111 0.33333333 0.66101695 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.94444444 0.33333333 0.96610169 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.29166667 0.66101695 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.54166667 0.79661017 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.80555556 0.5 0.84745763 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.52777778 0.33333333 0.6440678 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.5 0.41666667 0.66101695 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.33333333 0.77966102 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.80555556 0.41666667 0.81355932 0.625 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.86111111 0.33333333 0.86440678 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 1. 0.75 0.91525424 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.33333333 0.77966102 0.875 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.33333333 0.69491525 0.58333333] -> Iris-versicolor, Ideal: Iris-virginica
[ 0.5 0.25 0.77966102 0.54166667] -> Iris-versicolor, Ideal: Iris-virginica
[ 0.94444444 0.41666667 0.86440678 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.58333333 0.77966102 0.95833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.58333333 0.45833333 0.76271186 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.47222222 0.41666667 0.6440678 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.72222222 0.45833333 0.74576271 0.83333333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.45833333 0.77966102 0.95833333] -> Iris-virginica, Ideal: Iris-virginica
[ 0.72222222 0.45833333 0.69491525 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.41666667 0.29166667 0.69491525 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.69444444 0.5 0.83050847 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.54166667 0.79661017 1. ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.66666667 0.41666667 0.71186441 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.55555556 0.20833333 0.6779661 0.75 ] -> Iris-virginica, Ideal: Iris-virginica
[ 0.61111111 0.41666667 0.71186441 0.79166667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.52777778 0.58333333 0.74576271 0.91666667] -> Iris-virginica, Ideal: Iris-virginica
[ 0.44444444 0.41666667 0.69491525 0.70833333] -> Iris-virginica, Ideal: Iris-virginica
Process finished with exit code 0
"""
__author__ = 'jheaton'
import os
import sys
import numpy as np
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from normalize import Normalize
from rbf_network import RbfNetwork
from error import ErrorCalculation
from genetic import *
from pso import *
# find the Iris data set
irisFile = os.path.dirname(os.path.realpath(__file__))
irisFile = os.path.abspath(irisFile + "../../datasets/iris.csv")
# Read the Iris data set.
print('Reading CSV file: ' + irisFile)
norm = Normalize()
iris_work = norm.load_csv(irisFile)
# Extract the original iris species so we can display during the final validation.
ideal_species = [row[4] for row in iris_work]
# Setup the first four fields to "range normalize" between -1 and 1.
for i in range(0, 4):
norm.make_col_numeric(iris_work, i)
norm.norm_col_range(iris_work, i, 0, 1)
# Discover all of the classes for column #4, the iris species.
classes = norm.build_class_map(iris_work, 4)
inv_classes = {v: k for k, v in classes.items()}
# Normalize iris species using one-of-n.
# We could have used equilateral as well. For an example of equilateral, see the example_nm_iris example.
norm.norm_col_one_of_n(iris_work, 4, classes, 0, 1)
# Prepare training data. Separate into input and ideal.
training = np.array(iris_work)
training_input = training[:, 0:4]
training_ideal = training[:, 4:7]
# Create an RBF network. There are four inputs and two outputs.
# There are also five RBF functions used internally.
# You can experiment with different numbers of internal RBF functions.
# However, the input and output must match the data set.
network = RbfNetwork(4, 4, 3)
network.reset()
def score_funct(x):
"""
The score function for Iris anneal.
@param x:
@return:
"""
global best_score
global input_data
global output_data
# Update the network's long term memory to the vector we need to score.
network.copy_memory(x)
# Loop over the training set and calculate the output for each.
actual_output = []
for input_data in training_input:
output_data = network.compute_regression(input_data)
actual_output.append(output_data)
# Calculate the error with MSE.
result = ErrorCalculation.mse(np.array(actual_output), training_ideal)
return result
# Perform the PSO training
train = TrainPSO(30,len(network.long_term_memory),score_funct)
train.display_iteration = True
train.train()
# Display the final validation. We show all of the iris data as well as the predicted species.
train.copy_best(network.long_term_memory)
for i in range(0, len(training_input)):
input_data = training_input[i]
# Compute the output from the RBF network
output_data = network.compute_regression(input_data)
ideal_data = training_ideal[i]
# Decode the three output neurons into a class number.
class_id = norm.denorm_one_of_n(output_data)
print(str(input_data) + " -> " + inv_classes[class_id] + ", Ideal: " + ideal_species[i]) | trenton3983/Artificial_Intelligence_for_Humans | vol2/vol2-python-examples/examples/example_pso_iris.py | Python | apache-2.0 | 20,910 | [
"VisIt"
] | 832af39ad7f61bade0d07e76dd80addb7e2653204297beb3b28e9b65165a05f5 |
import numpy as np
from enthought.mayavi import mlab
import Image
def disp_odf(sph_map, theta_res=64, phi_res=32, colormap='RGB', colors=256):
pi = np.pi
sin = np.sin
cos = np.cos
theta, phi = np.mgrid[0:2*pi:theta_res*1j, 0:pi:phi_res*1j]
x = sin(phi)*cos(theta)
y = sin(phi)*sin(theta)
z = cos(phi)
nvox = np.prod(sph_map.shape)
x_cen, y_cen, z_cen = _3grid(sph_map.shape)
odf_values = sph_map.evaluate_at(theta, phi)
max_value = odf_values.max()
mlab.figure()
for ii in range(nvox):
odf_ii = odf_values.reshape(nvox, theta_res, phi_res)[ii,:,:]
odf_ii /= max_value * 2
if colormap == 'RGB':
rgb = np.r_['-1,3,0', x*odf_ii, y*odf_ii, z*odf_ii]
rgb = np.abs(rgb*255/rgb.max()).astype('uint8')
odf_im = Image.fromarray(rgb, mode='RGB')
odf_im = odf_im.convert('P', palette=Image.ADAPTIVE, colors=colors)
lut = np.empty((colors,4),'uint8')
lut[:,3] = 255
lut[:,0:3] = np.reshape(odf_im.getpalette(),(colors,3))
oo = mlab.mesh(x*odf_ii + x_cen.flat[ii],
y*odf_ii + y_cen.flat[ii],
z*odf_ii + z_cen.flat[ii],
scalars=np.int16(odf_im))
oo.module_manager.scalar_lut_manager.lut.table=lut
else:
oo = mlab.mesh(x*odf_ii + x_cen.flat[ii],
y*odf_ii + y_cen.flat[ii],
z*odf_ii + z_cen.flat[ii],
scalars=odf_ii,
colormap=colormap)
def _3grid(shape):
if len(shape) > 3:
raise ValueError('cannot display 4d image')
elif len(shape) < 3:
d = [1, 1, 1]
d[0:len(shape)] = shape
else:
d = shape
return np.mgrid[0:d[0], 0:d[1], 0:d[2]]
if __name__ == '__main__':
import dipy.core.qball as qball
from dipy.io.bvectxt import read_bvec_file
filename='/Users/bagrata/HARDI/E1322S8I1.nii.gz'
grad_table_filename='/Users/bagrata/HARDI/E1322S8I1.bvec'
from nipy import load_image, save_image
grad_table, b_values = read_bvec_file(grad_table_filename)
img = load_image(filename)
print 'input dimensions: '
print img.ndim
print 'image size: '
print img.shape
print 'image affine: '
print img.affine
print 'images has pixels with size: '
print np.dot(img.affine, np.eye(img.ndim+1)).diagonal()[0:3]
data = np.asarray(img)
theta, phi = np.mgrid[0:2*np.pi:64*1j, 0:np.pi:32*1j]
odf_i = qball.ODF(data[188:192,188:192,22:24,:],4,grad_table,b_values)
disp_odf(odf_i[0:1,0:2,0:2])
| StongeEtienne/dipy | scratch/odf.py | Python | bsd-3-clause | 2,711 | [
"Mayavi"
] | 24b3c9472c959778b9242a99fa60a4b382af68e558cdef1f63a62fe980012dff |
# ===============================================
# MODULE STUDY: os
import os
reload(os)
print '*----------------------------------------*'
path = 'C:/' # nt
path = '/' # linux
print os.stat(path)
print '*----------------------------------------*'
print os.error # <type 'exceptions.OSError'>
print os.error() # OSError()
print '*----------------------------------------*'
print os.name # 'posix', 'nt', 'os2', 'ce', 'java', 'riscos'
# you can see platform module too and sys.platform
################################ Process Parameters ################################
print '*----------------------------------------*'
print os.environ # A mapping object representing the string environment
print sorted(os.environ)
print os.environ['HOMEPATH']
print os.environ['PATH']
print os.environ['WINDIR']
print os.environ['USER']
print os.environ['NUMBER_OF_PROCESSORS']
print os.environ['MAYA_LOCATION']
print os.environ['PROCESSOR_ARCHITECTURE']
print os.environ['HOME']
print os.environ['USERNAME']
print os.environ['PYTHONPATH']
print os.environ['HOMEDRIVE']
print os.environ['MAYA_PLUG_IN_PATH']
print os.environ['OS']
for e in os.environ.keys():
print e, ":", os.environ[e]
# we can access too many information of OS with os.environ
print '*----------------------------------------*'
print os.getcwd() # E:\Madoodia\_Python\_learning_python
new_path = 'D:/'
os.chdir(new_path)
print os.getcwd() # D:/
print os.getpid() # Return the current process id.
print os.getenv('USERNAME') # Return the value of the environment variable varname if it exists
print os.getenv('NOT_EXISTS')
os.putenv(varname, value) # Set the environment variable named varname to the string value
os.strerror(code) # Return the error message corresponding to the error code in code
os.umask(mask) # Set the current numeric umask and return the previous umask.
os.uname() # Availability: Unix
os.unsetenv(varname) # Unset (delete) the environment variable named varname
################################ File Object Creation ################################
os.fdopen(fd[, mode[, bufsize]])
os.popen(command[, mode[, bufsize]]) # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
os.tmpfile() # Deprecated since version 2.6: All of the popen*() functions are obsolete. Use the subprocess module
os.popen2(cmd[, mode[, bufsize]]) # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
os.popen3(cmd[, mode[, bufsize]]) # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
os.popen4(cmd[, mode[, bufsize]]) # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
################################ File Descriptor Operations ################################
os.close(fd) # Close file descriptor fd.
os.closerange(fd_low, fd_high) # Close all file descriptors from fd_low (inclusive) to fd_high (exclusive), ignoring errors
os.dup(fd) # Return a duplicate of file descriptor fd.
os.dup2(fd, fd2) # Duplicate file descriptor fd to fd2
os.fstat(fd) # Return status for file descriptor fd, like stat().
os.fsync(fd) # Force write of file with filedescriptor fd to disk
os.isatty(fd) # Return True if the file descriptor fd is open and connected to a tty(-like) device, else False.
os.lseek(fd, pos, how) # Set the current position of file descriptor fd to position pos, modified by how: SEEK_SET or 0
os.open(file, flags[, mode]) # Open the file file and set various flags according to flags
os.pipe() # Create a pipe. Return a pair of file descriptors (r, w)
os.read(fd, n) # Read at most n bytes from file descriptor fd
os.write(fd, str) # Write the string str to file descriptor fd
################################ Files and Directories ################################
os.access(path, mode) # Use the real uid/gid to test for access to path
os.chdir(path) # Change the current working directory to path.
os.getcwd() # Return a string representing the current working directory.
os.getcwdu() # Return a Unicode object representing the current working directory.
os.chmod(path, mode) # Change the mode of path to the numeric mode
os.listdir(path) # Return a list containing the names of the entries in the directory given by path
os.lstat(path) # Perform the equivalent of an lstat() system call on the given path
os.mkdir(path[, mode]) # Create a directory named path with numeric mode mode
os.makedirs(path[, mode]) # Recursive directory creation function.
# Like mkdir(), but makes all intermediate-level directories needed to contain the leaf directory
os.remove(path) # Remove (delete) the file path. If path is a directory, OSError is raised; see rmdir() below to remove a directory
os.removedirs(path) # Remove directories recursively. Works like rmdir() except that, if the leaf directory is successfully removed
os.rename(src, dst) # Rename the file or directory src to dst. If dst is a directory, OSError will be raised
os.renames(old, new) # Recursive directory or file renaming function
os.rmdir(path) # Remove (delete) the directory path. Only works when the directory is empty, otherwise, OSError is raised
os.stat(path) # Perform the equivalent of a stat() system call on the given path
os.utime(path, times) # Set the access and modified times of the file specified by path
# Generate the file names in a directory tree by walking the tree either top-down or bottom-up
os.walk(top, topdown=True, onerror=None, followlinks=False)
################################ Process Management ################################
os.abort() # Generate a SIGABRT signal to the current process
os._exit(n) # Exit the process with status n # The standard way to exit is sys.exit(n)
os.startfile(path[, operation]) # Start a file with its associated application.
# The subprocess module provides more powerful facilities for spawning new processes and retrieving their results
os.system(command) # Execute the command (a string) in a subshell
os.times() # Return a 5-tuple of floating point numbers indicating accumulated (processor or other) times, in seconds
################################ Miscellaneous System Information ################################
os.curdir # The constant string used by the operating system to refer to the current directory
os.pardir # The constant string used by the operating system to refer to the parent directory
os.sep # The character used by the operating system to separate pathname components
os.altsep # An alternative character used by the operating system to separate pathname components
os.extsep # The character which separates the base filename from the extension
os.pathsep # The character conventionally used by the operating system to separate search path components
os.defpath # The default search path used by exec*p* and spawn*p* if the environment doesn’t have a 'PATH' key
os.linesep # The string used to separate (or, rather, terminate) lines on the current platform.
################################ Miscellaneous Functions ################################
os.urandom(n) # Return a string of n random bytes suitable for cryptographic use
# ****************************************** os.path *********************************************** #
# This module implements some useful functions on pathnames.
# To read or write files see open(), and for accessing the filesystem see the os module.
path = 'C:/Python27/Lib/site-packages/sip.pyd'
os.path.abspath(path) # Return a normalized absolutized version of the pathname path
os.path.basename(path) # Return the base name of pathname path
os.path.commonprefix(list) # Return the longest path prefix that is a prefix of all paths in list
os.path.dirname(path) # Return the directory name of pathname path
os.path.exists(path) # Return True if path refers to an existing path
os.path.lexists(path) # Return True if path refers to an existing path
os.path.expanduser(path) # On Unix and Windows, return the argument with an initial component of ~ or ~user replaced by that user‘s home directory.
os.path.expandvars(path) # Return the argument with environment variables expanded.
os.path.getatime(path) # Return the time of last access of path
os.path.getmtime(path) # Return the time of last modification of path
os.path.getctime(path) # Return the system’s ctime which, on some systems (like Unix) is the time of the last metadata change, and, on others (like Windows), is the creation time for path
os.path.getsize(path) # Return the size, in bytes, of path
os.path.isabs(path) # Return True if path is an absolute pathname
os.path.isfile(path) # Return True if path is an existing regular file
os.path.isdir(path) # Return True if path is an existing directory
os.path.islink(path) # Return True if path refers to a directory entry that is a symbolic link
os.path.join(path1[, path2[, ...]]) # Join one or more path components intelligently
os.path.normcase(path) # Normalize the case of a pathname
os.path.normpath(path) # Normalize a pathname by collapsing redundant separators and up-level references
os.path.realpath(path) # Return the canonical path of the specified filename
os.path.relpath(path[, start]) # Return a relative filepath to path either from the current directory or from an optional start directory.
os.path.samefile(path1, path2) # Return True if both pathname arguments refer to the same file or directory
os.path.split(path) # Split the pathname path into a pair, (head, tail) where tail is the last pathname component and head is everything leading up to that
os.path.splitdrive(path) # Split the pathname path into a pair (drive, tail) where drive is either a drive specification or the empty string.
os.path.splitext(path) # Split the pathname path into a pair (root, ext) such that root + ext == path
os.path.splitunc(path) # Split the pathname path into a pair (unc, rest) so that unc is the UNC mount point (such as r'\\host\mount'),
os.path.walk(path, visit, arg) # Calls the function visit with arguments (arg, dirname, names) for each directory in the directory tree rooted at path
| madoodia/codeLab | python/modules_os.py | Python | mit | 10,636 | [
"VisIt"
] | 43aa99dafa1ee0a7f5df3b32115342e213cfc49228a81693d97431cd5515e512 |
#!/usr/bin/python
# script extract a particular genome region and compute a z-score piRNA signature
# version 1 - 22-06-2012
# Usage bowtie_window_analysis.py <bowtie input> <geneID> <Upstream_coordinate> <Downstream_coordinate> <bowtie index> <output>
import sys, subprocess
from collections import defaultdict # required for some SmRNAwindow attributes (readDic)
from numpy import mean, std # required for some SmRNAwindow methods
from smRtools import *
geneID = sys.argv[2]
Upstream_coordinate = int(sys.argv[3])
Downstream_coordinate = int(sys.argv[4])
fasta_dic = get_fasta (sys.argv[5])
geneSequence = fasta_dic[sys.argv[2]][Upstream_coordinate:Downstream_coordinate]
geneObject= SmRNAwindow(geneID, geneSequence)
F = open (sys.argv[1], "r") # F is the bowtie output taken as input
counter = 0
for line in F:
fields = line.split()
if fields[2] != geneID : continue
polarity = fields[1]
coordinate = int(fields[3])
if (coordinate < Upstream_coordinate or coordinate > Downstream_coordinate) : continue
size = len(fields[4])
geneObject.addread (polarity, coordinate, size)
F.close()
OUT = open (sys.argv[6], "w")
pipi_z = geneObject.z_signature(23,28,23,28, range(1,26) )
print >> OUT, "pipi signature"
print >> OUT, pipi_z
print >> OUT, "sisi signature"
print >> OUT, geneObject.z_signature(20,22,20,22, range(1,26) )
print >> OUT, "total read analyzed"
print >> OUT, geneObject.readcount()
print >> OUT, "size distribution of these reads"
print >> OUT, geneObject.readsizes()
OUT.close()
| JuPeg/tools-artbio | unstable/local_tools/bowtie_window_analysis.py | Python | mit | 1,514 | [
"Bowtie"
] | 3b412a3264ebf666530b3ea82b718be6d026d39c0a92b54add14a16a8e468ba1 |
"""
client_manual.py
Copyright 2016 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import traceback
from common import mynetwork
class ObserverClientManual(mynetwork.SingleLineMasterClient):
"Just add some printing"
def handler_message(self,msg,FileNo):
print "Receieved '%s' from %i" % (msg,FileNo)
mynetwork.SingleLineMasterClient.handler_message(self, msg, FileNo)
def sendmessage(self,FileNo,msg):
print "Sent %i '%s'" % (FileNo,msg)
mynetwork.SingleLineProtocolServer.sendmessage(self, FileNo, msg)
def main():
client = ObserverClientManual()
# Have to run this to get the connection set up
client.network_events()
cnt = 0
client.sendserver("Join-Observer")
try:
while True:
time.sleep(.07)
option = raw_input("Info? > ")
client.sendserver("DUMP")
client.network_events()
except:
traceback.print_exc()
time.sleep(5)
| brianr747/Simple4Xpygame | clients/client_manual.py | Python | apache-2.0 | 1,507 | [
"Brian"
] | 11fade0adc6c7ccc54691e84aa91704d764db5e07cd570b2200954308bbde6de |
# Information about the IUPAC alphabets
protein_letters = "ACDEFGHIKLMNPQRSTVWY"
extended_protein_letters = "ACDEFGHIKLMNPQRSTVWYBXZJUO"
# B = "Asx"; aspartic acid or asparagine (D or N)
# X = "Xxx"; unknown or 'other' amino acid
# Z = "Glx"; glutamic acid or glutamine (E or Q)
# http://www.chem.qmul.ac.uk/iupac/AminoAcid/A2021.html#AA212
#
# J = "Xle"; leucine or isoleucine (L or I, used in NMR)
# Mentioned in http://www.chem.qmul.ac.uk/iubmb/newsletter/1999/item3.html
# Also the International Nucleotide Sequence Database Collaboration (INSDC)
# (i.e. GenBank, EMBL, DDBJ) adopted this in 2006
# http://www.ddbj.nig.ac.jp/insdc/icm2006-e.html
#
# Xle (J); Leucine or Isoleucine
# The residue abbreviations, Xle (the three-letter abbreviation) and J
# (the one-letter abbreviation) are reserved for the case that cannot
# experimentally distinguish leucine from isoleucine.
#
# U = "Sec"; selenocysteine
# http://www.chem.qmul.ac.uk/iubmb/newsletter/1999/item3.html
#
# O = "Pyl"; pyrrolysine
# http://www.chem.qmul.ac.uk/iubmb/newsletter/2009.html#item35
ambiguous_dna_letters = "GATCRYWSMKHBVDN"
unambiguous_dna_letters = "GATC"
ambiguous_rna_letters = "GAUCRYWSMKHBVDN"
unambiguous_rna_letters = "GAUC"
# B == 5-bromouridine
# D == 5,6-dihydrouridine
# S == thiouridine
# W == wyosine
extended_dna_letters = "GATCBDSW"
# are there extended forms?
#extended_rna_letters = "GAUCBDSW"
ambiguous_dna_values = {
"A": "A",
"C": "C",
"G": "G",
"T": "T",
"M": "AC",
"R": "AG",
"W": "AT",
"S": "CG",
"Y": "CT",
"K": "GT",
"V": "ACG",
"H": "ACT",
"D": "AGT",
"B": "CGT",
"X": "GATC",
"N": "GATC",
}
ambiguous_rna_values = {
"A": "A",
"C": "C",
"G": "G",
"U": "U",
"M": "AC",
"R": "AG",
"W": "AU",
"S": "CG",
"Y": "CU",
"K": "GU",
"V": "ACG",
"H": "ACU",
"D": "AGU",
"B": "CGU",
"X": "GAUC",
"N": "GAUC",
}
ambiguous_dna_complement = {
"A": "T",
"C": "G",
"G": "C",
"T": "A",
"M": "K",
"R": "Y",
"W": "W",
"S": "S",
"Y": "R",
"K": "M",
"V": "B",
"H": "D",
"D": "H",
"B": "V",
"X": "X",
"N": "N",
}
ambiguous_rna_complement = {
"A": "U",
"C": "G",
"G": "C",
"U": "A",
"M": "K",
"R": "Y",
"W": "W",
"S": "S",
"Y": "R",
"K": "M",
"V": "B",
"H": "D",
"D": "H",
"B": "V",
"X": "X",
"N": "N",
}
def _make_ranges(mydict):
d = {}
for key, value in mydict.iteritems():
d[key] = (value, value)
return d
# From bioperl's SeqStats.pm
unambiguous_dna_weights = {
"A": 347.,
"C": 323.,
"G": 363.,
"T": 322.,
}
unambiguous_dna_weight_ranges = _make_ranges(unambiguous_dna_weights)
unambiguous_rna_weights = {
"A": unambiguous_dna_weights["A"] + 16., # 16 for the oxygen
"C": unambiguous_dna_weights["C"] + 16.,
"G": unambiguous_dna_weights["G"] + 16.,
"U": 340.,
}
unambiguous_rna_weight_ranges = _make_ranges(unambiguous_rna_weights)
def _make_ambiguous_ranges(mydict, weight_table):
range_d = {}
avg_d = {}
for letter, values in mydict.iteritems():
#Following line is a quick hack to skip undefined weights for U and O
if len(values)==1 and values[0] not in weight_table : continue
weights = map(weight_table.get, values)
range_d[letter] = (min(weights), max(weights))
total_w = 0.0
for w in weights:
total_w = total_w + w
avg_d[letter] = total_w / len(weights)
return range_d, avg_d
ambiguous_dna_weight_ranges, avg_ambiguous_dna_weights = \
_make_ambiguous_ranges(ambiguous_dna_values,
unambiguous_dna_weights)
ambiguous_rna_weight_ranges, avg_ambiguous_rna_weights = \
_make_ambiguous_ranges(ambiguous_rna_values,
unambiguous_rna_weights)
protein_weights = {
"A": 89.09,
"C": 121.16,
"D": 133.10,
"E": 147.13,
"F": 165.19,
"G": 75.07,
"H": 155.16,
"I": 131.18,
"K": 146.19,
"L": 131.18,
"M": 149.21,
"N": 132.12,
#"O": 0.0, # Needs to be recorded!
"P": 115.13,
"Q": 146.15,
"R": 174.20,
"S": 105.09,
"T": 119.12,
#"U": 168.05, # To be confirmed
"V": 117.15,
"W": 204.23,
"Y": 181.19
}
extended_protein_values = {
"A": "A",
"B": "ND",
"C": "C",
"D": "D",
"E": "E",
"F": "F",
"G": "G",
"H": "H",
"I": "I",
"J": "IL",
"K": "K",
"L": "L",
"M": "M",
"N": "N",
"O": "O",
"P": "P",
"Q": "Q",
"R": "R",
"S": "S",
"T": "T",
"U": "U",
"V": "V",
"W": "W",
"X": "ACDEFGHIKLMNPQRSTVWY",
#TODO - Include U and O in the possible values of X?
#This could alter the extended_protein_weight_ranges ...
"Y": "Y",
"Z": "QE",
}
protein_weight_ranges = _make_ranges(protein_weights)
extended_protein_weight_ranges, avg_extended_protein_weights = \
_make_ambiguous_ranges(extended_protein_values,
protein_weights)
# For Center of Mass Calculation.
# Taken from http://www.chem.qmul.ac.uk/iupac/AtWt/ & PyMol
atom_weights = {
'H' : 1.00794,
'He' : 4.002602,
'Li' : 6.941,
'Be' : 9.012182,
'B' : 10.811,
'C' : 12.0107,
'N' : 14.0067,
'O' : 15.9994,
'F' : 18.9984032,
'Ne' : 20.1797,
'Na' : 22.989770,
'Mg' : 24.3050,
'Al' : 26.981538,
'Si' : 28.0855,
'P' : 30.973761,
'S' : 32.065,
'Cl' : 35.453,
'Ar' : 39.948,
'K' : 39.0983,
'Ca' : 40.078,
'Sc' : 44.955910,
'Ti' : 47.867,
'V' : 50.9415,
'Cr' : 51.9961,
'Mn' : 54.938049,
'Fe' : 55.845,
'Co' : 58.933200,
'Ni' : 58.6934,
'Cu' : 63.546,
'Zn' : 65.39,
'Ga' : 69.723,
'Ge' : 72.64,
'As' : 74.92160,
'Se' : 78.96,
'Br' : 79.904,
'Kr' : 83.80,
'Rb' : 85.4678,
'Sr' : 87.62,
'Y' : 88.90585,
'Zr' : 91.224,
'Nb' : 92.90638,
'Mo' : 95.94,
'Tc' : 98.0,
'Ru' : 101.07,
'Rh' : 102.90550,
'Pd' : 106.42,
'Ag' : 107.8682,
'Cd' : 112.411,
'In' : 114.818,
'Sn' : 118.710,
'Sb' : 121.760,
'Te' : 127.60,
'I' : 126.90447,
'Xe' : 131.293,
'Cs' : 132.90545,
'Ba' : 137.327,
'La' : 138.9055,
'Ce' : 140.116,
'Pr' : 140.90765,
'Nd' : 144.24,
'Pm' : 145.0,
'Sm' : 150.36,
'Eu' : 151.964,
'Gd' : 157.25,
'Tb' : 158.92534,
'Dy' : 162.50,
'Ho' : 164.93032,
'Er' : 167.259,
'Tm' : 168.93421,
'Yb' : 173.04,
'Lu' : 174.967,
'Hf' : 178.49,
'Ta' : 180.9479,
'W' : 183.84,
'Re' : 186.207,
'Os' : 190.23,
'Ir' : 192.217,
'Pt' : 195.078,
'Au' : 196.96655,
'Hg' : 200.59,
'Tl' : 204.3833,
'Pb' : 207.2,
'Bi' : 208.98038,
'Po' : 208.98,
'At' : 209.99,
'Rn' : 222.02,
'Fr' : 223.02,
'Ra' : 226.03,
'Ac' : 227.03,
'Th' : 232.0381,
'Pa' : 231.03588,
'U' : 238.02891,
'Np' : 237.05,
'Pu' : 244.06,
'Am' : 243.06,
'Cm' : 247.07,
'Bk' : 247.07,
'Cf' : 251.08,
'Es' : 252.08,
'Fm' : 257.10,
'Md' : 258.10,
'No' : 259.10,
'Lr' : 262.11,
'Rf' : 261.11,
'Db' : 262.11,
'Sg' : 266.12,
'Bh' : 264.12,
'Hs' : 269.13,
'Mt' : 268.14,
}
| bryback/quickseq | genescript/Bio/Data/IUPACData.py | Python | mit | 7,553 | [
"BioPerl",
"PyMOL"
] | 97aa12dca31cb3eaa80b1d5aa9ab8382f992a70a3d7f19ded69ac75d0e300653 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Dealii(CMakePackage, CudaPackage):
    """C++ software library providing well-documented tools to build finite
    element codes for a broad variety of PDEs."""
    homepage = "https://www.dealii.org"
    url = "https://github.com/dealii/dealii/releases/download/v8.4.1/dealii-8.4.1.tar.gz"
    git = "https://github.com/dealii/dealii.git"
    maintainers = ['davydden', 'jppelteret', 'luca-heltai']
    # Don't add RPATHs to this package for the full build DAG.
    # only add for immediate deps.
    transitive_rpaths = False
    # Known releases (newest first); 'master' tracks upstream development.
    version('master', branch='master')
    version('9.2.0', sha256='d05a82fb40f1f1e24407451814b5a6004e39366a44c81208b1ae9d65f3efa43a')
    version('9.1.1', sha256='fc5b483f7fe58dfeb52d05054011280f115498e337af3e085bf272fd1fd81276')
    version('9.1.0', sha256='5b070112403f8afbb72345c1bb24d2a38d11ce58891217e353aab97957a04600')
    version('9.0.1', sha256='df2f0d666f2224be07e3741c0e8e02132fd67ea4579cd16a2429f7416146ee64')
    version('9.0.0', sha256='c918dc5c1a31d62f6eea7b524dcc81c6d00b3c378d4ed6965a708ab548944f08')
    version('8.5.1', sha256='d33e812c21a51f7e5e3d3e6af86aec343155650b611d61c1891fbc3cabce09ae')
    version('8.5.0', sha256='e6913ff6f184d16bc2598c1ba31f879535b72b6dff043e15aef048043ff1d779')
    version('8.4.2', sha256='ec7c00fadc9d298d1a0d16c08fb26818868410a9622c59ba624096872f3058e4')
    version('8.4.1', sha256='00a0e92d069cdafd216816f1aff460f7dbd48744b0d9e0da193287ebf7d6b3ad')
    version('8.4.0', sha256='36a20e097a03f17b557e11aad1400af8c6252d25f7feca40b611d5fc16d71990')
    version('8.3.0', sha256='4ddf72632eb501e1c814e299f32fc04fd680d6fda9daff58be4209e400e41779')
    version('8.2.1', sha256='d75674e45fe63cd9fa294460fe45228904d51a68f744dbb99cd7b60720f3b2a0')
    version('8.1.0', sha256='d666bbda2a17b41b80221d7029468246f2658051b8c00d9c5907cd6434c4df99')
    # Optional-feature switches; several are only valid together with +mpi
    # (enforced via conflicts() further below).
    variant('mpi', default=True, description='Compile with MPI')
    variant('assimp', default=True,
            description='Compile with Assimp')
    variant('arpack', default=True,
            description='Compile with Arpack and PArpack (only with MPI)')
    variant('adol-c', default=True,
            description='Compile with Adol-c')
    variant('doc', default=False,
            description='Compile with documentation')
    variant('ginkgo', default=True, description='Compile with Ginkgo')
    variant('gmsh', default=True, description='Compile with GMSH')
    variant('gsl', default=True, description='Compile with GSL')
    variant('hdf5', default=True,
            description='Compile with HDF5 (only with MPI)')
    variant('metis', default=True, description='Compile with Metis')
    variant('muparser', default=True, description='Compile with muParser')
    variant('nanoflann', default=True, description='Compile with Nanoflann')
    variant('netcdf', default=True,
            description='Compile with Netcdf (only with MPI)')
    variant('oce', default=True, description='Compile with OCE')
    variant('p4est', default=True,
            description='Compile with P4est (only with MPI)')
    variant('petsc', default=True,
            description='Compile with Petsc (only with MPI)')
    variant('scalapack', default=True,
            description='Compile with ScaLAPACK (only with MPI)')
    variant('sundials', default=True,
            description='Compile with Sundials')
    variant('slepc', default=True,
            description='Compile with Slepc (only with Petsc and MPI)')
    variant('symengine', default=True,
            description='Compile with SymEngine')
    variant('threads', default=True,
            description='Compile with multi-threading via TBB')
    variant('trilinos', default=True,
            description='Compile with Trilinos (only with MPI)')
    variant('python', default=False,
            description='Compile with Python bindings')
    variant('int64', default=False,
            description='Compile with 64 bit indices support')
    variant('optflags', default=False,
            description='Compile using additional optimization flags')
    variant('build_type', default='DebugRelease',
            description='The build type to build',
            values=('Debug', 'Release', 'DebugRelease'))
    # required dependencies, light version
    depends_on('blas')
    # Boost 1.58 is blacklisted, require at least 1.59, see
    # https://github.com/dealii/dealii/issues/1591
    # There are issues with 1.65.1 and 1.65.0:
    # https://github.com/dealii/dealii/issues/5262
    # we take the patch from https://github.com/boostorg/serialization/pull/79
    # more precisely its variation https://github.com/dealii/dealii/pull/5572#issuecomment-349742019
    # 1.68.0 has issues with serialization https://github.com/dealii/dealii/issues/7074
    # adopt https://github.com/boostorg/serialization/pull/105 as a fix
    # Two boost specs: +python adds the Boost.Python component when the
    # deal.II python bindings are requested.
    depends_on('boost@1.59.0:1.63,1.65.1,1.67.0:+thread+system+serialization+iostreams',
               patches=[patch('boost_1.65.1_singleton.patch',
                              level=1,
                              when='@1.65.1'),
                        patch('boost_1.68.0.patch',
                              level=1,
                              when='@1.68.0'),
                        ],
               when='~python')
    depends_on('boost@1.59.0:1.63,1.65.1,1.67.0:+thread+system+serialization+iostreams+python',
               patches=[patch('boost_1.65.1_singleton.patch',
                              level=1,
                              when='@1.65.1'),
                        patch('boost_1.68.0.patch',
                              level=1,
                              when='@1.68.0'),
                        ],
               when='+python')
    # bzip2 is not needed since 9.0
    depends_on('bzip2', when='@:8.99')
    depends_on('lapack')
    depends_on('suite-sparse')
    depends_on('zlib')
    # optional dependencies
    depends_on('mpi', when='+mpi')
    depends_on('adol-c@2.6.4:', when='@9.0:+adol-c')
    depends_on('arpack-ng+mpi', when='+arpack+mpi')
    depends_on('assimp', when='@9.0:+assimp')
    depends_on('doxygen+graphviz', when='+doc')
    depends_on('graphviz', when='+doc')
    depends_on('ginkgo', when='@9.1:+ginkgo')
    depends_on('gmsh+tetgen+netgen+oce', when='@9.0:+gmsh', type=('build', 'run'))
    depends_on('gsl', when='@8.5.0:+gsl')
    # FIXME: next line fixes concretization with petsc
    depends_on('hdf5+mpi+hl+fortran', when='+hdf5+mpi+petsc')
    depends_on('hdf5+mpi+hl', when='+hdf5+mpi~petsc')
    depends_on('cuda@8:', when='+cuda')
    depends_on('cmake@3.9:', when='+cuda', type='build')
    # older version of deal.II do not build with Cmake 3.10, see
    # https://github.com/dealii/dealii/issues/5510
    depends_on('cmake@:3.9.99', when='@:8.99', type='build')
    # FIXME: concretizer bug. The two lines mimic what comes from PETSc
    # but we should not need it
    depends_on('metis@5:+int64', when='+metis+int64')
    depends_on('metis@5:~int64', when='+metis~int64')
    depends_on('muparser', when='+muparser')
    # Nanoflann support has been removed after 9.2.0
    depends_on('nanoflann', when='@9.0:9.2+nanoflann')
    depends_on('netcdf-c+mpi', when='+netcdf+mpi')
    depends_on('netcdf-cxx', when='+netcdf+mpi')
    depends_on('oce', when='+oce')
    depends_on('p4est', when='+p4est+mpi')
    depends_on('petsc+mpi~int64', when='+petsc+mpi~int64')
    depends_on('petsc+mpi+int64', when='+petsc+mpi+int64')
    depends_on('petsc@:3.6.4', when='@:8.4.1+petsc+mpi')
    depends_on('python', when='@8.5.0:+python')
    depends_on('scalapack', when='@9.0:+scalapack')
    depends_on('slepc', when='+slepc+petsc+mpi')
    depends_on('slepc@:3.6.3', when='@:8.4.1+slepc+petsc+mpi')
    depends_on('slepc~arpack', when='+slepc+petsc+mpi+int64')
    depends_on('sundials@:3~pthread', when='@9.0:+sundials')
    depends_on('trilinos gotype=int', when='+trilinos')
    # Both Trilinos and SymEngine bundle the Teuchos RCP library.
    # This leads to conflicts between macros defined in the included
    # headers when they are not compiled in the same mode.
    # See https://github.com/symengine/symengine/issues/1516
    # FIXME: uncomment when the following is fixed
    # https://github.com/spack/spack/issues/11160
    # depends_on("symengine@0.4: build_type=Release", when="@9.1:+symengine+trilinos^trilinos~debug")  # NOQA: ignore=E501
    # depends_on("symengine@0.4: build_type=Debug", when="@9.1:+symengine+trilinos^trilinos+debug")  # NOQA: ignore=E501
    depends_on('symengine@0.4:', when='@9.1:+symengine')
    depends_on('tbb', when='+threads')
    # do not require +rol to make concretization of xsdk possible
    depends_on('trilinos+amesos+aztec+epetra+ifpack+ml+muelu+sacado+teuchos', when='+trilinos+mpi~int64~cuda')
    depends_on('trilinos+amesos+aztec+epetra+ifpack+ml+muelu+sacado+teuchos~hypre', when='+trilinos+mpi+int64~cuda')
    # FIXME: temporary disable Tpetra when using CUDA due to
    # namespace "Kokkos::Impl" has no member "cuda_abort"
    depends_on('trilinos@master+amesos+aztec+epetra+ifpack+ml+muelu+rol+sacado+teuchos~amesos2~ifpack2~intrepid2~kokkos~tpetra~zoltan2', when='+trilinos+mpi~int64+cuda')
    depends_on('trilinos@master+amesos+aztec+epetra+ifpack+ml+muelu+rol+sacado+teuchos~hypre~amesos2~ifpack2~intrepid2~kokkos~tpetra~zoltan2', when='+trilinos+mpi+int64+cuda')
    # Explicitly provide a destructor in BlockVector,
    # otherwise deal.II may fail to build with Intel compilers.
    patch('https://github.com/dealii/dealii/commit/a89d90f9993ee9ad39e492af466b3595c06c3e25.patch',
          sha256='4282b32e96f2f5d376eb34f3fddcc4615fcd99b40004cca784eb874288d1b31c',
          when='@9.0.1')
    # https://github.com/dealii/dealii/pull/7935
    patch('https://github.com/dealii/dealii/commit/f8de8c5c28c715717bf8a086e94f071e0fe9deab.patch',
          sha256='61f217744b70f352965be265d2f06e8c1276685e2944ca0a88b7297dd55755da',
          when='@9.0.1 ^boost@1.70.0:')
    # Fix TBB version check
    # https://github.com/dealii/dealii/pull/9208
    patch('https://github.com/dealii/dealii/commit/80b13fe5a2eaefc77fa8c9266566fa8a2de91edf.patch',
          sha256='6f876dc8eadafe2c4ec2a6673864fb451c6627ca80511b6e16f3c401946fdf33',
          when='@9.0.0:9.1.1')
    # check that the combination of variants makes sense
    # 64-bit BLAS:
    for p in ['openblas', 'intel-mkl', 'intel-parallel-studio+mkl']:
        conflicts('^{0}+ilp64'.format(p), when='@:8.5.1',
                  msg='64bit BLAS is only supported from 9.0.0')
    # interfaces added in 9.0.0:
    for p in ['assimp', 'gmsh', 'nanoflann', 'scalapack', 'sundials',
              'adol-c']:
        conflicts('+{0}'.format(p), when='@:8.5.1',
                  msg='The interface to {0} is supported from version 9.0.0 '
                      'onwards. Please explicitly disable this variant '
                      'via ~{0}'.format(p))
    # interfaces added in 9.1.0:
    for p in ['ginkgo', 'symengine']:
        conflicts('+{0}'.format(p), when='@:9.0',
                  msg='The interface to {0} is supported from version 9.1.0 '
                      'onwards. Please explicitly disable this variant '
                      'via ~{0}'.format(p))
    conflicts('+nanoflann', when='@9.3.0:',
              msg='The interface to nanoflann was removed from version 9.3.0. '
                  'Please explicitly disable this variant via ~nanoflann')
    conflicts('+slepc', when='~petsc',
              msg='It is not possible to enable slepc interfaces '
                  'without petsc.')
    conflicts('+adol-c', when='^trilinos+chaco',
              msg='symbol clash between the ADOL-C library and '
                  'Trilinos SEACAS Chaco.')
    # interfaces added in 8.5.0:
    for p in ['gsl', 'python']:
        conflicts('+{0}'.format(p), when='@:8.4.2',
                  msg='The interface to {0} is supported from version 8.5.0 '
                      'onwards. Please explicitly disable this variant '
                      'via ~{0}'.format(p))
    # MPI requirements:
    for p in ['arpack', 'hdf5', 'netcdf', 'p4est', 'petsc', 'scalapack',
              'slepc', 'trilinos']:
        conflicts('+{0}'.format(p), when='~mpi',
                  msg='To enable {0} it is necessary to build deal.II with '
                      'MPI support enabled.'.format(p))
    def cmake_args(self):
        """Build the list of CMake arguments from the concretized spec.

        Each optional feature is translated into the corresponding
        -DDEAL_II_WITH_XYZ=ON/OFF switch, plus explicit paths/libraries
        where CMake's own find modules are unreliable.
        """
        spec = self.spec
        options = []
        # release flags
        cxx_flags_release = []
        # debug and release flags
        cxx_flags = []
        lapack_blas_libs = spec['lapack'].libs + spec['blas'].libs
        lapack_blas_headers = spec['lapack'].headers + spec['blas'].headers
        options.extend([
            '-DDEAL_II_COMPONENT_EXAMPLES=ON',
            '-DBOOST_DIR=%s' % spec['boost'].prefix,
            # CMake's FindBlas/Lapack may pickup system's blas/lapack instead
            # of Spack's. Be more specific to avoid this.
            # Note that both lapack and blas are provided in -DLAPACK_XYZ.
            '-DLAPACK_FOUND=true',
            '-DLAPACK_INCLUDE_DIRS=%s' % ';'.join(
                lapack_blas_headers.directories),
            '-DLAPACK_LIBRARIES=%s' % lapack_blas_libs.joined(';'),
            '-DUMFPACK_DIR=%s' % spec['suite-sparse'].prefix,
            '-DZLIB_DIR=%s' % spec['zlib'].prefix,
            '-DDEAL_II_ALLOW_BUNDLED=OFF'
        ])
        # Threading (TBB):
        if '+threads' in spec:
            options.append('-DDEAL_II_WITH_THREADS:BOOL=ON')
        else:
            options.extend(['-DDEAL_II_WITH_THREADS:BOOL=OFF'])
        if (spec.satisfies('^intel-parallel-studio+tbb')
            and '+threads' in spec):
            # deal.II/cmake will have hard time picking up TBB from Intel.
            tbb_ver = '.'.join(('%s' % spec['tbb'].version).split('.')[1:])
            options.extend([
                '-DTBB_FOUND=true',
                '-DTBB_VERSION=%s' % tbb_ver,
                '-DTBB_INCLUDE_DIRS=%s' % ';'.join(
                    spec['tbb'].headers.directories),
                '-DTBB_LIBRARIES=%s' % spec['tbb'].libs.joined(';')
            ])
        else:
            options.append('-DTBB_DIR=%s' % spec['tbb'].prefix)
        if (spec.satisfies('^openblas+ilp64') or
            spec.satisfies('^intel-mkl+ilp64') or
            spec.satisfies('^intel-parallel-studio+mkl+ilp64')):
            options.append('-DLAPACK_WITH_64BIT_BLAS_INDICES=ON')
        if spec.satisfies('@:8.99'):
            options.extend([
                # Cmake may still pick up system's bzip2, fix this:
                '-DBZIP2_FOUND=true',
                '-DBZIP2_INCLUDE_DIRS=%s' % spec['bzip2'].prefix.include,
                '-DBZIP2_LIBRARIES=%s' % spec['bzip2'].libs.joined(';')
            ])
        # Set recommended flags for maximum (matrix-free) performance, see
        # https://groups.google.com/forum/?fromgroups#!topic/dealii/3Yjy8CBIrgU
        if spec.satisfies('%gcc'):
            cxx_flags_release.extend(['-O3'])
        elif spec.satisfies('%intel'):
            cxx_flags_release.extend(['-O3'])
        elif spec.satisfies('%clang') or spec.satisfies('%apple-clang'):
            cxx_flags_release.extend(['-O3', '-ffp-contract=fast'])
        # Python bindings
        if spec.satisfies('@8.5.0:'):
            options.extend([
                '-DDEAL_II_COMPONENT_PYTHON_BINDINGS=%s' %
                ('ON' if '+python' in spec else 'OFF')
            ])
            if '+python' in spec:
                python_exe = spec['python'].command.path
                python_library = spec['python'].libs[0]
                python_include = spec['python'].headers.directories[0]
                options.extend([
                    '-DPYTHON_EXECUTABLE=%s' % python_exe,
                    '-DPYTHON_INCLUDE_DIR=%s' % python_include,
                    '-DPYTHON_LIBRARY=%s' % python_library
                ])
        # Set directory structure:
        if spec.satisfies('@:8.2.1'):
            options.extend(['-DDEAL_II_COMPONENT_COMPAT_FILES=OFF'])
        else:
            options.extend([
                '-DDEAL_II_EXAMPLES_RELDIR=share/deal.II/examples',
                '-DDEAL_II_DOCREADME_RELDIR=share/deal.II/',
                '-DDEAL_II_DOCHTML_RELDIR=share/deal.II/doc'
            ])
        # CUDA
        if '+cuda' in spec:
            options.append(
                '-DDEAL_II_WITH_CUDA=ON'
            )
            if not spec.satisfies('^cuda@9:'):
                options.append('-DDEAL_II_WITH_CXX14=OFF')
            cuda_arch = spec.variants['cuda_arch'].value
            if cuda_arch != 'none':
                if len(cuda_arch) > 1:
                    raise InstallError(
                        'deal.II only supports compilation for a single GPU!'
                    )
                flags = '-arch=sm_{0}'.format(cuda_arch[0])
                # FIXME: there are some compiler errors in dealii
                # with: flags = ' '.join(self.cuda_flags(cuda_arch))
                # Stick with -arch=sm_xy for now.
                options.append(
                    '-DDEAL_II_CUDA_FLAGS={0}'.format(flags)
                )
        else:
            options.extend([
                '-DDEAL_II_WITH_CUDA=OFF',
            ])
        # MPI
        if '+mpi' in spec:
            options.extend([
                '-DDEAL_II_WITH_MPI:BOOL=ON',
                '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
                '-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
                '-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
            ])
        else:
            options.extend([
                '-DDEAL_II_WITH_MPI:BOOL=OFF',
            ])
        # Optional dependencies for which library names are the same as CMake
        # variables:
        for library in (
                'gsl', 'hdf5', 'p4est', 'petsc', 'slepc', 'trilinos', 'metis',
                'sundials', 'nanoflann', 'assimp', 'gmsh', 'muparser',
                'symengine', 'ginkgo'):
            if ('+' + library) in spec:
                options.extend([
                    '-D%s_DIR=%s' % (library.upper(), spec[library].prefix),
                    '-DDEAL_II_WITH_%s:BOOL=ON' % library.upper()
                ])
            else:
                options.extend([
                    '-DDEAL_II_WITH_%s:BOOL=OFF' % library.upper()
                ])
        # adol-c
        if '+adol-c' in spec:
            options.extend([
                '-DADOLC_DIR=%s' % spec['adol-c'].prefix,
                '-DDEAL_II_WITH_ADOLC=ON'
            ])
        else:
            options.extend([
                '-DDEAL_II_WITH_ADOLC=OFF'
            ])
        # doxygen
        options.extend([
            '-DDEAL_II_COMPONENT_DOCUMENTATION=%s' %
            ('ON' if '+doc' in spec else 'OFF'),
        ])
        # arpack
        if '+arpack' in spec and '+mpi' in spec:
            options.extend([
                '-DARPACK_DIR=%s' % spec['arpack-ng'].prefix,
                '-DDEAL_II_WITH_ARPACK=ON',
                '-DDEAL_II_ARPACK_WITH_PARPACK=ON'
            ])
        else:
            options.extend([
                '-DDEAL_II_WITH_ARPACK=OFF'
            ])
        # since Netcdf is spread among two, need to do it by hand:
        if '+netcdf' in spec and '+mpi' in spec:
            netcdf = spec['netcdf-cxx'].libs + spec['netcdf-c'].libs
            options.extend([
                '-DNETCDF_FOUND=true',
                '-DNETCDF_LIBRARIES=%s' % netcdf.joined(';'),
                '-DNETCDF_INCLUDE_DIRS=%s;%s' % (
                    spec['netcdf-cxx'].prefix.include,
                    spec['netcdf-c'].prefix.include),
            ])
        else:
            options.extend([
                '-DDEAL_II_WITH_NETCDF=OFF'
            ])
        # ScaLAPACK is not covered by the generic loop above because it
        # needs explicit include dirs and libraries:
        if '+scalapack' in spec:
            scalapack = spec['scalapack'].libs
            options.extend([
                '-DSCALAPACK_FOUND=true',
                '-DSCALAPACK_INCLUDE_DIRS=%s' % (
                    spec['scalapack'].prefix.include),
                '-DSCALAPACK_LIBRARIES=%s' % scalapack.joined(';'),
                '-DDEAL_II_WITH_SCALAPACK=ON'
            ])
        else:
            options.extend([
                '-DDEAL_II_WITH_SCALAPACK=OFF'
            ])
        # Open Cascade
        if '+oce' in spec:
            options.extend([
                '-DOPENCASCADE_DIR=%s' % spec['oce'].prefix,
                '-DDEAL_II_WITH_OPENCASCADE=ON'
            ])
        else:
            options.extend([
                '-DDEAL_II_WITH_OPENCASCADE=OFF'
            ])
        # 64 bit indices
        options.extend([
            '-DDEAL_II_WITH_64BIT_INDICES=%s' % ('+int64' in spec)
        ])
        # collect CXX flags:
        if len(cxx_flags_release) > 0 and '+optflags' in spec:
            options.extend([
                '-DCMAKE_CXX_FLAGS_RELEASE:STRING=%s' % (
                    ' '.join(cxx_flags_release)),
                '-DCMAKE_CXX_FLAGS:STRING=%s' % (
                    ' '.join(cxx_flags))
            ])
        # Add flags for machine vectorization, used when tutorials
        # and user code is built.
        # See https://github.com/dealii/dealii/issues/9164
        options.extend([
            '-DDEAL_II_CXX_FLAGS=%s' % os.environ['SPACK_TARGET_ARGS']
        ])
        return options
    def setup_run_environment(self, env):
        """Export DEAL_II_DIR so user projects can find this installation."""
        env.set('DEAL_II_DIR', self.prefix)
| rspavel/spack | var/spack/repos/builtin/packages/dealii/package.py | Python | lgpl-2.1 | 21,842 | [
"NetCDF"
] | 51c3d2981b1282238e31c47e5d493d30ca3c260f1373ba8b6f45f27f52d3abb4 |
#!/usr/bin/env python
#
# Copyright 2008 Jose Fonseca
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''Visualize dot graphs via the xdot format.'''
__author__ = "Jose Fonseca"
__version__ = "0.4"
import os
import sys
import subprocess
import math
import colorsys
import time
import re
import gobject
import gtk
import gtk.gdk
import gtk.keysyms
import cairo
import pango
import pangocairo
# See http://www.graphviz.org/pub/scm/graphviz-cairo/plugin/cairo/gvrender_cairo.c
# For pygtk inspiration and guidance see:
# - http://mirageiv.berlios.de/
# - http://comix.sourceforge.net/
class Pen:
    """A bundle of drawing attributes: colors, line width, font and dash."""

    def __init__(self):
        # Defaults: opaque black stroke/fill, 1pt solid lines,
        # 14pt Times-Roman text.
        self.color = (0.0, 0.0, 0.0, 1.0)
        self.fillcolor = (0.0, 0.0, 0.0, 1.0)
        self.linewidth = 1.0
        self.fontsize = 14.0
        self.fontname = "Times-Roman"
        self.dash = ()

    def copy(self):
        """Return an independent clone of this pen."""
        clone = Pen()
        clone.__dict__ = self.__dict__.copy()
        return clone

    def highlighted(self):
        """Return a copy recolored for highlighting (red stroke, pink fill)."""
        hot = self.copy()
        hot.color = (1, 0, 0, 1)
        hot.fillcolor = (1, .8, .8, 1)
        return hot
class Shape:
    """Abstract base class for all the drawing shapes."""

    def __init__(self):
        pass

    def draw(self, cr, highlight=False):
        """Draw this shape with the given cairo context."""
        raise NotImplementedError

    def select_pen(self, highlight):
        """Return the shape's pen, or a lazily-cached highlight variant."""
        if not highlight:
            return self.pen
        try:
            return self.highlight_pen
        except AttributeError:
            # First highlight request: derive and cache the highlight pen.
            self.highlight_pen = self.pen.highlighted()
            return self.highlight_pen
class TextShape(Shape):
    """A run of text laid out with Pango and scaled to fit the box
    that graphviz/dot computed for it."""

    #fontmap = pangocairo.CairoFontMap()
    #fontmap.set_resolution(72)
    #context = fontmap.create_context()

    # Justification codes, matching xdot's "T" operator argument.
    LEFT, CENTER, RIGHT = -1, 0, 1

    def __init__(self, pen, x, y, j, w, t):
        # x, y: anchor point; j: justification; w: width dot expects;
        # t: the text itself.
        Shape.__init__(self)
        self.pen = pen.copy()
        self.x = x
        self.y = y
        self.j = j
        self.w = w
        self.t = t

    def draw(self, cr, highlight=False):
        # The Pango layout is built once and cached on the instance
        # (EAFP: first call raises AttributeError and takes the setup path).
        try:
            layout = self.layout
        except AttributeError:
            layout = cr.create_layout()
            # set font options
            # see http://lists.freedesktop.org/archives/cairo/2007-February/009688.html
            context = layout.get_context()
            fo = cairo.FontOptions()
            fo.set_antialias(cairo.ANTIALIAS_DEFAULT)
            fo.set_hint_style(cairo.HINT_STYLE_NONE)
            fo.set_hint_metrics(cairo.HINT_METRICS_OFF)
            try:
                pangocairo.context_set_font_options(context, fo)
            except TypeError:
                # XXX: Some broken pangocairo bindings show the error
                # 'TypeError: font_options must be a cairo.FontOptions or None'
                pass
            # set font
            font = pango.FontDescription()
            font.set_family(self.pen.fontname)
            font.set_absolute_size(self.pen.fontsize * pango.SCALE)
            layout.set_font_description(font)
            # set text
            layout.set_text(self.t)
            # cache it
            self.layout = layout
        else:
            cr.update_layout(layout)
        descent = 2 # XXX get descender from font metrics
        # Pango reports sizes in Pango units; convert to points.
        width, height = layout.get_size()
        width = float(width) / pango.SCALE
        height = float(height) / pango.SCALE
        # we know the width that dot thinks this text should have
        # we do not necessarily have a font with the same metrics
        # scale it so that the text fits inside its box
        if width > self.w:
            f = self.w / width
            width = self.w # equivalent to width *= f
            height *= f
            descent *= f
        else:
            f = 1.0
        # Horizontal placement depends on the justification code.
        if self.j == self.LEFT:
            x = self.x
        elif self.j == self.CENTER:
            x = self.x - 0.5 * width
        elif self.j == self.RIGHT:
            x = self.x - width
        else:
            assert 0
        y = self.y - height + descent
        cr.move_to(x, y)
        cr.save()
        cr.scale(f, f)
        cr.set_source_rgba(*self.select_pen(highlight).color)
        cr.show_layout(layout)
        cr.restore()
        if 0: # DEBUG
            # show where dot thinks the text should appear
            cr.set_source_rgba(1, 0, 0, .9)
            if self.j == self.LEFT:
                x = self.x
            elif self.j == self.CENTER:
                x = self.x - 0.5 * self.w
            elif self.j == self.RIGHT:
                x = self.x - self.w
            cr.move_to(x, self.y)
            cr.line_to(x + self.w, self.y)
            cr.stroke()
class ImageShape(Shape):
    """A bitmap image fitted into a w x h box anchored at (x0, y0)."""

    def __init__(self, pen, x0, y0, w, h, path):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.x0 = x0
        self.y0 = y0
        self.w = w
        self.h = h
        self.path = path

    def draw(self, cr, highlight=False):
        gdk_cr = gtk.gdk.CairoContext(cr)
        pixbuf = gtk.gdk.pixbuf_new_from_file(self.path)
        # Scale factors mapping the pixbuf's pixel size onto the target box.
        scale_x = float(self.w) / float(pixbuf.get_width())
        scale_y = float(self.h) / float(pixbuf.get_height())
        cr.save()
        # Shift up by the height so the box's bottom edge sits at y0.
        cr.translate(self.x0, self.y0 - self.h)
        cr.scale(scale_x, scale_y)
        gdk_cr.set_source_pixbuf(pixbuf, 0, 0)
        gdk_cr.paint()
        cr.restore()
class EllipseShape(Shape):
    """An ellipse centered at (x0, y0) with half-axes w and h."""

    def __init__(self, pen, x0, y0, w, h, filled=False):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.x0 = x0
        self.y0 = y0
        self.w = w
        self.h = h
        self.filled = filled

    def draw(self, cr, highlight=False):
        # Trace a unit circle under a translate+scale transform so it
        # becomes the requested ellipse; restore before stroking so the
        # line width is not distorted.
        cr.save()
        cr.translate(self.x0, self.y0)
        cr.scale(self.w, self.h)
        cr.move_to(1.0, 0.0)
        cr.arc(0.0, 0.0, 1.0, 0, 2.0 * math.pi)
        cr.restore()
        pen = self.select_pen(highlight)
        if not self.filled:
            cr.set_dash(pen.dash)
            cr.set_line_width(pen.linewidth)
            cr.set_source_rgba(*pen.color)
            cr.stroke()
        else:
            cr.set_source_rgba(*pen.fillcolor)
            cr.fill()
class PolygonShape(Shape):
    """A closed polygon through the given sequence of points."""

    def __init__(self, pen, points, filled=False):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.points = points
        self.filled = filled

    def draw(self, cr, highlight=False):
        # Start at the final vertex so the loop visits every vertex and
        # close_path() completes the ring.
        last_x, last_y = self.points[-1]
        cr.move_to(last_x, last_y)
        for px, py in self.points:
            cr.line_to(px, py)
        cr.close_path()
        pen = self.select_pen(highlight)
        if not self.filled:
            cr.set_dash(pen.dash)
            cr.set_line_width(pen.linewidth)
            cr.set_source_rgba(*pen.color)
            cr.stroke()
        else:
            cr.set_source_rgba(*pen.fillcolor)
            cr.fill_preserve()
            cr.fill()
class LineShape(Shape):
    """An open polyline through the given sequence of points."""

    def __init__(self, pen, points):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.points = points

    def draw(self, cr, highlight=False):
        start_x, start_y = self.points[0]
        cr.move_to(start_x, start_y)
        for px, py in self.points[1:]:
            cr.line_to(px, py)
        pen = self.select_pen(highlight)
        cr.set_dash(pen.dash)
        cr.set_line_width(pen.linewidth)
        cr.set_source_rgba(*pen.color)
        cr.stroke()
class BezierShape(Shape):
    """A chain of cubic Bezier segments, optionally filled."""

    def __init__(self, pen, points, filled=False):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.points = points
        self.filled = filled

    def draw(self, cr, highlight=False):
        start_x, start_y = self.points[0]
        cr.move_to(start_x, start_y)
        # After the start point, points come in triples:
        # two control points followed by the segment end point.
        for i in xrange(1, len(self.points), 3):
            c1x, c1y = self.points[i]
            c2x, c2y = self.points[i + 1]
            end_x, end_y = self.points[i + 2]
            cr.curve_to(c1x, c1y, c2x, c2y, end_x, end_y)
        pen = self.select_pen(highlight)
        if not self.filled:
            cr.set_dash(pen.dash)
            cr.set_line_width(pen.linewidth)
            cr.set_source_rgba(*pen.color)
            cr.stroke()
        else:
            cr.set_source_rgba(*pen.fillcolor)
            cr.fill_preserve()
            cr.fill()
class CompoundShape(Shape):
    """A shape made of several sub-shapes, drawn in sequence."""

    def __init__(self, shapes):
        Shape.__init__(self)
        self.shapes = shapes

    def draw(self, cr, highlight=False):
        # Propagate the highlight flag down to every child shape.
        for child in self.shapes:
            child.draw(cr, highlight=highlight)
class Url(object):
    """A clickable hyperlink attached to a graph item."""

    def __init__(self, item, url, highlight=None):
        self.item = item
        self.url = url
        # By default only the owning item is highlighted on hover.
        self.highlight = set([item]) if highlight is None else highlight
class Jump(object):
    """A jump target: activating it pans the view to (x, y)."""

    def __init__(self, item, x, y, highlight=None):
        self.item = item
        self.x = x
        self.y = y
        # By default only the owning item is highlighted.
        self.highlight = set([item]) if highlight is None else highlight
class Element(CompoundShape):
    """Base class for graph nodes and edges."""

    def __init__(self, shapes):
        CompoundShape.__init__(self, shapes)

    def get_url(self, x, y):
        # Not clickable unless a subclass overrides this.
        return None

    def get_jump(self, x, y):
        # Not a jump target unless a subclass overrides this.
        return None
class Node(Element):
    """A graph node with an axis-aligned bounding box and optional URL."""

    def __init__(self, x, y, w, h, shapes, url):
        Element.__init__(self, shapes)
        self.x = x
        self.y = y
        # Precompute the bounding-box corners from center and size.
        half_w = 0.5 * w
        half_h = 0.5 * h
        self.x1 = x - half_w
        self.y1 = y - half_h
        self.x2 = x + half_w
        self.y2 = y + half_h
        self.url = url

    def is_inside(self, x, y):
        """Return True when (x, y) falls within the bounding box."""
        return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2

    def get_url(self, x, y):
        if self.url is None:
            return None
        if not self.is_inside(x, y):
            return None
        return Url(self, self.url)

    def get_jump(self, x, y):
        if not self.is_inside(x, y):
            return None
        return Jump(self, self.x, self.y)
def square_distance(x1, y1, x2, y2):
    """Return the squared Euclidean distance between two points."""
    dx = x2 - x1
    dy = y2 - y1
    return dx ** 2 + dy ** 2
class Edge(Element):
    """A graph edge; clicking near one endpoint jumps to the node at the
    other end."""

    # Hit-test radius (in drawing units) around each endpoint.
    RADIUS = 10

    def __init__(self, src, dst, points, shapes):
        Element.__init__(self, shapes)
        self.src = src
        self.dst = dst
        self.points = points

    def get_jump(self, x, y):
        radius_sq = self.RADIUS * self.RADIUS
        # Near the tail -> jump to the head node; near the head -> jump back.
        if square_distance(x, y, *self.points[0]) <= radius_sq:
            return Jump(self, self.dst.x, self.dst.y, highlight=set([self, self.dst]))
        if square_distance(x, y, *self.points[-1]) <= radius_sq:
            return Jump(self, self.src.x, self.src.y, highlight=set([self, self.src]))
        return None
class Graph(Shape):
    """The whole drawing: background shapes plus nodes and edges."""

    def __init__(self, width=1, height=1, shapes=(), nodes=(), edges=()):
        Shape.__init__(self)
        self.width = width
        self.height = height
        self.shapes = shapes
        self.nodes = nodes
        self.edges = edges

    def get_size(self):
        """Return the drawing's (width, height)."""
        return self.width, self.height

    def draw(self, cr, highlight_items=None):
        if highlight_items is None:
            highlight_items = ()
        cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
        cr.set_line_cap(cairo.LINE_CAP_BUTT)
        cr.set_line_join(cairo.LINE_JOIN_MITER)
        # Paint order: background shapes, then edges, then nodes on top.
        for shape in self.shapes:
            shape.draw(cr)
        for edge in self.edges:
            edge.draw(cr, highlight=(edge in highlight_items))
        for node in self.nodes:
            node.draw(cr, highlight=(node in highlight_items))

    def get_url(self, x, y):
        """Return the Url under (x, y), or None."""
        for node in self.nodes:
            hit = node.get_url(x, y)
            if hit is not None:
                return hit
        return None

    def get_jump(self, x, y):
        """Return the Jump under (x, y); edges take precedence over nodes."""
        for edge in self.edges:
            hit = edge.get_jump(x, y)
            if hit is not None:
                return hit
        for node in self.nodes:
            hit = node.get_jump(x, y)
            if hit is not None:
                return hit
        return None
class XDotAttrParser:
    """Parser for xdot drawing attributes.
    See also:
    - http://www.graphviz.org/doc/info/output.html#d:xdot

    Walks the attribute string with a cursor (self.pos), translating each
    drawing operation into Shape objects accumulated in self.shapes.
    """
    def __init__(self, parser, buf):
        # parser: the owning parser, used for coordinate transforms.
        # buf: the raw xdot attribute string to walk.
        self.parser = parser
        self.buf = buf
        self.pos = 0
        self.pen = Pen()
        self.shapes = []
    def __nonzero__(self):
        # Truthy while unread input remains (Python 2 truth protocol).
        return self.pos < len(self.buf)
    def read_code(self):
        """Read the next space-delimited word and skip trailing whitespace."""
        pos = self.buf.find(" ", self.pos)
        res = self.buf[self.pos:pos]
        self.pos = pos + 1
        while self.pos < len(self.buf) and self.buf[self.pos].isspace():
            self.pos += 1
        return res
    def read_number(self):
        return int(self.read_code())
    def read_float(self):
        return float(self.read_code())
    def read_point(self):
        """Read an x y pair and map it through the parser's transform."""
        x = self.read_number()
        y = self.read_number()
        return self.transform(x, y)
    def read_text(self):
        """Read a length-prefixed string of the form 'N -text'."""
        num = self.read_number()
        pos = self.buf.find("-", self.pos) + 1
        self.pos = pos + num
        res = self.buf[pos:self.pos]
        while self.pos < len(self.buf) and self.buf[self.pos].isspace():
            self.pos += 1
        return res
    def read_polygon(self):
        """Read a point count followed by that many transformed points."""
        n = self.read_number()
        p = []
        for i in range(n):
            x, y = self.read_point()
            p.append((x, y))
        return p
    def read_color(self):
        # See http://www.graphviz.org/doc/info/attrs.html#k:color
        # Returns an (r, g, b, a) tuple of floats, or None if unknown.
        c = self.read_text()
        c1 = c[:1]
        if c1 == '#':
            # "#RRGGBB" or "#RRGGBBAA" hex form.
            hex2float = lambda h: float(int(h, 16) / 255.0)
            r = hex2float(c[1:3])
            g = hex2float(c[3:5])
            b = hex2float(c[5:7])
            try:
                a = hex2float(c[7:9])
            except (IndexError, ValueError):
                a = 1.0
            return r, g, b, a
        elif c1.isdigit() or c1 == ".":
            # "H,S,V" or "H S V" or "H, S, V" or any other variation
            h, s, v = map(float, c.replace(",", " ").split())
            r, g, b = colorsys.hsv_to_rgb(h, s, v)
            a = 1.0
            return r, g, b, a
        else:
            # Named color.
            return self.lookup_color(c)
    def lookup_color(self, c):
        """Resolve a named color via GDK, then Brewer schemes; None if unknown."""
        try:
            color = gtk.gdk.color_parse(c)
        except ValueError:
            pass
        else:
            # GDK channels are 16-bit; normalize to [0, 1].
            s = 1.0 / 65535.0
            r = color.red * s
            g = color.green * s
            b = color.blue * s
            a = 1.0
            return r, g, b, a
        try:
            # Brewer color scheme reference like "/scheme/index".
            dummy, scheme, index = c.split('/')
            r, g, b = brewer_colors[scheme][int(index)]
        except (ValueError, KeyError):
            pass
        else:
            # Brewer table values are 8-bit; normalize to [0, 1].
            s = 1.0 / 255.0
            r = r * s
            g = g * s
            b = b * s
            a = 1.0
            return r, g, b, a
        sys.stderr.write("unknown color '%s'\n" % c)
        return None
    def parse(self):
        """Consume the whole buffer, dispatching on each xdot opcode,
        and return the accumulated list of shapes."""
        s = self
        while s:
            op = s.read_code()
            if op == "c":
                # stroke (pen) color
                color = s.read_color()
                if color is not None:
                    self.handle_color(color, filled=False)
            elif op == "C":
                # fill color
                color = s.read_color()
                if color is not None:
                    self.handle_color(color, filled=True)
            elif op == "S":
                # http://www.graphviz.org/doc/info/attrs.html#k:style
                style = s.read_text()
                if style.startswith("setlinewidth("):
                    lw = style.split("(")[1].split(")")[0]
                    lw = float(lw)
                    self.handle_linewidth(lw)
                elif style in ("solid", "dashed", "dotted"):
                    self.handle_linestyle(style)
            elif op == "F":
                # font: size then family name
                size = s.read_float()
                name = s.read_text()
                self.handle_font(size, name)
            elif op == "T":
                # text: anchor, justification, expected width, string
                x, y = s.read_point()
                j = s.read_number()
                w = s.read_number()
                t = s.read_text()
                self.handle_text(x, y, j, w, t)
            elif op == "E":
                # filled ellipse
                x0, y0 = s.read_point()
                w = s.read_number()
                h = s.read_number()
                self.handle_ellipse(x0, y0, w, h, filled=True)
            elif op == "e":
                # unfilled ellipse
                x0, y0 = s.read_point()
                w = s.read_number()
                h = s.read_number()
                self.handle_ellipse(x0, y0, w, h, filled=False)
            elif op == "L":
                # polyline
                points = self.read_polygon()
                self.handle_line(points)
            elif op == "B":
                # unfilled bezier
                points = self.read_polygon()
                self.handle_bezier(points, filled=False)
            elif op == "b":
                # filled bezier
                points = self.read_polygon()
                self.handle_bezier(points, filled=True)
            elif op == "P":
                # filled polygon
                points = self.read_polygon()
                self.handle_polygon(points, filled=True)
            elif op == "p":
                # unfilled polygon
                points = self.read_polygon()
                self.handle_polygon(points, filled=False)
            elif op == "I":
                # external image
                x0, y0 = s.read_point()
                w = s.read_number()
                h = s.read_number()
                path = s.read_text()
                self.handle_image(x0, y0, w, h, path)
            else:
                sys.stderr.write("unknown xdot opcode '%s'\n" % op)
                break
        return self.shapes
    def transform(self, x, y):
        # Delegate dot-space to canvas-space mapping to the owning parser.
        return self.parser.transform(x, y)
    def handle_color(self, color, filled=False):
        if filled:
            self.pen.fillcolor = color
        else:
            self.pen.color = color
    def handle_linewidth(self, linewidth):
        self.pen.linewidth = linewidth
    def handle_linestyle(self, style):
        if style == "solid":
            self.pen.dash = ()
        elif style == "dashed":
            self.pen.dash = (6, )       # 6pt on, 6pt off
        elif style == "dotted":
            self.pen.dash = (2, 4)       # 2pt on, 4pt off
    def handle_font(self, size, name):
        self.pen.fontsize = size
        self.pen.fontname = name
    def handle_text(self, x, y, j, w, t):
        self.shapes.append(TextShape(self.pen, x, y, j, w, t))
    def handle_ellipse(self, x0, y0, w, h, filled=False):
        if filled:
            # xdot uses this to mean "draw a filled shape with an outline"
            self.shapes.append(EllipseShape(self.pen, x0, y0, w, h, filled=True))
        self.shapes.append(EllipseShape(self.pen, x0, y0, w, h))
    def handle_image(self, x0, y0, w, h, path):
        self.shapes.append(ImageShape(self.pen, x0, y0, w, h, path))
    def handle_line(self, points):
        self.shapes.append(LineShape(self.pen, points))
    def handle_bezier(self, points, filled=False):
        if filled:
            # xdot uses this to mean "draw a filled shape with an outline"
            self.shapes.append(BezierShape(self.pen, points, filled=True))
        self.shapes.append(BezierShape(self.pen, points))
    def handle_polygon(self, points, filled=False):
        if filled:
            # xdot uses this to mean "draw a filled shape with an outline"
            self.shapes.append(PolygonShape(self.pen, points, filled=True))
        self.shapes.append(PolygonShape(self.pen, points))
# Sentinel token types used by Scanner/Lexer:
EOF = -1   # end of input reached
SKIP = -2  # token should be discarded by the lexer's filter
class ParseError(Exception):
    """Raised when dot/xdot input cannot be tokenized or parsed.

    Carries an optional source location so the error renders as
    "filename:line:col:msg" (parts that are None are omitted).
    """

    def __init__(self, msg=None, filename=None, line=None, col=None):
        # Pass msg to Exception so args/repr/pickling behave normally.
        Exception.__init__(self, msg)
        self.msg = msg
        self.filename = filename
        self.line = line
        self.col = col

    def __str__(self):
        # `is not None` (identity), not `!= None`, is the correct test for
        # a missing part; 0 is a legitimate line/column value.
        return ':'.join(str(part) for part in
                        (self.filename, self.line, self.col, self.msg)
                        if part is not None)
class Scanner:
    """Stateless regular-expression scanner.

    Subclasses provide ``tokens`` (a list of (type, regexp, test_lit)
    triples), ``symbols`` (single-character fallback tokens) and
    ``literals`` (keyword remapping applied when test_lit is true).
    """

    # should be overriden by derived classes
    tokens = []
    symbols = {}
    literals = {}
    ignorecase = False

    def __init__(self):
        flags = re.DOTALL
        if self.ignorecase:
            flags |= re.IGNORECASE
        # One big alternation with a capture group per token row; the index
        # of the group that matched identifies the row that produced it.
        pattern = '|'.join('(%s)' % regexp for _, regexp, _ in self.tokens)
        self.tokens_re = re.compile(pattern, flags)

    def next(self, buf, pos):
        """Scan one token at buf[pos]; return (type, text, newpos)."""
        if pos >= len(buf):
            return EOF, '', pos
        match = self.tokens_re.match(buf, pos)
        if not match:
            # not in the token table: fall back to single-char symbols
            char = buf[pos]
            return self.symbols.get(char, None), char, pos + 1
        text = match.group()
        tok_type, _, is_literal = self.tokens[match.lastindex - 1]
        if is_literal:
            # e.g. an identifier that is actually a reserved keyword
            tok_type = self.literals.get(text, tok_type)
        return tok_type, text, match.end()
class Token:
    """A lexical token: its type code, its text, and where it was read."""

    def __init__(self, type, text, line, col):
        # source position is kept so ParseError can point at the input
        self.type, self.text = type, text
        self.line, self.col = line, col
class Lexer:
    """Generic tokenizer driving a Scanner over a string or file.

    Tracks line/column positions (expanding tabs) so errors can report
    the exact source location.
    """

    # should be overriden by derived classes
    scanner = None
    tabsize = 8

    newline_re = re.compile(r'\r\n?|\n')

    def __init__(self, buf=None, pos=0, filename=None, fp=None):
        if fp is not None:
            try:
                fileno = fp.fileno()
                length = os.path.getsize(fp.name)
                import mmap
            # Previously a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; catch only what the three
            # statements above can actually raise (file-like objects
            # without fileno/name, stat failures, missing mmap).
            except (AttributeError, EnvironmentError, ImportError, ValueError):
                # read whole file into memory
                buf = fp.read()
                pos = 0
            else:
                # map the whole file into memory
                if length:
                    # length must not be zero
                    buf = mmap.mmap(fileno, length, access=mmap.ACCESS_READ)
                    # start scanning from the file's current offset
                    pos = os.lseek(fileno, 0, 1)
                else:
                    buf = ''
                    pos = 0
            if filename is None:
                try:
                    filename = fp.name
                except AttributeError:
                    filename = None

        self.buf = buf
        self.pos = pos
        self.line = 1
        self.col = 1
        self.filename = filename

    def next(self):
        """Return the next Token, discarding SKIP tokens.

        Raises ParseError on an unexpected character.
        """
        while True:
            # save state
            pos = self.pos
            line = self.line
            col = self.col

            type, text, endpos = self.scanner.next(self.buf, pos)
            assert pos + len(text) == endpos
            self.consume(text)
            type, text = self.filter(type, text)
            self.pos = endpos

            if type == SKIP:
                continue
            elif type is None:
                msg = 'unexpected char '
                if text >= ' ' and text <= '~':
                    msg += "'%s'" % text
                else:
                    msg += "0x%X" % ord(text)
                raise ParseError(msg, self.filename, line, col)
            else:
                break
        return Token(type=type, text=text, line=line, col=col)

    def consume(self, text):
        """Advance the line/column counters over *text* (tab-aware)."""
        # update line number
        pos = 0
        for mo in self.newline_re.finditer(text, pos):
            self.line += 1
            self.col = 1
            pos = mo.end()

        # update column number, expanding tabs to the next tab stop
        while True:
            tabpos = text.find('\t', pos)
            if tabpos == -1:
                break
            self.col += tabpos - pos
            self.col = ((self.col - 1) // self.tabsize + 1) * self.tabsize + 1
            pos = tabpos + 1
        self.col += len(text) - pos
class Parser:
    """Minimal recursive-descent parser base: one token of lookahead."""

    def __init__(self, lexer):
        self.lexer = lexer
        self.lookahead = self.lexer.next()

    def match(self, type):
        """Verify (without consuming) that the lookahead has this type."""
        if self.lookahead.type != type:
            raise ParseError(
                msg='unexpected token %r' % self.lookahead.text,
                filename=self.lexer.filename,
                line=self.lookahead.line,
                col=self.lookahead.col)

    def skip(self, type):
        """Discard tokens until one of the given type is the lookahead.

        Previously this spun forever on truncated input, since the lexer
        keeps yielding EOF tokens; now it raises ParseError instead.
        """
        while self.lookahead.type != type:
            if self.lookahead.type == EOF:
                raise ParseError(
                    msg='unexpected end of input',
                    filename=self.lexer.filename,
                    line=self.lookahead.line,
                    col=self.lookahead.col)
            self.consume()

    def consume(self):
        """Return the current token and advance to the next one."""
        token = self.lookahead
        self.lookahead = self.lexer.next()
        return token
# Token type codes for the dot grammar (used by DotScanner below).
ID = 0        # bare identifier or numeric literal
STR_ID = 1    # double-quoted string
HTML_ID = 2   # <...> HTML-like label
EDGE_OP = 3   # '->' or '--'

# punctuation
LSQUARE = 4
RSQUARE = 5
LCURLY = 6
RCURLY = 7
COMMA = 8
COLON = 9
SEMI = 10
EQUAL = 11
PLUS = 12

# keywords
STRICT = 13
GRAPH = 14
DIGRAPH = 15
NODE = 16
EDGE = 17
SUBGRAPH = 18
class DotScanner(Scanner):
    """Scanner configured with the dot language's token tables."""

    # token regular expression table
    # NOTE: row order matters -- Scanner maps the matched capture group's
    # index (mo.lastindex) back into this list, so do not reorder.
    tokens = [
        # whitespace and comments
        (SKIP,
            r'[ \t\f\r\n\v]+|'
            r'//[^\r\n]*|'
            r'/\*.*?\*/|'
            r'#[^\r\n]*',
            False),

        # Alphanumeric IDs
        (ID, r'[a-zA-Z_\x80-\xff][a-zA-Z0-9_\x80-\xff]*', True),

        # Numeric IDs
        (ID, r'-?(?:\.[0-9]+|[0-9]+(?:\.[0-9]*)?)', False),

        # String IDs
        (STR_ID, r'"[^"\\]*(?:\\.[^"\\]*)*"', False),

        # HTML IDs
        (HTML_ID, r'<[^<>]*(?:<[^<>]*>[^<>]*)*>', False),

        # Edge operators
        (EDGE_OP, r'-[>-]', False),
    ]

    # symbol table: single-character punctuation tokens
    symbols = {
        '[': LSQUARE,
        ']': RSQUARE,
        '{': LCURLY,
        '}': RCURLY,
        ',': COMMA,
        ':': COLON,
        ';': SEMI,
        '=': EQUAL,
        '+': PLUS,
    }

    # literal table: identifiers that are actually keywords
    literals = {
        'strict': STRICT,
        'graph': GRAPH,
        'digraph': DIGRAPH,
        'node': NODE,
        'edge': EDGE,
        'subgraph': SUBGRAPH,
    }

    # dot keywords are case-insensitive
    ignorecase = True
class DotLexer(Lexer):
    """Lexer for the dot language; normalizes quoted and HTML IDs."""

    scanner = DotScanner()

    def filter(self, type, text):
        # TODO: handle charset
        if type == STR_ID:
            # strip the surrounding quotes
            text = text[1:-1]

            # undo backslash line continuations
            for continuation in ('\\\r\n', '\\\r', '\\\n'):
                text = text.replace(continuation, '')

            # unescape embedded quotes
            text = text.replace('\\"', '"')

            # layout engines recognize other escape codes (many non-standard)
            # but we don't translate them here
            type = ID

        elif type == HTML_ID:
            # strip the surrounding angle brackets
            text = text[1:-1]
            type = ID

        return type, text
class DotParser(Parser):
    """Recursive-descent parser for the dot grammar.

    Accumulates default graph/node/edge attributes and calls the
    handle_graph/handle_node/handle_edge hooks (overridden by XDotParser)
    for each parsed element.
    """

    def __init__(self, lexer):
        Parser.__init__(self, lexer)
        self.graph_attrs = {}
        self.node_attrs = {}
        self.edge_attrs = {}

    def parse(self):
        """Parse the whole buffer; it must contain exactly one graph."""
        self.parse_graph()
        self.match(EOF)

    def parse_graph(self):
        # [strict] (graph|digraph) [id] '{' stmt* '}'
        if self.lookahead.type == STRICT:
            self.consume()
        self.skip(LCURLY)
        self.consume()
        while self.lookahead.type != RCURLY:
            self.parse_stmt()
        self.consume()

    def parse_subgraph(self):
        """Parse an optional 'subgraph [id]' header plus '{...}' body."""
        id = None
        if self.lookahead.type == SUBGRAPH:
            self.consume()
            if self.lookahead.type == ID:
                id = self.lookahead.text
                self.consume()
        if self.lookahead.type == LCURLY:
            self.consume()
            while self.lookahead.type != RCURLY:
                self.parse_stmt()
            self.consume()
        return id

    def parse_stmt(self):
        """Parse one statement: defaults, subgraph, edge chain or node."""
        if self.lookahead.type == GRAPH:
            self.consume()
            attrs = self.parse_attrs()
            self.graph_attrs.update(attrs)
            self.handle_graph(attrs)
        elif self.lookahead.type == NODE:
            self.consume()
            self.node_attrs.update(self.parse_attrs())
        elif self.lookahead.type == EDGE:
            self.consume()
            self.edge_attrs.update(self.parse_attrs())
        elif self.lookahead.type in (SUBGRAPH, LCURLY):
            self.parse_subgraph()
        else:
            id = self.parse_node_id()
            if self.lookahead.type == EDGE_OP:
                self.consume()
                node_ids = [id, self.parse_node_id()]
                while self.lookahead.type == EDGE_OP:
                    # BUG FIX: the edge operator must be consumed before
                    # reading the next node id; without this, chains like
                    # `a -> b -> c` raised ParseError on the second '->'.
                    self.consume()
                    node_ids.append(self.parse_node_id())
                attrs = self.parse_attrs()
                # one edge per consecutive pair in the chain
                for i in range(0, len(node_ids) - 1):
                    self.handle_edge(node_ids[i], node_ids[i + 1], attrs)
            elif self.lookahead.type == EQUAL:
                # top-level `name = value` attribute; value is discarded
                self.consume()
                self.parse_id()
            else:
                attrs = self.parse_attrs()
                self.handle_node(id, attrs)
        if self.lookahead.type == SEMI:
            self.consume()

    def parse_attrs(self):
        """Parse zero or more '[name=value, ...]' lists into one dict."""
        attrs = {}
        while self.lookahead.type == LSQUARE:
            self.consume()
            while self.lookahead.type != RSQUARE:
                name, value = self.parse_attr()
                attrs[name] = value
                if self.lookahead.type == COMMA:
                    self.consume()
            self.consume()
        return attrs

    def parse_attr(self):
        # a bare attribute name means value 'true'
        name = self.parse_id()
        if self.lookahead.type == EQUAL:
            self.consume()
            value = self.parse_id()
        else:
            value = 'true'
        return name, value

    def parse_node_id(self):
        """Parse 'id[:port[:compass_pt]]'; only the id is returned."""
        node_id = self.parse_id()
        if self.lookahead.type == COLON:
            self.consume()
            port = self.parse_id()
            if self.lookahead.type == COLON:
                self.consume()
                compass_pt = self.parse_id()
            else:
                compass_pt = None
        else:
            port = None
            compass_pt = None
        # XXX: we don't really care about port and compass point values when parsing xdot
        return node_id

    def parse_id(self):
        self.match(ID)
        id = self.lookahead.text
        self.consume()
        return id

    # hooks for subclasses ------------------------------------------------

    def handle_graph(self, attrs):
        pass

    def handle_node(self, id, attrs):
        pass

    def handle_edge(self, src_id, dst_id, attrs):
        pass
class XDotParser(DotParser):
    """Parses xdot output (dot -Txdot) into drawable Node/Edge/shape lists."""

    def __init__(self, xdotcode):
        lexer = DotLexer(buf=xdotcode)
        DotParser.__init__(self, lexer)

        self.nodes = []
        self.edges = []
        self.shapes = []
        self.node_by_name = {}
        # the first graph seen supplies the coordinate transform
        self.top_graph = True

    def handle_graph(self, attrs):
        """Record the top graph's transform, then collect its draw attrs."""
        if self.top_graph:
            # the 'bb' (bounding box) attribute defines the transform used
            # by all subsequent shapes
            try:
                bb = attrs['bb']
            except KeyError:
                return

            if not bb:
                return

            xmin, ymin, xmax, ymax = map(float, bb.split(","))

            self.xoffset = -xmin
            self.yoffset = -ymax
            self.xscale = 1.0
            # graphviz's y axis grows upward; the screen's grows downward
            self.yscale = -1.0
            # FIXME: scale from points to pixels

            # clamp to 1 so later divisions by width/height cannot blow up
            self.width = max(xmax - xmin, 1)
            self.height = max(ymax - ymin, 1)

            self.top_graph = False

        for attr in ("_draw_", "_ldraw_", "_hdraw_", "_tdraw_", "_hldraw_", "_tldraw_"):
            if attr in attrs:
                parser = XDotAttrParser(self, attrs[attr])
                self.shapes.extend(parser.parse())

    def handle_node(self, id, attrs):
        """Turn a node's xdot draw attributes into a Node object."""
        try:
            pos = attrs['pos']
        except KeyError:
            # a node without a position cannot be drawn or hit-tested
            return

        x, y = self.parse_node_pos(pos)
        # width/height attrs are in inches; 72 converts to points
        w = float(attrs.get('width', 0)) * 72
        h = float(attrs.get('height', 0)) * 72

        shapes = []
        for attr in ("_draw_", "_ldraw_"):
            if attr in attrs:
                parser = XDotAttrParser(self, attrs[attr])
                shapes.extend(parser.parse())
        url = attrs.get('URL', None)
        node = Node(x, y, w, h, shapes, url)
        self.node_by_name[id] = node
        if shapes:
            self.nodes.append(node)

    def handle_edge(self, src_id, dst_id, attrs):
        """Turn an edge's xdot draw attributes into an Edge object."""
        try:
            pos = attrs['pos']
        except KeyError:
            return

        points = self.parse_edge_pos(pos)
        shapes = []
        for attr in ("_draw_", "_ldraw_", "_hdraw_", "_tdraw_", "_hldraw_", "_tldraw_"):
            if attr in attrs:
                parser = XDotAttrParser(self, attrs[attr])
                shapes.extend(parser.parse())
        if shapes:
            # NOTE(review): this raises KeyError if an endpoint node had no
            # 'pos' attribute (it is then absent from node_by_name) -- confirm
            # whether such input can occur in practice.
            src = self.node_by_name[src_id]
            dst = self.node_by_name[dst_id]
            self.edges.append(Edge(src, dst, points, shapes))

    def parse(self):
        """Parse the buffer and return the assembled Graph."""
        DotParser.parse(self)
        return Graph(self.width, self.height, self.shapes, self.nodes, self.edges)

    def parse_node_pos(self, pos):
        # pos attribute is "x,y"
        x, y = pos.split(",")
        return self.transform(float(x), float(y))

    def parse_edge_pos(self, pos):
        # pos attribute is a space-separated list of "x,y" control points
        points = []
        for entry in pos.split(' '):
            fields = entry.split(',')
            try:
                x, y = fields
            except ValueError:
                # TODO: handle start/end points
                continue
            else:
                points.append(self.transform(float(x), float(y)))
        return points

    def transform(self, x, y):
        """Map graphviz coordinates to screen coordinates (y flipped)."""
        # XXX: this is not the right place for this code
        x = (x + self.xoffset) * self.xscale
        y = (y + self.yoffset) * self.yscale
        return x, y
class Animation(object):
    """Base class for timed animations driven by a gobject timeout."""

    step = 0.03  # seconds

    def __init__(self, dot_widget):
        self.dot_widget = dot_widget
        self.timeout_id = None

    def start(self):
        # tick() is invoked every `step` seconds until it returns False
        self.timeout_id = gobject.timeout_add(int(self.step * 1000), self.tick)

    def stop(self):
        # hand the widget back to the do-nothing animation
        self.dot_widget.animation = NoAnimation(self.dot_widget)
        if self.timeout_id is None:
            return
        gobject.source_remove(self.timeout_id)
        self.timeout_id = None

    def tick(self):
        self.stop()
class NoAnimation(Animation):
    """Null-object animation: installed when nothing is animating."""

    def start(self):
        pass

    def stop(self):
        pass
class LinearAnimation(Animation):
    """Animation that progresses linearly over a fixed duration."""

    duration = 0.6  # seconds

    def start(self):
        self.started = time.time()
        Animation.start(self)

    def tick(self):
        # normalized progress since start(), clamped into [0, 1]
        t = (time.time() - self.started) / self.duration
        self.animate(min(max(t, 0), 1))
        # keep the timeout alive only while the animation is unfinished
        return t < 1

    def animate(self, t):
        pass
class MoveToAnimation(LinearAnimation):
    """Pans the widget from its current position to a target point."""

    def __init__(self, dot_widget, target_x, target_y):
        Animation.__init__(self, dot_widget)
        self.source_x = dot_widget.x
        self.source_y = dot_widget.y
        self.target_x = target_x
        self.target_y = target_y

    def animate(self, t):
        # linear interpolation between source (t=0) and target (t=1)
        u = 1 - t
        self.dot_widget.x = self.target_x * t + self.source_x * u
        self.dot_widget.y = self.target_y * t + self.source_y * u
        self.dot_widget.queue_draw()
class ZoomToAnimation(MoveToAnimation):
    """Pan animation with a temporary zoom-out "bump" mid-flight.

    When the target is far away, the zoom dips below the current ratio at
    the middle of the animation so the user keeps visual context.
    """

    def __init__(self, dot_widget, target_x, target_y):
        MoveToAnimation.__init__(self, dot_widget, target_x, target_y)
        self.source_zoom = dot_widget.zoom_ratio
        # NOTE(review): the target zoom equals the source zoom, so the net
        # zoom change over the animation is zero; only extra_zoom has a
        # visible effect -- confirm this is intended.
        self.target_zoom = self.source_zoom
        self.extra_zoom = 0

        middle_zoom = 0.5 * (self.source_zoom + self.target_zoom)

        distance = math.hypot(self.source_x - self.target_x,
                              self.source_y - self.target_y)
        rect = self.dot_widget.get_allocation()
        # 90% of the smaller window dimension, in graph coordinates
        visible = min(rect.width, rect.height) / self.dot_widget.zoom_ratio
        visible *= 0.9
        if distance > 0:
            desired_middle_zoom = visible / distance
            # only ever zoom OUT (extra_zoom <= 0) to show more context
            self.extra_zoom = min(0, 4 * (desired_middle_zoom - middle_zoom))

    def animate(self, t):
        a, b, c = self.source_zoom, self.extra_zoom, self.target_zoom
        # quadratic blend: a at t=0, c at t=1, plus a bump peaking at t=0.5
        self.dot_widget.zoom_ratio = c * t + b * t * (1 - t) + a * (1 - t)
        self.dot_widget.zoom_to_fit_on_resize = False
        MoveToAnimation.animate(self, t)
class DragAction(object):
    """Base class for mouse-drag interactions on the DotWidget.

    Records press/motion/release positions and dispatches to the
    start/drag/stop/abort/draw hooks, which subclasses override.
    """

    def __init__(self, dot_widget):
        self.dot_widget = dot_widget

    def on_button_press(self, event):
        self.startmousex = self.prevmousex = event.x
        self.startmousey = self.prevmousey = event.y
        self.start()

    def on_motion_notify(self, event):
        # hint events require an explicit pointer query
        if event.is_hint:
            x, y, state = event.window.get_pointer()
        else:
            x, y, state = event.x, event.y, event.state
        self.drag(self.prevmousex - x, self.prevmousey - y)
        self.prevmousex, self.prevmousey = x, y

    def on_button_release(self, event):
        self.stopmousex = event.x
        self.stopmousey = event.y
        self.stop()

    # hooks for subclasses ------------------------------------------------

    def draw(self, cr):
        pass

    def start(self):
        pass

    def drag(self, deltax, deltay):
        pass

    def stop(self):
        pass

    def abort(self):
        pass
class NullAction(DragAction):
    """Default action: no dragging; update cursor/highlight on hover."""

    def on_motion_notify(self, event):
        if event.is_hint:
            x, y, state = event.window.get_pointer()
        else:
            x, y, state = event.x, event.y, event.state
        dot_widget = self.dot_widget
        # prefer URL hits; fall back to jump targets
        item = dot_widget.get_url(x, y)
        if item is None:
            item = dot_widget.get_jump(x, y)
        if item is None:
            dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
            dot_widget.set_highlight(None)
        else:
            dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND2))
            dot_widget.set_highlight(item.highlight)
class PanAction(DragAction):
    """Drag action that pans the graph with the mouse."""

    def start(self):
        # grabbing-hand cursor while panning
        self.dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))

    def drag(self, deltax, deltay):
        # convert screen-space deltas into graph coordinates
        self.dot_widget.x += deltax / self.dot_widget.zoom_ratio
        self.dot_widget.y += deltay / self.dot_widget.zoom_ratio
        self.dot_widget.queue_draw()

    def stop(self):
        self.dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))

    # aborting a pan needs no undo; just restore the cursor
    abort = stop
class ZoomAction(DragAction):
    """Drag action that zooms: dragging right/down zooms in."""

    def drag(self, deltax, deltay):
        # exponential zoom proportional to the drag distance
        self.dot_widget.zoom_ratio *= 1.005 ** (deltax + deltay)
        self.dot_widget.zoom_to_fit_on_resize = False
        self.dot_widget.queue_draw()

    def stop(self):
        self.dot_widget.queue_draw()
class ZoomAreaAction(DragAction):
    """Drag action: rubber-band a rectangle, zoom to it on release."""

    def drag(self, deltax, deltay):
        # redraw so draw() shows the updated selection rectangle
        self.dot_widget.queue_draw()

    def draw(self, cr):
        # translucent selection fill plus a crisp 1px outline
        cr.save()
        cr.set_source_rgba(.5, .5, 1.0, 0.25)
        cr.rectangle(self.startmousex, self.startmousey,
                     self.prevmousex - self.startmousex,
                     self.prevmousey - self.startmousey)
        cr.fill()
        cr.set_source_rgba(.5, .5, 1.0, 1.0)
        cr.set_line_width(1)
        # half-pixel offsets align the stroke with the pixel grid
        cr.rectangle(self.startmousex - .5, self.startmousey - .5,
                     self.prevmousex - self.startmousex + 1,
                     self.prevmousey - self.startmousey + 1)
        cr.stroke()
        cr.restore()

    def stop(self):
        # convert both corners to graph coordinates and zoom there
        x1, y1 = self.dot_widget.window2graph(self.startmousex,
                                              self.startmousey)
        x2, y2 = self.dot_widget.window2graph(self.stopmousex,
                                              self.stopmousey)
        self.dot_widget.zoom_to_area(x1, y1, x2, y2)

    def abort(self):
        self.dot_widget.queue_draw()
class DotWidget(gtk.DrawingArea):
    """PyGTK widget that draws dot graphs.

    Supports pan/zoom via mouse and keyboard, auto-reload of the open
    file, and emits 'clicked' when a URL-carrying element is clicked.
    """

    __gsignals__ = {
        'expose-event': 'override',
        'clicked': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event))
    }

    # name of the graphviz program used to turn dot into xdot
    filter = 'dot'

    def __init__(self):
        gtk.DrawingArea.__init__(self)

        self.graph = Graph()
        self.openfilename = None

        self.set_flags(gtk.CAN_FOCUS)

        self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
        self.connect("button-press-event", self.on_area_button_press)
        self.connect("button-release-event", self.on_area_button_release)
        self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
        self.connect("motion-notify-event", self.on_area_motion_notify)
        self.connect("scroll-event", self.on_area_scroll_event)
        self.connect("size-allocate", self.on_area_size_allocate)

        self.connect('key-press-event', self.on_key_press_event)

        self.last_mtime = None
        # poll once per second for on-disk changes (see update())
        gobject.timeout_add(1000, self.update)

        # (x, y) is the graph-space point shown at the widget's center
        self.x, self.y = 0.0, 0.0
        self.zoom_ratio = 1.0
        self.zoom_to_fit_on_resize = False
        self.animation = NoAnimation(self)
        self.drag_action = NullAction(self)
        self.presstime = None
        self.highlight = None

    def set_filter(self, filter):
        # choose which graphviz layout program renders the dot code
        self.filter = filter

    def run_filter(self, dotcode):
        """Run the graphviz filter over *dotcode*.

        Returns the xdot output, or None on failure (after showing an
        error dialog). If no filter is set, the input is assumed to
        already be xdot and is returned unchanged.
        """
        if not self.filter:
            return dotcode
        startupinfo = None
        if os.name == 'nt':
            # keep the child's console window hidden on Windows
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
        p = subprocess.Popen(
            [self.filter, '-Txdot'],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            universal_newlines=True,
            startupinfo=startupinfo
        )
        xdotcode, error = p.communicate(dotcode)
        sys.stderr.write(error)
        if p.returncode != 0:
            dialog = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                                       message_format=error,
                                       buttons=gtk.BUTTONS_OK)
            dialog.set_title('Dot Viewer')
            dialog.run()
            dialog.destroy()
            return None
        return xdotcode

    def set_dotcode(self, dotcode, filename=None):
        """Filter and display *dotcode*; return True on success."""
        self.openfilename = None
        if isinstance(dotcode, unicode):
            # the subprocess expects encoded bytes (Python 2 `unicode`)
            dotcode = dotcode.encode('utf8')
        xdotcode = self.run_filter(dotcode)
        if xdotcode is None:
            return False
        try:
            self.set_xdotcode(xdotcode)
        except ParseError, ex:
            dialog = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                                       message_format=str(ex),
                                       buttons=gtk.BUTTONS_OK)
            dialog.set_title('Dot Viewer')
            dialog.run()
            dialog.destroy()
            return False
        else:
            if filename is None:
                self.last_mtime = None
            else:
                # remember mtime so update() can auto-reload on change
                self.last_mtime = os.stat(filename).st_mtime
            self.openfilename = filename
            return True

    def set_xdotcode(self, xdotcode):
        """Parse pre-filtered xdot code and display the resulting graph.

        NOTE(review): returns None; DotWindow.set_xdotcode tests this
        return value, so its title-update branch never runs -- confirm
        whether this should return True on success.
        """
        #print xdotcode
        parser = XDotParser(xdotcode)
        self.graph = parser.parse()
        self.zoom_image(self.zoom_ratio, center=True)

    def reload(self):
        """Re-read and re-display the currently open file, if any."""
        if self.openfilename is not None:
            try:
                fp = file(self.openfilename, 'rt')
                self.set_dotcode(fp.read(), self.openfilename)
                fp.close()
            except IOError:
                # best-effort: a transient read failure is ignored
                pass

    def update(self):
        """Periodic callback: reload the open file if it changed on disk."""
        if self.openfilename is not None:
            current_mtime = os.stat(self.openfilename).st_mtime
            if current_mtime != self.last_mtime:
                self.last_mtime = current_mtime
                self.reload()
        # returning True keeps the gobject timeout alive
        return True

    def do_expose_event(self, event):
        """Paint the graph, honoring the clip region and pan/zoom state."""
        cr = self.window.cairo_create()

        # set a clip region for the expose event
        cr.rectangle(
            event.area.x, event.area.y,
            event.area.width, event.area.height
        )
        cr.clip()

        # white background
        cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
        cr.paint()

        cr.save()
        rect = self.get_allocation()
        # map graph point (self.x, self.y) to the widget's center
        cr.translate(0.5 * rect.width, 0.5 * rect.height)
        cr.scale(self.zoom_ratio, self.zoom_ratio)
        cr.translate(-self.x, -self.y)

        self.graph.draw(cr, highlight_items=self.highlight)
        cr.restore()

        # let the active drag action draw overlays (e.g. rubber band)
        self.drag_action.draw(cr)

        return False

    def get_current_pos(self):
        return self.x, self.y

    def set_current_pos(self, x, y):
        self.x = x
        self.y = y
        self.queue_draw()

    def set_highlight(self, items):
        # redraw only when the highlighted set actually changes
        if self.highlight != items:
            self.highlight = items
            self.queue_draw()

    def zoom_image(self, zoom_ratio, center=False, pos=None):
        """Set the zoom ratio; optionally recenter, or zoom around *pos*."""
        if center:
            self.x = self.graph.width / 2
            self.y = self.graph.height / 2
        elif pos is not None:
            rect = self.get_allocation()
            x, y = pos
            x -= 0.5 * rect.width
            y -= 0.5 * rect.height
            # keep the point under the cursor stationary while zooming
            self.x += x / self.zoom_ratio - x / zoom_ratio
            self.y += y / self.zoom_ratio - y / zoom_ratio
        self.zoom_ratio = zoom_ratio
        self.zoom_to_fit_on_resize = False
        self.queue_draw()

    def zoom_to_area(self, x1, y1, x2, y2):
        """Zoom so the graph-space rectangle (x1,y1)-(x2,y2) fills the view."""
        rect = self.get_allocation()
        width = abs(x1 - x2)
        height = abs(y1 - y2)
        self.zoom_ratio = min(
            float(rect.width) / float(width),
            float(rect.height) / float(height)
        )
        self.zoom_to_fit_on_resize = False
        self.x = (x1 + x2) / 2
        self.y = (y1 + y2) / 2
        self.queue_draw()

    def zoom_to_fit(self):
        """Fit the whole graph in the window, keeping a small margin."""
        rect = self.get_allocation()
        rect.x += self.ZOOM_TO_FIT_MARGIN
        rect.y += self.ZOOM_TO_FIT_MARGIN
        rect.width -= 2 * self.ZOOM_TO_FIT_MARGIN
        rect.height -= 2 * self.ZOOM_TO_FIT_MARGIN
        zoom_ratio = min(
            float(rect.width) / float(self.graph.width),
            float(rect.height) / float(self.graph.height)
        )
        self.zoom_image(zoom_ratio, center=True)
        self.zoom_to_fit_on_resize = True

    # zoom step per click/keypress, and margin used by zoom_to_fit (px)
    ZOOM_INCREMENT = 1.25
    ZOOM_TO_FIT_MARGIN = 12

    def on_zoom_in(self, action):
        self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)

    def on_zoom_out(self, action):
        self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)

    def on_zoom_fit(self, action):
        self.zoom_to_fit()

    def on_zoom_100(self, action):
        self.zoom_image(1.0)

    # pan distance per arrow keypress, in screen pixels
    POS_INCREMENT = 100

    def on_key_press_event(self, widget, event):
        """Keyboard bindings: arrows pan, +/- zoom, r reloads, q quits."""
        if event.keyval == gtk.keysyms.Left:
            self.x -= self.POS_INCREMENT / self.zoom_ratio
            self.queue_draw()
            return True
        if event.keyval == gtk.keysyms.Right:
            self.x += self.POS_INCREMENT / self.zoom_ratio
            self.queue_draw()
            return True
        if event.keyval == gtk.keysyms.Up:
            self.y -= self.POS_INCREMENT / self.zoom_ratio
            self.queue_draw()
            return True
        if event.keyval == gtk.keysyms.Down:
            self.y += self.POS_INCREMENT / self.zoom_ratio
            self.queue_draw()
            return True
        if event.keyval in (gtk.keysyms.Page_Up,
                            gtk.keysyms.plus,
                            gtk.keysyms.equal,
                            gtk.keysyms.KP_Add):
            self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
            self.queue_draw()
            return True
        if event.keyval in (gtk.keysyms.Page_Down,
                            gtk.keysyms.minus,
                            gtk.keysyms.KP_Subtract):
            self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
            self.queue_draw()
            return True
        if event.keyval == gtk.keysyms.Escape:
            # cancel whatever drag is in progress
            self.drag_action.abort()
            self.drag_action = NullAction(self)
            return True
        if event.keyval == gtk.keysyms.r:
            self.reload()
            return True
        if event.keyval == gtk.keysyms.q:
            gtk.main_quit()
            return True
        return False

    def get_drag_action(self, event):
        """Map mouse button + modifier keys to a DragAction subclass."""
        state = event.state
        if event.button in (1, 2): # left or middle button
            if state & gtk.gdk.CONTROL_MASK:
                return ZoomAction
            elif state & gtk.gdk.SHIFT_MASK:
                return ZoomAreaAction
            else:
                return PanAction
        return NullAction

    def on_area_button_press(self, area, event):
        self.animation.stop()
        self.drag_action.abort()
        action_type = self.get_drag_action(event)
        self.drag_action = action_type(self)
        self.drag_action.on_button_press(event)
        # remember press time/position so is_click() can distinguish
        # clicks from drags on release
        self.presstime = time.time()
        self.pressx = event.x
        self.pressy = event.y
        return False

    def is_click(self, event, click_fuzz=4, click_timeout=1.0):
        """True if this release (with the last press) constitutes a click:
        short in time and small in pointer travel."""
        assert event.type == gtk.gdk.BUTTON_RELEASE
        if self.presstime is None:
            # got a button release without seeing the press?
            return False
        # XXX instead of doing this complicated logic, shouldn't we listen
        # for gtk's clicked event instead?
        deltax = self.pressx - event.x
        deltay = self.pressy - event.y
        return (time.time() < self.presstime + click_timeout
                and math.hypot(deltax, deltay) < click_fuzz)

    def on_area_button_release(self, area, event):
        self.drag_action.on_button_release(event)
        self.drag_action = NullAction(self)
        if event.button == 1 and self.is_click(event):
            x, y = int(event.x), int(event.y)
            url = self.get_url(x, y)
            if url is not None:
                self.emit('clicked', unicode(url.url), event)
#            else:
#                jump = self.get_jump(x, y)
#                if jump is not None:
#                    self.animate_to(jump.x, jump.y)
            return True
        if event.button == 1 or event.button == 2:
            return True
        return False

    def on_area_scroll_event(self, area, event):
        # mouse wheel zooms around the pointer position
        if event.direction == gtk.gdk.SCROLL_UP:
            self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT,
                            pos=(event.x, event.y))
            return True
        if event.direction == gtk.gdk.SCROLL_DOWN:
            self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT,
                            pos=(event.x, event.y))
            return True
        return False

    def on_area_motion_notify(self, area, event):
        self.drag_action.on_motion_notify(event)
        return True

    def on_area_size_allocate(self, area, allocation):
        if self.zoom_to_fit_on_resize:
            self.zoom_to_fit()

    def animate_to(self, x, y):
        self.animation = ZoomToAnimation(self, x, y)
        self.animation.start()

    def window2graph(self, x, y):
        """Convert window (pixel) coordinates to graph coordinates."""
        rect = self.get_allocation()
        x -= 0.5 * rect.width
        y -= 0.5 * rect.height
        x /= self.zoom_ratio
        y /= self.zoom_ratio
        x += self.x
        y += self.y
        return x, y

    def get_url(self, x, y):
        x, y = self.window2graph(x, y)
        return self.graph.get_url(x, y)

    def get_jump(self, x, y):
        x, y = self.window2graph(x, y)
        return self.graph.get_jump(x, y)
class DotWindow(gtk.Window):
    """Top-level window wrapping a DotWidget plus a toolbar."""

    # UIManager XML describing the toolbar layout
    ui = '''
<ui>
<toolbar name="ToolBar">
<toolitem action="Open"/>
<toolitem action="Reload"/>
<separator/>
<toolitem action="ZoomIn"/>
<toolitem action="ZoomOut"/>
<toolitem action="ZoomFit"/>
<toolitem action="Zoom100"/>
</toolbar>
</ui>
'''

    base_title = 'Explicator'

    def __init__(self):
        gtk.Window.__init__(self)

        self.graph = Graph()

        window = self

        window.set_title(self.base_title)
        window.set_default_size(512, 512)
        vbox = gtk.VBox()
        window.add(vbox)

        self.widget = DotWidget()

        # Create a UIManager instance
        uimanager = self.uimanager = gtk.UIManager()

        # Add the accelerator group to the toplevel window
        accelgroup = uimanager.get_accel_group()
        window.add_accel_group(accelgroup)

        # Create an ActionGroup
        actiongroup = gtk.ActionGroup('Actions')
        self.actiongroup = actiongroup

        # Create actions
        actiongroup.add_actions((
            ('Open', gtk.STOCK_OPEN, None, None, None, self.on_open),
            ('Reload', gtk.STOCK_REFRESH, None, None, None, self.on_reload),
            ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
            ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
            ('ZoomFit', gtk.STOCK_ZOOM_FIT, None, None, None, self.widget.on_zoom_fit),
            ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
        ))

        # Add the actiongroup to the uimanager
        uimanager.insert_action_group(actiongroup, 0)

        # Add a UI description
        uimanager.add_ui_from_string(self.ui)

        # Create a Toolbar
        toolbar = uimanager.get_widget('/ToolBar')
        vbox.pack_start(toolbar, False)

        vbox.pack_start(self.widget)

        self.set_focus(self.widget)

        self.show_all()

    def set_filter(self, filter):
        self.widget.set_filter(filter)

    def set_dotcode(self, dotcode, filename=None):
        """Display dot source; update the title and fit on success."""
        if self.widget.set_dotcode(dotcode, filename):
            self.update_title(filename)
            self.widget.zoom_to_fit()

    def set_xdotcode(self, xdotcode, filename=None):
        """Display pre-filtered xdot source.

        NOTE(review): DotWidget.set_xdotcode returns None, so the branch
        below never runs and the title is never updated -- confirm.
        """
        if self.widget.set_xdotcode(xdotcode):
            self.update_title(filename)
            self.widget.zoom_to_fit()

    def update_title(self, filename=None):
        # show "name - base_title", or just the base title with no file
        if filename is None:
            self.set_title(self.base_title)
        else:
            self.set_title(os.path.basename(filename) + ' - ' + self.base_title)

    def open_file(self, filename):
        """Open *filename*, showing an error dialog on I/O failure."""
        try:
            fp = file(filename, 'rt')
            self.set_dotcode(fp.read(), filename)
            fp.close()
        except IOError, ex:
            dlg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                                    message_format=str(ex),
                                    buttons=gtk.BUTTONS_OK)
            dlg.set_title(self.base_title)
            dlg.run()
            dlg.destroy()

    def on_open(self, action):
        """Toolbar Open handler: prompt for a .dot file and open it."""
        chooser = gtk.FileChooserDialog(title="Open dot File",
                                        action=gtk.FILE_CHOOSER_ACTION_OPEN,
                                        buttons=(gtk.STOCK_CANCEL,
                                                 gtk.RESPONSE_CANCEL,
                                                 gtk.STOCK_OPEN,
                                                 gtk.RESPONSE_OK))
        chooser.set_default_response(gtk.RESPONSE_OK)
        filter = gtk.FileFilter()
        filter.set_name("Graphviz dot files")
        filter.add_pattern("*.dot")
        chooser.add_filter(filter)
        filter = gtk.FileFilter()
        filter.set_name("All files")
        filter.add_pattern("*")
        chooser.add_filter(filter)
        if chooser.run() == gtk.RESPONSE_OK:
            filename = chooser.get_filename()
            chooser.destroy()
            self.open_file(filename)
        else:
            chooser.destroy()

    def on_reload(self, action):
        self.widget.reload()
def main():
    """Command-line entry point: parse options and run a DotWindow."""
    import optparse

    parser = optparse.OptionParser(
        usage='\n\t%prog [file]',
        version='%%prog %s' % __version__)
    parser.add_option(
        '-f', '--filter',
        type='choice', choices=('dot', 'neato', 'twopi', 'circo', 'fdp'),
        dest='filter', default='dot',
        help='graphviz filter: dot, neato, twopi, circo, or fdp [default: %default]')
    parser.add_option(
        '-n', '--no-filter',
        action='store_const', const=None, dest='filter',
        help='assume input is already filtered into xdot format (use e.g. dot -Txdot)')

    (options, args) = parser.parse_args(sys.argv[1:])
    if len(args) > 1:
        parser.error('incorrect number of arguments')

    win = DotWindow()
    win.connect('destroy', gtk.main_quit)
    win.set_filter(options.filter)
    if len(args) >= 1:
        if args[0] == '-':
            # '-' means read dot code from stdin
            win.set_dotcode(sys.stdin.read())
        else:
            win.open_file(args[0])
    # blocks until the window is destroyed
    gtk.main()
# Apache-Style Software License for ColorBrewer software and ColorBrewer Color
# Schemes, Version 1.1
#
# Copyright (c) 2002 Cynthia Brewer, Mark Harrower, and The Pennsylvania State
# University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions as source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The end-user documentation included with the redistribution, if any,
# must include the following acknowledgment:
#
# This product includes color specifications and designs developed by
# Cynthia Brewer (http://colorbrewer.org/).
#
# Alternately, this acknowledgment may appear in the software itself, if and
# wherever such third-party acknowledgments normally appear.
#
# 3. The name "ColorBrewer" must not be used to endorse or promote products
# derived from this software without prior written permission. For written
# permission, please contact Cynthia Brewer at cbrewer@psu.edu.
#
# 4. Products derived from this software may not be called "ColorBrewer",
# nor may "ColorBrewer" appear in their name, without prior written
# permission of Cynthia Brewer.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CYNTHIA
# BREWER, MARK HARROWER, OR THE PENNSYLVANIA STATE UNIVERSITY BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
brewer_colors = {
'accent3': [(127, 201, 127), (190, 174, 212), (253, 192, 134)],
'accent4': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153)],
'accent5': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176)],
'accent6': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127)],
'accent7': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127),
(191, 91, 23)],
'accent8': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127),
(191, 91, 23), (102, 102, 102)],
'blues3': [(222, 235, 247), (158, 202, 225), (49, 130, 189)],
'blues4': [(239, 243, 255), (189, 215, 231), (107, 174, 214), (33, 113, 181)],
'blues5': [(239, 243, 255), (189, 215, 231), (107, 174, 214), (49, 130, 189), (8, 81, 156)],
'blues6': [(239, 243, 255), (198, 219, 239), (158, 202, 225), (107, 174, 214), (49, 130, 189), (8, 81, 156)],
'blues7': [(239, 243, 255), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181),
(8, 69, 148)],
'blues8': [(247, 251, 255), (222, 235, 247), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198),
(33, 113, 181), (8, 69, 148)],
'blues9': [(247, 251, 255), (222, 235, 247), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198),
(33, 113, 181), (8, 81, 156), (8, 48, 107)],
'brbg10': [(84, 48, 5), (0, 60, 48), (140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195),
(199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
'brbg11': [(84, 48, 5), (1, 102, 94), (0, 60, 48), (140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195),
(245, 245, 245), (199, 234, 229), (128, 205, 193), (53, 151, 143)],
'brbg3': [(216, 179, 101), (245, 245, 245), (90, 180, 172)],
'brbg4': [(166, 97, 26), (223, 194, 125), (128, 205, 193), (1, 133, 113)],
'brbg5': [(166, 97, 26), (223, 194, 125), (245, 245, 245), (128, 205, 193), (1, 133, 113)],
'brbg6': [(140, 81, 10), (216, 179, 101), (246, 232, 195), (199, 234, 229), (90, 180, 172), (1, 102, 94)],
'brbg7': [(140, 81, 10), (216, 179, 101), (246, 232, 195), (245, 245, 245), (199, 234, 229), (90, 180, 172),
(1, 102, 94)],
'brbg8': [(140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (199, 234, 229), (128, 205, 193),
(53, 151, 143), (1, 102, 94)],
'brbg9': [(140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (245, 245, 245), (199, 234, 229),
(128, 205, 193), (53, 151, 143), (1, 102, 94)],
'bugn3': [(229, 245, 249), (153, 216, 201), (44, 162, 95)],
'bugn4': [(237, 248, 251), (178, 226, 226), (102, 194, 164), (35, 139, 69)],
'bugn5': [(237, 248, 251), (178, 226, 226), (102, 194, 164), (44, 162, 95), (0, 109, 44)],
'bugn6': [(237, 248, 251), (204, 236, 230), (153, 216, 201), (102, 194, 164), (44, 162, 95), (0, 109, 44)],
'bugn7': [(237, 248, 251), (204, 236, 230), (153, 216, 201), (102, 194, 164), (65, 174, 118), (35, 139, 69),
(0, 88, 36)],
'bugn8': [(247, 252, 253), (229, 245, 249), (204, 236, 230), (153, 216, 201), (102, 194, 164), (65, 174, 118),
(35, 139, 69), (0, 88, 36)],
'bugn9': [(247, 252, 253), (229, 245, 249), (204, 236, 230), (153, 216, 201), (102, 194, 164), (65, 174, 118),
(35, 139, 69), (0, 109, 44), (0, 68, 27)],
'bupu3': [(224, 236, 244), (158, 188, 218), (136, 86, 167)],
'bupu4': [(237, 248, 251), (179, 205, 227), (140, 150, 198), (136, 65, 157)],
'bupu5': [(237, 248, 251), (179, 205, 227), (140, 150, 198), (136, 86, 167), (129, 15, 124)],
'bupu6': [(237, 248, 251), (191, 211, 230), (158, 188, 218), (140, 150, 198), (136, 86, 167), (129, 15, 124)],
'bupu7': [(237, 248, 251), (191, 211, 230), (158, 188, 218), (140, 150, 198), (140, 107, 177), (136, 65, 157),
(110, 1, 107)],
'bupu8': [(247, 252, 253), (224, 236, 244), (191, 211, 230), (158, 188, 218), (140, 150, 198), (140, 107, 177),
(136, 65, 157), (110, 1, 107)],
'bupu9': [(247, 252, 253), (224, 236, 244), (191, 211, 230), (158, 188, 218), (140, 150, 198), (140, 107, 177),
(136, 65, 157), (129, 15, 124), (77, 0, 75)],
'dark23': [(27, 158, 119), (217, 95, 2), (117, 112, 179)],
'dark24': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138)],
'dark25': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30)],
'dark26': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30), (230, 171, 2)],
'dark27': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30), (230, 171, 2),
(166, 118, 29)],
'dark28': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30), (230, 171, 2),
(166, 118, 29), (102, 102, 102)],
'gnbu3': [(224, 243, 219), (168, 221, 181), (67, 162, 202)],
'gnbu4': [(240, 249, 232), (186, 228, 188), (123, 204, 196), (43, 140, 190)],
'gnbu5': [(240, 249, 232), (186, 228, 188), (123, 204, 196), (67, 162, 202), (8, 104, 172)],
'gnbu6': [(240, 249, 232), (204, 235, 197), (168, 221, 181), (123, 204, 196), (67, 162, 202), (8, 104, 172)],
'gnbu7': [(240, 249, 232), (204, 235, 197), (168, 221, 181), (123, 204, 196), (78, 179, 211), (43, 140, 190),
(8, 88, 158)],
'gnbu8': [(247, 252, 240), (224, 243, 219), (204, 235, 197), (168, 221, 181), (123, 204, 196), (78, 179, 211),
(43, 140, 190), (8, 88, 158)],
'gnbu9': [(247, 252, 240), (224, 243, 219), (204, 235, 197), (168, 221, 181), (123, 204, 196), (78, 179, 211),
(43, 140, 190), (8, 104, 172), (8, 64, 129)],
'greens3': [(229, 245, 224), (161, 217, 155), (49, 163, 84)],
'greens4': [(237, 248, 233), (186, 228, 179), (116, 196, 118), (35, 139, 69)],
'greens5': [(237, 248, 233), (186, 228, 179), (116, 196, 118), (49, 163, 84), (0, 109, 44)],
'greens6': [(237, 248, 233), (199, 233, 192), (161, 217, 155), (116, 196, 118), (49, 163, 84), (0, 109, 44)],
'greens7': [(237, 248, 233), (199, 233, 192), (161, 217, 155), (116, 196, 118), (65, 171, 93), (35, 139, 69),
(0, 90, 50)],
'greens8': [(247, 252, 245), (229, 245, 224), (199, 233, 192), (161, 217, 155), (116, 196, 118), (65, 171, 93),
(35, 139, 69), (0, 90, 50)],
'greens9': [(247, 252, 245), (229, 245, 224), (199, 233, 192), (161, 217, 155), (116, 196, 118), (65, 171, 93),
(35, 139, 69), (0, 109, 44), (0, 68, 27)],
'greys3': [(240, 240, 240), (189, 189, 189), (99, 99, 99)],
'greys4': [(247, 247, 247), (204, 204, 204), (150, 150, 150), (82, 82, 82)],
'greys5': [(247, 247, 247), (204, 204, 204), (150, 150, 150), (99, 99, 99), (37, 37, 37)],
'greys6': [(247, 247, 247), (217, 217, 217), (189, 189, 189), (150, 150, 150), (99, 99, 99), (37, 37, 37)],
'greys7': [(247, 247, 247), (217, 217, 217), (189, 189, 189), (150, 150, 150), (115, 115, 115), (82, 82, 82),
(37, 37, 37)],
'greys8': [(255, 255, 255), (240, 240, 240), (217, 217, 217), (189, 189, 189), (150, 150, 150), (115, 115, 115),
(82, 82, 82), (37, 37, 37)],
'greys9': [(255, 255, 255), (240, 240, 240), (217, 217, 217), (189, 189, 189), (150, 150, 150), (115, 115, 115),
(82, 82, 82), (37, 37, 37), (0, 0, 0)],
'oranges3': [(254, 230, 206), (253, 174, 107), (230, 85, 13)],
'oranges4': [(254, 237, 222), (253, 190, 133), (253, 141, 60), (217, 71, 1)],
'oranges5': [(254, 237, 222), (253, 190, 133), (253, 141, 60), (230, 85, 13), (166, 54, 3)],
'oranges6': [(254, 237, 222), (253, 208, 162), (253, 174, 107), (253, 141, 60), (230, 85, 13), (166, 54, 3)],
'oranges7': [(254, 237, 222), (253, 208, 162), (253, 174, 107), (253, 141, 60), (241, 105, 19), (217, 72, 1),
(140, 45, 4)],
'oranges8': [(255, 245, 235), (254, 230, 206), (253, 208, 162), (253, 174, 107), (253, 141, 60), (241, 105, 19),
(217, 72, 1), (140, 45, 4)],
'oranges9': [(255, 245, 235), (254, 230, 206), (253, 208, 162), (253, 174, 107), (253, 141, 60), (241, 105, 19),
(217, 72, 1), (166, 54, 3), (127, 39, 4)],
'orrd3': [(254, 232, 200), (253, 187, 132), (227, 74, 51)],
'orrd4': [(254, 240, 217), (253, 204, 138), (252, 141, 89), (215, 48, 31)],
'orrd5': [(254, 240, 217), (253, 204, 138), (252, 141, 89), (227, 74, 51), (179, 0, 0)],
'orrd6': [(254, 240, 217), (253, 212, 158), (253, 187, 132), (252, 141, 89), (227, 74, 51), (179, 0, 0)],
'orrd7': [(254, 240, 217), (253, 212, 158), (253, 187, 132), (252, 141, 89), (239, 101, 72), (215, 48, 31),
(153, 0, 0)],
'orrd8': [(255, 247, 236), (254, 232, 200), (253, 212, 158), (253, 187, 132), (252, 141, 89), (239, 101, 72),
(215, 48, 31), (153, 0, 0)],
'orrd9': [(255, 247, 236), (254, 232, 200), (253, 212, 158), (253, 187, 132), (252, 141, 89), (239, 101, 72),
(215, 48, 31), (179, 0, 0), (127, 0, 0)],
'paired10': [(166, 206, 227), (106, 61, 154), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153),
(227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
'paired11': [(166, 206, 227), (106, 61, 154), (255, 255, 153), (31, 120, 180), (178, 223, 138), (51, 160, 44),
(251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
'paired12': [(166, 206, 227), (106, 61, 154), (255, 255, 153), (177, 89, 40), (31, 120, 180), (178, 223, 138),
(51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
'paired3': [(166, 206, 227), (31, 120, 180), (178, 223, 138)],
'paired4': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44)],
'paired5': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153)],
'paired6': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28)],
'paired7': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28),
(253, 191, 111)],
'paired8': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28),
(253, 191, 111), (255, 127, 0)],
'paired9': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28),
(253, 191, 111), (255, 127, 0), (202, 178, 214)],
'pastel13': [(251, 180, 174), (179, 205, 227), (204, 235, 197)],
'pastel14': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228)],
'pastel15': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166)],
'pastel16': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204)],
'pastel17': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204),
(229, 216, 189)],
'pastel18': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204),
(229, 216, 189), (253, 218, 236)],
'pastel19': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204),
(229, 216, 189), (253, 218, 236), (242, 242, 242)],
'pastel23': [(179, 226, 205), (253, 205, 172), (203, 213, 232)],
'pastel24': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228)],
'pastel25': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201)],
'pastel26': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201), (255, 242, 174)],
'pastel27': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201), (255, 242, 174),
(241, 226, 204)],
'pastel28': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201), (255, 242, 174),
(241, 226, 204), (204, 204, 204)],
'piyg10': [(142, 1, 82), (39, 100, 25), (197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239),
(230, 245, 208), (184, 225, 134), (127, 188, 65), (77, 146, 33)],
'piyg11': [(142, 1, 82), (77, 146, 33), (39, 100, 25), (197, 27, 125), (222, 119, 174), (241, 182, 218),
(253, 224, 239), (247, 247, 247), (230, 245, 208), (184, 225, 134), (127, 188, 65)],
'piyg3': [(233, 163, 201), (247, 247, 247), (161, 215, 106)],
'piyg4': [(208, 28, 139), (241, 182, 218), (184, 225, 134), (77, 172, 38)],
'piyg5': [(208, 28, 139), (241, 182, 218), (247, 247, 247), (184, 225, 134), (77, 172, 38)],
'piyg6': [(197, 27, 125), (233, 163, 201), (253, 224, 239), (230, 245, 208), (161, 215, 106), (77, 146, 33)],
'piyg7': [(197, 27, 125), (233, 163, 201), (253, 224, 239), (247, 247, 247), (230, 245, 208), (161, 215, 106),
(77, 146, 33)],
'piyg8': [(197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (230, 245, 208), (184, 225, 134),
(127, 188, 65), (77, 146, 33)],
'piyg9': [(197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (247, 247, 247), (230, 245, 208),
(184, 225, 134), (127, 188, 65), (77, 146, 33)],
'prgn10': [(64, 0, 75), (0, 68, 27), (118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232),
(217, 240, 211), (166, 219, 160), (90, 174, 97), (27, 120, 55)],
'prgn11': [(64, 0, 75), (27, 120, 55), (0, 68, 27), (118, 42, 131), (153, 112, 171), (194, 165, 207),
(231, 212, 232), (247, 247, 247), (217, 240, 211), (166, 219, 160), (90, 174, 97)],
'prgn3': [(175, 141, 195), (247, 247, 247), (127, 191, 123)],
'prgn4': [(123, 50, 148), (194, 165, 207), (166, 219, 160), (0, 136, 55)],
'prgn5': [(123, 50, 148), (194, 165, 207), (247, 247, 247), (166, 219, 160), (0, 136, 55)],
'prgn6': [(118, 42, 131), (175, 141, 195), (231, 212, 232), (217, 240, 211), (127, 191, 123), (27, 120, 55)],
'prgn7': [(118, 42, 131), (175, 141, 195), (231, 212, 232), (247, 247, 247), (217, 240, 211), (127, 191, 123),
(27, 120, 55)],
'prgn8': [(118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (217, 240, 211), (166, 219, 160),
(90, 174, 97), (27, 120, 55)],
'prgn9': [(118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (247, 247, 247), (217, 240, 211),
(166, 219, 160), (90, 174, 97), (27, 120, 55)],
'pubu3': [(236, 231, 242), (166, 189, 219), (43, 140, 190)],
'pubu4': [(241, 238, 246), (189, 201, 225), (116, 169, 207), (5, 112, 176)],
'pubu5': [(241, 238, 246), (189, 201, 225), (116, 169, 207), (43, 140, 190), (4, 90, 141)],
'pubu6': [(241, 238, 246), (208, 209, 230), (166, 189, 219), (116, 169, 207), (43, 140, 190), (4, 90, 141)],
'pubu7': [(241, 238, 246), (208, 209, 230), (166, 189, 219), (116, 169, 207), (54, 144, 192), (5, 112, 176),
(3, 78, 123)],
'pubu8': [(255, 247, 251), (236, 231, 242), (208, 209, 230), (166, 189, 219), (116, 169, 207), (54, 144, 192),
(5, 112, 176), (3, 78, 123)],
'pubu9': [(255, 247, 251), (236, 231, 242), (208, 209, 230), (166, 189, 219), (116, 169, 207), (54, 144, 192),
(5, 112, 176), (4, 90, 141), (2, 56, 88)],
'pubugn3': [(236, 226, 240), (166, 189, 219), (28, 144, 153)],
'pubugn4': [(246, 239, 247), (189, 201, 225), (103, 169, 207), (2, 129, 138)],
'pubugn5': [(246, 239, 247), (189, 201, 225), (103, 169, 207), (28, 144, 153), (1, 108, 89)],
'pubugn6': [(246, 239, 247), (208, 209, 230), (166, 189, 219), (103, 169, 207), (28, 144, 153), (1, 108, 89)],
'pubugn7': [(246, 239, 247), (208, 209, 230), (166, 189, 219), (103, 169, 207), (54, 144, 192), (2, 129, 138),
(1, 100, 80)],
'pubugn8': [(255, 247, 251), (236, 226, 240), (208, 209, 230), (166, 189, 219), (103, 169, 207), (54, 144, 192),
(2, 129, 138), (1, 100, 80)],
'pubugn9': [(255, 247, 251), (236, 226, 240), (208, 209, 230), (166, 189, 219), (103, 169, 207), (54, 144, 192),
(2, 129, 138), (1, 108, 89), (1, 70, 54)],
'puor10': [(127, 59, 8), (45, 0, 75), (179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (216, 218, 235)
, (178, 171, 210), (128, 115, 172), (84, 39, 136)],
'puor11': [(127, 59, 8), (84, 39, 136), (45, 0, 75), (179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182),
(247, 247, 247), (216, 218, 235), (178, 171, 210), (128, 115, 172)],
'puor3': [(241, 163, 64), (247, 247, 247), (153, 142, 195)],
'puor4': [(230, 97, 1), (253, 184, 99), (178, 171, 210), (94, 60, 153)],
'puor5': [(230, 97, 1), (253, 184, 99), (247, 247, 247), (178, 171, 210), (94, 60, 153)],
'puor6': [(179, 88, 6), (241, 163, 64), (254, 224, 182), (216, 218, 235), (153, 142, 195), (84, 39, 136)],
'puor7': [(179, 88, 6), (241, 163, 64), (254, 224, 182), (247, 247, 247), (216, 218, 235), (153, 142, 195),
(84, 39, 136)],
'puor8': [(179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (216, 218, 235), (178, 171, 210),
(128, 115, 172), (84, 39, 136)],
'puor9': [(179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (247, 247, 247), (216, 218, 235),
(178, 171, 210), (128, 115, 172), (84, 39, 136)],
'purd3': [(231, 225, 239), (201, 148, 199), (221, 28, 119)],
'purd4': [(241, 238, 246), (215, 181, 216), (223, 101, 176), (206, 18, 86)],
'purd5': [(241, 238, 246), (215, 181, 216), (223, 101, 176), (221, 28, 119), (152, 0, 67)],
'purd6': [(241, 238, 246), (212, 185, 218), (201, 148, 199), (223, 101, 176), (221, 28, 119), (152, 0, 67)],
'purd7': [(241, 238, 246), (212, 185, 218), (201, 148, 199), (223, 101, 176), (231, 41, 138), (206, 18, 86),
(145, 0, 63)],
'purd8': [(247, 244, 249), (231, 225, 239), (212, 185, 218), (201, 148, 199), (223, 101, 176), (231, 41, 138),
(206, 18, 86), (145, 0, 63)],
'purd9': [(247, 244, 249), (231, 225, 239), (212, 185, 218), (201, 148, 199), (223, 101, 176), (231, 41, 138),
(206, 18, 86), (152, 0, 67), (103, 0, 31)],
'purples3': [(239, 237, 245), (188, 189, 220), (117, 107, 177)],
'purples4': [(242, 240, 247), (203, 201, 226), (158, 154, 200), (106, 81, 163)],
'purples5': [(242, 240, 247), (203, 201, 226), (158, 154, 200), (117, 107, 177), (84, 39, 143)],
'purples6': [(242, 240, 247), (218, 218, 235), (188, 189, 220), (158, 154, 200), (117, 107, 177), (84, 39, 143)],
'purples7': [(242, 240, 247), (218, 218, 235), (188, 189, 220), (158, 154, 200), (128, 125, 186), (106, 81, 163),
(74, 20, 134)],
'purples8': [(252, 251, 253), (239, 237, 245), (218, 218, 235), (188, 189, 220), (158, 154, 200), (128, 125, 186),
(106, 81, 163), (74, 20, 134)],
'purples9': [(252, 251, 253), (239, 237, 245), (218, 218, 235), (188, 189, 220), (158, 154, 200), (128, 125, 186),
(106, 81, 163), (84, 39, 143), (63, 0, 125)],
'rdbu10': [(103, 0, 31), (5, 48, 97), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199),
(209, 229, 240), (146, 197, 222), (67, 147, 195), (33, 102, 172)],
'rdbu11': [(103, 0, 31), (33, 102, 172), (5, 48, 97), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199)
, (247, 247, 247), (209, 229, 240), (146, 197, 222), (67, 147, 195)],
'rdbu3': [(239, 138, 98), (247, 247, 247), (103, 169, 207)],
'rdbu4': [(202, 0, 32), (244, 165, 130), (146, 197, 222), (5, 113, 176)],
'rdbu5': [(202, 0, 32), (244, 165, 130), (247, 247, 247), (146, 197, 222), (5, 113, 176)],
'rdbu6': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (209, 229, 240), (103, 169, 207), (33, 102, 172)],
'rdbu7': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (247, 247, 247), (209, 229, 240), (103, 169, 207),
(33, 102, 172)],
'rdbu8': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (209, 229, 240), (146, 197, 222),
(67, 147, 195), (33, 102, 172)],
'rdbu9': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (247, 247, 247), (209, 229, 240),
(146, 197, 222), (67, 147, 195), (33, 102, 172)],
'rdgy10': [(103, 0, 31), (26, 26, 26), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199),
(224, 224, 224), (186, 186, 186), (135, 135, 135), (77, 77, 77)],
'rdgy11': [(103, 0, 31), (77, 77, 77), (26, 26, 26), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199),
(255, 255, 255), (224, 224, 224), (186, 186, 186), (135, 135, 135)],
'rdgy3': [(239, 138, 98), (255, 255, 255), (153, 153, 153)],
'rdgy4': [(202, 0, 32), (244, 165, 130), (186, 186, 186), (64, 64, 64)],
'rdgy5': [(202, 0, 32), (244, 165, 130), (255, 255, 255), (186, 186, 186), (64, 64, 64)],
'rdgy6': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (224, 224, 224), (153, 153, 153), (77, 77, 77)],
'rdgy7': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (255, 255, 255), (224, 224, 224), (153, 153, 153),
(77, 77, 77)],
'rdgy8': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (224, 224, 224), (186, 186, 186),
(135, 135, 135), (77, 77, 77)],
'rdgy9': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (255, 255, 255), (224, 224, 224),
(186, 186, 186), (135, 135, 135), (77, 77, 77)],
'rdpu3': [(253, 224, 221), (250, 159, 181), (197, 27, 138)],
'rdpu4': [(254, 235, 226), (251, 180, 185), (247, 104, 161), (174, 1, 126)],
'rdpu5': [(254, 235, 226), (251, 180, 185), (247, 104, 161), (197, 27, 138), (122, 1, 119)],
'rdpu6': [(254, 235, 226), (252, 197, 192), (250, 159, 181), (247, 104, 161), (197, 27, 138), (122, 1, 119)],
'rdpu7': [(254, 235, 226), (252, 197, 192), (250, 159, 181), (247, 104, 161), (221, 52, 151), (174, 1, 126),
(122, 1, 119)],
'rdpu8': [(255, 247, 243), (253, 224, 221), (252, 197, 192), (250, 159, 181), (247, 104, 161), (221, 52, 151),
(174, 1, 126), (122, 1, 119)],
'rdpu9': [(255, 247, 243), (253, 224, 221), (252, 197, 192), (250, 159, 181), (247, 104, 161), (221, 52, 151),
(174, 1, 126), (122, 1, 119), (73, 0, 106)],
'rdylbu10': [(165, 0, 38), (49, 54, 149), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144),
(224, 243, 248), (171, 217, 233), (116, 173, 209), (69, 117, 180)],
'rdylbu11': [(165, 0, 38), (69, 117, 180), (49, 54, 149), (215, 48, 39), (244, 109, 67), (253, 174, 97),
(254, 224, 144), (255, 255, 191), (224, 243, 248), (171, 217, 233), (116, 173, 209)],
'rdylbu3': [(252, 141, 89), (255, 255, 191), (145, 191, 219)],
'rdylbu4': [(215, 25, 28), (253, 174, 97), (171, 217, 233), (44, 123, 182)],
'rdylbu5': [(215, 25, 28), (253, 174, 97), (255, 255, 191), (171, 217, 233), (44, 123, 182)],
'rdylbu6': [(215, 48, 39), (252, 141, 89), (254, 224, 144), (224, 243, 248), (145, 191, 219), (69, 117, 180)],
'rdylbu7': [(215, 48, 39), (252, 141, 89), (254, 224, 144), (255, 255, 191), (224, 243, 248), (145, 191, 219),
(69, 117, 180)],
'rdylbu8': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (224, 243, 248), (171, 217, 233),
(116, 173, 209), (69, 117, 180)],
'rdylbu9': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (255, 255, 191), (224, 243, 248),
(171, 217, 233), (116, 173, 209), (69, 117, 180)],
'rdylgn10': [(165, 0, 38), (0, 104, 55), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139),
(217, 239, 139), (166, 217, 106), (102, 189, 99), (26, 152, 80)],
'rdylgn11': [(165, 0, 38), (26, 152, 80), (0, 104, 55), (215, 48, 39), (244, 109, 67), (253, 174, 97),
(254, 224, 139), (255, 255, 191), (217, 239, 139), (166, 217, 106), (102, 189, 99)],
'rdylgn3': [(252, 141, 89), (255, 255, 191), (145, 207, 96)],
'rdylgn4': [(215, 25, 28), (253, 174, 97), (166, 217, 106), (26, 150, 65)],
'rdylgn5': [(215, 25, 28), (253, 174, 97), (255, 255, 191), (166, 217, 106), (26, 150, 65)],
'rdylgn6': [(215, 48, 39), (252, 141, 89), (254, 224, 139), (217, 239, 139), (145, 207, 96), (26, 152, 80)],
'rdylgn7': [(215, 48, 39), (252, 141, 89), (254, 224, 139), (255, 255, 191), (217, 239, 139), (145, 207, 96),
(26, 152, 80)],
'rdylgn8': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (217, 239, 139), (166, 217, 106),
(102, 189, 99), (26, 152, 80)],
'rdylgn9': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (217, 239, 139),
(166, 217, 106), (102, 189, 99), (26, 152, 80)],
'reds3': [(254, 224, 210), (252, 146, 114), (222, 45, 38)],
'reds4': [(254, 229, 217), (252, 174, 145), (251, 106, 74), (203, 24, 29)],
'reds5': [(254, 229, 217), (252, 174, 145), (251, 106, 74), (222, 45, 38), (165, 15, 21)],
'reds6': [(254, 229, 217), (252, 187, 161), (252, 146, 114), (251, 106, 74), (222, 45, 38), (165, 15, 21)],
'reds7': [(254, 229, 217), (252, 187, 161), (252, 146, 114), (251, 106, 74), (239, 59, 44), (203, 24, 29),
(153, 0, 13)],
'reds8': [(255, 245, 240), (254, 224, 210), (252, 187, 161), (252, 146, 114), (251, 106, 74), (239, 59, 44),
(203, 24, 29), (153, 0, 13)],
'reds9': [(255, 245, 240), (254, 224, 210), (252, 187, 161), (252, 146, 114), (251, 106, 74), (239, 59, 44),
(203, 24, 29), (165, 15, 21), (103, 0, 13)],
'set13': [(228, 26, 28), (55, 126, 184), (77, 175, 74)],
'set14': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163)],
'set15': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0)],
'set16': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51)],
'set17': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51),
(166, 86, 40)],
'set18': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51), (166, 86, 40)
, (247, 129, 191)],
'set19': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51), (166, 86, 40)
, (247, 129, 191), (153, 153, 153)],
'set23': [(102, 194, 165), (252, 141, 98), (141, 160, 203)],
'set24': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195)],
'set25': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84)],
'set26': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84), (255, 217, 47)],
'set27': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84), (255, 217, 47),
(229, 196, 148)],
'set28': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84), (255, 217, 47),
(229, 196, 148), (179, 179, 179)],
'set310': [(141, 211, 199), (188, 128, 189), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211),
(253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
'set311': [(141, 211, 199), (188, 128, 189), (204, 235, 197), (255, 255, 179), (190, 186, 218), (251, 128, 114),
(128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
'set312': [(141, 211, 199), (188, 128, 189), (204, 235, 197), (255, 237, 111), (255, 255, 179), (190, 186, 218),
(251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
'set33': [(141, 211, 199), (255, 255, 179), (190, 186, 218)],
'set34': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114)],
'set35': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211)],
'set36': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98)],
'set37': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98),
(179, 222, 105)],
'set38': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98),
(179, 222, 105), (252, 205, 229)],
'set39': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98),
(179, 222, 105), (252, 205, 229), (217, 217, 217)],
'spectral10': [(158, 1, 66), (94, 79, 162), (213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139),
(230, 245, 152), (171, 221, 164), (102, 194, 165), (50, 136, 189)],
'spectral11': [(158, 1, 66), (50, 136, 189), (94, 79, 162), (213, 62, 79), (244, 109, 67), (253, 174, 97),
(254, 224, 139), (255, 255, 191), (230, 245, 152), (171, 221, 164), (102, 194, 165)],
'spectral3': [(252, 141, 89), (255, 255, 191), (153, 213, 148)],
'spectral4': [(215, 25, 28), (253, 174, 97), (171, 221, 164), (43, 131, 186)],
'spectral5': [(215, 25, 28), (253, 174, 97), (255, 255, 191), (171, 221, 164), (43, 131, 186)],
'spectral6': [(213, 62, 79), (252, 141, 89), (254, 224, 139), (230, 245, 152), (153, 213, 148), (50, 136, 189)],
'spectral7': [(213, 62, 79), (252, 141, 89), (254, 224, 139), (255, 255, 191), (230, 245, 152), (153, 213, 148),
(50, 136, 189)],
'spectral8': [(213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (230, 245, 152), (171, 221, 164),
(102, 194, 165), (50, 136, 189)],
'spectral9': [(213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (230, 245, 152),
(171, 221, 164), (102, 194, 165), (50, 136, 189)],
'ylgn3': [(247, 252, 185), (173, 221, 142), (49, 163, 84)],
'ylgn4': [(255, 255, 204), (194, 230, 153), (120, 198, 121), (35, 132, 67)],
'ylgn5': [(255, 255, 204), (194, 230, 153), (120, 198, 121), (49, 163, 84), (0, 104, 55)],
'ylgn6': [(255, 255, 204), (217, 240, 163), (173, 221, 142), (120, 198, 121), (49, 163, 84), (0, 104, 55)],
'ylgn7': [(255, 255, 204), (217, 240, 163), (173, 221, 142), (120, 198, 121), (65, 171, 93), (35, 132, 67),
(0, 90, 50)],
'ylgn8': [(255, 255, 229), (247, 252, 185), (217, 240, 163), (173, 221, 142), (120, 198, 121), (65, 171, 93),
(35, 132, 67), (0, 90, 50)],
'ylgn9': [(255, 255, 229), (247, 252, 185), (217, 240, 163), (173, 221, 142), (120, 198, 121), (65, 171, 93),
(35, 132, 67), (0, 104, 55), (0, 69, 41)],
'ylgnbu3': [(237, 248, 177), (127, 205, 187), (44, 127, 184)],
'ylgnbu4': [(255, 255, 204), (161, 218, 180), (65, 182, 196), (34, 94, 168)],
'ylgnbu5': [(255, 255, 204), (161, 218, 180), (65, 182, 196), (44, 127, 184), (37, 52, 148)],
'ylgnbu6': [(255, 255, 204), (199, 233, 180), (127, 205, 187), (65, 182, 196), (44, 127, 184), (37, 52, 148)],
'ylgnbu7': [(255, 255, 204), (199, 233, 180), (127, 205, 187), (65, 182, 196), (29, 145, 192), (34, 94, 168),
(12, 44, 132)],
'ylgnbu8': [(255, 255, 217), (237, 248, 177), (199, 233, 180), (127, 205, 187), (65, 182, 196), (29, 145, 192),
(34, 94, 168), (12, 44, 132)],
'ylgnbu9': [(255, 255, 217), (237, 248, 177), (199, 233, 180), (127, 205, 187), (65, 182, 196), (29, 145, 192),
(34, 94, 168), (37, 52, 148), (8, 29, 88)],
'ylorbr3': [(255, 247, 188), (254, 196, 79), (217, 95, 14)],
'ylorbr4': [(255, 255, 212), (254, 217, 142), (254, 153, 41), (204, 76, 2)],
'ylorbr5': [(255, 255, 212), (254, 217, 142), (254, 153, 41), (217, 95, 14), (153, 52, 4)],
'ylorbr6': [(255, 255, 212), (254, 227, 145), (254, 196, 79), (254, 153, 41), (217, 95, 14), (153, 52, 4)],
'ylorbr7': [(255, 255, 212), (254, 227, 145), (254, 196, 79), (254, 153, 41), (236, 112, 20), (204, 76, 2),
(140, 45, 4)],
'ylorbr8': [(255, 255, 229), (255, 247, 188), (254, 227, 145), (254, 196, 79), (254, 153, 41), (236, 112, 20),
(204, 76, 2), (140, 45, 4)],
'ylorbr9': [(255, 255, 229), (255, 247, 188), (254, 227, 145), (254, 196, 79), (254, 153, 41), (236, 112, 20),
(204, 76, 2), (153, 52, 4), (102, 37, 6)],
'ylorrd3': [(255, 237, 160), (254, 178, 76), (240, 59, 32)],
'ylorrd4': [(255, 255, 178), (254, 204, 92), (253, 141, 60), (227, 26, 28)],
'ylorrd5': [(255, 255, 178), (254, 204, 92), (253, 141, 60), (240, 59, 32), (189, 0, 38)],
'ylorrd6': [(255, 255, 178), (254, 217, 118), (254, 178, 76), (253, 141, 60), (240, 59, 32), (189, 0, 38)],
'ylorrd7': [(255, 255, 178), (254, 217, 118), (254, 178, 76), (253, 141, 60), (252, 78, 42), (227, 26, 28),
(177, 0, 38)],
'ylorrd8': [(255, 255, 204), (255, 237, 160), (254, 217, 118), (254, 178, 76), (253, 141, 60), (252, 78, 42),
(227, 26, 28), (177, 0, 38)],
}
if __name__ == '__main__':
main() | hszer0/Explicator | xdot.py | Python | gpl-3.0 | 92,826 | [
"FLEUR"
] | 902c509d165d45a95cdd99d538a33706b3922bac415f8fa78549dc24ff18e75e |
#!/usr/bin/env python
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Basic information
import sys
import platform
# NOTE: Python-2 ``print`` statements throughout; this script predates
# Python 3 and must be run with a Python 2 interpreter.
print "Python:"
print " Basic version: %s.%s.%s" % (sys.version_info[0],
sys.version_info[1],
sys.version_info[2], )
print " Full version: " + sys.version.replace('\n', ' ')
print
def c(s):
return s or "<COULD NOT DETERMINE>"
print "System:"
print " Type: " + c(platform.system())
print " Architecture: " + c(platform.architecture()[0])
print " Machine: " + c(platform.machine())
print " Platform: " + c(platform.platform())
print " Processor: " + c(platform.processor())
print
##############################################################################
print "Libraries:"
try:
import sip
print " sip installed."
print " version: " + sip.SIP_VERSION_STR
except ImportError:
print " sip NOT installed."
print
try:
import PyQt4.Qt
print " PyQt installed."
print " Qt version: " + PyQt4.Qt.QT_VERSION_STR
print " PyQt version: " + PyQt4.Qt.PYQT_VERSION_STR
except ImportError:
print " PyQt NOT installed."
print
try:
import vtk
print " VTK installed."
print " VTK short version: " + vtk.vtkVersion().GetVTKVersion()
print " VTK full version: " + vtk.vtkVersion().GetVTKSourceVersion()
except ImportError:
print " VTK NOT installed."
| CMUSV-VisTrails/WorkflowRecommendation | scripts/system_info.py | Python | bsd-3-clause | 3,215 | [
"VTK"
] | e4ac7366187a57f132861786f8441d5c797d01cb0e2eae13c486d7ca4bc192ce |
#!/usr/bin/env python
from horton import *
import h5py as h5
import os
log.set_level(log.silent)
def store_wfn(fn_h5, mixing, name_case, exp):
    """Write the expansion *exp* into the HDF5 file *fn_h5*.

    The data goes into the group ``/<-log10(mixing)>/<name_case>``;
    any previous contents (datasets and attributes) of that group are
    removed first.
    """
    group_name = '%08.5f' % (-np.log10(mixing))
    with h5.File(fn_h5) as f:
        target = f.require_group(group_name).require_group(name_case)
        # Wipe anything left over from a previous run.
        for item in list(target.keys()):
            del target[item]
        for attr in list(target.attrs.keys()):
            del target.attrs[attr]
        exp.to_hdf5(target)
        # Record the concrete class so the checkpoint reader can
        # reconstruct an object of the right type.
        target.attrs['class'] = exp.__class__.__name__
def get_random_occupations(nbasis, nep):
    """Return random fractional occupation numbers.

    Parameters
    ----------
    nbasis : int
        Number of basis functions (length of the result).
    nep : float
        Number of electron pairs to distribute; the result sums to
        ``round(nep)`` (up to floating-point rounding).

    Returns
    -------
    np.ndarray
        Array of shape (nbasis,) with entries in [0, 1].
    """
    result = np.zeros(nbasis)
    # Distribute one full electron pair per outer iteration.  This is not
    # uniformly random, but it is good enough for generating guesses.
    # ``range`` instead of the Python-2-only ``xrange``: works on both
    # Python 2 and Python 3.
    for iep in range(int(np.round(nep))):
        total = 1.0
        while total > 0:
            if total < 0.01:
                # Dump the remainder in one go to guarantee termination.
                fraction = total
                total = 0.0
            else:
                fraction = np.random.uniform(0, total)
                total -= fraction
            index = np.random.randint(nbasis)
            result[index] += fraction
            if result[index] > 1:
                # Occupations may not exceed 1; return the excess to the
                # pool so it gets distributed elsewhere.
                total += result[index] - 1
                result[index] = 1.0
    return result
def main():
    """Generate randomized SCF guesses for 2h-azirine and store them.

    Converges a restricted HF wavefunction, then writes ``nrandom``
    randomized wavefunctions plus interpolations between each of them and
    the converged solution into ``guesses.h5``.  Reads the module-level
    globals ``mixings`` and ``nrandom`` set in the ``__main__`` block.
    """
    # Start from a clean output file; ignore "file does not exist".
    try:
        os.remove("guesses.h5")
    except OSError:
        pass
    fn_name = context.get_fn('test/2h-azirine.xyz')
    mol = Molecule.from_file(fn_name)
    obasis = get_gobasis(mol.coordinates, mol.numbers, '3-21G')
    lf = DenseLinalgFactory(obasis.nbasis)
    # Compute Gaussian integrals
    olp = obasis.compute_overlap(lf)
    kin = obasis.compute_kinetic(lf)
    na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf)
    er = obasis.compute_electron_repulsion(lf)
    # Create alpha orbitals
    exp_alpha = lf.create_expansion()
    # Initial guess
    guess_core_hamiltonian(olp, kin, na, exp_alpha)
    # Construct the restricted HF effective Hamiltonian
    external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
    terms = [
        ROneBodyTerm(kin, 'kin'),
        RDirectTerm(er, 'hartree'),
        RExchangeTerm(er, 'x_hf'),
        ROneBodyTerm(na, 'ne'),
    ]
    ham = REffHam(terms, external)
    # Decide how to occupy the orbitals (5 alpha electrons)
    occ_model = AufbauOccModel(5)
    # Converge WFN with plain SCF
    scf_solver = PlainSCFSolver(1e-6)
    scf_solver(ham, lf, olp, occ_model, exp_alpha)
    # generate randomized wavefunctions:
    # - arbitrary unitary transformation
    # - arbitrary (fractional) occupation numbers (with proper sum)
    # NOTE(review): ``xrange`` below is Python-2-only; this script will not
    # run under Python 3 without changing it to ``range``.
    nbasis = obasis.nbasis
    random_exps = []
    for irandom in xrange(nrandom):
        # random symmetric matrix
        tmp1 = np.random.normal(0, 1, (nbasis, nbasis))
        tmp1 = tmp1 + tmp1.T
        # the random unitary matrix (eigenvectors of a symmetric matrix)
        utrans = np.linalg.eigh(tmp1)[1]
        # apply transformation
        coeffs = np.dot(exp_alpha.coeffs, utrans)
        # random occupation numbers
        occupations = get_random_occupations(nbasis, exp_alpha.occupations.sum())
        # create an expansion object
        exp_alpha_temp = lf.create_expansion()
        # assign the random orbitals
        exp_alpha_temp.coeffs[:] = coeffs
        exp_alpha_temp.occupations[:] = occupations
        # store the expansion in the h5 file and in the list
        store_wfn('guesses.h5', 1.0, 'case_%03i' % irandom, exp_alpha_temp)
        random_exps.append(exp_alpha_temp)
    # interpolate between solution and random wfns
    for mixing in mixings[1:]: # do not consider mixing==1.0
        for irandom in xrange(nrandom):
            # create a new wfn object.
            # construct the mixed density matrix
            dm_mixed = lf.create_one_body()
            dm_mixed.iadd(random_exps[irandom].to_dm(), mixing)
            dm_mixed.iadd(ham.cache['dm_alpha'], 1-mixing)
            # turn it into a set of orbitals (natural orbitals of the mix)
            exp_alpha_temp = lf.create_expansion()
            exp_alpha_temp.derive_naturals(dm_mixed, olp)
            # store the wfn in the h5 file
            store_wfn('guesses.h5', mixing, 'case_%03i' % irandom, exp_alpha_temp)
if __name__ == '__main__':
    # Interpolation weights: 1 = fully random guess, smaller values move
    # the mixed density matrix closer to the converged solution.
    mixings = np.array([1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8])
    # Number of random guesses generated per mixing weight.
    nrandom = 20
    main()
| eustislab/horton | tools/convergence_tester/make_guesses.py | Python | gpl-3.0 | 4,368 | [
"Gaussian"
] | c4b47e73f213380d9a20f91eb7b13fa4a1cb960f1ef158eb44315b0a40a8b713 |
import re
import sys
import datetime
import AlphaSubstValidation
import AlphaSubstPrep
import AlphaSubstBaseMLBootstrap
import AlphaSubstScoring
import random
def stop_err(msg):
    """Write *msg* to standard error and abort with a non-zero exit code.

    The original called ``sys.exit()`` with no argument, which exits with
    status 0 (success) even though an error occurred; exit with 1 so the
    caller (e.g. the Galaxy job runner) sees the process as failed.
    """
    sys.stderr.write(msg)
    sys.exit(1)
#Retrieve Data
# Positional command-line arguments supplied by the Galaxy tool wrapper.
# All values arrive as strings and are converted with int()/str() below.
OutputFile = sys.argv[1]
AnalysisType = sys.argv[2]
SubstModel = sys.argv[3]
CompType = sys.argv[4]
DoSingleBoot = sys.argv[5]
SingleBootIterations = sys.argv[6]
DoDoubleBoot = sys.argv[7]
DoubleBootIterations = sys.argv[8]
Sequences1 = sys.argv[9]
Sequences2 = sys.argv[10]
TreeDefinition = sys.argv[11]
DoIntAlpha = sys.argv[12]
DoExtAlpha = sys.argv[13]
CleanData = sys.argv[14]
DoBranchAlpha = sys.argv[15]
Output_Format = sys.argv[16]
ExtraBaseML = 0
#Get galaxy location
# The Galaxy root is everything before the "database" part of the output path.
OutputSplit = re.compile('database')
OutContents = OutputSplit.split(OutputFile)
GalaxyLocation = OutContents[0]
# Hard-coded path to the PAML baseml binaries on the Galaxy host.
BaseMLLocation = "/home/universe/linux-i686/PAML/paml3.15/bin/"
# Without single bootstrapping run once and let baseml report standard errors.
if int(DoSingleBoot) == 0:
    Iterations = 1
    GetSE = 1
else: GetSE = 0
# AnalysisType: 0 = both groups in one alignment, 1 = two separate groups,
# 2 = two groups with double (alignment-level) bootstrapping.
if int(AnalysisType) == 0:
    AlignmentTogether = 1
    DoDoubleBoot = 0
elif int(AnalysisType) == 1:
    AlignmentTogether = 0
    DoDoubleBoot = 0
elif int(AnalysisType) == 2:
    AlignmentTogether = 0
    DoDoubleBoot = 1
    GetSE = 0
#Initial Data Validation
AlphaValid = AlphaSubstValidation.AlphaSubstValidation()
ValidationErrors = AlphaValid.ValidateAlphaSubstData(AnalysisType,CompType,DoSingleBoot,SingleBootIterations,DoDoubleBoot,DoubleBootIterations,Sequences1,Sequences2,TreeDefinition,BaseMLLocation)
if ValidationErrors != "":
    stop_err(ValidationErrors)
#Set post-validation work variables
SequenceCount = AlphaValid.SequenceCount
TotalSeqLength1 = AlphaValid.TotalSequenceLength1
TotalSeqLength2 = AlphaValid.TotalSequenceLength2
Group1AlignmentCount = AlphaValid.Group1AlignmentCount
Group1Alignments = AlphaValid.Group1Alignments
Group1AlignLength = AlphaValid.Group1AlignLength
Group2AlignmentCount = AlphaValid.Group2AlignmentCount
Group2Alignments = AlphaValid.Group2Alignments
Group2AlignLength = AlphaValid.Group2AlignLength
# Unique per-run key so temporary baseml files from concurrent jobs do not clash.
UserRandomKey = str(datetime.date.today()) + "-" + str(random.randrange(0,50000,1))
#Prepare the data for BaseML
AlphaPrep = AlphaSubstPrep.AlphaSubstPrep()
AlphaPrep.PrepBaseML(AnalysisType,TreeDefinition,SequenceCount,CompType,UserRandomKey,BaseMLLocation,SubstModel,GetSE,DoIntAlpha,DoExtAlpha,GalaxyLocation,1,0,1,2.5,1,0)
BranchDescriptions = AlphaPrep.BranchDescriptions
InternalBranches = AlphaPrep.InternalBranches
ExternalBranches = AlphaPrep.ExternalBranches
Group1BranchList = AlphaPrep.Group1Branches
Group2BranchList = AlphaPrep.Group2Branches
Group1ExtBranchList = AlphaPrep.Group1ExtBranches
Group2ExtBranchList = AlphaPrep.Group2ExtBranches
Group1IntBranchList = AlphaPrep.Group1IntBranches
Group2IntBranchList = AlphaPrep.Group2IntBranches
DoIntAlpha = AlphaPrep.DoIntAlpha
DoExtAlpha = AlphaPrep.DoExtAlpha
#Prepare scoring class
AlphaSaveData = AlphaSubstScoring.AlphaSubstScoring(CompType,DoIntAlpha,DoExtAlpha,AlignmentTogether,BranchDescriptions,Group1BranchList,Group2BranchList,Group1ExtBranchList,Group2ExtBranchList,Group1IntBranchList,Group2IntBranchList,InternalBranches,ExternalBranches,DoBranchAlpha,GetSE)
#Perform Bootstrapping and BaseML Functions
AlphaSubstWork = AlphaSubstBaseMLBootstrap.AlphaSubstBaseMLBootstrap("")
TimesFailed = 0
if int(DoDoubleBoot) == 0: Iterations = SingleBootIterations
else: Iterations = DoubleBootIterations
for IterationIndex in range(0,int(Iterations)):
    SuccessfulStrap = 0
    TimesFailed = 0
    # Retry a failed baseml run up to 100 times before giving up.
    while SuccessfulStrap == 0 and TimesFailed <= 100:
        if str(DoDoubleBoot) == "0":
            AlphaSubstWork.StrapSequence(Group1Alignments,TotalSeqLength1,SequenceCount,UserRandomKey,DoSingleBoot)
            AlphaSubstWork.RunBaseML(BaseMLLocation,UserRandomKey,GalaxyLocation)
            SuccessfulStrap = int(AlphaSubstWork.ScoreBaseML(BaseMLLocation,UserRandomKey,BranchDescriptions,GalaxyLocation,GetSE,ExtraBaseML,SubstModel)) #Get the results from baseml execution
            #Save the baseml results to the score class
            if SuccessfulStrap != 0:
                AlphaSaveData.AddScores(AlphaSubstWork.BaseMLScores,AlphaSubstWork.BaseMLBranchDesc,1,AlphaSubstWork.SEScores)
            else: TimesFailed += 1
            # NOTE(review): ``int(SuccessfulStrap) != "0"`` compares an int
            # with a string, so it is always True (and the elif below is
            # always False).  Probably ``!= 0`` / ``== 0`` was intended --
            # confirm before relying on the failure accounting here.
            if int(SuccessfulStrap) != "0" and str(AlignmentTogether) == "0":
                #Process the second sequence
                AlphaSubstWork.StrapSequence(Group2Alignments,TotalSeqLength2,SequenceCount,UserRandomKey,DoSingleBoot)
                AlphaSubstWork.RunBaseML(BaseMLLocation,UserRandomKey,GalaxyLocation)
                SuccessfulStrap = int(AlphaSubstWork.ScoreBaseML(BaseMLLocation,UserRandomKey,BranchDescriptions,GalaxyLocation,GetSE,ExtraBaseML,SubstModel))
                if SuccessfulStrap != 0:
                    AlphaSaveData.AddScores(AlphaSubstWork.BaseMLScores,AlphaSubstWork.BaseMLBranchDesc,2,AlphaSubstWork.SEScores)
                    AlphaSaveData.CalcMultiSeqAlphas(IterationIndex,DoBranchAlpha) #Now can calc multiple alignment alphas
                else: TimesFailed += 1
            elif int(SuccessfulStrap) == "0": TimesFailed += 1
        else: #Double Bootstrapping
            #FIRST ALIGNMENT
            #Initialize a blank array for per iteration storage
            IterationBranchScoreArray = []
            for TempBranch in BranchDescriptions: IterationBranchScoreArray.append(0)
            SequenceIDArray = []
            #Top level (double) bootstrapping: resample whole alignments
            for DoubleBootIndex in range(0,Group1AlignmentCount):
                SequenceIDArray.append(str(random.randrange(0,Group1AlignmentCount,1)))
            #Get new a total sequence length
            WeightedLength1 = 0
            for SequenceID in SequenceIDArray:
                WeightedLength1 += Group1AlignLength[int(SequenceID)]
            for SequenceID in SequenceIDArray:
                SequenceID = int(SequenceID)
                SequenceLength = Group1AlignLength[SequenceID]
                Sequence = Group1Alignments[SequenceID]
                AlphaSubstWork.WriteDBSAlignment(SequenceLength,SequenceCount,Sequence,UserRandomKey,DoSingleBoot)
                AlphaSubstWork.RunBaseML(BaseMLLocation,UserRandomKey,GalaxyLocation)
                SuccessfulStrap = int(AlphaSubstWork.ScoreBaseML(BaseMLLocation,UserRandomKey,BranchDescriptions,GalaxyLocation,GetSE,ExtraBaseML,SubstModel))
                if SuccessfulStrap != 0:
                    # Accumulate branch scores weighted by alignment length.
                    BranchScores = AlphaSaveData.Get_DBS_Scores(AlphaSubstWork.BaseMLScores,AlphaSubstWork.BaseMLBranchDesc,1,SequenceLength,WeightedLength1)
                    for BranchSubIndex in range (0,len(BranchScores)):
                        IterationBranchScoreArray[BranchSubIndex] += BranchScores[BranchSubIndex]
                else:
                    TimesFailed += 1
            #Save the data
            AlphaSaveData.Save_DBS_Scores(IterationBranchScoreArray,AlphaSubstWork.BaseMLBranchDesc,1,0)
            #SECOND SET OF ALIGNMENTS
            IterationBranchScoreArray = []
            for TempBranch in BranchDescriptions:
                IterationBranchScoreArray.append(0)
            SequenceIDArray = []
            for DoubleBootIndex in range(0,Group2AlignmentCount):
                SequenceIDArray.append(str(random.randrange(0,Group2AlignmentCount,1)))
            #Get a total sequence length
            WeightedLength2 = 0
            for SequenceID in SequenceIDArray:
                WeightedLength2 += Group2AlignLength[int(SequenceID)]
            for SequenceID in SequenceIDArray:
                SequenceID = int(SequenceID)
                SequenceLength = Group2AlignLength[SequenceID]
                Sequence = Group2Alignments[SequenceID]
                AlphaSubstWork.WriteDBSAlignment(SequenceLength,SequenceCount,Sequence,UserRandomKey,DoSingleBoot)
                AlphaSubstWork.RunBaseML(BaseMLLocation,UserRandomKey,GalaxyLocation)
                SuccessfulStrap = int(AlphaSubstWork.ScoreBaseML(BaseMLLocation,UserRandomKey,BranchDescriptions,GalaxyLocation,GetSE,ExtraBaseML,SubstModel))
                if SuccessfulStrap != 0:
                    BranchScores = AlphaSaveData.Get_DBS_Scores(AlphaSubstWork.BaseMLScores,AlphaSubstWork.BaseMLBranchDesc,2,SequenceLength,WeightedLength2)
                    for BranchSubIndex in range (0,len(BranchScores)):
                        IterationBranchScoreArray[BranchSubIndex] += BranchScores[BranchSubIndex]
                else:
                    TimesFailed += 1
            #Save the data
            AlphaSaveData.Save_DBS_Scores(IterationBranchScoreArray,AlphaSubstWork.BaseMLBranchDesc,2,0)
            #FOR BOTH ALIGNMENTS
            #Calculate the Alpha Specific Branches
            AlphaSaveData.CalcMultiSeqAlphas(IterationIndex,DoBranchAlpha)
    if TimesFailed > 100:
        stop_err("Maximum chances expended. Please inspect your sequences.")
#Reporting
Results = AlphaSaveData.CalcStatScores(Iterations,CompType,DoSingleBoot,AlignmentTogether,DoDoubleBoot,Sequences1,Sequences2,SubstModel,GetSE,ExtraBaseML,TotalSeqLength1,TotalSeqLength2,"AlphaSubst",Output_Format)
#create output
of = open(OutputFile,'w')
print >>of,Results
#Clean up data
AlphaSubstWork.FinalCleanUp(BaseMLLocation,GalaxyLocation,UserRandomKey)
| jmchilton/galaxy-central | tools/mdea/AlphaSubst.py | Python | mit | 9,343 | [
"Galaxy"
] | e91da56b9b9f66c082981016ea36231d366c2c47a15192491cebb17ddb71c5a8 |
#
# Copyright 2021 Lars Pastewka (U. Freiburg)
# 2018 Jacek Golebiowski (Imperial College London)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from .base_qm_cluster_tool import BaseQMClusterTool
class QMFlaggingTool(BaseQMClusterTool):
    """This class is responsible for flagging atoms
    that move out of their equilibrium"""
    def __init__(self, mediator=None, qm_flag_potential_energies=None,
                 small_cluster_hops=3, only_heavy=False, ema_parameter=0.1, energy_cap=None,
                 energy_increase=1):
        """This class is responsible for flagging atoms
        that move out of their equilibrium

        Parameters
        ----------
        mediator : matscipy.calculators.mcfm.QMCluster
            class responsible for managing the QM clusters in the simulation
        qm_flag_potential_energies : np.array
            thresholds for flagging individual atoms.
            The dimensions are (nAtoms, 2) where:
            column 1: threshold to enter the QM region
            column 2: threshold to stay in the QM region
        small_cluster_hops : int
            Each flagged atom and atoms around it within small_cluster_hops neighbour hops
            will generate a single cluster, clusters are later joined.
        only_heavy : bool
            If True, only consider non-hydrogen atoms in cluster expansion.
            Hydrogens are added later
        ema_parameter : float
            parameter lambda in the exponential moving average calculation
        energy_cap : float
            if not None, cap potential energy per atom at this value
        energy_increase : int
            Multiplier for potential energy per atom, used to scale it for convenience
        """
        # Initialize the QMClusterObject with a mediator
        super(QMFlaggingTool, self).__init__(mediator)
        # NOTE(review): this try/except can never fire -- a plain attribute
        # assignment does not raise AttributeError.  The intended "must be
        # defined" check is probably a None-test; confirm.
        try:
            self.qm_flag_potential_energies = qm_flag_potential_energies
        except AttributeError:
            raise AttributeError("QM flag PE/force tolerance must be defined")
        self.small_cluster_hops = small_cluster_hops
        self.only_heavy = only_heavy
        self.ema_parameter = ema_parameter
        self.energy_cap = energy_cap
        self.energy_increase = energy_increase
        # List of QM clusters (sets/lists of atomic indices).
        self.qm_atoms_list = []
        # Atoms flagged in the previous call; these use the second
        # ("stay in the region") threshold column -- hysteresis.
        self.old_energized_list = []
        self.verbose = 0
    def get_energized_list(self, atoms, data_array, property_str, hysteretic_tolerance):
        """Produce a list of atoms that are to be flagged as a QM region
        based on the properties given in the array according to the
        tolerance given.

        Parameters
        ----------
        atoms : ase.Atoms
            Whole structure
        data_array : array
            an array of per atom data providing information
        property_str : str
            name of the property so that it can be stored in atoms.arrays
        hysteretic_tolerance : array
            Thresholds for flagging individual atoms.
            The dimensions are (nAtoms, 2) where:
            column 1: threshold to enter the QM region
            column 2: threshold to stay in the QM region

        Returns
        -------
        list
            List of flagged atoms
        """
        # ------ Update EPA (exponential moving average of the property)
        update_avg_property_per_atom(atoms, data_array, property_str, self.ema_parameter)
        avg_property_per_atom = atoms.arrays[property_str]
        # Per-atom tolerance: "enter" threshold by default, the laxer
        # "stay" threshold for atoms already flagged last time.
        tolerance = np.zeros(len(atoms)) + hysteretic_tolerance[:, 0]
        tolerance[self.old_energized_list] = hysteretic_tolerance[self.old_energized_list, 1]
        energized_mask = np.greater_equal(avg_property_per_atom, tolerance)
        energized_list = np.arange(len(atoms))[energized_mask]
        return energized_list
    def create_cluster_around_atom(self, atoms, atom_id, hydrogenate=False):
        """Carve a cluster around the atom with atom_id
        This function operates on sets and returns a set

        Parameters
        ----------
        atoms : ase.Atoms
            Whole structure
        atom_id : int
            Atomic index
        hydrogenate : bool
            If true, hydrogenate the resulting structure

        Returns
        -------
        set
            atoms in the new cluster
        """
        cluster_set = set([atom_id])
        edge_neighbours = set([atom_id])
        # Breadth-first expansion: small_cluster_hops shells of neighbours.
        for i in range(self.small_cluster_hops):
            new_neighbours = set()
            # For each atom in edge neighbours list, expand the list
            for index in edge_neighbours:
                new_neighbours |= set(self.find_neighbours(atoms, index)[0])
            # Remove atoms already in the qm list
            edge_neighbours = new_neighbours - cluster_set
            # Make a union of the sets
            cluster_set = cluster_set | edge_neighbours
        # ----- If specified, add hydrogens to the cluster
        if hydrogenate:
            self.hydrogenate_cluster(atoms, cluster_set)
        return cluster_set
    def join_clusters(self, verbose=False):
        """This function will join the clusters if they overlap
        Input is an array of sets each representing an individual
        small cluster

        Parameters
        ----------
        verbose : bool
            Print messages during calculation
        """
        i = 0
        # Iterate over the whole list taking into account that it might get
        # modified throughout the loop
        while (i < len(self.qm_atoms_list)):
            # Iterate over the sets taking into account that the list can change
            # Do not repeat pairwise disjointment checks
            # i.e. for a list of sets [A, B, C, D]
            # first loop included checks A-B, A-C, A-D (pairs 0 - 1:3)
            # Then make sure the second only does B-C, B-D (pairs 1 - 2:3)
            for j in range(i + 1, len(self.qm_atoms_list)):
                if verbose is True:
                    print(i, j, self.qm_atoms_list[i], self.qm_atoms_list[j],
                          not set.isdisjoint(self.qm_atoms_list[i], self.qm_atoms_list[j]))
                if not set.isdisjoint(self.qm_atoms_list[i], self.qm_atoms_list[j]):
                    # If intersection detected, unify sets
                    self.qm_atoms_list[i] |= self.qm_atoms_list[j]
                    # Then delete the second set to avoid duplicates
                    # Then restart the j loop to see if now, any set
                    # has an intersection with the new union
                    del self.qm_atoms_list[j]
                    i -= 1
                    if verbose is True:
                        for entry in self.qm_atoms_list:
                            print(entry)
                    break
            i += 1
    def expand_cluster(self, special_atoms_list):
        """Include extra atoms in the cluster.
        If one of the special atoms is included in one of the clusters,
        add all other special atoms to this cluster

        Parameters
        ----------
        special_atoms_list : list
            list of the special atoms
        """
        for specialMolecule in special_atoms_list:
            specialMoleculeSet = set(specialMolecule)
            for clusterIndex in range(len(self.qm_atoms_list)):
                if (not specialMoleculeSet.isdisjoint(self.qm_atoms_list[clusterIndex])):
                    self.qm_atoms_list[clusterIndex] |= specialMoleculeSet
    def update_qm_region(self, atoms,
                         potential_energies=None,
                         ):
        """Update the QM region while the simulation is running

        Parameters
        ----------
        atoms : ase.Atoms
            whole structure
        potential_energies : array
            Potential energy per atom

        Returns
        -------
        list of lists of ints
            list of individual clusters as lists of atoms
        """
        # Make sure the right atoms object is in
        # ------ Increase the energy by a common factor - makes it more readable in some cases
        if (self.energy_increase is not None):
            potential_energies *= self.energy_increase
        # ------ Cap maximum energy according to the flag (in place)
        if (self.energy_cap is not None):
            np.minimum(potential_energies, self.energy_cap, potential_energies)
        # ------ Get the energized atoms list
        flagged_atoms_dict = {}
        flagged_atoms_dict["potential_energies"] = self.get_energized_list(atoms,
                                                                           potential_energies,
                                                                           "avg_potential_energies",
                                                                           self.qm_flag_potential_energies)
        energized_set = set()
        for key in flagged_atoms_dict:
            energized_set = set(flagged_atoms_dict[key]) | energized_set
        energized_list = list(energized_set)
        # Remember the flagged atoms for the hysteresis in the next call.
        self.old_energized_list = list(energized_list)
        if (len(energized_list) != 0):
            self.mediator.neighbour_list.update(atoms)
        # TODO if energized list include the whole system just pass it along
        # Replace each flagged atom by a small cluster carved around it.
        for array_i, atom_i in enumerate(energized_list):
            energized_list[array_i] = self.create_cluster_around_atom(atoms, atom_i, hydrogenate=False)
        self.qm_atoms_list = energized_list
        if (len(self.qm_atoms_list) > 0):
            self.join_clusters()
            self.expand_cluster(self.mediator.special_atoms_list)
            self.join_clusters()
        if self.only_heavy is False:
            for index in range(len(self.qm_atoms_list)):
                self.qm_atoms_list[index] = self.hydrogenate_cluster(atoms, self.qm_atoms_list[index])
        self.qm_atoms_list = list(map(list, self.qm_atoms_list))
        return self.qm_atoms_list
def exponential_moving_average(oldset, newset=None, ema_parameter=0.1):
    """Blend *newset* into *oldset* in place as an exponential moving average.

    ``oldset`` becomes ``(1 - ema_parameter) * oldset + ema_parameter * newset``.
    When *newset* is None the call is a no-op.

    Parameters
    ----------
    oldset : array
        Running average, modified in place.
    newset : array
        New data set to fold in.
    ema_parameter : float
        Weight (lambda) given to the new data.
    """
    if newset is not None:
        oldset *= (1 - ema_parameter)
        oldset += ema_parameter * newset
def update_avg_property_per_atom(atoms, data_array, property_str, ema_parameter):
    """Fold *data_array* into the running per-atom average stored on *atoms*.

    The exponentially averaged values are kept in
    ``atoms.arrays[property_str]``; on the first call the raw data is
    simply copied there.

    Parameters
    ----------
    atoms : ase.Atoms
        structure that needs updated values
    data_array : array
        fresh per-atom values to be folded in
    property_str : str
        key for the structure's arrays dictionary
    ema_parameter : float
        Coefficient (lambda) for the Exponential Moving Average
    """
    if property_str in atoms.arrays:
        # In-place EMA update: avg <- (1 - lambda) * avg + lambda * new
        running_avg = atoms.arrays[property_str]
        running_avg *= (1 - ema_parameter)
        running_avg += ema_parameter * data_array
    else:
        # First sample: start the average from a copy of the data.
        atoms.arrays[property_str] = data_array.copy()
| libAtoms/matscipy | matscipy/calculators/mcfm/qm_cluster_tools/qm_flagging_tool.py | Python | lgpl-2.1 | 11,970 | [
"ASE",
"Matscipy"
] | a498c4af6b6f058cd179853dea6a101d031a733d4a4c3a9f626461156904b24b |
""" EC2Endpoint class is the implementation of the EC2 interface to
a cloud endpoint
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import json
import boto3
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Resources.Cloud.Endpoint import Endpoint
__RCSID__ = "$Id$"
class EC2Endpoint(Endpoint):
    """Implementation of a DIRAC cloud endpoint on top of the boto3 EC2 API."""

    def __init__(self, parameters=None):
        super(EC2Endpoint, self).__init__(parameters=parameters)
        # logger
        self.log = gLogger.getSubLogger("EC2Endpoint")
        self.valid = False
        result = self.initialize()
        if result["OK"]:
            self.log.debug("EC2Endpoint created and validated")
            self.valid = True
        else:
            self.log.error(result["Message"])

    def initialize(self):
        """Create the boto3 EC2 resource, load the instance-type table and
        verify the connection.

        :return: S_OK | S_ERROR
        """
        # Map DIRAC configuration option names to boto3 keyword arguments.
        availableParams = {
            "RegionName": "region_name",
            "AccessKey": "aws_access_key_id",
            "SecretKey": "aws_secret_access_key",
            "EndpointUrl": "endpoint_url",  # EndpointUrl is optional
        }
        connDict = {}
        for var in availableParams:
            if var in self.parameters:
                connDict[availableParams[var]] = self.parameters[var]
        try:
            self.__ec2 = boto3.resource("ec2", **connDict)
        except Exception as e:
            self.log.exception("Failed to connect to EC2")
            errorStatus = "Can't connect to EC2: " + str(e)
            return S_ERROR(errorStatus)
        result = self.__loadInstanceType()
        if not result["OK"]:
            return result
        result = self.__checkConnection()
        return result

    def __loadInstanceType(self):
        """Load the vCPU/memory table for the known EC2 instance types from
        the bundled ``ec2_instance_type.json`` file.

        :return: S_OK | S_ERROR
        """
        currentDir = os.path.dirname(os.path.abspath(__file__))
        instanceTypeFile = os.path.join(currentDir, "ec2_instance_type.json")
        try:
            with open(instanceTypeFile, "r") as f:
                self.__instanceTypeInfo = json.load(f)
        except Exception as e:
            self.log.exception("Failed to fetch EC2 instance details")
            errmsg = "Exception loading EC2 instance type info: %s" % e
            self.log.error(errmsg)
            return S_ERROR(errmsg)
        return S_OK()

    def __checkConnection(self):
        """
        Checks connection status by trying to list the images.

        :return: S_OK | S_ERROR
        """
        try:
            self.__ec2.images.filter(Owners=["self"])
        except Exception as e:
            self.log.exception("Failed to list EC2 images")
            return S_ERROR(e)
        return S_OK()

    def createInstances(self, vmsToSubmit):
        """Create up to *vmsToSubmit* VM instances, stopping at the first failure.

        :param int vmsToSubmit: number of instances to create
        :return: S_OK(dict) mapping EC2 instance id -> node properties
        """
        outputDict = {}
        for nvm in range(vmsToSubmit):
            instanceID = makeGuid()[:8]
            result = self.createInstance(instanceID)
            if result["OK"]:
                ec2Id, nodeDict = result["Value"]
                self.log.debug("Created VM instance %s/%s" % (ec2Id, instanceID))
                outputDict[ec2Id] = nodeDict
            else:
                self.log.error("Create EC2 instance error:", result["Message"])
                break
        return S_OK(outputDict)

    def createInstance(self, instanceID=""):
        """Create a single VM instance.

        :param str instanceID: DIRAC identifier for the new instance;
                               a random one is generated when empty
        :return: S_OK((ec2Id, nodeDict)) | S_ERROR
        """
        if not instanceID:
            instanceID = makeGuid()[:8]
        self.parameters["VMUUID"] = instanceID
        self.parameters["VMType"] = self.parameters.get("CEType", "EC2")
        createNodeDict = {}
        # Image: resolve the AMI either by name (searched on EC2) or by ID.
        # BUGFIX: the membership tests were inverted -- the branch that looks
        # the image up by ImageName was entered when only ImageID was set,
        # causing a KeyError, and a name-only configuration fell through to
        # the "No image specified" error.
        if "ImageName" in self.parameters and "ImageID" not in self.parameters:
            try:
                images = self.__ec2.images.filter(Filters=[{"Name": "name", "Values": [self.parameters["ImageName"]]}])
                imageId = None
                # Take the first (and normally only) match.
                for image in images:
                    imageId = image.id
                    break
            except Exception as e:
                self.log.exception("Exception when get ID from image name %s:" % self.parameters["ImageName"])
                return S_ERROR("Failed to get image for Name %s" % self.parameters["ImageName"])
            if imageId is None:
                return S_ERROR("Image name %s not found" % self.parameters["ImageName"])
        elif "ImageID" in self.parameters:
            try:
                self.__ec2.images.filter(ImageIds=[self.parameters["ImageID"]])
            except Exception as e:
                self.log.exception("Failed to get EC2 image list")
                return S_ERROR("Failed to get image for ID %s" % self.parameters["ImageID"])
            imageId = self.parameters["ImageID"]
        else:
            return S_ERROR("No image specified")
        createNodeDict["ImageId"] = imageId
        # Instance type
        if "FlavorName" not in self.parameters:
            return S_ERROR("No flavor specified")
        instanceType = self.parameters["FlavorName"]
        createNodeDict["InstanceType"] = instanceType
        # User data (cloud-init bootstrap script)
        result = self._createUserDataScript()
        if not result["OK"]:
            return result
        createNodeDict["UserData"] = str(result["Value"])
        # Other optional parameters passed straight through to boto3
        for param in ["KeyName", "SubnetId", "EbsOptimized"]:
            if param in self.parameters:
                createNodeDict[param] = self.parameters[param]
        self.log.info("Creating node:")
        for key, value in createNodeDict.items():
            self.log.verbose("%s: %s" % (key, value))
        # Create the VM instance now
        try:
            instances = self.__ec2.create_instances(MinCount=1, MaxCount=1, **createNodeDict)
        except Exception as e:
            self.log.exception("Failed to create EC2 instance")
            return S_ERROR("Exception in ec2 create_instances: %s" % e)
        if len(instances) < 1:
            errmsg = "ec2 create_instances failed to create any VM"
            self.log.error(errmsg)
            return S_ERROR(errmsg)
        # Tag the instance with a DIRAC-derived name
        ec2Id = instances[0].id
        tags = [{"Key": "Name", "Value": "DIRAC_%s" % instanceID}]
        try:
            self.__ec2.create_tags(Resources=[ec2Id], Tags=tags)
        except Exception as e:
            self.log.exception("Failed to tag EC2 instance")
            return S_ERROR("Exception setup name for %s: %s" % (ec2Id, e))
        # Properties of the instance
        nodeDict = {}
        # nodeDict['PublicIP'] = publicIP
        nodeDict["InstanceID"] = instanceID
        if instanceType in self.__instanceTypeInfo:
            nodeDict["NumberOfProcessors"] = self.__instanceTypeInfo[instanceType]["vCPU"]
            nodeDict["RAM"] = self.__instanceTypeInfo[instanceType]["Memory"]
        else:
            # Unknown type: fall back to a single-processor description.
            nodeDict["NumberOfProcessors"] = 1
        return S_OK((ec2Id, nodeDict))

    def stopVM(self, nodeID, publicIP=""):
        """
        Terminate the EC2 instance with the given id.  If there is any public
        IP ( floating IP ) assigned, it is released together with the node.

        :Parameters:
          **nodeID** - `string`
            EC2 instance id ( not uuid ! )
          **publicIP** - `string`
            public IP assigned to the node if any

        :return: S_OK | S_ERROR
        """
        try:
            self.__ec2.Instance(nodeID).terminate()
        except Exception as e:
            self.log.exception("Failed to terminate EC2 instance")
            return S_ERROR("Exception terminate instance %s: %s" % (nodeID, e))
        return S_OK()
| ic-hep/DIRAC | src/DIRAC/Resources/Cloud/EC2Endpoint.py | Python | gpl-3.0 | 7,540 | [
"DIRAC"
] | 9ceaabd78a6cfffa20366908bac5f56f88b9c351a481e6a36567025b32b5069b |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
import sys
import os
from spack import *
class Namd(MakefilePackage):
    """NAMD is a parallel molecular dynamics code designed for
    high-performance simulation of large biomolecular systems."""
    homepage = "http://www.ks.uiuc.edu/Research/namd/"
    # NAMD must be downloaded manually (registration required), hence the
    # file:// URL pointing at the current working directory.
    url = "file://{0}/NAMD_2.12_Source.tar.gz".format(os.getcwd())
    git = "https://charm.cs.illinois.edu/gerrit/namd.git"
    manual_download = True
    version("develop", branch="master")
    version('2.14b2', sha256='cb4bd918d2d545bb618e4b4a20023a53916f0aa362d9e57f3de1562c36240b00')
    version('2.14b1', sha256='9407e54f5271b3d3039a5a9d2eae63c7e108ce31b7481e2197c19e1125b43919')
    version('2.13', '9e3323ed856e36e34d5c17a7b0341e38', preferred=True)
    version('2.12', '2a1191909b1ab03bf0205971ad4d8ee9')
    variant('fftw', default='3', values=('none', '2', '3', 'mkl'),
            description='Enable the use of FFTW/FFTW3/MKL FFT')
    variant('interface', default='none', values=('none', 'tcl', 'python'),
            description='Enables TCL and/or python interface')
    # Each NAMD release is tied to a specific charm++ version.
    depends_on('charmpp@6.10.1:', when="@2.14b1:")
    depends_on('charmpp@6.8.2', when="@2.13")
    depends_on('charmpp@6.7.1', when="@2.12")
    depends_on('fftw@:2.99', when="fftw=2")
    depends_on('fftw@3:', when="fftw=3")
    depends_on('intel-mkl', when="fftw=mkl")
    depends_on('tcl', when='interface=tcl')
    # The python interface also requires tcl.
    depends_on('tcl', when='interface=python')
    depends_on('python', when='interface=python')
    def _copy_arch_file(self, lib):
        """Copy the Linux-x86_64 arch fragment for *lib* to this platform's
        name, patching the tcl library version in the tcl fragment."""
        config_filename = 'arch/{0}.{1}'.format(self.arch, lib)
        copy('arch/Linux-x86_64.{0}'.format(lib),
             config_filename)
        if lib == 'tcl':
            filter_file(r'-ltcl8\.5',
                        '-ltcl{0}'.format(self.spec['tcl'].version.up_to(2)),
                        config_filename)
    def _append_option(self, opts, lib):
        """Append ``--with-<lib>`` and its prefix to the config options,
        creating the matching arch fragment (except for python)."""
        if lib != 'python':
            self._copy_arch_file(lib)
        spec = self.spec
        opts.extend([
            '--with-{0}'.format(lib),
            '--{0}-prefix'.format(lib), spec[lib].prefix
        ])
    @property
    def arch(self):
        # NAMD arch string, e.g. "linux-x86_64".
        plat = sys.platform
        if plat.startswith("linux"):
            plat = "linux"
        march = platform.machine()
        return '{0}-{1}'.format(plat, march)
    @property
    def build_directory(self):
        # Dedicated build tree so repeated builds do not collide.
        return '{0}-spack'.format(self.arch)
    def edit(self, spec, prefix):
        """Write the <arch>-spack.arch file and run NAMD's ./config script."""
        # -m64 is not a valid flag on aarch64 compilers.
        m64 = '-m64 ' if not spec.satisfies('arch=aarch64:') else ''
        with working_dir('arch'):
            with open('{0}.arch'.format(self.build_directory), 'w') as fh:
                # These options are taken from the default provided
                # configuration files; see
                # https://github.com/UIUC-PPL/charm/pull/2778
                if self.spec.satisfies('^charmpp@:6.10.1'):
                    optims_opts = {
                        'gcc': m64 + '-O3 -fexpensive-optimizations \
                            -ffast-math -lpthread',
                        'intel': '-O2 -ip'}
                else:
                    optims_opts = {
                        'gcc': m64 + '-O3 -fexpensive-optimizations \
                            -ffast-math',
                        'intel': '-O2 -ip'}
                # Fall back to no extra flags for unknown compilers.
                optim_opts = optims_opts[self.compiler.name] \
                    if self.compiler.name in optims_opts else ''
                fh.write('\n'.join([
                    'NAMD_ARCH = {0}'.format(self.arch),
                    'CHARMARCH = {0}'.format(self.spec['charmpp'].charmarch),
                    'CXX = {0.cxx} {0.cxx11_flag}'.format(
                        self.compiler),
                    'CXXOPTS = {0}'.format(optim_opts),
                    'CC = {0}'.format(self.compiler.cc),
                    'COPTS = {0}'.format(optim_opts),
                    ''
                ]))
        self._copy_arch_file('base')
        opts = ['--charm-base', spec['charmpp'].prefix]
        fftw_version = spec.variants['fftw'].value
        if fftw_version == 'none':
            opts.append('--without-fftw')
        elif fftw_version == 'mkl':
            self._append_option(opts, 'mkl')
        else:
            _fftw = 'fftw{0}'.format('' if fftw_version == '2' else '3')
            self._copy_arch_file(_fftw)
            opts.extend(['--with-{0}'.format(_fftw),
                         '--fftw-prefix', spec['fftw'].prefix])
        interface_type = spec.variants['interface'].value
        if interface_type != 'none':
            self._append_option(opts, 'tcl')
            if interface_type == 'python':
                self._append_option(opts, 'python')
        else:
            opts.extend([
                '--without-tcl',
                '--without-python'
            ])
        config = Executable('./config')
        config(self.build_directory, *opts)
    def install(self, spec, prefix):
        """Install the namd2 binary (and charmrun) into the prefix."""
        with working_dir(self.build_directory):
            mkdirp(prefix.bin)
            install('namd2', prefix.bin)
            # I'm not sure this is a good idea or if an autoload of the charm
            # module would not be better.
            install('charmrun', prefix.bin)
| rspavel/spack | var/spack/repos/builtin/packages/namd/package.py | Python | lgpl-2.1 | 5,399 | [
"NAMD"
] | 0ac8b31a40d3c7e5b1312d9bf2a1079b5488b8b7bcf3c03b15102d35031e54c0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Re-add ``Visit.improvementissues`` as a many-to-many field routed
    through the ``VisitImprovement`` intermediate model (follows migration
    0092, which removed the previous field)."""
    dependencies = [
        ('visit', '0092_remove_visit_improvementissues'),
    ]
    operations = [
        migrations.AddField(
            model_name='visit',
            name='improvementissues',
            # Reverse accessor on ImprovementIssue is `visits`.
            field=models.ManyToManyField(related_name='visits', through='visit.VisitImprovement', to='visit.ImprovementIssue'),
        ),
    ]
| koebbe/homeworks | visit/migrations/0093_visit_improvementissues.py | Python | mit | 497 | [
"VisIt"
] | b660e9f1063b20499a170da02d9e2c5d8858888c0397906b4de9003548a21ca0 |
"""
NeuroLearn Analysis Tools
=========================
These tools provide the ability to quickly run
machine-learning analyses on imaging data
"""
__all__ = ["Roc"]
__author__ = ["Luke Chang"]
__license__ = "MIT"
import pandas as pd
import numpy as np
from nltools.plotting import roc_plot
from scipy.stats import norm, binom_test
from sklearn.metrics import auc
from copy import deepcopy
class Roc(object):
    """Roc Class
    The Roc class is based on Tor Wager's Matlab roc_plot.m function and
    allows a user to easily run different types of receiver operator
    characteristic curves. For example, one might be interested in single
    interval or forced choice.
    Args:
        input_values: nibabel data instance
        binary_outcome: vector of training labels
        threshold_type: ['optimal_overall', 'optimal_balanced',
                        'minimum_sdt_bias']
        **kwargs: Additional keyword arguments to pass to the prediction
                algorithm
    """
    def __init__(
        self,
        input_values=None,
        binary_outcome=None,
        threshold_type="optimal_overall",
        forced_choice=None,
        **kwargs
    ):
        # NOTE(review): these two adjacent literals concatenate without a
        # space ("...binary_outcomeare different..."); changing the runtime
        # message is out of scope for a documentation pass.
        if len(input_values) != len(binary_outcome):
            raise ValueError(
                "Data Problem: input_value and binary_outcome" "are different lengths."
            )
        # Requires at least one positive label; raises otherwise.
        if not any(binary_outcome):
            raise ValueError("Data Problem: binary_outcome may not be boolean")
        thr_type = ["optimal_overall", "optimal_balanced", "minimum_sdt_bias"]
        if threshold_type not in thr_type:
            raise ValueError(
                "threshold_type must be ['optimal_overall', "
                "'optimal_balanced','minimum_sdt_bias']"
            )
        # Deep-copy all inputs so later in-place adjustments (see
        # calculate()'s forced-choice centering) never mutate caller data.
        self.input_values = deepcopy(input_values)
        self.binary_outcome = deepcopy(binary_outcome)
        self.threshold_type = deepcopy(threshold_type)
        self.forced_choice = deepcopy(forced_choice)
        # Flatten DataFrame labels to a 1-D array; other types are kept as-is.
        if isinstance(self.binary_outcome, pd.DataFrame):
            self.binary_outcome = np.array(self.binary_outcome).flatten()
        else:
            self.binary_outcome = deepcopy(binary_outcome)
    def calculate(
        self,
        input_values=None,
        binary_outcome=None,
        criterion_values=None,
        threshold_type="optimal_overall",
        forced_choice=None,
        balanced_acc=False,
    ):
        """Calculate Receiver Operating Characteristic plot (ROC) for
        single-interval classification.
        Args:
            input_values: nibabel data instance
            binary_outcome: vector of training labels
            criterion_values: (optional) criterion values for calculating fpr
                & tpr
            threshold_type: ['optimal_overall', 'optimal_balanced',
                'minimum_sdt_bias']
            forced_choice: index indicating position for each unique subject
                (default=None)
            balanced_acc: balanced accuracy for single-interval classification
                (bool). THIS IS NOT COMPLETELY IMPLEMENTED BECAUSE
                IT AFFECTS ACCURACY ESTIMATES, BUT NOT P-VALUES OR
                THRESHOLD AT WHICH TO EVALUATE SENS/SPEC
            **kwargs: Additional keyword arguments to pass to the prediction
                algorithm
        """
        # Arguments passed here override whatever __init__ stored.
        if input_values is not None:
            self.input_values = deepcopy(input_values)
        if binary_outcome is not None:
            self.binary_outcome = deepcopy(binary_outcome)
        # Create Criterion Values
        if criterion_values is not None:
            self.criterion_values = deepcopy(criterion_values)
        else:
            # Dense sweep over the observed score range (50 points per sample).
            self.criterion_values = np.linspace(
                np.min(self.input_values.squeeze()),
                np.max(self.input_values.squeeze()),
                num=50 * len(self.binary_outcome),
            )
        if forced_choice is not None:
            self.forced_choice = deepcopy(forced_choice)
        if self.forced_choice is not None:
            # Forced-choice mode: every subject must contribute exactly one
            # positive and one negative trial.
            sub_idx = np.unique(self.forced_choice)
            if len(sub_idx) != len(self.binary_outcome) / 2:
                raise ValueError(
                    "Make sure that subject ids are correct for 'forced_choice'."
                )
            if len(
                set(sub_idx).union(
                    set(np.array(self.forced_choice)[self.binary_outcome])
                )
            ) != len(sub_idx):
                raise ValueError("Issue with forced_choice subject labels.")
            if len(
                set(sub_idx).union(
                    set(np.array(self.forced_choice)[~self.binary_outcome])
                )
            ) != len(sub_idx):
                raise ValueError("Issue with forced_choice subject labels.")
            # Center each subject's positive/negative pair around zero so a
            # fixed threshold of 0 implements the within-subject comparison.
            for sub in sub_idx:
                sub_mn = (
                    self.input_values[
                        (self.forced_choice == sub) & (self.binary_outcome)
                    ]
                    + self.input_values[
                        (self.forced_choice == sub) & (~self.binary_outcome)
                    ]
                )[0] / 2
                self.input_values[
                    (self.forced_choice == sub) & (self.binary_outcome)
                ] = (
                    self.input_values[
                        (self.forced_choice == sub) & (self.binary_outcome)
                    ][0]
                    - sub_mn
                )
                self.input_values[
                    (self.forced_choice == sub) & (~self.binary_outcome)
                ] = (
                    self.input_values[
                        (self.forced_choice == sub) & (~self.binary_outcome)
                    ][0]
                    - sub_mn
                )
            self.class_thr = 0
        # Calculate true positive and false positive rate
        self.tpr = np.zeros(self.criterion_values.shape)
        self.fpr = np.zeros(self.criterion_values.shape)
        for i, x in enumerate(self.criterion_values):
            wh = self.input_values >= x
            self.tpr[i] = np.sum(wh[self.binary_outcome]) / np.sum(self.binary_outcome)
            self.fpr[i] = np.sum(wh[~self.binary_outcome]) / np.sum(
                ~self.binary_outcome
            )
        self.n_true = np.sum(self.binary_outcome)
        self.n_false = np.sum(~self.binary_outcome)
        self.auc = auc(self.fpr, self.tpr)
        # Get criterion threshold (forced-choice mode fixed it to 0 above)
        if self.forced_choice is None:
            self.threshold_type = threshold_type
            if threshold_type == "optimal_balanced":
                mn = (self.tpr + self.fpr) / 2
                self.class_thr = self.criterion_values[np.argmax(mn)]
            elif threshold_type == "optimal_overall":
                # Maximize the raw number of correct classifications.
                n_corr_t = self.tpr * self.n_true
                n_corr_f = (1 - self.fpr) * self.n_false
                sm = n_corr_t + n_corr_f
                self.class_thr = self.criterion_values[np.argmax(sm)]
            elif threshold_type == "minimum_sdt_bias":
                # Calculate MacMillan and Creelman 2005 Response Bias (c_bias);
                # rates are clipped to (0.0001, 0.9999) to keep ppf finite.
                c_bias = (
                    norm.ppf(np.maximum(0.0001, np.minimum(0.9999, self.tpr)))
                    + norm.ppf(np.maximum(0.0001, np.minimum(0.9999, self.fpr)))
                ) / float(2)
                self.class_thr = self.criterion_values[np.argmin(abs(c_bias))]
        # Calculate output
        self.false_positive = (self.input_values >= self.class_thr) & (
            ~self.binary_outcome
        )
        self.false_negative = (self.input_values < self.class_thr) & (
            self.binary_outcome
        )
        self.misclass = (self.false_negative) | (self.false_positive)
        self.true_positive = (self.binary_outcome) & (~self.misclass)
        self.true_negative = (~self.binary_outcome) & (~self.misclass)
        self.sensitivity = (
            np.sum(self.input_values[self.binary_outcome] >= self.class_thr)
            / self.n_true
        )
        self.specificity = (
            1
            - np.sum(self.input_values[~self.binary_outcome] >= self.class_thr)
            / self.n_false
        )
        self.ppv = np.sum(self.true_positive) / (
            np.sum(self.true_positive) + np.sum(self.false_positive)
        )
        if self.forced_choice is not None:
            # Report one decision per subject pair, not per trial.
            self.true_positive = self.true_positive[self.binary_outcome]
            self.true_negative = self.true_negative[~self.binary_outcome]
            self.false_negative = self.false_negative[self.binary_outcome]
            self.false_positive = self.false_positive[~self.binary_outcome]
            self.misclass = (self.false_positive) | (self.false_negative)
        # Calculate Accuracy
        if balanced_acc:
            self.accuracy = np.mean(
                [self.sensitivity, self.specificity]
            )  # See Brodersen, Ong, Stephan, Buhmann (2010)
        else:
            self.accuracy = 1 - np.mean(self.misclass)
        # Calculate p-Value using binomial test (can add hierarchical version of binomial test)
        self.n = len(self.misclass)
        self.accuracy_p = binom_test(int(np.sum(~self.misclass)), self.n, p=0.5)
        self.accuracy_se = np.sqrt(
            np.mean(~self.misclass) * (np.mean(~self.misclass)) / self.n
        )
    def plot(self, plot_method="gaussian", balanced_acc=False, **kwargs):
        """Create ROC Plot
        Create a specific kind of ROC curve plot, based on input values
        along a continuous distribution and a binary outcome variable (logical)
        Args:
            plot_method: type of plot ['gaussian','observed']
            binary_outcome: vector of training labels
            **kwargs: Additional keyword arguments to pass to the prediction
                algorithm
        Returns:
            fig
        """
        self.calculate(balanced_acc=balanced_acc)  # Calculate ROC parameters
        if plot_method == "gaussian":
            if self.forced_choice is not None:
                # Smooth curve from within-subject difference scores.
                sub_idx = np.unique(self.forced_choice)
                diff_scores = []
                for sub in sub_idx:
                    diff_scores.append(
                        self.input_values[
                            (self.forced_choice == sub) & (self.binary_outcome)
                        ][0]
                        - self.input_values[
                            (self.forced_choice == sub) & (~self.binary_outcome)
                        ][0]
                    )
                diff_scores = np.array(diff_scores)
                mn_diff = np.mean(diff_scores)
                d = mn_diff / np.std(diff_scores)
                pooled_sd = np.std(diff_scores) / np.sqrt(2)
                d_a_model = mn_diff / pooled_sd
                expected_acc = 1 - norm.cdf(0, d, 1)
                self.sensitivity = expected_acc
                self.specificity = expected_acc
                self.ppv = self.sensitivity / (self.sensitivity + 1 - self.specificity)
                self.auc = norm.cdf(d_a_model / np.sqrt(2))
                x = np.arange(-3, 3, 0.1)
                self.tpr_smooth = 1 - norm.cdf(x, d, 1)
                self.fpr_smooth = 1 - norm.cdf(x, -d, 1)
            else:
                # Gaussian (equal-variance signal detection) approximation.
                mn_true = np.mean(self.input_values[self.binary_outcome])
                mn_false = np.mean(self.input_values[~self.binary_outcome])
                var_true = np.var(self.input_values[self.binary_outcome])
                var_false = np.var(self.input_values[~self.binary_outcome])
                # NOTE(review): var_false is computed but never used in the
                # pooled SD below -- confirm whether it should contribute.
                pooled_sd = np.sqrt(
                    (var_true * (self.n_true - 1)) / (self.n_true + self.n_false - 2)
                )
                d = (mn_true - mn_false) / pooled_sd
                z_true = mn_true / pooled_sd
                z_false = mn_false / pooled_sd
                x = np.arange(z_false - 3, z_true + 3, 0.1)
                self.tpr_smooth = 1 - (norm.cdf(x, z_true, 1))
                self.fpr_smooth = 1 - (norm.cdf(x, z_false, 1))
            self.aucn = auc(self.fpr_smooth, self.tpr_smooth)
            fig = roc_plot(self.fpr_smooth, self.tpr_smooth)
        elif plot_method == "observed":
            fig = roc_plot(self.fpr, self.tpr)
        else:
            raise ValueError("plot_method must be 'gaussian' or 'observed'")
        return fig
    def summary(self):
        """Display a formatted summary of ROC analysis."""
        print("------------------------")
        print(".:ROC Analysis Summary:.")
        print("------------------------")
        print("{:20s}".format("Accuracy:") + "{:.2f}".format(self.accuracy))
        print("{:20s}".format("Accuracy SE:") + "{:.2f}".format(self.accuracy_se))
        print("{:20s}".format("Accuracy p-value:") + "{:.2f}".format(self.accuracy_p))
        print("{:20s}".format("Sensitivity:") + "{:.2f}".format(self.sensitivity))
        print("{:20s}".format("Specificity:") + "{:.2f}".format(self.specificity))
        print("{:20s}".format("AUC:") + "{:.2f}".format(self.auc))
        print("{:20s}".format("PPV:") + "{:.2f}".format(self.ppv))
        print("------------------------")
| ljchang/nltools | nltools/analysis.py | Python | mit | 13,305 | [
"Gaussian"
] | 2716f7bedb2430e654457f85d90f34403782d4668bb2dc228ed15d0e0b2df0eb |
"""
Imageutils unit tests.
"""
from __future__ import division
import unittest
import numpy as np
from astraviso import imageutils as iu
class imageutilstests(unittest.TestCase):
    """
    Imageutils unit test class.

    Shared base for the imageutils test cases below; no per-test fixture
    setup is required, so setUp/tearDown are intentionally no-ops.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
class test_poisson_noise(imageutilstests):
    """
    Test poisson_noise function.
    """
    def test_empty_image(self):
        """
        Test output value and type.
        """
        # Allocate placeholder image (all-zero, 512 elements)
        image = np.zeros((512))
        # Add noise
        noisy_image = iu.poisson_noise(image, 0, 1200, 200)
        # Check result: type and shape preserved, Poisson counts non-negative
        self.assertIsInstance(noisy_image, np.ndarray, "Output type should be ndarray.")
        self.assertEqual(noisy_image.shape, image.shape, "Image shape should be preserved.")
        self.assertTrue(np.all(noisy_image >= 0), "Image with noise should be strictly positive.")
class test_gaussian_noise(imageutilstests):
    """
    Test gaussian_noise function.
    """
    def test_empty_image(self):
        """
        Test output value and type.
        """
        # Allocate placeholder image (all-zero, 512 elements)
        image = np.zeros((512))
        # Add noise
        noisy_image = iu.gaussian_noise(image, 0, 1200, 200)
        # Check result: type and shape preserved (gaussian noise may be negative,
        # so no positivity check here, unlike the Poisson case)
        self.assertIsInstance(noisy_image, np.ndarray, "Output type should be ndarray.")
        self.assertEqual(noisy_image.shape, image.shape, "Image shape should be preserved.")
class test_vismag2photon(imageutilstests):
    """
    Test vismag2photon function.
    """
    def test_single(self):
        """
        Test output value and type for single input.
        """
        # Set up visible magnitudes
        vismags = -1
        # Convert to photons
        photons = iu.vismag2photon(vismags, 1, 1, 1)
        # Check output
        self.assertIsInstance(photons, float, "Output type should be float.")
        self.assertGreater(photons, 0, "Photon count must be positive.")
    # BUG FIX: this method was also named `test_single`, so it silently
    # shadowed the single-input test above and that test never ran.
    # Renamed to `test_multiple` so unittest discovers both.
    def test_multiple(self):
        """
        Test output value and type for multiple input.
        """
        # Set up visible magnitudes
        vismags = np.array([1, 0, -1])
        # Convert to photons
        photons = iu.vismag2photon(vismags, 1, 1, 1)
        # Check output: element count preserved, brighter (lower) magnitude
        # yields more photons, magnitude 0 maps to the reference count of 1
        self.assertEqual(len(photons), len(vismags), "Output size not equal to input.")
        self.assertIsInstance(photons, np.ndarray, "Output type should be float.")
        self.assertTrue(np.all(photons>0), "Photon counts must be positive.")
        self.assertGreater(photons[2], photons[0], "Incorrect output values.")
        self.assertEqual(photons[1], 1, "Incorrect output value for input 0.")
class test_apply_constant_qe(imageutilstests):
    """
    Test apply_constant_quantum_efficiency function.
    """
    def test_zero(self):
        """
        Test output value and type for zero QE.
        """
        # Convert to photoelectrons: QE of 0 must zero out every pixel
        photo_electrons = iu.apply_constant_quantum_efficiency(16*np.ones((16,16)), 0)
        # Check output
        self.assertIsInstance(photo_electrons, np.ndarray, "Output type should be ndarray.")
        self.assertTrue(np.all(photo_electrons==0), "Output values should all be equal to 0.")
    def test_positive(self):
        """
        Test output value and type for positive QE.
        """
        # Convert to photoelectrons: 16 photons at QE 0.4 -> 6 electrons
        # (implies the implementation truncates/rounds 6.4 down to 6)
        photo_electrons = iu.apply_constant_quantum_efficiency(16*np.ones((16,16)), 0.4)
        # Check output
        self.assertIsInstance(photo_electrons, np.ndarray, "Output type should be ndarray.")
        self.assertTrue(np.all(photo_electrons==6), "Output values should all be equal to 6.")
class test_apply_gaussian_qe(imageutilstests):
    """
    Test apply_gaussian_quantum_efficiency function.
    """
    def test_zero(self):
        """
        Test output value and type for zero QE.
        """
        # Create test image
        test_image = 16*np.ones((16,16))
        # Convert to photoelectrons: mean 0 and sigma 0 must zero everything
        photo_electrons = iu.apply_gaussian_quantum_efficiency(test_image, 0, 0)
        # Check output
        self.assertIsInstance(photo_electrons, np.ndarray, "Output type should be ndarray.")
        self.assertTrue(np.all(photo_electrons==0), "Output values should all be equal to 0.")
    def test_seed(self):
        """
        Test RNG seed capability for Gaussian QE.
        """
        # Create test image
        test_image = 16*np.ones((16,16))
        # Convert to photoelectrons twice with the same seed
        photo_electrons_1 = iu.apply_gaussian_quantum_efficiency(test_image, 0.2, 0.01, seed=1)
        photo_electrons_2 = iu.apply_gaussian_quantum_efficiency(test_image, 0.2, 0.01, seed=1)
        # Check output: identical seed must reproduce identical results
        self.assertIsInstance(photo_electrons_1, np.ndarray, "Output type should be ndarray.")
        self.assertIsInstance(photo_electrons_2, np.ndarray, "Output type should be ndarray.")
        self.assertTrue(np.all(photo_electrons_1==photo_electrons_2), \
                                    "Seed does not lead to consistent results.")
    def test_positive(self):
        """
        Check Gaussian QE for negative values.
        """
        # Create test image; large (256x256) so negative draws would be likely
        # if the implementation failed to clip them
        test_image = 16*np.ones((256,256))
        # Convert to photoelectrons with mean 0 and a wide sigma
        photo_electrons = iu.apply_gaussian_quantum_efficiency(test_image, 0, 1, seed=1)
        # Check output
        self.assertIsInstance(photo_electrons, np.ndarray, "Output type should be ndarray.")
        self.assertTrue(np.all(photo_electrons>=0), "Quantum efficiency must be strictly positive.")
class test_saturate(imageutilstests):
    """
    Test saturate function.
    """
    def test_no_clipping(self):
        """
        Test output value and type for array input and sufficient bit_depth.
        """
        # Compute saturated image: 8 bits can represent 16, so no clipping
        saturated = iu.saturate(16*np.ones((16,16)), 8)
        # Check output
        self.assertIsInstance(saturated, np.ndarray, "Output type should be ndarray.")
        self.assertTrue(np.all(saturated==16), "Output values should all be equal to 16.")
    def test_clipping(self):
        """
        Test output value and type for array input and insufficient bit_depth.
        """
        # Compute saturated image: 2 bits clip at 2**2 - 1 = 3
        saturated = iu.saturate(16*np.ones((16,16)), 2)
        # Check output
        self.assertIsInstance(saturated, np.ndarray, "Output type should be ndarray.")
        self.assertTrue(np.all(saturated==3), "Output values should all be equal to 3.")
class test_conv2(imageutilstests):
    """
    Test conv2 function.
    """
    def test_3by3(self):
        """
        Test 3x3 convolution kernel.
        """
        # Create kernel & image
        kernel = np.ones((3,3))
        image = np.ones((64,64))
        # Convolve
        result = iu.conv2(image, kernel)
        # Check result: shape preserved; interior pixels (away from the
        # border) sum all 9 kernel taps of an all-ones image -> 9
        self.assertIsInstance(result, np.ndarray, "Output type should be ndarray.")
        self.assertEqual(image.shape, result.shape, "Image shape must be preserved.")
        self.assertTrue(np.all(result[1:-2,1:-2] == 9), "Incorrect pixel values.")
    def test_exceptions(self):
        """
        Verify conv2 exceptions.
        """
        # Create kernel & image
        kernel = np.ones((3,3))
        image = np.ones((64,64))
        # Test even kernel: even-sized kernels have no center pixel
        with self.assertRaises(ValueError):
            iu.conv2(image, np.ones((2,2)))
        # Test rectangular kernel
        with self.assertRaises(ValueError):
            iu.conv2(image, np.ones((2,3)))
"Gaussian"
] | 27219aa38f29f9e407231a6a73bfd7ed32aa7103ad3ae0dc5bff7e53a4355b99 |
#!/usr/bin/env python
# encoding: utf-8
"""description: Cororado PGEM models
"""
__version__ = "0.1"
__author__ = "@boqiling"
__all__ = ["PGEMBase", "DUT", "DUT_STATUS", "Cycle"]
from base import PGEMBase
from dut import DUT, DUT_STATUS, Cycle
class Crystal(PGEMBase):
    """Crystal PGEM model; inherits all behavior from PGEMBase unchanged."""
    pass
class Saphire(PGEMBase):
    """Saphire PGEM model with its own PGEM id register layout."""
    # VPD register descriptor for the PGEM id field.
    PGEM_ID = {"name": "INITIALCAP", "addr": 0x077, "length": 1, "type": "int"}
    def write_pgemid(self):
        """Write the PGEM id to the device's VPD EEPROM.

        NOTE(review): `i` and `buffebf` below are undefined in this scope,
        so this method raises NameError as written -- a loop filling a
        buffer appears to be missing. Confirm intent against PGEMBase.
        """
        # write to VPD
        self.device.slave_addr = 0x53
        #
        self.device.write_reg(i, buffebf[i])
        self.device.sleep(5)
| fanmuzhi/UFT | src/UFT/models/__init__.py | Python | gpl-3.0 | 564 | [
"CRYSTAL"
] | d4600926e9f5b6fb8c36df5d0186a38a6337b14d49b67944bbc25b3d14cb897e |
# Ask vnc text spoke
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Brian C. Lane <bcl@redhat.com>
#
from pyanaconda.ui.tui.spokes import StandaloneTUISpoke
from pyanaconda.ui.tui.simpleline import TextWidget
from pyanaconda.ui.tui.hubs.summary import SummaryHub
from pyanaconda.i18n import N_, _
from pyanaconda.iutil import is_unsupported_hw
from pyanaconda.product import productName
import logging
log = logging.getLogger("anaconda")
__all__ = ["WarningsSpoke"]
class WarningsSpoke(StandaloneTUISpoke):
    """
    Text-mode spoke shown before the Summary hub that warns when the
    installation runs on hardware unsupported by Red Hat.

    .. inheritance-diagram:: WarningsSpoke
       :parts: 3
    """
    title = N_("Warnings")
    # Display this spoke before the Summary hub.
    preForHub = SummaryHub
    priority = 0
    def __init__(self, *args, **kwargs):
        StandaloneTUISpoke.__init__(self, *args, **kwargs)
        self._message = _("This hardware (or a combination thereof) is not "
                          "supported by Red Hat. For more information on "
                          "supported hardware, please refer to "
                          "http://www.redhat.com/hardware.")
        # Does anything need to be displayed? Only on Red Hat products, on
        # unsupported hardware, when kickstart has not opted out.
        self._unsupported = productName.startswith("Red Hat ") and \
                            is_unsupported_hw() and \
                            not self.data.unsupportedhardware.unsupported_hardware
    @property
    def completed(self):
        # "Complete" (i.e. skipped) unless there is a warning to show.
        return not self._unsupported
    def refresh(self, args=None):
        """Render the warning message into the spoke window."""
        StandaloneTUISpoke.refresh(self, args)
        self._window += [TextWidget(self._message), ""]
        return True
    # Override Spoke.apply
    def apply(self):
        # Informational spoke: nothing to store.
        pass
| wgwoods/anaconda | pyanaconda/ui/tui/spokes/warnings_spoke.py | Python | gpl-2.0 | 2,529 | [
"Brian"
] | 4dacb6f0253d4a4f8c2613ca1ce3f50f7ce3fed2d683b49625a3ad7196caf767 |
#!/usr/bin/env python
"""Miscellaneous wrapper functions to Schrodinger's computational chemistry tools."""
import os
import sys
import csv
import shutil
import logging
import subprocess
import mdtraj
from openmoltools import utils
logger = logging.getLogger(__name__)
def run_and_log_error(command):
    """Run the process specified by the command and log eventual errors.

    Parameters
    ----------
    command : str
        The command to be run.

    Returns
    -------
    output : str
        The decoded standard output of the process.

    Raises
    ------
    subprocess.CalledProcessError
        In case the command fails.
    """
    try:
        raw_output = subprocess.check_output(command)
    except subprocess.CalledProcessError as error:
        # Record both the child's captured output and the error itself
        # before propagating the failure to the caller.
        for message in (error.output, str(error)):
            logger.error(message)
        raise error
    return raw_output.decode()
def is_schrodinger_suite_installed():
    """Check that Schrodinger's suite is installed.

    Currently only checks whether the environmental variable SCHRODINGER
    is defined. This should contain the path to its main installation folder.

    Returns
    -------
    bool
        True if the Schrodinger's suite is found, False otherwise.
    """
    return 'SCHRODINGER' in os.environ
def need_schrodinger(func):
    """Decorator that refuses to run *func* when Schrodinger's suite is absent.

    The wrapped callable executes unchanged when the SCHRODINGER environment
    variable is set; otherwise a RuntimeError is raised (and logged) before
    *func* runs.
    """
    @utils.wraps_py2(func)
    def _need_schrodinger(*args, **kwargs):
        """Decorator that checks if the Schrodinger's suite is installed."""
        if not is_schrodinger_suite_installed():
            err_msg = "Cannot locate Schrodinger's suite!"
            logger.error(err_msg)
            raise RuntimeError(err_msg)
        return func(*args, **kwargs)
    return _need_schrodinger
@need_schrodinger
def run_proplister(input_file_path):
    """Run proplister utility on a file and return its properties.
    Parameters
    ----------
    input_file_path: str
        The path to the file describing the molecule with its properties.
    Returns
    -------
    properties: list of dict
        A list containing a dictionary for each molecule in the input file
        representing their properties. Each dictionary is in the format
        property_name -> property_value.
    """
    proplister_path = os.path.join(os.environ['SCHRODINGER'], 'utilities', 'proplister')
    # Normalize path
    input_file_path = os.path.abspath(input_file_path)
    # Run proplister, we need the list in case there are spaces in paths
    cmd = [proplister_path, '-a', '-c', input_file_path]
    output = run_and_log_error(cmd)
    output = output.replace('\_', '_')  # Parse '\_' characters in names
    # The output is a csv file. The first line are the property names and then each row
    # contains the values for each molecule. We use the csv module to avoid splitting
    # strings that contain commas (e.g. "2,2-dimethylpropane").
    properties = []
    csv_reader = csv.reader(output.split('\n'))
    names = next(csv_reader)
    for values in csv_reader:
        if len(values) == 0:
            continue  # proplister prints a final empty line
        # Convert raw strings into literals (e.g. convert '\\n' to '\n')
        if sys.version_info < (3, 0):  # Python 2
            converted_values = [v.decode('string_escape') for v in values]
        else:  # Python 3 doesn't have decode on strings
            converted_values = [bytes(v, "utf-8").decode("unicode_escape")
                                for v in values]
        properties.append(dict(zip(names, converted_values)))
    return properties
@need_schrodinger
def run_structconvert(input_file_path, output_file_path):
    """Run Schrodinger's structconvert command line utility to convert from one
    format to another.
    The input and output formats are inferred from the given files extensions.
    Parameters
    ----------
    input_file_path : str
        Path to the input file describing the molecule.
    output_file_path : str
        Path where the converted file will be saved.
    """
    formats_map = {'sdf': 'sd'}  # convert common extensions to format code
    # Locate structconvert executable
    structconvert_path = os.path.join(os.environ['SCHRODINGER'], 'utilities',
                                      'structconvert')
    # Normalize paths
    input_file_path = os.path.abspath(input_file_path)
    output_file_path = os.path.abspath(output_file_path)
    # Determine input and output format from the file extensions
    input_format = os.path.splitext(input_file_path)[1][1:]
    output_format = os.path.splitext(output_file_path)[1][1:]
    if input_format in formats_map:
        input_format = formats_map[input_format]
    if output_format in formats_map:
        output_format = formats_map[output_format]
    # Run structconvert, we need the list in case there are spaces in paths
    cmd = [structconvert_path, '-i' + input_format, input_file_path,
           '-o' + output_format, output_file_path]
    run_and_log_error(cmd)
def autoconvert_maestro(func):
    """Decorator factory; see the wrapped function's docstring below."""
    @utils.wraps_py2(func)
    def _autoconvert_maestro(input_file_path, output_file_path, *args, **kwargs):
        """Decorator that make a function support more than only Maestro files.
        Input and output formats are inferred from extensions. If the input file
        is not in Maestro format, this automatically uses the utility structconvert
        to create a temporary Maestro file. Similarly, if the output file path does
        not have a 'mae' extension, a temporary output file is created and converted
        at the end of the wrapped function execution.
        The decorated function must take as first two parameters the input and the
        output paths respectively.
        """
        is_input_mae = os.path.splitext(input_file_path)[1] == '.mae'
        is_output_mae = os.path.splitext(output_file_path)[1] == '.mae'
        # If they are both in Maestro format just call the function
        if is_output_mae and is_input_mae:
            return func(input_file_path, output_file_path, *args, **kwargs)
        # Otherwise we create a temporary directory to host temp files
        # First transform desired paths into absolute
        input_file_path = os.path.abspath(input_file_path)
        output_file_path = os.path.abspath(output_file_path)
        with mdtraj.utils.enter_temp_directory():
            # Convert input file if necessary
            if is_input_mae:
                func_input = input_file_path
            else:
                func_input = os.path.splitext(os.path.basename(input_file_path))[0] + '.mae'
                run_structconvert(input_file_path, func_input)
            # Determine if we need to convert output
            if is_output_mae:
                func_output = output_file_path
            else:
                func_output = os.path.splitext(os.path.basename(output_file_path))[0] + '.mae'
            # Execute function
            return_value = func(func_input, func_output, *args, **kwargs)
            # Delete temporary input
            if not is_input_mae:
                os.remove(func_input)
            # Convert temporary output
            if not is_output_mae:
                run_structconvert(func_output, output_file_path)
                os.remove(func_output)
            # Copy any other output file in the temporary folder
            # (e.g. log files produced by the wrapped command)
            output_dir = os.path.dirname(output_file_path)
            for file_name in os.listdir('.'):
                shutil.copy2(file_name, os.path.join(output_dir, file_name))
        return return_value
    return _autoconvert_maestro
@need_schrodinger
@autoconvert_maestro
def run_maesubset(input_file_path, output_file_path, range):
    """Run Schrodinger's maesubset command line utility to extract a range of
    structures from a file.
    Parameters
    ----------
    input_file_path : str
        Path to the input file with multiple structures.
    output_file_path : str
        Path to output file.
    range : int or list of ints
        The 0-based indices of the structures to extract from the input files.
        (Note: the parameter name shadows the builtin `range`; kept for API
        compatibility.)
    """
    # Locate maesubset executable
    maesubset_path = os.path.join(os.environ['SCHRODINGER'], 'utilities', 'maesubset')
    # Normalize paths
    input_file_path = os.path.abspath(input_file_path)
    output_file_path = os.path.abspath(output_file_path)
    # Determine molecules to extract; maesubset uses 1-based indices
    try:  # if range is a list of ints
        range_str = [str(i + 1) for i in range]
    except TypeError:  # if range is an int
        range_str = [str(range + 1)]
    range_str = ','.join(range_str)
    # Run maesubset, we need the list in case there are spaces in paths
    cmd = [maesubset_path, '-n', range_str, input_file_path]
    output = run_and_log_error(cmd)
    # Save result
    with open(output_file_path, 'w') as f:
        f.write(output)
@need_schrodinger
@autoconvert_maestro
def run_epik(input_file_path, output_file_path, max_structures=32, ph=7.4,
             ph_tolerance=None, min_probability=None, tautomerize=True, extract_range=None, max_atoms=150):
    """Run Schrodinger's epik command line utility to enumerate protonation and
    tautomeric states.
    Parameters
    ----------
    input_file_path : str
        Path to input file describing the molecule.
    output_file_path : str
        Path to the output file created by epik.
    max_structures : int, optional
        Maximum number of generated structures (default is 32).
    ph : float, optional
        Target pH for generated states (default is 7.4).
    ph_tolerance : float, optional
        Equivalent of -pht option in Epik command (default is None).
    min_probability: float, optional
        Minimum probability for the generated states.
    tautomerize : bool, optional
        Whether or not tautomerize the input structure (default is True).
    extract_range : int or list of ints, optional
        If not None, the function uses the Schrodinger's utility maesubset to
        extract only a subset of the generated structures. This is the 0-based
        indices of the structures to extract from the input files.
    max_atoms : int, optional
        Structures containing more than max_atoms atoms will not be adjusted. (default is 150)
    """
    # Locate epik executable
    epik_path = os.path.join(os.environ['SCHRODINGER'], 'epik')
    # Normalize paths as we'll run in a different working directory
    input_file_path = os.path.abspath(input_file_path)
    output_file_path = os.path.abspath(output_file_path)
    output_dir = os.path.dirname(output_file_path)
    # Preparing epik command arguments for format()
    epik_args = dict(ms=max_structures, ph=ph)
    epik_args['pht'] = '-pht {}'.format(ph_tolerance) if ph_tolerance else ''
    epik_args['nt'] = '' if tautomerize else '-nt'
    epik_args['p'] = '-p {}'.format(min_probability) if min_probability else ''
    epik_args['ma'] = '-ma {}'.format(max_atoms)
    # Determine if we need to convert input and/or output file; when a subset
    # will be extracted, epik first writes to an intermediate '-full.mae' file
    if extract_range is None:
        epik_output = output_file_path
    else:
        epik_output = os.path.splitext(output_file_path)[0] + '-full.mae'
    # Epik command. We need list in case there's a space in the paths
    cmd = [epik_path, '-imae', input_file_path, '-omae', epik_output]
    cmd += '-ms {ms} -ph {ph} {ma} {pht} {nt} {p} -pKa_atom -WAIT -NO_JOBCONTROL'.format(
        **epik_args).split()
    # We run with output_dir as working directory to save there the log file
    with utils.temporary_cd(output_dir):
        run_and_log_error(cmd)
    # Check if we need to extract a range of structures
    if extract_range is not None:
        run_maesubset(epik_output, output_file_path, extract_range)
        os.remove(epik_output)
| choderalab/openmoltools | openmoltools/schrodinger.py | Python | mit | 11,742 | [
"MDTraj"
] | bcd858b882d840b989cc7b5089812644dd20adb03568ea76af05f6166726a482 |
__author__ = 'BisharaKorkor'
import numpy as np
from math import exp, pow, sqrt, pi, fmod
def movingaverage(a, w):
    """Return the moving average of `a` over windows of size `w`.

    The result has length len(a) - w, where element n is the mean of
    a[n:n+w].
    """
    windows = (a[start:start + w] for start in range(len(a) - w))
    return [np.mean(window) for window in windows]
def gaussiankernel(sigma, width):
    """Return a normalised Gaussian kernel of `width` samples.

    Sample j is the Gaussian density (std dev `sigma`) evaluated at an
    offset of width/2 - j; the samples are rescaled to sum to 1.
    """
    centre = width / 2
    coeff = 1.0 / (sigma * sqrt(2 * pi))
    raw = [coeff * exp(-(((centre - j) / sigma) ** 2) / 2)
           for j in range(width)]
    total = np.sum(raw)
    return [value / total for value in raw]
def movingbaseline(array, width):
    """Divide each value by the moving average of the `width` elements
    preceding it (inclusive).

    Element i of the result is array[i + width] divided by the mean of
    array[i:i + width], so the result has length len(array) - width.
    """
    # Moving average of each preceding window (inlined for clarity).
    n_out = len(array) - width
    return [array[i + width] / np.mean(array[i:i + width])
            for i in range(n_out)]
def exponentialsmoothing(array, alpha):
sa = [array[0]] #smoothed array
for i in range(len(array)):
sa += [alpha * array[i] + (1-alpha) * sa[i]]
del sa[0]
return sa
def histogramfrom2Darray(array, nbins):
    """
    Creates histogram of elements from 2 dimensional array

    :param array: input 2 dimensional array
    :param nbins: number of bins so that bin size = (maximum value in array - minimum value in array) / nbins
    the motivation for returning this array is for the purpose of easily plotting with matplotlib
    :return: list of three elements:
            list[0] = length nbins list of integers, a histogram of the array elements
            list[1] = length nbins list of values of array element types, values of the lower end of the bins
            list[2] = [minimum in list, maximum in list]
            this is just good to know sometimes.
    """
    minimum = np.min(array)
    maximum = np.max(array)
    # compute bin size; zero when all elements are equal (handled below)
    binsize = (maximum - minimum) / nbins
    # lower edge of each bin
    bins = [minimum + binsize * i for i in range(nbins)]
    histo = [0] * nbins
    for row in array:
        for y in row:
            if binsize == 0:
                # degenerate case: every element identical -> single bin
                idx = 0
            else:
                # BUGFIX: the original used
                #   histo[int((y - minimum - fmod(y - minimum, binsize)) / binsize) - 1]
                # which shifted every value down one bin and wrapped the
                # lowest bin's values into the LAST bin via index -1.
                idx = int((y - minimum) / binsize)
                # the maximum lands exactly on the upper edge; clamp it
                # into the top bin
                if idx >= nbins:
                    idx = nbins - 1
            histo[idx] += 1
    return [histo, bins, [minimum, maximum]]
def sum_of_subset(array, x, y, dx, dy):
    """Sum the dx-by-dy rectangular block of `array` whose top-left
    corner is at (x, y)."""
    return sum(array[row][col]
               for row in range(x, x + dx)
               for col in range(y, y + dy))
def subset(array, x, y, dx, dy):
    """Return the dx-by-dy rectangular block of `array` whose top-left
    corner is at (x, y), flattened in row-major order."""
    ss = []
    for ix in range(x, x + dx):
        for iy in range(y, y + dy):
            # BUGFIX: was `ss.appen(...)` -- an AttributeError on the
            # first element, so this function could never have worked.
            ss.append(array[ix][iy])
    return ss
| BishKor/pyboon | arrayoperations.py | Python | mit | 2,659 | [
"Gaussian"
] | fe0ce18f4025cc1f6db9b26ef977ae1b2447a44fe7518dce36acc1b3453aab1f |
"""
expectmax.py
Implementation of the expectation-maximisation algorithm used to fit
a multivariate gaussian mixture model of moving groups' origins
to a data set of stars, measured in Cartesian space, centred on and
co-rotating with the local standard of rest.
This module is in desperate need of a tidy. The entry point
`fit_many_comps` is particularly messy and clumsy.
"""
from __future__ import print_function, division
from distutils.dir_util import mkpath
import itertools
import logging
import numpy as np
import multiprocessing
# python3 throws FileNotFoundError that is essentially the same as IOError
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
# The placement of logsumexp varies wildly between scipy versions
import scipy
_SCIPY_VERSION = [int(v.split('rc')[0])
for v in scipy.__version__.split('.')]
if _SCIPY_VERSION[0] == 0 and _SCIPY_VERSION[1] < 10:
from scipy.maxentropy import logsumexp
elif ((_SCIPY_VERSION[0] == 1 and _SCIPY_VERSION[1] >= 3) or
_SCIPY_VERSION[0] > 1):
from scipy.special import logsumexp
else:
from scipy.misc import logsumexp
from scipy import stats
import os
try:
import matplotlib as mpl
# prevents displaying plots from generation from tasks in background
mpl.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
print("Warning: matplotlib not imported")
from .component import SphereComponent
from . import likelihood
from . import compfitter
from . import tabletool
try:
print('Trying to use C implementation in expectmax')
from ._overlap import get_lnoverlaps
except:
print("WARNING: Couldn't import C implementation, using slow pythonic overlap instead")
#Do NOT use logging here, as it won't set up a log file at all if logging is attempted prior to
#setting up the directory and log file...
#logging.info("WARNING: Couldn't import C implementation, using slow pythonic overlap instead")
from .likelihood import slow_get_lnoverlaps as get_lnoverlaps
#from functools import partial
def log_message(msg, symbol='.', surround=False):
    """Log `msg` centred in a 40-char field, flanked by 5 `symbol`s;
    optionally boxed above and below by 50-char `symbol` rules."""
    flank = symbol * 5
    line = '{}{:^40}{}'.format(flank, msg, flank)
    if surround:
        rule = symbol * 50
        line = '\n{}\n{}\n{}'.format(rule, line, rule)
    logging.info(line)
def get_best_permutation(memb_probs, true_memb_probs):
    """Find the column permutation of `memb_probs` that minimises the
    summed absolute difference against `true_memb_probs`.

    Returns the permutation as a tuple of column indices; ties go to the
    first permutation encountered (lexicographic order).
    """
    n_comps = memb_probs.shape[1]

    def total_abs_diff(perm):
        return np.sum(np.abs(memb_probs[:, perm] - true_memb_probs))

    return min(itertools.permutations(np.arange(n_comps)),
               key=total_abs_diff)
def get_kernel_densities(background_means, star_means, amp_scale=1.0):
    """
    Build a Gaussian KDE from `background_means`, then evaluate it at
    `star_means`.

    The Z and W values of the evaluation points (height above, and
    velocity through, the Galactic plane) are sign-flipped so that the
    inferred background density is not inflated by the very
    over-densities (suspected moving groups) we are trying to fit: the
    Galactic density is assumed vertically symmetric about the plane.

    Parameters
    ----------
    background_means: [nstars,6] float array_like, or str
        Phase-space positions of a star set that greatly envelops the
        points in question. If a string, treated as a path to a .npy
        file of such positions.
    star_means: [npoints,6] float array_like
        Phase-space positions of stellar data that we are fitting
        components to.
    amp_scale: float {1.0}
        Optional weighting of the background density; e.g. 0.1 lowers
        background overlaps by an order of magnitude.

    Returns
    -------
    bg_lnols: [npoints] float array_like
        Background log overlaps of stars with the background PDF.
    """
    if type(background_means) is str:
        background_means = np.load(background_means)
    effective_count = amp_scale * background_means.shape[0]

    density = stats.gaussian_kde(background_means.T)

    # Flip Z and W of the evaluation points (copy so the caller's array
    # is untouched).
    eval_points = np.copy(star_means)
    eval_points[:, 2] *= -1
    eval_points[:, 5] *= -1

    return np.log(effective_count) + density.logpdf(eval_points.T)
def get_background_overlaps_with_covariances(background_means, star_means,
                                             star_covs):
    """
    author: Marusa Zerjal 2019 - 05 - 25

    Determine background overlaps using means and covariances for both
    background and stars.
    Covariance matrices for the background are Identity*bandwidth.

    Takes about 3 seconds per star if using whole Gaia DR2 stars with 6D
    kinematics as reference.

    Parameters
    ----------
    background_means: [nstars,6] float array_like
        Phase-space positions of some star set that greatly envelops points
        in question. Typically contents of gaia_xyzuvw.npy, or the output of
        >> tabletool.build_data_dict_from_table(
                   '../data/gaia_cartesian_full_6d_table.fits',
                    historical=True)['means']
    star_means: [npoints,6] float array_like
        Phase-space positions of stellar data that we are fitting components to
    star_covs: [npoints,6,6] float array_like
        Phase-space covariances of stellar data that we are fitting components to

    Returns
    -------
    bg_lnols: [npoints] float list
        Background log overlaps of stars with background probability density
        function. NOTE(review): despite the array_like wording above this
        returns a plain Python list, one entry per star in `star_means`.

    Notes
    -----
    We invert the vertical values (Z and U) because the typical background
    density should be symmetric along the vertical axis, and this distances
    stars from their siblings. I.e. association stars aren't assigned
    higher background overlaps by virtue of being an association star.

    Edits
    -----
    TC 2019-05-28: changed signature such that it follows similar usage as
    get_kernel_densitites
    """
    # Inverting the vertical values (copy first so the caller's array is
    # left untouched)
    star_means = np.copy(star_means)
    star_means[:, 2] *= -1
    star_means[:, 5] *= -1

    # Background covs with bandwidth using Scott's rule
    d = 6.0     # number of dimensions
    nstars = background_means.shape[0]
    bandwidth = nstars**(-1.0 / (d + 4.0))
    # NOTE(review): despite the docstring's "Identity*bandwidth", this uses
    # the full sample covariance scaled by bandwidth**2 -- confirm intent.
    background_cov = np.cov(background_means.T) * bandwidth ** 2
    background_covs = np.array(nstars * [background_cov]) # same cov for every star

    # shapes of the c_get_lnoverlaps input must be: (6, 6), (6,), (120, 6, 6), (120, 6)
    # So I do it in a loop for every star
    bg_lnols = []
    for i, (star_mean, star_cov) in enumerate(zip(star_means, star_covs)):
        print('bgols', i)
        #print('{} of {}'.format(i, len(star_means)))
        #print(star_cov)
        #print('det', np.linalg.det(star_cov))
        #bg_lnol = get_lnoverlaps(star_cov, star_mean, background_covs,
        #                      background_means, nstars)

        try:
            #print('***********', nstars, star_cov, star_mean, background_covs, background_means)
            # Per-background-star log overlaps, then combined in linear
            # space via logsumexp.
            bg_lnol = get_lnoverlaps(star_cov, star_mean, background_covs,
                                  background_means, nstars)
            #print('intermediate', bg_lnol)
            # bg_lnol = np.log(np.sum(np.exp(bg_lnol))) # sum in linear space
            bg_lnol = logsumexp(bg_lnol) # sum in linear space

        # Do we really want to make exceptions here? If the sum fails then
        # there's something wrong with the data.
        except:
            # TC: Changed sign to negative (surely if it fails, we want it to
            # have a neglible background overlap?
            print('bg ln overlap failed, setting it to -inf')
            bg_lnol = -np.inf
        bg_lnols.append(bg_lnol)
        #print(bg_lnol)
        #print('')

    # This should be parallelized
    # bg_lnols = [np.sum(get_lnoverlaps(star_cov, star_mean, background_covs,
    #                                   background_means, nstars))
    #             for star_mean, star_cov in zip(star_means, star_covs)]
    #print(bg_lnols)

    return bg_lnols
def check_convergence(old_best_comps, new_chains, perc=40):
    """Check whether the latest maximisation step is consistent with the
    previous best fits.

    Convergence is declared when every previous best-fit parameter lies
    within the central +/-`perc` percentile span of the corresponding new
    chain. With the default `perc` of 40, previous values must sit inside
    the central 80% range (i.e. not below the 10th nor above the 90th
    percentile) of the new samples.

    Parameters
    ----------
    old_best_comps: [ncomp] Component objects
        Best fits from the previous run (may be None early on).
    new_chains: list of ([nwalkers, nsteps, npars] float array_like)
        The sampler chain from the new run of each component.
    perc: int
        Percentile half-width previous values must fall within; must be
        between 0 and 50.

    Returns
    -------
    converged : bool
        True iff every component's previous fit is contained in its new
        chain's span.
    """
    # Bad input (e.g. the run has only just started): not converged.
    if old_best_comps is None or old_best_comps[0] is None:
        return False

    per_comp_results = []
    for prev_comp, chain in zip(old_best_comps, new_chains):
        med_and_spans = compfitter.calc_med_and_span(chain, perc=perc)
        prev_pars = prev_comp.get_emcee_pars()
        # Column 1 holds the upper bound, column 2 the lower bound.
        below_upper = np.all(prev_pars < med_and_spans[:, 1])
        above_lower = np.all(prev_pars > med_and_spans[:, 2])
        per_comp_results.append(below_upper and above_lower)

    return np.all(per_comp_results)
def calc_membership_probs(star_lnols):
    """Convert a single star's log overlaps into membership probabilities.

    Parameters
    ----------
    star_lnols : [ncomps] array
        The log of the overlap of a star with each group.

    Returns
    -------
    star_memb_probs : [ncomps] array
        Probability of membership to each group, normalised to sum to 1.
    """
    # prob_i = 1 / sum_j exp(lnol_j - lnol_i) -- a softmax over the log
    # overlaps, computed pairwise for numerical parity with the naive loop.
    pairwise_diffs = star_lnols[np.newaxis, :] - star_lnols[:, np.newaxis]
    return 1. / np.exp(pairwise_diffs).sum(axis=1)
def get_all_lnoverlaps(data, comps, old_memb_probs=None,
                       inc_posterior=False, amp_prior=None,
                       use_box_background=False):
    """
    Get the log overlap integrals of each star with each component

    Parameters
    ----------
    data: dict -or- astropy.table.Table -or- path to astrop.table.Table
        if dict, should have following structure:
            'means': [nstars,6] float array_like
                the central estimates of star phase-space properties
            'covs': [nstars,6,6] float array_like
                the phase-space covariance matrices of stars
            'bg_lnols': [nstars] float array_like (opt.)
                the log overlaps of stars with whatever pdf describes
                the background distribution of stars.
        if table, see tabletool.build_data_dict_from_table to see
        table requirements.
    comps: [ncomps] syn.Group object list
        a fit for each comp (in internal form)
    old_memb_probs: [nstars, ncomps] float array {None}
        Only used to get weights (amplitudes) for each fitted component.
        Tracks membership probabilities of each star to each comp. Each
        element is between 0.0 and 1.0 such that each row sums to 1.0
        exactly.
        If bg_hists are also being used, there is an extra column for the
        background (but note that it is not used in this function)
        NOTE: despite the {None} default, passing None raises a
        UserWarning below -- memberships are effectively required.
    inc_posterior: bool {False}
        If true, includes prior on groups into their relative weightings
    amp_prior: int {None}
        If set, forces the combined ampltude of Gaussian components to be
        at least equal to `amp_prior`
    use_box_background: bool {False}
        If true, the final column is filled with a uniform ("box")
        background density: expected background count divided by the
        bounding-box volume of the data.

    Returns
    -------
    lnols: [nstars, ncomps (+1)] float array
        The log overlaps of each star with each component, optionally
        with the log background overlaps appended as the final column
    """
    #~ print('old_memb_probs from the beginning of get_all_lnoverlaps', old_memb_probs)
    # Tidy input, infer some values
    if not isinstance(data, dict):
        data = tabletool.build_data_dict_from_table(data)
    nstars = len(data['means'])
    ncomps = len(comps)
    using_bg = 'bg_lnols' in data.keys()

    # One extra column when any kind of background (kde or box) is in play
    n_memb_cols = ncomps + (using_bg or use_box_background)
    lnols = np.zeros((nstars, n_memb_cols))

    # Set up old membership probabilities
    if old_memb_probs is None:
        raise UserWarning('Why are you trying to get an overall likelihood, when '
                          'you don\'t even have memberships!??!')
        # old_memb_probs = np.ones((nstars, ncomps)) / ncomps

    # 'weights' is the same as 'amplitudes', amplitudes for components:
    # the expected star count of each component (column sums of memberships)
    weights = old_memb_probs[:, :ncomps].sum(axis=0)
    #~ print('weights in get_all_lnoverlaps', weights)

    if np.min(weights) < 0.01:
        raise UserWarning("An association must have at least 1 star. <0.01 stars is extreme...")

    # [ADVANCED/dodgy] Optionally scale each weight by the component prior, then rebalance
    # such that total expected stars across all components is unchanged
    if inc_posterior:
        comp_lnpriors = np.zeros(ncomps)
        for i, comp in enumerate(comps):
            comp_lnpriors[i] = likelihood.ln_alpha_prior(
                    comp, memb_probs=old_memb_probs
            )
        comp_starcount = weights.sum()
        weights *= np.exp(comp_lnpriors)
        # Renormalise so the total expected star count is preserved
        weights = weights / weights.sum() * comp_starcount

    # Optionally scale each weight such that the total expected stars
    # is equal to or greater than `amp_prior`
    if amp_prior:
        if weights.sum() < amp_prior:
            weights *= amp_prior / weights.sum()

    # For each component, get log overlap with each star, scaled by
    # amplitude (weight) of each component's PDF
    for i, comp in enumerate(comps):
        lnols[:, i] = \
            np.log(weights[i]) + \
            likelihood.get_lnoverlaps(comp, data)

    # insert one time calculated background overlaps
    if using_bg:
        lnols[:, -1] = data['bg_lnols']
    if use_box_background:
        # Uniform density: expected background count over the data's
        # bounding-box volume (NOTE: overwrites the kde column if both
        # using_bg and use_box_background are set)
        logging.info('Calculating overall lnlike with a box bg')
        nbg_stars = np.sum(old_memb_probs[:, -1])
        star_volume = np.product(np.ptp(data['means'], axis=0))
        lnols[:, -1] = np.log(nbg_stars/star_volume)
    return lnols
def calc_bic(data, ncomps, lnlike, memb_probs=None, Component=SphereComponent):
    """Calculates the Bayesian Information Criterion

    A simple metric to judge whether added components are worthwhile:
    BIC = ln(n) * k - 2 * lnlike, where n is the number of data points
    and k the number of free model parameters. A lower BIC indicates a
    better fit; differences of <4 are minor improvements.

    Parameters
    ----------
    data: dict
        See fit_many_comps
    ncomps: int
        Number of components used in fit
    lnlike: float
        the overall log likelihood of the fit
    memb_probs: [nstars,ncomps {+1}] float array_like
        See fit_many_comps. (2020/11/15 TC: no longer used -- the data
        point count is now the full star count, not the expected
        membership count.)
    Component:
        See fit_many_comps

    Returns
    -------
    bic: float
    """
    star_count = len(data['means'])
    pars_per_comp = len(Component.PARAMETER_FORMAT)

    n_datapoints = star_count * 6          # 6 measurements per star (phase space)
    n_free_pars = ncomps * pars_per_comp   # parameters across all component models
                                           # (historical note: "-1 for age, +1 for
                                           # amplitude" cancel out)

    return np.log(n_datapoints) * n_free_pars - 2 * lnlike
def expectation(data, comps, old_memb_probs=None,
                inc_posterior=False, amp_prior=None,
                use_box_background=False):
    """Calculate membership probabilities given fits to each group

    Iterates the membership calculation until the BIC of the resulting
    assignment stabilises (or a component's expected count drops below a
    hard-coded floor of 10, in which case iteration stops early).

    Parameters
    ----------
    data: dict
        See fit_many_comps
    comps: [ncomps] Component list
        The best fit for each component from previous runs
    old_memb_probs: [nstars, ncomps (+1)] float array
        Memberhsip probability of each star to each component. Only used here
        to set amplitudes of each component.
    inc_posterior: bool {False}
        Whether to rebalance the weighting of each component by their
        relative priors
    amp_prior: float {None}
        If set, forces the combined ampltude of Gaussian components to be
        at least equal to `amp_prior`
    use_box_background: bool {False}
        Passed through to get_all_lnoverlaps; if true the final column
        uses a uniform ("box") background density.

    Returns
    -------
    memb_probs: [nstars, ncomps] float array
        An array designating each star's probability of being a member to
        each component. It is populated by floats in the range (0.0, 1.0) such
        that each row sums to 1.0, each column sums to the expected size of
        each component, and the entire array sums to the number of stars.
    """
    #To see in real-time what is happening. TODO Remove this once a better performance monitoring is in place!
    print('In expectation')

    # Tidy input and infer some values
    if not isinstance(data, dict):
        data = tabletool.build_data_dict_from_table(data)
    ncomps = len(comps)
    nstars = len(data['means'])

    # An extra membership column is needed when any background is in play
    if ('bg_lnols' in data.keys()) or use_box_background:
        n_memb_cols = ncomps + 1
    else:
        n_memb_cols = ncomps

    # TODO: implement interation till convergence
    memberships_converged = False

    # if no memb_probs provided, assume perfectly equal membership
    iter_cnt = 0
    old_bic = np.inf
    while not memberships_converged:
        if iter_cnt > 0:
            print('Expectation iter cnt: %i'%iter_cnt)
        if old_memb_probs is None:
            print('Initialising old_memb_probs with equal membership')
            old_memb_probs = np.ones((nstars, n_memb_cols)) / (n_memb_cols)
        #~ #!!!MJI Logging to screen what is about to be done.
        #~ if inc_posterior:
            #~ print("Expectation overlaps. Posterior True.")
        #~ else:
            #~ print("Expectation overlaps. Posterior False.")

        # Calculate all log overlaps
        lnols = get_all_lnoverlaps(data, comps, old_memb_probs,
                                   inc_posterior=inc_posterior,
                                   amp_prior=amp_prior,
                                   use_box_background=use_box_background,
                                   )

        # Calculate membership probabilities, tidying up 'nan's as required
        memb_probs = np.zeros((nstars, n_memb_cols))
        for i in range(nstars):
            memb_probs[i] = calc_membership_probs(lnols[i])
        if np.isnan(memb_probs).any():
            log_message('AT LEAST ONE MEMBERSHIP IS "NAN"', symbol='!')
            memb_probs[np.where(np.isnan(memb_probs))] = 0.

        # Hack in a failsafe to stop a component having an amplitude lower than 10
        # (returns the current memb_probs rather than iterating further)
        if np.min(memb_probs.sum(axis=0)) < 10.:
            break

        #!!!MJI Remove einsum here.
        weighted_lnols = np.einsum('ij,ij->ij', lnols, memb_probs)
        lnlike = np.sum(weighted_lnols)

        # Check for convergence: stop once the BIC of successive
        # membership assignments agrees
        # TODO: remove hardcoded SphereComponent here.
        new_bic = calc_bic(data, ncomps=ncomps, lnlike=lnlike, memb_probs=memb_probs,
                           Component=SphereComponent)
        if np.isclose(old_bic, new_bic):
            memberships_converged = True
        else:
            old_bic = new_bic
            old_memb_probs = memb_probs

        # MZ: set memberships_converged to True for the testing purposes!
        #~ print('expectmax.expectation: MZ: set memberships_converged to True for the testing purposes!')
        #~ memberships_converged = True

        iter_cnt += 1

    return memb_probs
def get_overall_lnlikelihood(data, comps, return_memb_probs=False,
                             old_memb_probs=None,
                             inc_posterior=False,
                             use_box_background=False):
    """
    Get overall likelihood for a proposed model.

    Evaluates each star's overlap with every component and background.
    The membership-weighted log overlaps are summed into a single scalar.
    If only fitting one group, inc_posterior does nothing

    Parameters
    ----------
    data: (dict)
        See fit_many_comps
    comps: [ncomps] list of Component objects
        See fit_many_comps
    return_memb_probs: bool {False}
        Along with log likelihood, return membership probabilites
    old_memb_probs: [nstars, ncomps (+1)] float array {None}
        Starting memberships handed to the expectation step.
    inc_posterior: bool {False}
        See expectation
    use_box_background: bool {False}
        See expectation

    Returns
    -------
    overall_lnlikelihood: float
        (or a (float, memb_probs) tuple when return_memb_probs is True)
    """
    # Debugging output -- shows the inputs each time this is evaluated
    print('expectmax before expectation')
    print('comps')
    print(comps)
    print('old_memb_probs')
    print(old_memb_probs)
    memb_probs = expectation(data, comps,
                             old_memb_probs=old_memb_probs,
                             inc_posterior=inc_posterior,
                             use_box_background=use_box_background)
    print('expectmax.det_overall_likelihood DIFF')
    try:
        # Show how much the expectation step moved the memberships
        # (fails when old_memb_probs is None or shapes differ)
        print(memb_probs-old_memb_probs)
    except:
        print('memb_probs-old_memb_probs not possible')

    all_ln_ols = get_all_lnoverlaps(data, comps,
                                    old_memb_probs=memb_probs,
                                    inc_posterior=inc_posterior,
                                    use_box_background=use_box_background)

    # multiplies each log overlap by the star's membership probability
    # (In linear space, takes the star's overlap to the power of its
    # membership probability)
    #einsum is an Einstein summation convention. Not suer why it is used here???
    #weighted_lnols = np.einsum('ij,ij->ij', all_ln_ols, memb_probs)
    weighted_lnols = all_ln_ols * memb_probs

    #if np.sum(weighted_lnols) != np.sum(weighted_lnols):
    #    import pdb; pdb.set_trace() #!!!!

    if return_memb_probs:
        return np.sum(weighted_lnols), memb_probs
    else:
        return np.sum(weighted_lnols)
def maximise_one_comp(data, memb_probs, i, idir, all_init_pars=None, all_init_pos=None,
                      ignore_stable_comps=False, ignore_dead_comps=False,
                      DEATH_THRESHOLD=2.1, unstable_comps=None,
                      burnin_steps=None, plot_it=False,
                      pool=None, convergence_tol=0.25,
                      plot_dir=None, save_dir=None,
                      Component=SphereComponent,
                      trace_orbit_func=None,
                      store_burnin_chains=False,
                      nthreads=1,
                      optimisation_method='emcee',
                      nprocess_ncomp=False,
                      ):
    """
    Performs the 'maximisation' step of the EM algorithm for 1 component
    at a time.

    all_init_pars must be given in 'internal' form, that is the standard
    deviations must be provided in log form.

    NOTE(review): despite their names, `ignore_stable_comps`,
    `ignore_dead_comps`, `DEATH_THRESHOLD` and `unstable_comps` are not
    acted on in this function (the skip logic is commented out below and
    lives in the caller, `maximisation`).

    Parameters
    ----------
    data: dict
        See fit_many_comps
    memb_probs: [nstars, ncomps {+1}] float array_like
        See fit_many_comps
    i: int
        Perform optimisation for the i-th component of the model.
    DEATH_THRESHOLD: float {2.1}
        ...
    burnin_steps: int
        The number of steps for each burnin loop
    idir: str
        The results directory for this iteration
    all_init_pars: [ncomps, npars] float array_like
        The initial parameters around which to initialise emcee walkers
    all_init_pos: [ncomps, nwalkers, npars] float array_like
        The actual exact positions at which to initialise emcee walkers
        (from, say, the output of a previous emcee run)
    plot_it: bool {False}
        Whether to plot lnprob chains (from burnin, etc) as we go
    pool: MPIPool object {None}
        pool of threads to execute walker steps concurrently
    convergence_tol: float {0.25}
        How many standard devaitions an lnprob chain is allowed to vary
        from its mean over the course of a burnin stage and still be
        considered "converged". Default value allows the median of the
        final 20 steps to differ by 0.25 of its standard deviations from
        the median of the first 20 steps.
    ignore_dead_comps : bool {False}
        if componennts have fewer than 2(?) expected members, then ignore
        them
    ignore_stable_comps : bool {False}
        If components have been deemed to be stable, then disregard them
    Component: Implementation of AbstractComponent {Sphere Component}
        The class used to convert raw parametrisation of a model to
        actual model attributes.
    trace_orbit_func: function {None}
        A function to trace cartesian oribts through the Galactic potential.
        If left as None, will use traceorbit.trace_cartesian_orbit (base
        signature of any alternate function on this ones)
    optimisation_method: str {'emcee'}
        Optimisation method to be used in the maximisation step to fit
        the model. Default: emcee. Available: scipy.optimise.minimize with
        the Nelder-Mead method. Note that in case of the gradient descent,
        no chain is returned and meds and spans cannot be determined.
    nprocess_ncomp: bool {False}
        Compute maximisation in parallel? This is relevant only in case
        Nelder-Mead method is used: This method computes optimisation
        many times with different initial positions. The result is the
        one with the best likelihood. These optimisations are computed
        in parallel if nprocess_ncomp equals True.

    Returns
    -------
    best_comp:
        The best fitting component.
    chain:
        The sampler chain (emcee) or the final parameter vector
        (Nelder-Mead).
    lnprob:
        Log-probabilities associated with `chain`.
    final_pos:
        The final positions of walkers for this maximisation.
        Useful for restarting the next emcee run.
    """
    log_message('Fitting comp {}'.format(i), symbol='.', surround=True)
    # Per-component output directory within this iteration's directory
    gdir = idir + "comp{}/".format(i)
    mkpath(gdir)

    #~ # If component has too few stars, skip fit, and use previous best walker
    #~ if ignore_dead_comps and (np.sum(memb_probs[:, i]) < DEATH_THRESHOLD):
        #~ logging.info("Skipped component {} with nstars {}".format(
                #~ i, np.sum(memb_probs[:, i])
        #~ ))
    #~ elif ignore_stable_comps and not unstable_comps[i]:
        #~ logging.info("Skipped stable component {}".format(i))
    # Otherwise, run maximisation and sampling stage
    #~ else:
    best_comp, chain, lnprob = compfitter.fit_comp(
            data=data, memb_probs=memb_probs[:, i],
            burnin_steps=burnin_steps, plot_it=plot_it,
            pool=pool, convergence_tol=convergence_tol,
            plot_dir=gdir, save_dir=gdir, init_pos=all_init_pos[i],
            init_pars=all_init_pars[i], Component=Component,
            trace_orbit_func=trace_orbit_func,
            store_burnin_chains=store_burnin_chains,
            nthreads=nthreads,
            nprocess_ncomp=nprocess_ncomp,
            optimisation_method=optimisation_method,
    )
    logging.info("Finished fit")
    logging.info("Best comp pars:\n{}".format(
            best_comp.get_pars()
    ))
    # The shape of `chain` differs by optimisation method, so the final
    # position (and age reporting) is extracted accordingly
    if optimisation_method == 'emcee':
        final_pos = chain[:, -1, :]
        logging.info("With age of: {:.3} +- {:.3} Myr".
                     format(np.median(chain[:, :, -1]),
                            np.std(chain[:, :, -1])))
    elif optimisation_method == 'Nelder-Mead':
        final_pos = chain
        logging.info("With age of: {:.3} Myr".
                     format(chain[-1]))

    # Persist this component's results for restarts/inspection
    best_comp.store_raw(gdir + 'best_comp_fit.npy')
    np.save(gdir + "best_comp_fit_bak.npy", best_comp)  # can remove this line when working
    np.save(gdir + 'final_chain.npy', chain)
    np.save(gdir + 'final_lnprob.npy', lnprob)

    return best_comp, chain, lnprob, final_pos
def maximisation(data, ncomps, memb_probs, burnin_steps, idir,
                 all_init_pars, all_init_pos=None, plot_it=False, pool=None,
                 convergence_tol=0.25, ignore_dead_comps=False,
                 Component=SphereComponent,
                 trace_orbit_func=None,
                 store_burnin_chains=False,
                 unstable_comps=None,
                 ignore_stable_comps=False,
                 nthreads=1, optimisation_method='emcee',
                 nprocess_ncomp=False,
                 ):
    """
    Performs the 'maximisation' step of the EM algorithm

    all_init_pars must be given in 'internal' form, that is the standard
    deviations must be provided in log form.

    Each component is fitted independently (optionally in parallel via
    multiprocessing); components deemed dead or stable may be skipped,
    and `success_mask` records which indices were actually fitted.

    Parameters
    ----------
    data: dict
        See fit_many_comps
    ncomps: int
        Number of components being fitted
    memb_probs: [nstars, ncomps {+1}] float array_like
        See fit_many_comps
    burnin_steps: int
        The number of steps for each burnin loop
    idir: str
        The results directory for this iteration
    all_init_pars: [ncomps, npars] float array_like
        The initial parameters around which to initialise emcee walkers
    all_init_pos: [ncomps, nwalkers, npars] float array_like
        The actual exact positions at which to initialise emcee walkers
        (from, say, the output of a previous emcee run)
    plot_it: bool {False}
        Whehter to plot lnprob chains (from burnin, etc) as we go
    pool: MPIPool object {None}
        pool of threads to execute walker steps concurrently
    convergence_tol: float {0.25}
        How many standard devaitions an lnprob chain is allowed to vary
        from its mean over the course of a burnin stage and still be
        considered "converged". Default value allows the median of the
        final 20 steps to differ by 0.25 of its standard deviations from
        the median of the first 20 steps.
    ignore_dead_comps : bool {False}
        if componennts have fewer than 2(?) expected members, then ignore
        them
    ignore_stable_comps : bool {False}
        If components have been deemed to be stable, then disregard them
    Component: Implementation of AbstractComponent {Sphere Component}
        The class used to convert raw parametrisation of a model to
        actual model attributes.
    trace_orbit_func: function {None}
        A function to trace cartesian oribts through the Galactic potential.
        If left as None, will use traceorbit.trace_cartesian_orbit (base
        signature of any alternate function on this ones)
    optimisation_method: str {'emcee'}
        Optimisation method to be used in the maximisation step to fit
        the model. Default: emcee. Available: scipy.optimise.minimize with
        the Nelder-Mead method. Note that in case of the gradient descent,
        no chain is returned and meds and spans cannot be determined.
    nprocess_ncomp: bool {False}
        How many processes to use in the maximisation of ncomps with
        python's multiprocessing library in case Nelder-Mead is used.

    Returns
    -------
    new_comps: [ncomps] Component array
        For each component's maximisation, we have the best fitting component
    all_samples: [ncomps, nwalkers, nsteps, npars] float array
        An array of each component's final sampling chain
    all_lnprob: [ncomps, nwalkers, nsteps] float array
        An array of each components lnprob
    all_final_pos: [ncomps, nwalkers, npars] float array
        The final positions of walkers from each separate Compoment
        maximisation. Useful for restarting the next emcee run.
    success_mask: np.where mask
        If ignoring dead components, use this mask to indicate the components
        that didn't die
    """
    #To help with debugging...
    print("In Maximisation")

    # Set up some values
    DEATH_THRESHOLD = 2.1       # The total expected stellar membership below
                                # which a component is deemed 'dead' (if
                                # `ignore_dead_comps` is True)

    new_comps = []
    all_samples = []
    all_lnprob = []
    success_mask = []
    all_final_pos = ncomps * [None]

    # Ensure None value inputs are still iterable
    if all_init_pos is None:
        all_init_pos = ncomps * [None]
    if all_init_pars is None:
        all_init_pars = ncomps * [None]
    if unstable_comps is None:
        unstable_comps = ncomps * [True]

    log_message('Ignoring stable comps? {}'.format(ignore_stable_comps))
    log_message('Unstable comps are {}'.format(unstable_comps))

    ### MULTIPROCESSING
    if nprocess_ncomp and ncomps>1:
        logging.info("Maximising components with multiprocessing")
        # Manager dict collects each worker's results across processes
        manager = multiprocessing.Manager()
        return_dict = manager.dict()

        def worker(i, return_dict):
            """Fit component i and stash the results under key i."""
            best_comp, chain, lnprob, final_pos = maximise_one_comp(data,
                                memb_probs, i, all_init_pars=all_init_pars,
                                all_init_pos=all_init_pos, idir=idir,
                                ignore_stable_comps=ignore_stable_comps,
                                ignore_dead_comps=ignore_dead_comps,
                                DEATH_THRESHOLD=DEATH_THRESHOLD, unstable_comps=unstable_comps,
                                burnin_steps=burnin_steps, plot_it=plot_it,
                                pool=pool, convergence_tol=0.25,
                                Component=Component,
                                trace_orbit_func=trace_orbit_func,
                                store_burnin_chains=store_burnin_chains,
                                nthreads=nthreads,
                                optimisation_method=optimisation_method,
                                )
            return_dict[i] = {'best_comp': best_comp, 'chain': chain, 'lnprob': lnprob, 'final_pos': final_pos}

        jobs = []
        for i in range(ncomps):
            # If component has too few stars, skip fit, and use previous best walker
            if ignore_dead_comps and (np.sum(memb_probs[:, i]) < DEATH_THRESHOLD):
                logging.info("Skipped component {} with nstars {}".format(
                        i, np.sum(memb_probs[:, i])
                ))
            elif ignore_stable_comps and not unstable_comps[i]:
                logging.info("Skipped stable component {}".format(i))
            else:
                process = multiprocessing.Process(target=worker, args=(i, return_dict))
                jobs.append(process)

        # Start the threads (i.e. calculate the random number lists)
        for j in jobs:
            j.start()

        # Ensure all of the threads have finished
        for j in jobs:
            j.join()

        # Collect results in component order (dict keys are unordered)
        keys = return_dict.keys()
        keys = sorted(keys)
        for i in keys:
            v = return_dict[i]
            best_comp = v['best_comp']
            chain = v['chain']
            lnprob = v['lnprob']
            final_pos = v['final_pos']

            new_comps.append(best_comp)
            all_samples.append(chain)
            all_lnprob.append(lnprob)

            # Keep track of the components that weren't ignored
            success_mask.append(i)

            # record the final position of the walkers for each comp
            all_final_pos[i] = final_pos

    else:
        logging.info("Maximising components in a for loop")
        for i in range(ncomps):
            # If component has too few stars, skip fit, and use previous best walker
            if ignore_dead_comps and (np.sum(memb_probs[:, i]) < DEATH_THRESHOLD):
                logging.info("Skipped component {} with nstars {}".format(
                        i, np.sum(memb_probs[:, i])
                ))
            elif ignore_stable_comps and not unstable_comps[i]:
                logging.info("Skipped stable component {}".format(i))
            else:
                best_comp, chain, lnprob, final_pos = maximise_one_comp(data,
                                    memb_probs, i, all_init_pars=all_init_pars,
                                    all_init_pos=all_init_pos, idir=idir,
                                    ignore_stable_comps=ignore_stable_comps,
                                    ignore_dead_comps=ignore_dead_comps,
                                    DEATH_THRESHOLD=DEATH_THRESHOLD, unstable_comps=unstable_comps,
                                    burnin_steps=burnin_steps, plot_it=plot_it,
                                    pool=pool, convergence_tol=0.25,
                                    Component=Component,
                                    trace_orbit_func=trace_orbit_func,
                                    store_burnin_chains=store_burnin_chains,
                                    nthreads=nthreads,
                                    optimisation_method=optimisation_method,
                                    )

                new_comps.append(best_comp)
                all_samples.append(chain)
                all_lnprob.append(lnprob)

                # Keep track of the components that weren't ignored
                success_mask.append(i)

                # record the final position of the walkers for each comp
                all_final_pos[i] = final_pos

    # # TODO: Maybe need to this outside of this call, so as to include
    # # reference to stable comps
    # Component.store_raw_components(idir + 'best_comps.npy', new_comps)
    # np.save(idir + 'best_comps_bak.npy', new_comps)

    return new_comps, all_samples, all_lnprob, \
           all_final_pos, success_mask
def maximisation_gradient_descent(data, ncomps=None, memb_probs=None,
                                  all_init_pars=None, all_init_pos=None,
                                  convergence_tol=1,
                                  Component=SphereComponent,
                                  trace_orbit_func=None,
                                  optimisation_method='Nelder-Mead',
                                  idir=None,
                                  ):
    """
    'Maximisation' step of the EM algorithm using a scipy gradient-descent
    style optimiser (e.g. Nelder-Mead) instead of emcee.

    Each component is fitted independently against the stars weighted by
    its membership column, and the result is written to disk under
    ``idir/comp<i>/``.

    Parameters
    ----------
    data: dict
        See fit_many_comps
    ncomps: int
        Number of components being fitted
    memb_probs: [nstars, ncomps {+1}] float array_like
        Membership probabilities; column i weights the fit of component i
    all_init_pars: [ncomps, npars] float array_like
        Initial parameters (in 'internal' form, i.e. log standard
        deviations) used as the optimiser's starting point
    all_init_pos: [ncomps, nwalkers, npars] float array_like
        Exact walker positions from a previous run, if available
    convergence_tol: float {1}
        Convergence tolerance passed through to the component fitter
    Component: Implementation of AbstractComponent {SphereComponent}
        Class converting a raw parametrisation into model attributes
    trace_orbit_func: function {None}
        Function used to trace cartesian orbits through the Galactic
        potential (defaults handled downstream)
    optimisation_method: str {'Nelder-Mead'}
        scipy.optimize.minimize method name. NOTE: no sampling chain is
        produced, so medians and spans cannot be determined.
    idir: str
        The results directory for this iteration

    Returns
    -------
    new_comps: [ncomps] Component list
        Best fitting component for each input component
    all_lnprob: [ncomps] list
        Final lnprob of each component's fit
    all_final_pos: [ncomps] list
        Final optimiser positions, usable to seed the next iteration
    """
    fitted_comps = []
    lnprobs = []
    final_positions = []
    for comp_ix in range(ncomps):
        fit_result = compfitter.fit_comp_gradient_descent_multiprocessing(
            data=data, memb_probs=memb_probs[:, comp_ix],
            convergence_tol=convergence_tol,
            init_pos=all_init_pos[comp_ix],
            init_pars=all_init_pars[comp_ix], Component=Component,
            trace_orbit_func=trace_orbit_func,
            optimisation_method=optimisation_method,  # e.g. Nelder-Mead
        )
        comp_best, walkers_final, comp_lnprob = fit_result
        # Persist this component's result under its own subdirectory
        comp_dir = os.path.join(idir, "comp{}/".format(comp_ix))
        mkpath(comp_dir)
        comp_best.store_raw(comp_dir + 'best_comp_fit.npy')
        np.save(comp_dir + 'final_lnprob.npy', comp_lnprob)
        fitted_comps.append(comp_best)
        final_positions.append(walkers_final)
        lnprobs.append(comp_lnprob)
    return fitted_comps, lnprobs, final_positions
def maximisation_gradient_descent_multiprocessing(data, ncomps=None,
        memb_probs=None, all_init_pars=None, all_init_pos=None,
        convergence_tol=1, Component=SphereComponent,
        trace_orbit_func=None, optimisation_method='Nelder-Mead',
        idir=None,
        ):
    """
    'Maximisation' step of the EM algorithm using a scipy optimiser,
    fitting every component concurrently in its own process.

    One ``multiprocessing.Process`` is spawned per component; results are
    collected through a manager dict keyed by component index. Unlike the
    serial variant, nothing is persisted to disk here.

    Returns
    -------
    new_comps: [ncomps] Component list
        Best fitting component for each input component
    all_lnprob: [ncomps] list
        Final lnprob of each component's fit
    all_final_pos: [ncomps] list
        Final optimiser positions, usable to seed the next iteration
    """
    manager = multiprocessing.Manager()
    results = manager.dict()

    def _fit_single_comp(comp_ix, results):
        # Fit one component; deposit [comp, lnprob, final_pos] into the
        # shared dict under the component's index.
        comp_best, walkers_final, comp_lnprob = \
            compfitter.fit_comp_gradient_descent_multiprocessing(
                data=data, memb_probs=memb_probs[:, comp_ix],
                convergence_tol=convergence_tol,
                init_pos=all_init_pos[comp_ix],
                init_pars=all_init_pars[comp_ix], Component=Component,
                trace_orbit_func=trace_orbit_func,
                optimisation_method=optimisation_method,  # e.g. Nelder-Mead
            )
        results[comp_ix] = [comp_best, comp_lnprob, walkers_final]

    procs = [multiprocessing.Process(target=_fit_single_comp,
                                     args=(comp_ix, results))
             for comp_ix in range(ncomps)]
    # Launch every component fit, then wait for all to complete
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()

    new_comps = []
    all_lnprob = []
    all_final_pos = []
    for comp_ix in range(ncomps):
        comp_best, comp_lnprob, walkers_final = results[comp_ix]
        new_comps.append(comp_best)
        all_lnprob.append(comp_lnprob)
        all_final_pos.append(walkers_final)
    return new_comps, all_lnprob, all_final_pos
def check_stability(data, best_comps, memb_probs, use_box_background=False):
    """
    Check whether the run has encountered common failure modes.

    Failure modes detected: a component dropping to two or fewer expected
    members, a non-finite overall lnlikelihood, or any non-finite
    membership probability.

    Parameters
    ----------
    data: dict
        See fit_many_comps
    best_comps: [ncomps] list of Component objects
        The best fits for each component from the most recent run
    memb_probs: [nstars, ncomps] float array
        The membership array from the most recent run
    use_box_background: bool {False}
        Passed through to the overall lnlikelihood evaluation

    Returns
    -------
    stable: bool
        True if none of the failure modes were detected
    """
    ncomps = len(best_comps)
    logging.info('DEBUG: memb_probs shape: {}'.format(memb_probs.shape))
    # Expected member count per component (background column excluded)
    comp_counts = memb_probs[:, :ncomps].sum(axis=0)
    if comp_counts.min() <= 2.:
        logging.info("ERROR: A component has less than 2 members")
        return False
    overall_lnlike = get_overall_lnlikelihood(
            data, best_comps, use_box_background=use_box_background)
    if not np.isfinite(overall_lnlike):
        logging.info("ERROR: Posterior is not finite")
        return False
    if not np.isfinite(memb_probs).all():
        logging.info("ERROR: At least one membership is not finite")
        return False
    return True
def check_comps_stability(z, unstable_flags_old, ref_counts, using_bg, thresh=0.02):
    """
    Decide which components need refitting by comparing each component's
    current total member count against the count recorded the last time
    it was deemed stable.

    TODO: maybe worth investigating if run can be deemed converged if all
    components are "stable". Tim thinks better safe than sorry.

    Parameters
    ----------
    z : [nstars, ncomps {+1}] float array
        Membership probability of each star with each component
    unstable_flags_old : [ncomps] bool array or None
        Instability flags from the previous check; reference counts are
        only refreshed for components that were just refitted
    ref_counts : [ncomps {+1}] float array or None
        Stored expected membership of each component from its last refit;
        None on the first call
    using_bg : bool
        Whether the final column of `z` is background membership
    thresh : float {0.02}
        Fractional change below which a component counts as stable

    Returns
    -------
    unstable_flags : [ncomps] bool array
        True for components whose membership changed by more than `thresh`
    ref_counts : float array
        Updated reference counts
    """
    ncomps = z.shape[1] - using_bg
    memb_counts = z.sum(axis=0)

    # First call: no reference yet, so every component is unstable
    if ref_counts is None:
        return np.array(ncomps * [True]), memb_counts

    frac_change = np.abs((memb_counts - ref_counts) / ref_counts)
    unstable_flags = frac_change > thresh
    # Drop the background column from the flags, if present
    if using_bg:
        unstable_flags = unstable_flags[:-1]
    # Refresh reference counts only for components that were just refitted
    ref_counts[unstable_flags_old] = memb_counts[unstable_flags_old]
    return unstable_flags, ref_counts
def fit_many_comps(data, ncomps, rdir='', pool=None, init_memb_probs=None,
                   init_comps=None, inc_posterior=False, burnin=1000,
                   sampling_steps=5000, ignore_dead_comps=False,
                   Component=SphereComponent, trace_orbit_func=None,
                   use_background=False,
                   use_box_background=False,
                   store_burnin_chains=False,
                   ignore_stable_comps=False, max_em_iterations=100,
                   record_len=30, bic_conv_tol=0.1, min_em_iterations=30,
                   nthreads=1, optimisation_method='emcee',
                   nprocess_ncomp=False,
                   **kwargs):
    """
    Entry point: Fit multiple Gaussians to data set

    This is where we apply the expectation maximisation algorithm.

    There are two ways to initialise this function, either:
    membership probabilities -or- initial components.
    If only fitting with one component (and a background) this function
    can initialise itself.

    Parameters
    ----------
    data: dict -or- astropy.table.Table -or- path to astropy.table.Table
        if dict, should have following structure:
            'means': [nstars,6] float array_like
                the central estimates of star phase-space properties
            'covs': [nstars,6,6] float array_like
                the phase-space covariance matrices of stars
            'bg_lnols': [nstars] float array_like (opt.)
                the log overlaps of stars with whatever pdf describes
                the background distribution of stars.
        if table, see tabletool.build_data_dict_from_table to see
        table requirements.
    ncomps: int
        the number of components to be fitted to the data
    rdir: String {''}
        The directory in which all the data will be stored and accessed
        from
    pool: MPIPool object {None}
        the pool of threads to be passed into emcee
    init_memb_probs: [nstars, ngroups] array {None} [UNIMPLEMENTED]
        If some members are already known, the initialisation process
        could use this.
    init_comps: [ncomps] Component list
        Initial components around whose parameters we can initialise
        emcee walkers.
    inc_posterior: bool {False}
        Whether to scale the relative component amplitudes by their priors
    burnin: int {1000}
        The number of emcee steps for each burnin loop
    sampling_steps: int {5000}
        The number of emcee steps for sampling a Component's fit
    ignore_dead_comps: bool {False}
        DEPRECATED FOR NOW!!!
        order groupfitter to skip maximising if component has less than...
        2..? expected members
    Component: Implementation of AbstractComponent {SphereComponent}
        The class used to convert raw parametrisation of a model to
        actual model attributes.
    trace_orbit_func: function {None}
        A function to trace cartesian orbits through the Galactic potential.
        If left as None, will use traceorbit.trace_cartesian_orbit (base
        signature of any alternate function on this one)
    use_background: bool {False}
        Whether to incorporate a background density to account for stars
        that mightn't belong to any component. If this is true, then
        background overlaps should have been pre-calculated and stored in
        `data` under 'bg_lnols'
    use_box_background: bool {False}
        (New and unstable)
        Whether to use a variable, flat density to model the background
        density to account for stars that mightn't belong to any component.
        Currently intended use is that it will override `use_background`
    ignore_stable_comps: bool {False}
        Set to true if components that barely change should only be refitted
        every 5 iterations. Component stability is determined by inspecting
        whether the change in total star member count is less than 2% as
        compared to previous fit.
    optimisation_method: str {'emcee'}
        Optimisation method to be used in the maximisation step to fit
        the model. Default: emcee. Available: scipy.optimize.minimize with
        the Nelder-Mead method. Note that in case of the gradient descent,
        no chain is returned and meds and spans cannot be determined.
    nprocess_ncomp: bool {False}
        How many processes to use in the maximisation of ncomps with
        python's multiprocessing library in case Nelder-Mead is used.

    Return
    ------
    final_comps: [ncomps] list of synthesiser.Group objects
        the best fit for each component
    final_med_errs: [ncomps, npars, 3] array
        the median, -34 perc, +34 perc values of each parameter from
        each final sampling chain
    memb_probs: [nstars, ncomps] array
        membership probabilities

    Edit History
    ------------
    2020.11.16 TC: added use_box_background
    """
    # Tidying up input
    if not isinstance(data, dict):
        data = tabletool.build_data_dict_from_table(
                data, get_background_overlaps=use_background
        )
    if rdir == '':                      # Ensure results directory has a
        rdir = '.'                      # trailing '/'
    rdir = rdir.rstrip('/') + '/'
    if not os.path.exists(rdir):
        mkpath(rdir)
    if use_background:
        assert 'bg_lnols' in data.keys()
    use_bg_column = use_background or use_box_background

    # filenames
    init_comp_filename = 'init_comps.npy'

    # setting up some constants
    nstars = data['means'].shape[0]
    C_TOL = 0.5

    if optimisation_method == 'emcee':
        logging.info("Fitting {} groups with {} burnin steps with cap "
                     "of {} iterations".format(ncomps, burnin, max_em_iterations))
    else:
        logging.info("Fitting {} groups with {} method with cap of {} EM iterations.".format(ncomps, optimisation_method, max_em_iterations))

    #### PRINT OUT INPUT PARAMS FOR run_em.py ##########################
    # NOTE(review): debugging side effect — dumps inputs into the current
    # working directory on every call; consider removing for production.
    import pickle
    with open('input_data_for_em.pkl', 'wb') as h:
        pickle.dump([data, ncomps, init_memb_probs, init_comps], h)
    print('$$$$$$$ input_data_for_em.pkl written.')
    ####################################################################

    # INITIALISE RUN PARAMETERS
    print('## start running expectmax.fit_many_comps', init_comps)

    # If initialising with components then need to convert to emcee parameter lists
    if init_comps is not None:
        print('Initialised by components')
        logging.info('Initialised by components')
        all_init_pars = [ic.get_emcee_pars() for ic in init_comps]
        skip_first_e_step = False
        # Memberships are only used at this point to inform amplitude of components
        # If memberships are provided, use those
        if init_memb_probs is not None:
            memb_probs_old = init_memb_probs
            print('init_memb_probs was None, now it is set to', memb_probs_old)
        # Otherwise, we initialise memb_probs_old such that each component has an equal
        # amplitude. We do this by assuming each star is equal member of every component
        # (including background)
        else:
            logging.info('Initialising amplitudes to be equal')
            memb_probs_old = np.ones((nstars, ncomps+use_bg_column))\
                             / (ncomps+use_bg_column)
            print('memb_probs_old normalised to', memb_probs_old)

    # If initialising with membership probabilities, we need to skip first
    # expectation step, but make sure other values are iterable
    elif init_memb_probs is not None and init_comps is None: # MZ added and init_comps is None
        logging.info('Initialised by memberships')
        print('Initialised by memberships0')
        skip_first_e_step = True
        all_init_pars = ncomps * [None]
        init_comps = ncomps * [None]
        memb_probs_old = init_memb_probs

    # MZ
    # We need all_init_pars for scipy as a starting point
    # NOTE(review): this branch is unreachable — the first `if` above
    # already captures every case where init_comps is not None, so
    # `init_comps is not None` can never hold here. Kept for reference.
    elif init_memb_probs is not None and init_comps is not None:
        logging.info('Initialised by memberships')
        print('Initialised by memberships1')
        skip_first_e_step = True
        all_init_pars = np.array([c.get_emcee_pars() for c in init_comps])
        init_comps = ncomps * [None]
        memb_probs_old = init_memb_probs

    # If no initialisation provided, assume each star is equally probable to belong
    # to each component, but 0% likely to be part of the background
    # Currently only implemented blind initialisation for one component
    else:
        print('NOT initialised by comps and NOT initialised by membs')
        assert ncomps == 1, 'If no initialisation set, can only accept ncomp==1'
        logging.info('No specificed initialisation... assuming equal memberships')
        init_memb_probs = np.ones((nstars, ncomps)) / ncomps
        if use_bg_column:
            init_memb_probs = np.hstack((init_memb_probs, np.zeros((nstars,1))))
        memb_probs_old = init_memb_probs
        skip_first_e_step = True
        all_init_pars = ncomps * [None]
        init_comps = ncomps * [None]

    # Store the initial components if available
    if init_comps[0] is not None:
        Component.store_raw_components(rdir + init_comp_filename, init_comps)

    # Initialise values for upcoming iterations
    old_comps = init_comps
    all_init_pos = ncomps * [None]
    all_med_and_spans = ncomps * [None]
    all_converged = False
    stable_state = True         # used to track issues

    # Keep track of all fits for convergence checking
    list_prev_comps = []
    list_prev_memberships = []
    list_all_init_pos = []
    list_all_med_and_spans = []
    list_prev_bics = []

    # Keep track of ALL BICs, so that progress can be observed
    all_bics = []

    # Keep track of unstable components, which will require
    # extra iterations
    ref_counts = None
    if ignore_stable_comps:
        unstable_comps = np.array(ncomps * [True])
    else:
        unstable_comps = None

    logging.info("Search for previous iterations")

    # Look for previous iterations and update values as appropriate
    prev_iters = True
    iter_count = 0
    found_prev_iters = False
    while prev_iters:
        try:
            idir = rdir+"iter{:02}/".format(iter_count)
            memb_probs_old = np.load(idir + 'membership.npy')
            try:
                old_comps = Component.load_raw_components(idir + 'best_comps.npy')
            # End up here if components aren't loadable due to change in module
            # So we rebuild from chains.
            #!!! WARNING: This only seems to work with emcee fitting.
            except AttributeError:
                print('fit_many_comps AttributeError (WARNING: This only seems to work with emcee fitting.)')
                old_comps = ncomps * [None]
                for i in range(ncomps):
                    chain = np.load(idir + 'comp{}/final_chain.npy'.format(i))
                    lnprob = np.load(idir + 'comp{}/final_lnprob.npy'.format(i))
                    npars = len(Component.PARAMETER_FORMAT)
                    best_ix = np.argmax(lnprob)
                    best_pars = chain.reshape(-1, npars)[best_ix]
                    old_comps[i] = Component(emcee_pars=best_pars)
                    logging.info('Now start with calc_med_and_spans')
                    all_med_and_spans[i] = compfitter.calc_med_and_span(
                            chain, intern_to_extern=True, Component=Component,
                    )
            all_init_pars = [old_comp.get_emcee_pars()
                             for old_comp in old_comps]

            print('determine old_memb_probs here')
            old_overall_lnlike, old_memb_probs = \
                    get_overall_lnlikelihood(data, old_comps,
                                             inc_posterior=False,
                                             return_memb_probs=True,
                                             use_box_background=use_box_background)
            ref_counts = np.sum(old_memb_probs, axis=0)

            list_prev_comps.append(old_comps)
            list_prev_memberships.append(old_memb_probs)
            list_all_init_pos.append(all_init_pos)
            list_all_med_and_spans.append(all_med_and_spans)
            list_prev_bics.append(calc_bic(data, len(old_comps),
                                           lnlike=old_overall_lnlike,
                                           memb_probs=old_memb_probs))
            all_bics.append(list_prev_bics[-1])
            iter_count += 1
            found_prev_iters = True
        except IOError:
            logging.info("Managed to find {} previous iterations".format(
                iter_count
            ))
            print("Managed to find {} previous iterations".format(
                iter_count
            ))
            prev_iters = False

    # Until convergence is achieved (or max_iters is exceeded) iterate through
    # the Expectation and Maximisation stages
    print('Start EM algorithm')
    logging.info("MZ: Start EM algorithm")
    # TODO: put convergence checking at the start of the loop so restarting doesn't repeat an iteration
    while not all_converged and stable_state and iter_count < max_em_iterations:
        # Stable components are only refitted every 5th iteration
        ignore_stable_comps_iter = ignore_stable_comps and (iter_count % 5 != 0)

        idir = rdir+"iter{:02}/".format(iter_count)
        mkpath(idir)

        log_message('Iteration {}'.format(iter_count),
                    symbol='-', surround=True)
        if not ignore_stable_comps_iter:
            log_message('Fitting all {} components'.format(ncomps))
            unstable_comps = np.where(np.array(ncomps * [True]))
        else:
            log_message('Fitting the following unstable comps:')
            log_message('TC: maybe fixed?')
            log_message(str(np.arange(ncomps)[unstable_comps]))
            log_message('MZ: removed this line due to index error (unstable_comps too big number)')
            log_message(str(unstable_comps))

        print('EM: Expectation step')
        # EXPECTATION
        # Need to handle couple of side cases of initialising by memberships.
        if found_prev_iters:
            print('Expectation if found_prev_iters')
            logging.info("Using previously found memberships")
            memb_probs_new = memb_probs_old
            found_prev_iters = False
            skip_first_e_step = False       # Unset the flag to initialise with
                                            # memb probs
        elif skip_first_e_step:
            print('Expectation skip_first_e_step')
            logging.info("Using initialising memb_probs for first iteration")
            memb_probs_new = init_memb_probs
            skip_first_e_step = False
        else:
            print('Expectation else')
            memb_probs_new = expectation(data, old_comps, memb_probs_old,
                                         inc_posterior=inc_posterior,
                                         use_box_background=use_box_background)
        logging.info("Membership distribution:\n{}".format(
            memb_probs_new.sum(axis=0)
        ))
        np.save(idir+"membership.npy", memb_probs_new)

        # MAXIMISE
        print('EM: Maximisation step')
        new_comps, all_samples, _, all_init_pos, success_mask =\
            maximisation(data, ncomps=ncomps,
                         burnin_steps=burnin,
                         plot_it=True, pool=pool, convergence_tol=C_TOL,
                         memb_probs=memb_probs_new, idir=idir,
                         all_init_pars=all_init_pars,
                         all_init_pos=all_init_pos,
                         ignore_dead_comps=ignore_dead_comps,
                         trace_orbit_func=trace_orbit_func,
                         store_burnin_chains=store_burnin_chains,
                         unstable_comps=unstable_comps,
                         ignore_stable_comps=ignore_stable_comps_iter,
                         nthreads=nthreads,
                         optimisation_method=optimisation_method,
                         nprocess_ncomp=nprocess_ncomp,
                         )

        for i in range(ncomps):
            if i in success_mask:
                j = success_mask.index(i)
                if optimisation_method == 'emcee':
                    all_med_and_spans[i] = compfitter.calc_med_and_span(
                        all_samples[j], intern_to_extern=True,
                        Component=Component,
                    )
                else: # Nelder-Mead: no chain, so no meds/spans available
                    all_med_and_spans[i] = None
            # If component is stable, then it wasn't fit, so just duplicate
            # from last fit
            else:
                all_med_and_spans[i] = list_all_med_and_spans[-1][i]
                new_comps.insert(i, list_prev_comps[-1][i])
                all_init_pos.insert(i, list_all_init_pos[-1][i])

        Component.store_raw_components(idir + 'best_comps.npy', new_comps)
        np.save(idir + 'best_comps_bak.npy', new_comps)
        logging.info('DEBUG: new_comps length: {}'.format(len(new_comps)))

        # LOG RESULTS OF ITERATION
        print("About to log without and with posterior lnlikelihoods") #!!!MJI
        overall_lnlike = get_overall_lnlikelihood(data, new_comps,
                                                  old_memb_probs=memb_probs_new,
                                                  inc_posterior=False,
                                                  use_box_background=use_box_background)
        overall_lnposterior = get_overall_lnlikelihood(data, new_comps,
                                                       old_memb_probs=memb_probs_new,
                                                       inc_posterior=True,
                                                       use_box_background=use_box_background)
        bic = calc_bic(data, ncomps, overall_lnlike,
                       memb_probs=memb_probs_new,
                       Component=Component)

        logging.info("--- Iteration results --")
        logging.info("-- Overall likelihood so far: {} --".\
                     format(overall_lnlike))
        logging.info("-- Overall posterior so far: {} --". \
                     format(overall_lnposterior))
        logging.info("-- BIC so far: {} --". \
                     format(calc_bic(data, ncomps, overall_lnlike,
                                     memb_probs=memb_probs_new,
                                     Component=Component)))

        list_prev_comps.append(new_comps)
        list_prev_memberships.append(memb_probs_new)
        list_all_init_pos.append(all_init_pos)
        list_all_med_and_spans.append(all_med_and_spans)
        list_prev_bics.append(bic)
        all_bics.append(bic)

        if len(list_prev_bics) < min_em_iterations:
            all_converged = False
            print('len(list_prev_bics) < min_em_iterations')
        else:
            print('CHECK CONVERGENCE with compfitter.burnin_convergence')
            # Exploiting pre-existing burnin_convergence checker by applying
            # it to the BIC "chain"
            all_converged = compfitter.burnin_convergence(
                lnprob=np.expand_dims(list_prev_bics[-min_em_iterations:], axis=0),
                tol=bic_conv_tol, slice_size=int(min_em_iterations/2)
            )
        old_overall_lnlike = overall_lnlike
        log_message('Convergence status: {}'.format(all_converged),
                    symbol='-', surround=True)
        if not all_converged:
            logging.info('BIC not converged')
            np.save(rdir + 'all_bics.npy', all_bics)

            # Plot all bics to date
            plt.clf()
            plt.plot(all_bics,
                     label='All {} BICs'.format(len(all_bics)))
            plt.vlines(np.argmin(all_bics), linestyles='--', color='red',
                       ymin=plt.ylim()[0], ymax=plt.ylim()[1],
                       label='best BIC {:.2f} | iter {}'.format(np.min(all_bics),
                                                                np.argmin(all_bics)))
            plt.legend(loc='best')
            plt.title(rdir)
            plt.savefig(rdir + 'all_bics.pdf')

        # Check individual components stability
        if (iter_count % 5 == 0 and ignore_stable_comps):
            memb_probs_new = expectation(data, new_comps, memb_probs_new,
                                         inc_posterior=inc_posterior,
                                         use_box_background=use_box_background)
            log_message('Orig ref_counts {}'.format(ref_counts))
            unstable_comps, ref_counts = check_comps_stability(memb_probs_new,
                                                               unstable_comps,
                                                               ref_counts,
                                                               using_bg=use_bg_column)
            log_message('New memb counts: {}'.format(memb_probs_new.sum(axis=0)))
            log_message('Unstable comps: {}'.format(unstable_comps))
            log_message('New ref_counts {}'.format(ref_counts))

        # Check stability, but only affect run after sufficient iterations to
        # settle
        temp_stable_state = check_stability(data, new_comps, memb_probs_new,
                                            use_box_background=use_box_background)
        logging.info('Stability: {}'.format(temp_stable_state))
        if iter_count > 10:
            stable_state = temp_stable_state

        # only update if we're about to iterate again
        if not all_converged:
            old_comps = new_comps
            memb_probs_old = memb_probs_new

        iter_count += 1

    logging.info("CONVERGENCE COMPLETE")
    np.save(rdir + 'bic_list.npy', list_prev_bics)

    # Plot final few BICs
    plt.clf()
    nbics = len(list_prev_bics)
    start_ix = iter_count - nbics
    plt.plot(range(start_ix, iter_count), list_prev_bics,
             label='Final {} BICs'.format(len(list_prev_bics)))
    # FIX: use 'linestyles' (keyword used by the earlier vlines call above);
    # previously this call used the singular 'linestyle'.
    plt.vlines(start_ix + np.argmin(list_prev_bics), linestyles='--', color='red',
               ymin=plt.ylim()[0], ymax=plt.ylim()[1],
               label='best BIC {:.2f} | iter {}'.format(np.min(list_prev_bics),
                                                        start_ix+np.argmin(list_prev_bics)))
    plt.legend(loc='best')
    plt.title(rdir)
    plt.savefig(rdir + 'bics.pdf')

    best_bic_ix = np.argmin(list_prev_bics)
    # Since len(list_prev_bics) is capped, need to count backwards from iter_count
    best_iter = iter_count - (len(list_prev_bics) - best_bic_ix)
    logging.info('Picked iteration: {}'.format(best_iter))
    logging.info('With BIC: {}'.format(list_prev_bics[best_bic_ix]))

    log_message('EM Algorithm finished', symbol='*')

    final_best_comps = list_prev_comps[best_bic_ix]
    final_memb_probs = list_prev_memberships[best_bic_ix]
    best_all_init_pos = list_all_init_pos[best_bic_ix]
    final_med_and_spans = list_all_med_and_spans[best_bic_ix]

    log_message('Storing final result', symbol='-', surround=True)
    final_dir = rdir+'final/'
    mkpath(final_dir)

    np.save(final_dir+'final_membership.npy', final_memb_probs)
    logging.info('Membership distribution:\n{}'.format(
        final_memb_probs.sum(axis=0)
    ))

    # Save membership fits file
    # BUG(review): `self` is undefined inside this module-level function, so
    # this call always raises NameError and only the warning below is ever
    # logged. The exception clause is narrowed from a bare `except:` so
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        tabletool.construct_an_astropy_table_with_gaia_ids_and_membership_probabilities(self.fit_pars['data_table'], final_memb_probs, final_best_comps, os.path.join(final_dir, 'final_memberships_%d.fits'%len(final_best_comps)), get_background_overlaps=True, stellar_id_colname = self.fit_pars['stellar_id_colname'])
    except Exception:
        logging.info("[WARNING] Couldn't print membership.fits file. Is source_id available?")

    # SAVE FINAL RESULTS IN MAIN SAVE DIRECTORY
    Component.store_raw_components(final_dir+'final_comps.npy', final_best_comps)
    np.save(final_dir+'final_comps_bak.npy', final_best_comps)
    np.save(final_dir+'final_med_and_spans.npy', final_med_and_spans)

    # Save components in fits file
    tabcomps = Component.convert_components_array_into_astropy_table(final_best_comps)
    tabcomps.write(os.path.join(final_dir, 'final_comps_%d.fits'%len(final_best_comps)), overwrite=True)

    overall_lnlike = get_overall_lnlikelihood(
            data, final_best_comps, inc_posterior=False,
            use_box_background=use_box_background,
    )
    overall_lnposterior = get_overall_lnlikelihood(
            data, final_best_comps, inc_posterior=True,
            use_box_background=use_box_background,
    )
    bic = calc_bic(data, ncomps, overall_lnlike,
                   memb_probs=final_memb_probs, Component=Component)
    logging.info("Final overall lnlikelihood: {}".format(overall_lnlike))
    logging.info("Final overall lnposterior: {}".format(overall_lnposterior))
    logging.info("Final BIC: {}".format(bic))

    np.save(final_dir+'likelihood_post_and_bic.npy', (overall_lnlike,
                                                      overall_lnposterior,
                                                      bic))

    logging.info("FINISHED SAVING")
    logging.info("Best fits:\n{}".format(
        [fc.get_pars() for fc in final_best_comps]
    ))
    logging.info("Stars per component:\n{}".format(
        final_memb_probs.sum(axis=0)
    ))
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    logging.info("Memberships: \n{}".format(
        (final_memb_probs*100).astype(int)
    ))

    # If components aren't super great, log a message, but return whatever we
    # get.
    if not stable_state:
        log_message('BAD RUN TERMINATED (not stable_state)', symbol='*', surround=True)

    logging.info(50*'=')

    return final_best_comps, np.array(final_med_and_spans), final_memb_probs
| mikeireland/chronostar | chronostar/expectmax.py | Python | mit | 73,596 | [
"Gaussian"
] | 73fc9717ff2fc33cda34118e7b948e92b1b7aa831b5fb3362a1e6d6e1ccaa7c6 |
# Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.
# Generator script: writes ../fdint/dgfd.pyx, the Cython wrappers for the
# first derivatives of the generalized Fermi-Dirac integrals at the
# half-integer orders k/2 for k in {-1, 1, 3, 5}.
import os
import sys

# Resolve the output path relative to this script's location
fpath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                     '../fdint/dgfd.pyx'))
with open(fpath, 'w') as f:
    f.write("""# Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.
# This file was generated by `scripts/gen_dgfd_pyx.py`.
# Do not edit this file directly, or your changes will be lost.
'''
First derivatives of the generalized Fermi-Dirac integrals.
'''
""")
    f.write('from fdint cimport _fdint\n')
    f.write('import numpy\n')
    # FIX: xrange is Python 2 only (NameError under Python 3);
    # range behaves identically for this small, fixed iteration.
    for i in range(-1, 6, 2):
        # Encode the half-integer order in the function name, e.g. -1 -> 'm1'
        k2 = str(i).replace('-','m')
        f.write('''
def dgfd{k2}h(phi, beta, out=None):
    cdef int num
    if isinstance(phi, numpy.ndarray):
        num = phi.shape[0]
        assert isinstance(beta, numpy.ndarray) and beta.shape[0] == num
        if out is None:
            out = numpy.empty(num)
        else:
            assert isinstance(out, numpy.ndarray) and out.shape[0] == num
        _fdint.vdgfd{k2}h(phi, beta, out)
        return out
    else:
        assert not isinstance(beta, numpy.ndarray)
        return _fdint.dgfd{k2}h(phi, beta)
'''.format(k2=k2))
| scott-maddox/fdint | scripts/gen_dgfd_pyx.py | Python | bsd-3-clause | 1,395 | [
"DIRAC"
] | 2eb32b104ebbc0e847ac1bad9767f5fd648b508d946ae29c179b56331a41f7f6 |
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
    """A single Ansible Galaxy role: lookup, download, install and removal.

    A role may come from the Galaxy API (by name), from a direct URL to a
    tar archive, from a local tar file, or from an SCM checkout (git/hg).
    The on-disk location is resolved against the configured roles paths.
    """
    # SCM types that RoleRequirement.scm_archive_role can archive
    SUPPORTED_SCMS = set(['git', 'hg'])
    # candidate metadata file names inside an installed role / archive
    META_MAIN = (os.path.join('meta', 'main.yml'), os.path.join('meta', 'main.yaml'))
    # marker file recording version + install date of a Galaxy install
    META_INSTALL = os.path.join('meta', '.galaxy_install_info')
    ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
    def __init__(self, galaxy, api, name, src=None, version=None, scm=None, path=None):
        # lazily-populated caches for the metadata / install-info properties
        self._metadata = None
        self._install_info = None
        self._validate_certs = not context.CLIARGS['ignore_certs']
        display.debug('Validate TLS certificates: %s' % self._validate_certs)
        self.galaxy = galaxy
        self.api = api
        self.name = name
        self.version = version
        # source defaults to the role name (Galaxy lookup by name)
        self.src = src or name
        self.scm = scm
        if path is not None:
            if not path.endswith(os.path.join(os.path.sep, self.name)):
                path = os.path.join(path, self.name)
            else:
                # Look for a meta/main.ya?ml inside the potential role dir in case
                # the role name is the same as parent directory of the role.
                #
                # Example:
                # ./roles/testing/testing/meta/main.yml
                for meta_main in self.META_MAIN:
                    if os.path.exists(os.path.join(path, name, meta_main)):
                        path = os.path.join(path, self.name)
                        break
            self.path = path
        else:
            # use the first path by default
            self.path = os.path.join(galaxy.roles_paths[0], self.name)
            # create list of possible paths
            self.paths = [x for x in galaxy.roles_paths]
            self.paths = [os.path.join(x, self.name) for x in self.paths]
    def __repr__(self):
        """
        Returns "rolename (version)" if version is not null
        Returns "rolename" otherwise
        """
        if self.version:
            return "%s (%s)" % (self.name, self.version)
        else:
            return self.name
    def __eq__(self, other):
        # NOTE(review): equality is by name only; version is ignored
        return self.name == other.name
    @property
    def metadata(self):
        """
        Returns role metadata
        """
        if self._metadata is None:
            for meta_main in self.META_MAIN:
                meta_path = os.path.join(self.path, meta_main)
                if os.path.isfile(meta_path):
                    try:
                        f = open(meta_path, 'r')
                        self._metadata = yaml.safe_load(f)
                    except Exception:
                        display.vvvvv("Unable to load metadata for %s" % self.name)
                        return False
                    finally:
                        # NOTE(review): if open() itself raises, `f` is unbound
                        # and this close raises NameError — pre-existing behavior
                        f.close()
        return self._metadata
    @property
    def install_info(self):
        """
        Returns role install info
        """
        if self._install_info is None:
            info_path = os.path.join(self.path, self.META_INSTALL)
            if os.path.isfile(info_path):
                try:
                    f = open(info_path, 'r')
                    self._install_info = yaml.safe_load(f)
                except Exception:
                    display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
                    return False
                finally:
                    # NOTE(review): same unbound-`f` hazard as in `metadata`
                    f.close()
        return self._install_info
    def _write_galaxy_install_info(self):
        """
        Writes a YAML-formatted file to the role's meta/ directory
        (named .galaxy_install_info) which contains some information
        we can use later for commands like 'list' and 'info'.
        """
        info = dict(
            version=self.version,
            install_date=datetime.datetime.utcnow().strftime("%c"),
        )
        if not os.path.exists(os.path.join(self.path, 'meta')):
            os.makedirs(os.path.join(self.path, 'meta'))
        info_path = os.path.join(self.path, self.META_INSTALL)
        with open(info_path, 'w+') as f:
            try:
                self._install_info = yaml.safe_dump(info, f)
            except Exception:
                return False
        return True
    def remove(self):
        """
        Removes the specified role from the roles path.
        There is a sanity check to make sure there's a meta/main.yml file at this
        path so the user doesn't blow away random directories.
        """
        if self.metadata:
            try:
                rmtree(self.path)
                return True
            except Exception:
                pass
        return False
    def fetch(self, role_data):
        """
        Downloads the archived role to a temp location based on role data

        Returns the temp file path on success, False on download failure.
        """
        if role_data:
            # first grab the file and save it to a temp location
            if "github_user" in role_data and "github_repo" in role_data:
                archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
            else:
                archive_url = self.src
            display.display("- downloading role from %s" % archive_url)
            try:
                url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent())
                temp_file = tempfile.NamedTemporaryFile(delete=False)
                # stream the archive to disk in chunks
                data = url_file.read()
                while data:
                    temp_file.write(data)
                    data = url_file.read()
                temp_file.close()
                return temp_file.name
            except Exception as e:
                display.error(u"failed to download the file: %s" % to_text(e))
        return False
    def install(self):
        # Resolve the role source to a local tar archive (tmp_file), then
        # extract it into self.path and record install info.
        if self.scm:
            # create tar file from scm url
            tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=context.CLIARGS['keep_scm_meta'], **self.spec)
        elif self.src:
            if os.path.isfile(self.src):
                tmp_file = self.src
            elif '://' in self.src:
                role_data = self.src
                tmp_file = self.fetch(role_data)
            else:
                # plain role name: resolve via the Galaxy API
                role_data = self.api.lookup_role_by_name(self.src)
                if not role_data:
                    raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.api.api_server))
                if role_data.get('role_type') == 'APP':
                    # Container Role
                    display.warning("%s is a Container App role, and should only be installed using Ansible "
                                    "Container" % self.name)
                role_versions = self.api.fetch_role_related('versions', role_data['id'])
                if not self.version:
                    # convert the version names to LooseVersion objects
                    # and sort them to get the latest version. If there
                    # are no versions in the list, we'll grab the head
                    # of the master branch
                    if len(role_versions) > 0:
                        loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
                        try:
                            loose_versions.sort()
                        except TypeError:
                            raise AnsibleError(
                                'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. '
                                'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to '
                                'install.' % ', '.join([v.vstring for v in loose_versions])
                            )
                        self.version = to_text(loose_versions[-1])
                    elif role_data.get('github_branch', None):
                        self.version = role_data['github_branch']
                    else:
                        self.version = 'master'
                elif self.version != 'master':
                    if role_versions and to_text(self.version) not in [a.get('name', None) for a in role_versions]:
                        raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
                                                                                                                                         self.name,
                                                                                                                                         role_versions))
                # check if there's a source link for our role_version
                for role_version in role_versions:
                    if role_version['name'] == self.version and 'source' in role_version:
                        self.src = role_version['source']
                tmp_file = self.fetch(role_data)
        else:
            raise AnsibleError("No valid role data found")
        if tmp_file:
            display.debug("installing from %s" % tmp_file)
            if not tarfile.is_tarfile(tmp_file):
                raise AnsibleError("the downloaded file does not appear to be a valid tar archive.")
            else:
                role_tar_file = tarfile.open(tmp_file, "r")
                # verify the role's meta file
                meta_file = None
                members = role_tar_file.getmembers()
                # next find the metadata file
                for member in members:
                    for meta_main in self.META_MAIN:
                        if meta_main in member.name:
                            # Look for parent of meta/main.yml
                            # Due to possibility of sub roles each containing meta/main.yml
                            # look for shortest length parent
                            meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
                            if not meta_file:
                                archive_parent_dir = meta_parent_dir
                                meta_file = member
                            else:
                                if len(meta_parent_dir) < len(archive_parent_dir):
                                    archive_parent_dir = meta_parent_dir
                                    meta_file = member
                if not meta_file:
                    raise AnsibleError("this role does not appear to have a meta/main.yml file.")
                else:
                    try:
                        self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
                    except Exception:
                        raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
                # we strip off any higher-level directories for all of the files contained within
                # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other
                # hand, does not have a parent directory at all.
                installed = False
                while not installed:
                    display.display("- extracting %s to %s" % (self.name, self.path))
                    try:
                        if os.path.exists(self.path):
                            if not os.path.isdir(self.path):
                                raise AnsibleError("the specified roles path exists and is not a directory.")
                            elif not context.CLIARGS.get("force", False):
                                raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
                            else:
                                # using --force, remove the old path
                                if not self.remove():
                                    raise AnsibleError("%s doesn't appear to contain a role.\n  please remove this directory manually if you really "
                                                       "want to put the role here." % self.path)
                        else:
                            os.makedirs(self.path)
                        # now we do the actual extraction to the path
                        for member in members:
                            # we only extract files, and remove any relative path
                            # bits that might be in the file for security purposes
                            # and drop any containing directory, as mentioned above
                            if member.isreg() or member.issym():
                                n_member_name = to_native(member.name)
                                n_archive_parent_dir = to_native(archive_parent_dir)
                                n_parts = n_member_name.replace(n_archive_parent_dir, "", 1).split(os.sep)
                                n_final_parts = []
                                for n_part in n_parts:
                                    if n_part != '..' and '~' not in n_part and '$' not in n_part:
                                        n_final_parts.append(n_part)
                                member.name = os.path.join(*n_final_parts)
                                role_tar_file.extract(member, to_native(self.path))
                        # write out the install info file for later use
                        self._write_galaxy_install_info()
                        installed = True
                    except OSError as e:
                        # permission problem: try the next configured roles path
                        error = True
                        if e.errno == errno.EACCES and len(self.paths) > 1:
                            current = self.paths.index(self.path)
                            if len(self.paths) > current:
                                self.path = self.paths[current + 1]
                                error = False
                        if error:
                            raise AnsibleError("Could not update files in %s: %s" % (self.path, to_native(e)))
                # return the parsed yaml metadata
                display.display("- %s was installed successfully" % str(self))
                if not (self.src and os.path.isfile(self.src)):
                    try:
                        os.unlink(tmp_file)
                    except (OSError, IOError) as e:
                        display.warning(u"Unable to remove tmp file (%s): %s" % (tmp_file, to_text(e)))
                return True
        return False
    @property
    def spec(self):
        """
        Returns role spec info
        {
        'scm': 'git',
        'src': 'http://git.example.com/repos/repo.git',
        'version': 'v1.0',
        'name': 'repo'
        }
        """
        return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
| 2ndQuadrant/ansible | lib/ansible/galaxy/role.py | Python | gpl-3.0 | 16,264 | [
"Brian",
"Galaxy"
] | b60fde4b79dd94672656d719e82ad13dd60d15f3c17672da9a20977d3e839ad1 |
#import warnings
#warnings.filterwarnings("ignore", message="using a non-integer number instead of an integer will result in an error in the future")
def template_names():
    """Return the paths of all MILES 'Mun*' model FITS templates on disk."""
    import glob
    return glob.glob('miles_models/Mun*.fits')
def choose_templates(templates, age_lim = 20.0, max_nonzero = 5):
    """Parse MILES template filenames into a table and assign random weights.

    Each filename encodes spectral range, IMF type/slope, metallicity and age;
    templates older than `age_lim` are dropped, then between 1 and
    `max_nonzero` templates receive random (normalized) nonzero weights.
    """
    import numpy as np
    import astropy.table as table
    import SPaCT
    rows = []
    for fname in templates:
        # strip directory and extension, then slice the encoded fields
        stem = fname.rstrip('.fits').split('/')[1]
        srange = stem[0]
        imf_type = stem[1:3]
        imf_slope = float(stem[3:7])
        metallicity = SPaCT.plusminus(stem[8]) * float(stem[9:13])
        age = float(stem[14:])
        rows.append([stem, srange, imf_type, imf_slope, metallicity, age])
    ssps = table.Table(map(list, zip(*rows)),
                       names = ['name', 'spectral range', 'IMF type', 'IMF slope', 'Z', 't'])
    ssps = ssps[ssps['t'] <= age_lim]
    # draw which templates are nonzero, then random weights for exactly those
    chosen = np.random.choice(ssps['name'], np.random.randint(1, max_nonzero + 1), replace = False)
    mask = [1. if nm in chosen else 0. for nm in ssps['name']]
    weights = np.random.rand(len(ssps['name'])) * mask
    weights /= weights.sum()
    ssps.add_column(table.Column(name = 'weight', data = weights))
    return ssps
def generate_spectrum(ssps):
    '''
    Generate a pristine (noise-free) spectrum as the weighted sum of the SSP
    templates listed in the astropy table `ssps` (columns 'name', 'weight').

    Returns (clean_spectrum, l_full): the median-normalized summed spectrum
    and its wavelength array, both on the common MILES wavelength grid.
    '''
    import numpy as np
    from astropy.io import fits
    # all MILES templates share one wavelength solution; read the header once
    hdr = fits.open('miles_models/' + ssps['name'][0] + '.fits')[0].header
    CRVAL1, CDELT1, NAXIS1 = hdr['CRVAL1'], hdr['CDELT1'], hdr['NAXIS1']
    all_templates = np.empty([len(ssps['name']), NAXIS1])
    for i in range(len(ssps['name'])):
        # BUG FIX: the data were previously always read from ssps['name'][0],
        # so the "weighted sum" only ever used the first template's spectrum
        all_templates[i] = ssps['weight'][i] * \
            fits.open('miles_models/' + ssps['name'][i] + '.fits')[0].data
    clean_spectrum = all_templates.sum(axis = 0)
    l_full = CRVAL1 + np.linspace(0., NAXIS1 * CDELT1, NAXIS1)
    clean_spectrum /= np.median(clean_spectrum)
    return clean_spectrum, l_full
def generate_LOSVD(spectrum, v_res, moments, plots = False):
    '''
    Convolve `spectrum` with a Gaussian-like filter, except with nonzero higher-order moments.
    This reproduces a velocity field that pPXF will fit
    NOTE: nonzero higher-order moments not supported at this time
    NOTE: nonzero m1 is not supported (and is a very bad idea) - always use redshift routine to apply this!

    Parameters: `v_res` is the instrument velocity resolution (km/s/pixel);
    `moments` is the 6-element Gauss-Hermite list [m1, m2, h3, h4, h5, h6].
    Returns the convolved spectrum.
    '''
    import numpy as np
    import scipy.ndimage as ndimage
    import matplotlib.pyplot as plt
    # unpack the Gauss-Hermite moments (mean, dispersion, h3..h6)
    m1, m2, m3, m4, m5, m6 = moments
    if m1 != 0.:
        # BUG FIX: `a` was referenced before assignment (NameError);
        # initialize it so the confirmation loop actually runs
        a = None
        while a not in ['y', 'n']:
            a = raw_input('Warning! non-zero-centered LOSVDs are not recommended! Proceed? (y/n)')
        if a == 'n':
            exit()
    if moments[2:] != [0., 0., 0., 0.]:
        # BUG FIX: the original message stated the opposite of the check
        raise ValueError('nonzero higher-order G-H moments are not supported!')
    else:
        # convert the dispersion (km/s) into pixels of width v_res
        spectrum_LOSVD = ndimage.gaussian_filter1d(spectrum, m2/v_res)
    if plots == True:
        plt.figure(figsize = (6, 4))
        plt.plot(spectrum, c = 'b', label = 'rest-frame')
        plt.plot(spectrum_LOSVD, c = 'g', label = 'LOSVD spectrum')
        plt.plot(np.abs(spectrum - spectrum_LOSVD), label = 'residual')
        plt.legend(loc = 'best')
        plt.show()
    return spectrum_LOSVD
def redshift_spectrum(l_0, z = None, dz = None):
#redshift a spectrum randomly, and return the new wavelength array, a "real" redshift, and a redshift measurement error
import numpy as np
if z == None:
z = np.random.uniform(0.01, 0.025)
if dz == None:
dz = np.sign(np.random.random() - 0.5) * (10**(np.random.uniform(-1.0, -0.5))) * z #random error beween 1% and 10%, equal probabilities of + and -
#print z, dz
l_1 = l_0 * (1. + z + dz)
return z, dz, l_1
def adjust_FWHM(sharp_spectrum, res_old, res_new, FWHM_old, FWHM_new):
    """Degrade a spectrum from one instrumental FWHM to a coarser one.

    Convolves with a Gaussian whose width is the quadrature difference of
    the two instrumental FWHMs, expressed in pixels of `res_old`.
    `res_new` is accepted for interface symmetry but not used here.
    """
    import numpy as np
    import scipy.ndimage as ndimage
    assert FWHM_new >= FWHM_old
    # quadrature difference of the two FWHMs (Angstroms)
    fwhm_diff = np.sqrt(FWHM_new**2. - FWHM_old**2.)
    # FWHM -> Gaussian sigma (factor 2.355), Angstroms -> pixels
    sigma_pix = fwhm_diff / 2.355 / res_old
    return ndimage.gaussian_filter1d(sharp_spectrum, sigma_pix)
def downsample_spectrum(l_dense, dense_spectrum, l_sparse):
    """Resample a densely sampled spectrum onto a sparser wavelength grid.

    Builds a linear interpolant of `dense_spectrum` over `l_dense` and
    evaluates it at `l_sparse`. Returns (l_sparse, resampled_spectrum).
    """
    import scipy.interpolate as interp
    interpolant = interp.interp1d(l_dense, dense_spectrum, kind = 'linear')
    return l_sparse, interpolant(l_sparse)
def noisify_ifu(spectrum, n, SNR):
    """Build a fake (n+1)-fiber IFU: a noisy science fiber plus n sky fibers.

    Row 0 is `spectrum` with Gaussian noise of relative scale 1/SNR added;
    rows 1..n are pure noise scaled by the same spectrum.
    Returns (ifu, galaxy_noise).
    """
    import numpy as np
    npix = len(spectrum)
    # pure-noise fibers, modulated by the (noise-free) galaxy spectrum
    sky_noise = np.random.normal(loc = 0.0, scale = 1. / SNR, size = (n, npix))
    ifu = np.vstack((spectrum, sky_noise * np.tile(spectrum, reps = (n, 1))))
    # apply the same noise model to the science fiber (row 0)
    galaxy_noise = np.random.normal(loc = 0.0, scale = 1. / SNR, size = npix) * spectrum
    ifu[0] = spectrum + galaxy_noise
    return ifu, galaxy_noise
def population_sum_models(ssps):
    """Weighted sum of the *true* population templates ('weight' column).

    Returns (real_spectrum, l_full): the median-normalized summed spectrum
    and its wavelength array on the common MILES grid.
    """
    import numpy as np
    from astropy.io import fits
    # all MILES templates share one wavelength solution; read the header once
    hdr = fits.open('miles_models/' + ssps['name'][0] + '.fits')[0].header
    CRVAL1, CDELT1, NAXIS1 = hdr['CRVAL1'], hdr['CDELT1'], hdr['NAXIS1']
    all_templates = np.empty([len(ssps['name']), NAXIS1])
    for i in range(len(ssps['name'])):
        # BUG FIX: the data were previously always read from ssps['name'][0]
        all_templates[i] = ssps['weight'][i] * \
            fits.open('miles_models/' + ssps['name'][i] + '.fits')[0].data
    real_spectrum = all_templates.sum(axis = 0)
    l_full = CRVAL1 + np.linspace(0., NAXIS1 * CDELT1, NAXIS1)
    real_spectrum /= np.median(real_spectrum)
    return real_spectrum, l_full
def population_sum_fit(ssps):
    """Weighted sum of the *fitted* templates ('best-fit weights' column).

    Returns (derived_spectrum, l_full): the median-normalized summed
    spectrum and its wavelength array on the common MILES grid.
    """
    import numpy as np
    from astropy.io import fits
    # all MILES templates share one wavelength solution; read the header once
    hdr = fits.open('miles_models/' + ssps['name'][0] + '.fits')[0].header
    CRVAL1, CDELT1, NAXIS1 = hdr['CRVAL1'], hdr['CDELT1'], hdr['NAXIS1']
    all_templates = np.empty([len(ssps['name']), NAXIS1])
    for i in range(len(ssps['name'])):
        # BUG FIX: the data were previously always read from ssps['name'][0]
        all_templates[i] = ssps['best-fit weights'][i] * \
            fits.open('miles_models/' + ssps['name'][i] + '.fits')[0].data
    derived_spectrum = all_templates.sum(axis = 0)
    l_full = CRVAL1 + np.linspace(0., NAXIS1 * CDELT1, NAXIS1)
    derived_spectrum /= np.median(derived_spectrum)
    return derived_spectrum, l_full
def pPXF_summary_plots(ssps, instrument_info, pp, lam_sparse, vel, verbose = False):
    """Plot a side-by-side comparison of the fitted vs. true SSP weights.

    `ssps` must carry both 'weight' (truth) and 'best-fit weights' columns;
    `pp` is the pPXF result object. Large blocks of spectrum-comparison
    plotting code below are disabled (kept inside triple-quoted strings).
    """
    #make sure `vel` is the sum of the redshift and the kinematic velocity fit
    import numpy as np
    import matplotlib.pyplot as plt
    import astropy.table as table
    import colorpy.ciexyz as ciexyz
    import colorpy.colormodels as cmodels
    import warnings
    # speed of light in km/s
    c = 299792.458
    if verbose == True:
        print 'non-zero fit templates:'
        print ssps[ssps['best-fit weights'] != 0.]['Z', 't', 'best-fit weights']
        print 'non-zero real solution templates:'
        print ssps[ssps['weight'] != 0.]['Z', 't', 'weight']
    #first plot the original and resultant populations
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex = True, figsize = (8, 6))
    ax1.set_title('fit')
    a = ax1.scatter(ssps['Z'], ssps['t'], c = pp.weights, cmap = 'gnuplot', s = 40, vmin = 0.0, vmax = 1.0, edgecolor = 'grey')
    ax2.set_title('reality')
    ax2.scatter(ssps['Z'], ssps['t'], c = ssps['weight'], cmap = 'gnuplot', s = 40, vmin = 0.0, vmax = 1.0, edgecolor = 'grey')
    plt.colorbar(a)
    plt.suptitle('population fit comparison', size = 16)
    plt.show()
    #now plot the result with the input
    instrument_lam_lims = (instrument_info['CRVAL1'], instrument_info['CRVAL1'] + instrument_info['NAXIS1'] * instrument_info['CDELT1'])
    # prominent optical absorption/emission lines (name, rest wavelength in A)
    lines = [
        ['Ca H', 3968.5], ['Ca K', 3933.7], ['H-alpha', 6562.8], ['H-beta', 4861.], ['Mg I', 5175.], ['Ca I', 4307.]
    ]
    #plt.figure(figsize = (10, 6))
    #ax = plt.subplot(111)
    #ax.plot(lam_sparse, pp.bestfit)
    print 'vel:', vel, 'km/s'
    #now plot relevant spectral lines
    # NOTE(review): the two triple-quoted blocks below are disabled plotting
    # code, intentionally left as dead strings — do not reformat them
    '''
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category = DeprecationWarning)
        for i, line in enumerate(lines):
            line_c = cmodels.irgb_string_from_xyz(ciexyz.xyz_from_wavelength(line[1]/10.))
            #print line_c
            ax.axvline(line[1] * (1. + vel / c), color = line_c)
            ax.annotate(line[0], xy = (line[1], 1.2), xytext = (line[1]+10., 1.1 - 0.1 * i%2), size = 14)
    '''
    #plt.show()
    '''
    real_spectrum, l_full = population_sum_models(ssps = ssps)
    derived_spectrum, l_full = population_sum_fit(ssps = ssps)
    plt.figure(figsize = (10, 6))
    ax1 = plt.subplot(211)
    #first at full resolution
    ax1real = ax1.plot(l_full, real_spectrum, c = 'g', label = 'Reality', linewidth = 0.25)
    ax1der = ax1.plot(l_full, derived_spectrum, c = 'b', linestyle = '--', label = 'Fit', linewidth = 0.25)
    for val in instrument_lam_lims:
        ax1.axvline(val, c = 'r', linestyle = ':')
    ax1.set_title('Full-Resolution spectra', size = 16)
    ax1_1 = ax1.twinx()
    ax1err = ax1_1.plot(l_full, np.abs(real_spectrum - derived_spectrum), linewidth = 0.25, c = 'tomato', label = 'Error')
    for tl in ax1_1.get_yticklabels(): tl.set_color('tomato')
    ax1_l = ax1_1.legend(ax1real + ax1der + ax1err, [l.get_label() for l in (ax1real + ax1der + ax1err)], loc = 'best')
    ax1_l.set_zorder(5)
    ax2 = plt.subplot(212, sharex = ax1)
    ax2.set_title('Downsampled spectra', size = 16)
    ax2.set_xlabel(r'$\lambda[\AA]$', size = 16)
    #now after blurring and downsampling
    l_sparse = np.linspace(instrument_info['CRVAL1'], instrument_info['CRVAL1'] + instrument_info['NAXIS1'] * instrument_info['CDELT1'], instrument_info['NAXIS1'])
    l_sparse, sparse_spectrum_real = downsample_spectrum(l_dense = l_full, dense_spectrum = real_spectrum, l_sparse = l_sparse) #this accomplishes both downsampling and paring!!
    l_sparse, sparse_spectrum_der = downsample_spectrum(l_dense = l_full, dense_spectrum = derived_spectrum, l_sparse = l_sparse) #this accomplishes both downsampling and paring!!
    ax2real = ax2.plot(l_sparse, sparse_spectrum_real, c = 'g', label = 'Reality', linewidth = 0.25)
    ax2der = ax2.plot(l_sparse, sparse_spectrum_der, c = 'b', label = 'Fit', linewidth = 0.25, linestyle = '--')
    for val in instrument_lam_lims:
        ax2.axvline(val, c = 'r', linestyle = ':')
    ax2.set_title('Downsampled spectra', size = 16)
    ax2.set_xlabel(r'$\lambda[\AA]$', size = 16)
    ax2_1 = ax2.twinx()
    ax2err = ax2_1.plot(l_sparse, np.abs(sparse_spectrum_real - sparse_spectrum_der), linewidth = 0.25, c = 'tomato', label = 'Error')
    for tl in ax2_1.get_yticklabels(): tl.set_color('tomato')
    ax2_l = ax2_1.legend(ax2real + ax2der + ax2err, [l.get_label() for l in (ax2real + ax2der + ax2err)], loc = 'best')
    ax2_l.set_zorder(5)
    plt.tight_layout()
    plt.show()
    '''
def simulate_noise(sparse_spectrum, SNR, n_skyfiber_range = [1, 20, 3]):
    '''
    generate synthetic noise spectra for a given input spectrum, and test the required number of sky fibers (with similar noise profiles) to accurately get the SNR

    Plots one normalized histogram of the recovered per-pixel SNR for each
    tested sky-fiber count, with the nominal SNR marked as a vertical line.
    `n_skyfiber_range` is [start, stop, step] for the fiber counts tested.
    NOTE(review): the mutable default list is shared across calls — harmless
    here because it is never mutated, but worth confirming.
    '''
    import numpy as np
    import SPaCT
    import matplotlib.pyplot as plt
    plt.figure(figsize = (6, 4))
    for n_skyfibers in range(n_skyfiber_range[0], n_skyfiber_range[1] + 1, n_skyfiber_range[2]):
        # build a fake IFU and recover the per-pixel SNR from its sky fibers
        ifu, galaxy_noise = noisify_ifu(sparse_spectrum, n = n_skyfibers, SNR = SNR)
        fiberlist = range(1, n_skyfibers + 1)
        SNR_calc = ifu[0] / SPaCT.noise_edgefibers(ifu, width = 3, fiberlist = fiberlist, verbose = False)
        # draw the histogram as a step curve (bin edges doubled up)
        bins, edges = np.histogram(SNR_calc, 50, normed = 1)
        left, right = edges[:-1],edges[1:]
        X = np.array([left,right]).T.flatten()
        Y = np.array([bins,bins]).T.flatten()
        plt.plot(X, Y/Y.max(), label = str(n_skyfibers) + ' fibers')
    plt.axvline(SNR, c = 'k', linestyle = ':')
    SNR_annotation = plt.text(SNR, 0.35, '$S/N=' + str(SNR) + '$')
    SNR_annotation.set_rotation('vertical')
    plt.title('Effect of # of sky fibers on SNR', size = 18)
    plt.xscale('log')
    plt.ylim([-0.05, 1.05])
    plt.xlabel('SNR', size = 18)
    plt.ylabel('normed fraction', size = 18)
    plt.legend(loc = 'best', prop = {'size':6})
    plt.tight_layout()
    plt.show()
def simulate_single_spectrum():
    '''
    STEPS:
    1. choose templates
    2. make spectrum
    2.1. convolve with a LOSVD
    3. blur to correct FWHM
    4. redshift to within some error
    5. downsample to correct wavelengths
    6. noisify and create an IFU with the same noise characteristics
    7. run pPXF

    End-to-end simulation: builds one synthetic SparsePak-like observation
    from MILES templates and checks how well SPaCT/pPXF recovers the input
    kinematics and stellar population. Prints the comparison; returns None.
    '''
    from astropy.io import fits
    from astropy import table
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    import SPaCT
    import scipy.stats as stats
    from ppxf import robust_sigma
    import warnings
    # instrument header (wavelength solution) taken from a real observation
    SPSPK_info = fits.open('NGC2558.msobj.fits')[0].header
    template_files = template_names()
    ssps = choose_templates(templates = template_files, max_nonzero = 4)
    clean_spectrum, l_full = generate_spectrum(ssps = ssps)
    #now redshift the spectrum
    MILES_res = fits.open(template_files[0])[0].header['CDELT1']
    SPSPK_res = 1.4
    #FWHMs should be in Angstroms
    FWHM_MILES = 1.36
    FWHM_SPSPK = 4.877 #this is specific to one particular configuration, so handle with care!
    SNR = 100.
    n_skyfibers = 8
    n_moments = 4 #how many moments to fit
    '''
    This is a temporary solution to the problem of generating moments.
    Basically, just set the first one equal to zero (since that rolls out of redshift)
    and set 2 - 4 equal to some reasonable values
    '''
    moments = [0., 45.]
    moments += [0. for _ in range(6 - len(moments))] #pad moments out to the length that the LOSVD function accepts
    # speed of light in km/s
    c = 299792.458
    SPSPK_info['CRVAL1'] + SPSPK_info['NAXIS1'] * SPSPK_info['CDELT1'], SPSPK_info['NAXIS1']) if False else None
    l_sparse = np.linspace(SPSPK_info['CRVAL1'], SPSPK_info['CRVAL1'] + SPSPK_info['NAXIS1'] * SPSPK_info['CDELT1'], SPSPK_info['NAXIS1'])
    v_res = np.mean(c / (l_sparse / FWHM_SPSPK))
    #print 'Instrument velocity resolution:', v_res
    generate_LOSVD(spectrum = clean_spectrum, v_res = v_res, moments = moments, plots = False)
    blurred_spectrum = adjust_FWHM(sharp_spectrum = clean_spectrum, res_old = MILES_res, res_new = SPSPK_res, FWHM_old = FWHM_MILES, FWHM_new = FWHM_SPSPK)
    #now redshift the new blurred (but still full-resolution) spectrum into the observer frame
    z, dz, l_full = redshift_spectrum(l_0 = l_full, dz = 0.)
    l_sparse, sparse_spectrum = downsample_spectrum(l_dense = l_full, dense_spectrum = blurred_spectrum, l_sparse = l_sparse) #this accomplishes both downsampling and paring!!
    #now construct a fake IFU with 8 rows of pure noise at some SNR
    ifu, galaxy_noise = noisify_ifu(sparse_spectrum, n = 2, SNR = SNR)
    #simulate_noise(sparse_spectrum, SNR = SNR)
    #more debugs
    '''plt.plot(l_sparse, sparse_spectrum, linewidth = 0.25, label = 'original')
    plt.plot(l_sparse, ifu[0], linewidth = 0.25, label = 'noisy')
    plt.plot(l_sparse, galaxy_noise, linewidth = 0.25, label = 'sample noise')
    plt.legend(loc = 'best')
    plt.show()
    '''
    edgefibers = range(1, len(ifu))
    pp = SPaCT.SP_pPXF(ifu/np.median(ifu[0]), fiber = 0, l_summ = (3907., 1.4, 1934),
                       z = z + dz, verbose = False, noise_plots = False, fit_plots = False,
                       edgefibers = edgefibers, age_lim = 20., n_moments = n_moments, bias = 100.)
    #now compare the resulting redshift
    print 'Best-fitting redshift:\t\t', z + dz + pp.sol[0]/c
    print 'Real redshift:\t\t\t\t', z
    print 'Guess redshift:\t\t\t\t', z + dz
    print 'Reduced chi2:', pp.chi2
    print ' # | guess | real'
    for (i, fit_guess, real_value) in zip(range(1, n_moments + 1), pp.sol, moments[:n_moments]):
        print 'moment', str(i), ':', str(np.round(fit_guess, 2)), ':', str(np.round(real_value, 2))
    #compare the resulting population fits
    #print pp.weights
    ssps.add_column(table.Column(name = 'best-fit weights', data = pp.weights/pp.weights.sum()))
    pPXF_summary_plots(ssps = ssps, instrument_info = SPSPK_info, pp = pp, lam_sparse = l_sparse, vel = (z + dz) * c + pp.sol[0], verbose = True)
    #now return the chi2 parameter for the best-fit, as opposed to the "reality"
    print 'Chi-square test'
simulate_single_spectrum() | zpace/SparsePak-SFH | fake_galaxies.py | Python | mit | 17,002 | [
"Gaussian"
] | ad66898e3e988eec6a8fe718026c5f396cf8025fe2749e85ac6aea840b779f66 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Time Domain Electromagnetics (TDEM) functions and class"""
import sys
from math import pi
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
from . vmd import VMDTimeDomainModelling
def rhoafromU(U, t, Tx, current=1.0, Rx=None):
    r"""Apparent resistivity curve from classical TEM (U or dB/dt)

    rhoafromU(U/I, t, TXarea[, RXarea])

    .. math::

        \rho_a = ( A_{Rx} *A_{Tx} * \mu_0 / 20 / (U/I) )^2/3*t^{-5/3}*4e-7
    """
    if Rx is None:
        Rx = Tx  # coincident / single-loop configuration
    mu0 = 4e-7 * pi
    normalized = U / current
    geometry = Rx * Tx * mu0 / 20. / normalized
    return geometry**(2. / 3.) * t**(-5. / 3.) * mu0 / pi
def rhoafromB(B, t, Tx, current=1):
    r"""Apparent resistivity from B-field TEM

    .. math::

        \rho_a = ( (A_{Tx}*I*\mu_0 ) / (30B) )^2/3 * 4e-7 / t
    """
    mu0 = 4e-7 * pi
    kernel = current * Tx * mu0 / 30. / B
    return kernel**(2. / 3.) * mu0 / pi / t
# TODO: better derive a class TEMsounding from dict and put functions in there
def TxArea(snd):
    """Return the effective transmitter area of a sounding dict.

    'LOOP_SIZE' may be a string of side lengths ("10 20") whose product is
    the area, or already a numeric area.
    """
    loop = snd['LOOP_SIZE']
    if isinstance(loop, str):
        # e.g. "10 20" -> 10 * 20 = 200
        return np.prod([float(tok) for tok in loop.split()])
    return loop
def RxArea(snd):
    """Return the effective receiver area of a sounding dict.

    Uses 'COIL_SIZE' when present (with a special-case correction for the
    NMR noise loop), otherwise falls back to the transmitter loop area
    (coincident / single-loop configuration).
    """
    if 'COIL_SIZE' not in snd:
        # no coil size given ==> COI or SIN ==> take loop size
        return TxArea(snd)
    coil = snd['COIL_SIZE']
    if coil == 700.:
        coil = 100.  # hack for wrong turns in NMR noise loop
    return coil
def get_rhoa(snd, cal=260e-9, corrramp=False, verbose=False):
    """Compute apparent resistivity from sounding (usf) dict.

    Parameters: `cal` is the B-field calibration factor (V -> T) applied
    when Rx == 1; `corrramp` shifts gate times by half the ramp time.
    Returns (rhoa, t, rhoaerr): apparent resistivity, the (possibly
    ramp-corrected) gate times used, and the relative error estimate.
    NOTE(review): Rx = RxArea(snd) is immediately overwritten by the
    if/else below, which bypasses RxArea's 700->100 correction — confirm
    whether that is intended.
    """
    Tx = TxArea(snd)
    Rx = RxArea(snd)
    if 'COIL_SIZE' in snd:
        Rx = snd['COIL_SIZE']
    else:
        Rx = Tx
    if verbose:
        print("Tx/Rx", Tx, Rx)
    v = snd['VOLTAGE']
    istart, istop = 0, len(v)  # default: take all
    # indices where the voltage equals its maximum
    mav = np.arange(len(v))[v == max(v)]
    if len(mav) > 1:  # several equal big ones: start after
        istart = max(mav) + 1
    if min(v) < 0.0:  # negative values: stop at first
        istop = np.argmax(v[20:] < 0.0) + 20
    if verbose:
        print(istart, istop)
    v = v[istart:istop]
    if 'ST_DEV' in snd:
        dv = snd['ST_DEV'][istart:istop]  # / snd['CURRENT']
    else:
        # no measured standard deviation: assume 1% relative error
        dv = v * 0.01
    t = snd['TIME'][istart:istop] * 1.0
    if corrramp and 'RAMP_TIME' in snd:
        t = t - snd['RAMP_TIME'] / 2
    if Rx == 1:  # apparently B-field not dB/dt
        rhoa = rhoafromB(B=v*cal, t=t, Tx=Tx)
    else:
        if verbose:
            print("Using rhoafromU:", v, t, Tx, Rx)
        rhoa = rhoafromU(U=v, t=t, Tx=Tx, Rx=Rx)
    if verbose:
        print(rhoa[0], rhoa[10], rhoa[-1])
    # relative voltage error propagated through the 2/3-power law
    rhoaerr = dv / v * (2. / 3.)
    return rhoa, t, rhoaerr
def readusffile(filename, stripnoise=True):
    """Read data from single USF (universal sounding file) file

    Parses the file line by line: '/KEY: value' lines become sounding
    metadata, '/END' closes a sounding, and everything between a header
    row and '/END' is the numeric data table. Soundings flagged as noise
    sweeps (SWEEP_IS_NOISE == 1) are dropped when `stripnoise` is True.
    Returns a list of sounding dicts.

    Examples
    --------
    DATA = readusffile(filename)

    DATA = readusffile(filename, DATA) will append to DATA
    """
    DATA = []
    columns = []
    nr = 0
    station = {}        # file-wide settings captured from the first sweep
    sounding = {}
    sounding['FILENAME'] = filename
    isdata = False      # toggled at each '/END': data section vs. header
    fid = open(filename)
    for line in fid:
        zeile = line.rstrip('\n').replace(',', ' ')  # commas useless here
        if zeile:  # anything at all
            if zeile[0] == '/':  # comment-like
                if zeile[1:4] == 'END':  # end of a sounding
                    if isdata:  # already read some data
                        # expose each data column under its own key
                        sounding['data'] = columns
                        for i, cn in enumerate(sounding['column_names']):
                            sounding[cn] = columns[:, i]
                        sounding['FILENAME'] = filename
                        if 'INSTRUMENT' in sounding and 'ST_DEV' in sounding:
                            if 'terraTEM' in sounding['INSTRUMENT']:
                                # terraTEM reports percent; convert to fraction
                                sounding['ST_DEV'] *= 0.01
                                print('taking default stdev')
                        sounding.update(station)
                        if not(stripnoise and 'SWEEP_IS_NOISE' in sounding and
                               sounding['SWEEP_IS_NOISE'] == 1):
                            DATA.append(sounding)
                        sounding = {}
                    isdata = not isdata  # turn off data mode
                elif zeile.find(':') > 0:  # key-value pair
                    key, value = zeile[1:].split(':')
                    try:
                        val = float(value)
                        sounding[key] = val
                    except:
                        sounding[key] = value
                    if 'SWEEP' in key and len(station) == 0:  # first sweep
                        station = sounding.copy()  # save global settings
            else:
                if isdata:
                    values = zeile.split()
                    try:
                        # numeric row: store into the preallocated table
                        for i, v in enumerate(values):
                            columns[nr, i] = float(v)
                        nr += 1
                    except:
                        # non-numeric row: the column-name header; allocate
                        # the data table from the declared POINTS count
                        sounding['column_names'] = values
                        columns = np.zeros((int(sounding['POINTS']),
                                            len(values)))
                        nr = 0
    fid.close()
    return DATA
def readusffiles(filenames):
    """Read all soundings data from a list of usf files

    Accepts a list of file names, a single name, or a glob pattern
    (containing '*'). Returns the concatenated list of sounding dicts.

    Example
    -------
    DATA = readusffiles(filenames)
    """
    from glob import glob
    if isinstance(filenames, str):
        filenames = glob(filenames) if '*' in filenames else [filenames]
    DATA = []
    for fname in filenames:
        DATA.extend(readusffile(fname))
    return DATA
def readTEMfastFile(temfile):
    """ReadTEMfastFile(filename) reads TEM-fast file into usf sounding.

    Parses the fixed-format header (stack size, ramp time, current, loop
    and coil side lengths) and the data columns (time, voltage, error,
    apparent resistivity). Gates with non-positive voltage/rhoa or times
    inside 1.2x the ramp are discarded. Returns a sounding dict with
    times in seconds and areas in m^2.
    """
    snd = {}
    snd['FILENAME'] = temfile
    fid = open(temfile)
    # header values sit at fixed token positions on line 4
    for i in range(4):
        zeile = fid.readline()
    snd['STACK_SIZE'] = int(zeile.split()[3])
    snd['RAMP_TIME'] = float(zeile.split()[5])*1e-6
    snd['CURRENT'] = float(zeile.split()[7][2:])
    zeile = fid.readline()
    fid.close()
    # loop/coil are given as side lengths; square them to get areas
    snd['LOOP_SIZE'] = float(zeile.split()[2])**2
    snd['COIL_SIZE'] = float(zeile.split()[5])**2
    t, v, e, r = np.loadtxt(temfile, skiprows=8, usecols=(1, 2, 3, 4),
                            unpack=True)
    # keep only valid gates well after the turn-off ramp
    ind = np.nonzero((r > 0) * (v > 0) * (t > snd['RAMP_TIME']*1.2e6))  # us
    snd['TIME'] = t[ind] * 1e-6  # us
    snd['VOLTAGE'] = v[ind]
    snd['ST_DEV'] = e[ind]
    snd['RHOA'] = r[ind]
    return snd
def readUniKTEMData(filename):
    """Read TEM data format of University of Cologne.

    `filename` may be a single path or a glob pattern containing '*'.
    Columns: 1 = gate time, 2 = voltage, 4 = relative error in percent.
    Returns a list of sounding dicts with keys FILENAME, TIME, VOLTAGE
    and ST_DEV (absolute voltage error).
    """
    from glob import glob
    fnames = glob(filename) if '*' in filename else [filename]
    DATA = []
    for fname in fnames:
        A = np.loadtxt(fname)
        DATA.append({'FILENAME': fname,
                     'TIME': A[:, 1],
                     'VOLTAGE': A[:, 2],
                     # percent error -> absolute voltage error
                     'ST_DEV': A[:, 4] / 100 * A[:, 2]})
    return DATA
def readSiroTEMData(fname):
    """Read TEM data from siroTEM instrument dump.

    Example
    -------
    DATA = readSiroTEMData(filename)
    .. list of soundings with USF and siro-specific keys
    """
    # gate-centre time tables for the two instrument acquisition modes;
    # Time_ST is scaled by 1e-6 and Time_ET by 1e-3 below, so ST values are
    # presumably in us and ET values in ms — TODO confirm against manual
    Time_ST = np.array([487., 887., 1287., 1687., 2087., 2687., 3487., 4287.,
                        5087., 5887., 7087., 8687., 10287., 11887., 13487.,
                        15887., 19087., 22287., 25487., 28687., 33487., 39887.,
                        46287., 52687., 59087., 68687., 81487., 94287.,
                        107090., 119890., 139090., 164690., 190290., 215890.,
                        241490., 279890., 331090., 382290., 433490., 484690.,
                        561490., 663890., 766290., 868690., 971090., 1124700.,
                        1329500., 1534300., 1739100., 1943900.])
    Time_ET = np.array([0.05, 0.1, 0.15, 0.25, 0.325, 0.425, 0.525, 0.625,
                        0.725, 0.875, 1.075, 1.275, 1.475, 1.675, 1.975,
                        2.375, 2.775, 3.175, 3.575, 4.175, 4.975, 5.775,
                        6.575, 7.375, 8.575, 10.175, 11.775, 13.375, 14.975,
                        17.375, 20.575, 23.775, 26.975, 30.175, 34.975, 41.375,
                        47.775, 54.175, 60.574, 70.175, 82.975, 95.775,
                        108.575, 121.375, 140.575, 166.175, 191.775, 217.375,
                        242.975, 281.375, 332.575])
    fid = open(fname)
    # read in file header until : sign
    line = 'a'
    while len(line) > 0 and line[0] != ':':
        line = fid.readline()
    DATA = []
    line = fid.readline()
    # each iteration parses one sounding; a leading ';' marks end of file
    while line[0] != ';':
        # strip the leading ':' and a fixed-width trailer before splitting
        header = line[1:-6].split(',')
        snd = {}  # dictionary, uppercase corresponds to USF format keys
        snd['INSTRUMENT'] = 'siroTEM'
        snd['dtype'] = int(header[3])
        dstring = header[1]
        # NOTE(review): [3:4] and [0:1] take single characters; for a
        # dd.mm.yy date string this looks like it should be [3:5] and [0:2]
        # to build yyyymmdd — confirm the instrument's date format
        snd['DATE'] = int('20' + dstring[6:8] + dstring[3:4] + dstring[0:1])
        snd['win0'], snd['win1'], ngain, snd['conf'], snd['nch'] = \
            [int(h) for h in header[5:10]]
        snd['SOUNDING_NUMBER'] = int(header[10])
        snd['GAIN_FACTOR'] = [0.1, 1.0, 10.0, 100.0][ngain]  # predefined gains
        snd['STACK_SIZE'] = int(header[14])
        snd['ttype'] = int(header[20])
        # 1=composite, 2=earlytime, 3=standard, 4=highresolution
        snd['CURRENT'] = float(header[17])
        snd['RAMP_TIME'] = float(header[18]) * 1e-6  # us -> s
        snd['TIME_DELAY'] = float(header[19])
        snd['LOOP_SIZE'] = float(header[21])
        snd['COIL_SIZE'] = float(header[22])
        fid.readline()  # skip one separator line before the data rows
        data = []
        line = fid.readline()[:-1]  # trim CR+LF newline
        while len(line) > 0:
            # a trailing '/' marks a continuation of the same record on the
            # next physical line; glue them together (tabs removed)
            while line[-1] == '/':
                line = line[:-1] + fid.readline()[:-1].replace('\t', '')
            # aline = line
            # each comma-separated element is a packed fixed-width number:
            # last two chars are the decimal exponent, the five before hold
            # the mantissa — presumably instrument-specific; TODO confirm
            nums = [float(el[-7:-2]) * 10**(float(el[-2:])) for el in
                    line[1:-5].split(',')[1:]]
            data.append(np.array(nums))
            line = fid.readline().rstrip('\n').rstrip('\r')
        # first record holds the voltages, second the standard deviations
        snd['VOLTAGE'] = data[0]
        if snd['ttype'] == 2:  # early time
            snd['TIME'] = Time_ET[snd['win0'] - 1:snd['win1']] * 1e-3
        if snd['ttype'] == 3:  # standard time
            snd['TIME'] = Time_ST[snd['win0'] - 1:snd['win1']] * 1e-6
        snd['ST_DEV'] = data[1]
        if snd['dtype'] > 0:  # normal measurement
            DATA.append(snd)
        line = fid.readline()
    fid.close()
    # DATA['FILENAME'] = fname  # makes no sense as DATA is an array->snd?
    return DATA
def getname(snd):
    """Build a plot label from a sounding's FILENAME (plus stack count)."""
    path = snd['FILENAME']
    # basename after the last backslash, without the 4-character extension
    label = path[path.rfind('\\') + 1:-4]
    if 'STACK_SIZE' in snd:
        label = '{0}-{1}'.format(label, int(snd['STACK_SIZE']))
    return label
class TDEM():
    """Container for TEM (time-domain EM) soundings.

    Holds a list of sounding dictionaries (USF-style keys) and offers
    loading, plotting, stacking and inversion convenience methods.
    """

    def __init__(self, filename=None):
        """Initialize class and (optionally) load data.

        Parameters
        ----------
        filename : str, optional
            File to load immediately; the suffix selects the reader
            (see :meth:`load`).
        """
        self.DATA = []
        self.names = []
        if filename:
            self.load(filename)

    def load(self, filename):
        """Load data from usf, txt (siroTEM), tem (TEMfast) or UniK file."""
        # fixed typo: docstring said "Road data"
        if filename.lower().endswith('.usf'):
            self.DATA.extend(readusffiles(filename))
        elif filename.lower().endswith('.txt'):
            self.DATA = readSiroTEMData(filename)
        elif filename.lower().endswith('.tem'):
            self.DATA.append(readTEMfastFile(filename))
        elif filename.lower().endswith('.dat'):  # dangerous: ambiguous suffix
            self.DATA = readUniKTEMData(filename)

    def __repr__(self):
        return "<TDEMdata: %d soundings>" % (len(self.DATA))

    def showInfos(self):  # only for old scripts using it
        # BUG FIX: previously printed the bound method object
        # (print(self.__repr__)) instead of the representation string
        print(self.__repr__())

    def plotTransients(self, ax=None, **kwargs):
        """Plot all transients (U/I over t) into one window.

        Returns the figure and axis used.
        """
        if ax is None:
            fig, ax = plt.subplots()
        else:
            fig = ax.get_figure()
        kwargs.setdefault('marker', '.')
        plotlegend = kwargs.pop('legend', True)
        cols = 'rgbmcyk'  # cycled per sounding
        pl = []
        for i, data in enumerate(self.DATA):
            t = data['TIME']
            u = data['VOLTAGE'] / RxArea(data)  # normalize by receiver area
            col = cols[i % len(cols)]
            pl.append(ax.loglog(t, u, label=getname(data),
                                color=col, **kwargs))
            if 'ST_DEV' in data:
                err = data['ST_DEV'] / RxArea(data)
                ax.errorbar(t, u, yerr=err, color=col)
            if 'RAMP_TIME' in data:
                # mark the turn-off ramp as a vertical line
                ax.vlines(data['RAMP_TIME'], min(u), max(u), colors=col)
        ax.set_xlabel('t [s]')
        ax.set_ylabel('U/I [V/A]')
        if plotlegend:
            ax.legend(loc='best')
        ax.grid(True)
        return fig, ax

    def plotRhoa(self, ax=None, ploterror=False, corrramp=False, **kwargs):
        """Plot all apparent resistivity curves into one window."""
        if ax is None:
            fig, ax = plt.subplots()
        kwargs.setdefault('marker', '.')
        plotLegend = kwargs.pop('legend', True)
        for i, data in enumerate(self.DATA):
            rhoa, t, err = get_rhoa(data, corrramp=corrramp)
            err[err > .99] = .99  # cap relative errors at 99 %
            col = 'C' + str(i % 10)
            ax.loglog(rhoa, t, label=getname(data),
                      color=col, **kwargs)
            if ploterror:
                ax.errorbar(rhoa, t, xerr=rhoa * err, color=col)
        ax.set_ylabel('t [s]')
        ax.set_xlabel(r'$\rho_a$ [$\Omega$m]')
        if plotLegend:
            ax.legend(loc='best')
        ax.grid(True)
        ax.set_ylim(ax.get_ylim()[::-1])  # time increases downwards
        return ax

    def __call__(self, i=0):
        """Return a single sounding."""
        return self.DATA[i]

    def getFOP(self, nr=0):
        """Return forward operator for sounding number nr."""
        # BUG FIX: previously always used self.DATA[0], ignoring nr
        snd = self.DATA[nr]
        return VMDTimeDomainModelling(snd['TIME'], TxArea(snd), 1)  # RxArea(snd))

    def invert(self, nr=0, nlay=4, thickness=None):
        """Invert sounding nr for an nlay-layer model."""
        self.fop = self.getFOP(nr)
        snd = self.DATA[nr]
        rhoa, t, err = get_rhoa(snd)
        self.fop.t = t
        # BUG FIX: forward the thickness argument (was hard-coded to None)
        model = self.fop.createStartModel(rhoa, nlay, thickness=thickness)
        self.INV = pg.frameworks.MarquardtInversion(fop=self.fop)
        # NOTE(review): pop removes ST_DEV from the sounding dict as a side
        # effect — confirm this is intentional
        errorVals = snd.pop('ST_DEV', 0)/snd['VOLTAGE']+0.03
        self.model = self.INV.run(dataVals=rhoa, errorVals=errorVals,
                                  startModel=model)
        return self.model

    def stackAll(self, tmin=0, tmax=100):
        """Stack all measurements yielding a new TDEM class instance."""
        t = self.DATA[0]['TIME']
        v = np.zeros_like(t)
        V = np.zeros((len(v), len(self.DATA)))
        sumstacks = 0
        for i, snd in enumerate(self.DATA):
            # only soundings sampled on the same time axis can be stacked
            if np.allclose(snd['TIME'], t):
                stacks = snd.pop('STACK_SIZE', 1)
                v += snd['VOLTAGE'] * stacks  # stack-weighted sum
                sumstacks += stacks
                V[:, i] = snd['VOLTAGE']
            else:
                print("sounding {} does not have the same time!".format(i))
        v /= sumstacks
        # standard deviation over soundings, ignoring non-positive voltages
        VM = np.ma.masked_less_equal(V, 0)
        err = np.std(VM, axis=1).data
        snd = self.DATA[0].copy()
        fi = np.nonzero((t >= tmin) & (t <= tmax))[0]
        snd['TIME'] = t[fi]
        snd['VOLTAGE'] = v[fi]
        snd['ST_DEV'] = err[fi]
        # BUG FIX: del raised KeyError when no 'data' key was present
        snd.pop('data', None)
        tem = TDEM()
        tem.DATA = [snd]
        return tem
if __name__ == '__main__':
    # ad-hoc smoke test: load the file named on the command line and plot it
    print("do some tests here")
    sounding_set = TDEM(sys.argv[1])
    print(sounding_set)
    sounding_set.plotTransients()
    sounding_set.plotRhoa()
| gimli-org/gimli | pygimli/physics/em/tdem.py | Python | apache-2.0 | 16,310 | [
"VMD"
] | 213a7ac776868c9cd0a2151085ed7f32533d1dacb56531fd17bc39ff679fdb16 |
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils.extmath import safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast
# Utility Functions
def check_pairwise_arrays(X, Y):
    """ Set X and Y appropriately and checks inputs
    If Y is None, it is set as a pointer to X (i.e. not a copy).
    If Y is given, this does not happen.
    All distance metrics should use this function first to assert that the
    given parameters are correct and safe to use.
    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats. Finally, the function checks that the size
    of the second dimension of the two arrays is equal.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
    Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
        An array equal to X, guaranteed to be a numpy array.
    safe_Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.
    """
    if Y is X or Y is None:
        # keep the aliasing so later code can use `X is Y` fast paths
        X = Y = atleast2d_or_csr(X)
    else:
        X = atleast2d_or_csr(X)
        Y = atleast2d_or_csr(Y)
    if X.shape[1] != Y.shape[1]:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             X.shape[1], Y.shape[1]))
    # float32 inputs are passed through; anything else is upcast to float64.
    # BUG FIX: np.float is a deprecated alias removed in NumPy >= 1.24;
    # use the explicit np.float64 instead.
    if not (X.dtype == Y.dtype == np.float32):
        if Y is X:
            X = Y = X.astype(np.float64)
        else:
            X = X.astype(np.float64)
            Y = Y.astype(np.float64)
    return X, Y
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.
    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::
        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
    This formulation has two main advantages. First, it is computationally
    efficient when dealing with sparse data. Second, if x varies but y
    remains unchanged, then the right-most dot-product `dot(y, y)` can be
    pre-computed.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples_1, n_features]
    Y : {array-like, sparse matrix}, shape = [n_samples_2, n_features]
    Y_norm_squared : array-like, shape = [n_samples_2], optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)
    squared : boolean, optional
        Return squared Euclidean distances.
    Returns
    -------
    distances : {array, sparse matrix}, shape = [n_samples_1, n_samples_2]
    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])
    """
    # should not need X_norm_squared because if you could precompute that as
    # well as Y, then you should just pre-compute the output and not even
    # call this function.
    X, Y = check_pairwise_arrays(X, Y)
    # row norms ||x||^2 as a column vector; sparse matrices need .multiply
    # for element-wise squaring
    if issparse(X):
        XX = X.multiply(X).sum(axis=1)
    else:
        XX = np.sum(X * X, axis=1)[:, np.newaxis]
    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        YY = XX.T
    elif Y_norm_squared is None:
        if issparse(Y):
            # scipy.sparse matrices don't have element-wise scalar
            # exponentiation, and tocsr has a copy kwarg only on CSR matrices.
            YY = Y.copy() if isinstance(Y, csr_matrix) else Y.tocsr()
            YY.data **= 2
            YY = np.asarray(YY.sum(axis=1)).T
        else:
            YY = np.sum(Y ** 2, axis=1)[np.newaxis, :]
    else:
        YY = atleast2d_or_csr(Y_norm_squared)
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
    # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, accumulated in place on the
    # dense cross-product matrix (XX / YY broadcast along rows / columns)
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # clip tiny negatives caused by floating-point cancellation
    np.maximum(distances, 0, distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0
    return distances if squared else np.sqrt(distances)
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=5e8):
    """ Compute the L1 distances between the vectors in X and Y.
    With sum_over_features equal to False it returns the componentwise
    distances.
    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).
    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).
    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
    size_threshold : int, default=5e8
        Avoid creating temporary matrices bigger than size_threshold (in
        bytes). If the problem size gets too big, the implementation then
        breaks it down in smaller problems.
    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise l1 distances.
    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0., 2.],
           [ 4., 4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[ 1.,  1.],
           [ 1.,  1.]]...)
    """
    if issparse(X) or issparse(Y):
        raise ValueError("manhattan_distance does not support sparse"
                         " matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    # size of the full (n_x, n_y, n_features) broadcast temporary, in elements
    temporary_size = X.size * Y.shape[-1]
    # Convert to bytes
    temporary_size *= X.itemsize
    if temporary_size > size_threshold and sum_over_features:
        # Broadcasting the full thing would be too big: it's on the order
        # of magnitude of the gigabyte
        D = np.empty((X.shape[0], Y.shape[0]), dtype=X.dtype)
        index = 0
        # number of X rows whose broadcast temporary stays under the threshold
        increment = 1 + int(size_threshold / float(temporary_size) *
                            X.shape[0])
        while index < X.shape[0]:
            this_slice = slice(index, index + increment)
            # (chunk, n_y, n_features) differences, |.| and feature-sum
            # computed in place to limit peak memory
            tmp = X[this_slice, np.newaxis, :] - Y[np.newaxis, :, :]
            tmp = np.abs(tmp, tmp)
            tmp = np.sum(tmp, axis=2)
            D[this_slice] = tmp
            index += increment
    else:
        # small enough: broadcast the whole (n_x, n_y, n_features) tensor
        D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
        D = np.abs(D, D)
        if sum_over_features:
            D = np.sum(D, axis=2)
        else:
            D = D.reshape((-1, X.shape[1]))
    return D
def cosine_distances(X, Y=None):
    """
    Compute cosine distance between samples in X and Y.
    Cosine distance is defined as 1.0 minus the cosine similarity.
    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).
    Returns
    -------
    distance matrix : array_like
        An array with shape (n_samples_X, n_samples_Y).
    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # distance = 1 - similarity; np.subtract writes into the similarity
    # matrix itself so no extra copy is allocated
    D = cosine_similarity(X, Y)
    np.subtract(1, D, D)
    return D
# Kernels
def linear_kernel(X, Y=None):
    """
    Compute the linear (plain dot-product) kernel between X and Y.
    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)
    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # safe_sparse_dot handles the dense/sparse combinations transparently
    gram = safe_sparse_dot(X, Y.T, dense_output=True)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """
    Compute the polynomial kernel between X and Y::
        K(X, Y) = (gamma <X, Y> + coef0)^degree
    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)
    degree : int
        Degree of the polynomial.
    gamma : float, optional
        Defaults to 1 / n_features when None.
    coef0 : float
        Additive constant inside the power.
    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]  # default scaling: one over n_features
    # build (gamma <X, Y> + coef0)^degree in place on the Gram matrix
    K = linear_kernel(X, Y)
    np.multiply(K, gamma, K)
    np.add(K, coef0, K)
    np.power(K, degree, K)
    return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """
    Compute the sigmoid kernel between X and Y::
        K(X, Y) = tanh(gamma <X, Y> + coef0)
    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)
    gamma : float, optional
        Defaults to 1 / n_features when None.
    coef0 : float
        Additive constant inside tanh.
    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]  # default scaling: one over n_features
    # tanh(gamma <X, Y> + coef0), all steps applied in place
    K = linear_kernel(X, Y)
    np.multiply(K, gamma, K)
    np.add(K, coef0, K)
    np.tanh(K, K)
    return K
def rbf_kernel(X, Y=None, gamma=None):
    """
    Compute the rbf (gaussian) kernel between X and Y::
        K(x, y) = exp(-gamma ||x-y||^2)
    for each pair of rows x in X and y in Y.
    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, optional
        Defaults to 1 / n_features when None.
    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]  # default scaling: one over n_features
    # exp(-gamma * squared distances), computed in place
    K = euclidean_distances(X, Y, squared=True)
    np.multiply(K, -gamma, K)
    np.exp(K, K)
    return K
def cosine_similarity(X, Y=None):
    """Compute cosine similarity between samples in X and Y.
    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:
        K(X, Y) = <X, Y> / (||X||*||Y||)
    On L2-normalized data, this function is equivalent to linear_kernel.
    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).
    Returns
    -------
    kernel matrix : array_like
        An array with shape (n_samples_X, n_samples_Y).
    """
    # to avoid recursive import
    X, Y = check_pairwise_arrays(X, Y)
    Xn = normalize(X, copy=True)
    # reuse the normalized X when Y aliases it (common self-similarity case)
    Yn = Xn if X is Y else normalize(Y, copy=True)
    return linear_kernel(Xn, Yn)
def additive_chi2_kernel(X, Y=None):
    """Computes the additive chi-squared kernel between observations in X and Y
    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.
    The chi-squared kernel is given by::
        k(x, y) = -Sum [(x - y)^2 / (x + y)]
    It can be interpreted as a weighted difference per entry.
    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.
    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    # the Cython helper only handles dense non-negative input, so validate
    # sparsity first and sign after the arrays have been coerced
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")
    # output buffer filled in place by the compiled kernel
    out = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, out)
    return out
def chi2_kernel(X, Y=None, gamma=1.):
    """Computes the exponential chi-squared kernel X and Y.
    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.
    The chi-squared kernel is given by::
        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
    It can be interpreted as a weighted difference per entry.
    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.
    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
    See also
    --------
    additive_chi2_kernel : The additive version of this kernel
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # additive_chi2_kernel already returns the *negative* chi2 sums, so a
    # plain multiply by gamma followed by exp yields exp(-gamma * sum)
    K = additive_chi2_kernel(X, Y)
    np.multiply(K, gamma, K)
    np.exp(K, K)
    return K
# Helper functions - distance
# Maps the string names accepted by pairwise_distances() to implementations.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,  # alias of 'euclidean'
    'l1': manhattan_distances,  # alias of 'manhattan'/'cityblock'
    'manhattan': manhattan_distances, }
def distance_metrics():
    """Valid metrics for pairwise_distances.
    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.
    The valid distance metrics, and the function they map to, are:
    ============     ====================================
    metric           Function
    ============     ====================================
    'cityblock'      metrics.pairwise.manhattan_distances
    'cosine'         metrics.pairwise.cosine_distances
    'euclidean'      metrics.pairwise.euclidean_distances
    'l1'             metrics.pairwise.manhattan_distances
    'l2'             metrics.pairwise.euclidean_distances
    'manhattan'      metrics.pairwise.manhattan_distances
    ============     ====================================
    """
    # expose the module-level registry directly
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix in n_jobs even slices
    and compute them in parallel"""
    # joblib convention: n_jobs == -1 means all CPUs, -2 all but one, etc.
    if n_jobs < 0:
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)
    if Y is None:
        Y = X
    # each worker computes func(X, Y[s]) for one slice s of Y's rows, giving
    # an (n_samples_X, len(s)) sub-matrix; hstack reassembles the full result
    ret = Parallel(n_jobs=n_jobs, verbose=0)(
        delayed(func)(X, Y[s], **kwds)
        for s in gen_even_slices(Y.shape[0], n_jobs))
    return np.hstack(ret)
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.
    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.
    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.
    Please note that support for sparse matrices is currently limited to
    'euclidean', 'l2' and 'cosine'.
    Valid values for metric are:
    - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']
    - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
    See the documentation for scipy.spatial.distance for details on these
    metrics.
    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.
    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    """
    if metric == "precomputed":
        # X already is the distance matrix; return it untouched
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # fast scikit-learn implementation, optionally parallelized over
        # column slices of Y
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
        if n_jobs == 1:
            return func(X, Y, **kwds)
        else:
            return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
    elif callable(metric):
        # Check matrices first (this is usually done by the metric).
        X, Y = check_pairwise_arrays(X, Y)
        n_x, n_y = X.shape[0], Y.shape[0]
        # Calculate distance for each element in X and Y.
        # FIXME: can use n_jobs here too
        D = np.zeros((n_x, n_y), dtype='float')
        for i in range(n_x):
            start = 0
            if X is Y:
                start = i
            for j in range(start, n_y):
                # distance assumed to be symmetric.
                # NOTE(review): when X is Y only the upper triangle is
                # evaluated and mirrored — a non-symmetric callable would
                # yield a wrong lower triangle; confirm callers only pass
                # symmetric metrics
                D[i][j] = metric(X[i], Y[j], **kwds)
                if X is Y:
                    D[j][i] = D[i][j]
        return D
    else:
        # Note: the distance module doesn't support sparse matrices!
        if type(X) is csr_matrix:
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")
        if Y is None:
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        else:
            if type(Y) is csr_matrix:
                raise TypeError("scipy distance metrics do not"
                                " support sparse matrices.")
            return distance.cdist(X, Y, metric=metric, **kwds)
# Helper functions - kernels
# Maps the string names accepted by pairwise_kernels() to implementations.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,  # alias of 'polynomial'
    'rbf': rbf_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """ Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise distance metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid distance metrics, and the function they map to, are:
      ===============   ========================================
      metric            Function
      ===============   ========================================
      'additive_chi2'   sklearn.pairwise.additive_chi2_kernel
      'chi2'            sklearn.pairwise.chi2_kernel
      'linear'          sklearn.pairwise.linear_kernel
      'poly'            sklearn.pairwise.polynomial_kernel
      'polynomial'      sklearn.pairwise.polynomial_kernel
      'rbf'             sklearn.pairwise.rbf_kernel
      'sigmoid'         sklearn.pairwise.sigmoid_kernel
      'cosine'          sklearn.pairwise.cosine_similarity
      ===============   ========================================
    """
    # expose the module-level registry directly
    return PAIRWISE_KERNEL_FUNCTIONS
# Keyword parameters each kernel accepts; used by pairwise_kernels() when
# filter_params=True to drop arguments the kernel does not understand.
KERNEL_PARAMS = {
    "additive_chi2": (),
    # BUG FIX: chi2_kernel takes a gamma parameter, but "chi2" mapped to ()
    # (gamma only appeared under the unused "exp_chi2" key), so
    # filter_params=True silently discarded gamma for the chi2 kernel.
    "chi2": frozenset(["gamma"]),
    "cosine": (),
    # kept for backward compatibility although no such kernel is registered
    "exp_chi2": frozenset(["gamma"]),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """Compute the kernel between arrays X and optional array Y.
    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.
    This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    kernel between the arrays from both X and Y.
    Valid values for metric are::
        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    filter_params: boolean
        Whether to filter invalid parameters or not.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the kernel function.
    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then K_{i, j} is the kernel between the ith array
        from X and the jth array from Y.
    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    if metric == "precomputed":
        # X already is the kernel matrix; return it untouched
        return X
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # keep only the keyword arguments this kernel understands
            kwds = dict((k, kwds[k]) for k in kwds
                        if k in KERNEL_PARAMS[metric])
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
        if n_jobs == 1:
            return func(X, Y, **kwds)
        else:
            return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
    elif callable(metric):
        # Check matrices first (this is usually done by the metric).
        X, Y = check_pairwise_arrays(X, Y)
        n_x, n_y = X.shape[0], Y.shape[0]
        # Calculate kernel for each element in X and Y.
        K = np.zeros((n_x, n_y), dtype='float')
        for i in range(n_x):
            start = 0
            if X is Y:
                start = i
            for j in range(start, n_y):
                # Kernel assumed to be symmetric.
                # NOTE(review): when X is Y only the upper triangle is
                # evaluated and mirrored — confirm callers only pass
                # symmetric kernels
                K[i][j] = metric(X[i], Y[j], **kwds)
                if X is Y:
                    K[j][i] = K[i][j]
        return K
    else:
        raise ValueError("Unknown kernel %r" % metric)
| depet/scikit-learn | sklearn/metrics/pairwise.py | Python | bsd-3-clause | 29,572 | [
"Gaussian"
] | f5e773f18f469de1eb7f36a6e0b6bbc90aafc014c7bc7d1199cd1a1917d7b97a |
from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the CRM module's desk/workspace configuration.

	The result is a list of "card" definitions consumed by Frappe's module
	view: each card carries a translated ``label``, an optional icon and a
	list of ``items`` (doctypes, reports or pages) shown on the CRM page.
	Entries flagged ``"onboard": 1`` are surfaced during initial user
	onboarding; ``dependencies`` hides a report until the listed doctypes
	contain data.
	"""
	return [
		{
			"label": _("Sales Pipeline"),
			"icon": "fa fa-star",
			"items": [
				{
					"type": "doctype",
					"name": "Lead",
					"description": _("Database of potential customers."),
					"onboard": 1,
				},
				{
					"type": "doctype",
					"name": "Opportunity",
					"description": _("Potential opportunities for selling."),
					"onboard": 1,
				},
				{
					"type": "doctype",
					"name": "Customer",
					"description": _("Customer database."),
					"onboard": 1,
				},
				{
					"type": "doctype",
					"name": "Contact",
					"description": _("All Contacts."),
					"onboard": 1,
				},
				{
					"type": "doctype",
					"name": "Communication",
					"description": _("Record of all communications of type email, phone, chat, visit, etc."),
				},
				{
					"type": "doctype",
					"name": "Lead Source",
					"description": _("Track Leads by Lead Source.")
				},
			]
		},
		{
			"label": _("Reports"),
			"icon": "fa fa-list",
			"items": [
				{
					"type": "report",
					"is_query_report": True,
					"name": "Lead Details",
					"doctype": "Lead",
					"onboard": 1,
				},
				{
					"type": "page",
					"name": "sales-funnel",
					"label": _("Sales Funnel"),
					"icon": "fa fa-bar-chart",
					"onboard": 1,
				},
				{
					"type": "report",
					"name": "Prospects Engaged But Not Converted",
					"doctype": "Lead",
					"is_query_report": True,
					"onboard": 1,
				},
				{
					"type": "report",
					"name": "Minutes to First Response for Opportunity",
					"doctype": "Opportunity",
					"is_query_report": True,
					"dependencies": ["Opportunity"]
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Customer Addresses And Contacts",
					"doctype": "Contact",
					"dependencies": ["Customer"]
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Inactive Customers",
					"doctype": "Sales Order",
					"dependencies": ["Sales Order"]
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Campaign Efficiency",
					"doctype": "Lead",
					"dependencies": ["Lead"]
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Lead Owner Efficiency",
					"doctype": "Lead",
					"dependencies": ["Lead"]
				}
			]
		},
		{
			"label": _("Settings"),
			"icon": "fa fa-cog",
			"items": [
				{
					"type": "doctype",
					"label": _("Customer Group"),
					"name": "Customer Group",
					"icon": "fa fa-sitemap",
					"link": "Tree/Customer Group",
					"description": _("Manage Customer Group Tree."),
					"onboard": 1,
				},
				{
					"type": "doctype",
					"label": _("Territory"),
					"name": "Territory",
					"icon": "fa fa-sitemap",
					"link": "Tree/Territory",
					"description": _("Manage Territory Tree."),
					"onboard": 1,
				},
				{
					"type": "doctype",
					"label": _("Sales Person"),
					"name": "Sales Person",
					"icon": "fa fa-sitemap",
					"link": "Tree/Sales Person",
					"description": _("Manage Sales Person Tree."),
					"onboard": 1,
				},
				{
					"type": "doctype",
					"name": "Campaign",
					"description": _("Sales campaigns."),
				},
				{
					"type": "doctype",
					"name": "Email Campaign",
					"description": _("Sends Mails to lead or contact based on a Campaign schedule"),
				},
				{
					"type": "doctype",
					"name": "SMS Center",
					"description":_("Send mass SMS to your contacts"),
				},
				{
					"type": "doctype",
					"name": "SMS Log",
					"description":_("Logs for maintaining sms delivery status"),
				},
				{
					"type": "doctype",
					"name": "SMS Settings",
					"description": _("Setup SMS gateway settings")
				}
			]
		},
		{
			"label": _("Maintenance"),
			"icon": "fa fa-star",
			"items": [
				{
					"type": "doctype",
					"name": "Maintenance Schedule",
					"description": _("Plan for maintenance visits."),
					"onboard": 1,
				},
				{
					"type": "doctype",
					"name": "Maintenance Visit",
					"description": _("Visit report for maintenance call."),
				},
				{
					"type": "report",
					"name": "Maintenance Schedules",
					"is_query_report": True,
					"doctype": "Maintenance Schedule"
				},
				{
					"type": "doctype",
					"name": "Warranty Claim",
					"description": _("Warranty Claim against Serial No."),
				},
			]
		},
		# {
		# 	"label": _("Help"),
		# 	"items": [
		# 		{
		# 			"type": "help",
		# 			"label": _("Lead to Quotation"),
		# 			"youtube_id": "TxYX4r4JAKA"
		# 		},
		# 		{
		# 			"type": "help",
		# 			"label": _("Newsletters"),
		# 			"youtube_id": "muLKsCrrDRo"
		# 		},
		# 	]
		# },
	]
| Zlash65/erpnext | erpnext/config/crm.py | Python | gpl-3.0 | 4,679 | [
"VisIt"
] | 88bd9de5e23744164ecd7fedda95aebe7a103a18c03aa9026f1e3ed47550cb65 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd.lb
import espressomd.lbboundaries
import espressomd.shapes
import tests_common
# Lattice constant, uniform body force and fluid properties shared by every
# test class in this module (all in LB units).
AGRID = 0.5
EXT_FORCE = np.array([-.01, 0.02, 0.03])
VISC = 3.5
DENS = 1.5
TIME_STEP = 0.05
# Keyword arguments used by setUp() to construct the LB fluid actor.
LB_PARAMS = {'agrid': AGRID,
             'dens': DENS,
             'visc': VISC,
             'tau': TIME_STEP,
             'ext_force_density': EXT_FORCE}
class LBBoundaryForceCommon:
    """
    Checks force on lb boundaries for a fluid with a uniform volume force
    """
    # One shared system for the CPU and GPU subclasses; the box is elongated
    # along x so the two walls below are well separated.
    system = espressomd.System(box_l=np.array([12.0, 4.0, 4.0]) * AGRID)
    system.time_step = TIME_STEP
    system.cell_system.skin = 0.4 * AGRID
    def setUp(self):
        # Instantiate the LB fluid (CPU or GPU, chosen via lb_class in the
        # concrete subclass) and register it with the system.
        self.lbf = self.lb_class(**LB_PARAMS)
        self.system.actors.add(self.lbf)
    def tearDown(self):
        # Remove boundaries and the fluid actor so test cases stay independent.
        self.system.lbboundaries.clear()
        self.system.actors.clear()
    def test(self):
        """
        Integrate the LB fluid until steady state is reached within a certain
        accuracy. Then compare the force balance between force exerted on fluid
        and forces acting on the boundaries.
        """
        # Two plane walls, one lattice constant away from each x-face of the box.
        wall_shape1 = espressomd.shapes.Wall(normal=[1, 0, 0], dist=AGRID)
        wall_shape2 = espressomd.shapes.Wall(
            normal=[-1, 0, 0], dist=-(self.system.box_l[0] - AGRID))
        wall1 = espressomd.lbboundaries.LBBoundary(shape=wall_shape1)
        wall2 = espressomd.lbboundaries.LBBoundary(shape=wall_shape2)
        self.system.lbboundaries.add(wall1)
        self.system.lbboundaries.add(wall2)
        fluid_nodes = tests_common.count_fluid_nodes(self.lbf)
        self.system.integrator.run(20)
        # Iterate until the x-component of the force on wall 1 changes by
        # less than 0.002 between successive 10-step integrations.
        diff = float("inf")
        old_val = float("inf")
        while diff > 0.002:
            self.system.integrator.run(10)
            new_val = wall1.get_force()[0]
            diff = abs(new_val - old_val)
            old_val = new_val
        # Momentum balance: in steady state the momentum injected by the body
        # force on all fluid nodes must be absorbed by the two walls.
        expected_force = fluid_nodes * AGRID**3 * \
            np.copy(self.lbf.ext_force_density)
        measured_force = np.array(wall1.get_force()) + \
            np.array(wall2.get_force())
        np.testing.assert_allclose(measured_force, expected_force, atol=2E-2)
@utx.skipIfMissingFeatures(['LB_BOUNDARIES', 'EXTERNAL_FORCES'])
class LBCPUBoundaryForce(LBBoundaryForceCommon, ut.TestCase):
    """Test for the CPU implementation of the LB."""
    # Fluid class instantiated by LBBoundaryForceCommon.setUp().
    lb_class = espressomd.lb.LBFluid
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(['LB_BOUNDARIES_GPU', 'EXTERNAL_FORCES'])
class LBGPUBoundaryForce(LBBoundaryForceCommon, ut.TestCase):
    """Test for the GPU implementation of the LB."""
    # Fluid class instantiated by LBBoundaryForceCommon.setUp().
    lb_class = espressomd.lb.LBFluidGPU
if __name__ == '__main__':
    # Run every test case defined in this module.
    ut.main()
| pkreissl/espresso | testsuite/python/lb_boundary_volume_force.py | Python | gpl-3.0 | 3,425 | [
"ESPResSo"
] | a2bb68f022ca049df92ad49ad19f36a7653fc76e6327d5b34ed795da0af9259a |
# (c) 2014, Ovais Tariq <me@ovaistariq.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# vim: tabstop=8
# vim: expandtab
# vim: shiftwidth=4
# vim: softtabstop=4
# stdlib
import subprocess
import os
import sys
import re
import traceback
# project
from checks import AgentCheck
from util import Platform
# 3rd party
import pymysql
# Metric type identifiers understood by MySqlSys._rate_or_gauge_statuses.
GAUGE = "gauge"
RATE = "rate"
# Maps a mysql-sys schema value to its (datadog metric name, metric type).
METRICS_MAP = {
    'Ps_digest_95th_percentile_by_avg_us': ('mysql.sys.query_exec_time_95th_per_us', GAUGE)
}
class MySqlSys(AgentCheck):
    """Datadog agent check reporting metrics from the MarkLeith mysql-sys schema.

    Requires MySQL >= 5.6.5 (performance_schema statement digests) and the
    'sys' schema (https://github.com/MarkLeith/mysql-sys) to be installed on
    the monitored server.
    """

    def __init__(self, name, init_config, agentConfig):
        AgentCheck.__init__(self, name, init_config, agentConfig)
        # All queries below run against the mysql-sys schema.
        self.schema_name = 'sys'

    def get_library_versions(self):
        """Report the version of the MySQL driver used by this check."""
        return {"pymysql": pymysql.__version__}

    def check(self, instance):
        """Entry point called by the collector for each configured instance.

        Raises:
            Exception: when connection settings are incomplete, the server is
                too old, or the mysql-sys schema is missing.
        """
        host, port, user, password, mysql_sock, defaults_file, tags, options = self._get_config(instance)

        if (not host or not user) and not defaults_file:
            raise Exception("Mysql host and user are needed.")

        db = self._connect(host, port, mysql_sock, user, password, defaults_file)

        # check that we are running the correct MySQL version
        if not self._version_greater_565(db, host):
            raise Exception("MySQL version >= 5.6.5 is required.")

        # check that mysql_sys is installed
        if not self._is_mysql_sys_schema_installed(db):
            raise Exception("The mysql_sys utility is not installed. Please visit https://github.com/MarkLeith/mysql-sys for installation instructions")

        # Metric collection
        self._collect_metrics(host, db, tags, options)

    def _get_config(self, instance):
        """Extract connection settings from the instance configuration."""
        host = instance.get('server', '')
        user = instance.get('user', '')
        port = int(instance.get('port', 0))
        password = instance.get('pass', '')
        mysql_sock = instance.get('sock', '')
        defaults_file = instance.get('defaults_file', '')
        tags = instance.get('tags', None)
        options = instance.get('options', {})

        return host, port, user, password, mysql_sock, defaults_file, tags, options

    def _connect(self, host, port, mysql_sock, user, password, defaults_file):
        """Open a pymysql connection to the sys schema.

        Connection method precedence: defaults file, then unix socket, then
        TCP with an explicit port, then TCP on the default port.
        """
        if defaults_file != '':
            db = pymysql.connect(read_default_file=defaults_file,
                                 db=self.schema_name)
        elif mysql_sock != '':
            db = pymysql.connect(unix_socket=mysql_sock,
                                 user=user,
                                 passwd=password,
                                 db=self.schema_name)
        elif port:
            db = pymysql.connect(host=host,
                                 port=port,
                                 user=user,
                                 passwd=password,
                                 db=self.schema_name)
        else:
            db = pymysql.connect(host=host,
                                 user=user,
                                 passwd=password,
                                 db=self.schema_name)
        self.log.debug("Connected to MySQL")

        return db

    def _collect_metrics(self, host, db, tags, options):
        """Gather all mysql-sys values and submit them to Datadog."""
        mysql_sys_metrics = dict()

        # Compute 95th percentile query execution time in microseconds
        mysql_sys_metrics['Ps_digest_95th_percentile_by_avg_us'] = self._get_query_exec_time_95th_per_us(db)

        # Send the metrics to Datadog based on the type of the metric
        self._rate_or_gauge_statuses(METRICS_MAP, mysql_sys_metrics, tags)

    def _rate_or_gauge_statuses(self, statuses, dbResults, tags):
        """Submit each collected status as a rate or gauge per its declared type."""
        # .items() instead of .iteritems(): works on both Python 2 and 3.
        for status, metric in statuses.items():
            metric_name, metric_type = metric
            value = self._collect_scalar(status, dbResults)
            if value is not None:
                if metric_type == RATE:
                    self.rate(metric_name, value, tags=tags)
                elif metric_type == GAUGE:
                    self.gauge(metric_name, value, tags=tags)

    def _get_query_exec_time_95th_per_us(self, db):
        # Fetches the 95th percentile query execution time and returns the value
        # in microseconds
        cursor = db.cursor()
        cursor.execute("select * from x$ps_digest_95th_percentile_by_avg_us")

        if cursor.rowcount != 1:
            raise Exception("Failed to fetch record from the table x$ps_digest_95th_percentile_by_avg_us")

        row = cursor.fetchone()
        query_exec_time_95th_per = row[0]

        return query_exec_time_95th_per

    def _version_greater_565(self, db, host):
        """Return True when the server version is at least 5.6.5."""
        # some of the performance_schema tables such as events_statements_%
        # tables were only introduced in MySQL 5.6.5. For reference see this
        # this link from the manual:
        # http://dev.mysql.com/doc/refman/5.6/en/performance-schema-statement-digests.html

        # some patch version numbers contain letters (e.g. 5.0.51a)
        # so let's be careful when we compute the version number
        greater_565 = False
        try:
            mysql_version = self._get_version(db, host)
            self.log.debug("MySQL version %s" % mysql_version)

            major = int(mysql_version[0])
            minor = int(mysql_version[1])
            patchlevel = int(re.match(r"([0-9]+)", mysql_version[2]).group(1))

            # Bugfix: use >= so that 5.6.5 itself (where statement digests
            # were introduced) passes, matching the ">= 5.6.5" error message
            # raised by check().
            if (major, minor, patchlevel) >= (5, 6, 5):
                greater_565 = True
        # "except E as e" is valid on Python 2.6+ and required on Python 3.
        except Exception as exception:
            self.warning("Cannot compute mysql version, assuming older than 5.6.5: %s" % str(exception))

        return greater_565

    def _get_version(self, db, host):
        # Get MySQL version
        cursor = db.cursor()
        cursor.execute('SELECT VERSION()')
        result = cursor.fetchone()
        cursor.close()
        del cursor

        # Version might include a description e.g. 4.1.26-log.
        # See http://dev.mysql.com/doc/refman/4.1/en/information-functions.html#function_version
        version = result[0].split('-')
        version = version[0].split('.')
        return version

    def _is_mysql_sys_schema_installed(self, db):
        """Return True when the sys schema's version table is readable."""
        cursor = db.cursor()
        return_val = False

        cursor.execute("select sys_version from version")
        if cursor.rowcount > 0:
            return_val = True

        cursor.close()
        del cursor

        return return_val

    def _collect_scalar(self, key, data):
        """Fetch *key* from *data* coerced to float, or None when absent."""
        return self._collect_type(key, data, float)

    def _collect_string(self, key, data):
        # NOTE(review): `unicode` is a Python 2 builtin; this helper must be
        # changed to `str` if the check is ever ported to Python 3.
        return self._collect_type(key, data, unicode)

    def _collect_type(self, key, data, the_type):
        """Fetch *key* from *data* coerced to *the_type*, or None when absent."""
        self.log.debug("Collecting data with %s" % key)
        if key not in data:
            self.log.debug("%s returned None" % key)
            return None
        self.log.debug("Collecting done, value %s" % data[key])
        return the_type(data[key])
| ovaistariq/datadog-agent-checks | mysql_sys.py | Python | gpl-3.0 | 7,470 | [
"VisIt"
] | 19fb8af10eaa130e3b7cf17230e4b63d077e7b6961676beaec2b5f3748e0d77f |
"""
Codes for creating and manipulating gate filters. New functions: use of trained
Gaussian Mixture Models to remove noise and clutter from CPOL data before 2009.
@title: filtering.py
@author: Valentin Louf <valentin.louf@bom.gov.au>
@institutions: Monash University and the Australian Bureau of Meteorology
@created: 20/11/2017
@date: 25/02/2021
.. autosummary::
:toctree: generated/
texture
get_clustering
get_gatefilter_GMM
do_gatefilter_cpol
do_gatefilter
"""
# Libraries
import os
import gzip
import pickle
import pyart
import cftime
import numpy as np
import pandas as pd
def texture(data: np.ndarray) -> np.ndarray:
    """
    Compute the texture of data.

    Compute the texture of the data by comparing values with a 3x3 neighborhood
    (based on :cite:`Gourley2007`). Each gate's texture is the root-mean-square
    difference to its eight neighbours; ``np.roll`` wraps at the array edges.
    Neighbours that are NaN are excluded from the average, and gates that are
    themselves NaN keep a NaN texture. (Wradlib function)

    Parameters:
    ==========
    data : :class:`numpy:numpy.ndarray`
        multi-dimensional array with shape (..., number of beams, number
        of range bins)

    Returns:
    =======
    texture : :class:`numpy:numpy.ndarray`
        array of textures with the same shape as data
    """
    # The eight (row, column) offsets of a 3x3 neighbourhood.
    shifts = ((1, 0), (-1, 0), (0, 1), (0, -1),
              (1, 1), (1, -1), (-1, 1), (-1, -1))
    neighbours = np.array(
        [np.roll(np.roll(data, dy, -2), dx, -1) for dy, dx in shifts]
    )

    # Number of non-NaN neighbours for each gate.
    valid_count = np.sum(~np.isnan(neighbours), axis=0)

    # Sum of squared differences, treating NaN differences as zero so they
    # do not poison the accumulation.
    squared_diff = np.zeros(data.shape)
    for layer in neighbours:
        delta = data - layer
        delta[np.isnan(delta)] = 0
        squared_diff += delta ** 2

    # A NaN gate must keep a NaN texture.
    squared_diff[np.isnan(data)] = np.nan

    return np.sqrt(squared_diff / valid_count)
def get_clustering(radar, vel_name: str = "VEL", phidp_name: str = "PHIDP", zdr_name: str = "ZDR"):
    """
    Create cluster using a trained Gaussian Mixture Model (I use scikit-learn)
    to cluster the radar data. Cluster 5 is clutter and 2 is noise. Cluster 1
    correponds to a high gradient on PHIDP (folding), so it may corresponds to
    either real data that fold or noise. A threshold on reflectivity should be
    used on cluster 1.

    Parameters:
    ===========
    radar:
        Py-ART radar structure.
    vel_name: str
        Velocity field name.
    phidp_name: str
        Name of the PHIDP field.
    zdr_name: str
        Name of the differential_reflectivity field.

    Returns:
    ========
    cluster: ndarray
        Data ID using GMM (5: clutter, 2: noise, and 1: high-phidp gradient).
        Gates that could not be classified (NaN features) are labelled 0.
    """
    # Load and deserialize the pre-trained GMM shipped with the package.
    location = os.path.dirname(os.path.realpath(__file__))
    my_file = os.path.join(location, "data", "GM_model_CPOL.pkl.gz")
    with gzip.GzipFile(my_file, "r") as gzid:
        gmm = pickle.load(gzid)

    # Features: texture of velocity, PHIDP and ZDR, one row per gate.
    df_orig = pd.DataFrame(
        {
            "VEL": texture(radar.fields[vel_name]["data"]).flatten(),
            "PHIDP": texture(radar.fields[phidp_name]["data"]).flatten(),
            "ZDR": texture(radar.fields[zdr_name]["data"]).flatten(),
        }
    )
    # Rows containing NaN cannot be classified.
    df = df_orig.dropna()
    # Fix: reuse the index of the already-filtered frame instead of running
    # a second, redundant dropna() pass over the full feature table.
    valid_index = df.index
    clusters = gmm.predict(df)

    # Map the flat labels (offset by +1 so 0 means "unclassified") back onto
    # the (time, range) grid.
    r = radar.range["data"]
    time = radar.time["data"]
    R, _ = np.meshgrid(r, time)
    clus = np.zeros_like(R.flatten())
    clus[valid_index] = clusters + 1
    cluster = clus.reshape(R.shape)

    return cluster
def get_gatefilter_GMM(
    radar, refl_name: str = "DBZ", vel_name: str = "VEL", phidp_name: str = "PHIDP", zdr_name: str = "ZDR"
):
    """
    Build a gate filter for CPOL data before 2009 from the trained Gaussian
    Mixture Model. The function:
    1) Clusters every gate with the GMM (via get_clustering).
    2) Flags low-reflectivity gates inside the ambiguous cluster 1.
    3) Excludes clutter (cluster 5), noise (cluster 2) and the flagged gates,
       then despeckles the result.
    4) Removes the temporary helper fields from the radar object.

    Parameters:
    ===========
    radar:
        Py-ART radar structure.
    refl_name: str
        Reflectivity field name.
    vel_name: str
        Velocity field name.
    phidp_name: str
        Name of the PHIDP field.
    zdr_name: str
        Name of the differential_reflectivity field.

    Returns:
    ========
    gatefilter: GateFilter
        Gate filter (excluding all bad data).
    """
    labels = get_clustering(radar, vel_name=vel_name, phidp_name=phidp_name, zdr_name=zdr_name)
    radar.add_field_like(refl_name, "CLUS", labels, replace_existing=True)

    # Cluster 1 mixes folded-but-real echoes with noise: flag only the gates
    # whose reflectivity stays below 20 dBZ.
    weak_cluster1 = (labels == 1) & (radar.fields[refl_name]["data"] < 20)
    radar.add_field_like(refl_name, "TPOS", weak_cluster1, replace_existing=True)

    # Exclude clutter, noise and the flagged ambiguous gates.
    gatefilter = pyart.filters.GateFilter(radar)
    for field, bad_value in (("CLUS", 5), ("CLUS", 2), ("TPOS", 1)):
        gatefilter.exclude_equal(field, bad_value)
    gatefilter = pyart.correct.despeckle_field(radar, refl_name, gatefilter=gatefilter)

    # Drop the temporary helper fields.
    for key in ("TPOS", "CLUS"):
        radar.fields.pop(key, None)

    return gatefilter
def do_gatefilter_cpol(
    radar,
    refl_name: str = "DBZ",
    phidp_name: str = "PHIDP",
    rhohv_name: str = "RHOHV_CORR",
    zdr_name: str = "ZDR",
    snr_name: str = "SNR",
    vel_name: str = "VEL",
):
    """
    Filtering function adapted to CPOL: GMM-based clutter/noise removal
    followed by thresholds on SNR, ZDR, reflectivity and (close-range) RHOHV,
    plus a range cutoff for the March 1999 ring artefacts.

    Parameters:
    ===========
    radar:
        Py-ART radar structure.
    refl_name: str
        Reflectivity field name.
    rhohv_name: str
        Cross correlation ratio field name.
    zdr_name: str
        Name of the differential_reflectivity field.
    snr_name: str
        Name of the signal-to-noise field.
    vel_name: str
        Velocity field name.

    Returns:
    ========
    gf_clean: GateFilter
        Gate filter (excluding all bad data).
    """
    start_date = cftime.num2pydate(radar.time["data"][0], radar.time["units"])

    # GMM-based clutter and noise identification.
    gf = get_gatefilter_GMM(
        radar, refl_name=refl_name, vel_name=vel_name, phidp_name=phidp_name, zdr_name=zdr_name,
    )

    # 2D map of the range of every gate (one row per ray).
    gate_range, _ = np.meshgrid(radar.range["data"], radar.azimuth["data"])

    gf.exclude_below(snr_name, 9)
    gf.exclude_outside(zdr_name, -3.0, 7.0)
    gf.exclude_outside(refl_name, -20.0, 80.0)

    # Low RHOHV is only treated as a clutter signature within 90 km of the
    # radar; beyond that the gate is kept regardless.
    rhohv = radar.fields[rhohv_name]["data"]
    keep_mask = np.ones_like(rhohv)
    keep_mask[(gate_range < 90e3) & (rhohv < 0.7)] = 0
    radar.add_field_like(refl_name, "TMPRH", keep_mask)
    gf.exclude_equal("TMPRH", 0)

    # The March 1999 data show spurious rings beyond 140 km: cut them out.
    if start_date.year == 1999 and start_date.month == 3:
        radar.add_field_like(refl_name, "RRR", gate_range)
        gf.exclude_above("RRR", 140e3)

    gf_clean = pyart.correct.despeckle_field(radar, refl_name, gatefilter=gf)

    # Remove the temporary helper fields (missing keys are ignored).
    for key in ("NDBZ", "RRR", "TMPRH"):
        radar.fields.pop(key, None)

    return gf_clean
def do_gatefilter(
    radar,
    refl_name: str = "DBZ",
    phidp_name: str = "PHIDP",
    rhohv_name: str = "RHOHV_CORR",
    zdr_name: str = "ZDR",
    snr_name: str = "SNR",
):
    """
    Basic filtering function for dual-polarisation data: removes physically
    implausible ZDR/DBZ values, noisy gates identified through the PHIDP
    texture, low cross-correlation gates, then despeckles.

    Parameters:
    ===========
    radar:
        Py-ART radar structure.
    refl_name: str
        Reflectivity field name.
    rhohv_name: str
        Cross correlation ratio field name.
    zdr_name: str
        Name of the differential_reflectivity field.
    snr_name: str
        Name of the signal-to-noise field (kept for interface compatibility).

    Returns:
    ========
    despeckled: GateFilter
        Gate filter (excluding all bad data).
    """
    gatefilter = pyart.filters.GateFilter(radar)

    # Physically implausible values.
    gatefilter.exclude_outside(zdr_name, -6.0, 7.0)
    gatefilter.exclude_outside(refl_name, -20.0, 80.0)

    # A high PHIDP texture is characteristic of noise.
    phidp_texture = texture(radar.fields[phidp_name]["data"])
    radar.add_field_like(phidp_name, "PHITXT", phidp_texture)
    gatefilter.exclude_above("PHITXT", 20)
    gatefilter.exclude_below(rhohv_name, 0.6)

    despeckled = pyart.correct.despeckle_field(radar, refl_name, gatefilter=gatefilter)

    # Drop the temporary texture field; ignore any failure to do so.
    try:
        radar.fields.pop("PHITXT")
    except Exception:
        pass

    return despeckled
"Gaussian"
] | b57a3c97ef1879760983ef025c83f50b5884c630ae133a1382359ef6b453fe50 |
# -*- coding: UTF-8 -*-
"""
Data literal storing emoji names and unicode codes
"""
# Public lookup tables: name -> codepoint sequence and the reverse mapping,
# plus the alias variants of both.
__all__ = ['EMOJI_UNICODE', 'UNICODE_EMOJI', 'EMOJI_ALIAS_UNICODE', 'UNICODE_EMOJI_ALIAS']
EMOJI_UNICODE = {
u':1st_place_medal:': u'\U0001F947',
u':2nd_place_medal:': u'\U0001F948',
u':3rd_place_medal:': u'\U0001F949',
u':AB_button_(blood_type):': u'\U0001F18E',
u':ATM_sign:': u'\U0001F3E7',
u':A_button_(blood_type):': u'\U0001F170',
u':Afghanistan:': u'\U0001F1E6 \U0001F1EB',
u':Albania:': u'\U0001F1E6 \U0001F1F1',
u':Algeria:': u'\U0001F1E9 \U0001F1FF',
u':American_Samoa:': u'\U0001F1E6 \U0001F1F8',
u':Andorra:': u'\U0001F1E6 \U0001F1E9',
u':Angola:': u'\U0001F1E6 \U0001F1F4',
u':Anguilla:': u'\U0001F1E6 \U0001F1EE',
u':Antarctica:': u'\U0001F1E6 \U0001F1F6',
u':Antigua_&_Barbuda:': u'\U0001F1E6 \U0001F1EC',
u':Aquarius:': u'\U00002652',
u':Argentina:': u'\U0001F1E6 \U0001F1F7',
u':Aries:': u'\U00002648',
u':Armenia:': u'\U0001F1E6 \U0001F1F2',
u':Aruba:': u'\U0001F1E6 \U0001F1FC',
u':Ascension_Island:': u'\U0001F1E6 \U0001F1E8',
u':Australia:': u'\U0001F1E6 \U0001F1FA',
u':Austria:': u'\U0001F1E6 \U0001F1F9',
u':Azerbaijan:': u'\U0001F1E6 \U0001F1FF',
u':BACK_arrow:': u'\U0001F519',
u':B_button_(blood_type):': u'\U0001F171',
u':Bahamas:': u'\U0001F1E7 \U0001F1F8',
u':Bahrain:': u'\U0001F1E7 \U0001F1ED',
u':Bangladesh:': u'\U0001F1E7 \U0001F1E9',
u':Barbados:': u'\U0001F1E7 \U0001F1E7',
u':Belarus:': u'\U0001F1E7 \U0001F1FE',
u':Belgium:': u'\U0001F1E7 \U0001F1EA',
u':Belize:': u'\U0001F1E7 \U0001F1FF',
u':Benin:': u'\U0001F1E7 \U0001F1EF',
u':Bermuda:': u'\U0001F1E7 \U0001F1F2',
u':Bhutan:': u'\U0001F1E7 \U0001F1F9',
u':Bolivia:': u'\U0001F1E7 \U0001F1F4',
u':Bosnia_&_Herzegovina:': u'\U0001F1E7 \U0001F1E6',
u':Botswana:': u'\U0001F1E7 \U0001F1FC',
u':Bouvet_Island:': u'\U0001F1E7 \U0001F1FB',
u':Brazil:': u'\U0001F1E7 \U0001F1F7',
u':British_Indian_Ocean_Territory:': u'\U0001F1EE \U0001F1F4',
u':British_Virgin_Islands:': u'\U0001F1FB \U0001F1EC',
u':Brunei:': u'\U0001F1E7 \U0001F1F3',
u':Bulgaria:': u'\U0001F1E7 \U0001F1EC',
u':Burkina_Faso:': u'\U0001F1E7 \U0001F1EB',
u':Burundi:': u'\U0001F1E7 \U0001F1EE',
u':CL_button:': u'\U0001F191',
u':COOL_button:': u'\U0001F192',
u':Cambodia:': u'\U0001F1F0 \U0001F1ED',
u':Cameroon:': u'\U0001F1E8 \U0001F1F2',
u':Canada:': u'\U0001F1E8 \U0001F1E6',
u':Canary_Islands:': u'\U0001F1EE \U0001F1E8',
u':Cancer:': u'\U0000264B',
u':Cape_Verde:': u'\U0001F1E8 \U0001F1FB',
u':Capricorn:': u'\U00002651',
u':Caribbean_Netherlands:': u'\U0001F1E7 \U0001F1F6',
u':Cayman_Islands:': u'\U0001F1F0 \U0001F1FE',
u':Central_African_Republic:': u'\U0001F1E8 \U0001F1EB',
u':Ceuta_&_Melilla:': u'\U0001F1EA \U0001F1E6',
u':Chad:': u'\U0001F1F9 \U0001F1E9',
u':Chile:': u'\U0001F1E8 \U0001F1F1',
u':China:': u'\U0001F1E8 \U0001F1F3',
u':Christmas_Island:': u'\U0001F1E8 \U0001F1FD',
u':Christmas_tree:': u'\U0001F384',
u':Clipperton_Island:': u'\U0001F1E8 \U0001F1F5',
u':Cocos_(Keeling)_Islands:': u'\U0001F1E8 \U0001F1E8',
u':Colombia:': u'\U0001F1E8 \U0001F1F4',
u':Comoros:': u'\U0001F1F0 \U0001F1F2',
u':Congo_-_Brazzaville:': u'\U0001F1E8 \U0001F1EC',
u':Congo_-_Kinshasa:': u'\U0001F1E8 \U0001F1E9',
u':Cook_Islands:': u'\U0001F1E8 \U0001F1F0',
u':Costa_Rica:': u'\U0001F1E8 \U0001F1F7',
u':Croatia:': u'\U0001F1ED \U0001F1F7',
u':Cuba:': u'\U0001F1E8 \U0001F1FA',
u':Curaçao:': u'\U0001F1E8 \U0001F1FC',
u':Cyprus:': u'\U0001F1E8 \U0001F1FE',
u':Czech_Republic:': u'\U0001F1E8 \U0001F1FF',
u':Côte_d’Ivoire:': u'\U0001F1E8 \U0001F1EE',
u':Denmark:': u'\U0001F1E9 \U0001F1F0',
u':Diego_Garcia:': u'\U0001F1E9 \U0001F1EC',
u':Djibouti:': u'\U0001F1E9 \U0001F1EF',
u':Dominica:': u'\U0001F1E9 \U0001F1F2',
u':Dominican_Republic:': u'\U0001F1E9 \U0001F1F4',
u':END_arrow:': u'\U0001F51A',
u':Ecuador:': u'\U0001F1EA \U0001F1E8',
u':Egypt:': u'\U0001F1EA \U0001F1EC',
u':El_Salvador:': u'\U0001F1F8 \U0001F1FB',
u':Equatorial_Guinea:': u'\U0001F1EC \U0001F1F6',
u':Eritrea:': u'\U0001F1EA \U0001F1F7',
u':Estonia:': u'\U0001F1EA \U0001F1EA',
u':Ethiopia:': u'\U0001F1EA \U0001F1F9',
u':European_Union:': u'\U0001F1EA \U0001F1FA',
u':FREE_button:': u'\U0001F193',
u':Falkland_Islands:': u'\U0001F1EB \U0001F1F0',
u':Faroe_Islands:': u'\U0001F1EB \U0001F1F4',
u':Fiji:': u'\U0001F1EB \U0001F1EF',
u':Finland:': u'\U0001F1EB \U0001F1EE',
u':France:': u'\U0001F1EB \U0001F1F7',
u':French_Guiana:': u'\U0001F1EC \U0001F1EB',
u':French_Polynesia:': u'\U0001F1F5 \U0001F1EB',
u':French_Southern_Territories:': u'\U0001F1F9 \U0001F1EB',
u':Gabon:': u'\U0001F1EC \U0001F1E6',
u':Gambia:': u'\U0001F1EC \U0001F1F2',
u':Gemini:': u'\U0000264A',
u':Georgia:': u'\U0001F1EC \U0001F1EA',
u':Germany:': u'\U0001F1E9 \U0001F1EA',
u':Ghana:': u'\U0001F1EC \U0001F1ED',
u':Gibraltar:': u'\U0001F1EC \U0001F1EE',
u':Greece:': u'\U0001F1EC \U0001F1F7',
u':Greenland:': u'\U0001F1EC \U0001F1F1',
u':Grenada:': u'\U0001F1EC \U0001F1E9',
u':Guadeloupe:': u'\U0001F1EC \U0001F1F5',
u':Guam:': u'\U0001F1EC \U0001F1FA',
u':Guatemala:': u'\U0001F1EC \U0001F1F9',
u':Guernsey:': u'\U0001F1EC \U0001F1EC',
u':Guinea:': u'\U0001F1EC \U0001F1F3',
u':Guinea-Bissau:': u'\U0001F1EC \U0001F1FC',
u':Guyana:': u'\U0001F1EC \U0001F1FE',
u':Haiti:': u'\U0001F1ED \U0001F1F9',
u':Heard_&_McDonald_Islands:': u'\U0001F1ED \U0001F1F2',
u':Honduras:': u'\U0001F1ED \U0001F1F3',
u':Hong_Kong_SAR_China:': u'\U0001F1ED \U0001F1F0',
u':Hungary:': u'\U0001F1ED \U0001F1FA',
u':ID_button:': u'\U0001F194',
u':Iceland:': u'\U0001F1EE \U0001F1F8',
u':India:': u'\U0001F1EE \U0001F1F3',
u':Indonesia:': u'\U0001F1EE \U0001F1E9',
u':Iran:': u'\U0001F1EE \U0001F1F7',
u':Iraq:': u'\U0001F1EE \U0001F1F6',
u':Ireland:': u'\U0001F1EE \U0001F1EA',
u':Isle_of_Man:': u'\U0001F1EE \U0001F1F2',
u':Israel:': u'\U0001F1EE \U0001F1F1',
u':Italy:': u'\U0001F1EE \U0001F1F9',
u':Jamaica:': u'\U0001F1EF \U0001F1F2',
u':Japan:': u'\U0001F1EF \U0001F1F5',
u':Japanese_acceptable_button:': u'\U0001F251',
u':Japanese_application_button:': u'\U0001F238',
u':Japanese_bargain_button:': u'\U0001F250',
u':Japanese_castle:': u'\U0001F3EF',
u':Japanese_congratulations_button:': u'\U00003297',
u':Japanese_discount_button:': u'\U0001F239',
u':Japanese_dolls:': u'\U0001F38E',
u':Japanese_free_of_charge_button:': u'\U0001F21A',
u':Japanese_here_button:': u'\U0001F201',
u':Japanese_monthly_amount_button:': u'\U0001F237',
u':Japanese_no_vacancy_button:': u'\U0001F235',
u':Japanese_not_free_of_charge_button:': u'\U0001F236',
u':Japanese_open_for_business_button:': u'\U0001F23A',
u':Japanese_passing_grade_button:': u'\U0001F234',
u':Japanese_post_office:': u'\U0001F3E3',
u':Japanese_prohibited_button:': u'\U0001F232',
u':Japanese_reserved_button:': u'\U0001F22F',
u':Japanese_secret_button:': u'\U00003299',
u':Japanese_service_charge_button:': u'\U0001F202',
u':Japanese_symbol_for_beginner:': u'\U0001F530',
u':Japanese_vacancy_button:': u'\U0001F233',
u':Jersey:': u'\U0001F1EF \U0001F1EA',
u':Jordan:': u'\U0001F1EF \U0001F1F4',
u':Kazakhstan:': u'\U0001F1F0 \U0001F1FF',
u':Kenya:': u'\U0001F1F0 \U0001F1EA',
u':Kiribati:': u'\U0001F1F0 \U0001F1EE',
u':Kosovo:': u'\U0001F1FD \U0001F1F0',
u':Kuwait:': u'\U0001F1F0 \U0001F1FC',
u':Kyrgyzstan:': u'\U0001F1F0 \U0001F1EC',
u':Laos:': u'\U0001F1F1 \U0001F1E6',
u':Latvia:': u'\U0001F1F1 \U0001F1FB',
u':Lebanon:': u'\U0001F1F1 \U0001F1E7',
u':Leo:': u'\U0000264C',
u':Lesotho:': u'\U0001F1F1 \U0001F1F8',
u':Liberia:': u'\U0001F1F1 \U0001F1F7',
u':Libra:': u'\U0000264E',
u':Libya:': u'\U0001F1F1 \U0001F1FE',
u':Liechtenstein:': u'\U0001F1F1 \U0001F1EE',
u':Lithuania:': u'\U0001F1F1 \U0001F1F9',
u':Luxembourg:': u'\U0001F1F1 \U0001F1FA',
u':Macau_SAR_China:': u'\U0001F1F2 \U0001F1F4',
u':Macedonia:': u'\U0001F1F2 \U0001F1F0',
u':Madagascar:': u'\U0001F1F2 \U0001F1EC',
u':Malawi:': u'\U0001F1F2 \U0001F1FC',
u':Malaysia:': u'\U0001F1F2 \U0001F1FE',
u':Maldives:': u'\U0001F1F2 \U0001F1FB',
u':Mali:': u'\U0001F1F2 \U0001F1F1',
u':Malta:': u'\U0001F1F2 \U0001F1F9',
u':Marshall_Islands:': u'\U0001F1F2 \U0001F1ED',
u':Martinique:': u'\U0001F1F2 \U0001F1F6',
u':Mauritania:': u'\U0001F1F2 \U0001F1F7',
u':Mauritius:': u'\U0001F1F2 \U0001F1FA',
u':Mayotte:': u'\U0001F1FE \U0001F1F9',
u':Mexico:': u'\U0001F1F2 \U0001F1FD',
u':Micronesia:': u'\U0001F1EB \U0001F1F2',
u':Moldova:': u'\U0001F1F2 \U0001F1E9',
u':Monaco:': u'\U0001F1F2 \U0001F1E8',
u':Mongolia:': u'\U0001F1F2 \U0001F1F3',
u':Montenegro:': u'\U0001F1F2 \U0001F1EA',
u':Montserrat:': u'\U0001F1F2 \U0001F1F8',
u':Morocco:': u'\U0001F1F2 \U0001F1E6',
u':Mozambique:': u'\U0001F1F2 \U0001F1FF',
u':Mrs._Claus:': u'\U0001F936',
u':Mrs._Claus_dark_skin_tone:': u'\U0001F936 \U0001F3FF',
u':Mrs._Claus_light_skin_tone:': u'\U0001F936 \U0001F3FB',
u':Mrs._Claus_medium-dark_skin_tone:': u'\U0001F936 \U0001F3FE',
u':Mrs._Claus_medium-light_skin_tone:': u'\U0001F936 \U0001F3FC',
u':Mrs._Claus_medium_skin_tone:': u'\U0001F936 \U0001F3FD',
u':Myanmar_(Burma):': u'\U0001F1F2 \U0001F1F2',
u':NEW_button:': u'\U0001F195',
u':NG_button:': u'\U0001F196',
u':Namibia:': u'\U0001F1F3 \U0001F1E6',
u':Nauru:': u'\U0001F1F3 \U0001F1F7',
u':Nepal:': u'\U0001F1F3 \U0001F1F5',
u':Netherlands:': u'\U0001F1F3 \U0001F1F1',
u':New_Caledonia:': u'\U0001F1F3 \U0001F1E8',
u':New_Zealand:': u'\U0001F1F3 \U0001F1FF',
u':Nicaragua:': u'\U0001F1F3 \U0001F1EE',
u':Niger:': u'\U0001F1F3 \U0001F1EA',
u':Nigeria:': u'\U0001F1F3 \U0001F1EC',
u':Niue:': u'\U0001F1F3 \U0001F1FA',
u':Norfolk_Island:': u'\U0001F1F3 \U0001F1EB',
u':North_Korea:': u'\U0001F1F0 \U0001F1F5',
u':Northern_Mariana_Islands:': u'\U0001F1F2 \U0001F1F5',
u':Norway:': u'\U0001F1F3 \U0001F1F4',
u':OK_button:': u'\U0001F197',
u':OK_hand:': u'\U0001F44C',
u':OK_hand_dark_skin_tone:': u'\U0001F44C \U0001F3FF',
u':OK_hand_light_skin_tone:': u'\U0001F44C \U0001F3FB',
u':OK_hand_medium-dark_skin_tone:': u'\U0001F44C \U0001F3FE',
u':OK_hand_medium-light_skin_tone:': u'\U0001F44C \U0001F3FC',
u':OK_hand_medium_skin_tone:': u'\U0001F44C \U0001F3FD',
u':ON!_arrow:': u'\U0001F51B',
u':O_button_(blood_type):': u'\U0001F17E',
u':Oman:': u'\U0001F1F4 \U0001F1F2',
u':Ophiuchus:': u'\U000026CE',
u':P_button:': u'\U0001F17F',
u':Pakistan:': u'\U0001F1F5 \U0001F1F0',
u':Palau:': u'\U0001F1F5 \U0001F1FC',
u':Palestinian_Territories:': u'\U0001F1F5 \U0001F1F8',
u':Panama:': u'\U0001F1F5 \U0001F1E6',
u':Papua_New_Guinea:': u'\U0001F1F5 \U0001F1EC',
u':Paraguay:': u'\U0001F1F5 \U0001F1FE',
u':Peru:': u'\U0001F1F5 \U0001F1EA',
u':Philippines:': u'\U0001F1F5 \U0001F1ED',
u':Pisces:': u'\U00002653',
u':Pitcairn_Islands:': u'\U0001F1F5 \U0001F1F3',
u':Poland:': u'\U0001F1F5 \U0001F1F1',
u':Portugal:': u'\U0001F1F5 \U0001F1F9',
u':Puerto_Rico:': u'\U0001F1F5 \U0001F1F7',
u':Qatar:': u'\U0001F1F6 \U0001F1E6',
u':Romania:': u'\U0001F1F7 \U0001F1F4',
u':Russia:': u'\U0001F1F7 \U0001F1FA',
u':Rwanda:': u'\U0001F1F7 \U0001F1FC',
u':Réunion:': u'\U0001F1F7 \U0001F1EA',
u':SOON_arrow:': u'\U0001F51C',
u':SOS_button:': u'\U0001F198',
u':Sagittarius:': u'\U00002650',
u':Samoa:': u'\U0001F1FC \U0001F1F8',
u':San_Marino:': u'\U0001F1F8 \U0001F1F2',
u':Santa_Claus:': u'\U0001F385',
u':Santa_Claus_dark_skin_tone:': u'\U0001F385 \U0001F3FF',
u':Santa_Claus_light_skin_tone:': u'\U0001F385 \U0001F3FB',
u':Santa_Claus_medium-dark_skin_tone:': u'\U0001F385 \U0001F3FE',
u':Santa_Claus_medium-light_skin_tone:': u'\U0001F385 \U0001F3FC',
u':Santa_Claus_medium_skin_tone:': u'\U0001F385 \U0001F3FD',
u':Saudi_Arabia:': u'\U0001F1F8 \U0001F1E6',
u':Scorpius:': u'\U0000264F',
u':Senegal:': u'\U0001F1F8 \U0001F1F3',
u':Serbia:': u'\U0001F1F7 \U0001F1F8',
u':Seychelles:': u'\U0001F1F8 \U0001F1E8',
u':Sierra_Leone:': u'\U0001F1F8 \U0001F1F1',
u':Singapore:': u'\U0001F1F8 \U0001F1EC',
u':Sint_Maarten:': u'\U0001F1F8 \U0001F1FD',
u':Slovakia:': u'\U0001F1F8 \U0001F1F0',
u':Slovenia:': u'\U0001F1F8 \U0001F1EE',
u':Solomon_Islands:': u'\U0001F1F8 \U0001F1E7',
u':Somalia:': u'\U0001F1F8 \U0001F1F4',
u':South_Africa:': u'\U0001F1FF \U0001F1E6',
u':South_Georgia_&_South_Sandwich_Islands:': u'\U0001F1EC \U0001F1F8',
u':South_Korea:': u'\U0001F1F0 \U0001F1F7',
u':South_Sudan:': u'\U0001F1F8 \U0001F1F8',
u':Spain:': u'\U0001F1EA \U0001F1F8',
u':Sri_Lanka:': u'\U0001F1F1 \U0001F1F0',
u':St._Barthélemy:': u'\U0001F1E7 \U0001F1F1',
u':St._Helena:': u'\U0001F1F8 \U0001F1ED',
u':St._Kitts_&_Nevis:': u'\U0001F1F0 \U0001F1F3',
u':St._Lucia:': u'\U0001F1F1 \U0001F1E8',
u':St._Martin:': u'\U0001F1F2 \U0001F1EB',
u':St._Pierre_&_Miquelon:': u'\U0001F1F5 \U0001F1F2',
u':St._Vincent_&_Grenadines:': u'\U0001F1FB \U0001F1E8',
u':Statue_of_Liberty:': u'\U0001F5FD',
u':Sudan:': u'\U0001F1F8 \U0001F1E9',
u':Suriname:': u'\U0001F1F8 \U0001F1F7',
u':Svalbard_&_Jan_Mayen:': u'\U0001F1F8 \U0001F1EF',
u':Swaziland:': u'\U0001F1F8 \U0001F1FF',
u':Sweden:': u'\U0001F1F8 \U0001F1EA',
u':Switzerland:': u'\U0001F1E8 \U0001F1ED',
u':Syria:': u'\U0001F1F8 \U0001F1FE',
u':São_Tomé_&_Príncipe:': u'\U0001F1F8 \U0001F1F9',
u':TOP_arrow:': u'\U0001F51D',
u':Taiwan:': u'\U0001F1F9 \U0001F1FC',
u':Tajikistan:': u'\U0001F1F9 \U0001F1EF',
u':Tanzania:': u'\U0001F1F9 \U0001F1FF',
u':Taurus:': u'\U00002649',
u':Thailand:': u'\U0001F1F9 \U0001F1ED',
u':Timor-Leste:': u'\U0001F1F9 \U0001F1F1',
u':Togo:': u'\U0001F1F9 \U0001F1EC',
u':Tokelau:': u'\U0001F1F9 \U0001F1F0',
u':Tokyo_tower:': u'\U0001F5FC',
u':Tonga:': u'\U0001F1F9 \U0001F1F4',
u':Trinidad_&_Tobago:': u'\U0001F1F9 \U0001F1F9',
u':Tristan_da_Cunha:': u'\U0001F1F9 \U0001F1E6',
u':Tunisia:': u'\U0001F1F9 \U0001F1F3',
u':Turkey:': u'\U0001F1F9 \U0001F1F7',
u':Turkmenistan:': u'\U0001F1F9 \U0001F1F2',
u':Turks_&_Caicos_Islands:': u'\U0001F1F9 \U0001F1E8',
u':Tuvalu:': u'\U0001F1F9 \U0001F1FB',
u':U.S._Outlying_Islands:': u'\U0001F1FA \U0001F1F2',
u':U.S._Virgin_Islands:': u'\U0001F1FB \U0001F1EE',
u':UP!_button:': u'\U0001F199',
u':Uganda:': u'\U0001F1FA \U0001F1EC',
u':Ukraine:': u'\U0001F1FA \U0001F1E6',
u':United_Arab_Emirates:': u'\U0001F1E6 \U0001F1EA',
u':United_Kingdom:': u'\U0001F1EC \U0001F1E7',
u':United_Nations:': u'\U0001F1FA \U0001F1F3',
u':United_States:': u'\U0001F1FA \U0001F1F8',
u':Uruguay:': u'\U0001F1FA \U0001F1FE',
u':Uzbekistan:': u'\U0001F1FA \U0001F1FF',
u':VS_button:': u'\U0001F19A',
u':Vanuatu:': u'\U0001F1FB \U0001F1FA',
u':Vatican_City:': u'\U0001F1FB \U0001F1E6',
u':Venezuela:': u'\U0001F1FB \U0001F1EA',
u':Vietnam:': u'\U0001F1FB \U0001F1F3',
u':Virgo:': u'\U0000264D',
u':Wallis_&_Futuna:': u'\U0001F1FC \U0001F1EB',
u':Western_Sahara:': u'\U0001F1EA \U0001F1ED',
u':Yemen:': u'\U0001F1FE \U0001F1EA',
u':Zambia:': u'\U0001F1FF \U0001F1F2',
u':Zimbabwe:': u'\U0001F1FF \U0001F1FC',
u':admission_tickets:': u'\U0001F39F',
u':aerial_tramway:': u'\U0001F6A1',
u':airplane:': u'\U00002708',
u':airplane_arrival:': u'\U0001F6EC',
u':airplane_departure:': u'\U0001F6EB',
u':alarm_clock:': u'\U000023F0',
u':alembic:': u'\U00002697',
u':alien:': u'\U0001F47D',
u':alien_monster:': u'\U0001F47E',
u':ambulance:': u'\U0001F691',
u':american_football:': u'\U0001F3C8',
u':amphora:': u'\U0001F3FA',
u':anchor:': u'\U00002693',
u':anger_symbol:': u'\U0001F4A2',
u':angry_face:': u'\U0001F620',
u':angry_face_with_horns:': u'\U0001F47F',
u':anguished_face:': u'\U0001F627',
u':ant:': u'\U0001F41C',
u':antenna_bars:': u'\U0001F4F6',
u':anticlockwise_arrows_button:': u'\U0001F504',
u':articulated_lorry:': u'\U0001F69B',
u':artist_palette:': u'\U0001F3A8',
u':astonished_face:': u'\U0001F632',
u':atom_symbol:': u'\U0000269B',
u':automobile:': u'\U0001F697',
u':avocado:': u'\U0001F951',
u':baby:': u'\U0001F476',
u':baby_angel:': u'\U0001F47C',
u':baby_angel_dark_skin_tone:': u'\U0001F47C \U0001F3FF',
u':baby_angel_light_skin_tone:': u'\U0001F47C \U0001F3FB',
u':baby_angel_medium-dark_skin_tone:': u'\U0001F47C \U0001F3FE',
u':baby_angel_medium-light_skin_tone:': u'\U0001F47C \U0001F3FC',
u':baby_angel_medium_skin_tone:': u'\U0001F47C \U0001F3FD',
u':baby_bottle:': u'\U0001F37C',
u':baby_chick:': u'\U0001F424',
u':baby_dark_skin_tone:': u'\U0001F476 \U0001F3FF',
u':baby_light_skin_tone:': u'\U0001F476 \U0001F3FB',
u':baby_medium-dark_skin_tone:': u'\U0001F476 \U0001F3FE',
u':baby_medium-light_skin_tone:': u'\U0001F476 \U0001F3FC',
u':baby_medium_skin_tone:': u'\U0001F476 \U0001F3FD',
u':baby_symbol:': u'\U0001F6BC',
u':backhand_index_pointing_down:': u'\U0001F447',
u':backhand_index_pointing_down_dark_skin_tone:': u'\U0001F447 \U0001F3FF',
u':backhand_index_pointing_down_light_skin_tone:': u'\U0001F447 \U0001F3FB',
u':backhand_index_pointing_down_medium-dark_skin_tone:': u'\U0001F447 \U0001F3FE',
u':backhand_index_pointing_down_medium-light_skin_tone:': u'\U0001F447 \U0001F3FC',
u':backhand_index_pointing_down_medium_skin_tone:': u'\U0001F447 \U0001F3FD',
u':backhand_index_pointing_left:': u'\U0001F448',
u':backhand_index_pointing_left_dark_skin_tone:': u'\U0001F448 \U0001F3FF',
u':backhand_index_pointing_left_light_skin_tone:': u'\U0001F448 \U0001F3FB',
u':backhand_index_pointing_left_medium-dark_skin_tone:': u'\U0001F448 \U0001F3FE',
u':backhand_index_pointing_left_medium-light_skin_tone:': u'\U0001F448 \U0001F3FC',
u':backhand_index_pointing_left_medium_skin_tone:': u'\U0001F448 \U0001F3FD',
u':backhand_index_pointing_right:': u'\U0001F449',
u':backhand_index_pointing_right_dark_skin_tone:': u'\U0001F449 \U0001F3FF',
u':backhand_index_pointing_right_light_skin_tone:': u'\U0001F449 \U0001F3FB',
u':backhand_index_pointing_right_medium-dark_skin_tone:': u'\U0001F449 \U0001F3FE',
u':backhand_index_pointing_right_medium-light_skin_tone:': u'\U0001F449 \U0001F3FC',
u':backhand_index_pointing_right_medium_skin_tone:': u'\U0001F449 \U0001F3FD',
u':backhand_index_pointing_up:': u'\U0001F446',
u':backhand_index_pointing_up_dark_skin_tone:': u'\U0001F446 \U0001F3FF',
u':backhand_index_pointing_up_light_skin_tone:': u'\U0001F446 \U0001F3FB',
u':backhand_index_pointing_up_medium-dark_skin_tone:': u'\U0001F446 \U0001F3FE',
u':backhand_index_pointing_up_medium-light_skin_tone:': u'\U0001F446 \U0001F3FC',
u':backhand_index_pointing_up_medium_skin_tone:': u'\U0001F446 \U0001F3FD',
u':bacon:': u'\U0001F953',
u':badminton:': u'\U0001F3F8',
u':baggage_claim:': u'\U0001F6C4',
u':baguette_bread:': u'\U0001F956',
u':balance_scale:': u'\U00002696',
u':balloon:': u'\U0001F388',
u':ballot_box_with_ballot:': u'\U0001F5F3',
u':ballot_box_with_check:': u'\U00002611',
u':banana:': u'\U0001F34C',
u':bank:': u'\U0001F3E6',
u':bar_chart:': u'\U0001F4CA',
u':barber_pole:': u'\U0001F488',
u':baseball:': u'\U000026BE',
u':basketball:': u'\U0001F3C0',
u':bat:': u'\U0001F987',
u':bathtub:': u'\U0001F6C1',
u':battery:': u'\U0001F50B',
u':beach_with_umbrella:': u'\U0001F3D6',
u':bear_face:': u'\U0001F43B',
u':beating_heart:': u'\U0001F493',
u':bed:': u'\U0001F6CF',
u':beer_mug:': u'\U0001F37A',
u':bell:': u'\U0001F514',
u':bell_with_slash:': u'\U0001F515',
u':bellhop_bell:': u'\U0001F6CE',
u':bento_box:': u'\U0001F371',
u':bicycle:': u'\U0001F6B2',
u':bikini:': u'\U0001F459',
u':biohazard:': u'\U00002623',
u':bird:': u'\U0001F426',
u':birthday_cake:': u'\U0001F382',
u':black_circle:': u'\U000026AB',
u':black_flag:': u'\U0001F3F4',
u':black_heart:': u'\U0001F5A4',
u':black_large_square:': u'\U00002B1B',
u':black_medium-small_square:': u'\U000025FE',
u':black_medium_square:': u'\U000025FC',
u':black_nib:': u'\U00002712',
u':black_small_square:': u'\U000025AA',
u':black_square_button:': u'\U0001F532',
u':blond-haired_man:': u'\U0001F471 \U0000200D \U00002642 \U0000FE0F',
u':blond-haired_man_dark_skin_tone:': u'\U0001F471 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':blond-haired_man_light_skin_tone:': u'\U0001F471 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':blond-haired_man_medium-dark_skin_tone:': u'\U0001F471 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':blond-haired_man_medium-light_skin_tone:': u'\U0001F471 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':blond-haired_man_medium_skin_tone:': u'\U0001F471 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':blond-haired_person:': u'\U0001F471',
u':blond-haired_person_dark_skin_tone:': u'\U0001F471 \U0001F3FF',
u':blond-haired_person_light_skin_tone:': u'\U0001F471 \U0001F3FB',
u':blond-haired_person_medium-dark_skin_tone:': u'\U0001F471 \U0001F3FE',
u':blond-haired_person_medium-light_skin_tone:': u'\U0001F471 \U0001F3FC',
u':blond-haired_person_medium_skin_tone:': u'\U0001F471 \U0001F3FD',
u':blond-haired_woman:': u'\U0001F471 \U0000200D \U00002640 \U0000FE0F',
u':blond-haired_woman_dark_skin_tone:': u'\U0001F471 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':blond-haired_woman_light_skin_tone:': u'\U0001F471 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':blond-haired_woman_medium-dark_skin_tone:': u'\U0001F471 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':blond-haired_woman_medium-light_skin_tone:': u'\U0001F471 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':blond-haired_woman_medium_skin_tone:': u'\U0001F471 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':blossom:': u'\U0001F33C',
u':blowfish:': u'\U0001F421',
u':blue_book:': u'\U0001F4D8',
u':blue_circle:': u'\U0001F535',
u':blue_heart:': u'\U0001F499',
u':boar:': u'\U0001F417',
u':bomb:': u'\U0001F4A3',
u':bookmark:': u'\U0001F516',
u':bookmark_tabs:': u'\U0001F4D1',
u':books:': u'\U0001F4DA',
u':bottle_with_popping_cork:': u'\U0001F37E',
u':bouquet:': u'\U0001F490',
u':bow_and_arrow:': u'\U0001F3F9',
u':bowling:': u'\U0001F3B3',
u':boxing_glove:': u'\U0001F94A',
u':boy:': u'\U0001F466',
u':boy_dark_skin_tone:': u'\U0001F466 \U0001F3FF',
u':boy_light_skin_tone:': u'\U0001F466 \U0001F3FB',
u':boy_medium-dark_skin_tone:': u'\U0001F466 \U0001F3FE',
u':boy_medium-light_skin_tone:': u'\U0001F466 \U0001F3FC',
u':boy_medium_skin_tone:': u'\U0001F466 \U0001F3FD',
u':bread:': u'\U0001F35E',
u':bride_with_veil:': u'\U0001F470',
u':bride_with_veil_dark_skin_tone:': u'\U0001F470 \U0001F3FF',
u':bride_with_veil_light_skin_tone:': u'\U0001F470 \U0001F3FB',
u':bride_with_veil_medium-dark_skin_tone:': u'\U0001F470 \U0001F3FE',
u':bride_with_veil_medium-light_skin_tone:': u'\U0001F470 \U0001F3FC',
u':bride_with_veil_medium_skin_tone:': u'\U0001F470 \U0001F3FD',
u':bridge_at_night:': u'\U0001F309',
u':briefcase:': u'\U0001F4BC',
u':bright_button:': u'\U0001F506',
u':broken_heart:': u'\U0001F494',
u':bug:': u'\U0001F41B',
u':building_construction:': u'\U0001F3D7',
u':burrito:': u'\U0001F32F',
u':bus:': u'\U0001F68C',
u':bus_stop:': u'\U0001F68F',
u':bust_in_silhouette:': u'\U0001F464',
u':busts_in_silhouette:': u'\U0001F465',
u':butterfly:': u'\U0001F98B',
u':cactus:': u'\U0001F335',
u':calendar:': u'\U0001F4C5',
u':call_me_hand:': u'\U0001F919',
u':call_me_hand_dark_skin_tone:': u'\U0001F919 \U0001F3FF',
u':call_me_hand_light_skin_tone:': u'\U0001F919 \U0001F3FB',
u':call_me_hand_medium-dark_skin_tone:': u'\U0001F919 \U0001F3FE',
u':call_me_hand_medium-light_skin_tone:': u'\U0001F919 \U0001F3FC',
u':call_me_hand_medium_skin_tone:': u'\U0001F919 \U0001F3FD',
u':camel:': u'\U0001F42A',
u':camera:': u'\U0001F4F7',
u':camera_with_flash:': u'\U0001F4F8',
u':camping:': u'\U0001F3D5',
u':candle:': u'\U0001F56F',
u':candy:': u'\U0001F36C',
u':canoe:': u'\U0001F6F6',
u':card_file_box:': u'\U0001F5C3',
u':card_index:': u'\U0001F4C7',
u':card_index_dividers:': u'\U0001F5C2',
u':carousel_horse:': u'\U0001F3A0',
u':carp_streamer:': u'\U0001F38F',
u':carrot:': u'\U0001F955',
u':castle:': u'\U0001F3F0',
u':cat:': u'\U0001F408',
u':cat_face:': u'\U0001F431',
u':cat_face_with_tears_of_joy:': u'\U0001F639',
u':cat_face_with_wry_smile:': u'\U0001F63C',
u':chains:': u'\U000026D3',
u':chart_decreasing:': u'\U0001F4C9',
u':chart_increasing:': u'\U0001F4C8',
u':chart_increasing_with_yen:': u'\U0001F4B9',
u':cheese_wedge:': u'\U0001F9C0',
u':chequered_flag:': u'\U0001F3C1',
u':cherries:': u'\U0001F352',
u':cherry_blossom:': u'\U0001F338',
u':chestnut:': u'\U0001F330',
u':chicken:': u'\U0001F414',
u':children_crossing:': u'\U0001F6B8',
u':chipmunk:': u'\U0001F43F',
u':chocolate_bar:': u'\U0001F36B',
u':church:': u'\U000026EA',
u':cigarette:': u'\U0001F6AC',
u':cinema:': u'\U0001F3A6',
u':circled_M:': u'\U000024C2',
u':circus_tent:': u'\U0001F3AA',
u':cityscape:': u'\U0001F3D9',
u':cityscape_at_dusk:': u'\U0001F306',
u':clamp:': u'\U0001F5DC',
u':clapper_board:': u'\U0001F3AC',
u':clapping_hands:': u'\U0001F44F',
u':clapping_hands_dark_skin_tone:': u'\U0001F44F \U0001F3FF',
u':clapping_hands_light_skin_tone:': u'\U0001F44F \U0001F3FB',
u':clapping_hands_medium-dark_skin_tone:': u'\U0001F44F \U0001F3FE',
u':clapping_hands_medium-light_skin_tone:': u'\U0001F44F \U0001F3FC',
u':clapping_hands_medium_skin_tone:': u'\U0001F44F \U0001F3FD',
u':classical_building:': u'\U0001F3DB',
u':clinking_beer_mugs:': u'\U0001F37B',
u':clinking_glasses:': u'\U0001F942',
u':clipboard:': u'\U0001F4CB',
u':clockwise_vertical_arrows:': u'\U0001F503',
u':closed_book:': u'\U0001F4D5',
u':closed_mailbox_with_lowered_flag:': u'\U0001F4EA',
u':closed_mailbox_with_raised_flag:': u'\U0001F4EB',
u':closed_umbrella:': u'\U0001F302',
u':cloud:': u'\U00002601',
u':cloud_with_lightning:': u'\U0001F329',
u':cloud_with_lightning_and_rain:': u'\U000026C8',
u':cloud_with_rain:': u'\U0001F327',
u':cloud_with_snow:': u'\U0001F328',
u':clown_face:': u'\U0001F921',
u':club_suit:': u'\U00002663',
u':clutch_bag:': u'\U0001F45D',
u':cocktail_glass:': u'\U0001F378',
u':coffin:': u'\U000026B0',
u':collision:': u'\U0001F4A5',
u':comet:': u'\U00002604',
u':computer_disk:': u'\U0001F4BD',
u':computer_mouse:': u'\U0001F5B1',
u':confetti_ball:': u'\U0001F38A',
u':confounded_face:': u'\U0001F616',
u':confused_face:': u'\U0001F615',
u':construction:': u'\U0001F6A7',
u':construction_worker:': u'\U0001F477',
u':construction_worker_dark_skin_tone:': u'\U0001F477 \U0001F3FF',
u':construction_worker_light_skin_tone:': u'\U0001F477 \U0001F3FB',
u':construction_worker_medium-dark_skin_tone:': u'\U0001F477 \U0001F3FE',
u':construction_worker_medium-light_skin_tone:': u'\U0001F477 \U0001F3FC',
u':construction_worker_medium_skin_tone:': u'\U0001F477 \U0001F3FD',
u':control_knobs:': u'\U0001F39B',
u':convenience_store:': u'\U0001F3EA',
u':cooked_rice:': u'\U0001F35A',
u':cookie:': u'\U0001F36A',
u':cooking:': u'\U0001F373',
u':copyright:': u'\U000000A9',
u':couch_and_lamp:': u'\U0001F6CB',
u':couple_with_heart:': u'\U0001F491',
u':couple_with_heart_man_man:': u'\U0001F468 \U0000200D \U00002764 \U0000FE0F \U0000200D \U0001F468',
u':couple_with_heart_woman_man:': u'\U0001F469 \U0000200D \U00002764 \U0000FE0F \U0000200D \U0001F468',
u':couple_with_heart_woman_woman:': u'\U0001F469 \U0000200D \U00002764 \U0000FE0F \U0000200D \U0001F469',
u':cow:': u'\U0001F404',
u':cow_face:': u'\U0001F42E',
u':cowboy_hat_face:': u'\U0001F920',
u':crab:': u'\U0001F980',
u':crayon:': u'\U0001F58D',
u':credit_card:': u'\U0001F4B3',
u':crescent_moon:': u'\U0001F319',
u':cricket:': u'\U0001F3CF',
u':crocodile:': u'\U0001F40A',
u':croissant:': u'\U0001F950',
u':cross_mark:': u'\U0000274C',
u':cross_mark_button:': u'\U0000274E',
u':crossed_fingers:': u'\U0001F91E',
u':crossed_fingers_dark_skin_tone:': u'\U0001F91E \U0001F3FF',
u':crossed_fingers_light_skin_tone:': u'\U0001F91E \U0001F3FB',
u':crossed_fingers_medium-dark_skin_tone:': u'\U0001F91E \U0001F3FE',
u':crossed_fingers_medium-light_skin_tone:': u'\U0001F91E \U0001F3FC',
u':crossed_fingers_medium_skin_tone:': u'\U0001F91E \U0001F3FD',
u':crossed_flags:': u'\U0001F38C',
u':crossed_swords:': u'\U00002694',
u':crown:': u'\U0001F451',
u':crying_cat_face:': u'\U0001F63F',
u':crying_face:': u'\U0001F622',
u':crystal_ball:': u'\U0001F52E',
u':cucumber:': u'\U0001F952',
u':curly_loop:': u'\U000027B0',
u':currency_exchange:': u'\U0001F4B1',
u':curry_rice:': u'\U0001F35B',
u':custard:': u'\U0001F36E',
u':customs:': u'\U0001F6C3',
u':cyclone:': u'\U0001F300',
u':dagger:': u'\U0001F5E1',
u':dango:': u'\U0001F361',
u':dark_skin_tone:': u'\U0001F3FF',
u':dashing_away:': u'\U0001F4A8',
u':deciduous_tree:': u'\U0001F333',
u':deer:': u'\U0001F98C',
u':delivery_truck:': u'\U0001F69A',
u':department_store:': u'\U0001F3EC',
u':derelict_house:': u'\U0001F3DA',
u':desert:': u'\U0001F3DC',
u':desert_island:': u'\U0001F3DD',
u':desktop_computer:': u'\U0001F5A5',
u':detective:': u'\U0001F575',
u':detective_dark_skin_tone:': u'\U0001F575 \U0001F3FF',
u':detective_light_skin_tone:': u'\U0001F575 \U0001F3FB',
u':detective_medium-dark_skin_tone:': u'\U0001F575 \U0001F3FE',
u':detective_medium-light_skin_tone:': u'\U0001F575 \U0001F3FC',
u':detective_medium_skin_tone:': u'\U0001F575 \U0001F3FD',
u':diamond_suit:': u'\U00002666',
u':diamond_with_a_dot:': u'\U0001F4A0',
u':dim_button:': u'\U0001F505',
u':direct_hit:': u'\U0001F3AF',
u':disappointed_but_relieved_face:': u'\U0001F625',
u':disappointed_face:': u'\U0001F61E',
u':dizzy:': u'\U0001F4AB',
u':dizzy_face:': u'\U0001F635',
u':dog:': u'\U0001F415',
u':dog_face:': u'\U0001F436',
u':dollar_banknote:': u'\U0001F4B5',
u':dolphin:': u'\U0001F42C',
u':door:': u'\U0001F6AA',
u':dotted_six-pointed_star:': u'\U0001F52F',
u':double_curly_loop:': u'\U000027BF',
u':double_exclamation_mark:': u'\U0000203C',
u':doughnut:': u'\U0001F369',
u':dove:': u'\U0001F54A',
u':down-left_arrow:': u'\U00002199',
u':down-right_arrow:': u'\U00002198',
u':down_arrow:': u'\U00002B07',
u':down_button:': u'\U0001F53D',
u':dragon:': u'\U0001F409',
u':dragon_face:': u'\U0001F432',
u':dress:': u'\U0001F457',
u':drooling_face:': u'\U0001F924',
u':droplet:': u'\U0001F4A7',
u':drum:': u'\U0001F941',
u':duck:': u'\U0001F986',
u':dvd:': u'\U0001F4C0',
u':e-mail:': u'\U0001F4E7',
u':eagle:': u'\U0001F985',
u':ear:': u'\U0001F442',
u':ear_dark_skin_tone:': u'\U0001F442 \U0001F3FF',
u':ear_light_skin_tone:': u'\U0001F442 \U0001F3FB',
u':ear_medium-dark_skin_tone:': u'\U0001F442 \U0001F3FE',
u':ear_medium-light_skin_tone:': u'\U0001F442 \U0001F3FC',
u':ear_medium_skin_tone:': u'\U0001F442 \U0001F3FD',
u':ear_of_corn:': u'\U0001F33D',
u':egg:': u'\U0001F95A',
u':eggplant:': u'\U0001F346',
u':eight-pointed_star:': u'\U00002734',
u':eight-spoked_asterisk:': u'\U00002733',
u':eight-thirty:': u'\U0001F563',
u':eight_o’clock:': u'\U0001F557',
u':eject_button:': u'\U000023CF',
u':electric_plug:': u'\U0001F50C',
u':elephant:': u'\U0001F418',
u':eleven-thirty:': u'\U0001F566',
u':eleven_o’clock:': u'\U0001F55A',
u':envelope:': u'\U00002709',
u':envelope_with_arrow:': u'\U0001F4E9',
u':euro_banknote:': u'\U0001F4B6',
u':evergreen_tree:': u'\U0001F332',
u':exclamation_mark:': u'\U00002757',
u':exclamation_question_mark:': u'\U00002049',
u':expressionless_face:': u'\U0001F611',
u':eye:': u'\U0001F441',
u':eye_in_speech_bubble:': u'\U0001F441 \U0000FE0F \U0000200D \U0001F5E8 \U0000FE0F',
u':eyes:': u'\U0001F440',
u':face_blowing_a_kiss:': u'\U0001F618',
u':face_savouring_delicious_food:': u'\U0001F60B',
u':face_screaming_in_fear:': u'\U0001F631',
u':face_with_cold_sweat:': u'\U0001F613',
u':face_with_head-bandage:': u'\U0001F915',
u':face_with_medical_mask:': u'\U0001F637',
u':face_with_open_mouth:': u'\U0001F62E',
u':face_with_open_mouth_&_cold_sweat:': u'\U0001F630',
u':face_with_rolling_eyes:': u'\U0001F644',
u':face_with_steam_from_nose:': u'\U0001F624',
u':face_with_stuck-out_tongue:': u'\U0001F61B',
u':face_with_stuck-out_tongue_&_closed_eyes:': u'\U0001F61D',
u':face_with_stuck-out_tongue_&_winking_eye:': u'\U0001F61C',
u':face_with_tears_of_joy:': u'\U0001F602',
u':face_with_thermometer:': u'\U0001F912',
u':face_without_mouth:': u'\U0001F636',
u':factory:': u'\U0001F3ED',
u':fallen_leaf:': u'\U0001F342',
u':family:': u'\U0001F46A',
u':family_man_boy:': u'\U0001F468 \U0000200D \U0001F466',
u':family_man_boy_boy:': u'\U0001F468 \U0000200D \U0001F466 \U0000200D \U0001F466',
u':family_man_girl:': u'\U0001F468 \U0000200D \U0001F467',
u':family_man_girl_boy:': u'\U0001F468 \U0000200D \U0001F467 \U0000200D \U0001F466',
u':family_man_girl_girl:': u'\U0001F468 \U0000200D \U0001F467 \U0000200D \U0001F467',
u':family_man_man_boy:': u'\U0001F468 \U0000200D \U0001F468 \U0000200D \U0001F466',
u':family_man_man_boy_boy:': u'\U0001F468 \U0000200D \U0001F468 \U0000200D \U0001F466 \U0000200D \U0001F466',
u':family_man_man_girl:': u'\U0001F468 \U0000200D \U0001F468 \U0000200D \U0001F467',
u':family_man_man_girl_boy:': u'\U0001F468 \U0000200D \U0001F468 \U0000200D \U0001F467 \U0000200D \U0001F466',
u':family_man_man_girl_girl:': u'\U0001F468 \U0000200D \U0001F468 \U0000200D \U0001F467 \U0000200D \U0001F467',
u':family_man_woman_boy:': u'\U0001F468 \U0000200D \U0001F469 \U0000200D \U0001F466',
u':family_man_woman_boy_boy:': u'\U0001F468 \U0000200D \U0001F469 \U0000200D \U0001F466 \U0000200D \U0001F466',
u':family_man_woman_girl:': u'\U0001F468 \U0000200D \U0001F469 \U0000200D \U0001F467',
u':family_man_woman_girl_boy:': u'\U0001F468 \U0000200D \U0001F469 \U0000200D \U0001F467 \U0000200D \U0001F466',
u':family_man_woman_girl_girl:': u'\U0001F468 \U0000200D \U0001F469 \U0000200D \U0001F467 \U0000200D \U0001F467',
u':family_woman_boy:': u'\U0001F469 \U0000200D \U0001F466',
u':family_woman_boy_boy:': u'\U0001F469 \U0000200D \U0001F466 \U0000200D \U0001F466',
u':family_woman_girl:': u'\U0001F469 \U0000200D \U0001F467',
u':family_woman_girl_boy:': u'\U0001F469 \U0000200D \U0001F467 \U0000200D \U0001F466',
u':family_woman_girl_girl:': u'\U0001F469 \U0000200D \U0001F467 \U0000200D \U0001F467',
u':family_woman_woman_boy:': u'\U0001F469 \U0000200D \U0001F469 \U0000200D \U0001F466',
u':family_woman_woman_boy_boy:': u'\U0001F469 \U0000200D \U0001F469 \U0000200D \U0001F466 \U0000200D \U0001F466',
u':family_woman_woman_girl:': u'\U0001F469 \U0000200D \U0001F469 \U0000200D \U0001F467',
u':family_woman_woman_girl_boy:': u'\U0001F469 \U0000200D \U0001F469 \U0000200D \U0001F467 \U0000200D \U0001F466',
u':family_woman_woman_girl_girl:': u'\U0001F469 \U0000200D \U0001F469 \U0000200D \U0001F467 \U0000200D \U0001F467',
u':fast-forward_button:': u'\U000023E9',
u':fast_down_button:': u'\U000023EC',
u':fast_reverse_button:': u'\U000023EA',
u':fast_up_button:': u'\U000023EB',
u':fax_machine:': u'\U0001F4E0',
u':fearful_face:': u'\U0001F628',
u':female_sign:': u'\U00002640',
u':ferris_wheel:': u'\U0001F3A1',
u':ferry:': u'\U000026F4',
u':field_hockey:': u'\U0001F3D1',
u':file_cabinet:': u'\U0001F5C4',
u':file_folder:': u'\U0001F4C1',
u':film_frames:': u'\U0001F39E',
u':film_projector:': u'\U0001F4FD',
u':fire:': u'\U0001F525',
u':fire_engine:': u'\U0001F692',
u':fireworks:': u'\U0001F386',
u':first_quarter_moon:': u'\U0001F313',
u':first_quarter_moon_with_face:': u'\U0001F31B',
u':fish:': u'\U0001F41F',
u':fish_cake_with_swirl:': u'\U0001F365',
u':fishing_pole:': u'\U0001F3A3',
u':five-thirty:': u'\U0001F560',
u':five_o’clock:': u'\U0001F554',
u':flag_in_hole:': u'\U000026F3',
u':flashlight:': u'\U0001F526',
u':fleur-de-lis:': u'\U0000269C',
u':flexed_biceps:': u'\U0001F4AA',
u':flexed_biceps_dark_skin_tone:': u'\U0001F4AA \U0001F3FF',
u':flexed_biceps_light_skin_tone:': u'\U0001F4AA \U0001F3FB',
u':flexed_biceps_medium-dark_skin_tone:': u'\U0001F4AA \U0001F3FE',
u':flexed_biceps_medium-light_skin_tone:': u'\U0001F4AA \U0001F3FC',
u':flexed_biceps_medium_skin_tone:': u'\U0001F4AA \U0001F3FD',
u':floppy_disk:': u'\U0001F4BE',
u':flower_playing_cards:': u'\U0001F3B4',
u':flushed_face:': u'\U0001F633',
u':fog:': u'\U0001F32B',
u':foggy:': u'\U0001F301',
u':folded_hands:': u'\U0001F64F',
u':folded_hands_dark_skin_tone:': u'\U0001F64F \U0001F3FF',
u':folded_hands_light_skin_tone:': u'\U0001F64F \U0001F3FB',
u':folded_hands_medium-dark_skin_tone:': u'\U0001F64F \U0001F3FE',
u':folded_hands_medium-light_skin_tone:': u'\U0001F64F \U0001F3FC',
u':folded_hands_medium_skin_tone:': u'\U0001F64F \U0001F3FD',
u':footprints:': u'\U0001F463',
u':fork_and_knife:': u'\U0001F374',
u':fork_and_knife_with_plate:': u'\U0001F37D',
u':fountain:': u'\U000026F2',
u':fountain_pen:': u'\U0001F58B',
u':four-thirty:': u'\U0001F55F',
u':four_leaf_clover:': u'\U0001F340',
u':four_o’clock:': u'\U0001F553',
u':fox_face:': u'\U0001F98A',
u':framed_picture:': u'\U0001F5BC',
u':french_fries:': u'\U0001F35F',
u':fried_shrimp:': u'\U0001F364',
u':frog_face:': u'\U0001F438',
u':front-facing_baby_chick:': u'\U0001F425',
u':frowning_face:': u'\U00002639',
u':frowning_face_with_open_mouth:': u'\U0001F626',
u':fuel_pump:': u'\U000026FD',
u':full_moon:': u'\U0001F315',
u':full_moon_with_face:': u'\U0001F31D',
u':funeral_urn:': u'\U000026B1',
u':game_die:': u'\U0001F3B2',
u':gear:': u'\U00002699',
u':gem_stone:': u'\U0001F48E',
u':ghost:': u'\U0001F47B',
u':girl:': u'\U0001F467',
u':girl_dark_skin_tone:': u'\U0001F467 \U0001F3FF',
u':girl_light_skin_tone:': u'\U0001F467 \U0001F3FB',
u':girl_medium-dark_skin_tone:': u'\U0001F467 \U0001F3FE',
u':girl_medium-light_skin_tone:': u'\U0001F467 \U0001F3FC',
u':girl_medium_skin_tone:': u'\U0001F467 \U0001F3FD',
u':glass_of_milk:': u'\U0001F95B',
u':glasses:': u'\U0001F453',
u':globe_showing_Americas:': u'\U0001F30E',
u':globe_showing_Asia-Australia:': u'\U0001F30F',
u':globe_showing_Europe-Africa:': u'\U0001F30D',
u':globe_with_meridians:': u'\U0001F310',
u':glowing_star:': u'\U0001F31F',
u':goal_net:': u'\U0001F945',
u':goat:': u'\U0001F410',
u':goblin:': u'\U0001F47A',
u':gorilla:': u'\U0001F98D',
u':graduation_cap:': u'\U0001F393',
u':grapes:': u'\U0001F347',
u':green_apple:': u'\U0001F34F',
u':green_book:': u'\U0001F4D7',
u':green_heart:': u'\U0001F49A',
u':green_salad:': u'\U0001F957',
u':grimacing_face:': u'\U0001F62C',
u':grinning_cat_face_with_smiling_eyes:': u'\U0001F638',
u':grinning_face:': u'\U0001F600',
u':grinning_face_with_smiling_eyes:': u'\U0001F601',
u':growing_heart:': u'\U0001F497',
u':guard:': u'\U0001F482',
u':guard_dark_skin_tone:': u'\U0001F482 \U0001F3FF',
u':guard_light_skin_tone:': u'\U0001F482 \U0001F3FB',
u':guard_medium-dark_skin_tone:': u'\U0001F482 \U0001F3FE',
u':guard_medium-light_skin_tone:': u'\U0001F482 \U0001F3FC',
u':guard_medium_skin_tone:': u'\U0001F482 \U0001F3FD',
u':guitar:': u'\U0001F3B8',
u':hamburger:': u'\U0001F354',
u':hammer:': u'\U0001F528',
u':hammer_and_pick:': u'\U00002692',
u':hammer_and_wrench:': u'\U0001F6E0',
u':hamster_face:': u'\U0001F439',
u':handbag:': u'\U0001F45C',
u':handshake:': u'\U0001F91D',
u':hatching_chick:': u'\U0001F423',
u':headphone:': u'\U0001F3A7',
u':hear-no-evil_monkey:': u'\U0001F649',
u':heart_decoration:': u'\U0001F49F',
u':heart_suit:': u'\U00002665',
u':heart_with_arrow:': u'\U0001F498',
u':heart_with_ribbon:': u'\U0001F49D',
u':heavy_check_mark:': u'\U00002714',
u':heavy_division_sign:': u'\U00002797',
u':heavy_dollar_sign:': u'\U0001F4B2',
u':heavy_heart_exclamation:': u'\U00002763',
u':heavy_large_circle:': u'\U00002B55',
u':heavy_minus_sign:': u'\U00002796',
u':heavy_multiplication_x:': u'\U00002716',
u':heavy_plus_sign:': u'\U00002795',
u':helicopter:': u'\U0001F681',
u':herb:': u'\U0001F33F',
u':hibiscus:': u'\U0001F33A',
u':high-heeled_shoe:': u'\U0001F460',
u':high-speed_train:': u'\U0001F684',
u':high-speed_train_with_bullet_nose:': u'\U0001F685',
u':high_voltage:': u'\U000026A1',
u':hole:': u'\U0001F573',
u':honey_pot:': u'\U0001F36F',
u':honeybee:': u'\U0001F41D',
u':horizontal_traffic_light:': u'\U0001F6A5',
u':horse:': u'\U0001F40E',
u':horse_face:': u'\U0001F434',
u':horse_racing:': u'\U0001F3C7',
u':horse_racing_dark_skin_tone:': u'\U0001F3C7 \U0001F3FF',
u':horse_racing_light_skin_tone:': u'\U0001F3C7 \U0001F3FB',
u':horse_racing_medium-dark_skin_tone:': u'\U0001F3C7 \U0001F3FE',
u':horse_racing_medium-light_skin_tone:': u'\U0001F3C7 \U0001F3FC',
u':horse_racing_medium_skin_tone:': u'\U0001F3C7 \U0001F3FD',
u':hospital:': u'\U0001F3E5',
u':hot_beverage:': u'\U00002615',
u':hot_dog:': u'\U0001F32D',
u':hot_pepper:': u'\U0001F336',
u':hot_springs:': u'\U00002668',
u':hotel:': u'\U0001F3E8',
u':hourglass:': u'\U0000231B',
u':hourglass_with_flowing_sand:': u'\U000023F3',
u':house:': u'\U0001F3E0',
u':house_with_garden:': u'\U0001F3E1',
u':hugging_face:': u'\U0001F917',
u':hundred_points:': u'\U0001F4AF',
u':hushed_face:': u'\U0001F62F',
u':ice_cream:': u'\U0001F368',
u':ice_hockey:': u'\U0001F3D2',
u':ice_skate:': u'\U000026F8',
u':inbox_tray:': u'\U0001F4E5',
u':incoming_envelope:': u'\U0001F4E8',
u':index_pointing_up:': u'\U0000261D',
u':index_pointing_up_dark_skin_tone:': u'\U0000261D \U0001F3FF',
u':index_pointing_up_light_skin_tone:': u'\U0000261D \U0001F3FB',
u':index_pointing_up_medium-dark_skin_tone:': u'\U0000261D \U0001F3FE',
u':index_pointing_up_medium-light_skin_tone:': u'\U0000261D \U0001F3FC',
u':index_pointing_up_medium_skin_tone:': u'\U0000261D \U0001F3FD',
u':information:': u'\U00002139',
u':input_latin_letters:': u'\U0001F524',
u':input_latin_lowercase:': u'\U0001F521',
u':input_latin_uppercase:': u'\U0001F520',
u':input_numbers:': u'\U0001F522',
u':input_symbols:': u'\U0001F523',
u':jack-o-lantern:': u'\U0001F383',
u':jeans:': u'\U0001F456',
u':joker:': u'\U0001F0CF',
u':joystick:': u'\U0001F579',
u':kaaba:': u'\U0001F54B',
u':key:': u'\U0001F511',
u':keyboard:': u'\U00002328',
u':keycap_#:': u'\U00000023 \U0000FE0F \U000020E3',
u':keycap_*:': u'\U0000002A \U0000FE0F \U000020E3',
u':keycap_0:': u'\U00000030 \U0000FE0F \U000020E3',
u':keycap_1:': u'\U00000031 \U0000FE0F \U000020E3',
u':keycap_10:': u'\U0001F51F',
u':keycap_2:': u'\U00000032 \U0000FE0F \U000020E3',
u':keycap_3:': u'\U00000033 \U0000FE0F \U000020E3',
u':keycap_4:': u'\U00000034 \U0000FE0F \U000020E3',
u':keycap_5:': u'\U00000035 \U0000FE0F \U000020E3',
u':keycap_6:': u'\U00000036 \U0000FE0F \U000020E3',
u':keycap_7:': u'\U00000037 \U0000FE0F \U000020E3',
u':keycap_8:': u'\U00000038 \U0000FE0F \U000020E3',
u':keycap_9:': u'\U00000039 \U0000FE0F \U000020E3',
u':kick_scooter:': u'\U0001F6F4',
u':kimono:': u'\U0001F458',
u':kiss:': u'\U0001F48F',
u':kiss_man_man:': u'\U0001F468 \U0000200D \U00002764 \U0000FE0F \U0000200D \U0001F48B \U0000200D \U0001F468',
u':kiss_mark:': u'\U0001F48B',
u':kiss_woman_man:': u'\U0001F469 \U0000200D \U00002764 \U0000FE0F \U0000200D \U0001F48B \U0000200D \U0001F468',
u':kiss_woman_woman:': u'\U0001F469 \U0000200D \U00002764 \U0000FE0F \U0000200D \U0001F48B \U0000200D \U0001F469',
u':kissing_cat_face_with_closed_eyes:': u'\U0001F63D',
u':kissing_face:': u'\U0001F617',
u':kissing_face_with_closed_eyes:': u'\U0001F61A',
u':kissing_face_with_smiling_eyes:': u'\U0001F619',
u':kitchen_knife:': u'\U0001F52A',
u':kiwi_fruit:': u'\U0001F95D',
u':koala:': u'\U0001F428',
u':label:': u'\U0001F3F7',
u':lady_beetle:': u'\U0001F41E',
u':laptop_computer:': u'\U0001F4BB',
u':large_blue_diamond:': u'\U0001F537',
u':large_orange_diamond:': u'\U0001F536',
u':last_quarter_moon:': u'\U0001F317',
u':last_quarter_moon_with_face:': u'\U0001F31C',
u':last_track_button:': u'\U000023EE',
u':latin_cross:': u'\U0000271D',
u':leaf_fluttering_in_wind:': u'\U0001F343',
u':ledger:': u'\U0001F4D2',
u':left-facing_fist:': u'\U0001F91B',
u':left-facing_fist_dark_skin_tone:': u'\U0001F91B \U0001F3FF',
u':left-facing_fist_light_skin_tone:': u'\U0001F91B \U0001F3FB',
u':left-facing_fist_medium-dark_skin_tone:': u'\U0001F91B \U0001F3FE',
u':left-facing_fist_medium-light_skin_tone:': u'\U0001F91B \U0001F3FC',
u':left-facing_fist_medium_skin_tone:': u'\U0001F91B \U0001F3FD',
u':left-pointing_magnifying_glass:': u'\U0001F50D',
u':left-right_arrow:': u'\U00002194',
u':left_arrow:': u'\U00002B05',
u':left_arrow_curving_right:': u'\U000021AA',
u':left_luggage:': u'\U0001F6C5',
u':left_speech_bubble:': u'\U0001F5E8',
u':lemon:': u'\U0001F34B',
u':leopard:': u'\U0001F406',
u':level_slider:': u'\U0001F39A',
u':light_bulb:': u'\U0001F4A1',
u':light_rail:': u'\U0001F688',
u':light_skin_tone:': u'\U0001F3FB',
u':link:': u'\U0001F517',
u':linked_paperclips:': u'\U0001F587',
u':lion_face:': u'\U0001F981',
u':lipstick:': u'\U0001F484',
u':litter_in_bin_sign:': u'\U0001F6AE',
u':lizard:': u'\U0001F98E',
u':locked:': u'\U0001F512',
u':locked_with_key:': u'\U0001F510',
u':locked_with_pen:': u'\U0001F50F',
u':locomotive:': u'\U0001F682',
u':lollipop:': u'\U0001F36D',
u':loudly_crying_face:': u'\U0001F62D',
u':loudspeaker:': u'\U0001F4E2',
u':love_hotel:': u'\U0001F3E9',
u':love_letter:': u'\U0001F48C',
u':lying_face:': u'\U0001F925',
u':mahjong_red_dragon:': u'\U0001F004',
u':male_sign:': u'\U00002642',
u':man:': u'\U0001F468',
u':man_and_woman_holding_hands:': u'\U0001F46B',
u':man_artist:': u'\U0001F468 \U0000200D \U0001F3A8',
u':man_artist_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F3A8',
u':man_artist_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F3A8',
u':man_artist_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F3A8',
u':man_artist_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F3A8',
u':man_artist_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F3A8',
u':man_astronaut:': u'\U0001F468 \U0000200D \U0001F680',
u':man_astronaut_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F680',
u':man_astronaut_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F680',
u':man_astronaut_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F680',
u':man_astronaut_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F680',
u':man_astronaut_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F680',
u':man_biking:': u'\U0001F6B4 \U0000200D \U00002642 \U0000FE0F',
u':man_biking_dark_skin_tone:': u'\U0001F6B4 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_biking_light_skin_tone:': u'\U0001F6B4 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_biking_medium-dark_skin_tone:': u'\U0001F6B4 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_biking_medium-light_skin_tone:': u'\U0001F6B4 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_biking_medium_skin_tone:': u'\U0001F6B4 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_bouncing_ball:': u'\U000026F9 \U0000FE0F \U0000200D \U00002642 \U0000FE0F',
u':man_bouncing_ball_dark_skin_tone:': u'\U000026F9 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_bouncing_ball_light_skin_tone:': u'\U000026F9 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_bouncing_ball_medium-dark_skin_tone:': u'\U000026F9 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_bouncing_ball_medium-light_skin_tone:': u'\U000026F9 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_bouncing_ball_medium_skin_tone:': u'\U000026F9 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_bowing:': u'\U0001F647 \U0000200D \U00002642 \U0000FE0F',
u':man_bowing_dark_skin_tone:': u'\U0001F647 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_bowing_light_skin_tone:': u'\U0001F647 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_bowing_medium-dark_skin_tone:': u'\U0001F647 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_bowing_medium-light_skin_tone:': u'\U0001F647 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_bowing_medium_skin_tone:': u'\U0001F647 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_cartwheeling:': u'\U0001F938 \U0000200D \U00002642 \U0000FE0F',
u':man_cartwheeling_dark_skin_tone:': u'\U0001F938 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_cartwheeling_light_skin_tone:': u'\U0001F938 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_cartwheeling_medium-dark_skin_tone:': u'\U0001F938 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_cartwheeling_medium-light_skin_tone:': u'\U0001F938 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_cartwheeling_medium_skin_tone:': u'\U0001F938 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_construction_worker:': u'\U0001F477 \U0000200D \U00002642 \U0000FE0F',
u':man_construction_worker_dark_skin_tone:': u'\U0001F477 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_construction_worker_light_skin_tone:': u'\U0001F477 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_construction_worker_medium-dark_skin_tone:': u'\U0001F477 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_construction_worker_medium-light_skin_tone:': u'\U0001F477 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_construction_worker_medium_skin_tone:': u'\U0001F477 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_cook:': u'\U0001F468 \U0000200D \U0001F373',
u':man_cook_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F373',
u':man_cook_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F373',
u':man_cook_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F373',
u':man_cook_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F373',
u':man_cook_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F373',
u':man_dancing:': u'\U0001F57A',
u':man_dancing_dark_skin_tone:': u'\U0001F57A \U0001F3FF',
u':man_dancing_light_skin_tone:': u'\U0001F57A \U0001F3FB',
u':man_dancing_medium-dark_skin_tone:': u'\U0001F57A \U0001F3FE',
u':man_dancing_medium-light_skin_tone:': u'\U0001F57A \U0001F3FC',
u':man_dancing_medium_skin_tone:': u'\U0001F57A \U0001F3FD',
u':man_dark_skin_tone:': u'\U0001F468 \U0001F3FF',
u':man_detective:': u'\U0001F575 \U0000FE0F \U0000200D \U00002642 \U0000FE0F',
u':man_detective_dark_skin_tone:': u'\U0001F575 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_detective_light_skin_tone:': u'\U0001F575 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_detective_medium-dark_skin_tone:': u'\U0001F575 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_detective_medium-light_skin_tone:': u'\U0001F575 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_detective_medium_skin_tone:': u'\U0001F575 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_facepalming:': u'\U0001F926 \U0000200D \U00002642 \U0000FE0F',
u':man_facepalming_dark_skin_tone:': u'\U0001F926 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_facepalming_light_skin_tone:': u'\U0001F926 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_facepalming_medium-dark_skin_tone:': u'\U0001F926 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_facepalming_medium-light_skin_tone:': u'\U0001F926 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_facepalming_medium_skin_tone:': u'\U0001F926 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_factory_worker:': u'\U0001F468 \U0000200D \U0001F3ED',
u':man_factory_worker_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F3ED',
u':man_factory_worker_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F3ED',
u':man_factory_worker_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F3ED',
u':man_factory_worker_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F3ED',
u':man_factory_worker_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F3ED',
u':man_farmer:': u'\U0001F468 \U0000200D \U0001F33E',
u':man_farmer_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F33E',
u':man_farmer_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F33E',
u':man_farmer_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F33E',
u':man_farmer_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F33E',
u':man_farmer_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F33E',
u':man_firefighter:': u'\U0001F468 \U0000200D \U0001F692',
u':man_firefighter_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F692',
u':man_firefighter_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F692',
u':man_firefighter_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F692',
u':man_firefighter_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F692',
u':man_firefighter_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F692',
u':man_frowning:': u'\U0001F64D \U0000200D \U00002642 \U0000FE0F',
u':man_frowning_dark_skin_tone:': u'\U0001F64D \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_frowning_light_skin_tone:': u'\U0001F64D \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_frowning_medium-dark_skin_tone:': u'\U0001F64D \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_frowning_medium-light_skin_tone:': u'\U0001F64D \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_frowning_medium_skin_tone:': u'\U0001F64D \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_NO:': u'\U0001F645 \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_NO_dark_skin_tone:': u'\U0001F645 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_NO_light_skin_tone:': u'\U0001F645 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_NO_medium-dark_skin_tone:': u'\U0001F645 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_NO_medium-light_skin_tone:': u'\U0001F645 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_NO_medium_skin_tone:': u'\U0001F645 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_OK:': u'\U0001F646 \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_OK_dark_skin_tone:': u'\U0001F646 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_OK_light_skin_tone:': u'\U0001F646 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_OK_medium-dark_skin_tone:': u'\U0001F646 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_OK_medium-light_skin_tone:': u'\U0001F646 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_gesturing_OK_medium_skin_tone:': u'\U0001F646 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_getting_haircut:': u'\U0001F487 \U0000200D \U00002642 \U0000FE0F',
u':man_getting_haircut_dark_skin_tone:': u'\U0001F487 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_getting_haircut_light_skin_tone:': u'\U0001F487 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_getting_haircut_medium-dark_skin_tone:': u'\U0001F487 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_getting_haircut_medium-light_skin_tone:': u'\U0001F487 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_getting_haircut_medium_skin_tone:': u'\U0001F487 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_getting_massage:': u'\U0001F486 \U0000200D \U00002642 \U0000FE0F',
u':man_getting_massage_dark_skin_tone:': u'\U0001F486 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_getting_massage_light_skin_tone:': u'\U0001F486 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_getting_massage_medium-dark_skin_tone:': u'\U0001F486 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_getting_massage_medium-light_skin_tone:': u'\U0001F486 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_getting_massage_medium_skin_tone:': u'\U0001F486 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_golfing:': u'\U0001F3CC \U0000FE0F \U0000200D \U00002642 \U0000FE0F',
u':man_golfing_dark_skin_tone:': u'\U0001F3CC \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_golfing_light_skin_tone:': u'\U0001F3CC \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_golfing_medium-dark_skin_tone:': u'\U0001F3CC \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_golfing_medium-light_skin_tone:': u'\U0001F3CC \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_golfing_medium_skin_tone:': u'\U0001F3CC \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_guard:': u'\U0001F482 \U0000200D \U00002642 \U0000FE0F',
u':man_guard_dark_skin_tone:': u'\U0001F482 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_guard_light_skin_tone:': u'\U0001F482 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_guard_medium-dark_skin_tone:': u'\U0001F482 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_guard_medium-light_skin_tone:': u'\U0001F482 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_guard_medium_skin_tone:': u'\U0001F482 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_health_worker:': u'\U0001F468 \U0000200D \U00002695 \U0000FE0F',
u':man_health_worker_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U00002695 \U0000FE0F',
u':man_health_worker_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U00002695 \U0000FE0F',
u':man_health_worker_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U00002695 \U0000FE0F',
u':man_health_worker_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U00002695 \U0000FE0F',
u':man_health_worker_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U00002695 \U0000FE0F',
u':man_in_business_suit_levitating:': u'\U0001F574',
u':man_in_business_suit_levitating_dark_skin_tone:': u'\U0001F574 \U0001F3FF',
u':man_in_business_suit_levitating_light_skin_tone:': u'\U0001F574 \U0001F3FB',
u':man_in_business_suit_levitating_medium-dark_skin_tone:': u'\U0001F574 \U0001F3FE',
u':man_in_business_suit_levitating_medium-light_skin_tone:': u'\U0001F574 \U0001F3FC',
u':man_in_business_suit_levitating_medium_skin_tone:': u'\U0001F574 \U0001F3FD',
u':man_in_tuxedo:': u'\U0001F935',
u':man_in_tuxedo_dark_skin_tone:': u'\U0001F935 \U0001F3FF',
u':man_in_tuxedo_light_skin_tone:': u'\U0001F935 \U0001F3FB',
u':man_in_tuxedo_medium-dark_skin_tone:': u'\U0001F935 \U0001F3FE',
u':man_in_tuxedo_medium-light_skin_tone:': u'\U0001F935 \U0001F3FC',
u':man_in_tuxedo_medium_skin_tone:': u'\U0001F935 \U0001F3FD',
u':man_judge:': u'\U0001F468 \U0000200D \U00002696 \U0000FE0F',
u':man_judge_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U00002696 \U0000FE0F',
u':man_judge_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U00002696 \U0000FE0F',
u':man_judge_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U00002696 \U0000FE0F',
u':man_judge_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U00002696 \U0000FE0F',
u':man_judge_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U00002696 \U0000FE0F',
u':man_juggling:': u'\U0001F939 \U0000200D \U00002642 \U0000FE0F',
u':man_juggling_dark_skin_tone:': u'\U0001F939 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_juggling_light_skin_tone:': u'\U0001F939 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_juggling_medium-dark_skin_tone:': u'\U0001F939 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_juggling_medium-light_skin_tone:': u'\U0001F939 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_juggling_medium_skin_tone:': u'\U0001F939 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_lifting_weights:': u'\U0001F3CB \U0000FE0F \U0000200D \U00002642 \U0000FE0F',
u':man_lifting_weights_dark_skin_tone:': u'\U0001F3CB \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_lifting_weights_light_skin_tone:': u'\U0001F3CB \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_lifting_weights_medium-dark_skin_tone:': u'\U0001F3CB \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_lifting_weights_medium-light_skin_tone:': u'\U0001F3CB \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_lifting_weights_medium_skin_tone:': u'\U0001F3CB \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_light_skin_tone:': u'\U0001F468 \U0001F3FB',
u':man_mechanic:': u'\U0001F468 \U0000200D \U0001F527',
u':man_mechanic_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F527',
u':man_mechanic_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F527',
u':man_mechanic_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F527',
u':man_mechanic_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F527',
u':man_mechanic_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F527',
u':man_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE',
u':man_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC',
u':man_medium_skin_tone:': u'\U0001F468 \U0001F3FD',
u':man_mountain_biking:': u'\U0001F6B5 \U0000200D \U00002642 \U0000FE0F',
u':man_mountain_biking_dark_skin_tone:': u'\U0001F6B5 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_mountain_biking_light_skin_tone:': u'\U0001F6B5 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_mountain_biking_medium-dark_skin_tone:': u'\U0001F6B5 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_mountain_biking_medium-light_skin_tone:': u'\U0001F6B5 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_mountain_biking_medium_skin_tone:': u'\U0001F6B5 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_office_worker:': u'\U0001F468 \U0000200D \U0001F4BC',
u':man_office_worker_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F4BC',
u':man_office_worker_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F4BC',
u':man_office_worker_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F4BC',
u':man_office_worker_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F4BC',
u':man_office_worker_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F4BC',
u':man_pilot:': u'\U0001F468 \U0000200D \U00002708 \U0000FE0F',
u':man_pilot_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U00002708 \U0000FE0F',
u':man_pilot_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U00002708 \U0000FE0F',
u':man_pilot_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U00002708 \U0000FE0F',
u':man_pilot_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U00002708 \U0000FE0F',
u':man_pilot_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U00002708 \U0000FE0F',
u':man_playing_handball:': u'\U0001F93E \U0000200D \U00002642 \U0000FE0F',
u':man_playing_handball_dark_skin_tone:': u'\U0001F93E \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_playing_handball_light_skin_tone:': u'\U0001F93E \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_playing_handball_medium-dark_skin_tone:': u'\U0001F93E \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_playing_handball_medium-light_skin_tone:': u'\U0001F93E \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_playing_handball_medium_skin_tone:': u'\U0001F93E \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_playing_water_polo:': u'\U0001F93D \U0000200D \U00002642 \U0000FE0F',
u':man_playing_water_polo_dark_skin_tone:': u'\U0001F93D \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_playing_water_polo_light_skin_tone:': u'\U0001F93D \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_playing_water_polo_medium-dark_skin_tone:': u'\U0001F93D \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_playing_water_polo_medium-light_skin_tone:': u'\U0001F93D \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_playing_water_polo_medium_skin_tone:': u'\U0001F93D \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_police_officer:': u'\U0001F46E \U0000200D \U00002642 \U0000FE0F',
u':man_police_officer_dark_skin_tone:': u'\U0001F46E \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_police_officer_light_skin_tone:': u'\U0001F46E \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_police_officer_medium-dark_skin_tone:': u'\U0001F46E \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_police_officer_medium-light_skin_tone:': u'\U0001F46E \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_police_officer_medium_skin_tone:': u'\U0001F46E \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_pouting:': u'\U0001F64E \U0000200D \U00002642 \U0000FE0F',
u':man_pouting_dark_skin_tone:': u'\U0001F64E \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_pouting_light_skin_tone:': u'\U0001F64E \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_pouting_medium-dark_skin_tone:': u'\U0001F64E \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_pouting_medium-light_skin_tone:': u'\U0001F64E \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_pouting_medium_skin_tone:': u'\U0001F64E \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_raising_hand:': u'\U0001F64B \U0000200D \U00002642 \U0000FE0F',
u':man_raising_hand_dark_skin_tone:': u'\U0001F64B \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_raising_hand_light_skin_tone:': u'\U0001F64B \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_raising_hand_medium-dark_skin_tone:': u'\U0001F64B \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_raising_hand_medium-light_skin_tone:': u'\U0001F64B \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_raising_hand_medium_skin_tone:': u'\U0001F64B \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_rowing_boat:': u'\U0001F6A3 \U0000200D \U00002642 \U0000FE0F',
u':man_rowing_boat_dark_skin_tone:': u'\U0001F6A3 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_rowing_boat_light_skin_tone:': u'\U0001F6A3 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_rowing_boat_medium-dark_skin_tone:': u'\U0001F6A3 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_rowing_boat_medium-light_skin_tone:': u'\U0001F6A3 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_rowing_boat_medium_skin_tone:': u'\U0001F6A3 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_running:': u'\U0001F3C3 \U0000200D \U00002642 \U0000FE0F',
u':man_running_dark_skin_tone:': u'\U0001F3C3 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_running_light_skin_tone:': u'\U0001F3C3 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_running_medium-dark_skin_tone:': u'\U0001F3C3 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_running_medium-light_skin_tone:': u'\U0001F3C3 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_running_medium_skin_tone:': u'\U0001F3C3 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_scientist:': u'\U0001F468 \U0000200D \U0001F52C',
u':man_scientist_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F52C',
u':man_scientist_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F52C',
u':man_scientist_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F52C',
u':man_scientist_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F52C',
u':man_scientist_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F52C',
u':man_shrugging:': u'\U0001F937 \U0000200D \U00002642 \U0000FE0F',
u':man_shrugging_dark_skin_tone:': u'\U0001F937 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_shrugging_light_skin_tone:': u'\U0001F937 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_shrugging_medium-dark_skin_tone:': u'\U0001F937 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_shrugging_medium-light_skin_tone:': u'\U0001F937 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_shrugging_medium_skin_tone:': u'\U0001F937 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_singer:': u'\U0001F468 \U0000200D \U0001F3A4',
u':man_singer_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F3A4',
u':man_singer_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F3A4',
u':man_singer_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F3A4',
u':man_singer_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F3A4',
u':man_singer_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F3A4',
u':man_student:': u'\U0001F468 \U0000200D \U0001F393',
u':man_student_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F393',
u':man_student_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F393',
u':man_student_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F393',
u':man_student_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F393',
u':man_student_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F393',
u':man_surfing:': u'\U0001F3C4 \U0000200D \U00002642 \U0000FE0F',
u':man_surfing_dark_skin_tone:': u'\U0001F3C4 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_surfing_light_skin_tone:': u'\U0001F3C4 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_surfing_medium-dark_skin_tone:': u'\U0001F3C4 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_surfing_medium-light_skin_tone:': u'\U0001F3C4 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_surfing_medium_skin_tone:': u'\U0001F3C4 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_swimming:': u'\U0001F3CA \U0000200D \U00002642 \U0000FE0F',
u':man_swimming_dark_skin_tone:': u'\U0001F3CA \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_swimming_light_skin_tone:': u'\U0001F3CA \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_swimming_medium-dark_skin_tone:': u'\U0001F3CA \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_swimming_medium-light_skin_tone:': u'\U0001F3CA \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_swimming_medium_skin_tone:': u'\U0001F3CA \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_teacher:': u'\U0001F468 \U0000200D \U0001F3EB',
u':man_teacher_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F3EB',
u':man_teacher_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F3EB',
u':man_teacher_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F3EB',
u':man_teacher_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F3EB',
u':man_teacher_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F3EB',
u':man_technologist:': u'\U0001F468 \U0000200D \U0001F4BB',
u':man_technologist_dark_skin_tone:': u'\U0001F468 \U0001F3FF \U0000200D \U0001F4BB',
u':man_technologist_light_skin_tone:': u'\U0001F468 \U0001F3FB \U0000200D \U0001F4BB',
u':man_technologist_medium-dark_skin_tone:': u'\U0001F468 \U0001F3FE \U0000200D \U0001F4BB',
u':man_technologist_medium-light_skin_tone:': u'\U0001F468 \U0001F3FC \U0000200D \U0001F4BB',
u':man_technologist_medium_skin_tone:': u'\U0001F468 \U0001F3FD \U0000200D \U0001F4BB',
u':man_tipping_hand:': u'\U0001F481 \U0000200D \U00002642 \U0000FE0F',
u':man_tipping_hand_dark_skin_tone:': u'\U0001F481 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_tipping_hand_light_skin_tone:': u'\U0001F481 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_tipping_hand_medium-dark_skin_tone:': u'\U0001F481 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_tipping_hand_medium-light_skin_tone:': u'\U0001F481 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_tipping_hand_medium_skin_tone:': u'\U0001F481 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_walking:': u'\U0001F6B6 \U0000200D \U00002642 \U0000FE0F',
u':man_walking_dark_skin_tone:': u'\U0001F6B6 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_walking_light_skin_tone:': u'\U0001F6B6 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_walking_medium-dark_skin_tone:': u'\U0001F6B6 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_walking_medium-light_skin_tone:': u'\U0001F6B6 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_walking_medium_skin_tone:': u'\U0001F6B6 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_wearing_turban:': u'\U0001F473 \U0000200D \U00002642 \U0000FE0F',
u':man_wearing_turban_dark_skin_tone:': u'\U0001F473 \U0001F3FF \U0000200D \U00002642 \U0000FE0F',
u':man_wearing_turban_light_skin_tone:': u'\U0001F473 \U0001F3FB \U0000200D \U00002642 \U0000FE0F',
u':man_wearing_turban_medium-dark_skin_tone:': u'\U0001F473 \U0001F3FE \U0000200D \U00002642 \U0000FE0F',
u':man_wearing_turban_medium-light_skin_tone:': u'\U0001F473 \U0001F3FC \U0000200D \U00002642 \U0000FE0F',
u':man_wearing_turban_medium_skin_tone:': u'\U0001F473 \U0001F3FD \U0000200D \U00002642 \U0000FE0F',
u':man_with_Chinese_cap:': u'\U0001F472',
u':man_with_Chinese_cap_dark_skin_tone:': u'\U0001F472 \U0001F3FF',
u':man_with_Chinese_cap_light_skin_tone:': u'\U0001F472 \U0001F3FB',
u':man_with_Chinese_cap_medium-dark_skin_tone:': u'\U0001F472 \U0001F3FE',
u':man_with_Chinese_cap_medium-light_skin_tone:': u'\U0001F472 \U0001F3FC',
u':man_with_Chinese_cap_medium_skin_tone:': u'\U0001F472 \U0001F3FD',
u':mantelpiece_clock:': u'\U0001F570',
u':man’s_shoe:': u'\U0001F45E',
u':map_of_Japan:': u'\U0001F5FE',
u':maple_leaf:': u'\U0001F341',
u':martial_arts_uniform:': u'\U0001F94B',
u':meat_on_bone:': u'\U0001F356',
u':medical_symbol:': u'\U00002695',
u':medium-dark_skin_tone:': u'\U0001F3FE',
u':medium-light_skin_tone:': u'\U0001F3FC',
u':medium_skin_tone:': u'\U0001F3FD',
u':megaphone:': u'\U0001F4E3',
u':melon:': u'\U0001F348',
u':memo:': u'\U0001F4DD',
u':men_with_bunny_ears_partying:': u'\U0001F46F \U0000200D \U00002642 \U0000FE0F',
u':men_wrestling:': u'\U0001F93C \U0000200D \U00002642 \U0000FE0F',
u':menorah:': u'\U0001F54E',
u':men’s_room:': u'\U0001F6B9',
u':metro:': u'\U0001F687',
u':microphone:': u'\U0001F3A4',
u':microscope:': u'\U0001F52C',
u':middle_finger:': u'\U0001F595',
u':middle_finger_dark_skin_tone:': u'\U0001F595 \U0001F3FF',
u':middle_finger_light_skin_tone:': u'\U0001F595 \U0001F3FB',
u':middle_finger_medium-dark_skin_tone:': u'\U0001F595 \U0001F3FE',
u':middle_finger_medium-light_skin_tone:': u'\U0001F595 \U0001F3FC',
u':middle_finger_medium_skin_tone:': u'\U0001F595 \U0001F3FD',
u':military_medal:': u'\U0001F396',
u':milky_way:': u'\U0001F30C',
u':minibus:': u'\U0001F690',
u':moai:': u'\U0001F5FF',
u':mobile_phone:': u'\U0001F4F1',
u':mobile_phone_off:': u'\U0001F4F4',
u':mobile_phone_with_arrow:': u'\U0001F4F2',
u':money-mouth_face:': u'\U0001F911',
u':money_bag:': u'\U0001F4B0',
u':money_with_wings:': u'\U0001F4B8',
u':monkey:': u'\U0001F412',
u':monkey_face:': u'\U0001F435',
u':monorail:': u'\U0001F69D',
u':moon_viewing_ceremony:': u'\U0001F391',
u':mosque:': u'\U0001F54C',
u':motor_boat:': u'\U0001F6E5',
u':motor_scooter:': u'\U0001F6F5',
u':motorcycle:': u'\U0001F3CD',
u':motorway:': u'\U0001F6E3',
u':mount_fuji:': u'\U0001F5FB',
u':mountain:': u'\U000026F0',
u':mountain_cableway:': u'\U0001F6A0',
u':mountain_railway:': u'\U0001F69E',
u':mouse:': u'\U0001F401',
u':mouse_face:': u'\U0001F42D',
u':mouth:': u'\U0001F444',
u':movie_camera:': u'\U0001F3A5',
u':mushroom:': u'\U0001F344',
u':musical_keyboard:': u'\U0001F3B9',
u':musical_note:': u'\U0001F3B5',
u':musical_notes:': u'\U0001F3B6',
u':musical_score:': u'\U0001F3BC',
u':muted_speaker:': u'\U0001F507',
u':nail_polish:': u'\U0001F485',
u':nail_polish_dark_skin_tone:': u'\U0001F485 \U0001F3FF',
u':nail_polish_light_skin_tone:': u'\U0001F485 \U0001F3FB',
u':nail_polish_medium-dark_skin_tone:': u'\U0001F485 \U0001F3FE',
u':nail_polish_medium-light_skin_tone:': u'\U0001F485 \U0001F3FC',
u':nail_polish_medium_skin_tone:': u'\U0001F485 \U0001F3FD',
u':name_badge:': u'\U0001F4DB',
u':national_park:': u'\U0001F3DE',
u':nauseated_face:': u'\U0001F922',
u':necktie:': u'\U0001F454',
u':nerd_face:': u'\U0001F913',
u':neutral_face:': u'\U0001F610',
u':new_moon:': u'\U0001F311',
u':new_moon_face:': u'\U0001F31A',
u':newspaper:': u'\U0001F4F0',
u':next_track_button:': u'\U000023ED',
u':night_with_stars:': u'\U0001F303',
u':nine-thirty:': u'\U0001F564',
u':nine_o’clock:': u'\U0001F558',
u':no_bicycles:': u'\U0001F6B3',
u':no_entry:': u'\U000026D4',
u':no_littering:': u'\U0001F6AF',
u':no_mobile_phones:': u'\U0001F4F5',
u':no_one_under_eighteen:': u'\U0001F51E',
u':no_pedestrians:': u'\U0001F6B7',
u':no_smoking:': u'\U0001F6AD',
u':non-potable_water:': u'\U0001F6B1',
u':nose:': u'\U0001F443',
u':nose_dark_skin_tone:': u'\U0001F443 \U0001F3FF',
u':nose_light_skin_tone:': u'\U0001F443 \U0001F3FB',
u':nose_medium-dark_skin_tone:': u'\U0001F443 \U0001F3FE',
u':nose_medium-light_skin_tone:': u'\U0001F443 \U0001F3FC',
u':nose_medium_skin_tone:': u'\U0001F443 \U0001F3FD',
u':notebook:': u'\U0001F4D3',
u':notebook_with_decorative_cover:': u'\U0001F4D4',
u':nut_and_bolt:': u'\U0001F529',
u':octopus:': u'\U0001F419',
u':oden:': u'\U0001F362',
u':office_building:': u'\U0001F3E2',
u':ogre:': u'\U0001F479',
u':oil_drum:': u'\U0001F6E2',
u':old_key:': u'\U0001F5DD',
u':old_man:': u'\U0001F474',
u':old_man_dark_skin_tone:': u'\U0001F474 \U0001F3FF',
u':old_man_light_skin_tone:': u'\U0001F474 \U0001F3FB',
u':old_man_medium-dark_skin_tone:': u'\U0001F474 \U0001F3FE',
u':old_man_medium-light_skin_tone:': u'\U0001F474 \U0001F3FC',
u':old_man_medium_skin_tone:': u'\U0001F474 \U0001F3FD',
u':old_woman:': u'\U0001F475',
u':old_woman_dark_skin_tone:': u'\U0001F475 \U0001F3FF',
u':old_woman_light_skin_tone:': u'\U0001F475 \U0001F3FB',
u':old_woman_medium-dark_skin_tone:': u'\U0001F475 \U0001F3FE',
u':old_woman_medium-light_skin_tone:': u'\U0001F475 \U0001F3FC',
u':old_woman_medium_skin_tone:': u'\U0001F475 \U0001F3FD',
u':om:': u'\U0001F549',
u':oncoming_automobile:': u'\U0001F698',
u':oncoming_bus:': u'\U0001F68D',
u':oncoming_fist:': u'\U0001F44A',
u':oncoming_fist_dark_skin_tone:': u'\U0001F44A \U0001F3FF',
u':oncoming_fist_light_skin_tone:': u'\U0001F44A \U0001F3FB',
u':oncoming_fist_medium-dark_skin_tone:': u'\U0001F44A \U0001F3FE',
u':oncoming_fist_medium-light_skin_tone:': u'\U0001F44A \U0001F3FC',
u':oncoming_fist_medium_skin_tone:': u'\U0001F44A \U0001F3FD',
u':oncoming_police_car:': u'\U0001F694',
u':oncoming_taxi:': u'\U0001F696',
u':one-thirty:': u'\U0001F55C',
u':one_o’clock:': u'\U0001F550',
u':open_book:': u'\U0001F4D6',
u':open_file_folder:': u'\U0001F4C2',
u':open_hands:': u'\U0001F450',
u':open_hands_dark_skin_tone:': u'\U0001F450 \U0001F3FF',
u':open_hands_light_skin_tone:': u'\U0001F450 \U0001F3FB',
u':open_hands_medium-dark_skin_tone:': u'\U0001F450 \U0001F3FE',
u':open_hands_medium-light_skin_tone:': u'\U0001F450 \U0001F3FC',
u':open_hands_medium_skin_tone:': u'\U0001F450 \U0001F3FD',
u':open_mailbox_with_lowered_flag:': u'\U0001F4ED',
u':open_mailbox_with_raised_flag:': u'\U0001F4EC',
u':optical_disk:': u'\U0001F4BF',
u':orange_book:': u'\U0001F4D9',
u':orthodox_cross:': u'\U00002626',
u':outbox_tray:': u'\U0001F4E4',
u':owl:': u'\U0001F989',
u':ox:': u'\U0001F402',
u':package:': u'\U0001F4E6',
u':page_facing_up:': u'\U0001F4C4',
u':page_with_curl:': u'\U0001F4C3',
u':pager:': u'\U0001F4DF',
u':paintbrush:': u'\U0001F58C',
u':palm_tree:': u'\U0001F334',
u':pancakes:': u'\U0001F95E',
u':panda_face:': u'\U0001F43C',
u':paperclip:': u'\U0001F4CE',
u':part_alternation_mark:': u'\U0000303D',
u':party_popper:': u'\U0001F389',
u':passenger_ship:': u'\U0001F6F3',
u':passport_control:': u'\U0001F6C2',
u':pause_button:': u'\U000023F8',
u':paw_prints:': u'\U0001F43E',
u':peace_symbol:': u'\U0000262E',
u':peach:': u'\U0001F351',
u':peanuts:': u'\U0001F95C',
u':pear:': u'\U0001F350',
u':pen:': u'\U0001F58A',
u':pencil:': u'\U0000270F',
u':penguin:': u'\U0001F427',
u':pensive_face:': u'\U0001F614',
u':people_with_bunny_ears_partying:': u'\U0001F46F',
u':people_wrestling:': u'\U0001F93C',
u':performing_arts:': u'\U0001F3AD',
u':persevering_face:': u'\U0001F623',
u':person_biking:': u'\U0001F6B4',
u':person_biking_dark_skin_tone:': u'\U0001F6B4 \U0001F3FF',
u':person_biking_light_skin_tone:': u'\U0001F6B4 \U0001F3FB',
u':person_biking_medium-dark_skin_tone:': u'\U0001F6B4 \U0001F3FE',
u':person_biking_medium-light_skin_tone:': u'\U0001F6B4 \U0001F3FC',
u':person_biking_medium_skin_tone:': u'\U0001F6B4 \U0001F3FD',
u':person_bouncing_ball:': u'\U000026F9',
u':person_bouncing_ball_dark_skin_tone:': u'\U000026F9 \U0001F3FF',
u':person_bouncing_ball_light_skin_tone:': u'\U000026F9 \U0001F3FB',
u':person_bouncing_ball_medium-dark_skin_tone:': u'\U000026F9 \U0001F3FE',
u':person_bouncing_ball_medium-light_skin_tone:': u'\U000026F9 \U0001F3FC',
u':person_bouncing_ball_medium_skin_tone:': u'\U000026F9 \U0001F3FD',
u':person_bowing:': u'\U0001F647',
u':person_bowing_dark_skin_tone:': u'\U0001F647 \U0001F3FF',
u':person_bowing_light_skin_tone:': u'\U0001F647 \U0001F3FB',
u':person_bowing_medium-dark_skin_tone:': u'\U0001F647 \U0001F3FE',
u':person_bowing_medium-light_skin_tone:': u'\U0001F647 \U0001F3FC',
u':person_bowing_medium_skin_tone:': u'\U0001F647 \U0001F3FD',
u':person_cartwheeling:': u'\U0001F938',
u':person_cartwheeling_dark_skin_tone:': u'\U0001F938 \U0001F3FF',
u':person_cartwheeling_light_skin_tone:': u'\U0001F938 \U0001F3FB',
u':person_cartwheeling_medium-dark_skin_tone:': u'\U0001F938 \U0001F3FE',
u':person_cartwheeling_medium-light_skin_tone:': u'\U0001F938 \U0001F3FC',
u':person_cartwheeling_medium_skin_tone:': u'\U0001F938 \U0001F3FD',
u':person_facepalming:': u'\U0001F926',
u':person_facepalming_dark_skin_tone:': u'\U0001F926 \U0001F3FF',
u':person_facepalming_light_skin_tone:': u'\U0001F926 \U0001F3FB',
u':person_facepalming_medium-dark_skin_tone:': u'\U0001F926 \U0001F3FE',
u':person_facepalming_medium-light_skin_tone:': u'\U0001F926 \U0001F3FC',
u':person_facepalming_medium_skin_tone:': u'\U0001F926 \U0001F3FD',
u':person_fencing:': u'\U0001F93A',
u':person_frowning:': u'\U0001F64D',
u':person_frowning_dark_skin_tone:': u'\U0001F64D \U0001F3FF',
u':person_frowning_light_skin_tone:': u'\U0001F64D \U0001F3FB',
u':person_frowning_medium-dark_skin_tone:': u'\U0001F64D \U0001F3FE',
u':person_frowning_medium-light_skin_tone:': u'\U0001F64D \U0001F3FC',
u':person_frowning_medium_skin_tone:': u'\U0001F64D \U0001F3FD',
u':person_gesturing_NO:': u'\U0001F645',
u':person_gesturing_NO_dark_skin_tone:': u'\U0001F645 \U0001F3FF',
u':person_gesturing_NO_light_skin_tone:': u'\U0001F645 \U0001F3FB',
u':person_gesturing_NO_medium-dark_skin_tone:': u'\U0001F645 \U0001F3FE',
u':person_gesturing_NO_medium-light_skin_tone:': u'\U0001F645 \U0001F3FC',
u':person_gesturing_NO_medium_skin_tone:': u'\U0001F645 \U0001F3FD',
u':person_gesturing_OK:': u'\U0001F646',
u':person_gesturing_OK_dark_skin_tone:': u'\U0001F646 \U0001F3FF',
u':person_gesturing_OK_light_skin_tone:': u'\U0001F646 \U0001F3FB',
u':person_gesturing_OK_medium-dark_skin_tone:': u'\U0001F646 \U0001F3FE',
u':person_gesturing_OK_medium-light_skin_tone:': u'\U0001F646 \U0001F3FC',
u':person_gesturing_OK_medium_skin_tone:': u'\U0001F646 \U0001F3FD',
u':person_getting_haircut:': u'\U0001F487',
u':person_getting_haircut_dark_skin_tone:': u'\U0001F487 \U0001F3FF',
u':person_getting_haircut_light_skin_tone:': u'\U0001F487 \U0001F3FB',
u':person_getting_haircut_medium-dark_skin_tone:': u'\U0001F487 \U0001F3FE',
u':person_getting_haircut_medium-light_skin_tone:': u'\U0001F487 \U0001F3FC',
u':person_getting_haircut_medium_skin_tone:': u'\U0001F487 \U0001F3FD',
u':person_getting_massage:': u'\U0001F486',
u':person_getting_massage_dark_skin_tone:': u'\U0001F486 \U0001F3FF',
u':person_getting_massage_light_skin_tone:': u'\U0001F486 \U0001F3FB',
u':person_getting_massage_medium-dark_skin_tone:': u'\U0001F486 \U0001F3FE',
u':person_getting_massage_medium-light_skin_tone:': u'\U0001F486 \U0001F3FC',
u':person_getting_massage_medium_skin_tone:': u'\U0001F486 \U0001F3FD',
u':person_golfing:': u'\U0001F3CC',
u':person_golfing_dark_skin_tone:': u'\U0001F3CC \U0001F3FF',
u':person_golfing_light_skin_tone:': u'\U0001F3CC \U0001F3FB',
u':person_golfing_medium-dark_skin_tone:': u'\U0001F3CC \U0001F3FE',
u':person_golfing_medium-light_skin_tone:': u'\U0001F3CC \U0001F3FC',
u':person_golfing_medium_skin_tone:': u'\U0001F3CC \U0001F3FD',
u':person_in_bed:': u'\U0001F6CC',
u':person_in_bed_dark_skin_tone:': u'\U0001F6CC \U0001F3FF',
u':person_in_bed_light_skin_tone:': u'\U0001F6CC \U0001F3FB',
u':person_in_bed_medium-dark_skin_tone:': u'\U0001F6CC \U0001F3FE',
u':person_in_bed_medium-light_skin_tone:': u'\U0001F6CC \U0001F3FC',
u':person_in_bed_medium_skin_tone:': u'\U0001F6CC \U0001F3FD',
u':person_juggling:': u'\U0001F939',
u':person_juggling_dark_skin_tone:': u'\U0001F939 \U0001F3FF',
u':person_juggling_light_skin_tone:': u'\U0001F939 \U0001F3FB',
u':person_juggling_medium-dark_skin_tone:': u'\U0001F939 \U0001F3FE',
u':person_juggling_medium-light_skin_tone:': u'\U0001F939 \U0001F3FC',
u':person_juggling_medium_skin_tone:': u'\U0001F939 \U0001F3FD',
u':person_lifting_weights:': u'\U0001F3CB',
u':person_lifting_weights_dark_skin_tone:': u'\U0001F3CB \U0001F3FF',
u':person_lifting_weights_light_skin_tone:': u'\U0001F3CB \U0001F3FB',
u':person_lifting_weights_medium-dark_skin_tone:': u'\U0001F3CB \U0001F3FE',
u':person_lifting_weights_medium-light_skin_tone:': u'\U0001F3CB \U0001F3FC',
u':person_lifting_weights_medium_skin_tone:': u'\U0001F3CB \U0001F3FD',
u':person_mountain_biking:': u'\U0001F6B5',
u':person_mountain_biking_dark_skin_tone:': u'\U0001F6B5 \U0001F3FF',
u':person_mountain_biking_light_skin_tone:': u'\U0001F6B5 \U0001F3FB',
u':person_mountain_biking_medium-dark_skin_tone:': u'\U0001F6B5 \U0001F3FE',
u':person_mountain_biking_medium-light_skin_tone:': u'\U0001F6B5 \U0001F3FC',
u':person_mountain_biking_medium_skin_tone:': u'\U0001F6B5 \U0001F3FD',
u':person_playing_handball:': u'\U0001F93E',
u':person_playing_handball_dark_skin_tone:': u'\U0001F93E \U0001F3FF',
u':person_playing_handball_light_skin_tone:': u'\U0001F93E \U0001F3FB',
u':person_playing_handball_medium-dark_skin_tone:': u'\U0001F93E \U0001F3FE',
u':person_playing_handball_medium-light_skin_tone:': u'\U0001F93E \U0001F3FC',
u':person_playing_handball_medium_skin_tone:': u'\U0001F93E \U0001F3FD',
u':person_playing_water_polo:': u'\U0001F93D',
u':person_playing_water_polo_dark_skin_tone:': u'\U0001F93D \U0001F3FF',
u':person_playing_water_polo_light_skin_tone:': u'\U0001F93D \U0001F3FB',
u':person_playing_water_polo_medium-dark_skin_tone:': u'\U0001F93D \U0001F3FE',
u':person_playing_water_polo_medium-light_skin_tone:': u'\U0001F93D \U0001F3FC',
u':person_playing_water_polo_medium_skin_tone:': u'\U0001F93D \U0001F3FD',
u':person_pouting:': u'\U0001F64E',
u':person_pouting_dark_skin_tone:': u'\U0001F64E \U0001F3FF',
u':person_pouting_light_skin_tone:': u'\U0001F64E \U0001F3FB',
u':person_pouting_medium-dark_skin_tone:': u'\U0001F64E \U0001F3FE',
u':person_pouting_medium-light_skin_tone:': u'\U0001F64E \U0001F3FC',
u':person_pouting_medium_skin_tone:': u'\U0001F64E \U0001F3FD',
u':person_raising_hand:': u'\U0001F64B',
u':person_raising_hand_dark_skin_tone:': u'\U0001F64B \U0001F3FF',
u':person_raising_hand_light_skin_tone:': u'\U0001F64B \U0001F3FB',
u':person_raising_hand_medium-dark_skin_tone:': u'\U0001F64B \U0001F3FE',
u':person_raising_hand_medium-light_skin_tone:': u'\U0001F64B \U0001F3FC',
u':person_raising_hand_medium_skin_tone:': u'\U0001F64B \U0001F3FD',
u':person_rowing_boat:': u'\U0001F6A3',
u':person_rowing_boat_dark_skin_tone:': u'\U0001F6A3 \U0001F3FF',
u':person_rowing_boat_light_skin_tone:': u'\U0001F6A3 \U0001F3FB',
u':person_rowing_boat_medium-dark_skin_tone:': u'\U0001F6A3 \U0001F3FE',
u':person_rowing_boat_medium-light_skin_tone:': u'\U0001F6A3 \U0001F3FC',
u':person_rowing_boat_medium_skin_tone:': u'\U0001F6A3 \U0001F3FD',
u':person_running:': u'\U0001F3C3',
u':person_running_dark_skin_tone:': u'\U0001F3C3 \U0001F3FF',
u':person_running_light_skin_tone:': u'\U0001F3C3 \U0001F3FB',
u':person_running_medium-dark_skin_tone:': u'\U0001F3C3 \U0001F3FE',
u':person_running_medium-light_skin_tone:': u'\U0001F3C3 \U0001F3FC',
u':person_running_medium_skin_tone:': u'\U0001F3C3 \U0001F3FD',
u':person_shrugging:': u'\U0001F937',
u':person_shrugging_dark_skin_tone:': u'\U0001F937 \U0001F3FF',
u':person_shrugging_light_skin_tone:': u'\U0001F937 \U0001F3FB',
u':person_shrugging_medium-dark_skin_tone:': u'\U0001F937 \U0001F3FE',
u':person_shrugging_medium-light_skin_tone:': u'\U0001F937 \U0001F3FC',
u':person_shrugging_medium_skin_tone:': u'\U0001F937 \U0001F3FD',
u':person_surfing:': u'\U0001F3C4',
u':person_surfing_dark_skin_tone:': u'\U0001F3C4 \U0001F3FF',
u':person_surfing_light_skin_tone:': u'\U0001F3C4 \U0001F3FB',
u':person_surfing_medium-dark_skin_tone:': u'\U0001F3C4 \U0001F3FE',
u':person_surfing_medium-light_skin_tone:': u'\U0001F3C4 \U0001F3FC',
u':person_surfing_medium_skin_tone:': u'\U0001F3C4 \U0001F3FD',
u':person_swimming:': u'\U0001F3CA',
u':person_swimming_dark_skin_tone:': u'\U0001F3CA \U0001F3FF',
u':person_swimming_light_skin_tone:': u'\U0001F3CA \U0001F3FB',
u':person_swimming_medium-dark_skin_tone:': u'\U0001F3CA \U0001F3FE',
u':person_swimming_medium-light_skin_tone:': u'\U0001F3CA \U0001F3FC',
u':person_swimming_medium_skin_tone:': u'\U0001F3CA \U0001F3FD',
u':person_taking_bath:': u'\U0001F6C0',
u':person_taking_bath_dark_skin_tone:': u'\U0001F6C0 \U0001F3FF',
u':person_taking_bath_light_skin_tone:': u'\U0001F6C0 \U0001F3FB',
u':person_taking_bath_medium-dark_skin_tone:': u'\U0001F6C0 \U0001F3FE',
u':person_taking_bath_medium-light_skin_tone:': u'\U0001F6C0 \U0001F3FC',
u':person_taking_bath_medium_skin_tone:': u'\U0001F6C0 \U0001F3FD',
u':person_tipping_hand:': u'\U0001F481',
u':person_tipping_hand_dark_skin_tone:': u'\U0001F481 \U0001F3FF',
u':person_tipping_hand_light_skin_tone:': u'\U0001F481 \U0001F3FB',
u':person_tipping_hand_medium-dark_skin_tone:': u'\U0001F481 \U0001F3FE',
u':person_tipping_hand_medium-light_skin_tone:': u'\U0001F481 \U0001F3FC',
u':person_tipping_hand_medium_skin_tone:': u'\U0001F481 \U0001F3FD',
u':person_walking:': u'\U0001F6B6',
u':person_walking_dark_skin_tone:': u'\U0001F6B6 \U0001F3FF',
u':person_walking_light_skin_tone:': u'\U0001F6B6 \U0001F3FB',
u':person_walking_medium-dark_skin_tone:': u'\U0001F6B6 \U0001F3FE',
u':person_walking_medium-light_skin_tone:': u'\U0001F6B6 \U0001F3FC',
u':person_walking_medium_skin_tone:': u'\U0001F6B6 \U0001F3FD',
u':person_wearing_turban:': u'\U0001F473',
u':person_wearing_turban_dark_skin_tone:': u'\U0001F473 \U0001F3FF',
u':person_wearing_turban_light_skin_tone:': u'\U0001F473 \U0001F3FB',
u':person_wearing_turban_medium-dark_skin_tone:': u'\U0001F473 \U0001F3FE',
u':person_wearing_turban_medium-light_skin_tone:': u'\U0001F473 \U0001F3FC',
u':person_wearing_turban_medium_skin_tone:': u'\U0001F473 \U0001F3FD',
u':pick:': u'\U000026CF',
u':pig:': u'\U0001F416',
u':pig_face:': u'\U0001F437',
u':pig_nose:': u'\U0001F43D',
u':pile_of_poo:': u'\U0001F4A9',
u':pill:': u'\U0001F48A',
u':pine_decoration:': u'\U0001F38D',
u':pineapple:': u'\U0001F34D',
u':ping_pong:': u'\U0001F3D3',
u':pistol:': u'\U0001F52B',
u':pizza:': u'\U0001F355',
u':place_of_worship:': u'\U0001F6D0',
u':play_button:': u'\U000025B6',
u':play_or_pause_button:': u'\U000023EF',
u':police_car:': u'\U0001F693',
u':police_car_light:': u'\U0001F6A8',
u':police_officer:': u'\U0001F46E',
u':police_officer_dark_skin_tone:': u'\U0001F46E \U0001F3FF',
u':police_officer_light_skin_tone:': u'\U0001F46E \U0001F3FB',
u':police_officer_medium-dark_skin_tone:': u'\U0001F46E \U0001F3FE',
u':police_officer_medium-light_skin_tone:': u'\U0001F46E \U0001F3FC',
u':police_officer_medium_skin_tone:': u'\U0001F46E \U0001F3FD',
u':poodle:': u'\U0001F429',
u':pool_8_ball:': u'\U0001F3B1',
u':popcorn:': u'\U0001F37F',
u':post_office:': u'\U0001F3E4',
u':postal_horn:': u'\U0001F4EF',
u':postbox:': u'\U0001F4EE',
u':pot_of_food:': u'\U0001F372',
u':potable_water:': u'\U0001F6B0',
u':potato:': u'\U0001F954',
u':poultry_leg:': u'\U0001F357',
u':pound_banknote:': u'\U0001F4B7',
u':pouting_cat_face:': u'\U0001F63E',
u':pouting_face:': u'\U0001F621',
u':prayer_beads:': u'\U0001F4FF',
u':pregnant_woman:': u'\U0001F930',
u':pregnant_woman_dark_skin_tone:': u'\U0001F930 \U0001F3FF',
u':pregnant_woman_light_skin_tone:': u'\U0001F930 \U0001F3FB',
u':pregnant_woman_medium-dark_skin_tone:': u'\U0001F930 \U0001F3FE',
u':pregnant_woman_medium-light_skin_tone:': u'\U0001F930 \U0001F3FC',
u':pregnant_woman_medium_skin_tone:': u'\U0001F930 \U0001F3FD',
u':prince:': u'\U0001F934',
u':prince_dark_skin_tone:': u'\U0001F934 \U0001F3FF',
u':prince_light_skin_tone:': u'\U0001F934 \U0001F3FB',
u':prince_medium-dark_skin_tone:': u'\U0001F934 \U0001F3FE',
u':prince_medium-light_skin_tone:': u'\U0001F934 \U0001F3FC',
u':prince_medium_skin_tone:': u'\U0001F934 \U0001F3FD',
u':princess:': u'\U0001F478',
u':princess_dark_skin_tone:': u'\U0001F478 \U0001F3FF',
u':princess_light_skin_tone:': u'\U0001F478 \U0001F3FB',
u':princess_medium-dark_skin_tone:': u'\U0001F478 \U0001F3FE',
u':princess_medium-light_skin_tone:': u'\U0001F478 \U0001F3FC',
u':princess_medium_skin_tone:': u'\U0001F478 \U0001F3FD',
u':printer:': u'\U0001F5A8',
u':prohibited:': u'\U0001F6AB',
u':purple_heart:': u'\U0001F49C',
u':purse:': u'\U0001F45B',
u':pushpin:': u'\U0001F4CC',
u':question_mark:': u'\U00002753',
u':rabbit:': u'\U0001F407',
u':rabbit_face:': u'\U0001F430',
u':racing_car:': u'\U0001F3CE',
u':radio:': u'\U0001F4FB',
u':radio_button:': u'\U0001F518',
u':radioactive:': u'\U00002622',
u':railway_car:': u'\U0001F683',
u':railway_track:': u'\U0001F6E4',
u':rainbow:': u'\U0001F308',
u':rainbow_flag:': u'\U0001F3F3 \U0000FE0F \U0000200D \U0001F308',
u':raised_back_of_hand:': u'\U0001F91A',
u':raised_back_of_hand_dark_skin_tone:': u'\U0001F91A \U0001F3FF',
u':raised_back_of_hand_light_skin_tone:': u'\U0001F91A \U0001F3FB',
u':raised_back_of_hand_medium-dark_skin_tone:': u'\U0001F91A \U0001F3FE',
u':raised_back_of_hand_medium-light_skin_tone:': u'\U0001F91A \U0001F3FC',
u':raised_back_of_hand_medium_skin_tone:': u'\U0001F91A \U0001F3FD',
u':raised_fist:': u'\U0000270A',
u':raised_fist_dark_skin_tone:': u'\U0000270A \U0001F3FF',
u':raised_fist_light_skin_tone:': u'\U0000270A \U0001F3FB',
u':raised_fist_medium-dark_skin_tone:': u'\U0000270A \U0001F3FE',
u':raised_fist_medium-light_skin_tone:': u'\U0000270A \U0001F3FC',
u':raised_fist_medium_skin_tone:': u'\U0000270A \U0001F3FD',
u':raised_hand:': u'\U0000270B',
u':raised_hand_dark_skin_tone:': u'\U0000270B \U0001F3FF',
u':raised_hand_light_skin_tone:': u'\U0000270B \U0001F3FB',
u':raised_hand_medium-dark_skin_tone:': u'\U0000270B \U0001F3FE',
u':raised_hand_medium-light_skin_tone:': u'\U0000270B \U0001F3FC',
u':raised_hand_medium_skin_tone:': u'\U0000270B \U0001F3FD',
u':raised_hand_with_fingers_splayed:': u'\U0001F590',
u':raised_hand_with_fingers_splayed_dark_skin_tone:': u'\U0001F590 \U0001F3FF',
u':raised_hand_with_fingers_splayed_light_skin_tone:': u'\U0001F590 \U0001F3FB',
u':raised_hand_with_fingers_splayed_medium-dark_skin_tone:': u'\U0001F590 \U0001F3FE',
u':raised_hand_with_fingers_splayed_medium-light_skin_tone:': u'\U0001F590 \U0001F3FC',
u':raised_hand_with_fingers_splayed_medium_skin_tone:': u'\U0001F590 \U0001F3FD',
u':raising_hands:': u'\U0001F64C',
u':raising_hands_dark_skin_tone:': u'\U0001F64C \U0001F3FF',
u':raising_hands_light_skin_tone:': u'\U0001F64C \U0001F3FB',
u':raising_hands_medium-dark_skin_tone:': u'\U0001F64C \U0001F3FE',
u':raising_hands_medium-light_skin_tone:': u'\U0001F64C \U0001F3FC',
u':raising_hands_medium_skin_tone:': u'\U0001F64C \U0001F3FD',
u':ram:': u'\U0001F40F',
u':rat:': u'\U0001F400',
u':record_button:': u'\U000023FA',
u':recycling_symbol:': u'\U0000267B',
u':red_apple:': u'\U0001F34E',
u':red_circle:': u'\U0001F534',
u':red_heart:': u'\U00002764',
u':red_paper_lantern:': u'\U0001F3EE',
u':red_triangle_pointed_down:': u'\U0001F53B',
u':red_triangle_pointed_up:': u'\U0001F53A',
u':registered:': u'\U000000AE',
u':relieved_face:': u'\U0001F60C',
u':reminder_ribbon:': u'\U0001F397',
u':repeat_button:': u'\U0001F501',
u':repeat_single_button:': u'\U0001F502',
u':rescue_worker’s_helmet:': u'\U000026D1',
u':restroom:': u'\U0001F6BB',
u':reverse_button:': u'\U000025C0',
u':revolving_hearts:': u'\U0001F49E',
u':rhinoceros:': u'\U0001F98F',
u':ribbon:': u'\U0001F380',
u':rice_ball:': u'\U0001F359',
u':rice_cracker:': u'\U0001F358',
u':right-facing_fist:': u'\U0001F91C',
u':right-facing_fist_dark_skin_tone:': u'\U0001F91C \U0001F3FF',
u':right-facing_fist_light_skin_tone:': u'\U0001F91C \U0001F3FB',
u':right-facing_fist_medium-dark_skin_tone:': u'\U0001F91C \U0001F3FE',
u':right-facing_fist_medium-light_skin_tone:': u'\U0001F91C \U0001F3FC',
u':right-facing_fist_medium_skin_tone:': u'\U0001F91C \U0001F3FD',
u':right-pointing_magnifying_glass:': u'\U0001F50E',
u':right_anger_bubble:': u'\U0001F5EF',
u':right_arrow:': u'\U000027A1',
u':right_arrow_curving_down:': u'\U00002935',
u':right_arrow_curving_left:': u'\U000021A9',
u':right_arrow_curving_up:': u'\U00002934',
u':ring:': u'\U0001F48D',
u':roasted_sweet_potato:': u'\U0001F360',
u':robot_face:': u'\U0001F916',
u':rocket:': u'\U0001F680',
u':rolled-up_newspaper:': u'\U0001F5DE',
u':roller_coaster:': u'\U0001F3A2',
u':rolling_on_the_floor_laughing:': u'\U0001F923',
u':rooster:': u'\U0001F413',
u':rose:': u'\U0001F339',
u':rosette:': u'\U0001F3F5',
u':round_pushpin:': u'\U0001F4CD',
u':rugby_football:': u'\U0001F3C9',
u':running_shirt:': u'\U0001F3BD',
u':running_shoe:': u'\U0001F45F',
u':sailboat:': u'\U000026F5',
u':sake:': u'\U0001F376',
u':satellite:': u'\U0001F6F0',
u':satellite_antenna:': u'\U0001F4E1',
u':saxophone:': u'\U0001F3B7',
u':school:': u'\U0001F3EB',
u':school_backpack:': u'\U0001F392',
u':scissors:': u'\U00002702',
u':scorpion:': u'\U0001F982',
u':scroll:': u'\U0001F4DC',
u':seat:': u'\U0001F4BA',
u':see-no-evil_monkey:': u'\U0001F648',
u':seedling:': u'\U0001F331',
u':selfie:': u'\U0001F933',
u':selfie_dark_skin_tone:': u'\U0001F933 \U0001F3FF',
u':selfie_light_skin_tone:': u'\U0001F933 \U0001F3FB',
u':selfie_medium-dark_skin_tone:': u'\U0001F933 \U0001F3FE',
u':selfie_medium-light_skin_tone:': u'\U0001F933 \U0001F3FC',
u':selfie_medium_skin_tone:': u'\U0001F933 \U0001F3FD',
u':seven-thirty:': u'\U0001F562',
u':seven_o’clock:': u'\U0001F556',
u':shallow_pan_of_food:': u'\U0001F958',
u':shamrock:': u'\U00002618',
u':shark:': u'\U0001F988',
u':shaved_ice:': u'\U0001F367',
u':sheaf_of_rice:': u'\U0001F33E',
u':sheep:': u'\U0001F411',
u':shield:': u'\U0001F6E1',
u':shinto_shrine:': u'\U000026E9',
u':ship:': u'\U0001F6A2',
u':shooting_star:': u'\U0001F320',
u':shopping_bags:': u'\U0001F6CD',
u':shopping_cart:': u'\U0001F6D2',
u':shortcake:': u'\U0001F370',
u':shower:': u'\U0001F6BF',
u':shrimp:': u'\U0001F990',
u':shuffle_tracks_button:': u'\U0001F500',
u':sign_of_the_horns:': u'\U0001F918',
u':sign_of_the_horns_dark_skin_tone:': u'\U0001F918 \U0001F3FF',
u':sign_of_the_horns_light_skin_tone:': u'\U0001F918 \U0001F3FB',
u':sign_of_the_horns_medium-dark_skin_tone:': u'\U0001F918 \U0001F3FE',
u':sign_of_the_horns_medium-light_skin_tone:': u'\U0001F918 \U0001F3FC',
u':sign_of_the_horns_medium_skin_tone:': u'\U0001F918 \U0001F3FD',
u':six-thirty:': u'\U0001F561',
u':six_o’clock:': u'\U0001F555',
u':skier:': u'\U000026F7',
u':skis:': u'\U0001F3BF',
u':skull:': u'\U0001F480',
u':skull_and_crossbones:': u'\U00002620',
u':sleeping_face:': u'\U0001F634',
u':sleepy_face:': u'\U0001F62A',
u':slightly_frowning_face:': u'\U0001F641',
u':slightly_smiling_face:': u'\U0001F642',
u':slot_machine:': u'\U0001F3B0',
u':small_airplane:': u'\U0001F6E9',
u':small_blue_diamond:': u'\U0001F539',
u':small_orange_diamond:': u'\U0001F538',
u':smiling_cat_face_with_heart-eyes:': u'\U0001F63B',
u':smiling_cat_face_with_open_mouth:': u'\U0001F63A',
u':smiling_face:': u'\U0000263A',
u':smiling_face_with_halo:': u'\U0001F607',
u':smiling_face_with_heart-eyes:': u'\U0001F60D',
u':smiling_face_with_horns:': u'\U0001F608',
u':smiling_face_with_open_mouth:': u'\U0001F603',
u':smiling_face_with_open_mouth_&_closed_eyes:': u'\U0001F606',
u':smiling_face_with_open_mouth_&_cold_sweat:': u'\U0001F605',
u':smiling_face_with_open_mouth_&_smiling_eyes:': u'\U0001F604',
u':smiling_face_with_smiling_eyes:': u'\U0001F60A',
u':smiling_face_with_sunglasses:': u'\U0001F60E',
u':smirking_face:': u'\U0001F60F',
u':snail:': u'\U0001F40C',
u':snake:': u'\U0001F40D',
u':sneezing_face:': u'\U0001F927',
u':snow-capped_mountain:': u'\U0001F3D4',
u':snowboarder:': u'\U0001F3C2',
u':snowboarder_dark_skin_tone:': u'\U0001F3C2 \U0001F3FF',
u':snowboarder_light_skin_tone:': u'\U0001F3C2 \U0001F3FB',
u':snowboarder_medium-dark_skin_tone:': u'\U0001F3C2 \U0001F3FE',
u':snowboarder_medium-light_skin_tone:': u'\U0001F3C2 \U0001F3FC',
u':snowboarder_medium_skin_tone:': u'\U0001F3C2 \U0001F3FD',
u':snowflake:': u'\U00002744',
u':snowman:': u'\U00002603',
u':snowman_without_snow:': u'\U000026C4',
u':soccer_ball:': u'\U000026BD',
u':soft_ice_cream:': u'\U0001F366',
u':spade_suit:': u'\U00002660',
u':spaghetti:': u'\U0001F35D',
u':sparkle:': u'\U00002747',
u':sparkler:': u'\U0001F387',
u':sparkles:': u'\U00002728',
u':sparkling_heart:': u'\U0001F496',
u':speak-no-evil_monkey:': u'\U0001F64A',
u':speaker_high_volume:': u'\U0001F50A',
u':speaker_low_volume:': u'\U0001F508',
u':speaker_medium_volume:': u'\U0001F509',
u':speaking_head:': u'\U0001F5E3',
u':speech_balloon:': u'\U0001F4AC',
u':speedboat:': u'\U0001F6A4',
u':spider:': u'\U0001F577',
u':spider_web:': u'\U0001F578',
u':spiral_calendar:': u'\U0001F5D3',
u':spiral_notepad:': u'\U0001F5D2',
u':spiral_shell:': u'\U0001F41A',
u':spoon:': u'\U0001F944',
u':sport_utility_vehicle:': u'\U0001F699',
u':sports_medal:': u'\U0001F3C5',
u':spouting_whale:': u'\U0001F433',
u':squid:': u'\U0001F991',
u':stadium:': u'\U0001F3DF',
u':star_and_crescent:': u'\U0000262A',
u':star_of_David:': u'\U00002721',
u':station:': u'\U0001F689',
u':steaming_bowl:': u'\U0001F35C',
u':stop_button:': u'\U000023F9',
u':stop_sign:': u'\U0001F6D1',
u':stopwatch:': u'\U000023F1',
u':straight_ruler:': u'\U0001F4CF',
u':strawberry:': u'\U0001F353',
u':studio_microphone:': u'\U0001F399',
u':stuffed_flatbread:': u'\U0001F959',
u':sun:': u'\U00002600',
u':sun_behind_cloud:': u'\U000026C5',
u':sun_behind_large_cloud:': u'\U0001F325',
u':sun_behind_rain_cloud:': u'\U0001F326',
u':sun_behind_small_cloud:': u'\U0001F324',
u':sun_with_face:': u'\U0001F31E',
u':sunflower:': u'\U0001F33B',
u':sunglasses:': u'\U0001F576',
u':sunrise:': u'\U0001F305',
u':sunrise_over_mountains:': u'\U0001F304',
u':sunset:': u'\U0001F307',
u':sushi:': u'\U0001F363',
u':suspension_railway:': u'\U0001F69F',
u':sweat_droplets:': u'\U0001F4A6',
u':synagogue:': u'\U0001F54D',
u':syringe:': u'\U0001F489',
u':t-shirt:': u'\U0001F455',
u':taco:': u'\U0001F32E',
u':tanabata_tree:': u'\U0001F38B',
u':tangerine:': u'\U0001F34A',
u':taxi:': u'\U0001F695',
u':teacup_without_handle:': u'\U0001F375',
u':tear-off_calendar:': u'\U0001F4C6',
u':telephone:': u'\U0000260E',
u':telephone_receiver:': u'\U0001F4DE',
u':telescope:': u'\U0001F52D',
u':television:': u'\U0001F4FA',
u':ten-thirty:': u'\U0001F565',
u':ten_o’clock:': u'\U0001F559',
u':tennis:': u'\U0001F3BE',
u':tent:': u'\U000026FA',
u':thermometer:': u'\U0001F321',
u':thinking_face:': u'\U0001F914',
u':thought_balloon:': u'\U0001F4AD',
u':three-thirty:': u'\U0001F55E',
u':three_o’clock:': u'\U0001F552',
u':thumbs_down:': u'\U0001F44E',
u':thumbs_down_dark_skin_tone:': u'\U0001F44E \U0001F3FF',
u':thumbs_down_light_skin_tone:': u'\U0001F44E \U0001F3FB',
u':thumbs_down_medium-dark_skin_tone:': u'\U0001F44E \U0001F3FE',
u':thumbs_down_medium-light_skin_tone:': u'\U0001F44E \U0001F3FC',
u':thumbs_down_medium_skin_tone:': u'\U0001F44E \U0001F3FD',
u':thumbs_up:': u'\U0001F44D',
u':thumbs_up_dark_skin_tone:': u'\U0001F44D \U0001F3FF',
u':thumbs_up_light_skin_tone:': u'\U0001F44D \U0001F3FB',
u':thumbs_up_medium-dark_skin_tone:': u'\U0001F44D \U0001F3FE',
u':thumbs_up_medium-light_skin_tone:': u'\U0001F44D \U0001F3FC',
u':thumbs_up_medium_skin_tone:': u'\U0001F44D \U0001F3FD',
u':ticket:': u'\U0001F3AB',
u':tiger:': u'\U0001F405',
u':tiger_face:': u'\U0001F42F',
u':timer_clock:': u'\U000023F2',
u':tired_face:': u'\U0001F62B',
u':toilet:': u'\U0001F6BD',
u':tomato:': u'\U0001F345',
u':tongue:': u'\U0001F445',
u':top_hat:': u'\U0001F3A9',
u':tornado:': u'\U0001F32A',
u':trackball:': u'\U0001F5B2',
u':tractor:': u'\U0001F69C',
u':trade_mark:': u'\U00002122',
u':train:': u'\U0001F686',
u':tram:': u'\U0001F68A',
u':tram_car:': u'\U0001F68B',
u':triangular_flag:': u'\U0001F6A9',
u':triangular_ruler:': u'\U0001F4D0',
u':trident_emblem:': u'\U0001F531',
u':trolleybus:': u'\U0001F68E',
u':trophy:': u'\U0001F3C6',
u':tropical_drink:': u'\U0001F379',
u':tropical_fish:': u'\U0001F420',
u':trumpet:': u'\U0001F3BA',
u':tulip:': u'\U0001F337',
u':tumbler_glass:': u'\U0001F943',
u':turkey:': u'\U0001F983',
u':turtle:': u'\U0001F422',
u':twelve-thirty:': u'\U0001F567',
u':twelve_o’clock:': u'\U0001F55B',
u':two-hump_camel:': u'\U0001F42B',
u':two-thirty:': u'\U0001F55D',
u':two_hearts:': u'\U0001F495',
u':two_men_holding_hands:': u'\U0001F46C',
u':two_o’clock:': u'\U0001F551',
u':two_women_holding_hands:': u'\U0001F46D',
u':umbrella:': u'\U00002602',
u':umbrella_on_ground:': u'\U000026F1',
u':umbrella_with_rain_drops:': u'\U00002614',
u':unamused_face:': u'\U0001F612',
u':unicorn_face:': u'\U0001F984',
u':unlocked:': u'\U0001F513',
u':up-down_arrow:': u'\U00002195',
u':up-left_arrow:': u'\U00002196',
u':up-right_arrow:': u'\U00002197',
u':up_arrow:': u'\U00002B06',
u':up_button:': u'\U0001F53C',
u':upside-down_face:': u'\U0001F643',
u':vertical_traffic_light:': u'\U0001F6A6',
u':vibration_mode:': u'\U0001F4F3',
u':victory_hand:': u'\U0000270C',
u':victory_hand_dark_skin_tone:': u'\U0000270C \U0001F3FF',
u':victory_hand_light_skin_tone:': u'\U0000270C \U0001F3FB',
u':victory_hand_medium-dark_skin_tone:': u'\U0000270C \U0001F3FE',
u':victory_hand_medium-light_skin_tone:': u'\U0000270C \U0001F3FC',
u':victory_hand_medium_skin_tone:': u'\U0000270C \U0001F3FD',
u':video_camera:': u'\U0001F4F9',
u':video_game:': u'\U0001F3AE',
u':videocassette:': u'\U0001F4FC',
u':violin:': u'\U0001F3BB',
u':volcano:': u'\U0001F30B',
u':volleyball:': u'\U0001F3D0',
u':vulcan_salute:': u'\U0001F596',
u':vulcan_salute_dark_skin_tone:': u'\U0001F596 \U0001F3FF',
u':vulcan_salute_light_skin_tone:': u'\U0001F596 \U0001F3FB',
u':vulcan_salute_medium-dark_skin_tone:': u'\U0001F596 \U0001F3FE',
u':vulcan_salute_medium-light_skin_tone:': u'\U0001F596 \U0001F3FC',
u':vulcan_salute_medium_skin_tone:': u'\U0001F596 \U0001F3FD',
u':waning_crescent_moon:': u'\U0001F318',
u':waning_gibbous_moon:': u'\U0001F316',
u':warning:': u'\U000026A0',
u':wastebasket:': u'\U0001F5D1',
u':watch:': u'\U0000231A',
u':water_buffalo:': u'\U0001F403',
u':water_closet:': u'\U0001F6BE',
u':water_wave:': u'\U0001F30A',
u':watermelon:': u'\U0001F349',
u':waving_hand:': u'\U0001F44B',
u':waving_hand_dark_skin_tone:': u'\U0001F44B \U0001F3FF',
u':waving_hand_light_skin_tone:': u'\U0001F44B \U0001F3FB',
u':waving_hand_medium-dark_skin_tone:': u'\U0001F44B \U0001F3FE',
u':waving_hand_medium-light_skin_tone:': u'\U0001F44B \U0001F3FC',
u':waving_hand_medium_skin_tone:': u'\U0001F44B \U0001F3FD',
u':wavy_dash:': u'\U00003030',
u':waxing_crescent_moon:': u'\U0001F312',
u':waxing_gibbous_moon:': u'\U0001F314',
u':weary_cat_face:': u'\U0001F640',
u':weary_face:': u'\U0001F629',
u':wedding:': u'\U0001F492',
u':whale:': u'\U0001F40B',
u':wheel_of_dharma:': u'\U00002638',
u':wheelchair_symbol:': u'\U0000267F',
u':white_circle:': u'\U000026AA',
u':white_exclamation_mark:': u'\U00002755',
u':white_flag:': u'\U0001F3F3',
u':white_flower:': u'\U0001F4AE',
u':white_heavy_check_mark:': u'\U00002705',
u':white_large_square:': u'\U00002B1C',
u':white_medium-small_square:': u'\U000025FD',
u':white_medium_square:': u'\U000025FB',
u':white_medium_star:': u'\U00002B50',
u':white_question_mark:': u'\U00002754',
u':white_small_square:': u'\U000025AB',
u':white_square_button:': u'\U0001F533',
u':wilted_flower:': u'\U0001F940',
u':wind_chime:': u'\U0001F390',
u':wind_face:': u'\U0001F32C',
u':wine_glass:': u'\U0001F377',
u':winking_face:': u'\U0001F609',
u':wolf_face:': u'\U0001F43A',
u':woman:': u'\U0001F469',
u':woman_artist:': u'\U0001F469 \U0000200D \U0001F3A8',
u':woman_artist_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F3A8',
u':woman_artist_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F3A8',
u':woman_artist_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F3A8',
u':woman_artist_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F3A8',
u':woman_artist_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F3A8',
u':woman_astronaut:': u'\U0001F469 \U0000200D \U0001F680',
u':woman_astronaut_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F680',
u':woman_astronaut_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F680',
u':woman_astronaut_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F680',
u':woman_astronaut_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F680',
u':woman_astronaut_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F680',
u':woman_biking:': u'\U0001F6B4 \U0000200D \U00002640 \U0000FE0F',
u':woman_biking_dark_skin_tone:': u'\U0001F6B4 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_biking_light_skin_tone:': u'\U0001F6B4 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_biking_medium-dark_skin_tone:': u'\U0001F6B4 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_biking_medium-light_skin_tone:': u'\U0001F6B4 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_biking_medium_skin_tone:': u'\U0001F6B4 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_bouncing_ball:': u'\U000026F9 \U0000FE0F \U0000200D \U00002640 \U0000FE0F',
u':woman_bouncing_ball_dark_skin_tone:': u'\U000026F9 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_bouncing_ball_light_skin_tone:': u'\U000026F9 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_bouncing_ball_medium-dark_skin_tone:': u'\U000026F9 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_bouncing_ball_medium-light_skin_tone:': u'\U000026F9 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_bouncing_ball_medium_skin_tone:': u'\U000026F9 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_bowing:': u'\U0001F647 \U0000200D \U00002640 \U0000FE0F',
u':woman_bowing_dark_skin_tone:': u'\U0001F647 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_bowing_light_skin_tone:': u'\U0001F647 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_bowing_medium-dark_skin_tone:': u'\U0001F647 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_bowing_medium-light_skin_tone:': u'\U0001F647 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_bowing_medium_skin_tone:': u'\U0001F647 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_cartwheeling:': u'\U0001F938 \U0000200D \U00002640 \U0000FE0F',
u':woman_cartwheeling_dark_skin_tone:': u'\U0001F938 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_cartwheeling_light_skin_tone:': u'\U0001F938 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_cartwheeling_medium-dark_skin_tone:': u'\U0001F938 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_cartwheeling_medium-light_skin_tone:': u'\U0001F938 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_cartwheeling_medium_skin_tone:': u'\U0001F938 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_construction_worker:': u'\U0001F477 \U0000200D \U00002640 \U0000FE0F',
u':woman_construction_worker_dark_skin_tone:': u'\U0001F477 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_construction_worker_light_skin_tone:': u'\U0001F477 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_construction_worker_medium-dark_skin_tone:': u'\U0001F477 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_construction_worker_medium-light_skin_tone:': u'\U0001F477 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_construction_worker_medium_skin_tone:': u'\U0001F477 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_cook:': u'\U0001F469 \U0000200D \U0001F373',
u':woman_cook_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F373',
u':woman_cook_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F373',
u':woman_cook_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F373',
u':woman_cook_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F373',
u':woman_cook_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F373',
u':woman_dancing:': u'\U0001F483',
u':woman_dancing_dark_skin_tone:': u'\U0001F483 \U0001F3FF',
u':woman_dancing_light_skin_tone:': u'\U0001F483 \U0001F3FB',
u':woman_dancing_medium-dark_skin_tone:': u'\U0001F483 \U0001F3FE',
u':woman_dancing_medium-light_skin_tone:': u'\U0001F483 \U0001F3FC',
u':woman_dancing_medium_skin_tone:': u'\U0001F483 \U0001F3FD',
u':woman_dark_skin_tone:': u'\U0001F469 \U0001F3FF',
u':woman_detective:': u'\U0001F575 \U0000FE0F \U0000200D \U00002640 \U0000FE0F',
u':woman_detective_dark_skin_tone:': u'\U0001F575 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_detective_light_skin_tone:': u'\U0001F575 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_detective_medium-dark_skin_tone:': u'\U0001F575 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_detective_medium-light_skin_tone:': u'\U0001F575 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_detective_medium_skin_tone:': u'\U0001F575 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_facepalming:': u'\U0001F926 \U0000200D \U00002640 \U0000FE0F',
u':woman_facepalming_dark_skin_tone:': u'\U0001F926 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_facepalming_light_skin_tone:': u'\U0001F926 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_facepalming_medium-dark_skin_tone:': u'\U0001F926 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_facepalming_medium-light_skin_tone:': u'\U0001F926 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_facepalming_medium_skin_tone:': u'\U0001F926 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_factory_worker:': u'\U0001F469 \U0000200D \U0001F3ED',
u':woman_factory_worker_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F3ED',
u':woman_factory_worker_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F3ED',
u':woman_factory_worker_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F3ED',
u':woman_factory_worker_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F3ED',
u':woman_factory_worker_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F3ED',
u':woman_farmer:': u'\U0001F469 \U0000200D \U0001F33E',
u':woman_farmer_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F33E',
u':woman_farmer_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F33E',
u':woman_farmer_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F33E',
u':woman_farmer_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F33E',
u':woman_farmer_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F33E',
u':woman_firefighter:': u'\U0001F469 \U0000200D \U0001F692',
u':woman_firefighter_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F692',
u':woman_firefighter_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F692',
u':woman_firefighter_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F692',
u':woman_firefighter_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F692',
u':woman_firefighter_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F692',
u':woman_frowning:': u'\U0001F64D \U0000200D \U00002640 \U0000FE0F',
u':woman_frowning_dark_skin_tone:': u'\U0001F64D \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_frowning_light_skin_tone:': u'\U0001F64D \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_frowning_medium-dark_skin_tone:': u'\U0001F64D \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_frowning_medium-light_skin_tone:': u'\U0001F64D \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_frowning_medium_skin_tone:': u'\U0001F64D \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_NO:': u'\U0001F645 \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_NO_dark_skin_tone:': u'\U0001F645 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_NO_light_skin_tone:': u'\U0001F645 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_NO_medium-dark_skin_tone:': u'\U0001F645 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_NO_medium-light_skin_tone:': u'\U0001F645 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_NO_medium_skin_tone:': u'\U0001F645 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_OK:': u'\U0001F646 \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_OK_dark_skin_tone:': u'\U0001F646 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_OK_light_skin_tone:': u'\U0001F646 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_OK_medium-dark_skin_tone:': u'\U0001F646 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_OK_medium-light_skin_tone:': u'\U0001F646 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_gesturing_OK_medium_skin_tone:': u'\U0001F646 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_haircut:': u'\U0001F487 \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_haircut_dark_skin_tone:': u'\U0001F487 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_haircut_light_skin_tone:': u'\U0001F487 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_haircut_medium-dark_skin_tone:': u'\U0001F487 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_haircut_medium-light_skin_tone:': u'\U0001F487 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_haircut_medium_skin_tone:': u'\U0001F487 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_massage:': u'\U0001F486 \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_massage_dark_skin_tone:': u'\U0001F486 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_massage_light_skin_tone:': u'\U0001F486 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_massage_medium-dark_skin_tone:': u'\U0001F486 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_massage_medium-light_skin_tone:': u'\U0001F486 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_getting_massage_medium_skin_tone:': u'\U0001F486 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_golfing:': u'\U0001F3CC \U0000FE0F \U0000200D \U00002640 \U0000FE0F',
u':woman_golfing_dark_skin_tone:': u'\U0001F3CC \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_golfing_light_skin_tone:': u'\U0001F3CC \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_golfing_medium-dark_skin_tone:': u'\U0001F3CC \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_golfing_medium-light_skin_tone:': u'\U0001F3CC \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_golfing_medium_skin_tone:': u'\U0001F3CC \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_guard:': u'\U0001F482 \U0000200D \U00002640 \U0000FE0F',
u':woman_guard_dark_skin_tone:': u'\U0001F482 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_guard_light_skin_tone:': u'\U0001F482 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_guard_medium-dark_skin_tone:': u'\U0001F482 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_guard_medium-light_skin_tone:': u'\U0001F482 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_guard_medium_skin_tone:': u'\U0001F482 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_health_worker:': u'\U0001F469 \U0000200D \U00002695 \U0000FE0F',
u':woman_health_worker_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U00002695 \U0000FE0F',
u':woman_health_worker_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U00002695 \U0000FE0F',
u':woman_health_worker_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U00002695 \U0000FE0F',
u':woman_health_worker_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U00002695 \U0000FE0F',
u':woman_health_worker_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U00002695 \U0000FE0F',
u':woman_judge:': u'\U0001F469 \U0000200D \U00002696 \U0000FE0F',
u':woman_judge_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U00002696 \U0000FE0F',
u':woman_judge_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U00002696 \U0000FE0F',
u':woman_judge_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U00002696 \U0000FE0F',
u':woman_judge_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U00002696 \U0000FE0F',
u':woman_judge_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U00002696 \U0000FE0F',
u':woman_juggling:': u'\U0001F939 \U0000200D \U00002640 \U0000FE0F',
u':woman_juggling_dark_skin_tone:': u'\U0001F939 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_juggling_light_skin_tone:': u'\U0001F939 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_juggling_medium-dark_skin_tone:': u'\U0001F939 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_juggling_medium-light_skin_tone:': u'\U0001F939 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_juggling_medium_skin_tone:': u'\U0001F939 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_lifting_weights:': u'\U0001F3CB \U0000FE0F \U0000200D \U00002640 \U0000FE0F',
u':woman_lifting_weights_dark_skin_tone:': u'\U0001F3CB \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_lifting_weights_light_skin_tone:': u'\U0001F3CB \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_lifting_weights_medium-dark_skin_tone:': u'\U0001F3CB \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_lifting_weights_medium-light_skin_tone:': u'\U0001F3CB \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_lifting_weights_medium_skin_tone:': u'\U0001F3CB \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_light_skin_tone:': u'\U0001F469 \U0001F3FB',
u':woman_mechanic:': u'\U0001F469 \U0000200D \U0001F527',
u':woman_mechanic_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F527',
u':woman_mechanic_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F527',
u':woman_mechanic_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F527',
u':woman_mechanic_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F527',
u':woman_mechanic_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F527',
u':woman_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE',
u':woman_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC',
u':woman_medium_skin_tone:': u'\U0001F469 \U0001F3FD',
u':woman_mountain_biking:': u'\U0001F6B5 \U0000200D \U00002640 \U0000FE0F',
u':woman_mountain_biking_dark_skin_tone:': u'\U0001F6B5 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_mountain_biking_light_skin_tone:': u'\U0001F6B5 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_mountain_biking_medium-dark_skin_tone:': u'\U0001F6B5 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_mountain_biking_medium-light_skin_tone:': u'\U0001F6B5 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_mountain_biking_medium_skin_tone:': u'\U0001F6B5 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_office_worker:': u'\U0001F469 \U0000200D \U0001F4BC',
u':woman_office_worker_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F4BC',
u':woman_office_worker_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F4BC',
u':woman_office_worker_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F4BC',
u':woman_office_worker_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F4BC',
u':woman_office_worker_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F4BC',
u':woman_pilot:': u'\U0001F469 \U0000200D \U00002708 \U0000FE0F',
u':woman_pilot_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U00002708 \U0000FE0F',
u':woman_pilot_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U00002708 \U0000FE0F',
u':woman_pilot_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U00002708 \U0000FE0F',
u':woman_pilot_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U00002708 \U0000FE0F',
u':woman_pilot_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U00002708 \U0000FE0F',
u':woman_playing_handball:': u'\U0001F93E \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_handball_dark_skin_tone:': u'\U0001F93E \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_handball_light_skin_tone:': u'\U0001F93E \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_handball_medium-dark_skin_tone:': u'\U0001F93E \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_handball_medium-light_skin_tone:': u'\U0001F93E \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_handball_medium_skin_tone:': u'\U0001F93E \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_water_polo:': u'\U0001F93D \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_water_polo_dark_skin_tone:': u'\U0001F93D \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_water_polo_light_skin_tone:': u'\U0001F93D \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_water_polo_medium-dark_skin_tone:': u'\U0001F93D \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_water_polo_medium-light_skin_tone:': u'\U0001F93D \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_playing_water_polo_medium_skin_tone:': u'\U0001F93D \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_police_officer:': u'\U0001F46E \U0000200D \U00002640 \U0000FE0F',
u':woman_police_officer_dark_skin_tone:': u'\U0001F46E \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_police_officer_light_skin_tone:': u'\U0001F46E \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_police_officer_medium-dark_skin_tone:': u'\U0001F46E \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_police_officer_medium-light_skin_tone:': u'\U0001F46E \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_police_officer_medium_skin_tone:': u'\U0001F46E \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_pouting:': u'\U0001F64E \U0000200D \U00002640 \U0000FE0F',
u':woman_pouting_dark_skin_tone:': u'\U0001F64E \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_pouting_light_skin_tone:': u'\U0001F64E \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_pouting_medium-dark_skin_tone:': u'\U0001F64E \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_pouting_medium-light_skin_tone:': u'\U0001F64E \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_pouting_medium_skin_tone:': u'\U0001F64E \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_raising_hand:': u'\U0001F64B \U0000200D \U00002640 \U0000FE0F',
u':woman_raising_hand_dark_skin_tone:': u'\U0001F64B \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_raising_hand_light_skin_tone:': u'\U0001F64B \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_raising_hand_medium-dark_skin_tone:': u'\U0001F64B \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_raising_hand_medium-light_skin_tone:': u'\U0001F64B \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_raising_hand_medium_skin_tone:': u'\U0001F64B \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_rowing_boat:': u'\U0001F6A3 \U0000200D \U00002640 \U0000FE0F',
u':woman_rowing_boat_dark_skin_tone:': u'\U0001F6A3 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_rowing_boat_light_skin_tone:': u'\U0001F6A3 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_rowing_boat_medium-dark_skin_tone:': u'\U0001F6A3 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_rowing_boat_medium-light_skin_tone:': u'\U0001F6A3 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_rowing_boat_medium_skin_tone:': u'\U0001F6A3 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_running:': u'\U0001F3C3 \U0000200D \U00002640 \U0000FE0F',
u':woman_running_dark_skin_tone:': u'\U0001F3C3 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_running_light_skin_tone:': u'\U0001F3C3 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_running_medium-dark_skin_tone:': u'\U0001F3C3 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_running_medium-light_skin_tone:': u'\U0001F3C3 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_running_medium_skin_tone:': u'\U0001F3C3 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_scientist:': u'\U0001F469 \U0000200D \U0001F52C',
u':woman_scientist_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F52C',
u':woman_scientist_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F52C',
u':woman_scientist_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F52C',
u':woman_scientist_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F52C',
u':woman_scientist_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F52C',
u':woman_shrugging:': u'\U0001F937 \U0000200D \U00002640 \U0000FE0F',
u':woman_shrugging_dark_skin_tone:': u'\U0001F937 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_shrugging_light_skin_tone:': u'\U0001F937 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_shrugging_medium-dark_skin_tone:': u'\U0001F937 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_shrugging_medium-light_skin_tone:': u'\U0001F937 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_shrugging_medium_skin_tone:': u'\U0001F937 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_singer:': u'\U0001F469 \U0000200D \U0001F3A4',
u':woman_singer_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F3A4',
u':woman_singer_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F3A4',
u':woman_singer_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F3A4',
u':woman_singer_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F3A4',
u':woman_singer_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F3A4',
u':woman_student:': u'\U0001F469 \U0000200D \U0001F393',
u':woman_student_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F393',
u':woman_student_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F393',
u':woman_student_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F393',
u':woman_student_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F393',
u':woman_student_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F393',
u':woman_surfing:': u'\U0001F3C4 \U0000200D \U00002640 \U0000FE0F',
u':woman_surfing_dark_skin_tone:': u'\U0001F3C4 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_surfing_light_skin_tone:': u'\U0001F3C4 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_surfing_medium-dark_skin_tone:': u'\U0001F3C4 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_surfing_medium-light_skin_tone:': u'\U0001F3C4 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_surfing_medium_skin_tone:': u'\U0001F3C4 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_swimming:': u'\U0001F3CA \U0000200D \U00002640 \U0000FE0F',
u':woman_swimming_dark_skin_tone:': u'\U0001F3CA \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_swimming_light_skin_tone:': u'\U0001F3CA \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_swimming_medium-dark_skin_tone:': u'\U0001F3CA \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_swimming_medium-light_skin_tone:': u'\U0001F3CA \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_swimming_medium_skin_tone:': u'\U0001F3CA \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_teacher:': u'\U0001F469 \U0000200D \U0001F3EB',
u':woman_teacher_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F3EB',
u':woman_teacher_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F3EB',
u':woman_teacher_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F3EB',
u':woman_teacher_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F3EB',
u':woman_teacher_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F3EB',
u':woman_technologist:': u'\U0001F469 \U0000200D \U0001F4BB',
u':woman_technologist_dark_skin_tone:': u'\U0001F469 \U0001F3FF \U0000200D \U0001F4BB',
u':woman_technologist_light_skin_tone:': u'\U0001F469 \U0001F3FB \U0000200D \U0001F4BB',
u':woman_technologist_medium-dark_skin_tone:': u'\U0001F469 \U0001F3FE \U0000200D \U0001F4BB',
u':woman_technologist_medium-light_skin_tone:': u'\U0001F469 \U0001F3FC \U0000200D \U0001F4BB',
u':woman_technologist_medium_skin_tone:': u'\U0001F469 \U0001F3FD \U0000200D \U0001F4BB',
u':woman_tipping_hand:': u'\U0001F481 \U0000200D \U00002640 \U0000FE0F',
u':woman_tipping_hand_dark_skin_tone:': u'\U0001F481 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_tipping_hand_light_skin_tone:': u'\U0001F481 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_tipping_hand_medium-dark_skin_tone:': u'\U0001F481 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_tipping_hand_medium-light_skin_tone:': u'\U0001F481 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_tipping_hand_medium_skin_tone:': u'\U0001F481 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_walking:': u'\U0001F6B6 \U0000200D \U00002640 \U0000FE0F',
u':woman_walking_dark_skin_tone:': u'\U0001F6B6 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_walking_light_skin_tone:': u'\U0001F6B6 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_walking_medium-dark_skin_tone:': u'\U0001F6B6 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_walking_medium-light_skin_tone:': u'\U0001F6B6 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_walking_medium_skin_tone:': u'\U0001F6B6 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman_wearing_turban:': u'\U0001F473 \U0000200D \U00002640 \U0000FE0F',
u':woman_wearing_turban_dark_skin_tone:': u'\U0001F473 \U0001F3FF \U0000200D \U00002640 \U0000FE0F',
u':woman_wearing_turban_light_skin_tone:': u'\U0001F473 \U0001F3FB \U0000200D \U00002640 \U0000FE0F',
u':woman_wearing_turban_medium-dark_skin_tone:': u'\U0001F473 \U0001F3FE \U0000200D \U00002640 \U0000FE0F',
u':woman_wearing_turban_medium-light_skin_tone:': u'\U0001F473 \U0001F3FC \U0000200D \U00002640 \U0000FE0F',
u':woman_wearing_turban_medium_skin_tone:': u'\U0001F473 \U0001F3FD \U0000200D \U00002640 \U0000FE0F',
u':woman’s_boot:': u'\U0001F462',
u':woman’s_clothes:': u'\U0001F45A',
u':woman’s_hat:': u'\U0001F452',
u':woman’s_sandal:': u'\U0001F461',
u':women_with_bunny_ears_partying:': u'\U0001F46F \U0000200D \U00002640 \U0000FE0F',
u':women_wrestling:': u'\U0001F93C \U0000200D \U00002640 \U0000FE0F',
u':women’s_room:': u'\U0001F6BA',
u':world_map:': u'\U0001F5FA',
u':worried_face:': u'\U0001F61F',
u':wrapped_gift:': u'\U0001F381',
u':wrench:': u'\U0001F527',
u':writing_hand:': u'\U0000270D',
u':writing_hand_dark_skin_tone:': u'\U0000270D \U0001F3FF',
u':writing_hand_light_skin_tone:': u'\U0000270D \U0001F3FB',
u':writing_hand_medium-dark_skin_tone:': u'\U0000270D \U0001F3FE',
u':writing_hand_medium-light_skin_tone:': u'\U0000270D \U0001F3FC',
u':writing_hand_medium_skin_tone:': u'\U0000270D \U0001F3FD',
u':yellow_heart:': u'\U0001F49B',
u':yen_banknote:': u'\U0001F4B4',
u':yin_yang:': u'\U0000262F',
u':zipper-mouth_face:': u'\U0001F910',
u':zzz:': u'\U0001F4A4',
u':Åland_Islands:': u'\U0001F1E6 \U0001F1FD',
}
EMOJI_ALIAS_UNICODE = dict(EMOJI_UNICODE.items(), **{
u':admission_tickets:': u'\U0001F39F',
u':aerial_tramway:': u'\U0001F6A1',
u':airplane:': u'\U00002708',
u':airplane_arriving:': u'\U0001F6EC',
u':airplane_departure:': u'\U0001F6EB',
u':alarm_clock:': u'\U000023F0',
u':alembic:': u'\U00002697',
u':space_invader:': u'\U0001F47E',
u':ambulance:': u'\U0001F691',
u':football:': u'\U0001F3C8',
u':amphora:': u'\U0001F3FA',
u':anchor:': u'\U00002693',
u':anger:': u'\U0001F4A2',
u':angry:': u'\U0001F620',
u':anguished:': u'\U0001F627',
u':ant:': u'\U0001F41C',
u':signal_strength:': u'\U0001F4F6',
u':arrows_counterclockwise:': u'\U0001F504',
u':aquarius:': u'\U00002652',
u':aries:': u'\U00002648',
u':arrow_heading_down:': u'\U00002935',
u':arrow_heading_up:': u'\U00002934',
u':articulated_lorry:': u'\U0001F69B',
u':art:': u'\U0001F3A8',
u':astonished:': u'\U0001F632',
u':athletic_shoe:': u'\U0001F45F',
u':atom_symbol:': u'\U0000269B',
u':eggplant:': u'\U0001F346',
u':atm:': u'\U0001F3E7',
u':car:': u'\U0001F697',
u':red_car:': u'\U0001F697',
u':baby:': u'\U0001F476',
u':angel:': u'\U0001F47C',
u':baby_bottle:': u'\U0001F37C',
u':baby_chick:': u'\U0001F424',
u':baby_symbol:': u'\U0001F6BC',
u':back:': u'\U0001F519',
u':camel:': u'\U0001F42B',
u':badminton_racquet_and_shuttlecock:': u'\U0001F3F8',
u':baggage_claim:': u'\U0001F6C4',
u':balloon:': u'\U0001F388',
u':ballot_box_with_ballot:': u'\U0001F5F3',
u':ballot_box_with_check:': u'\U00002611',
u':banana:': u'\U0001F34C',
u':bank:': u'\U0001F3E6',
u':dollar:': u'\U0001F4B5',
u':euro:': u'\U0001F4B6',
u':pound:': u'\U0001F4B7',
u':yen:': u'\U0001F4B4',
u':bar_chart:': u'\U0001F4CA',
u':barber:': u'\U0001F488',
u':baseball:': u'\U000026BE',
u':basketball:': u'\U0001F3C0',
u':bath:': u'\U0001F6C0',
u':bathtub:': u'\U0001F6C1',
u':battery:': u'\U0001F50B',
u':beach_with_umbrella:': u'\U0001F3D6',
u':bear:': u'\U0001F43B',
u':heartbeat:': u'\U0001F493',
u':bed:': u'\U0001F6CF',
u':beer:': u'\U0001F37A',
u':bell:': u'\U0001F514',
u':no_bell:': u'\U0001F515',
u':bellhop_bell:': u'\U0001F6CE',
u':bento:': u'\U0001F371',
u':bike:': u'\U0001F6B2',
u':bicyclist:': u'\U0001F6B4',
u':bikini:': u'\U0001F459',
u':8ball:': u'\U0001F3B1',
u':biohazard_sign:': u'\U00002623',
u':bird:': u'\U0001F426',
u':birthday:': u'\U0001F382',
u':black_circle_for_record:': u'\U000023FA',
u':clubs:': u'\U00002663',
u':diamonds:': u'\U00002666',
u':arrow_double_down:': u'\U000023EC',
u':hearts:': u'\U00002665',
u':black_large_square:': u'\U00002B1B',
u':rewind:': u'\U000023EA',
u':black_left__pointing_double_triangle_with_vertical_bar:': u'\U000023EE',
u':arrow_backward:': u'\U000025C0',
u':black_medium_small_square:': u'\U000025FE',
u':black_medium_square:': u'\U000025FC',
u':black_nib:': u'\U00002712',
u':question:': u'\U00002753',
u':fast_forward:': u'\U000023E9',
u':black_right__pointing_double_triangle_with_vertical_bar:': u'\U000023ED',
u':arrow_forward:': u'\U000025B6',
u':black_right__pointing_triangle_with_double_vertical_bar:': u'\U000023EF',
u':arrow_right:': u'\U000027A1',
u':scissors:': u'\U00002702',
u':black_small_square:': u'\U000025AA',
u':spades:': u'\U00002660',
u':black_square_button:': u'\U0001F532',
u':black_square_for_stop:': u'\U000023F9',
u':sunny:': u'\U00002600',
u':phone:': u'\U0000260E',
u':telephone:': u'\U0000260E',
u':recycle:': u'\U0000267B',
u':arrow_double_up:': u'\U000023EB',
u':blossom:': u'\U0001F33C',
u':blowfish:': u'\U0001F421',
u':blue_book:': u'\U0001F4D8',
u':blue_heart:': u'\U0001F499',
u':boar:': u'\U0001F417',
u':bomb:': u'\U0001F4A3',
u':bookmark:': u'\U0001F516',
u':bookmark_tabs:': u'\U0001F4D1',
u':books:': u'\U0001F4DA',
u':bottle_with_popping_cork:': u'\U0001F37E',
u':bouquet:': u'\U0001F490',
u':bow_and_arrow:': u'\U0001F3F9',
u':bowling:': u'\U0001F3B3',
u':boy:': u'\U0001F466',
u':bread:': u'\U0001F35E',
u':bride_with_veil:': u'\U0001F470',
u':bridge_at_night:': u'\U0001F309',
u':briefcase:': u'\U0001F4BC',
u':broken_heart:': u'\U0001F494',
u':bug:': u'\U0001F41B',
u':building_construction:': u'\U0001F3D7',
u':burrito:': u'\U0001F32F',
u':bus:': u'\U0001F68C',
u':busstop:': u'\U0001F68F',
u':bust_in_silhouette:': u'\U0001F464',
u':busts_in_silhouette:': u'\U0001F465',
u':cactus:': u'\U0001F335',
u':date:': u'\U0001F4C5',
u':camera:': u'\U0001F4F7',
u':camera_with_flash:': u'\U0001F4F8',
u':camping:': u'\U0001F3D5',
u':cancer:': u'\U0000264B',
u':candle:': u'\U0001F56F',
u':candy:': u'\U0001F36C',
u':capricorn:': u'\U00002651',
u':card_file_box:': u'\U0001F5C3',
u':card_index:': u'\U0001F4C7',
u':card_index_dividers:': u'\U0001F5C2',
u':carousel_horse:': u'\U0001F3A0',
u':flags:': u'\U0001F38F',
u':cat2:': u'\U0001F408',
u':cat:': u'\U0001F431',
u':joy_cat:': u'\U0001F639',
u':smirk_cat:': u'\U0001F63C',
u':chains:': u'\U000026D3',
u':chart_with_downwards_trend:': u'\U0001F4C9',
u':chart_with_upwards_trend:': u'\U0001F4C8',
u':chart:': u'\U0001F4B9',
u':mega:': u'\U0001F4E3',
u':cheese_wedge:': u'\U0001F9C0',
u':checkered_flag:': u'\U0001F3C1',
u':cherries:': u'\U0001F352',
u':cherry_blossom:': u'\U0001F338',
u':chestnut:': u'\U0001F330',
u':chicken:': u'\U0001F414',
u':children_crossing:': u'\U0001F6B8',
u':chipmunk:': u'\U0001F43F',
u':chocolate_bar:': u'\U0001F36B',
u':christmas_tree:': u'\U0001F384',
u':church:': u'\U000026EA',
u':cinema:': u'\U0001F3A6',
u':accept:': u'\U0001F251',
u':ideograph_advantage:': u'\U0001F250',
u':congratulations:': u'\U00003297',
u':secret:': u'\U00003299',
u':m:': u'\U000024C2',
u':circus_tent:': u'\U0001F3AA',
u':cityscape:': u'\U0001F3D9',
u':city_sunset:': u'\U0001F306',
u':clapper:': u'\U0001F3AC',
u':clap:': u'\U0001F44F',
u':classical_building:': u'\U0001F3DB',
u':beers:': u'\U0001F37B',
u':clipboard:': u'\U0001F4CB',
u':clock830:': u'\U0001F563',
u':clock8:': u'\U0001F557',
u':clock1130:': u'\U0001F566',
u':clock11:': u'\U0001F55A',
u':clock530:': u'\U0001F560',
u':clock5:': u'\U0001F554',
u':clock430:': u'\U0001F55F',
u':clock4:': u'\U0001F553',
u':clock930:': u'\U0001F564',
u':clock9:': u'\U0001F558',
u':clock130:': u'\U0001F55C',
u':clock1:': u'\U0001F550',
u':clock730:': u'\U0001F562',
u':clock7:': u'\U0001F556',
u':clock630:': u'\U0001F561',
u':clock6:': u'\U0001F555',
u':clock1030:': u'\U0001F565',
u':clock10:': u'\U0001F559',
u':clock330:': u'\U0001F55E',
u':clock3:': u'\U0001F552',
u':clock1230:': u'\U0001F567',
u':clock12:': u'\U0001F55B',
u':clock230:': u'\U0001F55D',
u':clock2:': u'\U0001F551',
u':arrows_clockwise:': u'\U0001F503',
u':repeat:': u'\U0001F501',
u':repeat_one:': u'\U0001F502',
u':closed_book:': u'\U0001F4D5',
u':closed_lock_with_key:': u'\U0001F510',
u':mailbox_closed:': u'\U0001F4EA',
u':mailbox:': u'\U0001F4EB',
u':closed_umbrella:': u'\U0001F302',
u':cloud:': u'\U00002601',
u':cloud_with_lightning:': u'\U0001F329',
u':cloud_with_rain:': u'\U0001F327',
u':cloud_with_snow:': u'\U0001F328',
u':cloud_with_tornado:': u'\U0001F32A',
u':cocktail:': u'\U0001F378',
u':coffin:': u'\U000026B0',
u':boom:': u'\U0001F4A5',
u':collision:': u'\U0001F4A5',
u':comet:': u'\U00002604',
u':compression:': u'\U0001F5DC',
u':confetti_ball:': u'\U0001F38A',
u':confounded:': u'\U0001F616',
u':confused:': u'\U0001F615',
u':construction:': u'\U0001F6A7',
u':construction_worker:': u'\U0001F477',
u':control_knobs:': u'\U0001F39B',
u':convenience_store:': u'\U0001F3EA',
u':rice:': u'\U0001F35A',
u':cookie:': u'\U0001F36A',
u':egg:': u'\U0001F373',
u':copyright:': u'\U000000A9',
u':couch_and_lamp:': u'\U0001F6CB',
u':couple_with_heart:': u'\U0001F491',
u':cow2:': u'\U0001F404',
u':cow:': u'\U0001F42E',
u':crab:': u'\U0001F980',
u':credit_card:': u'\U0001F4B3',
u':crescent_moon:': u'\U0001F319',
u':cricket_bat_and_ball:': u'\U0001F3CF',
u':crocodile:': u'\U0001F40A',
u':x:': u'\U0000274C',
u':crossed_flags:': u'\U0001F38C',
u':crossed_swords:': u'\U00002694',
u':crown:': u'\U0001F451',
u':crying_cat_face:': u'\U0001F63F',
u':cry:': u'\U0001F622',
u':crystal_ball:': u'\U0001F52E',
u':curly_loop:': u'\U000027B0',
u':currency_exchange:': u'\U0001F4B1',
u':curry:': u'\U0001F35B',
u':custard:': u'\U0001F36E',
u':customs:': u'\U0001F6C3',
u':cyclone:': u'\U0001F300',
u':dagger_knife:': u'\U0001F5E1',
u':dancer:': u'\U0001F483',
u':dango:': u'\U0001F361',
u':dark_sunglasses:': u'\U0001F576',
u':dash:': u'\U0001F4A8',
u':deciduous_tree:': u'\U0001F333',
u':truck:': u'\U0001F69A',
u':department_store:': u'\U0001F3EC',
u':derelict_house_building:': u'\U0001F3DA',
u':desert:': u'\U0001F3DC',
u':desert_island:': u'\U0001F3DD',
u':desktop_computer:': u'\U0001F5A5',
u':diamond_shape_with_a_dot_inside:': u'\U0001F4A0',
u':dart:': u'\U0001F3AF',
u':disappointed_relieved:': u'\U0001F625',
u':disappointed:': u'\U0001F61E',
u':dizzy_face:': u'\U0001F635',
u':dizzy:': u'\U0001F4AB',
u':do_not_litter:': u'\U0001F6AF',
u':dog2:': u'\U0001F415',
u':dog:': u'\U0001F436',
u':dolphin:': u'\U0001F42C',
u':flipper:': u'\U0001F42C',
u':door:': u'\U0001F6AA',
u':loop:': u'\U000027BF',
u':bangbang:': u'\U0000203C',
u':double_vertical_bar:': u'\U000023F8',
u':doughnut:': u'\U0001F369',
u':dove_of_peace:': u'\U0001F54A',
u':small_red_triangle_down:': u'\U0001F53B',
u':arrow_down_small:': u'\U0001F53D',
u':arrow_down:': u'\U00002B07',
u':dragon:': u'\U0001F409',
u':dragon_face:': u'\U0001F432',
u':dress:': u'\U0001F457',
u':dromedary_camel:': u'\U0001F42A',
u':droplet:': u'\U0001F4A7',
u':dvd:': u'\U0001F4C0',
u':e__mail:': u'\U0001F4E7',
u':ear:': u'\U0001F442',
u':corn:': u'\U0001F33D',
u':ear_of_rice:': u'\U0001F33E',
u':earth_americas:': u'\U0001F30E',
u':earth_asia:': u'\U0001F30F',
u':earth_africa:': u'\U0001F30D',
u':eight_pointed_black_star:': u'\U00002734',
u':eight_spoked_asterisk:': u'\U00002733',
u':eject_symbol:': u'\U000023CF',
u':bulb:': u'\U0001F4A1',
u':electric_plug:': u'\U0001F50C',
u':flashlight:': u'\U0001F526',
u':elephant:': u'\U0001F418',
u':emoji_modifier_fitzpatrick_type__1__2:': u'\U0001F3FB',
u':emoji_modifier_fitzpatrick_type__3:': u'\U0001F3FC',
u':emoji_modifier_fitzpatrick_type__4:': u'\U0001F3FD',
u':emoji_modifier_fitzpatrick_type__5:': u'\U0001F3FE',
u':emoji_modifier_fitzpatrick_type__6:': u'\U0001F3FF',
u':end:': u'\U0001F51A',
u':email:': u'\U00002709',
u':envelope:': u'\U00002709',
u':envelope_with_arrow:': u'\U0001F4E9',
u':european_castle:': u'\U0001F3F0',
u':european_post_office:': u'\U0001F3E4',
u':evergreen_tree:': u'\U0001F332',
u':interrobang:': u'\U00002049',
u':expressionless:': u'\U0001F611',
u':alien:': u'\U0001F47D',
u':eye:': u'\U0001F441',
u':eyeglasses:': u'\U0001F453',
u':eyes:': u'\U0001F440',
u':massage:': u'\U0001F486',
u':yum:': u'\U0001F60B',
u':scream:': u'\U0001F631',
u':kissing_heart:': u'\U0001F618',
u':sweat:': u'\U0001F613',
u':face_with_head__bandage:': u'\U0001F915',
u':triumph:': u'\U0001F624',
u':mask:': u'\U0001F637',
u':no_good:': u'\U0001F645',
u':ok_woman:': u'\U0001F646',
u':open_mouth:': u'\U0001F62E',
u':cold_sweat:': u'\U0001F630',
u':face_with_rolling_eyes:': u'\U0001F644',
u':stuck_out_tongue:': u'\U0001F61B',
u':stuck_out_tongue_closed_eyes:': u'\U0001F61D',
u':stuck_out_tongue_winking_eye:': u'\U0001F61C',
u':joy:': u'\U0001F602',
u':face_with_thermometer:': u'\U0001F912',
u':no_mouth:': u'\U0001F636',
u':factory:': u'\U0001F3ED',
u':fallen_leaf:': u'\U0001F342',
u':family:': u'\U0001F46A',
u':santa:': u'\U0001F385',
u':fax:': u'\U0001F4E0',
u':fearful:': u'\U0001F628',
u':ferris_wheel:': u'\U0001F3A1',
u':ferry:': u'\U000026F4',
u':field_hockey_stick_and_ball:': u'\U0001F3D1',
u':file_cabinet:': u'\U0001F5C4',
u':file_folder:': u'\U0001F4C1',
u':film_frames:': u'\U0001F39E',
u':film_projector:': u'\U0001F4FD',
u':fire:': u'\U0001F525',
u':fire_engine:': u'\U0001F692',
u':sparkler:': u'\U0001F387',
u':fireworks:': u'\U0001F386',
u':first_quarter_moon:': u'\U0001F313',
u':first_quarter_moon_with_face:': u'\U0001F31B',
u':fish:': u'\U0001F41F',
u':fish_cake:': u'\U0001F365',
u':fishing_pole_and_fish:': u'\U0001F3A3',
u':facepunch:': u'\U0001F44A',
u':punch:': u'\U0001F44A',
u':flag_for_Afghanistan:': u'\U0001F1E6 \U0001F1EB',
u':flag_for_Albania:': u'\U0001F1E6 \U0001F1F1',
u':flag_for_Algeria:': u'\U0001F1E9 \U0001F1FF',
u':flag_for_American_Samoa:': u'\U0001F1E6 \U0001F1F8',
u':flag_for_Andorra:': u'\U0001F1E6 \U0001F1E9',
u':flag_for_Angola:': u'\U0001F1E6 \U0001F1F4',
u':flag_for_Anguilla:': u'\U0001F1E6 \U0001F1EE',
u':flag_for_Antarctica:': u'\U0001F1E6 \U0001F1F6',
u':flag_for_Antigua_&_Barbuda:': u'\U0001F1E6 \U0001F1EC',
u':flag_for_Argentina:': u'\U0001F1E6 \U0001F1F7',
u':flag_for_Armenia:': u'\U0001F1E6 \U0001F1F2',
u':flag_for_Aruba:': u'\U0001F1E6 \U0001F1FC',
u':flag_for_Ascension_Island:': u'\U0001F1E6 \U0001F1E8',
u':flag_for_Australia:': u'\U0001F1E6 \U0001F1FA',
u':flag_for_Austria:': u'\U0001F1E6 \U0001F1F9',
u':flag_for_Azerbaijan:': u'\U0001F1E6 \U0001F1FF',
u':flag_for_Bahamas:': u'\U0001F1E7 \U0001F1F8',
u':flag_for_Bahrain:': u'\U0001F1E7 \U0001F1ED',
u':flag_for_Bangladesh:': u'\U0001F1E7 \U0001F1E9',
u':flag_for_Barbados:': u'\U0001F1E7 \U0001F1E7',
u':flag_for_Belarus:': u'\U0001F1E7 \U0001F1FE',
u':flag_for_Belgium:': u'\U0001F1E7 \U0001F1EA',
u':flag_for_Belize:': u'\U0001F1E7 \U0001F1FF',
u':flag_for_Benin:': u'\U0001F1E7 \U0001F1EF',
u':flag_for_Bermuda:': u'\U0001F1E7 \U0001F1F2',
u':flag_for_Bhutan:': u'\U0001F1E7 \U0001F1F9',
u':flag_for_Bolivia:': u'\U0001F1E7 \U0001F1F4',
u':flag_for_Bosnia_&_Herzegovina:': u'\U0001F1E7 \U0001F1E6',
u':flag_for_Botswana:': u'\U0001F1E7 \U0001F1FC',
u':flag_for_Bouvet_Island:': u'\U0001F1E7 \U0001F1FB',
u':flag_for_Brazil:': u'\U0001F1E7 \U0001F1F7',
u':flag_for_British_Indian_Ocean_Territory:': u'\U0001F1EE \U0001F1F4',
u':flag_for_British_Virgin_Islands:': u'\U0001F1FB \U0001F1EC',
u':flag_for_Brunei:': u'\U0001F1E7 \U0001F1F3',
u':flag_for_Bulgaria:': u'\U0001F1E7 \U0001F1EC',
u':flag_for_Burkina_Faso:': u'\U0001F1E7 \U0001F1EB',
u':flag_for_Burundi:': u'\U0001F1E7 \U0001F1EE',
u':flag_for_Cambodia:': u'\U0001F1F0 \U0001F1ED',
u':flag_for_Cameroon:': u'\U0001F1E8 \U0001F1F2',
u':flag_for_Canada:': u'\U0001F1E8 \U0001F1E6',
u':flag_for_Canary_Islands:': u'\U0001F1EE \U0001F1E8',
u':flag_for_Cape_Verde:': u'\U0001F1E8 \U0001F1FB',
u':flag_for_Caribbean_Netherlands:': u'\U0001F1E7 \U0001F1F6',
u':flag_for_Cayman_Islands:': u'\U0001F1F0 \U0001F1FE',
u':flag_for_Central_African_Republic:': u'\U0001F1E8 \U0001F1EB',
u':flag_for_Ceuta_&_Melilla:': u'\U0001F1EA \U0001F1E6',
u':flag_for_Chad:': u'\U0001F1F9 \U0001F1E9',
u':flag_for_Chile:': u'\U0001F1E8 \U0001F1F1',
u':flag_for_China:': u'\U0001F1E8 \U0001F1F3',
u':flag_for_Christmas_Island:': u'\U0001F1E8 \U0001F1FD',
u':flag_for_Clipperton_Island:': u'\U0001F1E8 \U0001F1F5',
u':flag_for_Cocos__Islands:': u'\U0001F1E8 \U0001F1E8',
u':flag_for_Colombia:': u'\U0001F1E8 \U0001F1F4',
u':flag_for_Comoros:': u'\U0001F1F0 \U0001F1F2',
u':flag_for_Congo____Brazzaville:': u'\U0001F1E8 \U0001F1EC',
u':flag_for_Congo____Kinshasa:': u'\U0001F1E8 \U0001F1E9',
u':flag_for_Cook_Islands:': u'\U0001F1E8 \U0001F1F0',
u':flag_for_Costa_Rica:': u'\U0001F1E8 \U0001F1F7',
u':flag_for_Croatia:': u'\U0001F1ED \U0001F1F7',
u':flag_for_Cuba:': u'\U0001F1E8 \U0001F1FA',
u':flag_for_Curaçao:': u'\U0001F1E8 \U0001F1FC',
u':flag_for_Cyprus:': u'\U0001F1E8 \U0001F1FE',
u':flag_for_Czech_Republic:': u'\U0001F1E8 \U0001F1FF',
u':flag_for_Côte_d’Ivoire:': u'\U0001F1E8 \U0001F1EE',
u':flag_for_Denmark:': u'\U0001F1E9 \U0001F1F0',
u':flag_for_Diego_Garcia:': u'\U0001F1E9 \U0001F1EC',
u':flag_for_Djibouti:': u'\U0001F1E9 \U0001F1EF',
u':flag_for_Dominica:': u'\U0001F1E9 \U0001F1F2',
u':flag_for_Dominican_Republic:': u'\U0001F1E9 \U0001F1F4',
u':flag_for_Ecuador:': u'\U0001F1EA \U0001F1E8',
u':flag_for_Egypt:': u'\U0001F1EA \U0001F1EC',
u':flag_for_El_Salvador:': u'\U0001F1F8 \U0001F1FB',
u':flag_for_Equatorial_Guinea:': u'\U0001F1EC \U0001F1F6',
u':flag_for_Eritrea:': u'\U0001F1EA \U0001F1F7',
u':flag_for_Estonia:': u'\U0001F1EA \U0001F1EA',
u':flag_for_Ethiopia:': u'\U0001F1EA \U0001F1F9',
u':flag_for_European_Union:': u'\U0001F1EA \U0001F1FA',
u':flag_for_Falkland_Islands:': u'\U0001F1EB \U0001F1F0',
u':flag_for_Faroe_Islands:': u'\U0001F1EB \U0001F1F4',
u':flag_for_Fiji:': u'\U0001F1EB \U0001F1EF',
u':flag_for_Finland:': u'\U0001F1EB \U0001F1EE',
u':flag_for_France:': u'\U0001F1EB \U0001F1F7',
u':flag_for_French_Guiana:': u'\U0001F1EC \U0001F1EB',
u':flag_for_French_Polynesia:': u'\U0001F1F5 \U0001F1EB',
u':flag_for_French_Southern_Territories:': u'\U0001F1F9 \U0001F1EB',
u':flag_for_Gabon:': u'\U0001F1EC \U0001F1E6',
u':flag_for_Gambia:': u'\U0001F1EC \U0001F1F2',
u':flag_for_Georgia:': u'\U0001F1EC \U0001F1EA',
u':flag_for_Germany:': u'\U0001F1E9 \U0001F1EA',
u':flag_for_Ghana:': u'\U0001F1EC \U0001F1ED',
u':flag_for_Gibraltar:': u'\U0001F1EC \U0001F1EE',
u':flag_for_Greece:': u'\U0001F1EC \U0001F1F7',
u':flag_for_Greenland:': u'\U0001F1EC \U0001F1F1',
u':flag_for_Grenada:': u'\U0001F1EC \U0001F1E9',
u':flag_for_Guadeloupe:': u'\U0001F1EC \U0001F1F5',
u':flag_for_Guam:': u'\U0001F1EC \U0001F1FA',
u':flag_for_Guatemala:': u'\U0001F1EC \U0001F1F9',
u':flag_for_Guernsey:': u'\U0001F1EC \U0001F1EC',
u':flag_for_Guinea:': u'\U0001F1EC \U0001F1F3',
u':flag_for_Guinea__Bissau:': u'\U0001F1EC \U0001F1FC',
u':flag_for_Guyana:': u'\U0001F1EC \U0001F1FE',
u':flag_for_Haiti:': u'\U0001F1ED \U0001F1F9',
u':flag_for_Heard_&_McDonald_Islands:': u'\U0001F1ED \U0001F1F2',
u':flag_for_Honduras:': u'\U0001F1ED \U0001F1F3',
u':flag_for_Hong_Kong:': u'\U0001F1ED \U0001F1F0',
u':flag_for_Hungary:': u'\U0001F1ED \U0001F1FA',
u':flag_for_Iceland:': u'\U0001F1EE \U0001F1F8',
u':flag_for_India:': u'\U0001F1EE \U0001F1F3',
u':flag_for_Indonesia:': u'\U0001F1EE \U0001F1E9',
u':flag_for_Iran:': u'\U0001F1EE \U0001F1F7',
u':flag_for_Iraq:': u'\U0001F1EE \U0001F1F6',
u':flag_for_Ireland:': u'\U0001F1EE \U0001F1EA',
u':flag_for_Isle_of_Man:': u'\U0001F1EE \U0001F1F2',
u':flag_for_Israel:': u'\U0001F1EE \U0001F1F1',
u':flag_for_Italy:': u'\U0001F1EE \U0001F1F9',
u':flag_for_Jamaica:': u'\U0001F1EF \U0001F1F2',
u':flag_for_Japan:': u'\U0001F1EF \U0001F1F5',
u':flag_for_Jersey:': u'\U0001F1EF \U0001F1EA',
u':flag_for_Jordan:': u'\U0001F1EF \U0001F1F4',
u':flag_for_Kazakhstan:': u'\U0001F1F0 \U0001F1FF',
u':flag_for_Kenya:': u'\U0001F1F0 \U0001F1EA',
u':flag_for_Kiribati:': u'\U0001F1F0 \U0001F1EE',
u':flag_for_Kosovo:': u'\U0001F1FD \U0001F1F0',
u':flag_for_Kuwait:': u'\U0001F1F0 \U0001F1FC',
u':flag_for_Kyrgyzstan:': u'\U0001F1F0 \U0001F1EC',
u':flag_for_Laos:': u'\U0001F1F1 \U0001F1E6',
u':flag_for_Latvia:': u'\U0001F1F1 \U0001F1FB',
u':flag_for_Lebanon:': u'\U0001F1F1 \U0001F1E7',
u':flag_for_Lesotho:': u'\U0001F1F1 \U0001F1F8',
u':flag_for_Liberia:': u'\U0001F1F1 \U0001F1F7',
u':flag_for_Libya:': u'\U0001F1F1 \U0001F1FE',
u':flag_for_Liechtenstein:': u'\U0001F1F1 \U0001F1EE',
u':flag_for_Lithuania:': u'\U0001F1F1 \U0001F1F9',
u':flag_for_Luxembourg:': u'\U0001F1F1 \U0001F1FA',
u':flag_for_Macau:': u'\U0001F1F2 \U0001F1F4',
u':flag_for_Macedonia:': u'\U0001F1F2 \U0001F1F0',
u':flag_for_Madagascar:': u'\U0001F1F2 \U0001F1EC',
u':flag_for_Malawi:': u'\U0001F1F2 \U0001F1FC',
u':flag_for_Malaysia:': u'\U0001F1F2 \U0001F1FE',
u':flag_for_Maldives:': u'\U0001F1F2 \U0001F1FB',
u':flag_for_Mali:': u'\U0001F1F2 \U0001F1F1',
u':flag_for_Malta:': u'\U0001F1F2 \U0001F1F9',
u':flag_for_Marshall_Islands:': u'\U0001F1F2 \U0001F1ED',
u':flag_for_Martinique:': u'\U0001F1F2 \U0001F1F6',
u':flag_for_Mauritania:': u'\U0001F1F2 \U0001F1F7',
u':flag_for_Mauritius:': u'\U0001F1F2 \U0001F1FA',
u':flag_for_Mayotte:': u'\U0001F1FE \U0001F1F9',
u':flag_for_Mexico:': u'\U0001F1F2 \U0001F1FD',
u':flag_for_Micronesia:': u'\U0001F1EB \U0001F1F2',
u':flag_for_Moldova:': u'\U0001F1F2 \U0001F1E9',
u':flag_for_Monaco:': u'\U0001F1F2 \U0001F1E8',
u':flag_for_Mongolia:': u'\U0001F1F2 \U0001F1F3',
u':flag_for_Montenegro:': u'\U0001F1F2 \U0001F1EA',
u':flag_for_Montserrat:': u'\U0001F1F2 \U0001F1F8',
u':flag_for_Morocco:': u'\U0001F1F2 \U0001F1E6',
u':flag_for_Mozambique:': u'\U0001F1F2 \U0001F1FF',
u':flag_for_Myanmar:': u'\U0001F1F2 \U0001F1F2',
u':flag_for_Namibia:': u'\U0001F1F3 \U0001F1E6',
u':flag_for_Nauru:': u'\U0001F1F3 \U0001F1F7',
u':flag_for_Nepal:': u'\U0001F1F3 \U0001F1F5',
u':flag_for_Netherlands:': u'\U0001F1F3 \U0001F1F1',
u':flag_for_New_Caledonia:': u'\U0001F1F3 \U0001F1E8',
u':flag_for_New_Zealand:': u'\U0001F1F3 \U0001F1FF',
u':flag_for_Nicaragua:': u'\U0001F1F3 \U0001F1EE',
u':flag_for_Niger:': u'\U0001F1F3 \U0001F1EA',
u':flag_for_Nigeria:': u'\U0001F1F3 \U0001F1EC',
u':flag_for_Niue:': u'\U0001F1F3 \U0001F1FA',
u':flag_for_Norfolk_Island:': u'\U0001F1F3 \U0001F1EB',
u':flag_for_North_Korea:': u'\U0001F1F0 \U0001F1F5',
u':flag_for_Northern_Mariana_Islands:': u'\U0001F1F2 \U0001F1F5',
u':flag_for_Norway:': u'\U0001F1F3 \U0001F1F4',
u':flag_for_Oman:': u'\U0001F1F4 \U0001F1F2',
u':flag_for_Pakistan:': u'\U0001F1F5 \U0001F1F0',
u':flag_for_Palau:': u'\U0001F1F5 \U0001F1FC',
u':flag_for_Palestinian_Territories:': u'\U0001F1F5 \U0001F1F8',
u':flag_for_Panama:': u'\U0001F1F5 \U0001F1E6',
u':flag_for_Papua_New_Guinea:': u'\U0001F1F5 \U0001F1EC',
u':flag_for_Paraguay:': u'\U0001F1F5 \U0001F1FE',
u':flag_for_Peru:': u'\U0001F1F5 \U0001F1EA',
u':flag_for_Philippines:': u'\U0001F1F5 \U0001F1ED',
u':flag_for_Pitcairn_Islands:': u'\U0001F1F5 \U0001F1F3',
u':flag_for_Poland:': u'\U0001F1F5 \U0001F1F1',
u':flag_for_Portugal:': u'\U0001F1F5 \U0001F1F9',
u':flag_for_Puerto_Rico:': u'\U0001F1F5 \U0001F1F7',
u':flag_for_Qatar:': u'\U0001F1F6 \U0001F1E6',
u':flag_for_Romania:': u'\U0001F1F7 \U0001F1F4',
u':flag_for_Russia:': u'\U0001F1F7 \U0001F1FA',
u':flag_for_Rwanda:': u'\U0001F1F7 \U0001F1FC',
u':flag_for_Réunion:': u'\U0001F1F7 \U0001F1EA',
u':flag_for_Samoa:': u'\U0001F1FC \U0001F1F8',
u':flag_for_San_Marino:': u'\U0001F1F8 \U0001F1F2',
u':flag_for_Saudi_Arabia:': u'\U0001F1F8 \U0001F1E6',
u':flag_for_Senegal:': u'\U0001F1F8 \U0001F1F3',
u':flag_for_Serbia:': u'\U0001F1F7 \U0001F1F8',
u':flag_for_Seychelles:': u'\U0001F1F8 \U0001F1E8',
u':flag_for_Sierra_Leone:': u'\U0001F1F8 \U0001F1F1',
u':flag_for_Singapore:': u'\U0001F1F8 \U0001F1EC',
u':flag_for_Sint_Maarten:': u'\U0001F1F8 \U0001F1FD',
u':flag_for_Slovakia:': u'\U0001F1F8 \U0001F1F0',
u':flag_for_Slovenia:': u'\U0001F1F8 \U0001F1EE',
u':flag_for_Solomon_Islands:': u'\U0001F1F8 \U0001F1E7',
u':flag_for_Somalia:': u'\U0001F1F8 \U0001F1F4',
u':flag_for_South_Africa:': u'\U0001F1FF \U0001F1E6',
u':flag_for_South_Georgia_&_South_Sandwich_Islands:': u'\U0001F1EC \U0001F1F8',
u':flag_for_South_Korea:': u'\U0001F1F0 \U0001F1F7',
u':flag_for_South_Sudan:': u'\U0001F1F8 \U0001F1F8',
u':flag_for_Spain:': u'\U0001F1EA \U0001F1F8',
u':flag_for_Sri_Lanka:': u'\U0001F1F1 \U0001F1F0',
u':flag_for_St._Barthélemy:': u'\U0001F1E7 \U0001F1F1',
u':flag_for_St._Helena:': u'\U0001F1F8 \U0001F1ED',
u':flag_for_St._Kitts_&_Nevis:': u'\U0001F1F0 \U0001F1F3',
u':flag_for_St._Lucia:': u'\U0001F1F1 \U0001F1E8',
u':flag_for_St._Martin:': u'\U0001F1F2 \U0001F1EB',
u':flag_for_St._Pierre_&_Miquelon:': u'\U0001F1F5 \U0001F1F2',
u':flag_for_St._Vincent_&_Grenadines:': u'\U0001F1FB \U0001F1E8',
u':flag_for_Sudan:': u'\U0001F1F8 \U0001F1E9',
u':flag_for_Suriname:': u'\U0001F1F8 \U0001F1F7',
u':flag_for_Svalbard_&_Jan_Mayen:': u'\U0001F1F8 \U0001F1EF',
u':flag_for_Swaziland:': u'\U0001F1F8 \U0001F1FF',
u':flag_for_Sweden:': u'\U0001F1F8 \U0001F1EA',
u':flag_for_Switzerland:': u'\U0001F1E8 \U0001F1ED',
u':flag_for_Syria:': u'\U0001F1F8 \U0001F1FE',
u':flag_for_São_Tomé_&_Príncipe:': u'\U0001F1F8 \U0001F1F9',
u':flag_for_Taiwan:': u'\U0001F1F9 \U0001F1FC',
u':flag_for_Tajikistan:': u'\U0001F1F9 \U0001F1EF',
u':flag_for_Tanzania:': u'\U0001F1F9 \U0001F1FF',
u':flag_for_Thailand:': u'\U0001F1F9 \U0001F1ED',
u':flag_for_Timor__Leste:': u'\U0001F1F9 \U0001F1F1',
u':flag_for_Togo:': u'\U0001F1F9 \U0001F1EC',
u':flag_for_Tokelau:': u'\U0001F1F9 \U0001F1F0',
u':flag_for_Tonga:': u'\U0001F1F9 \U0001F1F4',
u':flag_for_Trinidad_&_Tobago:': u'\U0001F1F9 \U0001F1F9',
u':flag_for_Tristan_da_Cunha:': u'\U0001F1F9 \U0001F1E6',
u':flag_for_Tunisia:': u'\U0001F1F9 \U0001F1F3',
u':flag_for_Turkey:': u'\U0001F1F9 \U0001F1F7',
u':flag_for_Turkmenistan:': u'\U0001F1F9 \U0001F1F2',
u':flag_for_Turks_&_Caicos_Islands:': u'\U0001F1F9 \U0001F1E8',
u':flag_for_Tuvalu:': u'\U0001F1F9 \U0001F1FB',
u':flag_for_U.S._Outlying_Islands:': u'\U0001F1FA \U0001F1F2',
u':flag_for_U.S._Virgin_Islands:': u'\U0001F1FB \U0001F1EE',
u':flag_for_Uganda:': u'\U0001F1FA \U0001F1EC',
u':flag_for_Ukraine:': u'\U0001F1FA \U0001F1E6',
u':flag_for_United_Arab_Emirates:': u'\U0001F1E6 \U0001F1EA',
u':flag_for_United_Kingdom:': u'\U0001F1EC \U0001F1E7',
u':flag_for_United_States:': u'\U0001F1FA \U0001F1F8',
u':flag_for_Uruguay:': u'\U0001F1FA \U0001F1FE',
u':flag_for_Uzbekistan:': u'\U0001F1FA \U0001F1FF',
u':flag_for_Vanuatu:': u'\U0001F1FB \U0001F1FA',
u':flag_for_Vatican_City:': u'\U0001F1FB \U0001F1E6',
u':flag_for_Venezuela:': u'\U0001F1FB \U0001F1EA',
u':flag_for_Vietnam:': u'\U0001F1FB \U0001F1F3',
u':flag_for_Wallis_&_Futuna:': u'\U0001F1FC \U0001F1EB',
u':flag_for_Western_Sahara:': u'\U0001F1EA \U0001F1ED',
u':flag_for_Yemen:': u'\U0001F1FE \U0001F1EA',
u':flag_for_Zambia:': u'\U0001F1FF \U0001F1F2',
u':flag_for_Zimbabwe:': u'\U0001F1FF \U0001F1FC',
u':flag_for_Åland_Islands:': u'\U0001F1E6 \U0001F1FD',
u':golf:': u'\U000026F3',
u':fleur__de__lis:': u'\U0000269C',
u':muscle:': u'\U0001F4AA',
u':floppy_disk:': u'\U0001F4BE',
u':flower_playing_cards:': u'\U0001F3B4',
u':flushed:': u'\U0001F633',
u':fog:': u'\U0001F32B',
u':foggy:': u'\U0001F301',
u':footprints:': u'\U0001F463',
u':fork_and_knife:': u'\U0001F374',
u':fork_and_knife_with_plate:': u'\U0001F37D',
u':fountain:': u'\U000026F2',
u':four_leaf_clover:': u'\U0001F340',
u':frame_with_picture:': u'\U0001F5BC',
u':fries:': u'\U0001F35F',
u':fried_shrimp:': u'\U0001F364',
u':frog:': u'\U0001F438',
u':hatched_chick:': u'\U0001F425',
u':frowning:': u'\U0001F626',
u':fuelpump:': u'\U000026FD',
u':full_moon:': u'\U0001F315',
u':full_moon_with_face:': u'\U0001F31D',
u':funeral_urn:': u'\U000026B1',
u':game_die:': u'\U0001F3B2',
u':gear:': u'\U00002699',
u':gem:': u'\U0001F48E',
u':gemini:': u'\U0000264A',
u':ghost:': u'\U0001F47B',
u':girl:': u'\U0001F467',
u':globe_with_meridians:': u'\U0001F310',
u':star2:': u'\U0001F31F',
u':goat:': u'\U0001F410',
u':golfer:': u'\U0001F3CC',
u':mortar_board:': u'\U0001F393',
u':grapes:': u'\U0001F347',
u':green_apple:': u'\U0001F34F',
u':green_book:': u'\U0001F4D7',
u':green_heart:': u'\U0001F49A',
u':grimacing:': u'\U0001F62C',
u':smile_cat:': u'\U0001F638',
u':grinning:': u'\U0001F600',
u':grin:': u'\U0001F601',
u':heartpulse:': u'\U0001F497',
u':guardsman:': u'\U0001F482',
u':guitar:': u'\U0001F3B8',
u':haircut:': u'\U0001F487',
u':hamburger:': u'\U0001F354',
u':hammer:': u'\U0001F528',
u':hammer_and_pick:': u'\U00002692',
u':hammer_and_wrench:': u'\U0001F6E0',
u':hamster:': u'\U0001F439',
u':handbag:': u'\U0001F45C',
u':raising_hand:': u'\U0001F64B',
u':hatching_chick:': u'\U0001F423',
u':headphones:': u'\U0001F3A7',
u':hear_no_evil:': u'\U0001F649',
u':heart_decoration:': u'\U0001F49F',
u':cupid:': u'\U0001F498',
u':gift_heart:': u'\U0001F49D',
u':heart:': u'\U00002764',
u':heavy_check_mark:': u'\U00002714',
u':heavy_division_sign:': u'\U00002797',
u':heavy_dollar_sign:': u'\U0001F4B2',
u':exclamation:': u'\U00002757',
u':heavy_exclamation_mark:': u'\U00002757',
u':heavy_heart_exclamation_mark_ornament:': u'\U00002763',
u':o:': u'\U00002B55',
u':heavy_minus_sign:': u'\U00002796',
u':heavy_multiplication_x:': u'\U00002716',
u':heavy_plus_sign:': u'\U00002795',
u':helicopter:': u'\U0001F681',
u':helm_symbol:': u'\U00002388',
u':helmet_with_white_cross:': u'\U000026D1',
u':herb:': u'\U0001F33F',
u':hibiscus:': u'\U0001F33A',
u':high_heel:': u'\U0001F460',
u':bullettrain_side:': u'\U0001F684',
u':bullettrain_front:': u'\U0001F685',
u':high_brightness:': u'\U0001F506',
u':zap:': u'\U000026A1',
u':hocho:': u'\U0001F52A',
u':knife:': u'\U0001F52A',
u':hole:': u'\U0001F573',
u':honey_pot:': u'\U0001F36F',
u':bee:': u'\U0001F41D',
u':traffic_light:': u'\U0001F6A5',
u':racehorse:': u'\U0001F40E',
u':horse:': u'\U0001F434',
u':horse_racing:': u'\U0001F3C7',
u':hospital:': u'\U0001F3E5',
u':coffee:': u'\U00002615',
u':hot_dog:': u'\U0001F32D',
u':hot_pepper:': u'\U0001F336',
u':hotsprings:': u'\U00002668',
u':hotel:': u'\U0001F3E8',
u':hourglass:': u'\U0000231B',
u':hourglass_flowing_sand:': u'\U000023F3',
u':house:': u'\U0001F3E0',
u':house_buildings:': u'\U0001F3D8',
u':house_with_garden:': u'\U0001F3E1',
u':hugging_face:': u'\U0001F917',
u':100:': u'\U0001F4AF',
u':hushed:': u'\U0001F62F',
u':ice_cream:': u'\U0001F368',
u':ice_hockey_stick_and_puck:': u'\U0001F3D2',
u':ice_skate:': u'\U000026F8',
u':imp:': u'\U0001F47F',
u':inbox_tray:': u'\U0001F4E5',
u':incoming_envelope:': u'\U0001F4E8',
u':information_desk_person:': u'\U0001F481',
u':information_source:': u'\U00002139',
u':capital_abcd:': u'\U0001F520',
u':abc:': u'\U0001F524',
u':abcd:': u'\U0001F521',
u':1234:': u'\U0001F522',
u':symbols:': u'\U0001F523',
u':izakaya_lantern:': u'\U0001F3EE',
u':lantern:': u'\U0001F3EE',
u':jack_o_lantern:': u'\U0001F383',
u':japanese_castle:': u'\U0001F3EF',
u':dolls:': u'\U0001F38E',
u':japanese_goblin:': u'\U0001F47A',
u':japanese_ogre:': u'\U0001F479',
u':post_office:': u'\U0001F3E3',
u':beginner:': u'\U0001F530',
u':jeans:': u'\U0001F456',
u':joystick:': u'\U0001F579',
u':kaaba:': u'\U0001F54B',
u':key:': u'\U0001F511',
u':keyboard:': u'\U00002328',
u':keycap_asterisk:': u'\U0000002A \U000020E3',
u':keycap_digit_eight:': u'\U00000038 \U000020E3',
u':keycap_digit_five:': u'\U00000035 \U000020E3',
u':keycap_digit_four:': u'\U00000034 \U000020E3',
u':keycap_digit_nine:': u'\U00000039 \U000020E3',
u':keycap_digit_one:': u'\U00000031 \U000020E3',
u':keycap_digit_seven:': u'\U00000037 \U000020E3',
u':keycap_digit_six:': u'\U00000036 \U000020E3',
u':keycap_digit_three:': u'\U00000033 \U000020E3',
u':keycap_digit_two:': u'\U00000032 \U000020E3',
u':keycap_digit_zero:': u'\U00000030 \U000020E3',
u':keycap_number_sign:': u'\U00000023 \U000020E3',
u':keycap_ten:': u'\U0001F51F',
u':kimono:': u'\U0001F458',
u':couplekiss:': u'\U0001F48F',
u':kiss:': u'\U0001F48B',
u':kissing_cat:': u'\U0001F63D',
u':kissing:': u'\U0001F617',
u':kissing_closed_eyes:': u'\U0001F61A',
u':kissing_smiling_eyes:': u'\U0001F619',
u':koala:': u'\U0001F428',
u':label:': u'\U0001F3F7',
u':beetle:': u'\U0001F41E',
u':large_blue_circle:': u'\U0001F535',
u':large_blue_diamond:': u'\U0001F537',
u':large_orange_diamond:': u'\U0001F536',
u':red_circle:': u'\U0001F534',
u':last_quarter_moon:': u'\U0001F317',
u':last_quarter_moon_with_face:': u'\U0001F31C',
u':latin_cross:': u'\U0000271D',
u':leaves:': u'\U0001F343',
u':ledger:': u'\U0001F4D2',
u':mag:': u'\U0001F50D',
u':left_luggage:': u'\U0001F6C5',
u':left_right_arrow:': u'\U00002194',
u':leftwards_arrow_with_hook:': u'\U000021A9',
u':arrow_left:': u'\U00002B05',
u':lemon:': u'\U0001F34B',
u':leo:': u'\U0000264C',
u':leopard:': u'\U0001F406',
u':level_slider:': u'\U0001F39A',
u':libra:': u'\U0000264E',
u':light_rail:': u'\U0001F688',
u':link:': u'\U0001F517',
u':linked_paperclips:': u'\U0001F587',
u':lion_face:': u'\U0001F981',
u':lipstick:': u'\U0001F484',
u':lock:': u'\U0001F512',
u':lock_with_ink_pen:': u'\U0001F50F',
u':lollipop:': u'\U0001F36D',
u':sob:': u'\U0001F62D',
u':love_hotel:': u'\U0001F3E9',
u':love_letter:': u'\U0001F48C',
u':low_brightness:': u'\U0001F505',
u':lower_left_ballpoint_pen:': u'\U0001F58A',
u':lower_left_crayon:': u'\U0001F58D',
u':lower_left_fountain_pen:': u'\U0001F58B',
u':lower_left_paintbrush:': u'\U0001F58C',
u':mahjong:': u'\U0001F004',
u':man:': u'\U0001F468',
u':couple:': u'\U0001F46B',
u':man_in_business_suit_levitating:': u'\U0001F574',
u':man_with_gua_pi_mao:': u'\U0001F472',
u':man_with_turban:': u'\U0001F473',
u':mans_shoe:': u'\U0001F45E',
u':shoe:': u'\U0001F45E',
u':mantelpiece_clock:': u'\U0001F570',
u':maple_leaf:': u'\U0001F341',
u':meat_on_bone:': u'\U0001F356',
u':black_circle:': u'\U000026AB',
u':white_circle:': u'\U000026AA',
u':melon:': u'\U0001F348',
u':memo:': u'\U0001F4DD',
u':pencil:': u'\U0001F4DD',
u':menorah_with_nine_branches:': u'\U0001F54E',
u':mens:': u'\U0001F6B9',
u':metro:': u'\U0001F687',
u':microphone:': u'\U0001F3A4',
u':microscope:': u'\U0001F52C',
u':military_medal:': u'\U0001F396',
u':milky_way:': u'\U0001F30C',
u':minibus:': u'\U0001F690',
u':minidisc:': u'\U0001F4BD',
u':iphone:': u'\U0001F4F1',
u':mobile_phone_off:': u'\U0001F4F4',
u':calling:': u'\U0001F4F2',
u':money__mouth_face:': u'\U0001F911',
u':moneybag:': u'\U0001F4B0',
u':money_with_wings:': u'\U0001F4B8',
u':monkey:': u'\U0001F412',
u':monkey_face:': u'\U0001F435',
u':monorail:': u'\U0001F69D',
u':rice_scene:': u'\U0001F391',
u':mosque:': u'\U0001F54C',
u':motor_boat:': u'\U0001F6E5',
u':motorway:': u'\U0001F6E3',
u':mount_fuji:': u'\U0001F5FB',
u':mountain:': u'\U000026F0',
u':mountain_bicyclist:': u'\U0001F6B5',
u':mountain_cableway:': u'\U0001F6A0',
u':mountain_railway:': u'\U0001F69E',
u':mouse2:': u'\U0001F401',
u':mouse:': u'\U0001F42D',
u':lips:': u'\U0001F444',
u':movie_camera:': u'\U0001F3A5',
u':moyai:': u'\U0001F5FF',
u':notes:': u'\U0001F3B6',
u':mushroom:': u'\U0001F344',
u':musical_keyboard:': u'\U0001F3B9',
u':musical_note:': u'\U0001F3B5',
u':musical_score:': u'\U0001F3BC',
u':nail_care:': u'\U0001F485',
u':name_badge:': u'\U0001F4DB',
u':national_park:': u'\U0001F3DE',
u':necktie:': u'\U0001F454',
u':ab:': u'\U0001F18E',
u':negative_squared_cross_mark:': u'\U0000274E',
u':a:': u'\U0001F170',
u':b:': u'\U0001F171',
u':o2:': u'\U0001F17E',
u':parking:': u'\U0001F17F',
u':nerd_face:': u'\U0001F913',
u':neutral_face:': u'\U0001F610',
u':new_moon:': u'\U0001F311',
u':honeybee:': u'\U0001F41D',
u':new_moon_with_face:': u'\U0001F31A',
u':newspaper:': u'\U0001F4F0',
u':night_with_stars:': u'\U0001F303',
u':no_bicycles:': u'\U0001F6B3',
u':no_entry:': u'\U000026D4',
u':no_entry_sign:': u'\U0001F6AB',
u':no_mobile_phones:': u'\U0001F4F5',
u':underage:': u'\U0001F51E',
u':no_pedestrians:': u'\U0001F6B7',
u':no_smoking:': u'\U0001F6AD',
u':non__potable_water:': u'\U0001F6B1',
u':arrow_upper_right:': u'\U00002197',
u':arrow_upper_left:': u'\U00002196',
u':nose:': u'\U0001F443',
u':notebook:': u'\U0001F4D3',
u':notebook_with_decorative_cover:': u'\U0001F4D4',
u':nut_and_bolt:': u'\U0001F529',
u':octopus:': u'\U0001F419',
u':oden:': u'\U0001F362',
u':office:': u'\U0001F3E2',
u':oil_drum:': u'\U0001F6E2',
u':ok_hand:': u'\U0001F44C',
u':old_key:': u'\U0001F5DD',
u':older_man:': u'\U0001F474',
u':older_woman:': u'\U0001F475',
u':om_symbol:': u'\U0001F549',
u':on:': u'\U0001F51B',
u':oncoming_automobile:': u'\U0001F698',
u':oncoming_bus:': u'\U0001F68D',
u':oncoming_police_car:': u'\U0001F694',
u':oncoming_taxi:': u'\U0001F696',
u':book:': u'\U0001F4D6',
u':open_book:': u'\U0001F4D6',
u':open_file_folder:': u'\U0001F4C2',
u':open_hands:': u'\U0001F450',
u':unlock:': u'\U0001F513',
u':mailbox_with_no_mail:': u'\U0001F4ED',
u':mailbox_with_mail:': u'\U0001F4EC',
u':ophiuchus:': u'\U000026CE',
u':cd:': u'\U0001F4BF',
u':orange_book:': u'\U0001F4D9',
u':orthodox_cross:': u'\U00002626',
u':outbox_tray:': u'\U0001F4E4',
u':ox:': u'\U0001F402',
u':package:': u'\U0001F4E6',
u':page_facing_up:': u'\U0001F4C4',
u':page_with_curl:': u'\U0001F4C3',
u':pager:': u'\U0001F4DF',
u':palm_tree:': u'\U0001F334',
u':panda_face:': u'\U0001F43C',
u':paperclip:': u'\U0001F4CE',
u':part_alternation_mark:': u'\U0000303D',
u':tada:': u'\U0001F389',
u':passenger_ship:': u'\U0001F6F3',
u':passport_control:': u'\U0001F6C2',
u':feet:': u'\U0001F43E',
u':paw_prints:': u'\U0001F43E',
u':peace_symbol:': u'\U0000262E',
u':peach:': u'\U0001F351',
u':pear:': u'\U0001F350',
u':walking:': u'\U0001F6B6',
u':pencil2:': u'\U0000270F',
u':penguin:': u'\U0001F427',
u':pensive:': u'\U0001F614',
u':performing_arts:': u'\U0001F3AD',
u':persevere:': u'\U0001F623',
u':bow:': u'\U0001F647',
u':person_frowning:': u'\U0001F64D',
u':raised_hands:': u'\U0001F64C',
u':person_with_ball:': u'\U000026F9',
u':person_with_blond_hair:': u'\U0001F471',
u':pray:': u'\U0001F64F',
u':person_with_pouting_face:': u'\U0001F64E',
u':computer:': u'\U0001F4BB',
u':pick:': u'\U000026CF',
u':pig2:': u'\U0001F416',
u':pig:': u'\U0001F437',
u':pig_nose:': u'\U0001F43D',
u':hankey:': u'\U0001F4A9',
u':poop:': u'\U0001F4A9',
u':shit:': u'\U0001F4A9',
u':pill:': u'\U0001F48A',
u':bamboo:': u'\U0001F38D',
u':pineapple:': u'\U0001F34D',
u':pisces:': u'\U00002653',
u':gun:': u'\U0001F52B',
u':place_of_worship:': u'\U0001F6D0',
u':black_joker:': u'\U0001F0CF',
u':police_car:': u'\U0001F693',
u':rotating_light:': u'\U0001F6A8',
u':cop:': u'\U0001F46E',
u':poodle:': u'\U0001F429',
u':popcorn:': u'\U0001F37F',
u':postal_horn:': u'\U0001F4EF',
u':postbox:': u'\U0001F4EE',
u':stew:': u'\U0001F372',
u':potable_water:': u'\U0001F6B0',
u':pouch:': u'\U0001F45D',
u':poultry_leg:': u'\U0001F357',
u':pouting_cat:': u'\U0001F63E',
u':rage:': u'\U0001F621',
u':prayer_beads:': u'\U0001F4FF',
u':princess:': u'\U0001F478',
u':printer:': u'\U0001F5A8',
u':loudspeaker:': u'\U0001F4E2',
u':purple_heart:': u'\U0001F49C',
u':purse:': u'\U0001F45B',
u':pushpin:': u'\U0001F4CC',
u':put_litter_in_its_place:': u'\U0001F6AE',
u':rabbit2:': u'\U0001F407',
u':rabbit:': u'\U0001F430',
u':racing_car:': u'\U0001F3CE',
u':racing_motorcycle:': u'\U0001F3CD',
u':radio:': u'\U0001F4FB',
u':radio_button:': u'\U0001F518',
u':radioactive_sign:': u'\U00002622',
u':railway_car:': u'\U0001F683',
u':railway_track:': u'\U0001F6E4',
u':rainbow:': u'\U0001F308',
u':fist:': u'\U0000270A',
u':hand:': u'\U0000270B',
u':raised_hand:': u'\U0000270B',
u':raised_hand_with_fingers_splayed:': u'\U0001F590',
u':raised_hand_with_part_between_middle_and_ring_fingers:': u'\U0001F596',
u':ram:': u'\U0001F40F',
u':rat:': u'\U0001F400',
u':blue_car:': u'\U0001F699',
u':apple:': u'\U0001F34E',
u':registered:': u'\U000000AE',
u':relieved:': u'\U0001F60C',
u':reminder_ribbon:': u'\U0001F397',
u':restroom:': u'\U0001F6BB',
u':reversed_hand_with_middle_finger_extended:': u'\U0001F595',
u':revolving_hearts:': u'\U0001F49E',
u':ribbon:': u'\U0001F380',
u':rice_ball:': u'\U0001F359',
u':rice_cracker:': u'\U0001F358',
u':mag_right:': u'\U0001F50E',
u':right_anger_bubble:': u'\U0001F5EF',
u':arrow_right_hook:': u'\U000021AA',
u':ring:': u'\U0001F48D',
u':sweet_potato:': u'\U0001F360',
u':robot_face:': u'\U0001F916',
u':rocket:': u'\U0001F680',
u':rolled__up_newspaper:': u'\U0001F5DE',
u':roller_coaster:': u'\U0001F3A2',
u':rooster:': u'\U0001F413',
u':rose:': u'\U0001F339',
u':rosette:': u'\U0001F3F5',
u':round_pushpin:': u'\U0001F4CD',
u':rowboat:': u'\U0001F6A3',
u':rugby_football:': u'\U0001F3C9',
u':runner:': u'\U0001F3C3',
u':running:': u'\U0001F3C3',
u':running_shirt_with_sash:': u'\U0001F3BD',
u':sagittarius:': u'\U00002650',
u':boat:': u'\U000026F5',
u':sailboat:': u'\U000026F5',
u':sake:': u'\U0001F376',
u':satellite:': u'\U0001F4E1',
u':saxophone:': u'\U0001F3B7',
u':scales:': u'\U00002696',
u':school:': u'\U0001F3EB',
u':school_satchel:': u'\U0001F392',
u':scorpion:': u'\U0001F982',
u':scorpius:': u'\U0000264F',
u':scroll:': u'\U0001F4DC',
u':seat:': u'\U0001F4BA',
u':see_no_evil:': u'\U0001F648',
u':seedling:': u'\U0001F331',
u':shamrock:': u'\U00002618',
u':shaved_ice:': u'\U0001F367',
u':sheep:': u'\U0001F411',
u':shield:': u'\U0001F6E1',
u':shinto_shrine:': u'\U000026E9',
u':ship:': u'\U0001F6A2',
u':stars:': u'\U0001F320',
u':shopping_bags:': u'\U0001F6CD',
u':cake:': u'\U0001F370',
u':shower:': u'\U0001F6BF',
u':sign_of_the_horns:': u'\U0001F918',
u':japan:': u'\U0001F5FE',
u':six_pointed_star:': u'\U0001F52F',
u':ski:': u'\U0001F3BF',
u':skier:': u'\U000026F7',
u':skull:': u'\U0001F480',
u':skull_and_crossbones:': u'\U00002620',
u':sleeping_accommodation:': u'\U0001F6CC',
u':sleeping:': u'\U0001F634',
u':zzz:': u'\U0001F4A4',
u':sleepy:': u'\U0001F62A',
u':sleuth_or_spy:': u'\U0001F575',
u':pizza:': u'\U0001F355',
u':slightly_frowning_face:': u'\U0001F641',
u':slightly_smiling_face:': u'\U0001F642',
u':slot_machine:': u'\U0001F3B0',
u':small_airplane:': u'\U0001F6E9',
u':small_blue_diamond:': u'\U0001F539',
u':small_orange_diamond:': u'\U0001F538',
u':heart_eyes_cat:': u'\U0001F63B',
u':smiley_cat:': u'\U0001F63A',
u':innocent:': u'\U0001F607',
u':heart_eyes:': u'\U0001F60D',
u':smiling_imp:': u'\U0001F608',
u':smiley:': u'\U0001F603',
u':sweat_smile:': u'\U0001F605',
u':smile:': u'\U0001F604',
u':laughing:': u'\U0001F606',
u':satisfied:': u'\U0001F606',
u':blush:': u'\U0001F60A',
u':sunglasses:': u'\U0001F60E',
u':smirk:': u'\U0001F60F',
u':smoking:': u'\U0001F6AC',
u':snail:': u'\U0001F40C',
u':snake:': u'\U0001F40D',
u':snow_capped_mountain:': u'\U0001F3D4',
u':snowboarder:': u'\U0001F3C2',
u':snowflake:': u'\U00002744',
u':snowman:': u'\U00002603',
u':soccer:': u'\U000026BD',
u':icecream:': u'\U0001F366',
u':soon:': u'\U0001F51C',
u':arrow_lower_right:': u'\U00002198',
u':arrow_lower_left:': u'\U00002199',
u':spaghetti:': u'\U0001F35D',
u':sparkle:': u'\U00002747',
u':sparkles:': u'\U00002728',
u':sparkling_heart:': u'\U0001F496',
u':speak_no_evil:': u'\U0001F64A',
u':speaker:': u'\U0001F508',
u':mute:': u'\U0001F507',
u':sound:': u'\U0001F509',
u':loud_sound:': u'\U0001F50A',
u':speaking_head_in_silhouette:': u'\U0001F5E3',
u':speech_balloon:': u'\U0001F4AC',
u':speedboat:': u'\U0001F6A4',
u':spider:': u'\U0001F577',
u':spider_web:': u'\U0001F578',
u':spiral_calendar_pad:': u'\U0001F5D3',
u':spiral_note_pad:': u'\U0001F5D2',
u':shell:': u'\U0001F41A',
u':sweat_drops:': u'\U0001F4A6',
u':sports_medal:': u'\U0001F3C5',
u':whale:': u'\U0001F433',
u':u5272:': u'\U0001F239',
u':u5408:': u'\U0001F234',
u':u55b6:': u'\U0001F23A',
u':u6307:': u'\U0001F22F',
u':u6708:': u'\U0001F237',
u':u6709:': u'\U0001F236',
u':u6e80:': u'\U0001F235',
u':u7121:': u'\U0001F21A',
u':u7533:': u'\U0001F238',
u':u7981:': u'\U0001F232',
u':u7a7a:': u'\U0001F233',
u':cl:': u'\U0001F191',
u':cool:': u'\U0001F192',
u':free:': u'\U0001F193',
u':id:': u'\U0001F194',
u':koko:': u'\U0001F201',
u':sa:': u'\U0001F202',
u':new:': u'\U0001F195',
u':ng:': u'\U0001F196',
u':ok:': u'\U0001F197',
u':sos:': u'\U0001F198',
u':up:': u'\U0001F199',
u':vs:': u'\U0001F19A',
u':stadium:': u'\U0001F3DF',
u':star_and_crescent:': u'\U0000262A',
u':star_of_david:': u'\U00002721',
u':station:': u'\U0001F689',
u':statue_of_liberty:': u'\U0001F5FD',
u':steam_locomotive:': u'\U0001F682',
u':ramen:': u'\U0001F35C',
u':stopwatch:': u'\U000023F1',
u':straight_ruler:': u'\U0001F4CF',
u':strawberry:': u'\U0001F353',
u':studio_microphone:': u'\U0001F399',
u':partly_sunny:': u'\U000026C5',
u':sun_with_face:': u'\U0001F31E',
u':sunflower:': u'\U0001F33B',
u':sunrise:': u'\U0001F305',
u':sunrise_over_mountains:': u'\U0001F304',
u':city_sunrise:': u'\U0001F307',
u':surfer:': u'\U0001F3C4',
u':sushi:': u'\U0001F363',
u':suspension_railway:': u'\U0001F69F',
u':swimmer:': u'\U0001F3CA',
u':synagogue:': u'\U0001F54D',
u':syringe:': u'\U0001F489',
u':shirt:': u'\U0001F455',
u':tshirt:': u'\U0001F455',
u':table_tennis_paddle_and_ball:': u'\U0001F3D3',
u':taco:': u'\U0001F32E',
u':tanabata_tree:': u'\U0001F38B',
u':tangerine:': u'\U0001F34A',
u':taurus:': u'\U00002649',
u':taxi:': u'\U0001F695',
u':tea:': u'\U0001F375',
u':calendar:': u'\U0001F4C6',
u':telephone_receiver:': u'\U0001F4DE',
u':telescope:': u'\U0001F52D',
u':tv:': u'\U0001F4FA',
u':tennis:': u'\U0001F3BE',
u':tent:': u'\U000026FA',
u':thermometer:': u'\U0001F321',
u':thinking_face:': u'\U0001F914',
u':thought_balloon:': u'\U0001F4AD',
u':three_button_mouse:': u'\U0001F5B1',
u':+1:': u'\U0001F44D',
u':thumbsup:': u'\U0001F44D',
u':__1:': u'\U0001F44E',
u':thumbsdown:': u'\U0001F44E',
u':thunder_cloud_and_rain:': u'\U000026C8',
u':ticket:': u'\U0001F3AB',
u':tiger2:': u'\U0001F405',
u':tiger:': u'\U0001F42F',
u':timer_clock:': u'\U000023F2',
u':tired_face:': u'\U0001F62B',
u':toilet:': u'\U0001F6BD',
u':tokyo_tower:': u'\U0001F5FC',
u':tomato:': u'\U0001F345',
u':tongue:': u'\U0001F445',
u':tophat:': u'\U0001F3A9',
u':top:': u'\U0001F51D',
u':trackball:': u'\U0001F5B2',
u':tractor:': u'\U0001F69C',
u':tm:': u'\U00002122',
u':train2:': u'\U0001F686',
u':tram:': u'\U0001F68A',
u':train:': u'\U0001F68B',
u':triangular_flag_on_post:': u'\U0001F6A9',
u':triangular_ruler:': u'\U0001F4D0',
u':trident:': u'\U0001F531',
u':trolleybus:': u'\U0001F68E',
u':trophy:': u'\U0001F3C6',
u':tropical_drink:': u'\U0001F379',
u':tropical_fish:': u'\U0001F420',
u':trumpet:': u'\U0001F3BA',
u':tulip:': u'\U0001F337',
u':turkey:': u'\U0001F983',
u':turtle:': u'\U0001F422',
u':twisted_rightwards_arrows:': u'\U0001F500',
u':two_hearts:': u'\U0001F495',
u':two_men_holding_hands:': u'\U0001F46C',
u':two_women_holding_hands:': u'\U0001F46D',
u':umbrella:': u'\U00002602',
u':umbrella_on_ground:': u'\U000026F1',
u':unamused:': u'\U0001F612',
u':unicorn_face:': u'\U0001F984',
u':small_red_triangle:': u'\U0001F53A',
u':arrow_up_small:': u'\U0001F53C',
u':arrow_up_down:': u'\U00002195',
u':upside__down_face:': u'\U0001F643',
u':arrow_up:': u'\U00002B06',
u':vertical_traffic_light:': u'\U0001F6A6',
u':vibration_mode:': u'\U0001F4F3',
u':v:': u'\U0000270C',
u':video_camera:': u'\U0001F4F9',
u':video_game:': u'\U0001F3AE',
u':vhs:': u'\U0001F4FC',
u':violin:': u'\U0001F3BB',
u':virgo:': u'\U0000264D',
u':volcano:': u'\U0001F30B',
u':volleyball:': u'\U0001F3D0',
u':waning_crescent_moon:': u'\U0001F318',
u':waning_gibbous_moon:': u'\U0001F316',
u':warning:': u'\U000026A0',
u':wastebasket:': u'\U0001F5D1',
u':watch:': u'\U0000231A',
u':water_buffalo:': u'\U0001F403',
u':wc:': u'\U0001F6BE',
u':ocean:': u'\U0001F30A',
u':watermelon:': u'\U0001F349',
u':waving_black_flag:': u'\U0001F3F4',
u':wave:': u'\U0001F44B',
u':waving_white_flag:': u'\U0001F3F3',
u':wavy_dash:': u'\U00003030',
u':waxing_crescent_moon:': u'\U0001F312',
u':moon:': u'\U0001F314',
u':waxing_gibbous_moon:': u'\U0001F314',
u':scream_cat:': u'\U0001F640',
u':weary:': u'\U0001F629',
u':wedding:': u'\U0001F492',
u':weight_lifter:': u'\U0001F3CB',
u':whale2:': u'\U0001F40B',
u':wheel_of_dharma:': u'\U00002638',
u':wheelchair:': u'\U0000267F',
u':point_down:': u'\U0001F447',
u':grey_exclamation:': u'\U00002755',
u':white_flower:': u'\U0001F4AE',
u':white_frowning_face:': u'\U00002639',
u':white_check_mark:': u'\U00002705',
u':white_large_square:': u'\U00002B1C',
u':point_left:': u'\U0001F448',
u':white_medium_small_square:': u'\U000025FD',
u':white_medium_square:': u'\U000025FB',
u':star:': u'\U00002B50',
u':grey_question:': u'\U00002754',
u':point_right:': u'\U0001F449',
u':white_small_square:': u'\U000025AB',
u':relaxed:': u'\U0000263A',
u':white_square_button:': u'\U0001F533',
u':white_sun_behind_cloud:': u'\U0001F325',
u':white_sun_behind_cloud_with_rain:': u'\U0001F326',
u':white_sun_with_small_cloud:': u'\U0001F324',
u':point_up_2:': u'\U0001F446',
u':point_up:': u'\U0000261D',
u':wind_blowing_face:': u'\U0001F32C',
u':wind_chime:': u'\U0001F390',
u':wine_glass:': u'\U0001F377',
u':wink:': u'\U0001F609',
u':wolf:': u'\U0001F43A',
u':woman:': u'\U0001F469',
u':dancers:': u'\U0001F46F',
u':boot:': u'\U0001F462',
u':womans_clothes:': u'\U0001F45A',
u':womans_hat:': u'\U0001F452',
u':sandal:': u'\U0001F461',
u':womens:': u'\U0001F6BA',
u':world_map:': u'\U0001F5FA',
u':worried:': u'\U0001F61F',
u':gift:': u'\U0001F381',
u':wrench:': u'\U0001F527',
u':writing_hand:': u'\U0000270D',
u':yellow_heart:': u'\U0001F49B',
u':yin_yang:': u'\U0000262F',
u':zipper__mouth_face:': u'\U0001F910'
})
# Reverse lookup tables: emoji character -> shortcode name.
# When several shortcodes map to the same emoji (e.g. ':book:' and
# ':open_book:'), the entry occurring last during iteration wins, exactly
# as in the forward tables' insertion order.
UNICODE_EMOJI = {emoji: shortcode for shortcode, emoji in EMOJI_UNICODE.items()}
UNICODE_EMOJI_ALIAS = {emoji: shortcode for shortcode, emoji in EMOJI_ALIAS_UNICODE.items()}
| vjmac15/Lyilis | lib/emoji/unicode_codes.py | Python | gpl-3.0 | 195,719 | [
"FLEUR",
"Octopus"
] | dd76adca880b1b9adfd0a41932b616907deedc6e65320e1550ebf6ec93e756a2 |
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import scipy.io
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
# Returns mu,sigma for 20 hidden-states from feature-vectors(123,35) for Smooth, Moderate, and Rough Surface Models
def feature_to_mu_sigma(fvec):
    """Compute per-state Gaussian emission parameters from a feature matrix.

    Splits the m rows of fvec into 20 equally sized consecutive chunks (one
    per hidden state) and returns the mean and standard deviation of each
    chunk, pooling all columns of the chunk together.

    Parameters
    ----------
    fvec : array-like, shape (m, n)
        Feature vectors (e.g. force samples; rows are time, columns trials).

    Returns
    -------
    (mu, sigma) : pair of np.matrix, each of shape (20, 1)
        Per-state means and standard deviations.
    """
    m, n = np.shape(fvec)
    mu = np.matrix(np.zeros((20, 1)))
    sigma = np.matrix(np.zeros((20, 1)))
    # Explicit floor division: the original relied on Python 2 integer '/'.
    # Any leftover rows (m % 20) are ignored, matching the original behaviour.
    divs = m // 20
    for index in range(20):
        chunk = fvec[index * divs:(index + 1) * divs, 0:]
        # np.mean / np.std replace the scipy.mean / scipy.std aliases that
        # were deprecated and then removed from SciPy.
        mu[index] = np.mean(chunk)
        sigma[index] = np.std(chunk)
    return mu, sigma
# Returns sequence given raw data
def create_seq(fvec):
    """Compress raw data into a (20, n) observation-sequence matrix.

    Each column of fvec is reduced to 20 observations by averaging 20
    consecutive row chunks (same chunking as feature_to_mu_sigma), giving
    one observation per hidden state.

    Parameters
    ----------
    fvec : array-like, shape (m, n)

    Returns
    -------
    np.matrix of shape (20, n)
    """
    m, n = np.shape(fvec)
    seq = np.matrix(np.zeros((20, n)))
    # Explicit floor division (original relied on Python 2 integer '/').
    divs = m // 20
    for col in range(n):
        for index in range(20):
            chunk = fvec[index * divs:(index + 1) * divs, col]
            # np.mean replaces the removed scipy.mean alias.
            seq[index, col] = np.mean(chunk)
    return seq
if __name__ == '__main__':

    ### Simulation Data
    # Each .mat file holds a 'force' array; after transposing, every row is
    # one trial of tSamples force readings (50 trials per surface class).
    tSamples = 400
    datasmooth = scipy.io.loadmat('smooth.mat')
    datamoderate = scipy.io.loadmat('medium.mat')
    datarough = scipy.io.loadmat('rough.mat')
    # Assemble one (tSamples x 150) matrix:
    # columns 0-49 smooth, 50-99 moderate, 100-149 rough.
    simulforce = np.zeros((tSamples,150))
    datatime = np.arange(0,4,0.01)
    dataforceSmooth = np.transpose(datasmooth['force'])
    dataforceModerate = np.transpose(datamoderate['force'])
    dataforceRough = np.transpose(datarough['force'])
    j = 0
    for i in dataforceSmooth:
        simulforce[:,j] = i
        j = j+1
    j = 50
    for i in dataforceModerate:
        simulforce[:,j] = i
        j = j+1
    j = 100
    for i in dataforceRough:
        simulforce[:,j] = i
        j = j+1
    Fmat = np.matrix(simulforce)

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    #print " "
    #print 'Total_Matrix_Shape:',m_tot,n_tot

    # Initial per-state Gaussian emission parameters, one set per class,
    # estimated from all 50 trials of that class (refined later by
    # Baum-Welch on training folds only).
    mu_smooth,sigma_smooth = feature_to_mu_sigma(Fmat[0:tSamples,0:50])
    mu_moderate,sigma_moderate = feature_to_mu_sigma(Fmat[0:tSamples,50:100])
    mu_rough,sigma_rough = feature_to_mu_sigma(Fmat[0:tSamples,100:150])

    #print [mu_smooth, sigma_smooth]

    # HMM - Implementation:
    # 10 Hidden States
    # Force as Continuous Gaussian Observations from each hidden state
    # Three HMM-Models for Smooth, Moderate, Rough Surfaces
    # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # For new objects, it is classified according to which model it represenst the closest..
    # NOTE(review): the comment above says "10 Hidden States" but the code
    # below consistently uses 20 states -- the comment appears stale.

    F = ghmm.Float() # emission domain of this model

    # A - Transition Matrix
    # 20x20 upper-triangular (left-to-right) transition structure: each state
    # can only stay or move to later states; the last state is absorbing.
    A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]

    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_smooth = np.zeros((20,2))
    B_moderate = np.zeros((20,2))
    B_rough = np.zeros((20,2))
    for num_states in range(20):
        B_smooth[num_states,0] = mu_smooth[num_states]
        B_smooth[num_states,1] = sigma_smooth[num_states]
        B_moderate[num_states,0] = mu_moderate[num_states]
        B_moderate[num_states,1] = sigma_moderate[num_states]
        B_rough[num_states,0] = mu_rough[num_states]
        B_rough[num_states,1] = sigma_rough[num_states]
    # ghmm expects plain Python lists, not numpy arrays.
    B_smooth = B_smooth.tolist()
    B_moderate = B_moderate.tolist()
    B_rough = B_rough.tolist()

    # pi - initial probabilities per state
    # NOTE(review): 20 * 0.05 sums to 1.0 as intended.
    pi = [0.05] * 20

    # generate Smooth, Moderate, Rough Surface models from parameters
    model_smooth = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_smooth, pi) # Will be Trained
    model_moderate = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_moderate, pi) # Will be Trained
    model_rough = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rough, pi) # Will be Trained

    # 5-fold cross-validation: on trial t (1..5), column t-1 of every group
    # of 5 consecutive trials is held out for testing and the remaining 4
    # are used for training.
    # NOTE(review): the models are NOT re-initialised between trials, so
    # each Baum-Welch run continues from the previous fold's parameters.
    trial_number = 1
    # Accumulated per-test-column classification counts over all 5 folds
    # (30 test columns per fold: 10 smooth, 10 moderate, 10 rough).
    smooth_final = np.matrix(np.zeros((30,1)))
    moderate_final = np.matrix(np.zeros((30,1)))
    rough_final = np.matrix(np.zeros((30,1)))
    while (trial_number < 6):

        # For Training
        total_seq = Fmat[0:tSamples,:]
        m_total, n_total = np.shape(total_seq)
        #print 'Total_Sequence_Shape:', m_total, n_total

        # Collect the 40 training columns per class for this fold by
        # skipping the held-out column in every block of 5.
        if (trial_number == 1):
            j = 5
            total_seq_smooth = total_seq[0:tSamples,1:5]
            total_seq_moderate = total_seq[0:tSamples,51:55]
            total_seq_rough = total_seq[0:tSamples,101:105]
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+1:j+5]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+51:j+55]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+101:j+105]))
                j = j+5

        if (trial_number == 2):
            j = 5
            total_seq_smooth = np.column_stack((total_seq[0:tSamples,0],total_seq[0:tSamples,2:5]))
            total_seq_moderate = np.column_stack((total_seq[0:tSamples,50],total_seq[0:tSamples,52:55]))
            total_seq_rough = np.column_stack((total_seq[0:tSamples,100],total_seq[0:tSamples,102:105]))
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0],total_seq[0:tSamples,j+2:j+5]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50],total_seq[0:tSamples,j+52:j+55]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100],total_seq[0:tSamples,j+102:j+105]))
                j = j+5

        if (trial_number == 3):
            j = 5
            total_seq_smooth = np.column_stack((total_seq[0:tSamples,0:2],total_seq[0:tSamples,3:5]))
            total_seq_moderate = np.column_stack((total_seq[0:tSamples,50:52],total_seq[0:tSamples,53:55]))
            total_seq_rough = np.column_stack((total_seq[0:tSamples,100:102],total_seq[0:tSamples,103:105]))
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+2],total_seq[0:tSamples,j+3:j+5]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+52],total_seq[0:tSamples,j+53:j+55]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+102],total_seq[0:tSamples,j+103:j+105]))
                j = j+5

        if (trial_number == 4):
            j = 5
            total_seq_smooth = np.column_stack((total_seq[0:tSamples,0:3],total_seq[0:tSamples,4:5]))
            total_seq_moderate = np.column_stack((total_seq[0:tSamples,50:53],total_seq[0:tSamples,54:55]))
            total_seq_rough = np.column_stack((total_seq[0:tSamples,100:103],total_seq[0:tSamples,104:105]))
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+3],total_seq[0:tSamples,j+4:j+5]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+53],total_seq[0:tSamples,j+54:j+55]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+103],total_seq[0:tSamples,j+104:j+105]))
                j = j+5

        if (trial_number == 5):
            j = 5
            total_seq_smooth = total_seq[0:tSamples,0:4]
            total_seq_moderate = total_seq[0:tSamples,50:54]
            total_seq_rough = total_seq[0:tSamples,100:104]
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+4]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+54]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+104]))
                j = j+5

        # ghmm wants one list per observation sequence, so transpose
        # (columns become rows) and convert to nested lists.
        train_seq_smooth = (np.array(total_seq_smooth).T).tolist()
        train_seq_moderate = (np.array(total_seq_moderate).T).tolist()
        train_seq_rough = (np.array(total_seq_rough).T).tolist()
        #m,n = np.shape(train_seq_smooth)
        #print m,n
        #print train_seq_smooth
        final_ts_smooth = ghmm.SequenceSet(F,train_seq_smooth)
        final_ts_moderate = ghmm.SequenceSet(F,train_seq_moderate)
        final_ts_rough = ghmm.SequenceSet(F,train_seq_rough)

        # Baum-Welch re-estimation of each class model on its training fold
        model_smooth.baumWelch(final_ts_smooth)
        model_moderate.baumWelch(final_ts_moderate)
        model_rough.baumWelch(final_ts_rough)

        # For Testing
        # Collect the held-out column of each block of 5 (10 per class).
        if (trial_number == 1):
            j = 5
            total_seq_smooth = total_seq[0:tSamples,0]
            total_seq_moderate = total_seq[0:tSamples,50]
            total_seq_rough = total_seq[0:tSamples,100]
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100]))
                j = j+5

        if (trial_number == 2):
            j = 5
            total_seq_smooth = total_seq[0:tSamples,1]
            total_seq_moderate = total_seq[0:tSamples,51]
            total_seq_rough = total_seq[0:tSamples,101]
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+1]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+51]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+101]))
                j = j+5

        if (trial_number == 3):
            j = 5
            total_seq_smooth = total_seq[0:tSamples,2]
            total_seq_moderate = total_seq[0:tSamples,52]
            total_seq_rough = total_seq[0:tSamples,102]
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+2]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+52]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+102]))
                j = j+5

        if (trial_number == 4):
            j = 5
            total_seq_smooth = total_seq[0:tSamples,3]
            total_seq_moderate = total_seq[0:tSamples,53]
            total_seq_rough = total_seq[0:tSamples,103]
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+3]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+53]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+103]))
                j = j+5

        if (trial_number == 5):
            j = 5
            total_seq_smooth = total_seq[0:tSamples,4]
            total_seq_moderate = total_seq[0:tSamples,54]
            total_seq_rough = total_seq[0:tSamples,104]
            while (j < 50):
                total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+4]))
                total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+54]))
                total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+104]))
                j = j+5

        # 30 test columns: 0-9 smooth, 10-19 moderate, 20-29 rough.
        total_seq_obj = np.matrix(np.column_stack((total_seq_smooth,total_seq_moderate,total_seq_rough)))

        # One-hot row vectors recording which model "won" each test column.
        smooth = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        moderate = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        rough = np.matrix(np.zeros(np.size(total_seq_obj,1)))

        m,n = np.shape(smooth)
        print m,n
        k = 0
        while (k < np.size(total_seq_obj,1)):
            # Flatten the k-th test column into a flat Python list for ghmm.
            test_seq_obj = (np.array(total_seq_obj[0:tSamples,k]).T).tolist()
            new_test_seq_obj = np.array(sum(test_seq_obj,[]))
            ts_obj = new_test_seq_obj
            final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())

            # Find Viterbi Path
            path_smooth_obj = model_smooth.viterbi(final_ts_obj)
            path_moderate_obj = model_moderate.viterbi(final_ts_obj)
            path_rough_obj = model_rough.viterbi(final_ts_obj)

            # Classify by the highest Viterbi log-likelihood (element [1]
            # of the (path, log-likelihood) pair returned by viterbi).
            obj = max(path_smooth_obj[1],path_moderate_obj[1],path_rough_obj[1])
            if obj == path_smooth_obj[1]:
                smooth[0,k] = 1
            elif obj == path_moderate_obj[1]:
                moderate[0,k] = 1
            else:
                rough[0,k] = 1
            k = k+1

        #print smooth.T
        # Accumulate this fold's classifications into the running totals.
        smooth_final = smooth_final + smooth.T
        moderate_final = moderate_final + moderate.T
        rough_final = rough_final + rough.T
        trial_number = trial_number + 1

    #print smooth_final
    #print moderate_final
    #print rough_final

    # Confusion Matrix
    # arrsum_*[i] sums the counts over the 10 test columns of true class i
    # (0 = smooth, 1 = moderate, 2 = rough).
    cmat = np.zeros((3,3))
    arrsum_smooth = np.zeros((3,1))
    arrsum_moderate = np.zeros((3,1))
    arrsum_rough= np.zeros((3,1))
    k = 10
    i = 0
    while (k < 31):
        arrsum_smooth[i] = np.sum(smooth_final[k-10:k,0])
        arrsum_moderate[i] = np.sum(moderate_final[k-10:k,0])
        arrsum_rough[i] = np.sum(rough_final[k-10:k,0])
        i = i+1
        k = k+10

    # Rows of cmat are predictions, columns are true classes.
    i=0
    while (i < 3):
        j=0
        while (j < 3):
            if (i == 0):
                cmat[i][j] = arrsum_smooth[j]
            elif (i == 1):
                cmat[i][j] = arrsum_moderate[j]
            else:
                cmat[i][j] = arrsum_rough[j]
            j = j+1
        i = i+1
    #print cmat

    # Plot Confusion Matrix
    Nlabels = 3
    fig = pp.figure()
    ax = fig.add_subplot(111)
    figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
    ax.set_title('Performance of HMM Models')
    pp.xlabel("Targets")
    pp.ylabel("Predictions")
    ax.set_xticks([0.5,1.5,2.5])
    ax.set_xticklabels(['Smooth', 'Moderate', 'Rough'])
    ax.set_yticks([2.5,1.5,0.5])
    ax.set_yticklabels(['Smooth', 'Moderate', 'Rough'])
    figbar = fig.colorbar(figplot)
    # Overlay the raw count in the centre of each confusion-matrix cell.
    i = 0
    while (i < 3):
        j = 0
        while (j < 3):
            pp.text(j+0.5,2.5-i,cmat[i][j])
            j = j+1
        i = i+1
    pp.show()
| tapomayukh/projects_in_python | sandbox_tapo/src/AI/Code for Project-3/HMM Code/hmm_crossvalidation_force_20_states.py | Python | mit | 17,322 | [
"Gaussian",
"Mayavi"
] | b1e9cfcecfed31467178823a43308c87b7dce1060ced4db82d5dbee7236b8078 |
#!/usr/bin/python -O
############################################################################
# Copyright (c) 2015 Saint Petersburg State University
# Copyright (c) 2011-2014 Saint Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import os
import sys
import shutil
import support
import process_cfg
from site import addsitedir
from distutils import dir_util
def prepare_config_corr(filename, cfg, ext_python_modules_home):
    """Rewrite the corrector YAML config file in place with values from cfg.

    Loads the YAML template at `filename`, fills in the dataset, output,
    scratch-directory, thread-count and bwa settings from `cfg`, and writes
    the result back to the same path.

    Fix: the original leaked the read handle (`open(filename, 'r')` was
    never closed) and closed the write handle manually, so it stayed open
    if `pyyaml.dump` raised; both now use `with` for exception-safe cleanup.
    """
    addsitedir(ext_python_modules_home)
    # The SPAdes distribution bundles its own PyYAML copies, one per
    # Python major version.
    if sys.version.startswith('2.'):
        import pyyaml2 as pyyaml
    elif sys.version.startswith('3.'):
        import pyyaml3 as pyyaml

    with open(filename, 'r') as template_file:
        data = pyyaml.load(template_file)
    data["dataset"] = cfg.dataset
    data["output_dir"] = cfg.output_dir
    data["work_dir"] = process_cfg.process_spaces(cfg.tmp_dir)
    #data["hard_memory_limit"] = cfg.max_memory
    data["max_nthreads"] = cfg.max_threads
    data["bwa"] = cfg.bwa
    with open(filename, 'w') as file_c:
        pyyaml.dump(data, file_c,
                    default_flow_style=False, default_style='"', width=float("inf"))
def run_corrector(configs_dir, execution_home, cfg,
                  ext_python_modules_home, log, to_correct, result):
    """Run the "corrector" contig-polishing binary on `to_correct`.

    Copies the corrector config templates into cfg.output_dir, fills the
    config in from `cfg`, invokes the binary via support.sys_call, and
    errors out unless the expected `result` file was produced. Removes the
    temporary working directory on success.
    """
    addsitedir(ext_python_modules_home)
    # NOTE(review): pyyaml is imported here but not used in this function;
    # presumably kept by analogy with prepare_config_corr (which does the
    # actual YAML work) -- confirm before removing.
    if sys.version.startswith('2.'):
        import pyyaml2 as pyyaml
    elif sys.version.startswith('3.'):
        import pyyaml3 as pyyaml

    # Start from a fresh copy of the corrector config templates.
    dst_configs = os.path.join(cfg.output_dir, "configs")
    if os.path.exists(dst_configs):
        shutil.rmtree(dst_configs)
    dir_util.copy_tree(os.path.join(configs_dir, "corrector"), dst_configs, preserve_times=False)
    cfg_file_name = os.path.join(dst_configs, "corrector.info")

    # Scratch directory for the binary; recorded on cfg so
    # prepare_config_corr can write it into the config file.
    cfg.tmp_dir = support.get_tmp_dir(prefix="corrector_")

    prepare_config_corr(cfg_file_name, cfg, ext_python_modules_home)
    binary_name = "corrector"

    command = [os.path.join(execution_home, binary_name),
               os.path.abspath(cfg_file_name), os.path.abspath(to_correct)]

    log.info("\n== Running contig polishing tool: " + ' '.join(command) + "\n")
    log.info("\n== Dataset description file was created: " + cfg_file_name + "\n")
    support.sys_call(command, log)
    # The binary signals success by producing the expected output file.
    if not os.path.isfile(result):
        support.error("Mismatch correction finished abnormally: " + result + " not found!")
    # Clean up the scratch directory created above.
    if os.path.isdir(cfg.tmp_dir):
        shutil.rmtree(cfg.tmp_dir)
| INNUENDOWEB/INNUca | src/SPAdes-3.11.0-Linux/share/spades/spades_pipeline/corrector_logic.py | Python | gpl-3.0 | 2,484 | [
"BWA"
] | 6b6ec537f80ab43597839f31a0e961f78926ae2570dfba648e7124652cc68ec5 |
"""
LMS Course Home page object
"""
from collections import OrderedDict
from bok_choy.page_object import PageObject
from .bookmarks import BookmarksPage
from .course_page import CoursePage
from .courseware import CoursewarePage
from .staff_view import StaffPreviewPage
class CourseHomePage(CoursePage):
    """
    The learner-facing course home page, which hosts the course outline.
    """

    url_path = "course/"

    HEADER_RESUME_COURSE_SELECTOR = '.page-header .action-resume-course'

    def is_browser_on_page(self):
        """The page is considered loaded once the course outline is present."""
        return self.q(css='.course-outline').present

    def __init__(self, browser, course_id):
        super(CourseHomePage, self).__init__(browser, course_id)
        self.course_id = course_id
        self.outline = CourseOutlinePage(browser, self)
        self.preview = StaffPreviewPage(browser, self)
        # TODO: TNL-6546: Remove the following
        self.course_outline_page = False

    def click_bookmarks_button(self):
        """Open the bookmarks page via its button on the course home page."""
        self.q(css='.bookmarks-list-button').first.click()
        destination = BookmarksPage(self.browser, self.course_id)
        destination.visit()

    def resume_course_from_header(self):
        """
        Use the page header's Resume Course button to jump into courseware.
        """
        self.q(css=self.HEADER_RESUME_COURSE_SELECTOR).first.click()
        destination = CoursewarePage(self.browser, self.course_id)
        destination.wait_for_page()

    def search_for_term(self, search_term):
        """
        Run an in-course search for the given term and return the results page.
        """
        search_input = self.q(css='.search-form > .search-input')
        search_input.fill(search_term)
        self.q(css='.search-form > .search-button').click()
        return CourseSearchResultsPage(self.browser, self.course_id)
class CourseOutlinePage(PageObject):
    """
    Course outline fragment of page.

    Rendered inside CourseHomePage rather than as a standalone page, so it
    has no URL of its own and delegates page checks to its parent page.
    """
    url = None

    # CSS selector templates; {0} is a 1-based section position and {1} a
    # 1-based subsection position (CSS :nth-of-type indices start at 1).
    SECTION_SELECTOR = '.outline-item.section:nth-of-type({0})'
    SECTION_TITLES_SELECTOR = '.section-name h3'
    SUBSECTION_SELECTOR = SECTION_SELECTOR + ' .subsection:nth-of-type({1}) .outline-item'
    SUBSECTION_TITLES_SELECTOR = SECTION_SELECTOR + ' .subsection .subsection-title'
    OUTLINE_RESUME_COURSE_SELECTOR = '.outline-item .resume-right'
def __init__(self, browser, parent_page):
    """Store the hosting page; parent_page is the CourseHomePage embedding this outline."""
    super(CourseOutlinePage, self).__init__(browser)
    self.parent_page = parent_page
def is_browser_on_page(self):
    """
    Delegate the page check to the parent course home page.

    Bug fix: the original returned the bound method object itself
    (`self.parent_page.is_browser_on_page`), which is always truthy, so the
    check could never fail. It now actually invokes the parent's check.
    """
    return self.parent_page.is_browser_on_page()
@property
def sections(self):
    """
    Return an ordered dict mapping each section title to the list of its
    subsection titles.

    Example:

        {
            'Introduction': ['Course Overview'],
            'Week 1': ['Lesson 1', 'Lesson 2', 'Homework'],
            'Final Exam': ['Final Exam']
        }

    These titles can be passed to `go_to_section` to navigate there.
    """
    outline_dict = OrderedDict()
    # The original carried a `len(section_titles) < 1` check inside this
    # loop, but the loop body only runs when the list is non-empty, so that
    # branch was dead code and has been removed.
    for sec_index, sec_title in enumerate(self._section_titles()):
        # Convert the 0-based enumerate index to the 1-based CSS index
        # expected by the subsection-title selector.
        outline_dict[sec_title] = self._subsection_titles(sec_index + 1)
    return outline_dict
@property
def num_sections(self):
    """
    Number of top-level sections currently rendered in the outline.
    """
    return len(self.q(css=self.SECTION_TITLES_SELECTOR))
@property
def num_subsections(self, section_title=None):
    """
    Return the number of subsections.

    Arguments:
        section_title: The section for which to return the number of
            subsections. If None, default to the first section.

    NOTE(review): as a @property this can only be read with the default
    argument, so the section_title branch is unreachable through normal
    attribute access -- confirm whether this was meant to be a method.
    """
    if section_title:
        # Bug fixes: `_section_title_to_index` raises ValueError when the
        # title is missing (it never returns a falsy sentinel), and the old
        # `if not section_index: return` guard wrongly bailed out for the
        # first section (index 0). The selector's :nth-of-type index is
        # 1-based, so shift the 0-based list index.
        section_index = self._section_title_to_index(section_title) + 1
    else:
        section_index = 1
    return len(self.q(css=self.SUBSECTION_TITLES_SELECTOR.format(section_index)))
def go_to_section(self, section_title, subsection_title):
    """
    Open the named subsection of the named section in the courseware.

    Every section has at least one subsection, so both titles are required.

    Example:

        go_to_section("Week 1", "Lesson 1")
    """
    sec_idx = self._section_title_to_index(section_title)
    if sec_idx is None:
        raise ValueError("Could not find section '{0}'".format(section_title))

    # Subsection-title lookups use 1-based CSS positions.
    subsection_list = self._subsection_titles(sec_idx + 1)
    if subsection_title not in subsection_list:
        raise ValueError("Could not find subsection '{0}' in section '{1}'".format(
            subsection_title, section_title
        ))
    sub_idx = subsection_list.index(subsection_title)

    # Both CSS positions are 1-based, list indices 0-based.
    target_css = self.SUBSECTION_SELECTOR.format(sec_idx + 1, sub_idx + 1)

    # Click through and wait until the courseware has reloaded.
    self.q(css=target_css).first.click()
    self._wait_for_course_section(section_title, subsection_title)
def go_to_section_by_index(self, section_index, subsection_index):
    """
    Go to the section/subsection in the courseware.

    Every section must have at least one subsection, so specify both the
    section and subsection indices.

    Arguments:
        section_index: A 0-based index of the section to navigate to.
        subsection_index: A 0-based index of the subsection to navigate to.

    Raises:
        ValueError: if either index is out of range.
    """
    try:
        section_title = self._section_titles()[section_index]
    except IndexError:
        raise ValueError("Section index '{0}' is out of range.".format(section_index))
    try:
        # Subsection titles are looked up by 1-based CSS index.
        subsection_title = self._subsection_titles(section_index + 1)[subsection_index]
    except IndexError:
        raise ValueError("Subsection index '{0}' in section index '{1}' is out of range.".format(
            subsection_index, section_index
        ))
    self.go_to_section(section_title, subsection_title)
def _section_title_to_index(self, section_title):
    """
    Return the 0-based index of the section with the given title.

    Raises:
        ValueError: if no section has that title.
    """
    try:
        section_index = self._section_titles().index(section_title)
    except ValueError:
        raise ValueError("Could not find section '{0}'".format(section_title))
    return section_index
def resume_course_from_outline(self):
    """
    Navigate to courseware using the Resume Course button in the header,
    then wait until the courseware page has fully loaded.
    """
    self.q(css=self.OUTLINE_RESUME_COURSE_SELECTOR).first.click()
    courseware_page = CoursewarePage(self.browser, self.parent_page.course_id)
    courseware_page.wait_for_page()
def _section_titles(self):
    """
    Return a list of all section titles on the page, whitespace-stripped.
    """
    return self.q(css=self.SECTION_TITLES_SELECTOR).map(lambda el: el.text.strip()).results
def _subsection_titles(self, section_index):
    """
    Return a list of all subsection titles on the page
    for the section at index `section_index` (starts at 1).

    Uses innerHTML rather than visible text because collapsed
    subsections may not render visible text.
    """
    subsection_css = self.SUBSECTION_TITLES_SELECTOR.format(section_index)
    return self.q(css=subsection_css).map(
        lambda el: el.get_attribute('innerHTML').strip()
    ).results
def _wait_for_course_section(self, section_title, subsection_title):
    """
    Ensures the user navigates to the course content page with the correct
    section and subsection.
    """
    courseware_page = CoursewarePage(self.browser, self.parent_page.course_id)
    courseware_page.wait_for_page()
    # TODO: TNL-6546: Remove this if/visit_course_outline_page
    if self.parent_page.course_outline_page:
        courseware_page.nav.visit_course_outline_page()
    self.wait_for(
        promise_check_func=lambda: courseware_page.nav.is_on_section(section_title, subsection_title),
        description="Waiting for course page with section '{0}' and subsection '{1}'".format(
            section_title, subsection_title)
    )
class CourseSearchResultsPage(CoursePage):
    """
    Course search results page.
    """

    # url = "courses/{course_id}/search/?query={query_string}"

    def is_browser_on_page(self):
        """Return True when the search-results container is present."""
        return self.q(css='.page-content > .search-results').present

    def __init__(self, browser, course_id):
        super(CourseSearchResultsPage, self).__init__(browser, course_id)
        self.course_id = course_id

    @property
    def search_results(self):
        """Query object for the individual search result items."""
        return self.q(css='.search-results-item')
| miptliot/edx-platform | common/test/acceptance/pages/lms/course_home.py | Python | agpl-3.0 | 8,981 | [
"VisIt"
] | 7a19d9ad11514dd4e2bf90521d831b820c1c9ccfaf26785cadb5b1f78b5fe0d4 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# unit - some simple unit tests against migfs
# Copyright (C) 2003-2011 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Unit test migfs"""
import os
import sys
import traceback
from migfs import default_block_size
debug_mode = False
def debug(line):
if debug_mode:
print 'DEBUG: %s' % line
def show_diff(result, expected):
"""Shared function for displaying difference between result and expected"""
max_len = 32
part_len = max_len / 2
if len(result) > max_len:
first = result[:part_len] + ' .. ' + result[-part_len:]
else:
first = result
if len(expected) > max_len:
second = expected[:part_len] + ' .. ' + expected[-part_len:]
else:
second = expected
print "\t'%s' != '%s'\n\t(len: %d vs. %d)" % (first, second,
len(result), len(expected))
def clean_test(test_dir):
"""Clean up everything in test_dir"""
name = 'clean up'
print 'Starting %s test' % name
for (root, dirs, files) in os.walk(test_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(test_dir)
success = not os.path.exists(test_dir)
print 'Got expected result:\t\t%s' % success
def prepare_test(test_path):
"""Create and manipulate some subdirs including one for test_path"""
name = 'create parent dir'
print 'Starting %s test' % name
target = os.path.dirname(test_path)
try:
os.makedirs(target)
except Exception, exc:
print '\tFailed to %s (%s): %s' % (name, target, exc)
success = os.path.isdir(target)
print 'Got expected result:\t\t%s' % success
name = 'create sub dir'
print 'Starting %s test' % name
target = os.path.join(target, 'sub')
try:
os.mkdir(target)
except Exception, exc:
print '\tFailed to %s (%s): %s' % (name, target, exc)
success = os.path.isdir(target)
print 'Got expected result:\t\t%s' % success
name = 'move sub dir'
print 'Starting %s test' % name
tmp_path = target + '.tmp'
try:
os.rename(target, tmp_path)
except Exception, exc:
print '\tFailed to %s (%s): %s' % (name, target, exc)
success = os.path.isdir(tmp_path) and not os.path.exists(target)
print 'Got expected result:\t\t%s' % success
name = 'remove sub dir'
print 'Starting %s test' % name
target = tmp_path
try:
os.rmdir(target)
except Exception, exc:
print '\tFailed to %s (%s): %s' % (name, target, exc)
success = not os.path.exists(target)
print 'Got expected result:\t\t%s' % success
def write_test(test_path):
"""Write test using test_path"""
data_len = 4
tests = [('create file', ''), ('short write', '123'), ('long write'
, '123' * default_block_size)]
for (name, val) in tests:
print 'Starting %s test' % name
fd = open(test_path, 'w')
debug('opened %s' % test_path)
if val:
fd.write(val)
debug('wrote %s ...' % val[:data_len])
fd.close()
debug('closed %s' % test_path)
fd = open(test_path, 'r')
debug('opened %s' % test_path)
result = fd.read()
debug('read %s ... from %s' % (result[:data_len], test_path))
fd.close()
debug('closed %s' % test_path)
success = result == val
print 'Got expected result:\t\t%s' % success
if not success:
show_diff(val, result)
def append_test(test_path):
"""Append test using test_path"""
tests = [('short append', '123'), ('long append', '123'
* default_block_size)]
prefix = 'abc'
for (name, val) in tests:
print 'Starting %s test' % name
fd = open(test_path, 'w')
fd.write(prefix)
fd.close()
fd = open(test_path, 'a')
if val:
fd.write(val)
fd.close()
fd = open(test_path, 'r')
result = fd.read()
fd.close()
success = result[len(prefix):] == val
print 'Got expected result:\t\t%s' % success
if not success:
show_diff(val, result)
def modify_test(test_path):
"""Modify test using test_path"""
original = 'ABCD' * default_block_size
short_string = '123'
long_string = '1234567890'
tests = [
('short prefix modify', short_string, 0),
('short modify', short_string, default_block_size + 3),
('short suffix modify', short_string, len(original)
- len(short_string)),
('long prefix modify', long_string, 0),
('long modify', long_string * default_block_size,
default_block_size + 3),
('long suffix modify', long_string, len(original)
- len(long_string)),
]
for (name, val, modify_index) in tests:
print 'Starting %s test' % name
fd = open(test_path, 'w')
fd.write(original)
fd.close()
fd = open(test_path, 'r+')
fd.seek(modify_index)
if val:
fd.write(val)
fd.close()
fd = open(test_path, 'r')
result = fd.read()
fd.close()
expected_result = original[:modify_index] + val\
+ original[modify_index + len(val):]
success = result == expected_result
print 'Got expected result:\t\t%s' % success
if not success:
show_diff(val, result)
# ## Main ###
mount_point = 'mig-home'
# do_mount = False
do_mount = True
# debug_mode = True
if len(sys.argv) > 1:
mount_point = sys.argv[1]
test_dir = os.path.join(mount_point, 'migfs-test')
test_path = os.path.join(test_dir, 'migfs-test', 'child', 'grandchild',
'testfile.txt')
if not os.path.isdir(mount_point):
print 'creating missing mount point %s' % mount_point
try:
os.mkdir(mount_point)
except OSError, ose:
print 'Failed to create missing mount point %s: %s'\
% (mount_point, ose)
sys.exit(1)
print '--- Starting unit tests ---'
print
if do_mount:
os.system('./mount.migfs none %s' % mount_point)
try:
prepare_test(test_path)
write_test(test_path)
append_test(test_path)
modify_test(test_path)
clean_test(test_dir)
except Exception, err:
print 'Error during test: %s' % err
print 'DEBUG: %s' % traceback.format_exc()
print
print '--- End of unit tests ---'
if do_mount:
os.system('fusermount -u -z %s' % mount_point)
| heromod/migrid | mig/migfs-fuse/unit.py | Python | gpl-2.0 | 7,325 | [
"Brian"
] | 66bde0b2d3850e36f93cb9c8b6b73382902ce403f6fe3b298c9278e8bd8f2f04 |
"""Predictor classes."""
from abc import ABC, abstractmethod
import logging
from typing import Iterable, Sequence
import acton.database
import acton.kde_predictor
import GPy as gpy
import numpy
import sklearn.base
import sklearn.linear_model
import sklearn.model_selection
import sklearn.preprocessing
from numpy.random import multivariate_normal, gamma, multinomial
class Predictor(ABC):
    """Base class for predictors.

    Attributes
    ----------
    prediction_type : str
        What kind of predictions this class generates, e.g. classification.
    """

    prediction_type = 'classification'

    @abstractmethod
    def fit(self, ids: Iterable[int]):
        """Fits the predictor to labelled data.

        Parameters
        ----------
        ids
            List of IDs of instances to train from.
        """

    @abstractmethod
    def predict(self, ids: Sequence[int]) -> (numpy.ndarray, numpy.ndarray):
        """Predicts labels of instances.

        Notes
        -----
        Unlike in scikit-learn, predictions are always real-valued.
        Predicted labels for a classification problem are represented by
        predicted probabilities of each class.

        Parameters
        ----------
        ids
            List of IDs of instances to predict labels for.

        Returns
        -------
        numpy.ndarray
            An N x T x C array of corresponding predictions.
        numpy.ndarray
            A N array of confidences (or None if not applicable).
        """

    @abstractmethod
    def reference_predict(
            self, ids: Sequence[int]) -> (numpy.ndarray, numpy.ndarray):
        """Predicts labels using the best possible method.

        Parameters
        ----------
        ids
            List of IDs of instances to predict labels for.

        Returns
        -------
        numpy.ndarray
            An N x 1 x C array of corresponding predictions.
        numpy.ndarray
            A N array of confidences (or None if not applicable).
        """
class _InstancePredictor(Predictor):
    """Wrapper for a scikit-learn instance.

    Attributes
    ----------
    _db : acton.database.Database
        Database storing features and labels.
    _instance : sklearn.base.BaseEstimator
        scikit-learn predictor instance.
    """

    def __init__(self, instance: sklearn.base.BaseEstimator,
                 db: acton.database.Database):
        """
        Arguments
        ---------
        instance
            scikit-learn predictor instance.
        db
            Database storing features and labels.
        """
        self._db = db
        self._instance = instance

    def fit(self, ids: Iterable[int]):
        """Fits the predictor to labelled data.

        Parameters
        ----------
        ids
            List of IDs of instances to train from.
        """
        features = self._db.read_features(ids)
        # Labels are read for labeller 0 only and flattened for sklearn.
        labels = self._db.read_labels([0], ids)
        self._instance.fit(features, labels.ravel())

    def predict(self, ids: Sequence[int]) -> (numpy.ndarray, None):
        """Predicts labels of instances.

        Notes
        -----
        Unlike in scikit-learn, predictions are always real-valued.
        Predicted labels for a classification problem are represented by
        predicted probabilities of each class.

        Parameters
        ----------
        ids
            List of IDs of instances to predict labels for.

        Returns
        -------
        numpy.ndarray
            An N x 1 x C array of corresponding predictions.
        numpy.ndarray
            A N array of confidences (or None if not applicable).
        """
        features = self._db.read_features(ids)
        # NOTE(review): catching AttributeError to detect a missing
        # predict_proba also masks AttributeErrors raised *inside*
        # predict_proba; a hasattr check would be more precise.
        try:
            probs = self._instance.predict_proba(features)
            return probs.reshape((probs.shape[0], 1, probs.shape[1])), None
        except AttributeError:
            probs = self._instance.predict(features)
            if len(probs.shape) == 1:
                # Regression-style output: one value per instance.
                return probs.reshape((probs.shape[0], 1, 1)), None
            else:
                raise NotImplementedError()

    def reference_predict(self, ids: Sequence[int]) -> (numpy.ndarray, None):
        """Predicts labels using the best possible method.

        Parameters
        ----------
        ids
            List of IDs of instances to predict labels for.

        Returns
        -------
        numpy.ndarray
            An N x 1 x C array of corresponding predictions.
        numpy.ndarray
            A N array of confidences (or None if not applicable).
        """
        return self.predict(ids)
def from_instance(predictor: sklearn.base.BaseEstimator,
                  db: acton.database.Database, regression: bool=False
                  ) -> Predictor:
    """Converts a scikit-learn predictor instance into a Predictor instance.

    Arguments
    ---------
    predictor
        scikit-learn predictor.
    db
        Database storing features and labels.
    regression
        Whether this predictor does regression (as opposed to classification).

    Returns
    -------
    Predictor
        Predictor instance wrapping the scikit-learn predictor.
    """
    ip = _InstancePredictor(predictor, db)
    if regression:
        # Set on the instance so the wrapped predictor reports regression.
        ip.prediction_type = 'regression'
    return ip
def from_class(Predictor: type, regression: bool=False) -> type:
    """Converts a scikit-learn predictor class into a Predictor class.

    Arguments
    ---------
    Predictor
        scikit-learn predictor class. NOTE(review): the parameter name
        shadows the module-level Predictor ABC inside this function.
    regression
        Whether this predictor does regression (as opposed to classification).

    Returns
    -------
    type
        Predictor class wrapping the scikit-learn class.
    """
    class Predictor_(_InstancePredictor):
        def __init__(self, db, **kwargs):
            # Defer instance construction so kwargs reach the sklearn class.
            super().__init__(instance=None, db=db)
            self._instance = Predictor(**kwargs)

    if regression:
        Predictor_.prediction_type = 'regression'
    return Predictor_
class Committee(Predictor):
    """A predictor using a committee of other predictors.

    Attributes
    ----------
    n_classifiers : int
        Number of classifiers in the committee.
    subset_size : float
        Percentage of known labels to take subsets of to train each
        classifier. Lower numbers increase variety.
    _db : acton.database.Database
        Database storing features and labels.
    _committee : List[Predictor]
        Underlying committee of predictors.
    _reference_predictor : Predictor
        Reference predictor trained on all known labels.
    """

    def __init__(self, Predictor: type, db: acton.database.Database,
                 n_classifiers: int=10, subset_size: float=0.6,
                 **kwargs: dict):
        """
        Parameters
        ----------
        Predictor
            Predictor to use in the committee.
        db
            Database storing features and labels.
        n_classifiers
            Number of classifiers in the committee.
        subset_size
            Percentage of known labels to take subsets of to train each
            classifier. Lower numbers increase variety.
        kwargs
            Keyword arguments passed to the underlying Predictor.
        """
        self.n_classifiers = n_classifiers
        self.subset_size = subset_size
        self._db = db
        self._committee = [Predictor(db=db, **kwargs)
                           for _ in range(n_classifiers)]
        self._reference_predictor = Predictor(db=db, **kwargs)

    def fit(self, ids: Iterable[int]):
        """Fits the predictor to labelled data.

        Parameters
        ----------
        ids
            List of IDs of instances to train from.
        """
        # Get labels so we can stratify a split.
        labels = self._db.read_labels([0], ids)
        for classifier in self._committee:
            # Take a subset to introduce variety between members.
            try:
                subset, _ = sklearn.model_selection.train_test_split(
                    ids, train_size=self.subset_size, stratify=labels)
            except ValueError:
                # Too few labels to split; train on everything.
                subset = ids
            classifier.fit(subset)
        self._reference_predictor.fit(ids)

    def predict(self, ids: Sequence[int]) -> (numpy.ndarray, numpy.ndarray):
        """Predicts labels of instances.

        Notes
        -----
        Unlike in scikit-learn, predictions are always real-valued.
        Predicted labels for a classification problem are represented by
        predicted probabilities of each class.

        Parameters
        ----------
        ids
            List of IDs of instances to predict labels for.

        Returns
        -------
        numpy.ndarray
            An N x T x C array of corresponding predictions.
        numpy.ndarray
            A N array of confidences (or None if not applicable).
        """
        # Stack each member's N x 1 x C predictions along the T axis.
        predictions = numpy.concatenate(
            [classifier.predict(ids)[0]
             for classifier in self._committee],
            axis=1)
        assert predictions.shape[:2] == (len(ids), len(self._committee))
        # Disagreement between members, averaged over classes.
        stdevs = predictions.std(axis=1).mean(axis=1)
        return predictions, stdevs

    def reference_predict(
            self, ids: Sequence[int]) -> (numpy.ndarray, numpy.ndarray):
        """Predicts labels using the best possible method.

        Parameters
        ----------
        ids
            List of IDs of instances to predict labels for.

        Returns
        -------
        numpy.ndarray
            An N x 1 x C array of corresponding predictions.
        numpy.ndarray
            A N array of confidences (or None if not applicable).
        """
        # Confidences come from committee disagreement; predictions from
        # the reference predictor trained on all labels.
        _, stdevs = self.predict(ids)
        return self._reference_predictor.predict(ids)[0], stdevs
def AveragePredictions(predictor: Predictor) -> Predictor:
    """Wrapper for a predictor that averages predicted probabilities.

    Notes
    -----
    This effectively reduces the number of predictors (the T axis) to 1.
    The wrapped predictor is modified in place: its original predict is
    kept as ``predict_``.

    Arguments
    ---------
    predictor
        Predictor to wrap.

    Returns
    -------
    Predictor
        Predictor with averaged predictions.
    """
    predictor.predict_ = predictor.predict

    def predict(features: numpy.ndarray) -> (numpy.ndarray, numpy.ndarray):
        predictions, stdevs = predictor.predict_(features)
        # Average across the T (predictor) axis, then restore a T axis of 1.
        predictions = predictions.mean(axis=1)
        return predictions.reshape(
            (predictions.shape[0], 1, predictions.shape[1])), stdevs

    predictor.predict = predict
    return predictor
class GPClassifier(Predictor):
    """Classifier using Gaussian processes.

    Attributes
    ----------
    max_iters : int
        Maximum optimisation iterations.
    label_encoder_ : sklearn.preprocessing.LabelEncoder
        Encodes labels as integers (set by fit).
    model_ : gpy.models.GPClassification
        GP model (set by fit).
    _db : acton.database.Database
        Database storing features and labels.
    """

    def __init__(self, db: acton.database.Database, max_iters: int=50000,
                 n_jobs: int=1):
        """
        Parameters
        ----------
        db
            Database.
        max_iters
            Maximum optimisation iterations.
        n_jobs
            Does nothing; here for compatibility with sklearn.
        """
        self._db = db
        self.max_iters = max_iters

    def fit(self, ids: Iterable[int]):
        """Fits the predictor to labelled data.

        Parameters
        ----------
        ids
            List of IDs of instances to train from.

        Raises
        ------
        ValueError
            If the labels contain more than two classes.
        """
        features = self._db.read_features(ids)
        labels = self._db.read_labels([0], ids).ravel()
        self.label_encoder_ = sklearn.preprocessing.LabelEncoder()
        # GPy expects a column vector of 0/1 labels.
        labels = self.label_encoder_.fit_transform(labels).reshape((-1, 1))
        if len(self.label_encoder_.classes_) > 2:
            raise ValueError(
                'GPClassifier only supports binary classification.')
        self.model_ = gpy.models.GPClassification(features, labels)
        self.model_.optimize('bfgs', max_iters=self.max_iters)

    def predict(self, ids: Sequence[int]) -> (numpy.ndarray, numpy.ndarray):
        """Predicts labels of instances.

        Notes
        -----
        Unlike in scikit-learn, predictions are always real-valued.
        Predicted labels for a classification problem are represented by
        predicted probabilities of each class.

        Parameters
        ----------
        ids
            List of IDs of instances to predict labels for.

        Returns
        -------
        numpy.ndarray
            An N x 1 x C array of corresponding predictions.
        numpy.ndarray
            A N array of confidences (or None if not applicable).
        """
        features = self._db.read_features(ids)
        # GPy returns P(class 1); derive P(class 0) as the complement.
        p_predictions, variances = self.model_.predict(features)
        n_predictions = 1 - p_predictions
        predictions = numpy.concatenate([n_predictions, p_predictions], axis=1)
        logging.debug('Variance: {}'.format(variances))
        if isinstance(variances, float) and numpy.isnan(variances):
            variances = None
        else:
            variances = variances.ravel()
            assert variances.shape == (len(ids),)
        assert predictions.shape[1] == 2
        return predictions.reshape((-1, 1, 2)), variances

    def reference_predict(
            self, ids: Sequence[int]) -> (numpy.ndarray, numpy.ndarray):
        """Predicts labels using the best possible method.

        Parameters
        ----------
        ids
            List of IDs of instances to predict labels for.

        Returns
        -------
        numpy.ndarray
            An N x 1 x C array of corresponding predictions.
        numpy.ndarray
            A N array of confidences (or None if not applicable).
        """
        return self.predict(ids)
class TensorPredictor(Predictor):
    """Predict labels for each tensor entry.

    Particle-filter style posterior sampling for a RESCAL-like bilinear
    tensor factorisation X[k] ~ E R[k] E^T with Gaussian noise.

    Attributes
    ----------
    _db : acton.database.Database
        Database storing features and labels.
    n_particles:
        Number of particles for Thompson sampling.
    n_relations:
        Number of relations (K)
    n_entities:
        Number of entities (N)
    n_dim
        Number of latent dimensions (D)
    var_r
        variance of prior of R (one value per particle)
    var_e
        variance of prior of E (one value per particle)
    var_x
        variance of X
    sample_prior
        indicates whether to sample the prior variances
    E
        P x N x D entity features
    R
        P x K x D x D relation features
    X
        K x N x N labels
    """

    def __init__(self,
                 db: acton.database.Database,
                 n_particles: int = 5,
                 var_r: int = 1, var_e: int = 1,
                 var_x: float = 0.1,
                 sample_prior: bool = False,
                 n_jobs: int = 1
                 ):
        """
        Arguments
        ---------
        db
            Database storing features and labels.
        n_particles:
            Number of particles for Thompson sampling.
        var_r
            variance of prior of R
        var_e
            variance of prior of E
        var_x
            variance of X
        sample_prior
            indicates whether to sample the prior variances
        n_jobs
            Does nothing; here for compatibility with sklearn.
        """
        self._db = db
        self.n_particles = n_particles
        self.var_r = var_r
        self.var_e = var_e
        self.var_x = var_x
        # Expand the scalar prior variances to one value per particle.
        self.var_e = numpy.ones(self.n_particles) * self.var_e
        self.var_r = numpy.ones(self.n_particles) * self.var_r
        self.p_weights = numpy.ones(self.n_particles) / self.n_particles
        self.sample_prior = sample_prior
        self.E, self.R = self._db.read_features()
        # X : numpy.ndarray
        #     Fully observed tensor with shape
        #     (n_relations, n_entities, n_entities)
        all_ = []
        self.X = self._db.read_labels(all_)  # read all labels

    def fit(self, ids: Iterable[tuple],
            inc_sub: bool,
            subn_entities: int,
            subn_relations: int):
        """Update posteriors.

        Parameters
        ----------
        ids
            List of IDs (relation, entity, entity) of labelled instances.
        inc_sub
            indicates whether to increase the subsampling size when more
            labels arrive. NOTE(review): currently unused.
        subn_entities
            number of entities for subsampling
        subn_relations
            number of relations for subsampling
        """
        # use certain number of subsampling, rather than percent
        assert self.n_particles == self.E.shape[0] == self.R.shape[0]
        self.n_relations = self.X.shape[0]
        self.n_entities = self.X.shape[1]
        self.n_dim = self.E.shape[2]
        assert self.E.shape[2] == self.R.shape[2]

        # Mark which tensor entries have been labelled so far.
        obs_mask = numpy.zeros_like(self.X)
        for _id in ids:
            r_k, e_i, e_j = _id
            obs_mask[r_k, e_i, e_j] = 1
        cur_obs = numpy.zeros_like(self.X)
        for k in range(self.n_relations):
            cur_obs[k][obs_mask[k] == 1] = self.X[k][obs_mask[k] == 1]

        self.obs_sum = numpy.sum(numpy.sum(obs_mask, 1), 1)
        self.valid_relations = \
            numpy.nonzero(numpy.sum(numpy.sum(self.X, 1), 1))[0]

        # Once enough labels exist, shrink the subsample to a fixed size.
        if numpy.sum(self.obs_sum) > 1000:
            self.subn_entities = 10
            self.subn_relations = 10
        else:
            self.subn_entities = int(subn_entities)
            self.subn_relations = int(subn_relations)

        # Scratch buffers reused by _sample_entity.
        self.features = numpy.zeros(
            [2 * self.n_entities * self.n_relations, self.n_dim])
        self.xi = numpy.zeros([2 * self.n_entities * self.n_relations])

        # only consider the situation where one element is recommended
        # each time: reweight particles by the newest observation.
        next_idx = ids[-1]
        self.p_weights *= \
            self.compute_particle_weight(next_idx, cur_obs, obs_mask)
        self.p_weights /= numpy.sum(self.p_weights)

        # Resample when the effective sample size collapses.
        ESS = 1. / numpy.sum((self.p_weights ** 2))
        if ESS < self.n_particles / 2.:
            self.resample()

        if self.subn_entities == self.n_entities \
                and self.subn_relations == self.n_relations:
            logging.debug("Sampling all.")
            sub_relids = None
            sub_entids = None
        else:
            logging.debug("Subsampling {} entities and {} relations".format(
                self.subn_entities, self.subn_relations))
            sub_relids = numpy.random.choice(
                self.n_relations, self.subn_relations, replace=False)
            sub_entids = numpy.random.choice(
                self.n_entities, self.subn_entities, replace=False)

        # Gibbs-style update of each particle's latent factors.
        for p in range(self.n_particles):
            self._sample_relations(
                cur_obs, obs_mask,
                self.E[p],
                self.R[p],
                self.var_r[p],
                sub_relids
            )
            self._sample_entities(
                cur_obs,
                obs_mask,
                self.E[p],
                self.R[p],
                self.var_e[p],
                sub_entids
            )

        if self.sample_prior:
            self._sample_prior()

    def predict(self, ids: Sequence[int] = None) -> (numpy.ndarray, None):
        """Predicts labels of instances.

        Notes
        -----
        Requires fit() to have been called (n_relations etc. are set there).
        A particle is drawn according to the particle weights and its
        reconstruction E R E^T is returned.

        Parameters
        ----------
        ids
            Unused; the full tensor is always reconstructed.

        Returns
        -------
        numpy.ndarray
            A K x N x N array of corresponding predictions.
        """
        p = multinomial(1, self.p_weights).argmax()
        # reconstruct
        X = numpy.zeros([self.n_relations, self.n_entities, self.n_entities])
        for k in range(self.n_relations):
            X[k] = numpy.dot(numpy.dot(self.E[p], self.R[p][k]), self.E[p].T)
        return X, None

    def reference_predict(self, ids: Sequence[int]) -> (numpy.ndarray, None):
        """Predicts labels using the best possible method.

        Parameters
        ----------
        ids
            List of IDs of instances to predict labels for.

        Returns
        -------
        numpy.ndarray
            A K x N x N array of corresponding predictions.
        numpy.ndarray
            A N array of confidences (or None if not applicable).
        """
        return self.predict(ids)

    def _sample_prior(self):
        """Resample the prior variances var_r and var_e."""
        self._samplevar_r()
        self._samplevar_e()

    def resample(self):
        """Multinomially resample particles according to their weights."""
        count = multinomial(self.n_particles, self.p_weights)
        logging.debug("[RESAMPLE] %s", str(count))
        new_E = list()
        new_R = list()
        for p in range(self.n_particles):
            for i in range(count[p]):
                new_E.append(self.E[p].copy())
                new_R.append(self.R[p].copy())
        self.E = numpy.asarray(new_E)
        self.R = numpy.asarray(new_R)
        # All surviving particles get equal weight again.
        self.p_weights = numpy.ones(self.n_particles) / self.n_particles

    def compute_particle_weight(self, next_idx, X, mask):
        """Likelihood of the newest observation under each particle.

        Returns a normalised weight vector of length n_particles.
        """
        from scipy.stats import norm
        r_k, e_i, e_j = next_idx
        log_weight = numpy.zeros(self.n_particles)
        for p in range(self.n_particles):
            mean = numpy.dot(
                numpy.dot(self.E[p][e_i], self.R[p][r_k]),
                self.E[p][e_j]
            )
            log_weight[p] = norm.logpdf(X[next_idx], mean, self.var_x)
        # Shift for numerical stability before exponentiating.
        log_weight -= numpy.max(log_weight)
        weight = numpy.exp(log_weight)
        weight += 1e-10
        return weight / numpy.sum(weight)

    def _samplevar_r(self):
        # NOTE(review): self.r_alpha / self.r_beta are never initialised in
        # this class; calling this (sample_prior=True) raises AttributeError.
        for p in range(self.n_particles):
            self.var_r[p] = 1. / gamma(
                0.5 * self.n_relations * self.n_dim * self.n_dim + self.r_alpha,
                1. / (0.5 * numpy.sum(self.R[p] ** 2) + self.r_beta))
        logging.debug("Sampled var_r %.3f", numpy.mean(self.var_r))

    def _samplevar_e(self):
        # NOTE(review): self.e_alpha / self.e_beta are never initialised in
        # this class; calling this (sample_prior=True) raises AttributeError.
        for p in range(self.n_particles):
            self.var_e[p] = 1. / gamma(
                0.5 * self.n_entities * self.n_dim + self.e_alpha,
                1. / (0.5 * numpy.sum(self.E[p] ** 2) + self.e_beta))
        logging.debug("Sampled var_e %.3f", numpy.mean(self.var_e))

    def _sample_entities(self, X, mask, E, R, var_e, sample_idx=None):
        """Gibbs-sample (a subset of) entity vectors for one particle."""
        # Precompute R[k] E^T and R[k]^T E^T for all relations.
        RE = numpy.zeros([self.n_relations, self.n_entities, self.n_dim])
        RTE = numpy.zeros([self.n_relations, self.n_entities, self.n_dim])
        for k in range(self.n_relations):
            RE[k] = numpy.dot(R[k], E.T).T
            RTE[k] = numpy.dot(R[k].T, E.T).T
        if sample_idx is None:
            sample_idx = range(self.n_entities)
        for i in sample_idx:
            self._sample_entity(X, mask, E, R, i, var_e, RE, RTE)
            # Refresh the cached products for the updated entity.
            for k in range(self.n_relations):
                RE[k][i] = numpy.dot(R[k], E[i])
                RTE[k][i] = numpy.dot(R[k].T, E[i])

    def _sample_entity(self, X, mask, E, R, i, var_e, RE=None, RTE=None):
        """Gibbs-sample entity i from its Gaussian conditional posterior."""
        # Observed entries where i appears as row (nz_r) or column (nz_c).
        nz_r = mask[:, i, :].nonzero()
        nz_c = mask[:, :, i].nonzero()
        nnz_r = nz_r[0].size
        nnz_c = nz_c[0].size
        nnz_all = nnz_r + nnz_c
        self.features[:nnz_r] = RE[nz_r]
        self.features[nnz_r:nnz_all] = RTE[nz_c]
        self.xi[:nnz_r] = X[:, i, :][nz_r]
        self.xi[nnz_r:nnz_all] = X[:, :, i][nz_c]
        _xi = self.xi[:nnz_all] * self.features[:nnz_all].T
        xi = numpy.sum(_xi, 1) / self.var_x
        _lambda = numpy.identity(self.n_dim) / var_e
        _lambda += numpy.dot(
            self.features[:nnz_all].T,
            self.features[:nnz_all]) / self.var_x
        inv_lambda = numpy.linalg.inv(_lambda)
        mu = numpy.dot(inv_lambda, xi)
        E[i] = multivariate_normal(mu, inv_lambda)

    def _sample_relations(self, X, mask, E, R, var_r, sample_idx=None):
        """Gibbs-sample (a subset of) relation matrices for one particle."""
        EXE = numpy.kron(E, E)
        if sample_idx is None:
            sample_idx = range(self.n_relations)
        for k in self.valid_relations:
            if k in sample_idx:
                if self.obs_sum[k] != 0:
                    self._sample_relation(X, mask, E, R, k, EXE, var_r)
                else:
                    # No observations: draw from the prior.
                    R[k] = numpy.random.normal(
                        0, var_r, size=[self.n_dim, self.n_dim])

    def _sample_relation(self, X, mask, E, R, k, EXE, var_r):
        """Gibbs-sample relation k from its Gaussian conditional posterior."""
        _lambda = numpy.identity(self.n_dim ** 2) / var_r
        xi = numpy.zeros(self.n_dim ** 2)
        kron = EXE[mask[k].flatten() == 1]
        if kron.shape[0] != 0:
            _lambda += numpy.dot(kron.T, kron)
            xi += numpy.sum(X[k, mask[k] == 1].flatten() * kron.T, 1)
        _lambda /= self.var_x
        inv_lambda = numpy.linalg.inv(_lambda)
        mu = numpy.dot(inv_lambda, xi) / self.var_x
        R[k] = multivariate_normal(
            mu, inv_lambda).reshape([self.n_dim, self.n_dim])
# Helper functions to generate predictor classes.
def _logistic_regression() -> type:
    """Return a Predictor class wrapping sklearn LogisticRegression."""
    return from_class(sklearn.linear_model.LogisticRegression)
def _linear_regression() -> type:
    """Return a Predictor class wrapping sklearn LinearRegression."""
    return from_class(sklearn.linear_model.LinearRegression, regression=True)
def _logistic_regression_committee() -> type:
    """Return a factory building a Committee of logistic regressions."""
    def make_committee(db, *args, **kwargs):
        return Committee(_logistic_regression(), db, *args, **kwargs)

    return make_committee
def _kde() -> type:
    """Return a Predictor class wrapping the acton KDE classifier."""
    return from_class(acton.kde_predictor.KDEClassifier)
# Registry mapping predictor names (as used on the command line / config)
# to Predictor classes or factories.
PREDICTORS = {
    'LogisticRegression': _logistic_regression(),
    'LogisticRegressionCommittee': _logistic_regression_committee(),
    'LinearRegression': _linear_regression(),
    'KDE': _kde(),
    'GPC': GPClassifier,
    'TensorPredictor': TensorPredictor,
}
| chengsoonong/acton | acton/predictors.py | Python | bsd-3-clause | 26,833 | [
"Gaussian"
] | 8e74b5c356cca0328f890651985ad1d9cf9c69dc3b7a0daf3ff6a79d1c17e143 |
from com.im.lac.types import MoleculeObjectIterable
from com.im.lac.util import StreamProvider
from com.im.lac.util import CloseableMoleculeObjectQueue
from com.im.lac.types import MoleculeObject
from java import lang
import sys
from java.util import ArrayList
from java.util.stream import Stream
from java.lang import Thread, InterruptedException
from java.lang import Class
lang.System.loadLibrary('GraphMolWrap')
# Pull it in as a stream of string
from org.RDKit import *
from mol_parsing.rdkit_parse import get_or_create_rdmol, parse_mol_obj
# this gets the body converted to a Iterator of MoleculeObjects
def read_in():
counter = 0
while mols.hasNext():
counter +=1
molobj = mols.next()
# Now get the molecule
rdmol, molobj = get_or_create_rdmol(molobj)
if not rdmol:
print molobj.getSource()
continue
# Put this representation to the function
molobj.putValue("me", counter)
# Add to the queue
out_mols_here.add(molobj)
# Close the queue to stop the blocking
out_mols_here.close()
class ObjReadThread(Thread):
    """Java thread that runs read_in() in the background.

    NOTE(review): Thread.stop() is deprecated in Java, and the bare
    except swallows the real error; the queue is closed on failure so
    the consumer is at least not blocked forever.
    """

    def run(self):
        try:
            read_in()
            self.stop()
        except:
            out_mols_here.close()
            self.stop()
            # raise
# Get the previous body and set the next one
provider = request.getBody(StreamProvider)
if provider:
print "found a provider"
mols = provider.getStream().iterator()
else:
mols = request.getBody(MoleculeObjectIterable)
if not mols:
provider = request.getBody(Stream)
if provider:
mols = provider.getStream().iterator()
if mols:
out_mols_here = CloseableMoleculeObjectQueue(40)
request.setBody(out_mols_here)
my_thread = ObjReadThread()
my_thread.start()
else:
print "can't convert. found " + request.getBody().getClass().getName() | InformaticsMatters/squonk | components/rdkit-camel/src/main/python/molecule_objects.py | Python | apache-2.0 | 1,872 | [
"RDKit"
] | 0c8549ccdd15ffb166410271570764996f1dfac64c254834bcc3081fc068dd88 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
class SalesOrder(SellingController):
    """Controller for the Sales Order doctype.

    Handles validation (dates, PO, items, warehouses, linked Quotations),
    the submit/cancel lifecycle (including stock reservation via Bin), and
    the stop/unstop workflow.
    """
    # Child-table doctype / fieldname conventions used by the base controllers.
    tname = 'Sales Order Item'
    fname = 'sales_order_details'
    person_tname = 'Target Detail'
    partner_tname = 'Partner Target Detail'
    territory_tname = 'Territory Target Detail'

    def validate_mandatory(self):
        """Ensure the expected delivery date is not before the order date."""
        # validate transaction date v/s delivery date
        if self.delivery_date:
            if getdate(self.transaction_date) > getdate(self.delivery_date):
                frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))

    def validate_po(self):
        """Validate customer PO: date ordering and duplicate PO numbers.

        A duplicate PO number for the same customer only raises a warning
        (msgprint), not an error.
        """
        # validate p.o date v/s delivery date
        if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
            frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))

        if self.po_no and self.customer:
            so = frappe.db.sql("select name from `tabSales Order` \
                where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
                and customer = %s", (self.po_no, self.name, self.customer))
            if so and so[0][0]:
                frappe.msgprint(_("Warning: Sales Order {0} already exists against same Purchase Order number").format(so[0][0]))

    def validate_for_items(self):
        """Per-item checks: reserved warehouse for stock items, duplicate
        detection, and projected-qty lookup from Bin.

        Stock items are considered duplicates when item/description/warehouse/
        source-quotation all match; non-stock items when item/description match.
        """
        check_list, flag = [], 0
        chk_dupl_itm = []
        for d in self.get('sales_order_details'):
            e = [d.item_code, d.description, d.warehouse, d.prevdoc_docname or '']
            f = [d.item_code, d.description]

            if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 'Yes':
                if not d.warehouse:
                    frappe.throw(_("Reserved warehouse required for stock item {0}").format(d.item_code))

                if e in check_list:
                    frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
                else:
                    check_list.append(e)
            else:
                if f in chk_dupl_itm:
                    frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
                else:
                    chk_dupl_itm.append(f)

            # used for production plan
            d.transaction_date = self.transaction_date

            tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
                where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
            d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0

    def validate_sales_mntc_quotation(self):
        """Warn when a linked Quotation's order_type differs from this order's."""
        for d in self.get('sales_order_details'):
            if d.prevdoc_docname:
                res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type))
                if not res:
                    frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))

    def validate_order_type(self):
        # Delegates to SellingController; kept as an explicit hook point.
        super(SalesOrder, self).validate_order_type()

    def validate_delivery_date(self):
        """Require a delivery date for 'Sales' orders, then cross-check
        linked quotations."""
        if self.order_type == 'Sales' and not self.delivery_date:
            frappe.throw(_("Please enter 'Expected Delivery Date'"))

        self.validate_sales_mntc_quotation()

    def validate_proj_cust(self):
        """Ensure the selected Project belongs to this customer (or to none)."""
        if self.project_name and self.customer_name:
            res = frappe.db.sql("""select name from `tabProject` where name = %s
                and (customer = %s or ifnull(customer,'')='')""",
                (self.project_name, self.customer))
            if not res:
                frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))

    def validate(self):
        """Full validation pipeline; also builds the packed-item list and
        defaults status/billing/delivery flags."""
        super(SalesOrder, self).validate()

        self.validate_order_type()
        self.validate_delivery_date()
        self.validate_mandatory()
        self.validate_proj_cust()
        self.validate_po()
        self.validate_uom_is_integer("stock_uom", "qty")
        self.validate_for_items()
        self.validate_warehouse()

        from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
        make_packing_list(self,'sales_order_details')

        self.validate_with_previous_doc()

        if not self.status:
            self.status = "Draft"

        from erpnext.utilities import validate_status
        validate_status(self.status, ["Draft", "Submitted", "Stopped",
            "Cancelled"])

        if not self.billing_status: self.billing_status = 'Not Billed'
        if not self.delivery_status: self.delivery_status = 'Not Delivered'

    def validate_warehouse(self):
        """Every warehouse used by the items must belong to this company."""
        from erpnext.stock.utils import validate_warehouse_company

        warehouses = list(set([d.warehouse for d in
            self.get(self.fname) if d.warehouse]))

        for w in warehouses:
            validate_warehouse_company(w, self.company)

    def validate_with_previous_doc(self):
        """Company/currency of each item's source Quotation must match."""
        super(SalesOrder, self).validate_with_previous_doc(self.tname, {
            "Quotation": {
                "ref_dn_field": "prevdoc_docname",
                "compare_fields": [["company", "="], ["currency", "="]]
            }
        })

    def update_enquiry_status(self, prevdoc, flag):
        """Set the status of the Opportunity behind the given Quotation."""
        enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
        if enq:
            frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))

    def update_prevdoc_status(self, flag):
        """Refresh the status of every source Quotation; refuse to proceed
        if any of them is cancelled."""
        for quotation in list(set([d.prevdoc_docname for d in self.get(self.fname)])):
            if quotation:
                doc = frappe.get_doc("Quotation", quotation)
                if doc.docstatus==2:
                    frappe.throw(_("Quotation {0} is cancelled").format(quotation))

                doc.set_status(update=True)

    def on_submit(self):
        """Submit hook: reserve stock, run credit/authority checks, update
        linked quotations, and mark the order Submitted."""
        self.update_stock_ledger(update_stock = 1)

        self.check_credit(self.grand_total)

        frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.grand_total, self)

        self.update_prevdoc_status('submit')
        frappe.db.set(self, 'status', 'Submitted')

    def on_cancel(self):
        """Cancel hook: block cancelling stopped orders, verify no submitted
        downstream documents exist, release reserved stock."""
        # Cannot cancel stopped SO
        if self.status == 'Stopped':
            frappe.throw(_("Stopped order cannot be cancelled. Unstop to cancel."))

        self.check_nextdoc_docstatus()
        self.update_stock_ledger(update_stock = -1)

        self.update_prevdoc_status('cancel')
        frappe.db.set(self, 'status', 'Cancelled')

    def check_nextdoc_docstatus(self):
        """Throw if any submitted downstream document (Delivery Note, Sales
        Invoice, Maintenance Schedule/Visit, Production Order) references
        this order."""
        # Checks Delivery Note
        submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
            where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
        if submit_dn:
            frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))

        # Checks Sales Invoice
        submit_rv = frappe.db.sql_list("""select t1.name
            from `tabSales Invoice` t1,`tabSales Invoice Item` t2
            where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
            self.name)
        if submit_rv:
            frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))

        #check maintenance schedule
        submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
            `tabMaintenance Schedule Item` t2
            where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)
        if submit_ms:
            frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))

        # check maintenance visit
        submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
            where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
        if submit_mv:
            frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))

        # check production order
        pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
            where sales_order = %s and docstatus = 1""", self.name)
        if pro_order:
            frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))

    def check_modified_date(self):
        """Optimistic-concurrency check: throw if the DB row was modified
        after the in-memory copy was loaded.

        NOTE(review): the timestamps are %-interpolated into the SQL string
        instead of being passed as parameters -- values come from the DB and
        the document itself, but parameterizing would be safer.
        """
        mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
        date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
            ( mod_db, cstr(self.modified)))

        if date_diff and date_diff[0][0]:
            frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))

    def stop_sales_order(self):
        """Stop the order: release reserved stock and set status = Stopped."""
        self.check_modified_date()
        self.update_stock_ledger(-1)
        frappe.db.set(self, 'status', 'Stopped')
        frappe.msgprint(_("{0} {1} status is Stopped").format(self.doctype, self.name))

    def unstop_sales_order(self):
        """Undo a stop: re-reserve stock and set status back to Submitted."""
        self.check_modified_date()
        self.update_stock_ledger(1)
        frappe.db.set(self, 'status', 'Submitted')
        frappe.msgprint(_("{0} {1} status is Unstopped").format(self.doctype, self.name))

    def update_stock_ledger(self, update_stock):
        """Adjust reserved qty in Bin for every stock item.

        update_stock is +1 to reserve and -1 to release; it multiplies each
        item's reserved quantity.
        """
        from erpnext.stock.utils import update_bin
        for d in self.get_item_list():
            if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
                args = {
                    "item_code": d['item_code'],
                    "warehouse": d['reserved_warehouse'],
                    "reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
                    "posting_date": self.transaction_date,
                    "voucher_type": self.doctype,
                    "voucher_no": self.name,
                    "is_amended": self.amended_from and 'Yes' or 'No'
                }
                update_bin(args)

    def on_update(self):
        # Intentionally empty: no extra work on save.
        pass

    def get_portal_page(self):
        """Portal route for this document; only submitted orders are visible."""
        return "order" if self.docstatus==1 else None
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
    """Map a submitted Sales Order onto a new Material Request (Purchase).

    Args:
        source_name: name of the source Sales Order.
        target_doc: optional existing target document to merge into.
    Returns:
        The mapped (unsaved) Material Request document.
    """
    def set_request_type(source, doc):
        # Post-mapping hook: requests raised from sales are purchases.
        doc.material_request_type = "Purchase"

    mapping = {
        "Sales Order": {
            "doctype": "Material Request",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Material Request Item",
            "field_map": {
                "parent": "sales_order_no",
                "stock_uom": "uom"
            }
        }
    }
    return get_mapped_doc("Sales Order", source_name, mapping, target_doc, set_request_type)
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
    """Create a Delivery Note draft covering the undelivered portion of a
    submitted Sales Order.

    Only items with delivered_qty < qty are mapped; taxes and sales team are
    copied when the target has none.
    """
    def finalize(source, target):
        # Prices were already negotiated on the order; don't re-price.
        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")
        target.run_method("calculate_taxes_and_totals")

    def set_pending_qty(source, target, source_parent):
        # Carry over only what is still undelivered.
        pending = flt(source.qty) - flt(source.delivered_qty)
        target.base_amount = pending * flt(source.base_rate)
        target.amount = pending * flt(source.rate)
        target.qty = pending

    mapping = {
        "Sales Order": {
            "doctype": "Delivery Note",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Delivery Note Item",
            "field_map": {
                "rate": "rate",
                "name": "prevdoc_detail_docname",
                "parent": "against_sales_order",
            },
            "postprocess": set_pending_qty,
            "condition": lambda doc: doc.delivered_qty < doc.qty
        },
        "Sales Taxes and Charges": {
            "doctype": "Sales Taxes and Charges",
            "add_if_empty": True
        },
        "Sales Team": {
            "doctype": "Sales Team",
            "add_if_empty": True
        }
    }
    return get_mapped_doc("Sales Order", source_name, mapping, target_doc, finalize)
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
    """Create a Sales Invoice draft for the unbilled portion of a submitted
    Sales Order.

    Items are included when nothing has been billed yet (base_amount == 0)
    or when the billed amount is still below the order amount.
    """
    def finalize(source, target):
        target.is_pos = 0
        # Prices come from the order; don't re-apply pricing rules.
        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")
        target.run_method("calculate_taxes_and_totals")

    def set_unbilled(source, target, source_parent):
        unbilled = flt(source.amount) - flt(source.billed_amt)
        target.amount = unbilled
        target.base_amount = unbilled * flt(source_parent.conversion_rate)
        # Back-compute qty from the unbilled amount; fall back to the full
        # qty when the item has no rate (original `and/or` idiom preserved).
        target.qty = source.rate and unbilled / flt(source.rate) or source.qty

    mapping = {
        "Sales Order": {
            "doctype": "Sales Invoice",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Sales Invoice Item",
            "field_map": {
                "name": "so_detail",
                "parent": "sales_order",
            },
            "postprocess": set_unbilled,
            "condition": lambda doc: doc.base_amount==0 or doc.billed_amt < doc.amount
        },
        "Sales Taxes and Charges": {
            "doctype": "Sales Taxes and Charges",
            "add_if_empty": True
        },
        "Sales Team": {
            "doctype": "Sales Team",
            "add_if_empty": True
        }
    }
    return get_mapped_doc("Sales Order", source_name, mapping, target_doc, finalize)
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
    """Map a submitted Sales Order to a Maintenance Schedule.

    Returns None (mirroring the original's implicit return) when a submitted
    schedule already references this order.
    """
    existing = frappe.db.sql("""select t1.name
        from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
        where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)
    if existing:
        return None

    mapping = {
        "Sales Order": {
            "doctype": "Maintenance Schedule",
            "field_map": {
                "name": "sales_order_no"
            },
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Maintenance Schedule Item",
            "field_map": {
                "parent": "prevdoc_docname"
            },
            "add_if_empty": True
        }
    }
    return get_mapped_doc("Sales Order", source_name, mapping, target_doc)
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
    """Map a submitted Sales Order to a Maintenance Visit.

    Returns None (mirroring the original's implicit return) when a fully
    completed, submitted visit already references this order.
    """
    completed = frappe.db.sql("""select t1.name
        from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
        where t2.parent=t1.name and t2.prevdoc_docname=%s
        and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
    if completed:
        return None

    mapping = {
        "Sales Order": {
            "doctype": "Maintenance Visit",
            "field_map": {
                "name": "sales_order_no"
            },
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Maintenance Visit Purpose",
            "field_map": {
                "parent": "prevdoc_docname",
                "parenttype": "prevdoc_doctype"
            },
            "add_if_empty": True
        }
    }
    return get_mapped_doc("Sales Order", source_name, mapping, target_doc)
| gangadharkadam/office_erp | erpnext/selling/doctype/sales_order/sales_order.py | Python | agpl-3.0 | 13,651 | [
"VisIt"
] | ba33e2822bc032ac031c391d693465f49b2585e7acefa20a143be5dd6b1f7dfd |
"""Defines the HTTPError class.
Copyright 2013 by Rackspace Hosting, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import sys
if sys.version_info < (2, 7): # pragma: no cover
from ordereddict import OrderedDict
else: # pragma: no cover
from collections import OrderedDict
from falcon import util
class HTTPError(Exception):
    """Represents a generic HTTP error.

    Raise this or a child class to have Falcon automagically return pretty
    error responses (with an appropriate HTTP status code) to the client
    when something goes wrong.

    Attributes:
        status: HTTP status line, such as "748 Confounded by Ponies".
        title: Error title to send to the client.
        description: Description of the error to send to the client.
        headers: A dictionary of extra headers to add to the response.
        link: An href that the client can provide to the user for getting help.
        code: An internal application code that a user can reference when
            requesting support for the error.

    """

    __slots__ = (
        'status',
        'title',
        'description',
        'headers',
        'link',
        'code'
    )

    def __init__(self, status, title, description=None, headers=None,
                 href=None, href_text=None, code=None):
        """Initialize with information that can be reported to the client

        Falcon will catch instances of HTTPError (and subclasses), then use
        the associated information to generate a nice response for the client.

        Args:
            status: HTTP status code and text, such as "400 Bad Request"
            title: Human-friendly error title. Set to None if you wish Falcon
                to return an empty response body (all remaining args will
                be ignored except for headers.) Do this only when you don't
                wish to disclose sensitive information about why a request was
                refused, or if the status and headers are self-descriptive.
            description: Human-friendly description of the error, along with a
                helpful suggestion or two (default None).
            headers: A dictionary of extra headers to return in the
                response to the client (default None).
            href: A URL someone can visit to find out more information
                (default None). Unicode characters are percent-encoded.
            href_text: If href is given, use this as the friendly
                title/description for the link (defaults to "API documentation
                for this error").
            code: An internal code that customers can reference in their
                support request or to help them when searching for knowledge
                base articles related to this error.

        """
        self.status = status
        self.title = title
        self.description = description
        self.headers = headers
        self.code = code

        if href:
            link = self.link = OrderedDict()
            # Fixed typo: was "API documention for this error", which
            # contradicted the default documented in the docstring above.
            link['text'] = (href_text or 'API documentation for this error')
            link['href'] = util.percent_escape(href)
            link['rel'] = 'help'
        else:
            self.link = None

    def json(self):
        """Returns a pretty JSON-encoded version of the exception

        Note: Excludes the HTTP status line, since the results of this call
        are meant to be returned in the body of an HTTP response.

        Returns:
            A JSON representation of the exception except the status line, or
            ``None`` if title was set to None.

        """
        # A None title is the "empty body" contract (see __init__ docstring).
        if self.title is None:
            return None

        # OrderedDict keeps the output field order stable:
        # title, description, code, link.
        obj = OrderedDict()
        obj['title'] = self.title

        if self.description:
            obj['description'] = self.description

        if self.code:
            obj['code'] = self.code

        if self.link:
            obj['link'] = self.link

        return json.dumps(obj, indent=4, separators=(',', ': '),
                          ensure_ascii=False)
| openilabs/falconlab | env/lib/python2.7/site-packages/falcon/http_error.py | Python | mit | 4,535 | [
"VisIt"
] | f66705aac4080dbb357cb770bd244edab68fdd49a57ac10f44ff75eda9de9819 |
# pylint: disable=arguments-differ
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import json
import analytics
from io import BytesIO
from django.db.models import Q, F
import pytz
import logging
import smtplib
import StringIO
import csv
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from django.core.mail.message import EmailMessage
from xmodule.modulestore.django import modulestore
from eventtracking import tracker
from courseware.courses import get_course_by_id
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE, EnrollStatusChange
from util.query import use_read_replica_if_available
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
from .exceptions import (
InvalidCartItem,
PurchasedCallbackException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
MultipleCouponsNotAllowedException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
ItemNotFoundInCartException
)
from shoppingcart.pdf import PDFInvoice
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
# The user is selecting what he/she wants to purchase.
('cart', 'cart'),
# The user has been sent to the external payment processor.
# At this point, the order should NOT be modified.
# If the user returns to the payment flow, he/she will start a new order.
('paying', 'paying'),
# The user has successfully purchased the items in the order.
('purchased', 'purchased'),
# The user's order has been refunded.
('refunded', 'refunded'),
# The user's order went through, but the order was erroneously left
# in 'cart'.
('defunct-cart', 'defunct-cart'),
# The user's order went through, but the order was erroneously left
# in 'paying'.
('defunct-paying', 'defunct-paying'),
)
# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
'cart': 'defunct-cart',
'paying': 'defunct-paying',
}
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk'])
class OrderTypes(object):
    """
    This class specify purchase OrderTypes.
    """
    # Single-seat purchase made by the buyer for themselves.
    PERSONAL = 'personal'
    # Bulk purchase (qty > 1 items) -- see Order.update_order_type.
    BUSINESS = 'business'

    # Django model-field `choices` tuple.
    ORDER_TYPES = (
        (PERSONAL, 'personal'),
        (BUSINESS, 'business'),
    )
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
class Meta(object):
app_label = "shoppingcart"
user = models.ForeignKey(User, db_index=True)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
# bulk purchase registration code workflow billing details
company_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_email = models.CharField(max_length=255, null=True, blank=True)
recipient_name = models.CharField(max_length=255, null=True, blank=True)
recipient_email = models.CharField(max_length=255, null=True, blank=True)
customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
    @classmethod
    def get_cart_for_user(cls, user):
        """
        Always use this to preserve the property that at most 1 order per user has status = 'cart'
        """
        # find the newest element in the db
        try:
            cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
        except ObjectDoesNotExist:
            # if nothing exists in the database, create a new cart
            cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
        return cart_order
    @classmethod
    def does_user_have_cart(cls, user):
        """
        Returns a boolean whether a shopping cart (Order) exists for the specified user
        """
        # Existence check only; does not create a cart (unlike get_cart_for_user).
        return cls.objects.filter(user=user, status='cart').exists()
    @classmethod
    def user_cart_has_items(cls, user, item_types=None):
        """
        Returns true if the user (anonymous user ok) has
        a cart with items in it. (Which means it should be displayed.
        If a item_type is passed in, then we check to see if the cart has at least one of
        those types of OrderItems
        """
        # Anonymous users can never have a cart.
        if not user.is_authenticated():
            return False
        cart = cls.get_cart_for_user(user)

        if not item_types:
            # check to see if the cart has at least some item in it
            return cart.has_items()
        else:
            # if the caller is explicitly asking to check for particular types
            for item_type in item_types:
                if cart.has_items(item_type):
                    return True

        return False
    @classmethod
    def remove_cart_item_from_order(cls, item, user):
        """
        Removes the item from the cart if the item.order.status == 'cart'.
        Also removes any code redemption associated with the order_item

        No-op for items on orders that have left the 'cart' state.
        """
        if item.order.status == 'cart':
            log.info("order item %s removed for user %s", str(item.id), user)
            item.delete()
            # remove any redemption entry associated with the item
            CouponRedemption.remove_code_redemption_from_item(item, user)
    @property
    def total_cost(self):
        """
        Return the total cost of the cart. If the order has been purchased, returns total of
        all purchased and not refunded items.
        """
        # Only items whose status matches the order's own status are counted.
        return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status))
    def has_items(self, item_type=None):
        """
        Does the cart have any items in it?
        If an item_type is passed in then we check to see if there are any items of that class type
        """
        if not item_type:
            return self.orderitem_set.exists()
        else:
            # select_subclasses() (model_utils InheritanceManager) resolves
            # each row to its concrete OrderItem subclass for isinstance().
            items = self.orderitem_set.all().select_subclasses()
            for item in items:
                if isinstance(item, item_type):
                    return True
            return False
    def reset_cart_items_prices(self):
        """
        Reset the items price state in the user cart

        Restores each discounted item's unit_cost back to its list_price
        (e.g. after a coupon is removed).
        """
        for item in self.orderitem_set.all():
            if item.is_discounted:
                item.unit_cost = item.list_price
                item.save()
    def clear(self):
        """
        Clear out all the items in the cart
        """
        # Deletes the OrderItem rows; the Order row itself survives.
        self.orderitem_set.all().delete()
    @transaction.atomic
    def start_purchase(self):
        """
        Start the purchase process. This will set the order status to "paying",
        at which point it should no longer be modified.

        Future calls to `Order.get_cart_for_user()` will filter out orders with
        status "paying", effectively creating a new (empty) cart.
        """
        # Only a 'cart' order can transition; any other status is a no-op.
        if self.status == 'cart':
            self.status = 'paying'
            self.save()

            # Let each concrete item type run its own start-of-purchase hook.
            for item in OrderItem.objects.filter(order=self).select_subclasses():
                item.start_purchase()
    def update_order_type(self):
        """
        updating order type. This method will inspect the quantity associated with the OrderItem.
        In the application, it is implied that when qty > 1, then the user is to purchase
        'RegistrationCodes' which are randomly generated strings that users can distribute to
        others in order for them to enroll in paywalled courses.

        The UI/UX may change in the future to make the switching between PaidCourseRegistration
        and CourseRegCodeItems a more explicit UI gesture from the purchaser

        Returns a list of {"oldId": ..., "newId": ...} dicts mapping each
        converted item's old primary key to its replacement's.
        """
        cart_items = self.orderitem_set.all()
        is_order_type_business = False
        # Any single item with qty > 1 flips the whole order to business.
        for cart_item in cart_items:
            if cart_item.qty > 1:
                is_order_type_business = True

        items_to_delete = []
        old_to_new_id_map = []
        if is_order_type_business:
            # Convert PaidCourseRegistration items into CourseRegCodeItems,
            # carrying over any coupon-discounted prices.
            for cart_item in cart_items:
                if hasattr(cart_item, 'paidcourseregistration'):
                    course_reg_code_item = CourseRegCodeItem.add_to_order(
                        self, cart_item.paidcourseregistration.course_id, cart_item.qty,
                    )
                    # update the discounted prices if coupon redemption applied
                    course_reg_code_item.list_price = cart_item.list_price
                    course_reg_code_item.unit_cost = cart_item.unit_cost
                    course_reg_code_item.save()
                    items_to_delete.append(cart_item)
                    old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
        else:
            # Reverse conversion: CourseRegCodeItems back to single-seat
            # PaidCourseRegistration items.
            for cart_item in cart_items:
                if hasattr(cart_item, 'courseregcodeitem'):
                    paid_course_registration = PaidCourseRegistration.add_to_order(
                        self, cart_item.courseregcodeitem.course_id,
                    )
                    # update the discounted prices if coupon redemption applied
                    paid_course_registration.list_price = cart_item.list_price
                    paid_course_registration.unit_cost = cart_item.unit_cost
                    paid_course_registration.save()
                    items_to_delete.append(cart_item)
                    old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})

        # Remove the superseded items only after their replacements are saved.
        for item in items_to_delete:
            item.delete()

        self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
        self.save()
        return old_to_new_id_map
    def generate_pdf_receipt(self, order_items):
        """
        Generates the pdf receipt for the given order_items
        and returns the pdf_buffer.

        The buffer is a BytesIO written by PDFInvoice.generate_pdf; the
        balance is always 0 because payment_received equals total_cost.
        """
        items_data = []
        for item in order_items:
            item_total = item.qty * item.unit_cost
            items_data.append({
                'item_description': item.pdf_receipt_display_name,
                'quantity': item.qty,
                'list_price': item.get_list_price(),
                # Per-unit discount: list price minus the actual unit cost.
                'discount': item.get_list_price() - item.unit_cost,
                'item_total': item_total
            })
        pdf_buffer = BytesIO()

        PDFInvoice(
            items_data=items_data,
            item_id=str(self.id),
            date=self.purchase_time,
            is_invoice=False,
            total_cost=self.total_cost,
            payment_received=self.total_cost,
            balance=0
        ).generate_pdf(pdf_buffer)
        return pdf_buffer
    def generate_registration_codes_csv(self, orderitems, site_name):
        """
        this function generates the csv file

        Returns (csv_file, course_info): a StringIO with one
        (Course Name, Registration Code, URL) row per code belonging to this
        order, and a list of (display_name, " (start-end)") tuples.
        """
        course_info = []
        csv_file = StringIO.StringIO()
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
        for item in orderitems:
            course_id = item.course_id
            course = get_course_by_id(item.course_id, depth=0)
            registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
            course_info.append((course.display_name, ' (' + course.start_datetime_text() + '-' + course.end_datetime_text() + ')'))
            for registration_code in registration_codes:
                redemption_url = reverse('register_code_redemption', args=[registration_code.code])
                url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
                csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
        return csv_file, course_info
    def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, courses_info):
        """
        send confirmation e-mail

        One e-mail per recipient (purchaser, plus optional company contact
        and optional designated recipient). Business orders get an HTML body
        with the registration-code CSV and PDF receipt attached; personal
        orders get plain text. Mail failures are logged, not raised.
        """
        recipient_list = [(self.user.username, self.user.email, 'user')]  # pylint: disable=no-member
        if self.company_contact_email:
            recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
        joined_course_names = ""
        if self.recipient_email:
            recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
            courses_names_with_dates = [course_info[0] + course_info[1] for course_info in courses_info]
            joined_course_names = " " + ", ".join(courses_names_with_dates)

        if not is_order_type_business:
            subject = _("Order Payment Confirmation")
        else:
            subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
                course_name_list=joined_course_names
            )

        dashboard_url = '{base_url}{dashboard}'.format(
            base_url=site_name,
            dashboard=reverse('dashboard')
        )
        try:
            # Site configuration may override the platform's from-address.
            from_address = configuration_helpers.get_value(
                'email_from_address',
                settings.PAYMENT_SUPPORT_EMAIL
            )
            # Send a unique email for each recipient. Don't put all email addresses in a single email.
            for recipient in recipient_list:
                message = render_to_string(
                    'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
                    {
                        'order': self,
                        'recipient_name': recipient[0],
                        'recipient_type': recipient[2],
                        'site_name': site_name,
                        'order_items': orderitems,
                        'course_names': ", ".join([course_info[0] for course_info in courses_info]),
                        'dashboard_url': dashboard_url,
                        'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
                        'order_placed_by': '{username} ({email})'.format(
                            username=self.user.username, email=self.user.email
                        ),
                        'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
                        'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME),
                        'payment_support_email': configuration_helpers.get_value(
                            'payment_support_email', settings.PAYMENT_SUPPORT_EMAIL,
                        ),
                        'payment_email_signature': configuration_helpers.get_value('payment_email_signature'),
                    }
                )
                email = EmailMessage(
                    subject=subject,
                    body=message,
                    from_email=from_address,
                    to=[recipient[1]]
                )

                # Only the business order is HTML formatted. A single seat order confirmation is plain text.
                if is_order_type_business:
                    email.content_subtype = "html"

                if csv_file:
                    email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
                if pdf_file is not None:
                    email.attach(u'ReceiptOrder{}.pdf'.format(str(self.id)), pdf_file.getvalue(), 'application/pdf')
                else:
                    # PDF generation failed upstream; attach an apology note instead.
                    file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
                    email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
                email.send()
        except (smtplib.SMTPException, BotoServerError):  # sadly need to handle diff. mail backends individually
            log.error('Failed sending confirmation e-mail for order %d', self.id)
    def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
                 country='', ccnum='', cardtype='', processor_reply_dump=''):
        """
        Call to mark this order as purchased. Iterates through its OrderItems and calls
        their purchased_callback
        `first` - first name of person billed (e.g. John)
        `last` - last name of person billed (e.g. Smith)
        `street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
        `street2` - second line of a street address of the billing address (e.g. Suite 101)
        `city` - city of the billing address (e.g. Cambridge)
        `state` - code of the state, province, or territory of the billing address (e.g. MA)
        `postalcode` - postal code of the billing address (e.g. 02142)
        `country` - country code of the billing address (e.g. US)
        `ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
        `cardtype` - 3-digit code representing the card type used (e.g. 001)
        `processor_reply_dump` - all the parameters returned by the processor
        """
        # Idempotency guard: a duplicate call (e.g. a repeated processor
        # callback) is logged and ignored rather than double-fulfilling.
        if self.status == 'purchased':
            log.error(
                u"`purchase` method called on order {}, but order is already purchased.".format(self.id) # pylint: disable=no-member
            )
            return
        self.status = 'purchased'
        self.purchase_time = datetime.now(pytz.utc)
        self.bill_to_first = first
        self.bill_to_last = last
        self.bill_to_city = city
        self.bill_to_state = state
        self.bill_to_country = country
        self.bill_to_postalcode = postalcode
        # PII-ish billing details (street, card digits, raw processor reply)
        # are persisted only when the deployment opts in via STORE_BILLING_INFO.
        # NOTE(review): processor_reply_dump is also gated by this flag — confirm
        # that is intentional and not just the street/card fields.
        if settings.FEATURES['STORE_BILLING_INFO']:
            self.bill_to_street1 = street1
            self.bill_to_street2 = street2
            self.bill_to_ccnum = ccnum
            self.bill_to_cardtype = cardtype
            self.processor_reply_dump = processor_reply_dump
        # save these changes on the order, then we can tell when we are in an
        # inconsistent state
        self.save()
        # this should return all of the objects with the correct types of the
        # subclasses
        orderitems = OrderItem.objects.filter(order=self).select_subclasses()
        site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
        if self.order_type == OrderTypes.BUSINESS:
            self.update_order_type()
        # Fulfill each line item (sets item status to 'purchased' and runs its
        # purchased_callback) before any receipts/emails are generated.
        for item in orderitems:
            item.purchase_item()
        csv_file = None
        courses_info = []
        if self.order_type == OrderTypes.BUSINESS:
            #
            # Generate the CSV file that contains all of the RegistrationCodes that have already been
            # generated when the purchase has transacted
            #
            csv_file, courses_info = self.generate_registration_codes_csv(orderitems, site_name)
        # Receipt PDF and confirmation emails are best-effort: failures are
        # logged but must never roll back an already-completed purchase.
        try:
            pdf_file = self.generate_pdf_receipt(orderitems)
        except Exception: # pylint: disable=broad-except
            log.exception('Exception at creating pdf file.')
            pdf_file = None
        try:
            self.send_confirmation_emails(
                orderitems, self.order_type == OrderTypes.BUSINESS,
                csv_file, pdf_file, site_name, courses_info
            )
        except Exception: # pylint: disable=broad-except
            # Catch all exceptions here, since the Django view implicitly
            # wraps this in a transaction. If the order completes successfully,
            # we don't want to roll back just because we couldn't send
            # the confirmation email.
            log.exception('Error occurred while sending payment confirmation email')
        self._emit_order_event('Completed Order', orderitems)
def refund(self):
"""
Refund the given order. As of right now, this just marks the order as refunded.
"""
self.status = 'refunded'
self.save()
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
self._emit_order_event('Refunded Order', orderitems)
def _emit_order_event(self, event_name, orderitems):
"""
Emit an analytics event with the given name for this Order. Will iterate over all associated
OrderItems and add them as products in the event as well.
"""
try:
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(self.user.id, event_name, {
'orderId': self.id,
'total': str(self.total_cost),
'currency': self.currency,
'products': [item.analytics_data() for item in orderitems]
}, context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
})
except Exception: # pylint: disable=broad-except
# Capturing all exceptions thrown while tracking analytics events. We do not want
# an operation to fail because of an analytics event, so we will capture these
# errors in the logs.
log.exception(
u'Unable to emit {event} event for user {user} and order {order}'.format(
event=event_name, user=self.user.id, order=self.id)
)
def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
recipient_email='', customer_reference_number=''):
"""
This function is called after the user selects a purchase type of "Business" and
is asked to enter the optional billing details. The billing details are updated
for that order.
company_name - Name of purchasing organization
company_contact_name - Name of the key contact at the company the sale was made to
company_contact_email - Email of the key contact at the company the sale was made to
recipient_name - Name of the company should the invoice be sent to
recipient_email - Email of the company should the invoice be sent to
customer_reference_number - purchase order number of the organization associated with this Order
"""
self.company_name = company_name
self.company_contact_name = company_contact_name
self.company_contact_email = company_contact_email
self.recipient_name = recipient_name
self.recipient_email = recipient_email
self.customer_reference_number = customer_reference_number
self.save()
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
html instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
def retire(self):
"""
Method to "retire" orders that have gone through to the payment service
but have (erroneously) not had their statuses updated.
This method only works on orders that satisfy the following conditions:
1) the order status is either "cart" or "paying" (otherwise we raise
an InvalidStatusToRetire error)
2) the order's order item's statuses match the order's status (otherwise
we throw an UnexpectedOrderItemStatus error)
"""
# if an order is already retired, no-op:
if self.status in ORDER_STATUS_MAP.values():
return
if self.status not in ORDER_STATUS_MAP.keys():
raise InvalidStatusToRetire(
"order status {order_status} is not 'paying' or 'cart'".format(
order_status=self.status
)
)
for item in self.orderitem_set.all():
if item.status != self.status:
raise UnexpectedOrderItemStatus(
"order_item status is different from order status"
)
self.status = ORDER_STATUS_MAP[self.status]
self.save()
for item in self.orderitem_set.all():
item.retire()
def find_item_by_course_id(self, course_id):
"""
course_id: Course id of the item to find
Returns OrderItem from the Order given a course_id
Raises exception ItemNotFoundException when the item
having the given course_id is not present in the cart
"""
cart_items = OrderItem.objects.filter(order=self).select_subclasses()
found_items = []
for item in cart_items:
if getattr(item, 'course_id', None):
if item.course_id == course_id:
found_items.append(item)
if not found_items:
raise ItemNotFoundInCartException
return found_items
class OrderItem(TimeStampedModel):
    """
    This is the basic interface for order items.
    Order items are line items that fill up the shopping carts and orders.
    Each implementation of OrderItem should provide its own purchased_callback as
    a method.
    """
    class Meta(object):
        app_label = "shoppingcart"
    # InheritanceManager lets queries return subclass instances (select_subclasses()).
    objects = InheritanceManager()
    order = models.ForeignKey(Order, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
    user = models.ForeignKey(User, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
    status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
    qty = models.IntegerField(default=1)
    # Per-unit price actually charged (after any coupon discount).
    unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    # Pre-discount price; may be None on old records (see is_discounted below).
    list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
    line_desc = models.CharField(default="Misc. Item", max_length=1024)
    currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
    fulfilled_time = models.DateTimeField(null=True, db_index=True)
    refund_requested_time = models.DateTimeField(null=True, db_index=True)
    service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    # general purpose field, not user-visible. Used for reporting
    report_comments = models.TextField(default="")
    @property
    def line_cost(self):
        """ Return the total cost of this OrderItem """
        return self.qty * self.unit_cost
    @classmethod
    def add_to_order(cls, order, *args, **kwargs):
        """
        A suggested convenience function for subclasses.
        NOTE: This does not add anything to the cart. That is left up to the
        subclasses to implement for themselves
        """
        # this is a validation step to verify that the currency of the item we
        # are adding is the same as the currency of the order we are adding it
        # to
        currency = kwargs.get('currency', 'usd')
        if order.currency != currency and order.orderitem_set.exists():
            raise InvalidCartItem(_("Trying to add a different currency into the cart"))
    @transaction.atomic
    def purchase_item(self):
        """
        This is basically a wrapper around purchased_callback that handles
        modifying the OrderItem itself
        """
        # Run the subclass fulfillment hook first: if it raises, the atomic
        # block rolls back and the item is never marked purchased.
        self.purchased_callback()
        self.status = 'purchased'
        self.fulfilled_time = datetime.now(pytz.utc)
        self.save()
    def start_purchase(self):
        """
        Start the purchase process. This will set the order item status to "paying",
        at which point it should no longer be modified.
        """
        self.status = 'paying'
        self.save()
    def purchased_callback(self):
        """
        This is called on each inventory item in the shopping cart when the
        purchase goes through.
        """
        raise NotImplementedError
    def generate_receipt_instructions(self):
        """
        This is called on each item in a purchased order to generate receipt instructions.
        This should return a list of `ReceiptInstruction`s in HTML string
        Default implementation is to return an empty set
        """
        return self.pk_with_subclass, set([])
    @property
    def pk_with_subclass(self):
        """
        Returns a named tuple that annotates the pk of this instance with its class, to fully represent
        a pk of a subclass (inclusive) of OrderItem
        """
        return OrderItemSubclassPK(type(self), self.pk)
    @property
    def is_discounted(self):
        """
        Returns True if the item a discount coupon has been applied to the OrderItem and False otherwise.
        Earlier, the OrderItems were stored with an empty list_price if a discount had not been applied.
        Now we consider the item to be non discounted if list_price is None or list_price == unit_cost. In
        these lines, an item is discounted if it's non-None and list_price and unit_cost mismatch.
        This should work with both new and old records.
        """
        # NOTE: returns a truthy/falsy value (None, Decimal, or bool), not a
        # strict bool — callers use it only in boolean context.
        return self.list_price and self.list_price != self.unit_cost
    def get_list_price(self):
        """
        Returns the unit_cost if no discount has been applied, or the list_price if it is defined.
        """
        return self.list_price if self.list_price else self.unit_cost
    @property
    def single_item_receipt_template(self):
        """
        The template that should be used when there's only one item in the order
        """
        return 'shoppingcart/receipt.html'
    @property
    def single_item_receipt_context(self):
        """
        Extra variables needed to render the template specified in
        `single_item_receipt_template`
        """
        return {}
    def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
        """
        Individual instructions for this order item.
        Currently, only used for emails.
        """
        return ''
    @property
    def pdf_receipt_display_name(self):
        """
        How to display this item on a PDF printed receipt file.
        This can be overridden by the subclasses of OrderItem
        """
        course_key = getattr(self, 'course_id', None)
        if course_key:
            course = get_course_by_id(course_key, depth=0)
            return course.display_name
        else:
            raise Exception(
                "Not Implemented. OrderItems that are not Course specific should have"
                " a overridden pdf_receipt_display_name property"
            )
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        The default implementation returns defaults for most attributes. When no name or
        category is specified by the implementation, the string 'N/A' is placed for the
        name and category. This should be handled appropriately by all implementations.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        return {
            'id': self.id,
            'sku': type(self).__name__,
            'name': 'N/A',
            'price': str(self.unit_cost),
            'quantity': self.qty,
            'category': 'N/A',
        }
    def retire(self):
        """
        Called by the `retire` method defined in the `Order` class. Retires
        an order item if its (and its order's) status was erroneously not
        updated to "purchased" after the order was processed.
        """
        # Maps 'cart'/'paying' to their retired equivalents; raises KeyError
        # for any other status (Order.retire validates before calling this).
        self.status = ORDER_STATUS_MAP[self.status]
        self.save()
class Invoice(TimeStampedModel):
    """
    This table capture all the information needed to support "invoicing"
    which is when a user wants to purchase Registration Codes,
    but will not do so via a Credit Card transaction.
    """
    class Meta(object):
        app_label = "shoppingcart"
    company_name = models.CharField(max_length=255, db_index=True)
    company_contact_name = models.CharField(max_length=255)
    company_contact_email = models.CharField(max_length=255)
    recipient_name = models.CharField(max_length=255)
    recipient_email = models.CharField(max_length=255)
    address_line_1 = models.CharField(max_length=255)
    address_line_2 = models.CharField(max_length=255, null=True, blank=True)
    address_line_3 = models.CharField(max_length=255, null=True, blank=True)
    city = models.CharField(max_length=255, null=True)
    state = models.CharField(max_length=255, null=True)
    # NOTE: field name shadows the builtin `zip`; renaming would require a
    # schema migration, so it is left as-is.
    zip = models.CharField(max_length=15, null=True)
    country = models.CharField(max_length=64, null=True)
    # This field has been deprecated.
    # The total amount can now be calculated as the sum
    # of each invoice item associated with the invoice.
    # For backwards compatibility, this field is maintained
    # and written to during invoice creation.
    total_amount = models.FloatField()
    # This field has been deprecated in order to support
    # invoices for items that are not course-related.
    # Although this field is still maintained for backwards
    # compatibility, you should use CourseRegistrationCodeInvoiceItem
    # to look up the course ID for purchased redeem codes.
    course_id = CourseKeyField(max_length=255, db_index=True)
    internal_reference = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        help_text=ugettext_lazy("Internal reference code for this invoice.")
    )
    customer_reference_number = models.CharField(
        max_length=63,
        null=True,
        blank=True,
        help_text=ugettext_lazy("Customer's reference code for this invoice.")
    )
    is_valid = models.BooleanField(default=True)
    @classmethod
    def get_invoice_total_amount_for_course(cls, course_key):
        """
        returns the invoice total amount generated by course.
        """
        result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
        # aggregate() yields {'total': None} when no rows match, hence the
        # truthiness fallback to 0.
        total = result.get('total', 0)
        return total if total else 0
    def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
        """
        Generates the pdf invoice for the given course
        and returns the pdf_buffer.
        """
        # Division binds tighter than subtraction: this is
        # course_price - (sale_price / quantity), i.e. per-unit discount.
        # NOTE(review): mixes float(course_price) with sale_price's own type —
        # confirm callers pass compatible numeric types.
        discount_per_item = float(course_price) - sale_price / quantity
        list_price = course_price - discount_per_item
        items_data = [{
            'item_description': course.display_name,
            'quantity': quantity,
            'list_price': list_price,
            'discount': discount_per_item,
            'item_total': quantity * list_price
        }]
        pdf_buffer = BytesIO()
        PDFInvoice(
            items_data=items_data,
            item_id=str(self.id),
            date=datetime.now(pytz.utc),
            is_invoice=True,
            total_cost=float(self.total_amount),
            payment_received=0,
            balance=float(self.total_amount)
        ).generate_pdf(pdf_buffer)
        return pdf_buffer
    def snapshot(self):
        """Create a snapshot of the invoice.
        A snapshot is a JSON-serializable representation
        of the invoice's state, including its line items
        and associated transactions (payments/refunds).
        This is useful for saving the history of changes
        to the invoice.
        Returns:
            dict
        """
        return {
            'internal_reference': self.internal_reference,
            'customer_reference': self.customer_reference_number,
            'is_valid': self.is_valid,
            'contact_info': {
                'company_name': self.company_name,
                'company_contact_name': self.company_contact_name,
                'company_contact_email': self.company_contact_email,
                'recipient_name': self.recipient_name,
                'recipient_email': self.recipient_email,
                'address_line_1': self.address_line_1,
                'address_line_2': self.address_line_2,
                'address_line_3': self.address_line_3,
                'city': self.city,
                'state': self.state,
                'zip': self.zip,
                'country': self.country,
            },
            'items': [
                item.snapshot()
                for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
            ],
            'transactions': [
                trans.snapshot()
                for trans in InvoiceTransaction.objects.filter(invoice=self)
            ],
        }
    def __unicode__(self):
        """Human-readable label: internal reference (or placeholder) plus creation date."""
        label = (
            unicode(self.internal_reference)
            if self.internal_reference
            else u"No label"
        )
        created = (
            self.created.strftime("%Y-%m-%d")
            if self.created
            else u"No date"
        )
        return u"{label} ({date_created})".format(
            label=label, date_created=created
        )
# Choices for InvoiceTransaction.status (stored value, display value).
INVOICE_TRANSACTION_STATUSES = (
    # A payment/refund is in process, but money has not yet been transferred
    ('started', 'started'),
    # A payment/refund has completed successfully
    # This should be set ONLY once money has been successfully exchanged.
    ('completed', 'completed'),
    # A payment/refund was promised, but was cancelled before
    # money had been transferred. An example would be
    # cancelling a refund check before the recipient has
    # a chance to deposit it.
    ('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
    """Record payment and refund information for invoices.

    There are two expected use cases:
    1) We send an invoice to someone, and they send us a check.
       We then manually create an invoice transaction to represent
       the payment.
    2) We send an invoice to someone, and they pay us. Later, we
       need to issue a refund for the payment. We manually
       create a transaction with a negative amount to represent
       the refund.
    """
    class Meta(object):
        app_label = "shoppingcart"
    invoice = models.ForeignKey(Invoice)
    amount = models.DecimalField(
        default=0.0, decimal_places=2, max_digits=30,
        help_text=ugettext_lazy(
            "The amount of the transaction. Use positive amounts for payments"
            " and negative amounts for refunds."
        )
    )
    currency = models.CharField(
        default="usd",
        max_length=8,
        help_text=ugettext_lazy("Lower-case ISO currency codes")
    )
    comments = models.TextField(
        null=True,
        blank=True,
        help_text=ugettext_lazy("Optional: provide additional information for this transaction")
    )
    status = models.CharField(
        max_length=32,
        default='started',
        choices=INVOICE_TRANSACTION_STATUSES,
        help_text=ugettext_lazy(
            "The status of the payment or refund. "
            "'started' means that payment is expected, but money has not yet been transferred. "
            "'completed' means that the payment or refund was received. "
            "'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
        )
    )
    created_by = models.ForeignKey(User)
    last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user')
    @classmethod
    def get_invoice_transaction(cls, invoice_id):
        """
        Return the completed InvoiceTransaction for the given invoice_id,
        or None when no such transaction exists.
        """
        # NOTE(review): 'refunded' is not one of INVOICE_TRANSACTION_STATUSES,
        # so that clause can never match a row; it is preserved from the
        # original query for backward compatibility.
        # Use filter(...).first() instead of get(): get() raised
        # MultipleObjectsReturned whenever an invoice had more than one
        # completed transaction, instead of returning a transaction as this
        # method's contract promises.
        return cls.objects.filter(
            Q(invoice_id=invoice_id),
            Q(status='completed') | Q(status='refunded'),
        ).first()
    @classmethod
    def get_total_amount_of_paid_course_invoices(cls, course_key):
        """
        Return the total amount of the paid (completed, positive-amount)
        invoice transactions for the given course; 0 when there are none.
        """
        result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
            total=Sum(
                'amount',
                output_field=models.DecimalField(decimal_places=2, max_digits=30)
            )
        )
        # aggregate() yields {'total': None} when no rows match.
        total = result.get('total', 0)
        return total if total else 0
    def snapshot(self):
        """Create a snapshot of the invoice transaction.
        The returned dictionary is JSON-serializable.
        Returns:
            dict
        """
        return {
            'amount': unicode(self.amount),
            'currency': self.currency,
            'comments': self.comments,
            'status': self.status,
            'created_by': self.created_by.username,
            'last_modified_by': self.last_modified_by.username
        }
class InvoiceItem(TimeStampedModel):
    """
    This is the basic interface for invoice items.
    Each invoice item represents a "line" in the invoice.
    For example, in an invoice for course registration codes,
    there might be an invoice item representing 10 registration
    codes for the DemoX course.
    """
    class Meta(object):
        app_label = "shoppingcart"
    # InheritanceManager lets queries return subclass instances
    # (e.g. CourseRegistrationCodeInvoiceItem via select_subclasses()).
    objects = InheritanceManager()
    invoice = models.ForeignKey(Invoice, db_index=True)
    qty = models.IntegerField(
        default=1,
        help_text=ugettext_lazy("The number of items sold.")
    )
    unit_price = models.DecimalField(
        default=0.0,
        decimal_places=2,
        max_digits=30,
        help_text=ugettext_lazy("The price per item sold, including discounts.")
    )
    currency = models.CharField(
        default="usd",
        max_length=8,
        help_text=ugettext_lazy("Lower-case ISO currency codes")
    )
    def snapshot(self):
        """Create a snapshot of the invoice item.
        The returned dictionary is JSON-serializable.
        Returns:
            dict
        """
        # unit_price is a Decimal, which is not JSON-serializable; stringify it.
        return {
            'qty': self.qty,
            'unit_price': unicode(self.unit_price),
            'currency': self.currency
        }
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
    """
    This is an invoice item that represents a payment for
    a course registration.
    """
    class Meta(object):
        app_label = "shoppingcart"
    course_id = CourseKeyField(max_length=128, db_index=True)
    def snapshot(self):
        """Create a snapshot of the invoice item.
        This is the same as a snapshot for other invoice items,
        with the addition of a `course_id` field.
        Returns:
            dict
        """
        snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
        # CourseKey is not JSON-serializable; store its string form.
        snapshot['course_id'] = unicode(self.course_id)
        return snapshot
class InvoiceHistory(models.Model):
    """History of changes to invoices.
    This table stores snapshots of invoice state,
    including the associated line items and transactions
    (payments/refunds).
    Entries in the table are created, but never deleted
    or modified.
    We use Django signals to save history entries on change
    events. These signals are fired within a database
    transaction, so the history record is created only
    if the invoice change is successfully persisted.
    """
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
    invoice = models.ForeignKey(Invoice)
    # JSON-serialized representation of the current state
    # of the invoice, including its line items and
    # transactions (payments/refunds).
    snapshot = models.TextField(blank=True)
    @classmethod
    def save_invoice_snapshot(cls, invoice):
        """Save a snapshot of the invoice's current state.
        Arguments:
            invoice (Invoice): The invoice to save.
        """
        cls.objects.create(
            invoice=invoice,
            snapshot=json.dumps(invoice.snapshot())
        )
    @staticmethod
    def snapshot_receiver(sender, instance, **kwargs): # pylint: disable=unused-argument
        """Signal receiver that saves a snapshot of an invoice.
        Arguments:
            sender: Not used, but required by Django signals.
            instance (Invoice, InvoiceItem, or InvoiceTransaction)
        """
        # Changes to invoice-related models snapshot their parent invoice;
        # changes to an Invoice snapshot the invoice itself.
        if isinstance(instance, Invoice):
            InvoiceHistory.save_invoice_snapshot(instance)
        elif hasattr(instance, 'invoice'):
            InvoiceHistory.save_invoice_snapshot(instance.invoice)
    # Meta is declared after the methods here; position within the class
    # body has no effect on Django's handling of it.
    class Meta(object):
        get_latest_by = "timestamp"
        app_label = "shoppingcart"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
# post_delete only for transactions: invoices/items are never deleted via admin.
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
class CourseRegistrationCode(models.Model):
    """
    This table contains registration codes
    With registration code, a user can register for a course for free
    """
    class Meta(object):
        app_label = "shoppingcart"
    code = models.CharField(max_length=32, db_index=True, unique=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created_by = models.ForeignKey(User, related_name='created_by_user')
    created_at = models.DateTimeField(auto_now_add=True)
    # Set when the code was generated by a bulk purchase (see
    # order_generated_registration_codes); null for invoice-generated codes.
    order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
    mode_slug = models.CharField(max_length=100, null=True)
    is_valid = models.BooleanField(default=True)
    # For backwards compatibility, we maintain the FK to "invoice"
    # In the future, we will remove this in favor of the FK
    # to "invoice_item" (which can be used to look up the invoice).
    invoice = models.ForeignKey(Invoice, null=True)
    invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True)
    @classmethod
    def order_generated_registration_codes(cls, course_id):
        """
        Returns the registration codes that were generated
        via bulk purchase scenario.
        """
        return cls.objects.filter(order__isnull=False, course_id=course_id)
    @classmethod
    def invoice_generated_registration_codes(cls, course_id):
        """
        Returns the registration codes that were generated
        via invoice.
        """
        return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
    """
    This model contains the registration-code redemption info
    """
    class Meta(object):
        app_label = "shoppingcart"
    # Null for invoice-generated redemptions, which have no associated order
    # (see create_invoice_generated_registration_redemption).
    order = models.ForeignKey(Order, db_index=True, null=True)
    registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
    redeemed_by = models.ForeignKey(User, db_index=True)
    redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
    course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
    @classmethod
    def registration_code_used_for_enrollment(cls, course_enrollment):
        """
        Returns RegistrationCodeRedemption object if registration code
        has been used during the course enrollment else Returns None.
        """
        # theoretically there could be more than one (e.g. someone self-unenrolls
        # then re-enrolls with a different regcode)
        reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
        if reg_codes:
            # return the first one. In all normal use cases of registration codes
            # the user will only have one
            return reg_codes[0]
        return None
    @classmethod
    def is_registration_code_redeemed(cls, course_reg_code):
        """
        Checks the existence of the registration code
        in the RegistrationCodeRedemption
        """
        return cls.objects.filter(registration_code__code=course_reg_code).exists()
    @classmethod
    def get_registration_code_redemption(cls, code, course_id):
        """
        Returns the registration code redemption object if found else returns None.
        """
        # NOTE(review): .get() will raise MultipleObjectsReturned (uncaught) if
        # the same code/course pair was somehow redeemed twice — confirm the
        # data model guarantees uniqueness here.
        try:
            code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
        except cls.DoesNotExist:
            code_redemption = None
        return code_redemption
    @classmethod
    def create_invoice_generated_registration_redemption(cls, course_reg_code, user): # pylint: disable=invalid-name
        """
        This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
        and thus the order_id is missing.
        """
        code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
        code_redemption.save()
        return code_redemption
class SoftDeleteCouponManager(models.Manager):
    """Manager for Coupon that can restrict queries to is_active=True rows."""
    def get_queryset(self):
        """
        Return every coupon, active or not.
        """
        return super(SoftDeleteCouponManager, self).get_queryset()
    def get_active_coupons_queryset(self):
        """
        Return only the coupons that are still active (is_active=True).
        """
        base_queryset = super(SoftDeleteCouponManager, self).get_queryset()
        return base_queryset.filter(is_active=True)
class Coupon(models.Model):
    """
    This table contains coupon codes
    A user can get a discount offer on course if provide coupon code
    """
    class Meta(object):
        app_label = "shoppingcart"
    # Not unique: the same code may exist for different courses (db_index only).
    code = models.CharField(max_length=32, db_index=True)
    description = models.CharField(max_length=255, null=True, blank=True)
    course_id = CourseKeyField(max_length=255)
    percentage_discount = models.IntegerField(default=0)
    created_by = models.ForeignKey(User)
    created_at = models.DateTimeField(auto_now_add=True)
    # Soft-delete flag; see SoftDeleteCouponManager.
    is_active = models.BooleanField(default=True)
    expiration_date = models.DateTimeField(null=True, blank=True)
    def __unicode__(self):
        return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
    objects = SoftDeleteCouponManager()
    @property
    def display_expiry_date(self):
        """
        return the coupon expiration date in the readable format
        """
        # NOTE(review): displays one day BEFORE the stored expiration_date —
        # presumably expiration_date holds the first invalid day; confirm.
        return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
class CouponRedemption(models.Model):
    """
    This table contain coupon redemption info
    """
    class Meta(object):
        app_label = "shoppingcart"
    order = models.ForeignKey(Order, db_index=True)
    user = models.ForeignKey(User, db_index=True)
    coupon = models.ForeignKey(Coupon, db_index=True)
    @classmethod
    def remove_code_redemption_from_item(cls, item, user):
        """
        If an item removed from shopping cart then we will remove
        the corresponding redemption info of coupon code
        """
        order_item_course_id = item.course_id
        try:
            # Try to remove redemption information of coupon code, If exist.
            coupon_redemption = cls.objects.get(
                user=user,
                coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
                order=item.order_id
            )
            coupon_redemption.delete()
            log.info(
                u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
                coupon_redemption.coupon.code,
                user,
                str(item.id),
            )
        except CouponRedemption.DoesNotExist:
            # No redemption for this item — nothing to clean up.
            log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))
    @classmethod
    def remove_coupon_redemption_from_cart(cls, user, cart):
        """
        This method delete coupon redemption
        """
        coupon_redemption = cls.objects.filter(user=user, order=cart)
        if coupon_redemption:
            coupon_redemption.delete()
            log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)
    @classmethod
    def get_discount_price(cls, percentage_discount, value):
        """
        return discounted price against coupon
        """
        # NOTE(review): percentage_discount / 100.00 passes through binary
        # float before Decimal(); the "{0:.2f}" rounding masks the float
        # inexactness — confirm this is acceptable for currency math.
        discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
        return value - discount
    @classmethod
    def add_coupon_redemption(cls, coupon, order, cart_items):
        """
        add coupon info into coupon_redemption model
        """
        is_redemption_applied = False
        coupon_redemptions = cls.objects.filter(order=order, user=order.user)
        for coupon_redemption in coupon_redemptions:
            # Raise unless the existing redemption uses the same code on a
            # different coupon row (the same code can be issued per-course):
            # a different code, or re-applying this exact coupon, is rejected.
            if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
                # NOTE(review): log.exception outside an except block records
                # no useful traceback; log.error appears intended — confirm.
                log.exception(
                    u"Coupon redemption already exist for user '%s' against order id '%s'",
                    order.user.username,
                    order.id,
                )
                raise MultipleCouponsNotAllowedException
        for item in cart_items:
            if item.course_id:
                if item.course_id == coupon.course_id:
                    coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
                    coupon_redemption.save()
                    discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
                    # Preserve the pre-discount price in list_price, then
                    # charge the discounted amount.
                    item.list_price = item.unit_cost
                    item.unit_cost = discount_price
                    item.save()
                    log.info(
                        u"Discount generated for user %s against order id '%s'",
                        order.user.username,
                        order.id,
                    )
                    is_redemption_applied = True
                    # Applies to at most one matching item, then returns.
                    return is_redemption_applied
        return is_redemption_applied
    @classmethod
    def get_top_discount_codes_used(cls, course_id):
        """
        Returns the top discount codes used.
        QuerySet = [
            {
                'coupon__percentage_discount': 22,
                'coupon__code': '12',
                'coupon__used_count': '2',
            },
            {
                ...
            }
        ]
        """
        return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
            'coupon__code', 'coupon__percentage_discount'
        ).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')
    @classmethod
    def get_total_coupon_code_purchases(cls, course_id):
        """
        returns total seats purchases using coupon codes
        """
        return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
    """
    This is an inventory item for paying for a course registration
    """
    class Meta(object):
        app_label = "shoppingcart"
    # Course being paid for; indexed because most queries filter on it.
    course_id = CourseKeyField(max_length=128, db_index=True)
    # Enrollment mode being purchased (e.g. the default shoppingcart mode).
    mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
    # Set by purchased_callback() once the user is actually enrolled.
    course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
    @classmethod
    def get_self_purchased_seat_count(cls, course_key, status='purchased'):
        """
        returns the count of paid_course items filter by course_id and status.
        """
        return cls.objects.filter(course_id=course_key, status=status).count()
    @classmethod
    def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
        """
        Returns PaidCourseRegistration object if user has payed for
        the course enrollment else Returns None
        """
        try:
            # latest('id'): if several purchased rows exist, use the newest.
            return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
                                      status='purchased').latest('id')
        except PaidCourseRegistration.DoesNotExist:
            return None
    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        # select_subclasses() downcasts OrderItem rows; the isinstance check
        # keeps only PaidCourseRegistration entries.
        return course_id in [
            item.course_id
            for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
            if isinstance(item, cls)
        ]
    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
        """
        This will return the total amount of money that a purchased course generated
        """
        total_cost = 0
        # Sum qty * unit_cost over matching items as a fixed-point decimal.
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(
            total=Sum(
                F('qty') * F('unit_cost'),
                output_field=models.DecimalField(decimal_places=2, max_digits=30)
            )
        )
        # aggregate() yields None when no rows matched; report 0 instead.
        if result['total'] is not None:
            total_cost = result['total']
        return total_cost
    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
                     cost=None, currency=None):  # pylint: disable=arguments-differ
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item
        """
        # First a bunch of sanity checks:
        # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't.
        course = modulestore().get_course(course_id)
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException
        if cls.contained_in_order(order, course_id):
            log.warning(
                u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
                order.user.email,
                course_id,
                order.id,
            )
            raise ItemAlreadyInCartException
        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException
        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_MODE
            course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency
        super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
        item, __ = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
        item.status = order.status
        item.mode = course_mode.slug
        item.qty = 1
        item.unit_cost = cost
        item.list_price = cost
        item.line_desc = _(u'Registration for Course: {course_name}').format(
            course_name=course.display_name_with_default_escaped)
        item.currency = currency
        order.currency = currency
        # Snapshot the per-course annotation onto the item for reporting.
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))
        # Announce that a paid enrollment flow has started (not yet completed).
        CourseEnrollment.send_signal_full(EnrollStatusChange.paid_start,
                                          user=order.user, mode=item.mode, course_id=course_id,
                                          cost=cost, currency=currency)
        return item
    def purchased_callback(self):
        """
        When purchased, this should enroll the user in the course. We are assuming that
        course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
        in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
        would in fact be quite silly since there's a clear back door.
        """
        if not modulestore().has_course(self.course_id):
            msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
            log.error(msg)
            raise PurchasedCallbackException(msg)
        # enroll in course and link to the enrollment_id
        self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
        self.save()
        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))
        self.course_enrollment.send_signal(EnrollStatusChange.paid_complete,
                                           cost=self.line_cost, currency=self.currency)
    def generate_receipt_instructions(self):
        """
        Generates instructions when the user has purchased a PaidCourseRegistration.
        Basically tells the user to visit the dashboard to see their new classes
        """
        notification = _(
            u"Please visit your {link_start}dashboard{link_end} "
            u"to see your new course."
        ).format(
            link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
            link_end=u'</a>',
        )
        return self.pk_with_subclass, set([notification])
    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
        Otherwise returns the annotation
        """
        try:
            return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
        except PaidCourseRegistrationAnnotation.DoesNotExist:
            return u""
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        If the Order Item is associated with a course, additional fields will be populated with
        course information. If there is a mode associated, the mode data is included in the SKU.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(PaidCourseRegistration, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItem(OrderItem):
    """
    This is an inventory item for paying for
    generating course registration codes
    """
    class Meta(object):
        app_label = "shoppingcart"
    # Course the registration codes will be generated for.
    course_id = CourseKeyField(max_length=128, db_index=True)
    # Enrollment mode the generated codes will grant when redeemed.
    mode = models.SlugField(default=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
    @classmethod
    def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
        """
        returns the sum of bulk purchases seats.
        """
        total = 0
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
        # aggregate() yields None when no rows matched; report 0 instead.
        if result['total'] is not None:
            total = result['total']
        return total
    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        # select_subclasses() downcasts OrderItem rows; the isinstance check
        # keeps only CourseRegCodeItem entries.
        return course_id in [
            item.course_id
            for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
            if isinstance(item, cls)
        ]
    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
        """
        This will return the total amount of money that a purchased course generated
        """
        total_cost = 0
        # Sum qty * unit_cost over matching items as a fixed-point decimal.
        result = cls.objects.filter(course_id=course_key, status=status).aggregate(
            total=Sum(
                F('qty') * F('unit_cost'),
                output_field=models.DecimalField(decimal_places=2, max_digits=30)
            )
        )
        if result['total'] is not None:
            total_cost = result['total']
        return total_cost
    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG,
                     cost=None, currency=None):  # pylint: disable=arguments-differ
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item
        """
        # First a bunch of sanity checks:
        # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't.
        course = modulestore().get_course(course_id)
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException
        if cls.contained_in_order(order, course_id):
            log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
                        .format(order.user.email, course_id, order.id))
            raise ItemAlreadyInCartException
        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException
        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_SHOPPINGCART_MODE
            course_mode = CourseMode.DEFAULT_SHOPPINGCART_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency
        super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
        item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)  # pylint: disable=unused-variable
        item.status = order.status
        item.mode = course_mode.slug
        item.unit_cost = cost
        item.list_price = cost
        # Unlike PaidCourseRegistration, several seats may be bought at once.
        item.qty = qty
        item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
            course_name=course.display_name_with_default_escaped)
        item.currency = currency
        order.currency = currency
        # Snapshot the per-course annotation onto the item for reporting.
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))
        return item
    def purchased_callback(self):
        """
        The purchase is completed, this OrderItem type will generate Registration Codes that will
        be redeemed by users
        """
        if not modulestore().has_course(self.course_id):
            msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
            log.error(msg)
            raise PurchasedCallbackException(msg)
        total_registration_codes = int(self.qty)
        # we need to import here because of a circular dependency
        # we should ultimately refactor code to have save_registration_code in this models.py
        # file, but there's also a shared dependency on a random string generator which
        # is in another PR (for another feature)
        from instructor.views.api import save_registration_code
        # Generate one registration code per purchased seat.
        for i in range(total_registration_codes):  # pylint: disable=unused-variable
            save_registration_code(self.user, self.course_id, self.mode, order=self.order)
        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))
    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
        Otherwise returns the annotation
        """
        try:
            return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
        except CourseRegCodeItemAnnotation.DoesNotExist:
            return u""
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        If the OrderItem is associated with a course, additional fields will be populated with
        course information. If a mode is available, it will be included in the SKU.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CourseRegCodeItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItemAnnotation(models.Model):
    """
    A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates report for the paid courses, each report item must contain the payment account associated with a course.
    And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
    so this is to retrofit it.
    """
    class Meta(object):
        app_label = "shoppingcart"
    # At most one annotation per course (unique course_id).
    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    # Free-form annotation text attached to the course's report rows.
    annotation = models.TextField(null=True)
    def __unicode__(self):
        # pylint: disable=no-member
        return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
    """
    A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates report for the paid courses, each report item must contain the payment account associated with a course.
    And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
    so this is to retrofit it.
    """
    class Meta(object):
        app_label = "shoppingcart"
    # At most one annotation per course (unique course_id).
    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    # Free-form annotation text attached to the course's report rows.
    annotation = models.TextField(null=True)
    def __unicode__(self):
        # pylint: disable=no-member
        return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class CertificateItem(OrderItem):
    """
    This is an inventory item for purchasing certificates
    """
    class Meta(object):
        app_label = "shoppingcart"
    # Course the certificate is purchased for.
    course_id = CourseKeyField(max_length=128, db_index=True)
    # Enrollment whose mode will be upgraded when the purchase completes.
    course_enrollment = models.ForeignKey(CourseEnrollment)
    # Certificate mode purchased (e.g. 'verified').
    mode = models.SlugField()
    @receiver(UNENROLL_DONE)
    def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs):  # pylint: disable=no-self-argument,unused-argument
        """
        When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
        occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
        refund.
        Returns the refunded certificate on a successful refund; else, it returns nothing.
        """
        # Only refund verified cert unenrollments that are within bounds of the expiration date
        if (not course_enrollment.refundable()) or skip_refund:
            return
        # NOTE(review): user_id is filtered with a User object rather than a
        # pk; Django coerces this, but user=... would be clearer -- confirm
        # before changing.
        target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
        try:
            target_cert = target_certs[0]
        except IndexError:
            log.warning(
                u"Matching CertificateItem not found while trying to refund. User %s, Course %s",
                course_enrollment.user,
                course_enrollment.course_id,
            )
            return
        target_cert.status = 'refunded'
        target_cert.refund_requested_time = datetime.now(pytz.utc)
        target_cert.save()
        target_cert.order.refund()
        order_number = target_cert.order_id
        # send billing an email so they can handle refunding
        subject = _("[Refund] User-Requested Refund")
        message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
                                                                                                       user_email=course_enrollment.user.email,
                                                                                                       order_number=order_number)
        to_email = [settings.PAYMENT_SUPPORT_EMAIL]
        from_email = configuration_helpers.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
        try:
            send_mail(subject, message, from_email, to_email, fail_silently=False)
        except Exception as exception:  # pylint: disable=broad-except
            # A failed notification email must not roll back the refund itself,
            # so the error is logged and swallowed deliberately.
            err_str = ('Failed sending email to billing to request a refund for verified certificate'
                       ' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
            log.error(err_str.format(
                user=course_enrollment.user,
                course=course_enrollment.course_id,
                ce_id=course_enrollment.id,
                order=order_number,
                exception=exception,
            ))
        return target_cert
    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
        """
        Add a CertificateItem to an order
        Returns the CertificateItem object after saving
        `order` - an order that this item should be added to, generally the cart order
        `course_id` - the course that we would like to purchase as a CertificateItem
        `cost` - the amount the user will be paying for this CertificateItem
        `mode` - the course mode that this certificate is going to be issued for
        This item also creates a new enrollment if none exists for this user and this course.
        Example Usage:
            cart = Order.get_cart_for_user(user)
            CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
        """
        super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
        course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
        # do some validation on the enrollment mode
        valid_modes = CourseMode.modes_for_course_dict(course_id)
        if mode in valid_modes:
            mode_info = valid_modes[mode]
        else:
            msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
            log.error(msg)
            raise InvalidCartItem(
                _(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
            )
        item, _created = cls.objects.get_or_create(
            order=order,
            user=order.user,
            course_id=course_id,
            course_enrollment=course_enrollment,
            mode=mode,
        )
        item.status = order.status
        item.qty = 1
        item.unit_cost = cost
        item.list_price = cost
        course_name = modulestore().get_course(course_id).display_name
        # Translators: In this particular case, mode_name refers to a
        # particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
        # by which a user could enroll in the given course.
        item.line_desc = _("{mode_name} for course {course}").format(
            mode_name=mode_info.name,
            course=course_name
        )
        item.currency = currency
        order.currency = currency
        order.save()
        item.save()
        # signal course added to cart
        course_enrollment.send_signal(EnrollStatusChange.paid_start, cost=cost, currency=currency)
        return item
    def purchased_callback(self):
        """
        When purchase goes through, activate and update the course enrollment for the correct mode
        """
        self.course_enrollment.change_mode(self.mode)
        self.course_enrollment.activate()
        self.course_enrollment.send_signal(EnrollStatusChange.upgrade_complete,
                                           cost=self.unit_cost, currency=self.currency)
    def additional_instruction_text(self):
        # Build the receipt blurb: an optional ID-verification reminder plus
        # a refund-policy reminder whose window depends on the enrollment mode.
        verification_reminder = ""
        refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 14 days after the course "
                                "start date. ")
        is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment()
        is_professional_mode_verified = self.course_enrollment.is_professional_enrollment()
        if is_enrollment_mode_verified:
            domain = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
            path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
            verification_url = "http://{domain}{path}".format(domain=domain, path=path)
            verification_reminder = _(
                "If you haven't verified your identity yet, please start the verification process ({verification_url})."
            ).format(verification_url=verification_url)
        if is_professional_mode_verified:
            # Professional enrollments have a shorter (2-day) refund window.
            refund_reminder_msg = _("You can unenroll in the course and receive a full refund for 2 days after the "
                                    "course start date. ")
        refund_reminder = _(
            "{refund_reminder_msg}"
            "To receive your refund, contact {billing_email}. "
            "Please include your order number in your email. "
            "Please do NOT include your credit card information."
        ).format(
            refund_reminder_msg=refund_reminder_msg,
            billing_email=settings.PAYMENT_SUPPORT_EMAIL
        )
        # Need this to be unicode in case the reminder strings
        # have been translated and contain non-ASCII unicode
        return u"{verification_reminder} {refund_reminder}".format(
            verification_reminder=verification_reminder,
            refund_reminder=refund_reminder
        )
    @classmethod
    def verified_certificates_count(cls, course_id, status):
        """Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
        return use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
    # TODO combine these three methods into one
    @classmethod
    def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
        """
        Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
        Sample usages:
        - status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
        - status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
        etc
        """
        query = use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
        # Sum() is None when nothing matched; normalize to Decimal zero.
        if query is None:
            return Decimal(0.00)
        else:
            return query
    @classmethod
    def verified_certificates_contributing_more_than_minimum(cls, course_id):
        # Count purchased verified certificates whose price exceeded the
        # course's minimum verified price (i.e. voluntary extra contributions).
        return use_read_replica_if_available(
            CertificateItem.objects.filter(
                course_id=course_id,
                mode='verified',
                status='purchased',
                unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        If the CertificateItem is associated with a course, additional fields will be populated with
        course information. If there is a mode associated with the certificate, it is included in the SKU.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CertificateItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class DonationConfiguration(ConfigurationModel):
    """Configure whether donations are enabled on the site."""
    # NOTE(review): no fields are declared here; presumably the inherited
    # ConfigurationModel `enabled` flag is the whole switch -- confirm.
    class Meta(ConfigurationModel.Meta):
        app_label = "shoppingcart"
class Donation(OrderItem):
    """A donation made by a user.
    Donations can be made for a specific course or to the organization as a whole.
    Users can choose the donation amount.
    """
    class Meta(object):
        app_label = "shoppingcart"
    # Types of donations
    DONATION_TYPES = (
        ("general", "A general donation"),
        ("course", "A donation to a particular course")
    )
    # The type of donation
    donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
    # If a donation is made for a specific course, then store the course ID here.
    # If the donation is made to the organization as a whole,
    # set this field to CourseKeyField.Empty
    course_id = CourseKeyField(max_length=255, db_index=True)
    @classmethod
    @transaction.atomic
    def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
        """Add a donation to an order.
        Args:
            order (Order): The order to add this donation to.
            donation_amount (Decimal): The amount the user is donating.
        Keyword Args:
            course_id (CourseKey): If provided, associate this donation with a particular course.
            currency (str): The currency used for the the donation.
        Raises:
            InvalidCartItem: The provided course ID is not valid.
        Returns:
            Donation
        """
        # This will validate the currency but won't actually add the item to the order.
        super(Donation, cls).add_to_order(order, currency=currency)
        # Create a line item description, including the name of the course
        # if this is a per-course donation.
        # This will raise an exception if the course can't be found.
        description = cls._line_item_description(course_id=course_id)
        params = {
            "order": order,
            "user": order.user,
            "status": order.status,
            "qty": 1,
            "unit_cost": donation_amount,
            "currency": currency,
            "line_desc": description
        }
        if course_id is not None:
            params["course_id"] = course_id
            params["donation_type"] = "course"
        else:
            params["donation_type"] = "general"
        return cls.objects.create(**params)
    def purchased_callback(self):
        """Donations do not need to be fulfilled, so this method does nothing."""
        pass
    def generate_receipt_instructions(self):
        """Provide information about tax-deductible donations in the receipt.
        Returns:
            tuple of (Donation, unicode)
        """
        return self.pk_with_subclass, set([self._tax_deduction_msg()])
    def additional_instruction_text(self, **kwargs):
        """Provide information about tax-deductible donations in the confirmation email.
        Returns:
            unicode
        """
        return self._tax_deduction_msg()
    def _tax_deduction_msg(self):
        """Return the translated version of the tax deduction message.
        Returns:
            unicode
        """
        return _(
            u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
            u"This receipt was prepared to support charitable contributions for tax purposes. "
            u"We confirm that neither goods nor services were provided in exchange for this gift."
        ).format(platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME))
    @classmethod
    def _line_item_description(cls, course_id=None):
        """Create a line-item description for the donation.
        Includes the course display name if provided.
        Keyword Arguments:
            course_id (CourseKey)
        Raises:
            CourseDoesNotExistException: The course ID is not valid.
        Returns:
            unicode
        """
        # If a course ID is provided, include the display name of the course
        # in the line item description.
        if course_id is not None:
            course = modulestore().get_course(course_id)
            if course is None:
                msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
                log.error(msg)
                raise CourseDoesNotExistException(
                    _(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
                )
            return _(u"Donation for {course}").format(course=course.display_name)
        # The donation is for the organization as a whole, not a specific course
        else:
            return _(u"Donation for {platform_name}").format(
                platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
            )
    @property
    def single_item_receipt_context(self):
        # Flag used by receipt templates to render donation-specific copy.
        return {
            'receipt_has_donation_item': True,
        }
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        If the donation is associated with a course, additional fields will be populated with
        course information. When no name or category is specified by the implementation, the
        platform name is used as a default value for required event fields, to declare that
        the Order is specific to the platform, rather than a specific product name or category.
        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(Donation, self).analytics_data()
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)
        else:
            data['name'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
            data['category'] = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
        return data
    @property
    def pdf_receipt_display_name(self):
        """
        How to display this item on a PDF printed receipt file.
        """
        return self._line_item_description(course_id=self.course_id)
| chrisndodge/edx-platform | lms/djangoapps/shoppingcart/models.py | Python | agpl-3.0 | 91,666 | [
"VisIt"
] | 3b7af1650888bb1fe1a2cbd573afb1292221ec53c27b8d5d178172a6e21c70e0 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import time
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
    """
    Principal Component Analysis via the covariance method.

    Arguments:
        X: 2-D data matrix (features x samples).  NOTE(review): X is
            expected to be a numpy matrix -- with a plain ndarray the
            (num_data,) row mean would not broadcast against
            (num_data, dim) when centering.

    Returns:
        (vec, val, mean_X, M, Mcov): eigenvectors (projection matrix),
        eigenvalues (variances), the per-row mean, the mean-centered
        data, and its covariance matrix.
    """
    # get dimensions
    num_data, dim = X.shape
    # center data
    mean_X = X.mean(axis=1)
    M = (X - mean_X)  # subtract the mean (along columns)
    Mcov = cov(M)
    ###### Sanity Check ######
    # Report every NaN cell.  (Was hard-coded to a 123x142 matrix; now
    # scans the actual shape so it works for any input size.)
    nan_count = 0
    for i in range(num_data):
        for j in range(dim):
            if X[i, j] != X[i, j]:  # NaN is the only value unequal to itself
                print(X[i, j])
                print('%d %d' % (i, j))
                nan_count += 1
    print(nan_count)
    ##########################
    print('PCA - COV-Method used')
    val, vec = linalg.eig(Mcov)
    # return the projection matrix, the variance and the mean
    return vec, val, mean_X, M, Mcov
def feature_vector_diff(Zt1, Zt2, i):  # For 1.2 Seconds (Wipe Container_Movable: All Trials)
    """
    Build a stacked feature vector from two time-series recordings.

    Samples every third row of the window [i, i+121), taking columns 3
    and 4 of `Zt1` and column 1 of `Zt2` (41 samples per channel).  The
    third channel is rescaled so its peak magnitude matches the larger
    of the first two channels' peaks, then the three channels are
    stacked vertically.

    Arguments:
        Zt1: time-varying data array; columns 3 and 4 are used.
        Zt2: time-varying tracking array; column 1 is used.
        i: index of the first row of the sampling window.

    Returns:
        numpy matrix of shape (123, 1): the three 41-sample channels
        stacked vertically.
    """
    data_matrix = np.array([0, 0, 0])
    n = i + 121
    while (i < n):
        data_instant = np.array([Zt1[i, 3], Zt1[i, 4], Zt2[i, 1]])
        # np.vstack replaces np.row_stack, which was removed in NumPy 2.0.
        data_matrix = np.vstack([data_matrix, data_instant])
        i = i + 3
    # Drop the all-zero seed row and split into per-channel column vectors.
    # (Unused min/mean/std computations and the commented-out alternative
    # normalizations that referenced them have been removed.)
    Fvec_a = np.matrix(data_matrix[1:, 0]).T
    Fvec_b = np.matrix(data_matrix[1:, 1]).T
    Fvec_c = np.matrix(data_matrix[1:, 2]).T
    max_a = np.max(abs(Fvec_a))
    max_b = np.max(abs(Fvec_b))
    max_c = np.max(abs(Fvec_c))
    # Rescale the third channel so its peak matches the larger of the
    # first two channels' peaks.
    Fvec_c = Fvec_c * np.max((max_a, max_b)) / max_c
    Fvec = np.vstack([Fvec_a, Fvec_b, Fvec_c])
    n_Fvec, m_Fvec = np.shape(Fvec)
    print('Feature_Vector_Shape: %d %d' % (n_Fvec, m_Fvec))
    return Fvec
if __name__ == '__main__':

    # NOTE(review): this section is Python 2 (print statements) and relies on
    # names defined earlier in the file (time, np, ut, feature_vector_diff,
    # Fmat_original, pca, cumsum, sqrt, kNN, TransferError, Dataset) --
    # presumably imported/defined above this chunk; confirm against full file.

    # Time-manipulation for Video
    index = 0
    while (index < 140):
        print 'Getting data:'
        time.sleep(0.1)
        index = index+1

    # Data matrix: 140 training columns plus 2 test columns (the two new objects).
    Fmat = np.matrix(np.zeros((123,142)))
    Fmat[:,0:140] = Fmat_original

    # New Objects (Two Objects)
    # First_Object
    ta_no_fo_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/First_Object/time_varying_data_first_object_trial_3.pkl')
    fa_no_fo_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/First_Object/time_varying_tracking_data_first_object_trial_3.pkl')
    # Second_Object
    ta_no_so_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/Second_Object/time_varying_data_second_object_trial_3.pkl')
    fa_no_so_t1 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Objects/Two_objects/Second_Object/time_varying_tracking_data_second_object_trial_3.pkl')

    # Creating Feature Vector for each new object (columns 140 and 141).
    Fmat[:,140] = feature_vector_diff(ta_no_fo_t1,fa_no_fo_t1,300)
    Fmat[:,141] = feature_vector_diff(ta_no_so_t1,fa_no_so_t1,300)

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot

    # PCA over the full matrix; B is the mean-centered data, C its covariance.
    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total

    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)

    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    # 12 components for the movable/fixed classifier, 8 for soft/rigid.
    W_mov_fixed = eigvec_total[:,0:12]
    W_soft_rigid = eigvec_total[:,0:8]

    # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
    length = len(eigval_total)
    s = np.matrix(np.zeros(length)).T
    i = 0
    while i < length:
        s[i] = sqrt(C[i,i])
        i = i+1
    Z = np.divide(B,s)
    m_Z, n_Z = np.shape(Z)
    print 'Z-Score Shape:', m_Z, n_Z

    #Projected Data:
    Y_mov_fixed = (W_mov_fixed.T)*B
    Y_soft_rigid = (W_soft_rigid.T)*B

    #Using PYMVPA
    # Columns 0..139 are training; 140/141 are the two unseen test objects.
    Y_train_mov_fixed = Y_mov_fixed[:,:140]
    Y_test_mov_fixed_1st_object = Y_mov_fixed[:,140]
    Y_test_mov_fixed_2nd_object = Y_mov_fixed[:,141]
    PCA_training_data_mov_fixed = np.array(Y_train_mov_fixed.T)
    PCA_test_data_mov_fixed_1st_object = np.array(Y_test_mov_fixed_1st_object.T)
    PCA_test_data_mov_fixed_2nd_object = np.array(Y_test_mov_fixed_2nd_object.T)
    PCA_training_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
    # Each unknown sample is evaluated under BOTH label hypotheses; the
    # hypothesis with the smaller transfer error wins (see min() below).
    PCA_test_1_label_1st_object = ['Fixed']*1
    PCA_test_2_label_1st_object = ['Movable']*1
    PCA_test_1_label_2nd_object = ['Fixed']*1
    PCA_test_2_label_2nd_object = ['Movable']*1
    PCA_training_chunk = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
    PCA_test_1_chunk_1st_object = ['Fixed']*1
    PCA_test_2_chunk_1st_object = ['Movable']*1
    PCA_test_1_chunk_2nd_object = ['Fixed']*1
    PCA_test_2_chunk_2nd_object = ['Movable']*1

    # Stage 1: movable-vs-fixed classification with k-nearest-neighbours.
    clf_mov_fixed = kNN(k=3)
    terr_mov_fixed = TransferError(clf_mov_fixed)
    ds_training_1 = Dataset(samples=PCA_training_data_mov_fixed,labels=PCA_training_label_1,chunks=PCA_training_chunk)
    ds_test_1 = Dataset(samples=PCA_test_data_mov_fixed_1st_object,labels=PCA_test_1_label_1st_object,chunks=PCA_test_1_chunk_1st_object)
    ds_test_2 = Dataset(samples=PCA_test_data_mov_fixed_1st_object,labels=PCA_test_2_label_1st_object,chunks=PCA_test_2_chunk_1st_object)
    ds_test_3 = Dataset(samples=PCA_test_data_mov_fixed_2nd_object,labels=PCA_test_1_label_2nd_object,chunks=PCA_test_1_chunk_2nd_object)
    ds_test_4 = Dataset(samples=PCA_test_data_mov_fixed_2nd_object,labels=PCA_test_2_label_2nd_object,chunks=PCA_test_2_chunk_2nd_object)
    error_1 = terr_mov_fixed(ds_test_1,ds_training_1)
    error_2 = terr_mov_fixed(ds_test_2,ds_training_1)
    error_3 = terr_mov_fixed(ds_test_3,ds_training_1)
    error_4 = terr_mov_fixed(ds_test_4,ds_training_1)
    # The winning hypothesis is the one with the smaller transfer error.
    error_fixed_movable_1st_object = min(error_1,error_2)
    error_fixed_movable_2nd_object = min(error_3,error_4)
    if error_fixed_movable_1st_object == error_1 and error_fixed_movable_2nd_object == error_3:
        print "Both objects are Fixed"
    elif error_fixed_movable_1st_object == error_1 and error_fixed_movable_2nd_object == error_4:
        print "One object is Fixed and the other is Movable"
    elif error_fixed_movable_1st_object == error_2 and error_fixed_movable_2nd_object == error_3:
        print "One object is Fixed and the other is Movable"
    elif error_fixed_movable_1st_object == error_2 and error_fixed_movable_2nd_object == error_4:
        print "Both objects are Movable"

    # Stage 2 (first object): if it was classified Fixed, refine to soft/rigid
    # using the 70 'Fixed' training columns only.
    if error_fixed_movable_1st_object == error_1:
        Y_train_soft_rigid = np.concatenate((Y_soft_rigid[:,:35],Y_soft_rigid[:,70:105]),axis=1)
        Y_test_soft_rigid_1st_object = Y_soft_rigid[:,140]
        PCA_training_data_soft_rigid = np.array(Y_train_soft_rigid.T)
        PCA_test_data_soft_rigid_1st_object = np.array(Y_test_soft_rigid_1st_object.T)
        PCA_training_label_2 = ['Rigid']*35 + ['Soft']*35
        PCA_test_3_label_1st_object = ['Rigid']*1
        PCA_test_4_label_1st_object = ['Soft']*1
        PCA_training_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5
        PCA_test_3_chunk_1st_object = ['Rigid']*1
        PCA_test_4_chunk_1st_object = ['Soft']*1
        clf_soft_rigid = kNN(k=4)
        terr_soft_rigid = TransferError(clf_soft_rigid)
        ds_training_2 = Dataset(samples=PCA_training_data_soft_rigid,labels=PCA_training_label_2,chunks=PCA_training_chunk_1)
        ds_test_5 = Dataset(samples=PCA_test_data_soft_rigid_1st_object,labels=PCA_test_3_label_1st_object,chunks=PCA_test_3_chunk_1st_object)
        ds_test_6 = Dataset(samples=PCA_test_data_soft_rigid_1st_object,labels=PCA_test_4_label_1st_object,chunks=PCA_test_4_chunk_1st_object)
        error_5 = terr_soft_rigid(ds_test_5,ds_training_2)
        error_6 = terr_soft_rigid(ds_test_6,ds_training_2)
        error_soft_rigid_1st_object = min(error_5,error_6)
        if error_soft_rigid_1st_object == error_5:
            print "Object is Rigid"
        elif error_soft_rigid_1st_object == error_6:
            print "Object is Soft"

    # Stage 2 (second object): same soft/rigid refinement if classified Fixed.
    if error_fixed_movable_2nd_object == error_3:
        Y_train_soft_rigid = np.concatenate((Y_soft_rigid[:,:35],Y_soft_rigid[:,70:105]),axis=1)
        Y_test_soft_rigid_2nd_object = Y_soft_rigid[:,141]
        PCA_training_data_soft_rigid = np.array(Y_train_soft_rigid.T)
        PCA_test_data_soft_rigid_2nd_object = np.array(Y_test_soft_rigid_2nd_object.T)
        PCA_training_label_2 = ['Rigid']*35 + ['Soft']*35
        PCA_test_3_label_2nd_object = ['Rigid']*1
        PCA_test_4_label_2nd_object = ['Soft']*1
        PCA_training_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5
        PCA_test_3_chunk_2nd_object = ['Rigid']*1
        PCA_test_4_chunk_2nd_object = ['Soft']*1
        clf_soft_rigid = kNN(k=4)
        terr_soft_rigid = TransferError(clf_soft_rigid)
        ds_training_2 = Dataset(samples=PCA_training_data_soft_rigid,labels=PCA_training_label_2,chunks=PCA_training_chunk_1)
        ds_test_5 = Dataset(samples=PCA_test_data_soft_rigid_2nd_object,labels=PCA_test_3_label_2nd_object,chunks=PCA_test_3_chunk_2nd_object)
        ds_test_6 = Dataset(samples=PCA_test_data_soft_rigid_2nd_object,labels=PCA_test_4_label_2nd_object,chunks=PCA_test_4_chunk_2nd_object)
        error_5 = terr_soft_rigid(ds_test_5,ds_training_2)
        error_6 = terr_soft_rigid(ds_test_6,ds_training_2)
        error_soft_rigid_2nd_object = min(error_5,error_6)
        if error_soft_rigid_2nd_object == error_5:
            print "Object is Rigid"
        elif error_soft_rigid_2nd_object == error_6:
            print "Object is Soft"
| tapomayukh/projects_in_python | classification/Classification_with_kNN/Multiple_Contact_Classification/Final_New_classify_2_objects_2_categories_1200ms_scaled.py | Python | mit | 12,548 | [
"Mayavi"
] | 868422c64d90bb8372c8fafb374d28438b00f7af70edbebf7654a4825b626fbc |
import numpy as np
import scipy.special as sps
def wixi(x):
    """Evaluate w(i*x) = exp(x**2) * (1 - erf(x)) via the Faddeeva function.

    scipy.special.wofz computes w(z) = exp(-z**2) * (1 - erf(-i*z));
    substituting z = i*x gives the quantity used by the model functions
    in this module.  The result is mathematically real, so a numerically
    negligible imaginary part is discarded before returning.
    """
    faddeeva = sps.wofz(1j * x)
    # Strip the zero-valued imaginary residue so callers get a real number.
    return np.real_if_close(faddeeva)
def CF_Gxyz_TIR_gauss(parms, tau):
    u"""TIR-FCS autocorrelation: free 3D diffusion with a Gaussian lateral
    detection profile and an exponentially decaying (evanescent) axial
    profile.

        κ      = 1/d_eva
        x      = sqrt(D*τ)*κ
        w(i*x) = exp(x²)*erfc(x)
        gz     = κ*[ sqrt(D*τ/π) - (2*D*τ*κ² - 1)/(2*κ) * w(i*x) ]
        g2D    = 1/(1 + τ/τ_diff),   τ_diff = r₀²/(4*D)
        G      = g2D * gz / N_eff,   N_eff = C_3D * π * r₀² * d_eva

    *parms* - a list of parameters.
    Parameters (parms[i]):
    [0] D      Diffusion coefficient
    [1] r₀     Lateral extent of the detection volume
    [2] d_eva  Evanescent field depth
    [3] C_3D   Particle concentration in the confocal volume
    *tau* - lag time
    """
    # model 6013
    D = parms[0]
    r0 = parms[1]
    deva = parms[2]
    conc = parms[3]
    # N_eff is not the true particle number; it merely absorbs the
    # normalization so the formula reads nicely.
    v_eff = np.pi * r0 ** 2 * deva
    n_eff = conc * v_eff
    # Lateral (2D Gaussian) contribution.
    tau_diff = r0 ** 2 / (4 * D)
    g_lat = 1 / ((1. + tau / tau_diff))
    # Axial (evanescent) contribution.  The helper wixi() is inlined here:
    # w(i*x) is evaluated directly through scipy's Faddeeva function wofz.
    kappa = 1 / deva
    x = np.sqrt(D * tau) * kappa
    w_ix = np.real_if_close(sps.wofz(1j * x))
    g_ax = kappa * (np.sqrt(D * tau / np.pi) -
                    (2 * D * tau * kappa ** 2 - 1) / (2 * kappa) * w_ix)
    return 1 / (n_eff) * g_lat * g_ax
def CF_Gxyz_TIR_gauss_trip(parms, tau):
    u"""TIR-FCS autocorrelation: free 3D diffusion with a Gaussian lateral
    detection profile, an exponentially decaying axial profile, and a
    triplet blinking component.

        κ       = 1/d_eva
        x       = sqrt(D*τ)*κ
        w(i*x)  = exp(x²)*erfc(x)
        gz      = κ*[ sqrt(D*τ/π) - (2*D*τ*κ² - 1)/(2*κ) * w(i*x) ]
        g2D     = 1/(1 + τ/τ_diff),   τ_diff = r₀²/(4*D)
        triplet = 1 + T/(1-T)*exp(-τ/τ_trip)
        G       = g2D * gz * triplet / N_eff

    *parms* - a list of parameters.
    Parameters (parms[i]):
    [0] D       Diffusion coefficient
    [1] r₀      Lateral extent of the detection volume
    [2] d_eva   Evanescent field depth
    [3] C_3D    Particle concentration in the confocal volume
    [4] τ_trip  Characteristic residence time in triplet state
    [5] T       Fraction of particles in triplet (non-fluorescent) state
                0 <= T < 1
    *tau* - lag time
    """
    # model 6014
    D = parms[0]
    r0 = parms[1]
    deva = parms[2]
    conc = parms[3]
    tautrip = parms[4]
    T = parms[5]
    # Normalization (N_eff is not the true particle number; see model 6013).
    v_eff = np.pi * r0 ** 2 * deva
    n_eff = conc * v_eff
    # Lateral (2D Gaussian) contribution.
    tau_diff = r0 ** 2 / (4 * D)
    g_lat = 1 / ((1. + tau / tau_diff))
    # Axial (evanescent) contribution; wixi() inlined via scipy's Faddeeva
    # function: w(i*x) = wofz(i*x).
    kappa = 1 / deva
    x = np.sqrt(D * tau) * kappa
    w_ix = np.real_if_close(sps.wofz(1j * x))
    g_ax = kappa * (np.sqrt(D * tau / np.pi) -
                    (2 * D * tau * kappa ** 2 - 1) / (2 * kappa) * w_ix)
    # Triplet factor; degenerates to 1 when the triplet component vanishes.
    triplet = 1
    if tautrip != 0 and T != 0:
        triplet = 1 + T / (1 - T) * np.exp(-tau / tautrip)
    return 1 / (n_eff) * g_lat * g_ax * triplet
def MoreInfo_6013(parms, countrate=None):
    u"""Supplementary variables for model 6013.
    Beware that the effective volume is chosen arbitrarily.
    Correlation function at lag time τ=0:
    [4] G(τ=0)
    Effective detection volume:
    [5] V_eff = π * r₀² * d_eva
    Effective particle concentration:
    [6] C_3D [nM] = C_3D [1000/µm³] * 10000/6.0221415
    """
    r0 = parms[1]
    deva = parms[2]
    conc = parms[3]
    # Effective detection volume and (arbitrary) effective particle number.
    v_eff = np.pi * r0 ** 2 * deva
    n_eff = conc * v_eff
    info = [
        # Correlation amplitude at zero lag.
        ["G(0)", CF_Gxyz_TIR_gauss(parms, 0)],
        ["V_eff [al]", v_eff],
        # 1000/µm³ -> nM conversion (Avogadro constant folded in).
        ["C_3D [nM]", conc * 10000 / 6.0221415],
    ]
    if countrate is not None:
        # Counts per particle (per second).
        info.append(["cpp [kHz]", countrate / n_eff])
    return info
def MoreInfo_6014(parms, countrate=None):
    u"""Supplementary variables for model 6014.
    Beware that the effective volume is chosen arbitrarily.
    Correlation function at lag time τ=0:
    [6] G(τ=0)
    Effective detection volume:
    [7] V_eff = π * r₀² * d_eva
    Effective particle concentration:
    [8] C_3D [nM] = C_3D [1000/µm³] * 10000/6.0221415
    """
    r0 = parms[1]
    deva = parms[2]
    conc = parms[3]
    # Effective detection volume and (arbitrary) effective particle number.
    v_eff = np.pi * r0 ** 2 * deva
    n_eff = conc * v_eff
    # NOTE(review): G(0) is computed with the diffusion-only model
    # CF_Gxyz_TIR_gauss (first four parameters), i.e. the triplet factor
    # is omitted -- presumably intentional; confirm.
    info = [
        ["G(0)", CF_Gxyz_TIR_gauss(parms, 0)],
        ["V_eff [al]", v_eff],
        # 1000/µm³ -> nM conversion (Avogadro constant folded in).
        ["C_3D [nM]", conc * 10000 / 6.0221415],
    ]
    if countrate is not None:
        # Counts per particle (per second).
        info.append(["cpp [kHz]", countrate / n_eff])
    return info
def get_boundaries_6014(parms):
    """Return per-parameter fitting boundaries for model 6014.

    Every parameter is restricted to non-negative values ([0, None]);
    the triplet fraction T (parms[5]) is additionally bounded to [0, 1].

    Bug fix: the previous ``[[0, None]]*len(parms)`` filled the list with
    references to ONE shared sublist, so mutating any boundary in place
    would silently change all of them.  Each entry is now independent.
    """
    boundaries = [[0, None] for _ in parms]
    boundaries[5] = [0, 1]
    return boundaries
def get_boundaries_6013(parms):
    """Return per-parameter fitting boundaries for model 6013.

    Every parameter is restricted to non-negative values ([0, None]).

    Bug fix: the previous ``[[0, None]]*len(parms)`` filled the list with
    references to ONE shared sublist, so mutating any boundary in place
    would silently change all of them.  Each entry is now independent.
    """
    return [[0, None] for _ in parms]
# 3D Model TIR gaussian
# Model record: [model id, short name, long description, model function].
m_3dtirsq6013 = [6013, "3D", "Simple 3D diffusion w/ TIR",
                 CF_Gxyz_TIR_gauss]
# Fit-parameter labels in internal units.
# NOTE(review): the closing ')' in "C_3D [1000/µm³)" looks like a typo for
# ']' (here and in labels_6014); left unchanged because these are runtime
# strings that may be persisted/compared elsewhere -- confirm before fixing.
labels_6013 = [u"D [10 µm²/s]",
               u"r₀ [100 nm]",
               u"d_eva [100 nm]",
               u"C_3D [1000/µm³)"]
# Default starting values for the fit.
values_6013 = [2.5420,
               9.44,
               1.0,
               0.03011]
# For user comfort we add values that are human readable.
# These will be used for output that only humans can read.
labels_human_readable_6013 = [u"D [µm²/s]",
                              u"r₀ [nm]",
                              u"d_eva [nm]",
                              u"C_3D [1/µm³]"]
# Multiplicative factors converting internal values to the units above.
values_factor_human_readable_6013 = [10,
                                     100,
                                     100,
                                     1000]
# Which parameters are varied by default during fitting.
valuestofit_6013 = [True, False, False, True]
parms_6013 = [labels_6013, values_6013, valuestofit_6013,
              labels_human_readable_6013, values_factor_human_readable_6013]
# Pack the models
model1 = dict()
model1["Parameters"] = parms_6013
model1["Definitions"] = m_3dtirsq6013
model1["Supplements"] = MoreInfo_6013
model1["Boundaries"] = get_boundaries_6013(values_6013)
# 3D Model TIR gaussian + triplet
m_3dtirsq6014 = [6014, "T+3D", "Simple 3D diffusion + triplet w/ TIR",
                 CF_Gxyz_TIR_gauss_trip]
labels_6014 = [u"D [10 µm²/s]",
               u"r₀ [100 nm]",
               u"d_eva [100 nm]",
               u"C_3D [1000/µm³)",
               u"τ_trip [ms]",
               u"T"]
values_6014 = [2.5420,
               9.44,
               1.0,
               0.03011,
               0.001,
               0.01]
labels_human_readable_6014 = [u"D [µm²/s]",
                              u"r₀ [nm]",
                              u"d_eva [nm]",
                              u"C_3D [1/µm³]",
                              u"τ_trip [µs]",
                              u"T"]
values_factor_human_readable_6014 = [10,
                                     100,
                                     100,
                                     1000,
                                     1000,
                                     1]
valuestofit_6014 = [True, False, False, True, False, False]
parms_6014 = [labels_6014, values_6014, valuestofit_6014,
              labels_human_readable_6014, values_factor_human_readable_6014]
# Pack the models
model2 = dict()
model2["Parameters"] = parms_6014
model2["Definitions"] = m_3dtirsq6014
model2["Supplements"] = MoreInfo_6014
model2["Boundaries"] = get_boundaries_6014(values_6014)
# All models exported by this module.
Modelarray = [model1, model2]
| paulmueller/PyCorrFit | pycorrfit/models/MODEL_TIRF_gaussian_1C.py | Python | gpl-2.0 | 8,849 | [
"Gaussian"
] | 610f5ae1cff88e7c3be41ea5417bdf3b831e0abb9b0a9319d59e25a86d4f229c |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer."""
import lingvo.compat as tf
from lingvo.core import cluster_factory
from lingvo.core import layers
from lingvo.core import optimizer
from lingvo.core import py_utils
from lingvo.core import test_utils
import numpy as np
class OptimizerTest(test_utils.TestCase):
  """Tests for lingvo optimizer wrappers (CompositeOptimizer, Accumulator)."""

  def testCompositeOptimizer(self):
    """CompositeOptimizer routes variables to sub-optimizers by regex.

    Checks that (a) a fully/partially specified optimizer_map produces the
    composite train op, and (b) a variable matched by more than one regex
    raises an error.
    """
    adam_op = optimizer.Adam.Params()
    rmsprop_op = optimizer.RMSProp.Params()
    # Each map value is (optimizer params, learning-rate multiplier).
    adam_rmsprop_opt = optimizer.CompositeOptimizer.Params().Set(
        optimizer_map={
            'fc/w': (adam_op, 1.),
            'fc/b': (rmsprop_op, 1.),
            'default_optimizer': (adam_op, 1.)
        }).Instantiate()
    adam_op_2 = optimizer.Adam.Params().Set(name='adam_2')
    # Map that leaves some variables to the default optimizer.
    unspecified_comp_opt = optimizer.CompositeOptimizer.Params().Set(
        optimizer_map={
            'fc/w': (adam_op_2, 1.),
            'default_optimizer': (adam_op_2, 1.)
        }).Instantiate()
    sgd_op = optimizer.SGD.Params()
    adagrad_op = optimizer.Adagrad.Params()
    # 'fc/w' matches both 'fc/w' and '.' -> should be rejected on Apply.
    overlapping_comp_opt = optimizer.CompositeOptimizer.Params().Set(
        optimizer_map={
            'fc/w': (sgd_op, 1.),
            '.': (adagrad_op, 1.),
            'default_optimizer': (adagrad_op, 1.)
        }).Instantiate()

    # Minimal FC layer providing the 'fc/w' and 'fc/b' variables.
    params = layers.FCLayer.Params()
    params.name = 'fc'
    params.dtype = tf.float64
    params.input_dim = 3
    params.output_dim = 2
    params.batch_norm = False
    fc_layer = layers.FCLayer(params)
    inputs = tf.placeholder(shape=[2, 4, 3], dtype=tf.float64)
    output = fc_layer.FPropDefaultTheta(inputs)
    loss = tf.reduce_sum(output)
    var_grads = py_utils.ComputeGradients(loss, fc_layer.vars)

    self.assertIn('composite_optimizer_train_op',
                  adam_rmsprop_opt.Apply(1e-1, var_grads).name)
    self.assertIn('composite_optimizer_train_op',
                  unspecified_comp_opt.Apply(1e-1, var_grads).name)
    with self.assertRaisesRegex(
        Exception,
        'Variable fc/w/var:0 is matched 2 times by regex',
    ):
      overlapping_comp_opt.Apply(1e-1, var_grads)

  def testAccumulator(self):
    # testAccumulator compares
    # - explicit averaging of independently computed var_grads1 and
    #   var_grads2,
    # - Accumulator(SGD) optimizer effectively doing this over 2 steps.
    np.random.seed(12345)
    np_input1 = np.random.normal(0.1, 0.5, [2, 4, 3])
    np.random.seed(12346)
    np_input2 = np.random.normal(0.1, 0.5, [2, 4, 3])

    # Reference graph: two SGD updates, each with half-weighted gradients.
    with self.session(use_gpu=True, graph=tf.Graph()) as sess:
      tf.random.set_seed(123456)
      params = layers.ProjectionLayer.Params()
      params.name = 'proj'
      params.dtype = tf.float64
      params.input_dim = 3
      params.output_dim = 2
      # Same seed as the second graph so both start from identical weights.
      params.params_init = py_utils.WeightInit.Gaussian(0.01, 123456)
      params.batch_norm = False
      proj_layer = layers.ProjectionLayer(params)
      inputs1 = tf.placeholder(shape=[2, 4, 3], dtype=tf.float64)
      in_padding1 = tf.zeros([2, 4, 1], dtype=tf.float64)
      inputs2 = tf.placeholder(shape=[2, 4, 3], dtype=tf.float64)
      in_padding2 = tf.zeros([2, 4, 1], dtype=tf.float64)
      output1 = proj_layer.FPropDefaultTheta(inputs1, in_padding1)
      output2 = proj_layer.FPropDefaultTheta(inputs2, in_padding2)
      loss1 = tf.reduce_sum(output1)
      loss2 = tf.reduce_sum(output2)
      var_grads1 = py_utils.ComputeGradients(loss1, proj_layer.vars)
      var_grads2 = py_utils.ComputeGradients(loss2, proj_layer.vars)
      op = optimizer.SGD.Params()
      opt = op.Instantiate()
      lr = 1e-1
      # Chain the two updates so the second sees the first's result.
      with tf.control_dependencies([loss1, loss2]):
        var_update_op1 = opt.Apply(
            lr, py_utils.ApplyGradMultiplier(var_grads1, 1. / 2.))
      with tf.control_dependencies([var_update_op1]):
        var_update_op2 = opt.Apply(
            lr, py_utils.ApplyGradMultiplier(var_grads2, 1. / 2.))

      self.evaluate(tf.global_variables_initializer())
      vars1 = self.evaluate(proj_layer.vars.Flatten())
      loss1_1, grads1_1, loss1_2, grads1_2 = sess.run(
          [
              loss1,
              var_grads1.Transform(tuple), loss2,
              var_grads2.Transform(tuple)
          ],
          feed_dict={
              inputs1: np_input1,
              inputs2: np_input2,
          },
      )
      sess.run([var_update_op2],
               feed_dict={
                   inputs1: np_input1,
                   inputs2: np_input2,
               })
      vars1_1 = self.evaluate(proj_layer.vars.Flatten())

    # Accumulator graph: the same two gradients accumulated over 2 steps.
    with self.session(use_gpu=True, graph=tf.Graph()) as sess:
      tf.random.set_seed(123456)
      params = layers.ProjectionLayer.Params()
      params.name = 'proj'
      params.dtype = tf.float64
      params.input_dim = 3
      params.output_dim = 2
      params.params_init = py_utils.WeightInit.Gaussian(0.01, 123456)
      params.batch_norm = False
      proj_layer = layers.ProjectionLayer(params)
      in_padding1 = tf.zeros([2, 4, 1], dtype=tf.float64)
      inputs1 = tf.placeholder(shape=[2, 4, 3], dtype=tf.float64)
      output1 = proj_layer.FPropDefaultTheta(inputs1, in_padding1)
      loss = tf.reduce_sum(output1)
      var_grads = py_utils.ComputeGradients(loss, proj_layer.vars)
      op = optimizer.Accumulator.Params().Set(
          accum_steps=2, dtype=tf.float64, optimizer_tpl=optimizer.SGD.Params())
      opt = op.Instantiate()
      lr = 1e-1
      # Worker cluster with summaries enabled so 'sgd_lr' gets emitted.
      with cluster_factory.ForTestingWorker(add_summary=True):
        var_update_op = opt.Apply(lr, var_grads)
      increment_global_step_op = tf.assign_add(
          py_utils.GetOrCreateGlobalStepVar(), 1)

      self.evaluate(tf.global_variables_initializer())
      vars2 = self.evaluate(proj_layer.vars.Flatten())
      loss2_1, grads2_1 = sess.run([loss, var_grads.Transform(tuple)],
                                   feed_dict={
                                       inputs1: np_input1,
                                   })
      loss2_2, grads2_2 = sess.run([loss, var_grads.Transform(tuple)],
                                   feed_dict={
                                       inputs1: np_input2,
                                   })
      # Snapshot the gradient accumulator before/between/after the 2 steps.
      acc_0 = self.evaluate(
          [v for v in tf.global_variables() if 'grad_accumulator' in v.name])[0]
      sess.run([var_update_op], feed_dict={
          inputs1: np_input1,
      })
      acc_1 = self.evaluate(
          [v for v in tf.global_variables() if 'grad_accumulator' in v.name])[0]
      vars2_intermediate = self.evaluate(proj_layer.vars.Flatten())
      self.evaluate(increment_global_step_op)
      sess.run([var_update_op], feed_dict={
          inputs1: np_input2,
      })
      acc_2 = self.evaluate(
          [v for v in tf.global_variables() if 'grad_accumulator' in v.name])[0]
      vars2_1 = self.evaluate(proj_layer.vars.Flatten())

      summary = tf.Summary.FromString(self.evaluate(tf.summary.merge_all()))
      tf.logging.info(f'summary: {summary}')
      self.assertEqual(summary.value[0].tag, 'sgd_lr')

    # Both graphs start identical; the accumulator holds grads2_1 after the
    # first step, resets after applying, and only the second step updates
    # the variables -- ending where the explicit half-weighted SGD did.
    self.assertAllClose(vars1, vars2)
    self.assertAllClose(acc_0, np.zeros_like(acc_0))
    self.assertAllClose(acc_1, grads2_1['w'][1])
    self.assertAllClose(acc_2, np.zeros_like(acc_0))
    self.assertAllClose(loss1_1, loss2_1)
    self.assertAllClose(loss1_2, loss2_2)
    self.assertAllClose(grads1_1, grads2_1)
    self.assertAllClose(grads1_2, grads2_2)
    self.assertAllClose(vars1, vars2_intermediate)
    self.assertAllClose(vars2[0], grads2_1['w'][0])
    self.assertAllClose(vars2[0], grads2_2['w'][0])
    self.assertAllClose(
        vars1[0] - 0.5 * lr * (grads1_1['w'][1] + grads1_2['w'][1]), vars1_1[0])
    self.assertAllClose(
        vars2[0] - 0.5 * lr * (grads2_1['w'][1] + grads2_2['w'][1]), vars2_1[0])
    self.assertAllClose(vars2, vars2_intermediate)
    self.assertAllClose(vars1_1, vars2_1)
if __name__ == '__main__':
  # Run the test suite when invoked directly.
  tf.test.main()
| tensorflow/lingvo | lingvo/core/optimizer_test.py | Python | apache-2.0 | 8,526 | [
"Gaussian"
] | 065511a319388c9dbe05cb11bb5be7c0f695856aad60cbf9cee8b85caf11dc72 |
#! /usr/bin/env python
"""Generate Java code from an ASDL description."""
# TO DO
# handle fields that have a type but no name
import os, sys, traceback
import asdl
# Formatting constants for the generated Java source.
TABSIZE = 4     # spaces per indentation level
MAX_COL = 100   # maximum column for emitted lines

def reflow_lines(s, depth):
    """Reflow the line s indented depth tabs.

    Return a sequence of lines where no line extends beyond MAX_COL
    when properly indented.  The first line is properly indented based
    exclusively on depth * TABSIZE.  All following lines -- these are
    the reflowed lines generated by this function -- start at the same
    column as the first character beyond the opening { in the first
    line.
    """
    size = MAX_COL - depth * TABSIZE
    if len(s) < size:
        return [s]
    lines = []
    cur = s
    padding = ""
    while len(cur) > size:
        i = cur.rfind(' ', 0, size)
        # A single token wider than the available space cannot be split.
        # (repr() replaces the Python-2-only backquote syntax.)
        assert i != -1, "Impossible line to reflow: %s" % repr(s)
        lines.append(padding + cur[:i])
        if len(lines) == 1:
            # Align continuation lines with the first character after the
            # opening brace (if any) on the first emitted line.
            j = cur.find('{', 0, i)
            if j >= 0:
                j += 2  # account for the brace and the space after it
                size -= j
                padding = " " * j
        cur = cur[i+1:]
    # Emit the remainder (the original used a redundant while/else here).
    lines.append(padding + cur)
    return lines
class EmitVisitor(asdl.VisitorBase):
    """Visitor that emits lines of Java source into per-class .java files.

    Fixes over the previous version:
    - the old open() called ``open(path, "w")`` and discarded the returned
      handle before opening the same path again, leaking a file object;
    - the Python-2-only ``print >> file`` syntax is replaced by explicit
      file.write() calls (byte-identical output, valid on Python 2 and 3).
    """

    def __init__(self, dir):
        # Root directory under which package subdirectories live.
        self.dir = dir
        super(EmitVisitor, self).__init__()

    def _writeln(self, line=""):
        # Write one line plus newline (replacement for ``print >> self.file``).
        self.file.write(line + "\n")

    def open(self, package, name, refersToPythonTree=1, useDataOutput=0):
        """Open <dir>/<package>/<name>.java and emit the standard header."""
        path = os.path.join(self.dir, package, "%s.java" % name)
        self.file = open(path, "w")
        self._writeln("// Autogenerated AST node")
        self._writeln('package org.python.antlr.%s;' % package)
        if refersToPythonTree:
            # Imports needed by nodes that subclass/reference PythonTree.
            self._writeln('import org.antlr.runtime.CommonToken;')
            self._writeln('import org.antlr.runtime.Token;')
            self._writeln('import org.python.antlr.AST;')
            self._writeln('import org.python.antlr.PythonTree;')
            self._writeln('import org.python.antlr.adapter.AstAdapters;')
            self._writeln('import org.python.antlr.base.excepthandler;')
            self._writeln('import org.python.antlr.base.expr;')
            self._writeln('import org.python.antlr.base.mod;')
            self._writeln('import org.python.antlr.base.slice;')
            self._writeln('import org.python.antlr.base.stmt;')
            self._writeln('import org.python.core.ArgParser;')
            self._writeln('import org.python.core.AstList;')
            self._writeln('import org.python.core.Py;')
            self._writeln('import org.python.core.PyObject;')
            self._writeln('import org.python.core.PyString;')
            self._writeln('import org.python.core.PyStringMap;')
            self._writeln('import org.python.core.PyType;')
            self._writeln('import org.python.core.Visitproc;')
            self._writeln('import org.python.expose.ExposedGet;')
            self._writeln('import org.python.expose.ExposedMethod;')
            self._writeln('import org.python.expose.ExposedNew;')
            self._writeln('import org.python.expose.ExposedSet;')
            self._writeln('import org.python.expose.ExposedType;')
        if useDataOutput:
            # Extra imports for nodes that serialize themselves.
            self._writeln('import java.io.DataOutputStream;')
            self._writeln('import java.io.IOException;')
            self._writeln('import java.util.ArrayList;')
        self._writeln()

    def close(self):
        """Close the current output file."""
        self.file.close()

    def emit(self, s, depth):
        """Write *s* indented *depth* tabs, reflowing over-long lines."""
        for line in reflow_lines(s, depth):
            self.file.write((" " * TABSIZE * depth) + line + "\n")
# This step will add a 'simple' boolean attribute to all Sum and Product
# nodes and add a 'typedef' link to each Field node that points to the
# Sum or Product node that defines the field.
class AnalyzeVisitor(EmitVisitor):
    """Analysis pass over the ASDL tree.

    Marks every Sum/Product node with a 'simple' flag (1 when no
    constructor carries fields), assigns a unique index to non-simple
    constructors and products, and links each Field to the definition of
    its type via a 'typedef' attribute.
    """

    index = 0

    def makeIndex(self):
        """Return the next unique (1-based) node index."""
        self.index += 1
        return self.index

    def visitModule(self, mod):
        # First build the name -> definition lookup, then analyze each
        # definition with the complete table available.
        self.types = {}
        for defn in mod.dfns:
            self.types[str(defn.name)] = defn.value
        for defn in mod.dfns:
            self.visit(defn)

    def visitType(self, type, depth=0):
        self.visit(type.value, type.name, depth)

    def visitSum(self, sum, name, depth):
        # A sum is 'simple' when none of its constructors has fields
        # (kept as int 1/0 to match downstream expectations).
        sum.simple = 0 if any(variant.fields for variant in sum.types) else 1
        for variant in sum.types:
            if not sum.simple:
                variant.index = self.makeIndex()
            self.visit(variant, name, depth)

    def visitProduct(self, product, name, depth):
        product.simple = 0
        product.index = self.makeIndex()
        for fld in product.fields:
            self.visit(fld, depth + 1)

    def visitConstructor(self, cons, name, depth):
        for fld in cons.fields:
            self.visit(fld, depth + 1)

    def visitField(self, field, depth):
        # None when the field's type is builtin (not defined in the ASDL).
        field.typedef = self.types.get(str(field.type))
# The code generator itself.
#
class JavaVisitor(EmitVisitor):
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
    def visitType(self, type, depth=0):
        # A Type node wraps either a Sum or a Product; dispatch on the
        # wrapped value, carrying the type's name for class naming.
        self.visit(type.value, type.name, depth)
def visitSum(self, sum, name, depth):
if sum.simple and not name == "excepthandler":
self.simple_sum(sum, name, depth)
self.simple_sum_wrappers(sum, name, depth)
else:
self.sum_with_constructor(sum, name, depth)
def simple_sum(self, sum, name, depth):
self.open("ast", "%sType" % name, refersToPythonTree=0)
self.emit('import org.python.antlr.AST;', depth)
self.emit('', 0)
self.emit("public enum %(name)sType {" % locals(), depth)
self.emit("UNDEFINED,", depth + 1)
for i in range(len(sum.types) - 1):
type = sum.types[i]
self.emit("%s," % type.name, depth + 1)
self.emit("%s;" % sum.types[len(sum.types) - 1].name, depth + 1)
self.emit("}", depth)
self.close()
    def simple_sum_wrappers(self, sum, name, depth):
        # Emit one PythonTree wrapper class per enum constant of a simple
        # sum, exposing it to Python as '_ast.<constant>'.  Each wrapper's
        # __int__ returns its 1-based position (used for AST round-trips).
        for i in range(len(sum.types)):
            type = sum.types[i]
            self.open("op", type.name, refersToPythonTree=0)
            self.emit('import org.python.antlr.AST;', depth)
            self.emit('import org.python.antlr.base.%s;' % name, depth)
            self.emit('import org.python.antlr.PythonTree;', depth)
            self.emit('import org.python.core.Py;', depth)
            self.emit('import org.python.core.PyObject;', depth)
            self.emit('import org.python.core.PyString;', depth)
            self.emit('import org.python.core.PyType;', depth)
            self.emit('import org.python.expose.ExposedGet;', depth)
            self.emit('import org.python.expose.ExposedMethod;', depth)
            self.emit('import org.python.expose.ExposedNew;', depth)
            self.emit('import org.python.expose.ExposedSet;', depth)
            self.emit('import org.python.expose.ExposedType;', depth)
            self.emit('', 0)
            self.emit('@ExposedType(name = "_ast.%s", base = %s.class)' % (type.name, name), depth)
            self.emit("public class %s extends PythonTree {" % type.name, depth)
            self.emit('public static final PyType TYPE = PyType.fromClass(%s.class);' % type.name, depth + 1)
            self.emit('', 0)
            # No-arg and PyType constructors.
            self.emit("public %s() {" % (type.name), depth)
            self.emit("}", depth)
            self.emit('', 0)
            self.emit("public %s(PyType subType) {" % (type.name), depth)
            self.emit("super(subType);", depth + 1)
            self.emit("}", depth)
            self.emit('', 0)
            self.emit("@ExposedNew", depth)
            self.emit("@ExposedMethod", depth)
            self.emit("public void %s___init__(PyObject[] args, String[] keywords) {}" % type.name, depth)
            self.emit('', 0)
            self.attributes(type, name, depth);
            # __int__ returns the constant's 1-based ordinal.
            self.emit('@ExposedMethod', depth + 1)
            self.emit('public PyObject __int__() {', depth + 1)
            self.emit("return %s___int__();" % type.name, depth + 2)
            self.emit("}", depth + 1)
            self.emit('', 0)
            self.emit("final PyObject %s___int__() {" % type.name, depth + 1)
            self.emit('return Py.newInteger(%s);' % str(i + 1), depth + 2)
            self.emit("}", depth + 1)
            self.emit('', 0)
            self.emit("}", depth)
            self.close()
    def attributes(self, obj, name, depth):
        # Emit the static '_fields' and '_attributes' PyString arrays plus
        # their exposed getters for the generated class.  '_fields' lists
        # the node's ASDL fields (empty array when there are none);
        # '_attributes' is ['lineno', 'col_offset'] for stmt/expr/
        # excepthandler nodes and empty otherwise.
        field_list = []
        if hasattr(obj, "fields"):
            for f in obj.fields:
                field_list.append('new PyString("%s")' % f.name)
        if len(field_list) > 0:
            self.emit("private final static PyString[] fields =", depth + 1)
            self.emit("new PyString[] {%s};" % ", ".join(field_list), depth+1)
            self.emit('@ExposedGet(name = "_fields")', depth + 1)
            self.emit("public PyString[] get_fields() { return fields; }", depth+1)
            self.emit("", 0)
        else:
            # No fields: expose an empty array.
            self.emit("private final static PyString[] fields = new PyString[0];", depth+1)
            self.emit('@ExposedGet(name = "_fields")', depth + 1)
            self.emit("public PyString[] get_fields() { return fields; }", depth+1)
            self.emit("", 0)
        if str(name) in ('stmt', 'expr', 'excepthandler'):
            # Location-bearing nodes expose lineno/col_offset attributes.
            att_list = ['new PyString("lineno")', 'new PyString("col_offset")']
            self.emit("private final static PyString[] attributes =", depth + 1)
            self.emit("new PyString[] {%s};" % ", ".join(att_list), depth + 1)
            self.emit('@ExposedGet(name = "_attributes")', depth + 1)
            self.emit("public PyString[] get_attributes() { return attributes; }", depth + 1)
            self.emit("", 0)
        else:
            self.emit("private final static PyString[] attributes = new PyString[0];", depth+1)
            self.emit('@ExposedGet(name = "_attributes")', depth + 1)
            self.emit("public PyString[] get_attributes() { return attributes; }", depth+1)
            self.emit("", 0)
    def sum_with_constructor(self, sum, name, depth):
        # Emit the abstract base class for a non-simple sum type into the
        # 'base' package, then visit each constructor so it generates its
        # own concrete subclass.
        self.open("base", "%s" % name)
        self.emit('@ExposedType(name = "_ast.%s", base = AST.class)' % name, depth)
        self.emit("public abstract class %(name)s extends PythonTree {" %
                  locals(), depth)
        self.emit("", 0)
        self.emit("public static final PyType TYPE = PyType.fromClass(%s.class);" % name, depth + 1);
        self.attributes(sum, name, depth);
        # Standard constructor set: no-arg, PyType, (ttype, token), token,
        # and PythonTree copy constructors.
        self.emit("public %(name)s() {" % locals(), depth+1)
        self.emit("}", depth+1)
        self.emit("", 0)
        self.emit("public %(name)s(PyType subType) {" % locals(), depth+1)
        self.emit("}", depth+1)
        self.emit("", 0)
        self.emit("public %(name)s(int ttype, Token token) {" % locals(), depth+1)
        self.emit("super(ttype, token);", depth+2)
        self.emit("}", depth+1)
        self.emit("", 0)
        self.emit("public %(name)s(Token token) {" % locals(), depth+1)
        self.emit("super(token);", depth+2)
        self.emit("}", depth+1)
        self.emit("", 0)
        self.emit("public %(name)s(PythonTree node) {" % locals(), depth+1)
        self.emit("super(node);", depth+2)
        self.emit("}", depth+1)
        self.emit("", 0)
        self.emit("}", depth)
        self.close()
        # Each constructor emits its own subclass file.
        for t in sum.types:
            self.visit(t, name, depth)
    def visitProduct(self, product, name, depth):
        # Emit a concrete PythonTree subclass for a product type: field
        # declarations, the _fields/_attributes arrays, and the common
        # method set (via javaMethods); optionally indexer support for
        # types listed in the module-level indexer_support set.
        self.open("ast", "%s" % name, useDataOutput=1)
        self.emit('@ExposedType(name = "_ast.%s", base = AST.class)' % name, depth)
        self.emit("public class %(name)s extends PythonTree {" % locals(), depth)
        self.emit("public static final PyType TYPE = PyType.fromClass(%s.class);" % name, depth + 1);
        for f in product.fields:
            self.visit(f, depth + 1)
        self.emit("", depth)
        self.attributes(product, name, depth)
        self.javaMethods(product, name, name, True, product.fields,
                         depth+1)
        if str(name) in indexer_support:
            self.indexerSupport(str(name), depth)
        self.emit("}", depth)
        self.close()
def visitConstructor(self, cons, name, depth):
    """Emit the concrete Java class for one constructor of sum type *name*.

    Nodes with an expr_context field implement the Context interface;
    stmt/expr/excepthandler subclasses additionally get exposed lineno and
    col_offset properties backed by the ANTLR token position.
    """
    self.open("ast", cons.name, useDataOutput=1)
    ifaces = []
    for f in cons.fields:
        if str(f.type) == "expr_context":
            ifaces.append("Context")
    if ifaces:
        s = "implements %s " % ", ".join(ifaces)
    else:
        s = ""
    self.emit('@ExposedType(name = "_ast.%s", base = %s.class)' % (cons.name, name), depth);
    self.emit("public class %s extends %s %s{" %
              (cons.name, name, s), depth)
    self.emit("public static final PyType TYPE = PyType.fromClass(%s.class);" % cons.name, depth);
    for f in cons.fields:
        self.visit(f, depth + 1)
    self.emit("", depth)
    self.attributes(cons, name, depth)
    self.javaMethods(cons, name, cons.name, False, cons.fields, depth+1)
    if "Context" in ifaces:
        self.emit("public void setContext(expr_contextType c) {", depth + 1)
        self.emit('this.ctx = c;', depth + 2)
        self.emit("}", depth + 1)
        self.emit("", 0)
    if str(name) in ('stmt', 'expr', 'excepthandler'):
        # The lineno property
        self.emit("private int lineno = -1;", depth + 1)
        self.emit('@ExposedGet(name = "lineno")', depth + 1)
        self.emit("public int getLineno() {", depth + 1)
        self.emit("if (lineno != -1) {", depth + 2);
        self.emit("return lineno;", depth + 3);
        self.emit("}", depth + 2)
        self.emit('return getLine();', depth + 2)
        self.emit("}", depth + 1)
        self.emit("", 0)
        self.emit('@ExposedSet(name = "lineno")', depth + 1)
        self.emit("public void setLineno(int num) {", depth + 1)
        self.emit("lineno = num;", depth + 2);
        self.emit("}", depth + 1)
        self.emit("", 0)
        # The col_offset property
        self.emit("private int col_offset = -1;", depth + 1)
        self.emit('@ExposedGet(name = "col_offset")', depth + 1)
        self.emit("public int getCol_offset() {", depth + 1)
        self.emit("if (col_offset != -1) {", depth + 2);
        self.emit("return col_offset;", depth + 3);
        self.emit("}", depth + 2)
        self.emit('return getCharPositionInLine();', depth + 2)
        self.emit("}", depth + 1)
        self.emit("", 0)
        self.emit('@ExposedSet(name = "col_offset")', depth + 1)
        self.emit("public void setCol_offset(int num) {", depth + 1)
        self.emit("col_offset = num;", depth + 2);
        self.emit("}", depth + 1)
        self.emit("", 0)
    if str(cons.name) in indexer_support:
        self.indexerSupport(str(cons.name), depth)
    self.emit("}", depth)
    self.close()
def javaConstructorHelper(self, fields, depth):
    """Emit the shared constructor body: assign each field, defaulting null
    sequences to empty ArrayLists, and register non-simple children via
    addChild() so ANTLR tree walking still works."""
    for f in fields:
        #if f.seq:
        #    self.emit("this.%s = new %s(%s);" % (f.name,
        #        self.javaType(f), f.name), depth+1)
        #else:
        self.emit("this.%s = %s;" % (f.name, f.name), depth+1)
        fparg = self.fieldDef(f)
        not_simple = True
        if f.typedef is not None and f.typedef.simple:
            not_simple = False
        #For now ignoring String -- will want to revisit
        if not_simple and fparg.find("String") == -1:
            if f.seq:
                self.emit("if (%s == null) {" % f.name, depth+1);
                self.emit("this.%s = new ArrayList<%s>();" % (f.name, self.javaType(f, False)), depth+2)
                self.emit("}", depth+1)
                self.emit("for(PythonTree t : this.%(name)s) {" % {"name":f.name}, depth+1)
                self.emit("addChild(t);", depth+2)
                self.emit("}", depth+1)
            elif str(f.type) == "expr":
                self.emit("addChild(%s);" % (f.name), depth+1)
    #XXX: this method used to emit a pickle(DataOutputStream ostream) for cPickle support.
    #     If we want to re-add it, see Jython 2.2's pickle method in its ast nodes.
def javaMethods(self, type, name, clsname, is_product, fields, depth):
    """Emit the standard method bodies shared by every generated AST class.

    Emits, in order: all constructors (via javaConstructors), toString(),
    toStringTree(), accept(), traverse() and the __dict__ plumbing
    (fastGetDict/getDict/ensureDict).
    """
    self.javaConstructors(type, name, clsname, is_product, fields, depth)

    # The toString() method
    self.emit('@ExposedGet(name = "repr")', depth)
    self.emit("public String toString() {", depth)
    self.emit('return "%s";' % clsname, depth+1)
    self.emit("}", depth)
    self.emit("", 0)

    # The toStringTree() method
    self.emit("public String toStringTree() {", depth)
    self.emit('StringBuffer sb = new StringBuffer("%s(");' % clsname,
              depth+1)
    for f in fields:
        self.emit('sb.append("%s=");' % f.name, depth+1)
        self.emit("sb.append(dumpThis(%s));" % f.name, depth+1)
        self.emit('sb.append(",");', depth+1)
    self.emit('sb.append(")");', depth+1)
    self.emit("return sb.toString();", depth+1)
    self.emit("}", depth)
    self.emit("", 0)

    # The accept() method
    self.emit("public <R> R accept(VisitorIF<R> visitor) throws Exception {", depth)
    if is_product:
        self.emit('traverse(visitor);', depth+1)
        self.emit('return null;', depth+1)
    else:
        self.emit('return visitor.visit%s(this);' % clsname, depth+1)
    self.emit("}", depth)
    self.emit("", 0)

    # The visitChildren() method
    self.emit("public void traverse(VisitorIF<?> visitor) throws Exception {", depth)
    for f in fields:
        # Builtin-typed and simple-enum fields carry no child nodes.
        # (Was dict.has_key(), which no longer exists in Python 3; the
        # `in` operator is equivalent and works on Python 2 as well.)
        if str(f.type) in self.bltinnames:
            continue
        if f.typedef.simple:
            continue
        if f.seq:
            self.emit('if (%s != null) {' % f.name, depth+1)
            self.emit('for (PythonTree t : %s) {' % f.name,
                      depth+2)
            self.emit('if (t != null)', depth+3)
            self.emit('t.accept(visitor);', depth+4)
            self.emit('}', depth+2)
            self.emit('}', depth+1)
        else:
            self.emit('if (%s != null)' % f.name, depth+1)
            self.emit('%s.accept(visitor);' % f.name, depth+2)
    self.emit('}', depth)
    self.emit("", 0)

    # Lazily-created instance __dict__ so Python code can attach
    # arbitrary attributes to AST nodes.
    self.emit('public PyObject __dict__;', depth)
    self.emit("", 0)
    self.emit('@Override', depth)
    self.emit('public PyObject fastGetDict() {', depth)
    self.emit('ensureDict();', depth+1)
    self.emit('return __dict__;', depth+1)
    self.emit('}', depth)
    self.emit("", 0)
    self.emit('@ExposedGet(name = "__dict__")', depth)
    self.emit('public PyObject getDict() {', depth)
    self.emit('return fastGetDict();', depth+1)
    self.emit('}', depth)
    self.emit("", 0)
    self.emit('private void ensureDict() {', depth)
    self.emit('if (__dict__ == null) {', depth+1)
    self.emit('__dict__ = new PyStringMap();', depth+2)
    self.emit('}', depth+1)
    self.emit('}', depth)
    self.emit("", 0)
def javaConstructors(self, type, name, clsname, is_product, fields, depth):
    """Emit all Java constructors plus the exposed ``___init__`` method.

    Generates: the PyType-subtype constructor, a no-arg constructor (when
    there are fields), the ArgParser-based ``<cls>___init__``, a
    PyObject-per-field constructor, and the Token / (ttype, Token) /
    PythonTree constructors used by the ANTLR parser.
    """
    self.emit("public %s(PyType subType) {" % (clsname), depth)
    self.emit("super(subType);", depth + 1)
    self.emit("}", depth)
    if len(fields) > 0:
        self.emit("public %s() {" % (clsname), depth)
        self.emit("this(TYPE);", depth + 1)
        self.emit("}", depth)
        fnames = ['"%s"' % f.name for f in fields]
    else:
        fnames = []
    # stmt/expr/excepthandler accept optional lineno/col_offset keywords.
    if str(name) in ('stmt', 'expr', 'excepthandler'):
        fnames.extend(['"lineno"', '"col_offset"'])
    fpargs = ", ".join(fnames)
    self.emit("@ExposedNew", depth)
    self.emit("@ExposedMethod", depth)
    self.emit("public void %s___init__(PyObject[] args, String[] keywords) {" % clsname, depth)
    self.emit('ArgParser ap = new ArgParser("%s", args, keywords, new String[]' % clsname, depth + 1)
    self.emit('{%s}, %s, true);' % (fpargs, len(fields)), depth + 2)
    i = 0
    for f in fields:
        self.emit("set%s(ap.getPyObject(%s, Py.None));" % (self.processFieldName(f.name),
                  str(i)), depth+1)
        i += 1
    if str(name) in ('stmt', 'expr', 'excepthandler'):
        self.emit("int lin = ap.getInt(%s, -1);" % str(i), depth + 1)
        self.emit("if (lin != -1) {", depth + 1)
        self.emit("setLineno(lin);", depth + 2)
        self.emit("}", depth + 1)
        self.emit("", 0)
        self.emit("int col = ap.getInt(%s, -1);" % str(i+1), depth + 1)
        self.emit("if (col != -1) {", depth + 1)
        # Bug fix: this previously emitted "setLineno(col);", so the
        # parsed col_offset keyword clobbered the line number and the
        # column offset was never stored.
        self.emit("setCol_offset(col);", depth + 2)
        self.emit("}", depth + 1)
        self.emit("", 0)
    self.emit("}", depth)
    self.emit("", 0)
    fpargs = ", ".join(["PyObject %s" % f.name for f in fields])
    self.emit("public %s(%s) {" % (clsname, fpargs), depth)
    for f in fields:
        self.emit("set%s(%s);" % (self.processFieldName(f.name), f.name), depth+1)
    self.emit("}", depth)
    self.emit("", 0)
    token = asdl.Field('Token', 'token')
    token.typedef = False
    fpargs = ", ".join([self.fieldDef(f) for f in [token] + fields])
    self.emit("public %s(%s) {" % (clsname, fpargs), depth)
    self.emit("super(token);", depth+1)
    self.javaConstructorHelper(fields, depth)
    self.emit("}", depth)
    self.emit("", 0)
    ttype = asdl.Field('int', 'ttype')
    ttype.typedef = False
    fpargs = ", ".join([self.fieldDef(f) for f in [ttype, token] + fields])
    self.emit("public %s(%s) {" % (clsname, fpargs), depth)
    self.emit("super(ttype, token);", depth+1)
    self.javaConstructorHelper(fields, depth)
    self.emit("}", depth)
    self.emit("", 0)
    tree = asdl.Field('PythonTree', 'tree')
    tree.typedef = False
    fpargs = ", ".join([self.fieldDef(f) for f in [tree] + fields])
    self.emit("public %s(%s) {" % (clsname, fpargs), depth)
    self.emit("super(tree);", depth+1)
    self.javaConstructorHelper(fields, depth)
    self.emit("}", depth)
    self.emit("", 0)
#Kludge: rename get/setType -> get/setExceptType because "getType" clashes
#with an existing method on PyObject.
def processFieldName(self, name):
    """Capitalize *name* for use in Java getter/setter names, mapping the
    special case "type" to "ExceptType"."""
    capitalized = str(name).capitalize()
    return "ExceptType" if capitalized == "Type" else capitalized
def visitField(self, field, depth):
    """Emit the private Java field plus its internal getter and the
    @ExposedGet/@ExposedSet accessors that convert to/from PyObject via
    the AstAdapters helpers."""
    self.emit("private %s;" % self.fieldDef(field), depth)
    self.emit("public %s getInternal%s() {" % (self.javaType(field),
              str(field.name).capitalize()), depth)
    self.emit("return %s;" % field.name, depth+1)
    self.emit("}", depth)
    self.emit('@ExposedGet(name = "%s")' % field.name, depth)
    self.emit("public PyObject get%s() {" % self.processFieldName(field.name), depth)
    if field.seq:
        self.emit("return new AstList(%s, AstAdapters.%sAdapter);" % (field.name, field.type), depth+1)
    else:
        # Conversion depends on the ASDL builtin type of the field.
        if str(field.type) == 'identifier':
            self.emit("if (%s == null) return Py.None;" % field.name, depth+1)
            self.emit("return new PyString(%s);" % field.name, depth+1)
        elif str(field.type) == 'string' or str(field.type) == 'object':
            self.emit("return (PyObject)%s;" % field.name, depth+1)
        elif str(field.type) == 'bool':
            self.emit("if (%s) return Py.True;" % field.name, depth+1)
            self.emit("return Py.False;", depth+1)
        elif str(field.type) == 'int':
            self.emit("return Py.newInteger(%s);" % field.name, depth+1)
        elif field.typedef.simple:
            self.emit("return AstAdapters.%s2py(%s);" % (str(field.type), field.name), depth+1)
        else:
            self.emit("return %s;" % field.name, depth+1)
            #self.emit("return Py.None;", depth+1)
    self.emit("}", depth)
    self.emit('@ExposedSet(name = "%s")' % field.name, depth)
    self.emit("public void set%s(PyObject %s) {" % (self.processFieldName(field.name), field.name), depth)
    if field.seq:
        #self.emit("this.%s = new %s(" % (field.name, self.javaType(field)), depth+1)
        self.emit("this.%s = AstAdapters.py2%sList(%s);" % (field.name, str(field.type), field.name), depth+1)
    else:
        self.emit("this.%s = AstAdapters.py2%s(%s);" % (field.name, str(field.type), field.name), depth+1)
    self.emit("}", depth)
    self.emit("", 0)
# Mapping from ASDL builtin/enum type names to the Java types used in the
# generated sources; names not listed here map to themselves (see javaType).
bltinnames = {
    'int' : 'Integer',
    'bool' : 'Boolean',
    'identifier' : 'String',
    'string' : 'Object',
    'object' : 'Object', # was PyObject
    #Below are for enums
    'boolop' : 'boolopType',
    'cmpop' : 'cmpopType',
    'expr_context' : 'expr_contextType',
    'operator' : 'operatorType',
    'unaryop' : 'unaryopType',
}
def fieldDef(self, field):
    """Return the Java parameter declaration ("<type> <name>") for *field*."""
    return "%s %s" % (self.javaType(field), field.name)
def javaType(self, field, check_seq=True):
    """Return the Java type name for *field*.

    ASDL builtin/enum names are mapped through ``bltinnames``; sequence
    fields become ``java.util.List<T>`` unless *check_seq* is False.
    """
    base = str(field.type)
    base = self.bltinnames.get(base, base)
    if check_seq and field.seq:
        return "java.util.List<%s>" % base
    return base
def indexerSupport(self, name, depth):
    # Splice the hand-written Java snippet for *name* (from the
    # module-level indexer_support table) verbatim into the output file.
    self.file.write(indexer_support[name])
class VisitorVisitor(EmitVisitor):
    """Generates VisitorIF.java (the generic visitor interface) and
    VisitorBase.java (a default implementation delegating to
    unhandled_node/traverse) from the collected constructor names."""

    def __init__(self, dir):
        EmitVisitor.__init__(self, dir)
        self.ctors = []  # constructor names collected during the walk

    def visitModule(self, mod):
        # First pass: collect every constructor name in the grammar.
        for dfn in mod.dfns:
            self.visit(dfn)
        # Emit the visitor interface.
        self.open("ast", "VisitorIF", refersToPythonTree=0)
        self.emit('public interface VisitorIF<R> {', 0)
        for ctor in self.ctors:
            self.emit("public R visit%s(%s node) throws Exception;" %
                      (ctor, ctor), 1)
        self.emit('}', 0)
        self.close()
        # Emit the abstract base class with default visit methods.
        self.open("ast", "VisitorBase")
        self.emit('public abstract class VisitorBase<R> implements VisitorIF<R> {', 0)
        for ctor in self.ctors:
            self.emit("public R visit%s(%s node) throws Exception {" %
                      (ctor, ctor), 1)
            self.emit("R ret = unhandled_node(node);", 2)
            self.emit("traverse(node);", 2)
            self.emit("return ret;", 2)
            self.emit('}', 1)
            self.emit('', 0)
        self.emit("abstract protected R unhandled_node(PythonTree node) throws Exception;", 1)
        self.emit("abstract public void traverse(PythonTree node) throws Exception;", 1)
        self.emit('}', 0)
        self.close()

    def visitType(self, type, depth=1):
        self.visit(type.value, type.name, depth)

    def visitSum(self, sum, name, depth):
        # Simple sums are represented as enums and get no visit methods.
        if not sum.simple:
            for t in sum.types:
                self.visit(t, name, depth)

    def visitProduct(self, product, name, depth):
        pass

    def visitConstructor(self, cons, name, depth):
        self.ctors.append(cons.name)
class ChainOfVisitors:
    """Runs a fixed sequence of visitors over the same object, in order."""

    def __init__(self, *visitors):
        self.visitors = visitors

    def visit(self, object):
        """Dispatch *object* to every visitor in the chain."""
        for member in self.visitors:
            member.visit(object)
def main(outdir, grammar="Python.asdl"):
    """Parse *grammar* and generate all Java AST sources into *outdir*.

    Exits with status 1 if the grammar fails asdl.check(). AnalyzeVisitor
    runs before the two code-generating visitors.
    """
    mod = asdl.parse(grammar)
    if not asdl.check(mod):
        sys.exit(1)
    c = ChainOfVisitors(AnalyzeVisitor(outdir),
                        JavaVisitor(outdir),
                        VisitorVisitor(outdir))
    c.visit(mod)
# Hand-written Java snippets (extra constructors and Traverseproc
# implementations) spliced verbatim into the generated classes named by the
# keys; see indexerSupport() above.  The string contents are emitted as-is.
indexer_support = {"Attribute": """ // Support for indexer below
private Name attrName;
public Name getInternalAttrName() {
return attrName;
}
public Attribute(Token token, expr value, Name attr, expr_contextType ctx) {
super(token);
this.value = value;
addChild(value);
this.attr = attr.getText();
this.attrName = attr;
this.ctx = ctx;
}
public Attribute(Integer ttype, Token token, expr value, Name attr, expr_contextType ctx) {
super(ttype, token);
this.value = value;
addChild(value);
this.attr = attr.getText();
this.attrName = attr;
this.ctx = ctx;
}
// End indexer support
""",
"ClassDef": """ // Support for indexer below
private Name nameNode;
public Name getInternalNameNode() {
return nameNode;
}
public ClassDef(Token token, Name name, java.util.List<expr> bases, java.util.List<stmt>
body, java.util.List<expr> decorator_list) {
super(token);
this.name = name.getText();
this.nameNode = name;
this.bases = bases;
if (bases == null) {
this.bases = new ArrayList<expr>();
}
for(PythonTree t : this.bases) {
addChild(t);
}
this.body = body;
if (body == null) {
this.body = new ArrayList<stmt>();
}
for(PythonTree t : this.body) {
addChild(t);
}
this.decorator_list = decorator_list;
if (decorator_list == null) {
this.decorator_list = new ArrayList<expr>();
}
for(PythonTree t : this.decorator_list) {
addChild(t);
}
}
// End indexer support
""",
"FunctionDef": """ // Support for indexer below
private Name nameNode;
public Name getInternalNameNode() {
return nameNode;
}
public FunctionDef(Token token, Name name, arguments args, java.util.List<stmt> body,
java.util.List<expr> decorator_list) {
super(token);
this.name = name.getText();
this.nameNode = name;
this.args = args;
this.body = body;
if (body == null) {
this.body = new ArrayList<stmt>();
}
for(PythonTree t : this.body) {
addChild(t);
}
this.decorator_list = decorator_list;
if (decorator_list == null) {
this.decorator_list = new ArrayList<expr>();
}
for(PythonTree t : this.decorator_list) {
addChild(t);
}
}
// End indexer support
""",
"Global": """ // Support for indexer below
private java.util.List<Name> nameNodes;
public java.util.List<Name> getInternalNameNodes() {
return nameNodes;
}
public Global(Token token, java.util.List<String> names, java.util.List<Name> nameNodes) {
super(token);
this.names = names;
this.nameNodes = nameNodes;
}
// End indexer support
""",
"ImportFrom": """ // Support for indexer below
private java.util.List<Name> moduleNames;
public java.util.List<Name> getInternalModuleNames() {
return moduleNames;
}
public ImportFrom(Token token,
String module, java.util.List<Name> moduleNames,
java.util.List<alias> names, Integer level) {
super(token);
this.module = module;
this.names = names;
if (names == null) {
this.names = new ArrayList<alias>();
}
for(PythonTree t : this.names) {
addChild(t);
}
this.moduleNames = moduleNames;
if (moduleNames == null) {
this.moduleNames = new ArrayList<Name>();
}
for(PythonTree t : this.moduleNames) {
addChild(t);
}
this.level = level;
}
// End indexer support
""",
"alias": """ // Support for indexer below
private java.util.List<Name> nameNodes;
public java.util.List<Name> getInternalNameNodes() {
return nameNodes;
}
private Name asnameNode;
public Name getInternalAsnameNode() {
return asnameNode;
}
// [import] name [as asname]
public alias(Name name, Name asname) {
this(java.util.Arrays.asList(new Name[]{name}), asname);
}
// [import] ...foo.bar.baz [as asname]
public alias(java.util.List<Name> nameNodes, Name asname) {
this.nameNodes = nameNodes;
this.name = dottedNameListToString(nameNodes);
if (asname != null) {
this.asnameNode = asname;
this.asname = asname.getInternalId();
}
}
// End indexer support
""",
"arguments": """ // Support for indexer below
private Name varargName;
public Name getInternalVarargName() {
return varargName;
}
private Name kwargName;
public Name getInternalKwargName() {
return kwargName;
}
// XXX: vararg and kwarg are deliberately moved to the end of the
// method signature to avoid clashes with the (Token, List<expr>,
// String, String, List<expr>) version of the constructor.
public arguments(Token token, java.util.List<expr> args, Name vararg, Name kwarg,
java.util.List<expr> defaults) {
super(token);
this.args = args;
if (args == null) {
this.args = new ArrayList<expr>();
}
for(PythonTree t : this.args) {
addChild(t);
}
this.vararg = vararg == null ? null : vararg.getText();
this.varargName = vararg;
this.kwarg = kwarg == null ? null : kwarg.getText();
this.kwargName = kwarg;
this.defaults = defaults;
if (defaults == null) {
this.defaults = new ArrayList<expr>();
}
for(PythonTree t : this.defaults) {
addChild(t);
}
}
// End indexer support
/* Traverseproc implementation */
@Override
public int traverse(Visitproc visit, Object arg) {
int retVal = super.traverse(visit, arg);
if (retVal != 0) {
return retVal;
}
if (args != null) {
for (PyObject ob: args) {
if (ob != null) {
retVal = visit.visit(ob, arg);
if (retVal != 0) {
return retVal;
}
}
}
}
if (defaults != null) {
for (PyObject ob: defaults) {
if (ob != null) {
retVal = visit.visit(ob, arg);
if (retVal != 0) {
return retVal;
}
}
}
}
return 0;
}
@Override
public boolean refersDirectlyTo(PyObject ob) {
if (ob == null) {
return false;
} else if (args != null && args.contains(ob)) {
return true;
} else if (defaults != null && defaults.contains(ob)) {
return true;
} else {
return super.refersDirectlyTo(ob);
}
}
""",
"keyword": """
/* Traverseproc implementation */
@Override
public int traverse(Visitproc visit, Object arg) {
return value != null ? visit.visit(value, arg) : 0;
}
@Override
public boolean refersDirectlyTo(PyObject ob) {
return ob != null && (ob == value || super.refersDirectlyTo(ob));
}
""",
"comprehension": """
/* Traverseproc implementation */
@Override
public int traverse(Visitproc visit, Object arg) {
int retVal = super.traverse(visit, arg);
if (retVal != 0) {
return retVal;
}
if (iter != null) {
retVal = visit.visit(iter, arg);
if (retVal != 0) {
return retVal;
}
}
if (ifs != null) {
for (PyObject ob: ifs) {
if (ob != null) {
retVal = visit.visit(ob, arg);
if (retVal != 0) {
return retVal;
}
}
}
}
return target != null ? visit.visit(target, arg) : 0;
}
@Override
public boolean refersDirectlyTo(PyObject ob) {
if (ob == null) {
return false;
} else if (ifs != null && ifs.contains(ob)) {
return true;
} else {
return ob == iter || ob == target || super.refersDirectlyTo(ob);
}
}
""",
}
# Command-line entry point: python asdl_antlr.py [-o outdir] [grammar]
# NOTE(review): this file uses Python 2 print statements throughout and
# must be run with a Python 2 interpreter.
if __name__ == "__main__":
    import sys
    import getopt

    usage = "Usage: python %s [-o outdir] [grammar]" % sys.argv[0]
    # Default output location, relative to the ast/ directory.
    OUT_DIR = '../src/org/python/antlr/'
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'o:')
    except:
        print usage
        sys.exit(1)
    for o, v in opts:
        if o == '-o' and v != '':
            OUT_DIR = v
    # At most one option and one positional (grammar file) are accepted.
    if len(opts) > 1 or len(args) > 1:
        print usage
        sys.exit(1)
    if len(args) == 1:
        main(OUT_DIR, args[0])
    else:
        main(OUT_DIR)
| alvin319/CarnotKE | jyhton/ast/asdl_antlr.py | Python | apache-2.0 | 37,458 | [
"VisIt"
] | 8671e63ad5752a5adac68d78838260b503191ab4f6c21f2fd174891c2444384d |
"""
Basic visualization of neurite morphologies using matplotlib.
Usage is restricted to morphologies in the SWC format with the three-point soma `standard <http://neuromorpho.org/neuroMorpho/SomaFormat.html>`_.
"""
import sys,time
import os, sys
from matplotlib.cm import get_cmap
from Crypto.Protocol.AllOrNothing import isInt
sys.setrecursionlimit(10000)
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.animation as animation
import pylab as pl
from matplotlib import collections as mc
from PIL import Image
from numpy.linalg import inv
from McNeuron import Neuron
from McNeuron import Node
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.gridspec as gridspec
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
from pylab import plot,subplot,axis,stem,show,figure, Normalize
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
import pylab as pl
import matplotlib
from matplotlib import collections as mc
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
def get_2d_image(path, size, dpi, background, show_width):
    """Render a 2D grayscale xy-projection of the neuron stored at *path*.

    Depth (the z coordinate) is normalized and scaled by *background* to a
    gray level.  Returns the red channel of the rendered figure as a 2D
    uint8 array with a border of dpi/2 pixels cropped away.

    Parameters
    ----------
    path : str
        Path of an SWC file readable by Neuron.
    size : float
        Figure size in inches (one inch is added on each axis).
    dpi : int
        Figure resolution; also determines the cropped border width.
    background : float
        Scale factor applied to the normalized depth (gray level).
    show_width : bool
        If True, draw each segment with width proportional to its diameter.
    """
    # Bug fix: this module imports `Neuron` directly (there is no
    # `McNeuron` name in scope here); matches the usage in generate_data().
    neu = Neuron(file_format = 'swc without attributes', input_file=path)
    depth = neu.location[2,:]
    p = neu.location[0:2,:]
    widths = 5*neu.diameter
    widths[0:3] = 0  # hide the three-point-soma segments
    m = min(depth)
    M = max(depth)
    depth = background * ((depth - m)/(M-m))  # normalize depth to [0, background]
    colors = []
    lines = []
    patches = []
    for i in range(neu.n_soma):
        x1 = neu.location[0,i]
        y1 = neu.location[1,i]
        r = 1*neu.diameter[i]
        circle = Circle((x1, y1), r, color = str(depth[i]), ec = 'none',fc = 'none')
        patches.append(circle)
    pa = PatchCollection(patches, cmap=matplotlib.cm.gray)
    pa.set_array(depth[0]*np.zeros(neu.n_soma))
    for i in range(len(neu.nodes_list)):
        colors.append(str(depth[i]))
        j = neu.parent_index[i]
        lines.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
    if(show_width):
        lc = mc.LineCollection(lines, colors=colors, linewidths = widths)
    else:
        lc = mc.LineCollection(lines, colors=colors)
    fig, ax = plt.subplots()
    ax.add_collection(lc)
    ax.add_collection(pa)
    fig.set_size_inches([size + 1, size + 1])
    fig.set_dpi(dpi)
    plt.axis('off')
    plt.xlim((min(p[0,:]),max(p[0,:])))
    plt.ylim((min(p[1,:]),max(p[1,:])))
    plt.draw()
    # np.frombuffer replaces the deprecated np.fromstring (same bytes;
    # the array is only read below, so a read-only view is fine).
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # Integer division so the slice indices stay ints under Python 3 too.
    border = dpi // 2
    return np.squeeze(data[border:-border,border:-border,0])
def projection_on_plane(neuron, normal_vec = np.array([0,0,1]), distance = 10, resolution = np.array([256,256]), gap = 3.0):
    """Project the neuron's nodes onto a fixed plane and rasterize them.

    NOTE(review): this function appears unfinished/broken and is left
    untouched pending confirmation of the intended behaviour:
    - `project_point` (singular) is not defined anywhere in this module;
    - `project_points` accepts two arguments but is called with three;
    - the `normal_vec` and `distance` parameters are never used;
    - `shift` and the pixel indices are floats under Python 3.

    dependency
    ----------
    This function needs the following data from neuron:
    location, diameter, parent_index
    """
    # projection all the nodes on the plane and finding the right pixel for their centers
    image = np.zeros(resolution)
    shift = resolution[0]/2
    normal_vec1 = np.array([0,0,1])
    normal_vec2 = np.array([0,1,0])
    P = project_points(neuron.location, normal_vec1, normal_vec2)  # NOTE(review): arity mismatch
    for n in neuron.nodes_list:
        if(n.parent != None):
            n1, n2, dis = project_point(n, normal_vec1, normal_vec2)  # NOTE(review): undefined name
            pix1 = np.floor(n1/gap) + shift
            pix2 = np.floor(n2/gap) + shift
            if(0 <= pix1 and 0 <= pix2 and pix1<resolution[0] and pix2 < resolution[1]):
                image[pix1, pix2] = dis
    return image
def project_points(location, normal_vectors):
    """Project 3D points onto the plane spanned by two orthogonal vectors.

    Parameters
    ----------
    location : array of shape [3, n_nodes]
        The 3D coordinates of n_nodes points.
    normal_vectors : array of shape [2, 3]
        Two mutually orthogonal vectors spanning the projection plane.

    Returns
    -------
    array of shape [2, n_nodes]
        The in-plane coordinates of each point.
    """
    return np.dot(normal_vectors, location)
def depth_points(location, orthogonal_vector):
    """Signed coordinate of each point along the plane's normal direction.

    Parameters
    ----------
    location : array of shape [3, n_nodes]
        The 3D coordinates of n_nodes points.
    orthogonal_vector : array of shape [3]
        Vector orthogonal to the projection plane.

    Returns
    -------
    array of shape [n_nodes]
        The depth of each point relative to the plane.
    """
    return np.dot(orthogonal_vector, location)
def make_image(neuron, A, scale_depth, index_neuron):
    """Render one 2D view of *neuron* using the 3x3 basis *A*.

    Rows 0-1 of A span the projection plane; row 2 supplies the depth
    direction, mapped to gray level in [0, scale_depth].  The figure is
    saved as "neuron<i>resample<j>angle<k>.png" (indices from
    *index_neuron*, 1-based) and the red channel of that file is returned
    as a 2D int32 array.

    NOTE(review): `figsize` is passed to fig.savefig, which is not a
    documented savefig argument in modern matplotlib — confirm.
    """
    normal_vectors = A[0:2,:]
    orthogonal_vector = A[2,:]
    depth = depth_points(neuron.location, orthogonal_vector)
    p = project_points(neuron.location, normal_vectors)
    # Normalize depth into [0, scale_depth] for the gray level.
    m = min(depth)
    M = max(depth)
    depth = scale_depth * ((depth - m)/(M-m))
    colors = []
    lines = []
    for i in range(len(neuron.nodes_list)):
        colors.append((depth[i],depth[i],depth[i],1))
        j = neuron.parent_index[i]
        lines.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
    lc = mc.LineCollection(lines, colors=colors, linewidths=2)
    fig, ax = pl.subplots()
    ax.add_collection(lc)
    pl.axis('off')
    pl.xlim((min(p[0,:]),max(p[0,:])))
    pl.ylim((min(p[1,:]),max(p[1,:])))
    Name = "neuron" + str(index_neuron[0]+1) + "resample" + str(index_neuron[1]+1) + "angle" + str(index_neuron[2]+1) + ".png"
    fig.savefig(Name,figsize=(6, 6), dpi=80)
    # Read the rendered file back and keep only the red channel.
    img = Image.open(Name)
    img.load()
    data = np.asarray( img, dtype="int32" )
    data = data[:,:,0]
    return data
def random_unitary_basis(kappa):
    """Return a random 3x3 rotation matrix (orthonormal basis).

    Composes six independent single-axis rotations whose angles are drawn
    from a von Mises distribution with concentration *kappa* (large kappa
    keeps the basis close to the identity).

    Bug fix: the original computed Ax2 but never used it, and overwrote
    Ay1/Az1 with the second draws; the second triple is now Ax2/Ay2/Az2
    as clearly intended.
    """
    Ax1 = random_2d_rotation_in_3d('x', kappa)
    Ay1 = random_2d_rotation_in_3d('y', kappa)
    Az1 = random_2d_rotation_in_3d('z', kappa)
    Ax2 = random_2d_rotation_in_3d('x', kappa)
    Ay2 = random_2d_rotation_in_3d('y', kappa)
    Az2 = random_2d_rotation_in_3d('z', kappa)
    A = np.dot(np.dot(Ax1, Ay1), Az1)
    B = np.dot(np.dot(Az2, Ay2), Ax2)
    return np.dot(A, B)
def random_2d_rotation_in_3d(axis, kappa):
    """Rotation matrix about one coordinate axis by a random von Mises angle.

    Parameters
    ----------
    axis : str
        One of 'x', 'y', 'z'.
    kappa : float
        Concentration of the von Mises distribution (mean angle 0);
        large kappa concentrates the angle near zero.

    Returns
    -------
    3x3 numpy array, or None if *axis* is not recognized (behaviour
    preserved from the original implementation).
    """
    # [0] extracts a scalar; the original assigned a size-1 array into
    # matrix cells, which numpy only tolerates via broadcasting.
    theta = np.random.vonmises(0, kappa, 1)[0]
    A = np.eye(3)
    # Bug fix: the original compared strings with `is`, which relies on
    # CPython string interning; `==` compares values.
    if axis == 'z':
        A[0,0] = np.cos(theta)
        A[1,0] = np.sin(theta)
        A[0,1] = - np.sin(theta)
        A[1,1] = np.cos(theta)
        return A
    if axis == 'y':
        A[0,0] = np.cos(theta)
        A[2,0] = np.sin(theta)
        A[0,2] = - np.sin(theta)
        A[2,2] = np.cos(theta)
        return A
    if axis == 'x':
        A[1,1] = np.cos(theta)
        A[2,1] = np.sin(theta)
        A[1,2] = - np.sin(theta)
        A[2,2] = np.cos(theta)
        return A
def make_six_matrix(A):
    """Return the six bases obtained by permuting the rows of *A*.

    Each permutation reassigns which rows act as the projection plane and
    which acts as the depth axis, yielding the six canonical views.
    """
    row_orders = ([0, 1, 2], [0, 2, 1], [1, 2, 0],
                  [1, 0, 2], [2, 0, 1], [2, 1, 0])
    return [A[order, :] for order in row_orders]
def make_six_images(neuron, scale_depth, neuron_index, kappa):
    """Render the six axis-aligned views of *neuron* via make_image.

    *kappa* is accepted for signature compatibility but unused: the random
    basis (random_unitary_basis) is disabled and the identity basis is
    used, exactly as in the original implementation.
    """
    views = make_six_matrix(np.eye(3))
    images = []
    for view_index in range(6):
        full_index = np.append(neuron_index, view_index)
        images.append(make_image(neuron, views[view_index], scale_depth, full_index))
    return images
def generate_data(path, scale_depth, n_camrea, kappa):
    """Render six projection images per camera angle for every SWC file.

    input
    -----
    path : list
        list of all the pathes of swc. each element of the list should be a string.
    scale_depth : float in the interval [0,1]
        a value to differentiate between the background and gray level in the image.
    n_camera : int
        number of different angles to set the six images. For each angle, six images will be generated (up, down and four sides).
    kappa : float
        The width of the distribution that the angles come from. Large values of kappa result in angles close to the x axis;
        kappa = 1 is equivalent to a random angle.
    output
    ------
    Data : list
        one array of the six rendered views per (neuron, camera) pair;
        neurons whose SWC file yields no nodes are skipped.
    """
    Data = []
    for i in range(len(path)):
        print path[i]
        neuron = Neuron(file_format = 'swc without attributes', input_file=path[i])
        # Skip files that produced an empty morphology.
        if(len(neuron.nodes_list) != 0):
            for j in range(n_camrea):
                D = np.asarray(make_six_images(neuron,scale_depth,np.array([i,j]), kappa))
                Data.append(D)
    return Data
def get_all_path(directory):
    """Recursively collect the paths of all ``.swc`` files under *directory*.

    Parameters
    ----------
    directory : str
        Root folder to search.

    Returns
    -------
    list of str
        Full path of every file whose name ends in ``.swc``.
    """
    fileSet = []
    for root, dirs, files in os.walk(directory):
        for fileName in files:
            # endswith('.swc') instead of fileName[-3:] == 'swc' so that a
            # file named e.g. 'fooswc' (no dot) is not picked up.
            if fileName.endswith('.swc'):
                # os.path.join handles separators correctly, unlike the
                # original manual string concatenation.
                fileSet.append(os.path.join(root, fileName))
    return fileSet
def plot_2d(neuron, show_depth, line_width):
    """Plot the xy projection of *neuron* with matplotlib.

    When show_depth is True each segment's gray level encodes normalized
    depth; otherwise all segments are black.

    NOTE(review): depth here is taken from location[0,:] (the x
    coordinate), unlike plot_2D which uses location[2,:] — confirm this
    is intentional.
    """
    depth = neuron.location[0,:]
    m = min(depth)
    M = max(depth)
    depth = ((depth - m)/(M-m))  # normalize to [0, 1]
    p = neuron.location[0:2,:]
    colors = []
    lines = []
    for i in range(len(neuron.nodes_list)):
        colors.append((depth[i],depth[i],depth[i],1))
        j = neuron.parent_index[i]
        lines.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
    if(show_depth == False):
        lc = mc.LineCollection(lines, colors='k', linewidths=line_width)
    else:
        lc = mc.LineCollection(lines, colors=colors, linewidths=line_width)
    fig, ax = pl.subplots()
    ax.add_collection(lc)
    pl.axis('off')
    pl.xlim((min(p[0,:]),max(p[0,:])))
    pl.ylim((min(p[1,:]),max(p[1,:])))
def plot_dendrograph(neuron):
    """Placeholder for dendrogram plotting; not implemented yet.

    TODO: implement the actual dendrograph rendering.  Currently only
    prints a constant marker, as in the original stub.
    """
    # print(...) with a single argument behaves identically under
    # Python 2 (parenthesized expression) and Python 3.
    print(1)
def plot_2D(neuron,
            background = 1,
            show_width = False,
            show_depth = False,
            size = 5,
            dpi = 80,
            line_width = 1,
            show_soma = False,
            give_image = False,
            red_after = False,
            node_red = 0,
            translation = (0,0),
            scale_on = False,
            scale = (1,1),
            save = []):
    """Plot the xy projection of *neuron*; depth (z) is encoded as a gray
    level scaled by *background*.

    If give_image is True, returns matplotlib LineCollections plus the x/y
    limits instead of drawing: with red_after also True, segments whose
    connection entry for *node_red* is non-NaN are returned as a separate
    red collection.  Otherwise draws the figure (optionally with soma
    patches) and saves it to *save* as EPS when *save* is non-empty.

    NOTE(review): `save = []` is a mutable default; it is only read here,
    but fragile.  When scale_on is True, p is scaled in place, i.e. the
    neuron's own location array is modified (side effect).
    """
    depth = neuron.location[2,:]
    p = neuron.location[0:2,:]
    if scale_on:
        # Rescale x/y into [0, scale[0]] x [0, scale[1]] (in place).
        p[0,:] = scale[0] * (p[0,:]-min(p[0,:]))/(max(p[0,:]) - min(p[0,:]) )
        p[1,:] = scale[1] * (p[1,:]-min(p[1,:]))/(max(p[1,:]) - min(p[1,:]) )
    widths = neuron.diameter
    #widths[0:3] = 0
    m = min(depth)
    M = max(depth)
    depth = background * ((depth - m)/(M-m))  # normalize to [0, background]
    colors = []
    lines = []
    patches = []
    # Soma nodes become circles; everything else becomes line segments.
    for i in range(neuron.n_soma):
        x1 = neuron.location[0,i] + translation[0]
        y1 = neuron.location[1,i] + translation[1]
        r = widths[i]
        circle = Circle((x1, y1), r, color = str(depth[i]), ec = 'none',fc = 'none')
        patches.append(circle)
    pa = PatchCollection(patches, cmap=matplotlib.cm.gray)
    pa.set_array(depth[0]*np.zeros(neuron.n_soma))
    for i in range(len(neuron.nodes_list)):
        colors.append(str(depth[i]))
        j = neuron.parent_index[i]
        lines.append([(p[0,i] + translation[0],p[1,i] + translation[1]),(p[0,j] + translation[0],p[1,j] + translation[1])])
    if(show_width):
        if(show_depth):
            lc = mc.LineCollection(lines, colors=colors, linewidths = line_width*widths)
        else:
            lc = mc.LineCollection(lines, linewidths = line_width*widths)
    else:
        if(show_depth):
            lc = mc.LineCollection(lines, colors=colors, linewidths = line_width)
        else:
            lc = mc.LineCollection(lines, linewidths = line_width, color = 'k')
    if(give_image):
        if(red_after):
            # Split segments by whether they are connected to node_red.
            line1 = []
            line2 = []
            (I1,) = np.where(~np.isnan(neuron.connection[:,node_red]))
            (I2,) = np.where(np.isnan(neuron.connection[:,node_red]))
            for i in I1:
                j = neuron.parent_index[i]
                line1.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
            lc1 = mc.LineCollection(line1, linewidths = 2*line_width, color = 'r')
            for i in I2:
                j = neuron.parent_index[i]
                line2.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
            lc2 = mc.LineCollection(line2, linewidths = line_width, color = 'k')
            return (lc1, lc2, (min(p[0,:]),max(p[0,:])), (min(p[1,:]),max(p[1,:])))
        else:
            return (lc, (min(p[0,:]),max(p[0,:])), (min(p[1,:]),max(p[1,:])))
    else:
        fig, ax = plt.subplots()
        ax.add_collection(lc)
        if(show_soma):
            ax.add_collection(pa)
        fig.set_size_inches([size + 1, size + 1])
        fig.set_dpi(dpi)
        plt.axis('off')
        plt.xlim((min(p[0,:]),max(p[0,:])))
        plt.ylim((min(p[1,:]),max(p[1,:])))
        plt.draw()
        if(len(save)!=0):
            plt.savefig(save, format = "eps")
# def plot_2D(neuron, background = 1, show_width = False, show_depth = False , size = 5, dpi = 80, line_width = 1):
# depth = neuron.location[2,:]
# p = neuron.location[0:2,:]
# widths= neuron.diameter
# m = min(depth)
# M = max(depth)
# depth = background * ((depth - m)/(M-m))
# colors = []
# lines = []
# patches = []
#
# for i in range(neuron.n_soma):
# x1 = neuron.location[0,i]
# y1 = neuron.location[1,i]
# r = neuron.diameter[i]
# circle = Circle((x1, y1), r, color = str(depth[i]), ec = 'none',fc = 'none')
# patches.append(circle)
#
# pa = PatchCollection(patches, cmap=matplotlib.cm.gray)
# pa.set_array(depth[0]*np.zeros(neuron.n_soma))
#
# for i in range(len(neuron.nodes_list)):
# colors.append(str(depth[i]))
# j = neuron.parent_index[i]
# lines.append([(p[0,i],p[1,i]),(p[0,j],p[1,j])])
# if(show_width):
# if(show_depth):
# lc = mc.LineCollection(lines, colors=colors, linewidths = line_width*widths)
# else:
# lc = mc.LineCollection(lines, linewidths = line_width*widths)
# else:
# if(show_depth):
# lc = mc.LineCollection(lines, colors=colors, linewidths = line_width)
# else:
# lc = mc.LineCollection(lines, linewidths = line_width)
#
# fig, ax = plt.subplots()
# ax.add_collection(lc)
# #ax.add_collection(pa)
# fig.set_size_inches([size + 1, size + 1])
# fig.set_dpi(dpi)
# plt.axis('off')
# plt.xlim((min(p[0,:]),max(p[0,:])))
# plt.ylim((min(p[1,:]),max(p[1,:])))
# plt.draw()
# return fig
def plot_3D(neuron, color_scheme="default", color_mapping=None,
            synapses=None, save_image="animation", show_radius=True):
    """
    3D matplotlib plot of a neuronal morphology. The SWC has to be formatted
    with a "three point soma". Colors can be provided and synapse locations
    marked.

    Parameters
    -----------
    color_scheme: string
        "default" or "neuromorpho". "neuromorpho" is high contrast.
    color_mapping: list[float] or list[list[float,float,float,float]]
        Default is None. If present, this is a list[N] of colors
        where N is the number of compartments, which roughly corresponds to the
        number of lines in the SWC file. If in format of list[float], this list
        is normalized and mapped to the jet color map; if in format of
        list[list[float,float,float,float]], the 4 floats represent R,G,B,A
        respectively and must be between 0-255. When not None, this argument
        overrides the color_scheme argument (note the difference with segments).
    synapses : vector of bools
        Default is None. If present, draw a circle or dot in a distinct color
        at the location of the corresponding compartment. This is a
        1xN vector.
    save_image: string
        Default is "animation". If not None, should be in format
        "file_name.extension", and the figure produced will be saved under
        this filename.
    show_radius : boolean
        True (default) to plot the actual radius. If set to False,
        a fixed line width will be taken from `btmorph2/config.py`.
    """
    if not show_radius:
        # Constant width used for every segment when real radii are hidden.
        plot_radius = config.fake_radius
    if color_scheme == 'default':
        my_color_list = config.c_scheme_default['neurite']
    elif color_scheme == 'neuromorpho':
        my_color_list = config.c_scheme_nm['neurite']
    else:
        raise Exception("Not valid color scheme")

    fig, ax = plt.subplots()
    if color_mapping is not None:
        # Scalar mapping (the docstring allows ints or floats): normalize onto
        # the jet color map and draw a throwaway contour so a colorbar with
        # the right range can be attached at the end.
        if isinstance(color_mapping[0], (int, float)):
            jet = plt.get_cmap('jet')
            norm = colors.Normalize(np.min(color_mapping), np.max(color_mapping))
            scalarMap = cm.ScalarMappable(norm=norm, cmap=jet)
            Z = [[0, 0], [0, 0]]
            levels = np.linspace(np.min(color_mapping), np.max(color_mapping), 100)
            CS3 = plt.contourf(Z, levels, cmap=jet)
            plt.clf()
    ax = fig.gca(projection='3d')

    index = 0
    for node in neuron.nodes_list:  # not ordered but that has little importance here
        # Draw a line segment from parent to current point.
        c_x = node.xyz[0]
        c_y = node.xyz[1]
        c_z = node.xyz[2]
        c_r = node.r
        if index < 3:
            # Skip the three soma points of the "three point soma" format.
            pass
        else:
            parent = node.parent
            p_x = parent.xyz[0]
            p_y = parent.xyz[1]
            p_z = parent.xyz[2]
            if not show_radius:
                line_width = plot_radius
            else:
                line_width = c_r / 2.0
            if color_mapping is None:
                # NOTE(review): set_type_from_name() is used as a 1-based index
                # into the color list -- confirm it returns the SWC type id.
                ax.plot([p_x, c_x], [p_y, c_y], [p_z, c_z], my_color_list[node.set_type_from_name() - 1], linewidth=line_width)
            else:
                if isinstance(color_mapping[0], (int, float)):
                    c = scalarMap.to_rgba(color_mapping[index])
                elif isinstance(color_mapping[0], list):
                    c = [float(x) / 255 for x in color_mapping[index]]
                # Honor show_radius here too (previously hard-coded c_r/2.0).
                ax.plot([p_x, c_x], [p_y, c_y], [p_z, c_z], c=c, linewidth=line_width)
            # Mark the synapses.
            if synapses is not None:
                if synapses[index]:
                    ax.scatter(c_x, c_y, c_z, c='r')
        index += 1

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    if color_mapping is not None:
        if isinstance(color_mapping[0], (int, float)):
            cb = plt.colorbar(CS3)  # bit of a workaround, but it seems to work
            ticks_f = np.linspace(np.min(color_mapping) - 1, np.max(color_mapping) + 1, 5)
            # Materialize the ticks: map() returns an iterator on Python 3.
            ticks_i = [int(t) for t in ticks_f]
            cb.set_ticks(ticks_i)
    # Set the background color.
    fig = plt.gcf()
    ax = fig.gca()
    if color_scheme == 'default':
        ax.set_axis_bgcolor(config.c_scheme_default['bg'])
    elif color_scheme == 'neuromorpho':
        ax.set_axis_bgcolor(config.c_scheme_nm['bg'])
    # Save before show() so the rendered (non-blank) figure is written.
    if save_image is not None:
        plt.savefig(save_image)
    plt.show()
    return fig
def animate(neuron, color_scheme="default", color_mapping=None,
            synapses=None, save_image=None, axis="z"):
    """
    Rotating 3D matplotlib animation of a neuronal morphology. The SWC has to
    be formatted with a "three point soma". Colors can be provided and synapse
    locations marked.

    Parameters
    -----------
    color_scheme: string
        "default" or "neuromorpho". "neuromorpho" is high contrast.
    color_mapping: list[float] or list[list[float,float,float,float]]
        Default is None. If present, this is a list[N] of colors
        where N is the number of compartments, which roughly corresponds to the
        number of lines in the SWC file. If in format of list[float], this list
        is normalized and mapped to the jet color map; if in format of
        list[list[float,float,float,float]], the 4 floats represent R,G,B,A
        respectively and must be between 0-255. When not None, this argument
        overrides the color_scheme argument (note the difference with segments).
    synapses : vector of bools
        Default is None. If present, draw a circle or dot in a distinct color
        at the location of the corresponding compartment. This is a
        1xN vector.
    save_image: string
        Default is None. Currently unused: the anim.save calls are commented
        out below, so the animation is only returned, never written to disk.
    axis: string
        Default "z". Currently unused; the rotation is always about the
        vertical axis (see _animate_rotation).
    """
    if color_scheme == 'default':
        my_color_list = config.c_scheme_default['neurite']
    elif color_scheme == 'neuromorpho':
        my_color_list = config.c_scheme_nm['neurite']
    else:
        raise Exception("Not valid color scheme")
    # print-function form keeps this line valid on both Python 2 and 3.
    print("my_color_list: %s" % (my_color_list,))

    fig, ax = plt.subplots()
    if color_mapping is not None:
        # Scalar mapping: normalize onto the jet color map (see plot_3D).
        if isinstance(color_mapping[0], (int, float)):
            jet = plt.get_cmap('jet')
            norm = colors.Normalize(np.min(color_mapping), np.max(color_mapping))
            scalarMap = cm.ScalarMappable(norm=norm, cmap=jet)
            Z = [[0, 0], [0, 0]]
            levels = np.linspace(np.min(color_mapping), np.max(color_mapping), 100)
            CS3 = plt.contourf(Z, levels, cmap=jet)
            plt.clf()
    ax = fig.gca(projection='3d')

    index = 0
    for node in neuron.nodes_list:  # not ordered but that has little importance here
        # Draw a line segment from parent to current point.
        c_x = node.xyz[0]
        c_y = node.xyz[1]
        c_z = node.xyz[2]
        c_r = node.r
        if index < 3:
            # Skip the three soma points of the "three point soma" format.
            pass
        else:
            parent = node.parent
            p_x = parent.xyz[0]
            p_y = parent.xyz[1]
            p_z = parent.xyz[2]
            if color_mapping is None:
                ax.plot([p_x, c_x], [p_y, c_y], [p_z, c_z], my_color_list[node.set_type_from_name() - 1], linewidth=c_r/2.0)
            else:
                if isinstance(color_mapping[0], (int, float)):
                    c = scalarMap.to_rgba(color_mapping[index])
                elif isinstance(color_mapping[0], list):
                    c = [float(x) / 255 for x in color_mapping[index]]
                ax.plot([p_x, c_x], [p_y, c_y], [p_z, c_z], c=c, linewidth=c_r/2.0)
            # Mark the synapses.
            if synapses is not None:
                if synapses[index]:
                    ax.scatter(c_x, c_y, c_z, c='r')
        index += 1

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    if color_mapping is not None:
        if isinstance(color_mapping[0], (int, float)):
            cb = plt.colorbar(CS3)  # bit of a workaround, but it seems to work
            ticks_f = np.linspace(np.min(color_mapping) - 1, np.max(color_mapping) + 1, 5)
            # Materialize the ticks: map() returns an iterator on Python 3.
            ticks_i = [int(t) for t in ticks_f]
            cb.set_ticks(ticks_i)
    # Set the background color.
    fig = plt.gcf()
    ax = fig.gca()
    if color_scheme == 'default':
        ax.set_axis_bgcolor(config.c_scheme_default['bg'])
    elif color_scheme == 'neuromorpho':
        ax.set_axis_bgcolor(config.c_scheme_nm['bg'])

    anim = animation.FuncAnimation(fig, _animate_rotation, fargs=(ax,), frames=60)
    # Keep a reference on the figure: FuncAnimation stops (is garbage
    # collected) if the only reference is this local variable.
    fig._anim = anim
    # anim.save(save_image + ".gif", writer='imagemagick', fps=4)
    # anim.save(save_image + ".gif", writer='ffmpeg', fps=4)
    return fig
def _animate_rotation(nframe,fargs):
fargs.view_init(elev=0, azim=nframe*6)
def plot_3D_Forest(neuron, color_scheme="default", save_image=None):
    """
    3D matplotlib plot of a neuronal morphology. The Forest has to be
    formatted with a "three point soma".

    Parameters
    -----------
    color_scheme: string
        "default" or "neuromorpho". "neuromorpho" is high contrast.
    save_image: string
        Default is None. If present, should be in format
        "file_name.extension", and the figure produced will be saved under
        this filename.
    """
    # Resolve some potentially conflicting arguments.
    if color_scheme == 'default':
        my_color_list = config.c_scheme_default['neurite']
    elif color_scheme == 'neuromorpho':
        my_color_list = config.c_scheme_nm['neurite']
    else:
        raise Exception("Not valid color scheme")
    # print-function form keeps this line valid on both Python 2 and 3.
    print("my_color_list: %s" % (my_color_list,))

    fig, ax = plt.subplots()
    ax = fig.gca(projection='3d')
    index = 0
    for node in neuron.nodes_list:
        c_x = node.xyz[0]
        c_y = node.xyz[1]
        c_z = node.xyz[2]
        c_r = node.r
        if index < 3:
            # Skip the three soma points of the "three point soma" format.
            pass
        else:
            # Draw a line segment from parent to current point.
            parent = node.parent
            p_x = parent.xyz[0]
            p_y = parent.xyz[1]
            p_z = parent.xyz[2]
            # NOTE(review): set_type_from_name() is used as a 1-based index
            # into the color list -- confirm it returns the SWC type id.
            ax.plot([p_x, c_x], [p_y, c_y], [p_z, c_z], my_color_list[node.set_type_from_name() - 1], linewidth=c_r/2.0)
        index += 1
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # Save before show() so the rendered (non-blank) figure is written;
    # previously savefig ran after the blocking show() call.
    if save_image is not None:
        plt.savefig(save_image)
    plt.show()
    return fig
def important_node_full_matrix(neuron):
    """Return the full (transitive) connectivity matrix restricted to the
    "important" nodes of the neuron: branch points (branch_order == 2) and
    end points (branch_order == 0), with the root prepended.

    The adjacency matrix over these nodes is inverted via B = (I - A)^-1,
    so B[i, j] != 0 whenever j is an ancestor of i within the reduced tree.
    """
    (branch_index,) = np.where(neuron.branch_order == 2)
    (end_nodes,) = np.where(neuron.branch_order == 0)
    important_node = np.append(branch_index, end_nodes)
    parent_important = neuron.parent_index_for_node_subset(important_node)
    important_node = np.append(0, important_node)
    # Map each parent index to its position inside important_node.
    L = []
    for i in parent_important:
        (j,) = np.where(important_node == i)
        L = np.append(L, j)
    # np.append on [] produces a float64 array; indices must be integers
    # (float indexing raises IndexError on modern NumPy).
    L = L.astype(int)
    matrix = np.zeros([len(L), len(L)])
    for i in range(len(L)):
        if L[i] != 0:
            matrix[i, L[i] - 1] = 1
    B = inv(np.eye(len(L)) - matrix)
    return B
def decompose_immediate_children(matrix):
    """
    Parameters
    ----------
    matrix : numpy array of shape (n,n)
        The connection matrix: matrix[i, j] is one if j is an ancestor
        ("grandparent") of i.

    Return
    ------
    List : list of numpy array of square shape
        The decomposition of `matrix` into the sub-matrices rooted at the
        immediate children of the root, ordered by increasing size.
    """
    # Immediate children of the root have exactly one ancestor.
    ancestor_counts = matrix.sum(axis=1)
    (children,) = np.where(ancestor_counts == 1)
    subtrees = []
    for child in children:
        # Rows whose ancestor set contains this child form its subtree.
        (rows,) = np.where(matrix[:, child] == 1)
        rows = rows[rows != child]
        subtrees.append(matrix[np.ix_(rows, rows)])
    # Sort the subtrees by their number of nodes.
    sizes = np.zeros(len(subtrees))
    for k, sub in enumerate(subtrees):
        sizes[k] = sub.shape[0]
    order = np.argsort(sizes)
    return [subtrees[k] for k in order]
def box(x_min, x_max, y, matrix, line):
    """Recursively compute the dendrogram line segments for the tree.

    The horizontal band [x_min, x_max] at height `y` is split among the
    root's immediate children in proportion to their subtree sizes; one
    segment is appended to `line` (mutated in place) per child, connecting
    the parent's midpoint to the child's midpoint one level down.

    Returns the same `line` list for convenience.
    """
    subtrees = decompose_immediate_children(matrix)
    # length[k+1] holds the node count (subtree + the child itself) of child k.
    length = np.zeros(len(subtrees) + 1)
    for k, sub in enumerate(subtrees):
        length[k + 1] = sub.shape[0] + 1
    total = sum(length)
    for k, sub in enumerate(subtrees):
        x_left = x_min + (x_max - x_min) * (sum(length[0:k + 1]) / total)
        x_right = x_min + (x_max - x_min) * (sum(length[0:k + 2]) / total)
        line.append([((x_min + x_max) / 2., y), ((x_left + x_right) / 2., y - 1)])
        if sub.shape[0] > 0:
            box(x_left, x_right, y - 1, sub, line)
    return line
def plot_dedrite_tree(neuron, save=""):
    """Plot the neuron's dendrogram (branch/end-point topology only).

    Parameters
    ----------
    neuron : the neuron object to summarize.
    save : str
        If non-empty, path to write the figure to (EPS format). The default
        was previously a mutable `[]`; any falsy value means "don't save",
        so existing callers are unaffected.
    """
    B = important_node_full_matrix(neuron)
    segments = box(0., 1., 0., B, [])
    # Lowest y reached by any segment, used to size the vertical axis.
    min_y = 0
    for seg in segments:
        min_y = min(min_y, seg[1][1])
    lc = mc.LineCollection(segments)
    fig, ax = plt.subplots()
    ax.add_collection(lc)
    plt.axis('off')
    plt.xlim((0, 1))
    plt.ylim((min_y, 0))
    plt.draw()
    if save:
        plt.savefig(save, format="eps")
| RoozbehFarhoodi/McNeuron | McNeuron/visualize.py | Python | mit | 27,682 | [
"NEURON"
] | b640b872edf49cbc4df887a0bcef83d9dda96b431f00aefbe4ecb68113ce3791 |
# Minimal Brian (v1) simulation of an exponential integrate-and-fire neuron:
# the membrane potential X drifts upward ever faster as it approaches the
# threshold XT, and is reset to -65 mV whenever it crosses it.
from brian import (NeuronGroup, Network, StateMonitor,
                   second, ms, volt, mV)
import numpy as np  # NOTE(review): unused in this script
import matplotlib.pyplot as plt

network = Network()

# Model parameters.
XT = -50*mV            # spike threshold
DeltaT = 0.05*mV/ms    # slope factor of the exponential term
# NOTE(review): in the textbook exponential I&F model the quantity dividing
# (X - XT) inside exp() is a voltage, while DeltaT here carries volt/time
# units -- confirm the intended dimensional analysis.
eqs = "dX/dt = DeltaT*exp((X-XT)/DeltaT) : volt"

# Single neuron; reset to -65 mV on threshold crossing.
neuron = NeuronGroup(1, eqs, threshold="X>=XT", reset=-65*mV)
neuron.X = -65*mV  # start at the reset potential
network.add(neuron)

# Record the membrane potential over the whole run.
vmon = StateMonitor(neuron, "X", record=True)
network.add(vmon)

network.run(1*second)

# Plot the voltage trace of neuron 0.
plt.figure("Voltage")
plt.plot(vmon.times, vmon[0])
plt.show()
| achilleas-k/brian-scripts | expo_iaf.py | Python | apache-2.0 | 502 | [
"Brian",
"NEURON"
] | 43eb5fd95b8637b06b93fbd00933a9ff2d60c341fb8e7eb983d2427561fb5877 |
"""Tests for setendings.py.
Checks that `end_lineno` and `end_col_offset` node properties are set.
"""
import unittest
from python_ta.transforms.setendings import *
# Directory containing the small example files that each test parses.
PATH = 'examples/ending_locations/'
class TestEndingLocations(unittest.TestCase):
    """The method, ending_transformer.visit(module) walks the given astroid
    *tree* and transform each encountered node. Only the nodes which have
    transforms registered will actually be replaced or changed.
    We store the correct values as a tuple:
    (fromlineno, end_lineno, col_offset, end_col_offset)
    """

    def get_file_as_module(self, file_location):
        """Given a filepath (file_location), parse with astroid, and return
        the module.
        """
        # NOTE(review): relies on the platform default encoding -- confirm the
        # fixture files under PATH are ASCII/UTF-8.
        with open(file_location) as f:
            content = f.read()
        return self.get_string_as_module(content)

    def get_string_as_module(self, string):
        """Parse the string with astroid, and return the module.
        Also initialize the ending transformer here.
        """
        source_lines = string.split('\n')
        # Instantiate a visitor, and register the transform functions to it.
        self.ending_transformer = init_register_ending_setters(source_lines)
        return astroid.parse(string)

    def set_and_check(self, module, node_class, expected):
        """Example is either in a file, or provided as a string literal.
        """
        self.ending_transformer.visit(module)  # Apply all transforms.
        # Collect (fromlineno, end_lineno, col_offset, end_col_offset) for
        # every node of the given class, in astroid's traversal order.
        props = [(node.fromlineno, node.end_lineno,
                  node.col_offset, node.end_col_offset)
                 for node in module.nodes_of_class(node_class)
                 ]
        self.assertEqual(expected, props)

    # def test_arguments(self):
    #     expected = [(1, 2, 8, 30), (5, 5, 14, 14), (8, 8, 12, 12), (9, 9, 14, 18)]
    #     module = self.get_file_as_module(PATH + 'arguments.py')
    #     self.set_and_check(module, astroid.Arguments, expected)

    def test_assert(self):
        """Locations of Assert statements."""
        expected = [(1, 1, 0, 43), (2, 2, 0, 11)]
        module = self.get_file_as_module(PATH + 'Assert.py')
        self.set_and_check(module, astroid.Assert, expected)

    def test_assign(self):
        """Locations of Assign statements."""
        expected = [(1, 1, 0, 5), (2, 2, 0, 9), (3, 3, 0, 11), (4, 4, 0, 8), (5, 5, 0, 6)]
        module = self.get_file_as_module(PATH + 'Assign.py')
        self.set_and_check(module, astroid.Assign, expected)

    def test_assignattr(self):
        """
        Given 'self.name = 10', we want to highlight 'self.name' rather than
        just 'self'.
        """
        expected = [(3, 3, 8, 17), (4, 4, 8, 19)]
        module = self.get_file_as_module(PATH + 'AssignAttr.py')
        self.set_and_check(module, astroid.AssignAttr, expected)

    # def test_assignname(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 5)]
    #     module = self.get_file_as_module(PATH + 'AssignName.py')
    #     self.set_and_check(module, astroid.Assign, expected)

    def test_asyncfor(self):
        """Note: col_offset property always set after the 'async' keyword.
        """
        expected = [(3, 7, 4, 16)]
        module = self.get_file_as_module(PATH + 'AsyncFor.py')
        self.set_and_check(module, astroid.AsyncFor, expected)

    # def test_asyncfunctiondef(self):
    #     """
    #     """
    #     expected = [(1, 2, 6, 12)]
    #     module = self.get_file_as_module(PATH + 'AsyncFunctionDef.py')
    #     self.set_and_check(module, astroid.AsyncFunctionDef, expected)

    # def test_asyncwith(self):
    #     """
    #     """
    #     expected = [(2, 3, 10, 12)]
    #     module = self.get_file_as_module(PATH + 'AsyncWith.py')
    #     self.set_and_check(module, astroid.AsyncWith, expected)

    def test_attribute(self):
        """Note: Setting the attribute node by its last child doesn't include
        the attribute in determining the end_col_offset.
        """
        expected = [(1, 1, 0, 12), (2, 2, 0, 14)]
        module = self.get_file_as_module(PATH + 'Attribute.py')
        self.set_and_check(module, astroid.Attribute, expected)

    # def test_augassign(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 6)]
    #     module = self.get_file_as_module(PATH + 'AugAssign.py')
    #     self.set_and_check(module, astroid.AugAssign, expected)

    # def test_await(self):
    #     """Note: col_offset property always set before the 'await' keyword.
    #     Aside: this example shows the case where setting end_col_offset by the
    #     child (i.e. arguments.Name) doesn't capture some information like the
    #     parenthesis in the parent arguments.Call node.
    #     """
    #     expected = [(5, 5, 4, 25)]
    #     module = self.get_file_as_module(PATH + 'Await.py')
    #     self.set_and_check(module, astroid.Await, expected)

    # def test_binop(self):
    #     """note: value of col_offset = 6, is weird but we didn't set it.
    #     first (depends on pre/postorder) binop is ((1 + 2) + 3), then (1 + 2)
    #     TODO: add the "( (100) * (42) )" test
    #     """
    #     expected = [(1, 1, 6, 9), (1, 1, 0, 5)]
    #     example = '''1 + 2 + 3'''
    #     module = self.get_string_as_module(example)
    #     self.set_and_check(module, astroid.BinOp, expected)

    # def test_boolop(self):
    #     """
    #     """
    #     expected = [(1, 1, 4, 13)]
    #     module = self.get_file_as_module(PATH + 'BoolOp.py')
    #     self.set_and_check(module, astroid.BoolOp, expected)

    # def test_break(self):
    #     """
    #     """
    #     expected = [(2, 2, 4, 9)]
    #     module = self.get_file_as_module(PATH + 'Break.py')
    #     self.set_and_check(module, astroid.Break, expected)

    def test_call(self):
        """Note: the end_col_offset is 1 left of the last ')'.
        >>>print(1, 2, 3,
        >>>      4)
        """
        expected = [(1, 2, 0, 9)]
        module = self.get_file_as_module(PATH + 'Call.py')
        self.set_and_check(module, astroid.Call, expected)

    # def test_classdef(self):
    #     """Note: this is set to the last statement in the class definition.
    #     """
    #     expected = [(1, 2, 0, 8)]
    #     module = self.get_file_as_module(PATH + 'ClassDef.py')
    #     self.set_and_check(module, astroid.ClassDef, expected)

    # def test_compare(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 5)]
    #     module = self.get_file_as_module(PATH + 'Compare.py')
    #     self.set_and_check(module, astroid.Compare, expected)

    def test_comprehension(self):
        """
        Could be in a SetComp, ListComp, or GeneratorExp.. in each respective
        case, the subsequent char could be either a brace, bracket, or paren.
        Aside: col_offset should start from beginning of the 'for'.
        """
        expected = [(1, 1, 7, 20), (2, 2, 7, 16), (2, 2, 21, 36), (3, 3, 9, 18), (3, 3, 23, 40)]
        module = self.get_file_as_module(PATH + 'Comprehension.py')
        self.set_and_check(module, astroid.Comprehension, expected)

    def test_const(self):
        """Locations of Const (literal) nodes."""
        expected = [(1, 1, 0, 6), (2, 2, 4, 6), (3, 3, 0, 3), (4, 4, 0, 8),
                    (5, 7, 0, 1), (8, 8, 6, 11), (8, 8, 13, 25)]
        module = self.get_file_as_module(PATH + 'Const.py')
        self.set_and_check(module, astroid.Const, expected)

    def test_continue(self):
        """Locations of Continue statements."""
        expected = [(2, 2, 4, 12)]
        module = self.get_file_as_module(PATH + 'Continue.py')
        self.set_and_check(module, astroid.Continue, expected)

    def test_decorators(self):
        """
        Include the right parens (note: only if decorator takes args)
        """
        expected = [(1, 2, 0, 27), (6, 6, 0, 9)]
        module = self.get_file_as_module(PATH + 'Decorators.py')
        self.set_and_check(module, astroid.Decorators, expected)

    def test_delattr(self):
        """Include the 'del' keyword in the col_offset property.
        Include the attribute name in the end_col_offset property.
        """
        expected = [(4, 4, 8, 21), (5, 5, 8, 23)]
        module = self.get_file_as_module(PATH + 'DelAttr.py')
        self.set_and_check(module, astroid.DelAttr, expected)

    def test_delete(self):
        """Include the 'del' keyword in the col_offset property.
        """
        expected = [(1, 1, 0, 5), (2, 2, 0, 22)]
        module = self.get_file_as_module(PATH + 'Delete.py')
        self.set_and_check(module, astroid.Delete, expected)

    def test_delname(self):
        """Include the 'del' keyword in the col_offset property.
        """
        expected = [(1, 1, 0, 5)]
        module = self.get_file_as_module(PATH + 'DelName.py')
        self.set_and_check(module, astroid.DelName, expected)

    def test_dict(self):
        """Locations of Dict literals, including multi-line ones."""
        expected = [(1, 1, 6, 32), (2, 5, 4, 1), (6, 9, 4, 6)]
        module = self.get_file_as_module(PATH + 'Dict.py')
        self.set_and_check(module, astroid.Dict, expected)

    def test_dictcomp(self):
        """Buggy
        """
        expected = [(1, 1, 0, 29), (2, 2, 0, 37), (3, 7, 0, 1)]
        module = self.get_file_as_module(PATH + 'DictComp.py')
        self.set_and_check(module, astroid.DictComp, expected)

    # def test_dictunpack(self):
    #     """NODE EXAMPLE DOES NOT EXIST
    #     """
    #     expected = []
    #     module = self.get_file_as_module(PATH + 'DictUnpack.py')
    #     self.set_and_check(module, astroid.DictUnpack, expected)

    # def test_ellipsis(self):
    #     expected = [(1, 1, 0, 3)]
    #     module = self.get_file_as_module(PATH + 'Ellipsis.py')
    #     self.set_and_check(module, astroid.Ellipsis, expected)

    # def test_emptynode(self):
    #     """NODE EXAMPLE DOES NOT EXIST
    #     """
    #     expected = []
    #     module = self.get_file_as_module(PATH + 'EmptyNode.py')
    #     self.set_and_check(module, astroid.EmptyNode, expected)

    # def test_excepthandler(self):
    #     expected = [(3, 4, 0, 8)]
    #     module = self.get_file_as_module(PATH + 'ExceptHandler.py')
    #     self.set_and_check(module, astroid.ExceptHandler, expected)

    # def test_exec(self):
    #     """NODE EXAMPLE DOES NOT EXIST
    #     """
    #     expected = []
    #     module = self.get_file_as_module(PATH + 'Exec.py')
    #     self.set_and_check(module, astroid.Exec, expected)

    # def test_expr(self):
    #     """TODO: test all the Expr nodes in 'Slice.py'
    #     """
    #     expected = [(1, 1, 0, 12), (2, 2, 0, 13), (3, 3, 0, 11), (4, 4, 0, 17)]
    #     module = self.get_file_as_module(PATH + 'Expr.py')
    #     self.set_and_check(module, astroid.Expr, expected)

    def test_extslice(self):
        """Locations of ExtSlice nodes (mixed slice/index subscripts)."""
        expected = [(1, 1, 1, 8), (2, 2, 2, 14), (3, 3, 1, 8), (4, 4, 2, 15), (5, 6, 1, 8)]
        module = self.get_file_as_module(PATH + 'ExtSlice.py')
        self.set_and_check(module, astroid.ExtSlice, expected)

    # def test_for(self):
    #     expected = [(1, 2, 0, 9)]
    #     module = self.get_file_as_module(PATH + 'For.py')
    #     self.set_and_check(module, astroid.For, expected)

    # def test_functiondef(self):
    #     expected = [(1, 2, 0, 8)]
    #     module = self.get_file_as_module(PATH + 'FunctionDef.py')
    #     self.set_and_check(module, astroid.FunctionDef, expected)

    def test_generatorexp(self):
        """Locations of GeneratorExp nodes."""
        expected = [(1, 1, 0, 37), (2, 2, 0, 43)]
        module = self.get_file_as_module(PATH + 'GeneratorExp.py')
        self.set_and_check(module, astroid.GeneratorExp, expected)

    # def test_global(self):
    #     """
    #     """
    #     expected = [(2, 2, 4, 12)]
    #     module = self.get_file_as_module(PATH + 'Global.py')
    #     self.set_and_check(module, astroid.Global, expected)

    # def test_if(self):
    #     """
    #     """
    #     expected = [(1, 4, 0, 8), (3, 4, 5, 8)]
    #     module = self.get_file_as_module(PATH + 'If.py')
    #     self.set_and_check(module, astroid.If, expected)

    # def test_ifexp(self):
    #     """
    #     """
    #     expected = [(1, 1, 4, 20)]
    #     module = self.get_file_as_module(PATH + 'IfExp.py')
    #     self.set_and_check(module, astroid.IfExp, expected)

    # def test_import(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 14)]
    #     module = self.get_file_as_module(PATH + 'Import.py')
    #     self.set_and_check(module, astroid.Import, expected)

    # def test_importfrom(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 47)]
    #     module = self.get_file_as_module(PATH + 'ImportFrom.py')
    #     self.set_and_check(module, astroid.ImportFrom, expected)

    def test_index(self):
        """Should include the enclosing brackets, e.g. "[1]" instead of "1".
        """
        expected = [(1, 1, 1, 5), (2, 2, 2, 10), (3, 3, 2, 15)]
        module = self.get_file_as_module(PATH + 'Index.py')
        self.set_and_check(module, astroid.Index, expected)

    def test_keyword(self):
        """Include the name of the keyword, contained in 'node.arg' attribute.
        """
        expected = [(1, 1, 4, 12), (2, 2, 5, 15)]
        module = self.get_file_as_module(PATH + 'Keyword.py')
        self.set_and_check(module, astroid.Keyword, expected)

    # def test_lambda(self):
    #     """
    #     """
    #     expected = [(1, 1, 6, 15), (2, 2, 7, 25)]
    #     module = self.get_file_as_module(PATH + 'Lambda.py')
    #     self.set_and_check(module, astroid.Lambda, expected)

    # def test_list(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 2)]
    #     module = self.get_file_as_module(PATH + 'List.py')
    #     self.set_and_check(module, astroid.List, expected)

    # def test_listcomp(self):
    #     """Buggy
    #     """
    #     expected = [(1, 1, 0, 24), (2, 2, 0, 49)]
    #     module = self.get_file_as_module(PATH + 'ListComp.py')
    #     self.set_and_check(module, astroid.ListComp, expected)

    # def test_module(self):
    #     """
    #     """
    #     expected = [(0, 2, 0, 1)]
    #     module = self.get_file_as_module(PATH + 'Module.py')
    #     self.set_and_check(module, astroid.Module, expected)

    # def test_name(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 6)]
    #     module = self.get_file_as_module(PATH + 'Name.py')
    #     self.set_and_check(module, astroid.Name, expected)

    # def test_nonlocal(self):
    #     """
    #     """
    #     expected = [(3, 3, 4, 14)]
    #     module = self.get_file_as_module(PATH + 'Nonlocal.py')
    #     self.set_and_check(module, astroid.Nonlocal, expected)

    # def test_pass(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 4)]
    #     module = self.get_file_as_module(PATH + 'Pass.py')
    #     self.set_and_check(module, astroid.Pass, expected)

    # def test_print(self):
    #     """NODE EXAMPLE DOES NOT EXIST
    #     """
    #     expected = []
    #     module = self.get_file_as_module(PATH + 'Print.py')
    #     self.set_and_check(module, astroid.Print, expected)

    # def test_raise(self):
    #     expected = [(1, 1, 0, 23)]
    #     module = self.get_file_as_module(PATH + 'Raise.py')
    #     self.set_and_check(module, astroid.Raise, expected)

    # def test_repr(self):
    #     """NODE EXAMPLE DOES NOT EXIST
    #     """
    #     expected = []
    #     module = self.get_file_as_module(PATH + 'Repr.py')
    #     self.set_and_check(module, astroid.Repr, expected)

    # def test_return(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 8)]
    #     module = self.get_file_as_module(PATH + 'Return.py')
    #     self.set_and_check(module, astroid.Return, expected)

    def test_set(self):
        """Locations of Set literals."""
        expected = [(1, 1, 0, 3), (2, 2, 0, 6), (3, 3, 0, 12)]
        module = self.get_file_as_module(PATH + 'Set.py')
        self.set_and_check(module, astroid.Set, expected)

    def test_setcomp(self):
        """Locations of SetComp nodes."""
        expected = [(1, 1, 0, 25), (2, 2, 0, 63)]
        module = self.get_file_as_module(PATH + 'SetComp.py')
        self.set_and_check(module, astroid.SetComp, expected)

    def test_slice(self):
        """Note: col_offset and end_col_offset are set to the first constant
        encountered, either on left or right side of colon.
        Should capture both brackets..
        """
        expected = [(1, 1, 2, 3),
                    (2, 2, 3, 8),
                    (3, 3, 2, 4),
                    (4, 4, 3, 13),
                    (5, 5, 2, 8),
                    (6, 6, 8, 30),
                    (7, 8, 2, 2),
                    (9, 9, 2, 4),
                    (9, 9, 6, 7),
                    (10, 10, 2, 3),
                    (10, 10, 5, 7),
                    (11, 11, 2, 3),
                    (11, 11, 5, 6),
                    (12, 12, 2, 4),
                    (13, 13, 2, 3),
                    (13, 13, 10, 11),
                    (14, 14, 6, 11),
                    (15, 15, 2, 3),
                    (15, 15, 5, 6),
                    (15, 15, 9, 10)
                    ]
        module = self.get_file_as_module(PATH + 'Slice.py')
        self.set_and_check(module, astroid.Slice, expected)

    # def test_starred(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 2)]
    #     module = self.get_file_as_module(PATH + 'Starred.py')
    #     self.set_and_check(module, astroid.Starred, expected)

    def test_subscript(self):
        """Locations of Subscript nodes, including nested subscripts."""
        expected = [(1, 1, 0, 4),
                    (2, 2, 0, 8),
                    (3, 3, 0, 4),
                    (4, 4, 0, 9),
                    (5, 5, 0, 5),
                    (6, 6, 0, 14),
                    (7, 7, 0, 9),
                    (8, 8, 4, 31),
                    (9, 10, 0, 3),
                    (11, 11, 0, 8),
                    (11, 11, 0, 5),
                    (12, 12, 0, 8),
                    (12, 12, 0, 4),
                    (13, 13, 0, 7),
                    (13, 13, 0, 4),
                    (14, 14, 0, 5),
                    (15, 15, 0, 12),
                    (15, 15, 0, 4),
                    (16, 16, 4, 12)
                    ]
        module = self.get_file_as_module(PATH + 'Subscript.py')
        self.set_and_check(module, astroid.Subscript, expected)

    # def test_tryexcept(self):
    #     """
    #     """
    #     expected = [(1, 4, 0, 8)]
    #     module = self.get_file_as_module(PATH + 'TryExcept.py')
    #     self.set_and_check(module, astroid.TryExcept, expected)

    # def test_tryfinally(self):
    #     """
    #     """
    #     expected = [(1, 6, 0, 8)]
    #     module = self.get_file_as_module(PATH + 'TryFinally.py')
    #     self.set_and_check(module, astroid.TryFinally, expected)

    def test_tuple(self):
        """Locations of Tuple literals, parenthesized or not."""
        expected = [(1, 1, 0, 6), (2, 2, 0, 11), (3, 3, 0, 5), (4, 4, 0, 7),
                    (5, 5, 0, 12), (6, 8, 0, 8), (9, 9, 0, 4), (10, 10, 0, 7),
                    (11, 13, 0, 17), (14, 14, 0, 6), (15, 15, 7, 13),
                    (16, 16, 4, 10), (17, 17, 0, 10), (17, 17, 0, 4),
                    (17, 17, 6, 10), (18, 18, 0, 6), (20, 20, 0, 6)
                    ]
        module = self.get_file_as_module(PATH + 'Tuple.py')
        self.set_and_check(module, astroid.Tuple, expected)

    # def test_unaryop(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 8)]
    #     module = self.get_file_as_module(PATH + 'UnaryOp.py')
    #     self.set_and_check(module, astroid.UnaryOp, expected)

    # def test_while(self):
    #     """
    #     """
    #     expected = [(1, 2, 0, 9)]
    #     module = self.get_file_as_module(PATH + 'While.py')
    #     self.set_and_check(module, astroid.While, expected)

    # def test_with(self):
    #     """
    #     """
    #     expected = [(1, 2, 0, 8)]
    #     module = self.get_file_as_module(PATH + 'With.py')
    #     self.set_and_check(module, astroid.With, expected)

    # def test_yield(self):
    #     """
    #     """
    #     expected = [(1, 1, 0, 5)]
    #     module = self.get_file_as_module(PATH + 'Yield.py')
    #     self.set_and_check(module, astroid.Yield, expected)

    # def test_yieldfrom(self):
    #     """
    #     """
    #     expected = [(2, 2, 4, 16)]
    #     module = self.get_file_as_module(PATH + 'YieldFrom.py')
    #     self.set_and_check(module, astroid.YieldFrom, expected)
# Allow the suite to be executed directly: python test_setendings.py
if __name__ == '__main__':
    unittest.main()  # run tests
| RyanDJLee/pyta | tests/test_setendings.py | Python | gpl-3.0 | 20,344 | [
"VisIt"
] | 84a783ceb6ee6b2ff135d03c7ace6c0573bda29753868e85d3d8a6e5393c05cc |
from brainiak.eventseg.event import EventSegment
from scipy.special import comb
import numpy as np
import pytest
from sklearn.exceptions import NotFittedError
def test_create_event_segmentation():
    """Smoke test: constructing an EventSegment must yield a truthy instance."""
    es = EventSegment(5)
    assert es, "Invalid EventSegment instance"
def test_fit_shapes():
    """Check that fit() and find_events() return (T, K) segmentations whose
    rows sum to one, and that invalid configurations raise ValueError."""
    K = 5   # number of events
    V = 3   # number of voxels/features
    T = 10  # number of timepoints
    es = EventSegment(K, n_iter=2)
    sample_data = np.random.rand(V, T)
    es.fit(sample_data.T)
    assert es.segments_[0].shape == (T, K), "Segmentation from fit " \
                                            "has incorrect shape"
    # Each timepoint's event probabilities must form a distribution.
    assert np.isclose(np.sum(es.segments_[0], axis=1), np.ones(T)).all(), \
        "Segmentation from learn_events not correctly normalized"
    # Transfer the fitted model to data with a different number of timepoints.
    T2 = 15
    sample_data2 = np.random.rand(V, T2)
    test_segments, test_ll = es.find_events(sample_data2.T)
    assert test_segments.shape == (T2, K), "Segmentation from find_events " \
                                           "has incorrect shape"
    assert np.isclose(np.sum(test_segments, axis=1), np.ones(T2)).all(), \
        "Segmentation from find_events not correctly normalized"
    es_invalid = EventSegment(K)
    with pytest.raises(ValueError):
        es_invalid.model_prior(K-1)
        # ``with`` block is about to end with no error.
        pytest.fail("T < K should cause error")
    with pytest.raises(ValueError):
        es_invalid.set_event_patterns(np.zeros((V, K-1)))
        pytest.fail("#Events < K should cause error")
def test_simple_boundary():
    """Two clearly separated patterns must be segmented at the true boundary,
    via both fit() and the predict() convenience interface."""
    es = EventSegment(2)
    # Seeded RNG keeps the noisy sample deterministic across runs.
    random_state = np.random.RandomState(0)
    sample_data = np.array([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]]) + \
        random_state.rand(2, 7) * 10
    es.fit(sample_data.T)
    events = np.argmax(es.segments_[0], axis=1)
    assert np.array_equal(events, [0, 0, 0, 1, 1, 1, 1]),\
        "Failed to correctly segment two events"
    events_predict = es.predict(sample_data.T)
    assert np.array_equal(events_predict, [0, 0, 0, 1, 1, 1, 1]), \
        "Error in predict interface"
def test_event_transfer():
    """find_events() must require variance and patterns to be set first, and
    then recover the correct event sequence on fresh data."""
    es = EventSegment(2)
    sample_data = np.asarray([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]])
    with pytest.raises(NotFittedError):
        seg = es.find_events(sample_data.T)[0]
        pytest.fail("Should need to set variance")
    with pytest.raises(NotFittedError):
        seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
        pytest.fail("Should need to set patterns")
    es.set_event_patterns(np.asarray([[1, 0], [0, 1]]))
    seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
    events = np.argmax(seg, axis=1)
    assert np.array_equal(events, [0, 0, 0, 1, 1, 1, 1]),\
        "Failed to correctly transfer two events to new data"
def test_weighted_var():
    """calc_weighted_event_var must be zero for perfectly-assigned (0/1)
    weights and match the analytic value for fractional weights."""
    es = EventSegment(2)
    # Two unit-normalized patterns, four timepoints each.
    D = np.zeros((8, 4))
    for t in range(4):
        D[t, :] = (1/np.sqrt(4/3)) * np.array([-1, -1, 1, 1])
    for t in range(4, 8):
        D[t, :] = (1 / np.sqrt(4 / 3)) * np.array([1, 1, -1, -1])
    mean_pat = D[[0, 4], :].T
    weights = np.zeros((8, 2))
    weights[:, 0] = [1, 1, 1, 1, 0, 0, 0, 0]
    weights[:, 1] = [0, 0, 0, 0, 1, 1, 1, 1]
    assert np.array_equal(
        es.calc_weighted_event_var(D, weights, mean_pat), [0, 0]),\
        "Failed to compute variance with 0/1 weights"
    weights[:, 0] = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]
    weights[:, 1] = [0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1]
    # Analytic weighted variance for the half-overlapping assignment above.
    true_var = (4 * 0.5 * 12)/(6 - 5/6) * np.ones(2) / 4
    assert np.allclose(
        es.calc_weighted_event_var(D, weights, mean_pat), true_var),\
        "Failed to compute variance with fractional weights"
def test_sym():
    """With constant data and identical event patterns, the fitted
    segmentation must be symmetric under time/event reversal."""
    es = EventSegment(4)
    evpat = np.repeat(np.arange(10).reshape(-1, 1), 4, axis=1)
    es.set_event_patterns(evpat)
    D = np.repeat(np.arange(10).reshape(1, -1), 20, axis=0)
    ev = es.find_events(D, var=1)[0]
    # Check that events 1-4 and 2-3 are symmetric
    assert np.all(np.isclose(ev[:, :2], np.fliplr(np.flipud(ev[:, 2:])))),\
        "Fit with constant data is not symmetric"
def test_chains():
    """When events are partitioned into chains, data matching only chain 'B'
    must be assigned exclusively to that chain's events (indices 2-4)."""
    es = EventSegment(5, event_chains=np.array(['A', 'A', 'B', 'B', 'B']))
    es.set_event_patterns(np.array([[1, 1, 0, 0, 0],
                                    [0, 0, 1, 1, 1]]))
    sample_data = np.array([[0, 0, 0], [1, 1, 1]])
    seg = es.find_events(sample_data.T, 0.1)[0]
    ev = np.nonzero(seg > 0.99)[1]
    assert np.array_equal(ev, [2, 3, 4]),\
        "Failed to fit with multiple chains"
def test_prior():
    # Compare the model's prior over event assignments against the analytic
    # solution for K events over T timepoints.
    K = 10
    T = 100
    es = EventSegment(K)
    mp = es.model_prior(T)[0]
    # p_bound[t, k]: probability that the boundary between events k and k+1
    # falls at timepoint t (per the referenced supplementary material).
    p_bound = np.zeros((T, K-1))
    norm = comb(T-1, K-1)
    for t in range(T-1):
        for k in range(K-1):
            # See supplementary material of Neuron paper
            # https://doi.org/10.1016/j.neuron.2017.06.041
            p_bound[t+1, k] = comb(t, k) * comb(T-t-2, K-k-2) / norm
    # Cumulative sum gives P(boundary k has already occurred by time t).
    p_bound = np.cumsum(p_bound, axis=0)
    # Convert boundary probabilities into P(event k is active at time t).
    mp_gt = np.zeros((T, K))
    for k in range(K):
        if k == 0:
            mp_gt[:, k] = 1 - p_bound[:, 0]
        elif k == K - 1:
            mp_gt[:, k] = p_bound[:, k-1]
        else:
            mp_gt[:, k] = p_bound[:, k-1] - p_bound[:, k]
    assert np.all(np.isclose(mp, mp_gt)),\
        "Prior does not match analytic solution"
| lcnature/brainiak | tests/eventseg/test_event.py | Python | apache-2.0 | 5,227 | [
"NEURON"
] | b32f7bf02849be47ef7bbad46cedb9f54875ce89d97389945246672ed50284f0 |
# -*- coding:utf-8 -*-
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from pypln.web.core.models import Corpus
__all__ = ["CorpusListViewTest", "CorpusDetailViewTest"]
class CorpusListViewTest(TestCase):
    """Tests for the corpus list/create endpoint (``corpus-list``).

    The 'corpora' fixture provides exactly one corpus named
    "User Test Corpus" owned by the user 'user'.
    """
    fixtures = ['users', 'corpora']

    def test_requires_login(self):
        # Anonymous requests are rejected (this API answers 403 here).
        response = self.client.get(reverse('corpus-list'))
        self.assertEqual(response.status_code, 403)

    def test_only_lists_corpora_that_belongs_to_the_authenticated_user(self):
        self.client.login(username="user", password="user")
        response = self.client.get(reverse('corpus-list'))
        self.assertEqual(response.status_code, 200)
        expected_data = Corpus.objects.filter(
            owner=User.objects.get(username="user"))
        # Compare against the view's queryset rather than the rendered body,
        # so the test is independent of the serialization format.
        object_list = response.renderer_context['view'].get_queryset()
        self.assertEqual(list(expected_data), list(object_list))

    def test_create_new_corpus(self):
        user = User.objects.get(username="user")
        self.assertEqual(len(user.corpus_set.all()), 1)
        self.client.login(username="user", password="user")
        response = self.client.post(reverse('corpus-list'), {"name": "Corpus",
            "description": "description"})
        self.assertEqual(response.status_code, 201)
        self.assertEqual(len(user.corpus_set.all()), 2)

    def test_cant_create_new_corpus_for_another_user(self):
        self.client.login(username="user", password="user")
        # We try to set 'admin' as the owner (id=1)
        response = self.client.post(reverse('corpus-list'), {"name": "Corpus",
            "description": "description", "owner": 1})
        self.assertEqual(response.status_code, 201)
        # but the view sets the request user as the owner anyway
        self.assertEqual(response.data["owner"], "user")

    def test_cant_create_duplicate_corpus(self):
        user = User.objects.get(username="user")
        self.assertEqual(len(user.corpus_set.all()), 1)
        self.client.login(username="user", password="user")
        # A corpus with this information already exists (loaded by fixtures)
        response = self.client.post(reverse('corpus-list'), {"name": "User Test Corpus",
            "description": "description"})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(len(user.corpus_set.all()), 1)
class CorpusDetailViewTest(TestCase):
    """Tests for the corpus retrieve/update/delete endpoint
    (``corpus-detail``).

    The 'corpora' fixture provides one corpus per user; the one owned by
    'user' is named "User Test Corpus".
    """
    fixtures = ['users', 'corpora']

    def test_requires_login(self):
        # Anonymous requests are rejected (this API answers 403 here).
        response = self.client.get(reverse('corpus-detail', kwargs={'pk': 2}))
        self.assertEqual(response.status_code, 403)

    def test_shows_corpus_correctly(self):
        self.client.login(username="user", password="user")
        corpus = Corpus.objects.filter(owner__username="user")[0]
        response = self.client.get(reverse('corpus-detail',
            kwargs={'pk': corpus.id}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.renderer_context['view'].get_object(), corpus)

    def test_returns_404_for_inexistent_corpus(self):
        self.client.login(username="user", password="user")
        response = self.client.get(reverse('corpus-detail',
            kwargs={'pk': 9999}))
        self.assertEqual(response.status_code, 404)

    def test_returns_404_if_user_is_not_the_owner_of_the_corpus(self):
        # Another user's corpus must be indistinguishable from a missing one.
        self.client.login(username="user", password="user")
        corpus = Corpus.objects.filter(owner__username="admin")[0]
        response = self.client.get(reverse('corpus-detail',
            kwargs={'pk': corpus.id}))
        self.assertEqual(response.status_code, 404)

    def test_edit_corpus(self):
        self.client.login(username="user", password="user")
        corpus = Corpus.objects.filter(owner__username="user")[0]
        response = self.client.put(reverse('corpus-detail',
            kwargs={'pk': corpus.id}), json.dumps({"name": "New name",
            "description": "New description"}), content_type="application/json")
        self.assertEqual(response.status_code, 200)
        updated_corpus = Corpus.objects.filter(owner__username="user")[0]
        self.assertEqual(updated_corpus.name, "New name")
        self.assertEqual(updated_corpus.description, "New description")

    def test_cant_change_name_to_one_that_already_exists_for_this_user(self):
        self.client.login(username="user", password="user")
        user = User.objects.get(username="user")
        # Create the corpus the update below will collide with.  (The created
        # object itself is never used afterwards, so it is not bound to a
        # local variable -- the previous unused binding was removed.)
        Corpus.objects.create(name="Conflicting name",
            owner=user, description="This corpus is here to create a conflict")
        corpus = Corpus.objects.filter(owner__username="user")[0]
        response = self.client.put(reverse('corpus-detail',
            kwargs={'pk': corpus.id}), json.dumps({"name": "Conflicting name",
            "description": "New description"}), content_type="application/json")
        self.assertEqual(response.status_code, 400)
        not_updated_corpus = Corpus.objects.filter(owner__username="user")[0]
        self.assertEqual(not_updated_corpus.name, "User Test Corpus")
        self.assertEqual(not_updated_corpus.description, "This corpus belongs to the user 'user'")

    def test_cant_edit_other_peoples_corpora(self):
        """
        A PUT request to another person's corpus must return 404 (as if the
        corpus did not exist) and must leave it untouched.

        NOTE(review): the previous docstring claimed rest_framework's
        PUT-as-create would create a new object here, which contradicts the
        assertions below (404 and no modification); the view evidently does
        not allow PUT-as-create on other users' objects.
        """
        self.client.login(username="user", password="user")
        corpus = Corpus.objects.filter(owner__username="admin")[0]
        response = self.client.put(reverse('corpus-detail',
            kwargs={'pk': corpus.id}), json.dumps({"name": "New name",
            "description": "New description"}), content_type="application/json")
        self.assertEqual(response.status_code, 404)
        reloaded_corpus = Corpus.objects.filter(owner__username="admin")[0]
        self.assertNotEqual(reloaded_corpus.name, "New name")
        self.assertNotEqual(reloaded_corpus.description, "New description")

    def test_cant_change_the_owner_of_a_corpus(self):
        self.client.login(username="user", password="user")
        corpus = Corpus.objects.filter(owner__username="user")[0]
        # We try to set 'admin' as the owner (id=1)
        response = self.client.put(reverse('corpus-detail',
            kwargs={'pk': corpus.id}), json.dumps({"name": "Corpus",
            "description": "description", "owner": 1}),
            content_type="application/json")
        self.assertEqual(response.status_code, 200)
        # but the view sets the request user as the owner anyway
        self.assertEqual(response.data["owner"], "user")

    def test_delete_a_corpus(self):
        self.client.login(username="user", password="user")
        self.assertEqual(len(Corpus.objects.filter(owner__username="user")), 1)
        corpus = Corpus.objects.filter(owner__username="user")[0]
        response = self.client.delete(reverse('corpus-detail',
            kwargs={'pk': corpus.id}))
        self.assertEqual(response.status_code, 204)
        self.assertEqual(len(Corpus.objects.filter(owner__username="user")), 0)

    def test_cant_delete_other_peoples_corpora(self):
        self.client.login(username="user", password="user")
        self.assertEqual(len(Corpus.objects.filter(owner__username="user")), 1)
        corpus = Corpus.objects.filter(owner__username="admin")[0]
        response = self.client.delete(reverse('corpus-detail',
            kwargs={'pk': corpus.id}))
        self.assertEqual(response.status_code, 404)
        self.assertEqual(len(Corpus.objects.filter(owner__username="user")), 1)
| flavioamieiro/pypln.web | pypln/web/core/tests/views/test_corpus.py | Python | gpl-3.0 | 8,500 | [
"NAMD"
] | 7528066fa3a7f70c7efb8f4ac3f2981336da19d1919f2ac7ddc490cffcfc61b2 |
# -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
"""
Sphinx-Gallery Generator
========================
Attaches Sphinx-Gallery to Sphinx in order to generate the galleries
when building the documentation.
"""
from __future__ import division, print_function, absolute_import
import codecs
import copy
from datetime import timedelta, datetime
from importlib import import_module
import re
import os
import pathlib
from xml.sax.saxutils import quoteattr, escape
from sphinx.errors import ConfigError, ExtensionError
from sphinx.util.console import red
from . import sphinx_compatibility, glr_path_static, __version__ as _sg_version
from .utils import _replace_md5, _has_optipng, _has_pypandoc
from .backreferences import _finalize_backreferences
from .gen_rst import (generate_dir_rst, SPHX_GLR_SIG, _get_memory_base,
_get_readme)
from .scrapers import _scraper_dict, _reset_dict, _import_matplotlib
from .docs_resolv import embed_code_links
from .downloads import generate_zipfiles
from .sorting import NumberOfCodeLinesSortKey
from .binder import copy_binder_files, check_binder_conf
from .directives import MiniGallery
_KNOWN_CSS = ('gallery', 'gallery-binder', 'gallery-dataframe',
'gallery-rendered-html')
class DefaultResetArgv:
    """Default ``reset_argv`` hook: pass no extra argv to example scripts."""

    def __repr__(self):
        return "DefaultResetArgv"

    def __call__(self, gallery_conf, script_vars):
        # No additional command-line arguments for any example.
        return []
DEFAULT_GALLERY_CONF = {
'filename_pattern': re.escape(os.sep) + 'plot',
'ignore_pattern': r'__init__\.py',
'examples_dirs': os.path.join('..', 'examples'),
'reset_argv': DefaultResetArgv(),
'subsection_order': None,
'within_subsection_order': NumberOfCodeLinesSortKey,
'gallery_dirs': 'auto_examples',
'backreferences_dir': None,
'doc_module': (),
'reference_url': {},
'capture_repr': ('_repr_html_', '__repr__'),
'ignore_repr_types': r'',
# Build options
# -------------
# We use a string for 'plot_gallery' rather than simply the Python boolean
# `True` as it avoids a warning about unicode when controlling this value
# via the command line switches of sphinx-build
'plot_gallery': 'True',
'download_all_examples': True,
'abort_on_example_error': False,
'failing_examples': {},
'passing_examples': [],
'stale_examples': [], # ones that did not need to be run due to md5sum
'run_stale_examples': False,
'expected_failing_examples': set(),
'thumbnail_size': (400, 280), # Default CSS does 0.4 scaling (160, 112)
'min_reported_time': 0,
'binder': {},
'image_scrapers': ('matplotlib',),
'compress_images': (),
'reset_modules': ('matplotlib', 'seaborn'),
'first_notebook_cell': '%matplotlib inline',
'last_notebook_cell': None,
'notebook_images': False,
'pypandoc': False,
'remove_config_comments': False,
'show_memory': False,
'show_signature': True,
'junit': '',
'log_level': {'backreference_missing': 'warning'},
'inspect_global_variables': True,
'css': _KNOWN_CSS,
'matplotlib_animations': False,
}
logger = sphinx_compatibility.getLogger('sphinx-gallery')
def _bool_eval(x):
if isinstance(x, str):
try:
x = eval(x)
except TypeError:
pass
return bool(x)
def parse_config(app):
    """Process the Sphinx Gallery configuration."""
    # 'plot_gallery' / 'abort_on_example_error' may arrive as strings when
    # set through `sphinx-build -D`; _bool_eval normalizes them to bool.
    plot_gallery = _bool_eval(app.builder.config.plot_gallery)
    src_dir = app.builder.srcdir
    abort_on_example_error = _bool_eval(
        app.builder.config.abort_on_example_error)
    lang = app.builder.config.highlight_language
    gallery_conf = _complete_gallery_conf(
        app.config.sphinx_gallery_conf, src_dir, plot_gallery,
        abort_on_example_error, lang, app.builder.name, app)

    # this assures I can call the config in other places
    app.config.sphinx_gallery_conf = gallery_conf
    # Make Sphinx-Gallery's bundled static assets (CSS) available to Sphinx.
    app.config.html_static_path.append(glr_path_static())
    return gallery_conf
def _complete_gallery_conf(sphinx_gallery_conf, src_dir, plot_gallery,
                           abort_on_example_error, lang='python',
                           builder_name='html', app=None):
    """Overlay the user configuration on the defaults and validate it.

    Starts from a deep copy of ``DEFAULT_GALLERY_CONF``, applies the user's
    ``sphinx_gallery_conf``, normalizes/validates each option and raises
    ``ConfigError`` for invalid settings.  Returns the completed dict.
    """
    gallery_conf = copy.deepcopy(DEFAULT_GALLERY_CONF)
    gallery_conf.update(sphinx_gallery_conf)
    # Backwards compatibility for the removed Mayavi-specific option.
    if sphinx_gallery_conf.get('find_mayavi_figures', False):
        logger.warning(
            "Deprecated image scraping variable `find_mayavi_figures`\n"
            "detected, use `image_scrapers` instead as:\n\n"
            " image_scrapers=('matplotlib', 'mayavi')",
            type=DeprecationWarning)
        gallery_conf['image_scrapers'] += ('mayavi',)
    gallery_conf.update(plot_gallery=plot_gallery)
    gallery_conf.update(abort_on_example_error=abort_on_example_error)
    # XXX anything that can only be a bool (rather than str) should probably be
    # evaluated this way as it allows setting via -D on the command line
    for key in ('run_stale_examples',):
        gallery_conf[key] = _bool_eval(gallery_conf[key])
    gallery_conf['src_dir'] = src_dir
    gallery_conf['app'] = app

    # Backwards compatibility: 'mod_example_dir' was renamed.
    if gallery_conf.get("mod_example_dir", False):
        backreferences_warning = """\n========
Sphinx-Gallery found the configuration key 'mod_example_dir'. This
is deprecated, and you should now use the key 'backreferences_dir'
instead. Support for 'mod_example_dir' will be removed in a subsequent
version of Sphinx-Gallery. For more details, see the backreferences
documentation:
https://sphinx-gallery.github.io/configuration.html#references-to-examples"""  # noqa: E501
        gallery_conf['backreferences_dir'] = gallery_conf['mod_example_dir']
        logger.warning(
            backreferences_warning,
            type=DeprecationWarning)

    # Check capture_repr
    capture_repr = gallery_conf['capture_repr']
    supported_reprs = ['__repr__', '__str__', '_repr_html_']
    if isinstance(capture_repr, tuple):
        for rep in capture_repr:
            if rep not in supported_reprs:
                raise ConfigError("All entries in 'capture_repr' must be one "
                                  "of %s, got: %s" % (supported_reprs, rep))
    else:
        raise ConfigError("'capture_repr' must be a tuple, got: %s"
                          % (type(capture_repr),))
    # Check ignore_repr_types
    if not isinstance(gallery_conf['ignore_repr_types'], str):
        raise ConfigError("'ignore_repr_types' must be a string, got: %s"
                          % (type(gallery_conf['ignore_repr_types']),))

    # deal with show_memory
    gallery_conf['memory_base'] = 0.
    if gallery_conf['show_memory']:
        if not callable(gallery_conf['show_memory']):  # True-like
            # Fall back to memory_profiler-based measurement; disable memory
            # reporting entirely when the package is not installed.
            try:
                from memory_profiler import memory_usage  # noqa
            except ImportError:
                logger.warning("Please install 'memory_profiler' to enable "
                               "peak memory measurements.")
                gallery_conf['show_memory'] = False
            else:
                def call_memory(func):
                    mem, out = memory_usage(func, max_usage=True, retval=True,
                                            multiprocess=True)
                    try:
                        mem = mem[0]  # old MP always returned a list
                    except TypeError:  # 'float' object is not subscriptable
                        pass
                    return mem, out
                gallery_conf['call_memory'] = call_memory
                gallery_conf['memory_base'] = _get_memory_base(gallery_conf)
        else:
            # show_memory is itself a callable supplied by the user.
            gallery_conf['call_memory'] = gallery_conf['show_memory']
    if not gallery_conf['show_memory']:  # can be set to False above
        def call_memory(func):
            return 0., func()
        gallery_conf['call_memory'] = call_memory
    assert callable(gallery_conf['call_memory'])

    # deal with scrapers
    scrapers = gallery_conf['image_scrapers']
    if not isinstance(scrapers, (tuple, list)):
        scrapers = [scrapers]
    scrapers = list(scrapers)
    for si, scraper in enumerate(scrapers):
        if isinstance(scraper, str):
            if scraper in _scraper_dict:
                scraper = _scraper_dict[scraper]
            else:
                # Treat unknown strings as module names exposing the
                # _get_sg_image_scraper() entry point.
                orig_scraper = scraper
                try:
                    scraper = import_module(scraper)
                    scraper = getattr(scraper, '_get_sg_image_scraper')
                    scraper = scraper()
                except Exception as exp:
                    raise ConfigError('Unknown image scraper %r, got:\n%s'
                                      % (orig_scraper, exp))
        scrapers[si] = scraper
        if not callable(scraper):
            raise ConfigError('Scraper %r was not callable' % (scraper,))
    gallery_conf['image_scrapers'] = tuple(scrapers)
    del scrapers
    # Here we try to set up matplotlib but don't raise an error,
    # we will raise an error later when we actually try to use it
    # (if we do so) in scrapers.py.
    # In principle we could look to see if there is a matplotlib scraper
    # in our scrapers list, but this would be backward incompatible with
    # anyone using or relying on our Agg-setting behavior (e.g., for some
    # custom matplotlib SVG scraper as in our docs).
    # Eventually we can make this a config var like matplotlib_agg or something
    # if people need us not to set it to Agg.
    try:
        _import_matplotlib()
    except (ImportError, ValueError):
        pass

    # compress_images
    compress_images = gallery_conf['compress_images']
    if isinstance(compress_images, str):
        compress_images = [compress_images]
    elif not isinstance(compress_images, (tuple, list)):
        raise ConfigError('compress_images must be a tuple, list, or str, '
                          'got %s' % (type(compress_images),))
    compress_images = list(compress_images)
    allowed_values = ('images', 'thumbnails')
    pops = list()
    for ki, kind in enumerate(compress_images):
        if kind not in allowed_values:
            # Entries beginning with "-" are optipng command-line switches.
            if kind.startswith('-'):
                pops.append(ki)
                continue
            raise ConfigError('All entries in compress_images must be one of '
                              '%s or a command-line switch starting with "-", '
                              'got %r' % (allowed_values, kind))
    compress_images_args = [compress_images.pop(p) for p in pops[::-1]]
    if len(compress_images) and not _has_optipng():
        logger.warning(
            'optipng binaries not found, PNG %s will not be optimized'
            % (' and '.join(compress_images),))
        compress_images = ()
    gallery_conf['compress_images'] = compress_images
    gallery_conf['compress_images_args'] = compress_images_args

    # deal with resetters
    resetters = gallery_conf['reset_modules']
    if not isinstance(resetters, (tuple, list)):
        resetters = [resetters]
    resetters = list(resetters)
    for ri, resetter in enumerate(resetters):
        if isinstance(resetter, str):
            if resetter not in _reset_dict:
                raise ConfigError('Unknown module resetter named %r'
                                  % (resetter,))
            resetters[ri] = _reset_dict[resetter]
        elif not callable(resetter):
            raise ConfigError('Module resetter %r was not callable'
                              % (resetter,))
    gallery_conf['reset_modules'] = tuple(resetters)

    # Normalize the highlight language to something pygments-compatible.
    lang = lang if lang in ('python', 'python3', 'default') else 'python'
    gallery_conf['lang'] = lang
    del resetters

    # Ensure the first cell text is a string if we have it
    first_cell = gallery_conf.get("first_notebook_cell")
    if (not isinstance(first_cell, str)) and (first_cell is not None):
        raise ConfigError("The 'first_notebook_cell' parameter must be type "
                          "str or None, found type %s" % type(first_cell))
    # Ensure the last cell text is a string if we have it
    last_cell = gallery_conf.get("last_notebook_cell")
    if (not isinstance(last_cell, str)) and (last_cell is not None):
        raise ConfigError("The 'last_notebook_cell' parameter must be type str"
                          " or None, found type %s" % type(last_cell))
    # Check pypandoc
    pypandoc = gallery_conf['pypandoc']
    if not isinstance(pypandoc, (dict, bool)):
        raise ConfigError("'pypandoc' parameter must be of type bool or dict,"
                          "got: %s." % type(pypandoc))
    gallery_conf['pypandoc'] = dict() if pypandoc is True else pypandoc
    has_pypandoc, version = _has_pypandoc()
    if isinstance(gallery_conf['pypandoc'], dict) and has_pypandoc is None:
        logger.warning("'pypandoc' not available. Using Sphinx-Gallery to "
                       "convert rst text blocks to markdown for .ipynb files.")
        gallery_conf['pypandoc'] = False
    elif isinstance(gallery_conf['pypandoc'], dict):
        logger.info("Using pandoc version: %s to convert rst text blocks to "
                    "markdown for .ipynb files" % (version,))
    else:
        logger.info("Using Sphinx-Gallery to convert rst text blocks to "
                    "markdown for .ipynb files.")
    if isinstance(pypandoc, dict):
        accepted_keys = ('extra_args', 'filters')
        for key in pypandoc:
            if key not in accepted_keys:
                raise ConfigError("'pypandoc' only accepts the following key "
                                  "values: %s, got: %s."
                                  % (accepted_keys, key))

    # Make it easy to know which builder we're in
    gallery_conf['builder_name'] = builder_name
    gallery_conf['titles'] = {}
    # Ensure 'backreferences_dir' is str, pathlib.Path or None
    backref = gallery_conf['backreferences_dir']
    if (not isinstance(backref, (str, pathlib.Path))) and \
            (backref is not None):
        raise ConfigError("The 'backreferences_dir' parameter must be of type "
                          "str, pathlib.Path or None, "
                          "found type %s" % type(backref))
    # if 'backreferences_dir' is pathlib.Path, make str for Python <=3.5
    # compatibility
    if isinstance(backref, pathlib.Path):
        gallery_conf['backreferences_dir'] = str(backref)

    # binder
    gallery_conf['binder'] = check_binder_conf(gallery_conf['binder'])

    if not isinstance(gallery_conf['css'], (list, tuple)):
        raise ConfigError('gallery_conf["css"] must be list or tuple, got %r'
                          % (gallery_conf['css'],))
    for css in gallery_conf['css']:
        if css not in _KNOWN_CSS:
            raise ConfigError('Unknown css %r, must be one of %r'
                              % (css, _KNOWN_CSS))
        if gallery_conf['app'] is not None:  # can be None in testing
            gallery_conf['app'].add_css_file(css + '.css')

    return gallery_conf
def get_subsections(srcdir, examples_dir, gallery_conf):
    """Return the list of subsections of a gallery.

    Parameters
    ----------
    srcdir : str
        absolute path to directory containing conf.py
    examples_dir : str
        path to the examples directory relative to conf.py
    gallery_conf : dict
        The gallery configuration.

    Returns
    -------
    out : list
        sorted list of gallery subsection folder names
    """
    sortkey = gallery_conf['subsection_order']
    # A directory counts as a subsection only if it carries a README file.
    subfolders = [subfolder for subfolder in os.listdir(examples_dir)
                  if _get_readme(os.path.join(examples_dir, subfolder),
                                 gallery_conf, raise_error=False) is not None]
    # Sort on the path relative to conf.py (which is what `subsection_order`
    # keys see), then map each sorted path back to its bare folder name.
    # A dict lookup replaces the previous O(n**2) list.index() remapping;
    # folder names within one directory are unique, so the mapping is exact,
    # and dict insertion order + stable sort preserve the original tie order.
    base_examples_dir_path = os.path.relpath(examples_dir, srcdir)
    name_by_path = {os.path.join(base_examples_dir_path, name): name
                    for name in subfolders}
    return [name_by_path[path]
            for path in sorted(name_by_path, key=sortkey)]
def _prepare_sphx_glr_dirs(gallery_conf, srcdir):
"""Creates necessary folders for sphinx_gallery files """
examples_dirs = gallery_conf['examples_dirs']
gallery_dirs = gallery_conf['gallery_dirs']
if not isinstance(examples_dirs, list):
examples_dirs = [examples_dirs]
if not isinstance(gallery_dirs, list):
gallery_dirs = [gallery_dirs]
if bool(gallery_conf['backreferences_dir']):
backreferences_dir = os.path.join(
srcdir, gallery_conf['backreferences_dir'])
if not os.path.exists(backreferences_dir):
os.makedirs(backreferences_dir)
return list(zip(examples_dirs, gallery_dirs))
def generate_gallery_rst(app):
    """Generate the Main examples gallery reStructuredText

    Start the Sphinx-Gallery configuration and recursively scan the examples
    directories in order to populate the examples gallery
    """
    logger.info('generating gallery...', color='white')
    gallery_conf = parse_config(app)

    seen_backrefs = set()
    costs = []  # accumulates ((time, memory), filename) for every example
    workdirs = _prepare_sphx_glr_dirs(gallery_conf,
                                      app.builder.srcdir)

    # Check for duplicate filenames to make sure linking works as expected
    examples_dirs = [ex_dir for ex_dir, _ in workdirs]
    files = collect_gallery_files(examples_dirs, gallery_conf)
    check_duplicate_filenames(files)
    check_spaces_in_filenames(files)

    for examples_dir, gallery_dir in workdirs:

        examples_dir = os.path.join(app.builder.srcdir, examples_dir)
        gallery_dir = os.path.join(app.builder.srcdir, gallery_dir)

        # Here we don't use an os.walk, but we recurse only twice: flat is
        # better than nested.
        this_fhindex, this_costs = generate_dir_rst(
            examples_dir, gallery_dir, gallery_conf, seen_backrefs)

        costs += this_costs
        write_computation_times(gallery_conf, gallery_dir, this_costs)

        # we create an index.rst with all examples
        index_rst_new = os.path.join(gallery_dir, 'index.rst.new')
        with codecs.open(index_rst_new, 'w', encoding='utf-8') as fhindex:
            # :orphan: to suppress "not included in TOCTREE" sphinx warnings
            fhindex.write(":orphan:\n\n" + this_fhindex)
            for subsection in get_subsections(
                    app.builder.srcdir, examples_dir, gallery_conf):
                src_dir = os.path.join(examples_dir, subsection)
                target_dir = os.path.join(gallery_dir, subsection)
                this_fhindex, this_costs = \
                    generate_dir_rst(src_dir, target_dir, gallery_conf,
                                     seen_backrefs)
                fhindex.write(this_fhindex)
                costs += this_costs
                write_computation_times(gallery_conf, target_dir, this_costs)

            if gallery_conf['download_all_examples']:
                download_fhindex = generate_zipfiles(
                    gallery_dir, app.builder.srcdir)
                fhindex.write(download_fhindex)

            if (app.config.sphinx_gallery_conf['show_signature']):
                fhindex.write(SPHX_GLR_SIG)
        # Swap index.rst.new into place only when its content changed (md5),
        # avoiding needless Sphinx rebuilds of the gallery pages.
        _replace_md5(index_rst_new, mode='t')
    _finalize_backreferences(seen_backrefs, gallery_conf)

    if gallery_conf['plot_gallery']:
        # Log a per-example runtime/memory summary to the build console.
        logger.info("computation time summary:", color='white')
        lines, lens = _format_for_writing(
            costs, os.path.normpath(gallery_conf['src_dir']), kind='console')
        for name, t, m in lines:
            text = (' - %s: ' % (name,)).ljust(lens[0] + 10)
            if t is None:
                text += '(not run)'
                logger.info(text)
            else:
                t_float = float(t.split()[0])
                if t_float >= gallery_conf['min_reported_time']:
                    text += t.rjust(lens[1]) + '   ' + m.rjust(lens[2])
                    logger.info(text)
        # Also create a junit.xml file, useful e.g. on CircleCI
        write_junit_xml(gallery_conf, app.builder.outdir, costs)
SPHX_GLR_COMP_TIMES = """
:orphan:
.. _{0}:
Computation times
=================
"""
def _sec_to_readable(t):
"""Convert a number of seconds to a more readable representation."""
# This will only work for < 1 day execution time
# And we reserve 2 digits for minutes because presumably
# there aren't many > 99 minute scripts, but occasionally some
# > 9 minute ones
t = datetime(1, 1, 1) + timedelta(seconds=t)
t = '{0:02d}:{1:02d}.{2:03d}'.format(
t.hour * 60 + t.minute, t.second,
int(round(t.microsecond / 1000.)))
return t
def cost_name_key(cost_name):
    """Key for sorting ((time, memory), name) entries.

    Orders by descending computation time, then descending memory,
    then alphabetically by name.
    """
    cost, name = cost_name
    time_taken, mem_used = cost[0], cost[1]
    return (-time_taken, -mem_used, name)
def _format_for_writing(costs, path, kind='rst'):
    """Format (cost, filename) pairs as table rows [name, time, memory].

    Returns the rows (slowest example first) together with the maximum
    width of each column, ready for rst-table ('rst') or console output.
    """
    rows = []
    for timings, fname in sorted(costs, key=cost_name_key):
        if kind == 'rst':  # like in sg_execution_times
            name = ':ref:`sphx_glr_{0}_{1}` (``{1}``)'.format(
                path, os.path.basename(fname))
            t = _sec_to_readable(timings[0])
        else:  # like in generate_gallery
            assert kind == 'console'
            name = os.path.relpath(fname, path)
            t = '%0.2f sec' % (timings[0],)
        m = '{0:.1f} MB'.format(timings[1])
        rows.append([name, t, m])
    widths = [max(col) for col in
              zip(*[[len(cell) for cell in row] for row in rows])]
    return rows, widths
def write_computation_times(gallery_conf, target_dir, costs):
    """Write the ``sg_execution_times.rst`` table for one gallery directory.

    ``costs`` is a list of ((time_sec, mem_mb), filename) pairs; nothing is
    written when no example in the directory was actually executed.
    """
    total_time = sum(cost[0][0] for cost in costs)
    if total_time == 0:
        return
    # Flatten the directory path into a single legal rst reference label.
    target_dir_clean = os.path.relpath(
        target_dir, gallery_conf['src_dir']).replace(os.path.sep, '_')
    new_ref = 'sphx_glr_%s_sg_execution_times' % target_dir_clean
    with codecs.open(os.path.join(target_dir, 'sg_execution_times.rst'), 'w',
                     encoding='utf-8') as fid:
        fid.write(SPHX_GLR_COMP_TIMES.format(new_ref))
        fid.write('**{0}** total execution time for **{1}** files:\n\n'
                  .format(_sec_to_readable(total_time), target_dir_clean))
        lines, lens = _format_for_writing(costs, target_dir_clean)
        del costs
        # Emit a simple rst grid table: a horizontal rule between rows,
        # with each cell left-justified to its column width.
        hline = ''.join(('+' + '-' * (length + 2)) for length in lens) + '+\n'
        fid.write(hline)
        format_str = ''.join('| {%s} ' % (ii,)
                             for ii in range(len(lines[0]))) + '|\n'
        for line in lines:
            line = [ll.ljust(len_) for ll, len_ in zip(line, lens)]
            text = format_str.format(*line)
            assert len(text) == len(hline)
            fid.write(text)
            fid.write(hline)
def write_junit_xml(gallery_conf, target_dir, costs):
    """Write a JUnit-style XML report of example runs.

    Only active when ``gallery_conf['junit']`` is set and the gallery was
    actually executed; useful e.g. for CI services that ingest JUnit XML.
    """
    if not gallery_conf['junit'] or not gallery_conf['plot_gallery']:
        return
    failing_as_expected, failing_unexpectedly, passing_unexpectedly = \
        _parse_failures(gallery_conf)
    n_tests = 0
    n_failures = 0
    n_skips = 0
    elapsed = 0.
    src_dir = gallery_conf['src_dir']
    output = ''
    for cost in costs:
        (t, _), fname = cost
        if not any(fname in x for x in (gallery_conf['passing_examples'],
                                        failing_unexpectedly,
                                        failing_as_expected,
                                        passing_unexpectedly)):
            continue  # not subselected by our regex
        title = gallery_conf['titles'][fname]
        # quoteattr/escape keep attribute values and tracebacks XML-safe.
        output += (
            u'<testcase classname={0!s} file={1!s} line="1" '
            u'name={2!s} time="{3!r}">'
            .format(quoteattr(os.path.splitext(os.path.basename(fname))[0]),
                    quoteattr(os.path.relpath(fname, src_dir)),
                    quoteattr(title), t))
        if fname in failing_as_expected:
            output += u'<skipped message="expected example failure"></skipped>'
            n_skips += 1
        elif fname in failing_unexpectedly or fname in passing_unexpectedly:
            if fname in failing_unexpectedly:
                traceback = gallery_conf['failing_examples'][fname]
            else:  # fname in passing_unexpectedly
                traceback = 'Passed even though it was marked to fail'
            n_failures += 1
            output += (u'<failure message={0!s}>{1!s}</failure>'
                       .format(quoteattr(traceback.splitlines()[-1].strip()),
                               escape(traceback)))
        output += u'</testcase>'
        n_tests += 1
        elapsed += t
    output += u'</testsuite>'
    # The <testsuite> header is prepended last because the test/failure/skip
    # counts are only known after the loop.
    output = (u'<?xml version="1.0" encoding="utf-8"?>'
              u'<testsuite errors="0" failures="{0}" name="sphinx-gallery" '
              u'skipped="{1}" tests="{2}" time="{3}">'
              .format(n_failures, n_skips, n_tests, elapsed)) + output
    # Actually write it
    fname = os.path.normpath(os.path.join(target_dir, gallery_conf['junit']))
    junit_dir = os.path.dirname(fname)
    if not os.path.isdir(junit_dir):
        os.makedirs(junit_dir)
    with codecs.open(fname, 'w', encoding='utf-8') as fid:
        fid.write(output)
def touch_empty_backreferences(app, what, name, obj, options, lines):
    """Generate empty back-reference example files.

    This avoids inclusion errors/warnings if there are no gallery
    examples for a class / module that is being parsed by autodoc"""
    backrefs_dir = app.config.sphinx_gallery_conf['backreferences_dir']
    if not backrefs_dir:
        return

    examples_path = os.path.join(
        app.srcdir, backrefs_dir, "%s.examples" % name)
    if os.path.exists(examples_path):
        return
    # touch file
    open(examples_path, 'w').close()
def _expected_failing_examples(gallery_conf):
return set(
os.path.normpath(os.path.join(gallery_conf['src_dir'], path))
for path in gallery_conf['expected_failing_examples'])
def _parse_failures(gallery_conf):
    """Split the failures.

    Returns (failing_as_expected, failing_unexpectedly,
    passing_unexpectedly), the last filtered down to examples actually
    selected by ``filename_pattern``.
    """
    actually_failing = set(gallery_conf['failing_examples'].keys())
    expected_to_fail = _expected_failing_examples(gallery_conf)

    failing_as_expected = actually_failing & expected_to_fail
    failing_unexpectedly = actually_failing - expected_to_fail
    passing_unexpectedly = expected_to_fail - actually_failing

    # filter from examples actually run
    pattern = gallery_conf.get('filename_pattern')
    passing_unexpectedly = [
        src_file for src_file in passing_unexpectedly
        if re.search(pattern, src_file)]
    return failing_as_expected, failing_unexpectedly, passing_unexpectedly
def summarize_failing_examples(app, exception):
    """Collect the list of failing examples and print them with a traceback.

    Raises ExtensionError if there were unexpected failures (or examples
    that were expected to fail but passed).
    """
    if exception is not None:
        return

    # Under no-plot Examples are not run so nothing to summarize
    if not app.config.sphinx_gallery_conf['plot_gallery']:
        logger.info('Sphinx-Gallery gallery_conf["plot_gallery"] was '
                    'False, so no examples were executed.', color='brown')
        return

    gallery_conf = app.config.sphinx_gallery_conf
    failing_as_expected, failing_unexpectedly, passing_unexpectedly = \
        _parse_failures(gallery_conf)

    if failing_as_expected:
        logger.info("Examples failing as expected:", color='brown')
        for fail_example in failing_as_expected:
            logger.info('%s failed leaving traceback:', fail_example,
                        color='brown')
            logger.info(gallery_conf['failing_examples'][fail_example],
                        color='brown')

    fail_msgs = []
    if failing_unexpectedly:
        fail_msgs.append(red("Unexpected failing examples:"))
        for fail_example in failing_unexpectedly:
            fail_msgs.append(fail_example + ' failed leaving traceback:\n' +
                             gallery_conf['failing_examples'][fail_example] +
                             '\n')

    if passing_unexpectedly:
        # BUG FIX: a '+' was missing before the final "\n".join(...), so
        # implicit string-literal concatenation glued "in your conf.py file"
        # onto "\n" and used the combined string as the join *separator*,
        # mangling the message (and dropping the last sentence entirely when
        # only one example was listed).
        fail_msgs.append(red("Examples expected to fail, but not failing:\n") +
                         "Please remove these examples from\n" +
                         "sphinx_gallery_conf['expected_failing_examples']\n" +
                         "in your conf.py file:\n" +
                         "\n".join(passing_unexpectedly))

    # standard message
    n_good = len(gallery_conf['passing_examples'])
    n_tot = len(gallery_conf['failing_examples']) + n_good
    n_stale = len(gallery_conf['stale_examples'])
    logger.info('\nSphinx-Gallery successfully executed %d out of %d '
                'file%s subselected by:\n\n'
                ' gallery_conf["filename_pattern"] = %r\n'
                ' gallery_conf["ignore_pattern"] = %r\n'
                '\nafter excluding %d file%s that had previously been run '
                '(based on MD5).\n'
                % (n_good, n_tot, 's' if n_tot != 1 else '',
                   gallery_conf['filename_pattern'],
                   gallery_conf['ignore_pattern'],
                   n_stale, 's' if n_stale != 1 else '',
                   ),
                color='brown')

    if fail_msgs:
        raise ExtensionError(
            "Here is a summary of the problems encountered "
            "when running the examples\n\n" + "\n".join(fail_msgs) +
            "\n" + "-" * 79)
def collect_gallery_files(examples_dirs, gallery_conf):
    """Collect python files from the gallery example directories.

    Returns the full paths of every ``*.py`` file found (recursively) under
    ``examples_dirs`` whose filename does not match
    ``gallery_conf['ignore_pattern']``.
    """
    ignore = gallery_conf['ignore_pattern']
    collected = []
    for gallery_dir in examples_dirs:
        for root, _dirs, names in os.walk(gallery_dir):
            collected.extend(
                os.path.join(root, name)
                for name in names
                if name.endswith('.py') and re.search(ignore, name) is None
            )
    return collected
def check_duplicate_filenames(files):
    """Check for duplicate filenames across gallery directories.

    Duplicate basenames break some generated links, so warn (once, with the
    full list) when any are found.
    """
    seen = set()
    duplicates = []
    for path in files:
        base = os.path.basename(path)
        if base in seen:
            duplicates.append(path)
        else:
            seen.add(base)
    if duplicates:
        logger.warning(
            'Duplicate example file name(s) found. Having duplicate file '
            'names will break some links. '
            'List of files: {}'.format(sorted(duplicates),))
def check_spaces_in_filenames(files):
    """Check for spaces in filenames across example directories.

    Whitespace in example filenames breaks some generated links, so warn
    with the full offending list when any are found.
    """
    offenders = [fname for fname in files if re.search(r'[\s]', fname)]
    if offenders:
        logger.warning(
            'Example file name(s) with space(s) found. Having space(s) in '
            'file names will break some links. '
            'List of files: {}'.format(sorted(offenders),))
def get_default_config_value(key):
    """Return a getter reading *key* from the ``sphinx_gallery_conf`` dict.

    The returned callable takes a sphinx config object and falls back to
    ``DEFAULT_GALLERY_CONF[key]`` when the key is absent.
    """
    def default_getter(conf):
        gallery_conf = conf['sphinx_gallery_conf']
        return gallery_conf.get(key, DEFAULT_GALLERY_CONF[key])
    return default_getter
def setup(app):
    """Setup Sphinx-Gallery sphinx extension"""
    # Give the compatibility shim a handle on the running app.
    sphinx_compatibility._app = app

    app.add_config_value('sphinx_gallery_conf', DEFAULT_GALLERY_CONF, 'html')
    for key in ('plot_gallery', 'abort_on_example_error'):
        app.add_config_value(key, get_default_config_value(key), 'html')

    if 'sphinx.ext.autodoc' in app.extensions:
        app.connect('autodoc-process-docstring', touch_empty_backreferences)

    # Add the custom directive
    app.add_directive('minigallery', MiniGallery)

    app.connect('builder-inited', generate_gallery_rst)
    # Post-build hooks, in order: binder files, failure summary, code links.
    for handler in (copy_binder_files,
                    summarize_failing_examples,
                    embed_code_links):
        app.connect('build-finished', handler)

    return {'parallel_read_safe': True,
            'parallel_write_safe': True,
            'version': _sg_version}
def setup_module():
    """Intentional no-op.

    HACK: present so nosetests does not treat ``setup()`` above as a
    module-level test fixture.
    """
| Eric89GXL/sphinx-gallery | sphinx_gallery/gen_gallery.py | Python | bsd-3-clause | 32,422 | [
"Mayavi"
] | 703555067397bb99ccb2896386b3a4e46233489e2eff8a9e7143891c435b9f36 |
"""
Taken from https://github.com/brentp/pyfasta/blob/452d1ce5406ed73c4149b6d201bc65e4aa8afc27/tests/bench.py
"""
from itertools import islice
from tempfile import NamedTemporaryFile
import pyfaidx
import pyfasta
import pysam
from Bio import SeqIO
import time
import random
import os
import sys
from subprocess import call, check_output
import tracemalloc
# Fixed seed so the query intervals are identical across runs and tools.
random.seed(1234)
SEQLEN = 1000000  # length of each synthetic FASTA record (bp)
try:
    # Optional CLI override for the number of query intervals.
    nreads = int(sys.argv[1])
except IndexError:
    nreads = 10000
read_len = 1000  # length of each queried interval (bp)
def mean(s):
    """Arithmetic mean of the values in *s* (must be non-empty)."""
    total = sum(s)
    return total / len(s)
def make_intervals(nreads=nreads, seqlen=SEQLEN, readlen=read_len):
    """Yield ``nreads`` random (start, end) query intervals on [0, seqlen].

    End coordinates are clipped to ``seqlen``.
    """
    for _ in range(nreads):
        begin = random.randint(0, seqlen)
        yield begin, min(seqlen, begin + readlen)
# Materialize the query intervals once so every benchmarked tool reads the
# exact same set of coordinates.
intervals = tuple(make_intervals())
def make_long_fasta(filename, nrecs=250, seqlen=SEQLEN):
    """Write *nrecs* synthetic records of ``ACTGACTGAC`` repeats to *filename*.

    Returns the list of record headers (without the leading ``>``).
    """
    headers = []
    unit = "ACTGACTGAC"
    with open(filename, 'w') as out:
        for rec in range(nrecs):
            header = "header%i" % rec
            headers.append(header)
            out.write('>' + header + '\n')
            # wrap_sequence yields 80-column lines for the repeated unit
            for line in pyfaidx.wrap_sequence(80, unit * (seqlen // 10)):
                out.write(line)
    return headers
def read_dict(f, headers):
    """Slice every query interval out of each 10th record via dict access."""
    for key in islice(headers, 0, None, 10):
        for begin, end in intervals:
            # str() forces materialization of the (possibly lazy) slice
            str(f[key][begin:end])
def read_faidx(f, headers):
    """Fetch every query interval from each 10th record via the faidx API."""
    for key in islice(headers, 0, None, 10):
        for begin, end in intervals:
            # faidx fetch uses 1-based inclusive coordinates
            str(f.fetch(key, begin + 1, end))
def read_fastahack(f, headers):
    """Fetch every query interval from each 10th record via fastahack."""
    for key in islice(headers, 0, None, 10):
        for begin, end in intervals:
            # fastahack uses 0-based coordinates
            str(f.get_sub_sequence(key, begin, end))
def read_pysam(f, headers):
    """Fetch query intervals from each 100th record via pysam.faidx."""
    last_report = time.time()
    for key in islice(headers, 0, None, 100):
        for begin, end in intervals:
            if time.time() - last_report > 300:
                # heartbeat: print the current record every 5 minutes
                print(key)
                last_report = time.time()
            str(pysam.faidx(f, '{0}:{1}-{2}'.format(key, begin + 1, end)))
def read_samtools(f, headers):
    """Fetch query intervals from each 100th record by shelling out to samtools."""
    last_report = time.time()
    for key in islice(headers, 0, None, 100):
        for begin, end in intervals:
            if time.time() - last_report > 300:
                # heartbeat: print the current record every 5 minutes
                print(key)
                last_report = time.time()
            check_output(['samtools', 'faidx', f,
                          '{0}:{1}-{2}'.format(key, begin + 1, end)])
def main():
    """Benchmark several FASTA random-access tools on a synthetic file.

    The original version repeated the same measure-loop seven times in seven
    nested functions; this version factors it into one ``benchmark`` helper.
    For each tool we repeat ``n`` times: time the index/handle construction,
    time reading the shared query intervals, then remove index files so the
    next round rebuilds from scratch.  A final instrumented round under
    ``tracemalloc`` reports memory (skipped for samtools, which runs
    out-of-process).  Printed titles, order, and normalization divisors match
    the original per-tool functions exactly (including the original's mixed
    use of /10 vs /100).
    """
    fa_file = NamedTemporaryFile()
    index = fa_file.name + '.fai'
    headers = make_long_fasta(fa_file.name)

    def remove_faidx_index(_handle=None):
        # .fai index shared by pyfaidx / pysam / samtools
        os.remove(index)

    def remove_pyfasta_index(_handle=None):
        # pyfasta writes a flattened copy plus an index database
        os.remove(fa_file.name + '.flat')
        os.remove(fa_file.name + '.gdx')

    def benchmark(title, n, init_fn, read_fn, cleanup_fn,
                  per_query_divisor=10, profile=True):
        """Run one tool's timing (and optional memory) benchmark.

        init_fn: builds and returns the tool's handle (timed separately).
        read_fn: performs the interval reads against that handle.
        cleanup_fn: removes index files / closes handles between rounds.
        per_query_divisor: per-query normalization factor from the original
            code (10 or 100 depending on the tool).
        profile: when True, run one extra round under tracemalloc.
        """
        print(title)
        init_times = []
        read_times = []
        for _ in range(n):
            t = time.time()
            handle = init_fn()
            init_times.append(time.time() - t)
            t = time.time()
            read_fn(handle)
            read_times.append(time.time() - t)
            cleanup_fn(handle)
        if profile:
            # profile memory usage, then report timings
            tracemalloc.start()
            handle = init_fn()
            read_fn(handle)
            cleanup_fn(handle)
            print(tracemalloc.get_traced_memory())
        print(mean(init_times))
        print(mean(read_times) / nreads / per_query_divisor * 1000 * 1000)
        if profile:
            tracemalloc.stop()

    n = 3
    benchmark('timings for pyfaidx.Fasta', n,
              lambda: pyfaidx.Fasta(fa_file.name),
              lambda f: read_dict(f, headers),
              remove_faidx_index)
    benchmark('timings for pyfaidx.Faidx', n,
              lambda: pyfaidx.Faidx(fa_file.name),
              lambda f: read_faidx(f, headers),
              remove_faidx_index)
    benchmark('timings for pyfasta.Fasta', n,
              lambda: pyfasta.Fasta(fa_file.name),
              lambda f: read_dict(f, headers),
              remove_pyfasta_index)
    benchmark('timings for pyfasta.Fasta (fseek)', n,
              lambda: pyfasta.Fasta(fa_file.name,
                                    record_class=pyfasta.FastaRecord),
              lambda f: read_dict(f, headers),
              remove_pyfasta_index)

    def seqio_open():
        # Bio.SeqIO keeps the file handle alive with the parsed dict.
        fh = open(fa_file.name)
        return fh, SeqIO.to_dict(SeqIO.parse(fh, "fasta"))

    benchmark('timings for Bio.SeqIO', n,
              seqio_open,
              lambda handle: read_dict(handle[1], headers),
              lambda handle: handle[0].close(),
              per_query_divisor=100)
    # The fastahack benchmark stayed disabled in the original (its import is
    # missing at module level), so it is omitted here as well.
    benchmark('timings for samtools faidx', n,
              lambda: call(['samtools', 'faidx', fa_file.name]),
              lambda _handle: read_samtools(fa_file.name, headers),
              remove_faidx_index,
              per_query_divisor=100, profile=False)
    benchmark('timings for pysam.faidx', n,
              lambda: pysam.faidx(fa_file.name),
              lambda _handle: read_pysam(fa_file.name, headers),
              remove_faidx_index)
# Script entry point: run the full benchmark suite.
if __name__ == "__main__":
    main()
| mattions/pyfaidx | scripts/benchmark.py | Python | bsd-3-clause | 8,301 | [
"pysam"
] | e8c6c4449758f2ee3407c5b212293cf27c9699c0976718764e1dc92a1c7c8caf |
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Fit 2 GMMs to 2 point clouds using likelihood and (approx) W2 distance.
Suppose we have two large point clouds and want to estimate a coupling and a
W2 distance between them. In https://arxiv.org/abs/1907.05254, Delon & Desolneux
propose fitting a GMM to each point cloud while simultaneously minimizing a
Wasserstein-like distance called MW2 between the fitted GMMs. MW2 is an upper
bound on W2, the Wasserstein distance between the GMMs. Here we implement
their algorithm as well as a generalization that allows for reweightings using
generalized, penalized expectation-maximization (see section 6.2 of Delon &
Desolneux).
As in `fit_gmm.py`, we assume that the observations $X_0$ and $X_1$ from
batches 0 and 1 are generated by GMMs with parameters $\Theta_0$ and $\Theta_1$,
respectively. We will use $\Theta$ to denote the combined parameters
for the two GMMs. We denote the (unobserved) components that gave rise to the
observations $X_i$ as $Z_i$.
Our goal is to maximize a weighted sum of the likelihood of the observations $X$
under the fitted GMMs and a measure of distance, $MW_2$, between the fitted
GMMs. The problem would be a straightforward maximization exercise if we knew
the components $Z$ that generated each observation $X$. Because the $Z$ are
unobserved, however, we use EM:
We start with an initial estimate of $\Theta$, $\Theta^{(t)}$.
* The E-step: We use the current $\Theta^{(t)}$ to estimate the likelihood of
all possible cluster attributions for each observation $X$.
* The M-step: We form the function $Q(\Theta|\Theta^{(t)})$,
the log likelihood of our observations averaged over all possible
assignments. We then obtain an updated parameter estimate, $\Theta^{(t+1)}$,
by numerically maximizing the sum of $Q$ and our GMM distance penalty.
It can be shown that if we maximize the penalized $Q$ above, this procedure will
increase or leave unchanged the penalized log likelihood for $\Theta$. We
iterate over these two steps until convergence. Note that the resulting
estimate for $\Theta$ may only be a *local* maximum of the penalized
likelihood function.
Sample usage:
# (Note that we usually initialize a pair to a single GMM that we fit to a
# pooled set, then the two GMMs separate as we optimize the pair.)
pair_init = gaussian_mixture_pair.GaussianMixturePair(
gmm0=gmm0,
gmm1=gmm1,
epsilon=1.e-2,
tau=1.)
fit_model_em_fn = fit_gmm_pair.get_fit_model_em_fn(
weight_transport=0.1,
weight_splitting=1.,
epsilon=pair_init.epsilon,
jit=True)
pair, loss = fit_model_em_fn(
pair=pair_init,
points0=samples_gmm0,
points1=samples_gmm1,
point_weights0=None,
point_weights1=None,
em_steps=30,
m_steps=20,
verbose=True)
"""
# TODO(geoffd): look into refactoring so we jit higher level functions
import functools
import math
from typing import Callable, NamedTuple, Optional, Tuple
import jax
import jax.numpy as jnp
import optax
from ott.tools.gaussian_mixture import fit_gmm
from ott.tools.gaussian_mixture import gaussian_mixture
from ott.tools.gaussian_mixture import gaussian_mixture_pair
LOG2 = math.log(2)  # ln(2), precomputed module-level constant
class Observations(NamedTuple):
    """Weighted observations and their E-step assignment probabilities."""
    # observed points; get_q feeds these to gmm.conditional_log_prob
    points: jnp.ndarray
    # per-point weights used for the weighted likelihood average in get_q
    point_weights: jnp.ndarray
    # per-point component assignment probabilities from the E step;
    # presumably shaped (n_points, n_components) — confirm against fit_gmm
    assignment_probs: jnp.ndarray
# Model fit
def get_q(
    gmm: gaussian_mixture.GaussianMixture,
    obs: Observations) -> jnp.ndarray:
    r"""Get Q(\Theta|\Theta^{(t)}).

    Q is the log likelihood of the observations under the current parameter
    estimates \Theta, averaged over the component assignment probabilities
    computed in the E step for \Theta^{(t)} (see the module overview of EM).

    Args:
      gmm: GMM model parameterized by Theta
      obs: weighted observations with component assignments computed in the E
        step for \Theta^{(t)}

    Returns:
      Q(\Theta|\Theta^{(t)})
    """
    # log p(X, Z | Theta) = log p(X | Z, Theta) + log p(Z | Theta)
    log_p_x_z = (gmm.conditional_log_prob(obs.points) +
                 gmm.log_component_weights())
    # Average over assignments P(Z | X, Theta^(t)) ...
    per_point = jnp.sum(log_p_x_z * obs.assignment_probs, axis=-1)
    # ... then take the weighted mean over points.
    total_weight = jnp.sum(obs.point_weights, axis=0)
    return jnp.sum(obs.point_weights * per_point, axis=0) / total_weight
# Objective function
@functools.lru_cache()
def get_objective_fn(weight_transport: float):
    """Get the total loss function with static parameters in a closure.

    Args:
      weight_transport: weight for the transport penalty

    Returns:
      A function that returns the objective for a GaussianMixturePair.
    """
    def _objective_fn(
        pair: gaussian_mixture_pair.GaussianMixturePair,
        obs0: Observations,
        obs1: Observations,
    ) -> jnp.ndarray:
        """Objective for a pair of GMMs: data fit minus transport penalty.

        Args:
          pair: pair of GMMs + coupling for which to evaluate the objective
          obs0: first set of observations
          obs1: second set of observations

        Returns:
          The objective to be maximized in the M-step.
        """
        data_fit = (get_q(gmm=pair.gmm0, obs=obs0) +
                    get_q(gmm=pair.gmm1, obs=obs1))
        sinkhorn_output = pair.get_sinkhorn(cost_matrix=pair.get_cost_matrix())
        transport_penalty = sinkhorn_output.reg_ot_cost
        return data_fit - weight_transport * transport_penalty
    return _objective_fn
def print_losses(
    iteration: int,
    weight_transport: float,
    pair: gaussian_mixture_pair.GaussianMixturePair,
    obs0: Observations,
    obs1: Observations):
    """Print the loss components for diagnostic purposes."""
    q0 = get_q(gmm=pair.gmm0, obs=obs0)
    q1 = get_q(gmm=pair.gmm1, obs=obs1)
    sinkhorn_output = pair.get_sinkhorn(cost_matrix=pair.get_cost_matrix())
    transport_penalty = sinkhorn_output.reg_ot_cost
    objective = q0 + q1 - weight_transport * transport_penalty
    # flush so progress shows up promptly in buffered logs
    print((f'{iteration:3d} {q0:.3f} {q1:.3f} '
           f'transport:{transport_penalty:.3f} '
           f'objective:{objective:.3f}'),
          flush=True)
# The E-step for a single GMM
def do_e_step(
e_step_fn: Callable[[gaussian_mixture.GaussianMixture, jnp.ndarray],
jnp.ndarray],
gmm: gaussian_mixture.GaussianMixture,
points: jnp.ndarray,
point_weights: jnp.ndarray,
) -> Observations:
assignment_probs = e_step_fn(gmm, points)
return Observations(points=points,
point_weights=point_weights,
assignment_probs=assignment_probs)
# The M-step
def get_m_step_fn(
    learning_rate: float,
    objective_fn,
    jit: bool):
    """Get a function that performs the M-step of the EM algorithm.

    We precompile and precompute a few quantities that we put into a closure.

    Args:
      learning_rate: learning rate to use for the Adam optimizer
      objective_fn: the objective function to maximize
      jit: if True, precompile key methods

    Returns:
      A function that performs the M-step of EM.
    """
    # Fix: the original also built (and jitted)
    # GaussianMixture.from_points_and_assignment_probs here, but never used
    # it — dead code removed.
    grad_objective_fn = jax.grad(objective_fn, argnums=(0,))
    if jit:
        grad_objective_fn = jax.jit(grad_objective_fn)
    opt_init, opt_update = optax.chain(
        # Set the parameters of Adam. Note the learning_rate is not here.
        optax.scale_by_adam(b1=0.9, b2=0.999, eps=1e-8),
        optax.scale(learning_rate)
    )

    def _m_step_fn(
        pair: gaussian_mixture_pair.GaussianMixturePair,
        obs0: Observations,
        obs1: Observations,
        steps: int,
    ) -> gaussian_mixture_pair.GaussianMixturePair:
        """Perform the M-step on a pair of Gaussian mixtures.

        Args:
          pair: GMM parameters to optimize
          obs0: first set of observations
          obs1: second set of observations
          steps: number of optimization steps to use when maximizing the
            objective

        Returns:
          A GaussianMixturePair with updated parameters.

        Raises:
          ValueError: if either updated GMM contains NaNs.
        """
        params = (pair,)
        state = opt_init(params)
        for _ in range(steps):
            # NOTE(review): the gradient is evaluated at the *initial* `pair`
            # on every step rather than at the current params[0] — confirm
            # whether this is intended.
            grad_objective = grad_objective_fn(pair, obs0, obs1)
            updates, state = opt_update(grad_objective, state, params)
            params = optax.apply_updates(params, updates)
        # Bail out loudly if the optimization diverged.
        for j, gmm in enumerate((params[0].gmm0, params[0].gmm1)):
            if gmm.has_nans():
                raise ValueError(f'NaN in gmm{j}')
        return params[0]
    return _m_step_fn
def get_fit_model_em_fn(
    weight_transport: float,
    learning_rate: float = 0.001,
    jit: bool = True,
):
    """Get a function that performs penalized EM.

    We precompile and precompute a few quantities that we put into a closure.

    Args:
      weight_transport: weight for the transportation loss in the total loss
      learning_rate: learning rate to use for the Adam optimizer
      jit: if True, precompile key methods

    Returns:
      A function that performs generalized, penalized EM.
    """
    objective_fn = get_objective_fn(weight_transport=weight_transport)
    e_step_fn = fit_gmm.get_assignment_probs
    if jit:
        # Compile once here so every EM iteration reuses the compiled versions.
        objective_fn = jax.jit(objective_fn)
        e_step_fn = jax.jit(e_step_fn)
    m_step_fn = get_m_step_fn(
        learning_rate=learning_rate,
        objective_fn=objective_fn,
        jit=jit)
    def _fit_model_em(
        pair: gaussian_mixture_pair.GaussianMixturePair,
        points0: jnp.ndarray,
        points1: jnp.ndarray,
        point_weights0: Optional[jnp.ndarray],
        point_weights1: Optional[jnp.ndarray],
        em_steps: int,
        m_steps: int = 50,
        verbose: bool = False,
    ) -> Tuple[gaussian_mixture_pair.GaussianMixturePair, float]:
        """Optimize a GaussianMixturePair using penalized EM.

        Args:
          pair: GaussianMixturePair to optimize
          points0: observations associated with pair.gmm0
          points1: observations associated with pair.gmm1
          point_weights0: weights for points0 (None means uniform)
          point_weights1: weights for points1 (None means uniform)
          em_steps: number of EM steps to perform
          m_steps: number of gradient descent steps to perform in the M-step
          verbose: if True, print status messages

        Returns:
          An updated GaussianMixturePair and the final loss.
        """
        if point_weights0 is None:
            point_weights0 = jnp.ones(points0.shape[0])
        if point_weights1 is None:
            point_weights1 = jnp.ones(points1.shape[0])
        # When gmm1 is locked its parameters never change, so its E-step
        # assignments can be computed once up front and reused every iteration.
        if pair.lock_gmm1:
            obs1 = do_e_step(
                e_step_fn=e_step_fn,
                gmm=pair.gmm1,
                points=points1,
                point_weights=point_weights1)
        for i in range(em_steps):
            # E-step
            obs0 = do_e_step(
                e_step_fn=e_step_fn,
                gmm=pair.gmm0,
                points=points0,
                point_weights=point_weights0)
            if not pair.lock_gmm1:
                obs1 = do_e_step(
                    e_step_fn=e_step_fn,
                    gmm=pair.gmm1,
                    points=points1,
                    point_weights=point_weights1)
            # print current losses
            if verbose:
                print_losses(
                    iteration=i,
                    weight_transport=weight_transport,
                    pair=pair,
                    obs0=obs0,
                    obs1=obs1)
            # the M-step
            pair = m_step_fn(pair=pair, obs0=obs0, obs1=obs1, steps=m_steps)
        # final E-step before computing the loss, so the reported loss
        # reflects the assignments for the *final* parameters
        obs0 = do_e_step(
            e_step_fn=e_step_fn,
            gmm=pair.gmm0,
            points=points0,
            point_weights=point_weights0)
        if not pair.lock_gmm1:
            obs1 = do_e_step(
                e_step_fn=e_step_fn,
                gmm=pair.gmm1,
                points=points1,
                point_weights=point_weights1)
        loss = objective_fn(pair=pair, obs0=obs0, obs1=obs1)
        return pair, loss
    return _fit_model_em
| google-research/ott | ott/tools/gaussian_mixture/fit_gmm_pair.py | Python | apache-2.0 | 12,305 | [
"Gaussian"
] | 72b723ff337f69cad94b1009ed9d64b0d0d8deac4fa64cfcddc8177d0b9e2848 |
from cached_property import cached_property
from pathlib import Path
from qgis.PyQt.QtCore import pyqtSignal
from qgis.PyQt.QtCore import Qt
from ThreeDiToolbox.datasource.threedi_results import ThreediResult
from ThreeDiToolbox.models.base import BaseModel
from ThreeDiToolbox.models.base_fields import CheckboxField
from ThreeDiToolbox.models.base_fields import ValueField
from ThreeDiToolbox.utils.layer_from_netCDF import get_or_create_flowline_layer
from ThreeDiToolbox.utils.layer_from_netCDF import get_or_create_node_layer
from ThreeDiToolbox.utils.layer_from_netCDF import get_or_create_pumpline_layer
from ThreeDiToolbox.utils.user_messages import pop_up_info
from ThreeDiToolbox.utils.user_messages import StatusProgressBar
import logging
logger = logging.getLogger(__name__)
def get_line_pattern(item_field):
    """Return (default) line pattern for plots from this datasource.

    Look at the already-used styles and try to pick an unused one.

    :param item_field:
    :return: QT line pattern
    """
    candidate_styles = (
        Qt.SolidLine,
        Qt.DashLine,
        Qt.DotLine,
        Qt.DashDotLine,
        Qt.DashDotDotLine,
    )
    used_patterns = {item.pattern.value for item in item_field.row.model.rows}
    for candidate in candidate_styles:
        if candidate not in used_patterns:
            # Hurray, an unused style.
            return candidate
    # No unused styles. Use the solid line style as a default.
    return Qt.SolidLine
def pop_up_unkown_datasource_type():
    """Log and show a dialog explaining the result file is unsupported."""
    # NOTE: the "unkown" typo is part of the public function name; callers
    # elsewhere depend on it, so it is kept.
    message = (
        "QGIS3 works with ThreeDiToolbox >v1.6 and can only handle \n"
        "results created after March 2018 (groundwater release). \n\n"
        "You can do two things: \n"
        "1. simulate this model again and load the result in QGIS3 \n"
        "2. load this result into QGIS2.18 ThreeDiToolbox v1.6 "
    )
    logger.error(message)
    pop_up_info(message, title="Error")
class ValueWithChangeSignal(object):
    """Value for use inside a BaseModel. A change emits a signal.

    It works like a python property. The whole ``__get__``, ``instance``,
    ``owner`` stuff is explained here:
    https://stackoverflow.com/a/18038707/27401

    The ``signal_setting_name`` has to do with the way project state is saved,
    see ``utils/qprojects.py``.
    """

    def __init__(self, signal_name, signal_setting_name, initial_value=None):
        """Initialize ourselves as a kind-of-python-property.

        ``signal_name`` is the name of a class attribute that should be a
        qtsignal. ``signal_setting_name`` is the string that gets emitted as
        the first argument of the signal. It functions as a key for the
        key/value state storage mechanism from ``utils.qprojects.py``.
        """
        self.signal_name = signal_name
        self.signal_setting_name = signal_setting_name
        self.value = initial_value

    def __get__(self, instance, owner):
        # Descriptor read: the value is stored on the descriptor itself.
        return self.value

    def __set__(self, instance, value):
        self.value = value
        # Look up the signal on the owning instance and announce the change.
        signal = getattr(instance, self.signal_name)
        signal.emit(self.signal_setting_name, value)
class DatasourceLayerHelper(object):
    """Helper class for TimeseriesDatasourceModel

    Our methods are transparently called from
    :py:class:`TimeseriesDatasourceModel`, so effectively we could also be
    methods on *that* class.
    """

    def __init__(self, file_path):
        # Path to the result file (handed on to ThreediResult below).
        self.file_path = Path(file_path)
        self.datasource_dir = self.file_path.parent
        # Note: this is the older sqlite gridadmin, not the newer gridadmin.h5!
        self.sqlite_gridadmin_filepath = str(self.datasource_dir / "gridadmin.sqlite")
        # The following three are caches for self.get_result_layers()
        self._line_layer = None
        self._node_layer = None
        self._pumpline_layer = None

    @cached_property
    def threedi_result(self):
        """Return an instance of a subclass of ``BaseDataSource``."""
        return ThreediResult(self.file_path)

    def get_result_layers(self, progress_bar=None):
        """Return QgsVectorLayers for line, node, and pumpline layers.

        Use cached versions (``self._line_layer`` and so) if present.
        """
        if progress_bar is None:
            progress_bar = StatusProgressBar(100, "create gridadmin.sqlite")
        progress_bar.increase_progress(0, "create flowline layer")
        # NOTE(review): the progress labels below appear shifted one step
        # ahead of the layer actually being built — confirm this is intended.
        progress_bar.increase_progress(33, "create node layer")
        self._line_layer = self._line_layer or get_or_create_flowline_layer(
            self.threedi_result, self.sqlite_gridadmin_filepath
        )
        progress_bar.increase_progress(33, "create pumplayer layer")
        self._node_layer = self._node_layer or get_or_create_node_layer(
            self.threedi_result, self.sqlite_gridadmin_filepath
        )
        progress_bar.increase_progress(34, "done")
        self._pumpline_layer = self._pumpline_layer or get_or_create_pumpline_layer(
            self.threedi_result, self.sqlite_gridadmin_filepath
        )
        return [self._line_layer, self._node_layer, self._pumpline_layer]
class TimeseriesDatasourceModel(BaseModel):
    """Model for selecting threedi netcdf results.

    Used as ``self.ts_datasources`` throughout the entire plugin.

    Often, ``self.ts_datasources.rows[0]`` is used, as the first one is
    effectively treated as the selected datasource

    We're also used for storing the selected model schematisation as
    :py:attr:`model_spatialite_filepath`.
    """

    model_schematisation_change = pyqtSignal(str, str)
    results_change = pyqtSignal(str, list)

    def __init__(self):
        BaseModel.__init__(self)
        # Re-emit results_change whenever the set of rows changes in any way.
        self.dataChanged.connect(self.on_change)
        self.rowsRemoved.connect(self.on_change)
        self.rowsInserted.connect(self.on_change)

    tool_name = "result_selection"
    #: model_spatialite_filepath is the currently selected 3di model db.
    model_spatialite_filepath = ValueWithChangeSignal(
        "model_schematisation_change", "model_schematisation"
    )
    # TODO: don't we want a similar one for the selected netcdf? Instead of doing [0]?

    class Fields(object):
        # Field definitions; per the BaseModel pattern the methods below run
        # on row instances and read field values via ``self.<field>.value``.
        active = CheckboxField(
            show=True, default_value=True, column_width=20, column_name=""
        )
        name = ValueField(show=True, column_width=130, column_name="Name")
        file_path = ValueField(show=True, column_width=615, column_name="File")
        type = ValueField(show=False)
        pattern = ValueField(show=False, default_value=get_line_pattern)

        @cached_property
        def datasource_layer_helper(self):
            """Return DatasourceLayerHelper."""
            datasource_type = self.type.value
            if datasource_type != "netcdf-groundwater":
                pop_up_unkown_datasource_type()
                raise AssertionError("unknown datasource type: %s" % datasource_type)
            # Previously, the manager could handle more kinds of datasource
            # types. If in the future, more kinds again are needed,
            # instantiate a different kind of manager here.
            return DatasourceLayerHelper(self.file_path.value)

        def threedi_result(self):
            """Return ThreediResult instance."""
            return self.datasource_layer_helper.threedi_result

        def sqlite_gridadmin_filepath(self):
            # Note: this is the older sqlite gridadmin, not the newer gridadmin.h5!
            return self.datasource_layer_helper.sqlite_gridadmin_filepath

        def get_result_layers(self):
            return self.datasource_layer_helper.get_result_layers()

    def reset(self):
        # Drop every datasource row.
        self.removeRows(0, self.rowCount())

    def on_change(self, start=None, stop=None, etc=None):
        # TODO: what are emitted aren't directories but datasource models?
        self.results_change.emit("result_directories", self.rows)
class DownloadableResultModel(BaseModel):
    """Model with 3di results that can be downloaded from lizard."""

    class Fields(object):
        # Columns shown when listing downloadable results.
        name = ValueField(show=True, column_width=250, column_name="Name")
        size_mebibytes = ValueField(
            show=True, column_width=120, column_name="Size (MiB)"
        )
        url = ValueField(show=True, column_width=300, column_name="URL")
        results = ValueField(show=False)  # the scenario results
| nens/threedi-qgis-plugin | tool_result_selection/models.py | Python | gpl-3.0 | 8,337 | [
"NetCDF"
] | f6ac1d0fae3c7eba5414fa24481ebe40589204375d03b3383dc823c05904f0a5 |
import os
import warnings
# cmr calls all available methods in ase.atoms detected by the module inspect.
# Therefore also deprecated methods are called - and we choose to silence those warnings.
warnings.filterwarnings('ignore', 'ase.atoms.*deprecated',)
from ase.test import NotAvailable
# if CMR_SETTINGS_FILE is missing, cmr raises simply
# Exception("CMR is not configured properly. Please create the settings file with cmr --create-settings.")
try:
    import cmr
except (Exception, ImportError):
    raise NotAvailable('CMR is required')
from ase.calculators.emt import EMT
from ase.io import read, write
from ase.structure import molecule
# "db_keywords" become searchable keywords in the cmr database;
# "molecule" is stored as a plain field.
cmr_params = {"db_keywords":["O", "ase"], # keyword
              "molecule":"O2"} #field
m1 = molecule('O2')
m1.set_calculator(EMT())
e1 = m1.get_potential_energy()
write("O2.cmr", m1, cmr_params = cmr_params)
# Round-trip check: energy read back must match what was written.
reread = read("O2.cmr")
e2 = reread.get_potential_energy()
assert abs(e1 - e2) < 1.e-6, str(e1) + ' ' + str(e2)
# The cmr metadata must also survive the round trip.
db_read = cmr.read("O2.cmr")
assert "O" in db_read["db_keywords"]
assert "ase" in db_read["db_keywords"]
assert db_read["molecule"] == "O2"
# clean
filename = "O2.cmr"
if os.path.exists(filename): os.unlink(filename)
| grhawk/ASE | tools/ase/test/cmr/ase_rw.py | Python | gpl-2.0 | 1,194 | [
"ASE"
] | 5034f5449c418ea81a7da4e44d22c8085a441a9679edc2a5405df1cb7247f8bb |
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
import gym_morph # customized environment for cart-pole
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
start_time = time.time()
MAX_TEST = 10
for test_num in range(1,MAX_TEST+1):
# Hyperparameters
RANDOM_NUMBER_SEED = test_num
ENVIRONMENT1 = "morph-v0"
MAX_EPISODES = 8000 # number of episodes
EPISODE_LENGTH = 500 # single episode length
HIDDEN_SIZE = 16
DISPLAY_WEIGHTS = False # Help debug weight update
gamma = 0.99 # Discount per step
RENDER = False # Render the cart-pole system
VIDEO_INTERVAL = 100 # Generate a video at this interval
CONSECUTIVE_TARGET = 100 # Including previous 100 rewards
CONST_LR = True # Constant or decaying learing rate
# Constant learning rate
const_learning_rate_in = 0.003
# Decay learning rate
start_learning_rate_in = 0.003
decay_steps_in = 100
decay_rate_in = 0.95
DIR_PATH_SAVEFIG = "/root/cartpole_plot/"
if CONST_LR:
learning_rate = const_learning_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_clr" + str(learning_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
else:
start_learning_rate = start_learning_rate_in
decay_steps = decay_steps_in
decay_rate = decay_rate_in
file_name_savefig = "el" + str(EPISODE_LENGTH) \
+ "_hn" + str(HIDDEN_SIZE) \
+ "_dlr_slr" + str(start_learning_rate).replace(".", "p") \
+ "_ds" + str(decay_steps) \
+ "_dr" + str(decay_rate).replace(".", "p") \
+ "_test" + str(test_num) \
+ ".png"
env = gym.make(ENVIRONMENT1)
env.seed(RANDOM_NUMBER_SEED)
np.random.seed(RANDOM_NUMBER_SEED)
tf.set_random_seed(RANDOM_NUMBER_SEED)
# Input and output sizes
input_size = 4
output_size = 2
# input_size = env.observation_space.shape[0]
# try:
# output_size = env.action_space.shape[0]
# except AttributeError:
# output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
if not CONST_LR:
# decay learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, decay_rate, staircase=False)
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
# Xavier (2010) weights initializer for uniform distribution:
# x = sqrt(6. / (in + out)); [-x, x]
w_init = tf.contrib.layers.xavier_initializer()
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
if CONST_LR:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
else:
optimizer = tf.train.RMSPropOptimizer(learning_rate)
train = optimizer.minimize(-1.0 * expected_returns * log_pi, global_step=global_step)
# saver = tf.train.Saver()
# Create and initialize a session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, ep, render=False):
    """Roll out one episode with the current stochastic policy.

    Parameters:
        environment: gym environment to step through.
        ep: episode index, used only to decide when to render.
        render: when True, render every VIDEO_INTERVAL-th episode.

    Returns:
        (raw_reward, discounted_reward, cumulative_reward, states, actions)
        where cumulative_reward[i] is the discounted return accumulated
        *before* step i (used by the caller as a per-step baseline).
    """
    raw_reward = 0
    discounted_reward = 0
    cumulative_reward = []
    discount = 1.0
    states = []
    actions = []
    obs = environment.reset()
    done = False
    while not done:
        states.append(obs)
        cumulative_reward.append(discounted_reward)
        if render and ((ep % VIDEO_INTERVAL) == 0):
            environment.render()
        action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
        actions.append(action)
        # BUG FIX: step the environment that was passed in; the original
        # stepped the module-level `env`, silently ignoring the parameter.
        obs, reward, done, info = environment.step(action[0])
        raw_reward += reward
        # NOTE(review): only positive rewards are discounted here, and
        # negative ones are added undiscounted — preserved as-is.
        if reward > 0:
            discounted_reward += reward * discount
        else:
            discounted_reward += reward
        discount *= gamma
    return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
    """Fetch the current network weights/biases and print them."""
    tensors = (hidden_W, hidden_B, dist_W, dist_B)
    values = [session.run(t) for t in tensors]
    print(*values)
# Per-episode instantaneous returns and their running means, for plotting
returns = []
mean_returns = []
for ep in range(MAX_EPISODES):
    raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
        run_episode(env, ep, RENDER)
    # REINFORCE-style advantage: discounted return remaining after each step
    expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
    sess.run(train, feed_dict={x: ep_states, y: ep_actions,
                               expected_returns: expected_R})
    if DISPLAY_WEIGHTS:
        display_weights(sess)
    returns.append(raw_G)
    # mean over the last CONSECUTIVE_TARGET episodes (fewer at the start)
    running_returns = returns[max(0, ep-CONSECUTIVE_TARGET):(ep+1)]
    mean_return = np.mean(running_returns)
    mean_returns.append(mean_return)
    if CONST_LR:
        msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
        msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), learning_rate, raw_G, CONSECUTIVE_TARGET, mean_return)
        print(msg)
    else:
        # decayed learning rate is a graph tensor; fetch its current value
        msg = "Test: {}/{}, Episode: {}/{}, Time: {}, Learning rate: {}, Return: {}, Last {} returns mean: {}"
        msg = msg.format(test_num, MAX_TEST, ep+1, MAX_EPISODES, time.strftime('%H:%M:%S', time.gmtime(time.time()-start_time)), sess.run(learning_rate), raw_G, CONSECUTIVE_TARGET, mean_return)
        print(msg)
env.close() # close openai gym environment
tf.reset_default_graph() # clear tensorflow graph
# Plot
# plt.style.use('ggplot')
plt.style.use('dark_background')
episodes_plot = np.arange(MAX_EPISODES)
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
if CONST_LR:
    ax.set_title("The Cart-Pole Problem Test %i \n \
Episode Length: %i \
Discount Factor: %.2f \n \
Number of Hidden Neuron: %i \
Constant Learning Rate: %.5f" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, learning_rate))
else:
    ax.set_title("The Cart-Pole Problem Test %i \n \
EpisodeLength: %i DiscountFactor: %.2f NumHiddenNeuron: %i \n \
Decay Learning Rate: (start: %.5f, steps: %i, rate: %.2f)" % (test_num, EPISODE_LENGTH, gamma, HIDDEN_SIZE, start_learning_rate, decay_steps, decay_rate))
ax.set_xlabel("Episode")
ax.set_ylabel("Return")
ax.set_ylim((0, EPISODE_LENGTH))
ax.grid(linestyle='--')
ax.plot(episodes_plot, returns, label='Instant return')
ax.plot(episodes_plot, mean_returns, label='Averaged return')
legend = ax.legend(loc='best', shadow=True)
fig.savefig(DIR_PATH_SAVEFIG + file_name_savefig, dpi=500)
# plt.show()
| GitYiheng/reinforcement_learning_test | test03_monte_carlo/t31_rlvps07_hn16_clr0p003.py | Python | mit | 7,660 | [
"NEURON"
] | 7086f8113b0f47b01ceccc69170064eaf7ea64d22d9a0666f8c5867e5aeb65ef |
#!/usr/bin/env python3
"""
This script is a python version of TimingAccuracyDHC. We use numpy functions to
simplify the creation of random coefficients.
"""
import time
import numpy as np
from pyshtools import expand
from pyshtools import spectralanalysis
# ==== MAIN FUNCTION ====
def main():
    """Run the Driscoll-Healy complex timing/accuracy test with sampling=2."""
    TimingAccuracyDHC(2)
# ==== TEST FUNCTIONS ====
def TimingAccuracyDHC(sampling=1):
    """Time and check accuracy of complex Driscoll-Healy transform round trips.

    Generates random complex coefficients scaled to a power-law spectrum,
    then for lmax = 2, 4, 8, ... up to maxdeg performs a synthesis/analysis
    round trip, printing timings and the maximum / RMS relative error.

    Parameters
    ----------
    sampling : int
        Grid sampling passed to MakeGridDHC/SHExpandDHC.

    Raises
    ------
    RuntimeError
        If the maximum relative error exceeds 100.
    """
    # ---- input parameters ----
    maxdeg = 2800
    ls = np.arange(maxdeg + 1)
    beta = 1.5

    print('Driscoll-Healy (complex), sampling =', sampling)

    # ---- create mask to filter out m<=l ----
    # FIX: np.bool / np.complex were deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin bool / complex types are the supported equivalents.
    mask = np.zeros((2, maxdeg + 1, maxdeg + 1), dtype=bool)
    mask[0, 0, 0] = True
    for l in ls:
        mask[:, l, :l + 1] = True
    mask[1, :, 0] = False

    # ---- create Gaussian powerlaw coefficients ----
    print('creating {:d} random coefficients'.format(2 * (maxdeg + 1) *
                                                     (maxdeg + 1)))
    np.random.seed(0)
    cilm = np.zeros((2, (maxdeg + 1), (maxdeg + 1)), dtype=complex)
    cilm.imag = np.random.normal(loc=0., scale=1.,
                                 size=(2, maxdeg + 1, maxdeg + 1))
    cilm.real = np.random.normal(loc=0., scale=1.,
                                 size=(2, maxdeg + 1, maxdeg + 1))
    old_power = spectralanalysis.spectrum(cilm)
    new_power = 1. / (1. + ls)**beta  # initialize degrees > 0 to power-law
    cilm[:, :, :] *= np.sqrt(new_power / old_power)[None, :, None]
    cilm[~mask] = 0.

    # ---- time spherical harmonics transform for lmax set to increasing
    # ---- powers of 2
    lmax = 2
    print('lmax maxerror rms tinverse tforward')
    while lmax <= maxdeg:
        # trim coefficients to lmax
        cilm_trim = cilm[:, :lmax + 1, :lmax + 1]
        mask_trim = mask[:, :lmax + 1, :lmax + 1]

        # synthesis / inverse
        tstart = time.time()
        grid = expand.MakeGridDHC(cilm_trim, sampling=sampling)
        tend = time.time()
        tinverse = tend - tstart

        # analysis / forward
        tstart = time.time()
        cilm2_trim = expand.SHExpandDHC(grid, sampling=sampling)
        tend = time.time()
        tforward = tend - tstart

        # compute relative error per retained coefficient
        err = np.abs(cilm_trim[mask_trim] - cilm2_trim[mask_trim]) / \
            np.abs(cilm_trim[mask_trim])
        maxerr = err.max()
        # FIX: the column is labelled 'rms' but the original printed the
        # mean square without taking the square root.
        rmserr = np.sqrt(np.mean(err**2))

        print('{:4d} {:1.2e} {:1.2e} {:1.1e}s {:1.1e}s'.format(
            lmax, maxerr, rmserr, tinverse, tforward))
        if maxerr > 100.:
            raise RuntimeError('Tests Failed. Maximum relative error = ',
                               maxerr)
        lmax = lmax * 2
# ==== EXECUTE SCRIPT ====
if __name__ == "__main__":
    main()
| MarkWieczorek/SHTOOLS | examples/python/TimingAccuracy/TimingAccuracyDHC.py | Python | bsd-3-clause | 2,762 | [
"Gaussian"
] | ed9e8c9bf85bd8224001d776abeee511ce00243901770ac82d3ff91b6e40d758 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
from io import StringIO
import os
import numpy as np
import pandas as pd
from skbio import TreeNode
from skbio.util import get_data_path
from skbio.tree import DuplicateNodeError, MissingNodeError
from skbio.diversity.alpha import faith_pd
class FaithPDTests(TestCase):
    """Unit tests for faith_pd (Faith's phylogenetic diversity).

    Expected values are computed by hand or taken from QIIME 1.9.1, an
    independent implementation of the same metric.
    """

    def setUp(self):
        # Shared fixtures: a counts vector, a 4-sample x 5-OTU table with
        # ids, and two trees -- t1 matches the OTU ids exactly while
        # t1_w_extra_tips additionally contains tips absent from the table.
        self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
        self.b1 = np.array([[1, 3, 0, 1, 0],
                            [0, 2, 0, 4, 4],
                            [0, 0, 6, 2, 1],
                            [0, 0, 1, 1, 1]])
        self.sids1 = list('ABCD')
        self.oids1 = ['OTU%d' % i for i in range(1, 6)]
        self.t1 = TreeNode.read(StringIO(
            '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
            '0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
        self.t1_w_extra_tips = TreeNode.read(
            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     '0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
                     ')root;'))

    def test_faith_pd_none_observed(self):
        """PD is 0 when no OTUs are observed, including fully empty input."""
        actual = faith_pd(np.array([], dtype=int),
                          np.array([], dtype=int),
                          self.t1)
        expected = 0.0
        self.assertAlmostEqual(actual, expected)

        actual = faith_pd([0, 0, 0, 0, 0], self.oids1, self.t1)
        expected = 0.0
        self.assertAlmostEqual(actual, expected)

    def test_faith_pd_all_observed(self):
        """PD equals the total branch length when every OTU is observed;
        abundances do not change the result."""
        actual = faith_pd([1, 1, 1, 1, 1], self.oids1, self.t1)
        expected = sum(n.length for n in self.t1.traverse()
                       if n.length is not None)
        self.assertAlmostEqual(actual, expected)

        actual = faith_pd([1, 2, 3, 4, 5], self.oids1, self.t1)
        expected = sum(n.length for n in self.t1.traverse()
                       if n.length is not None)
        self.assertAlmostEqual(actual, expected)

    def test_faith_pd(self):
        """PD for each sample of the small table against known values."""
        # expected results derived from QIIME 1.9.1, which
        # is a completely different implementation skbio's initial
        # phylogenetic diversity implementation
        actual = faith_pd(self.b1[0], self.oids1, self.t1)
        expected = 4.5
        self.assertAlmostEqual(actual, expected)
        actual = faith_pd(self.b1[1], self.oids1, self.t1)
        expected = 4.75
        self.assertAlmostEqual(actual, expected)
        actual = faith_pd(self.b1[2], self.oids1, self.t1)
        expected = 4.75
        self.assertAlmostEqual(actual, expected)
        actual = faith_pd(self.b1[3], self.oids1, self.t1)
        expected = 4.75
        self.assertAlmostEqual(actual, expected)

    def test_faith_pd_extra_tips(self):
        """Unobserved extra tips in the tree do not change PD."""
        # results are the same despite presences of unobserved tips in tree
        actual = faith_pd(self.b1[0], self.oids1, self.t1_w_extra_tips)
        expected = faith_pd(self.b1[0], self.oids1, self.t1)
        self.assertAlmostEqual(actual, expected)
        actual = faith_pd(self.b1[1], self.oids1, self.t1_w_extra_tips)
        expected = faith_pd(self.b1[1], self.oids1, self.t1)
        self.assertAlmostEqual(actual, expected)
        actual = faith_pd(self.b1[2], self.oids1, self.t1_w_extra_tips)
        expected = faith_pd(self.b1[2], self.oids1, self.t1)
        self.assertAlmostEqual(actual, expected)
        actual = faith_pd(self.b1[3], self.oids1, self.t1_w_extra_tips)
        expected = faith_pd(self.b1[3], self.oids1, self.t1)
        self.assertAlmostEqual(actual, expected)

    def test_faith_pd_minimal(self):
        """Smallest meaningful case: one observed tip of a two-tip tree."""
        # two tips
        tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;'))
        actual = faith_pd([1, 0], ['OTU1', 'OTU2'], tree)
        expected = 0.25
        self.assertEqual(actual, expected)

    def test_faith_pd_qiime_tiny_test(self):
        """Regression test against QIIME 1.9.1 'tiny-test' reference data."""
        # the following table and tree are derived from the QIIME 1.9.1
        # "tiny-test" data
        tt_table_fp = get_data_path(
            os.path.join('qiime-191-tt', 'otu-table.tsv'), 'data')
        tt_tree_fp = get_data_path(
            os.path.join('qiime-191-tt', 'tree.nwk'), 'data')

        self.q_table = pd.read_csv(tt_table_fp, sep='\t', skiprows=1,
                                   index_col=0)
        self.q_tree = TreeNode.read(tt_tree_fp)

        expected_fp = get_data_path(
            os.path.join('qiime-191-tt', 'faith-pd.txt'), 'data')
        expected = pd.read_csv(expected_fp, sep='\t', index_col=0)

        for sid in self.q_table.columns:
            actual = faith_pd(self.q_table[sid],
                              otu_ids=self.q_table.index,
                              tree=self.q_tree)
            self.assertAlmostEqual(actual, expected['PD_whole_tree'][sid])

    def test_faith_pd_root_not_observed(self):
        """Branches between an observed clade and the root count toward PD
        even when the root itself is not observed."""
        # expected values computed by hand
        tree = TreeNode.read(
            StringIO('((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
                     'root;'))
        otu_ids = ['OTU%d' % i for i in range(1, 5)]
        # root node not observed, but branch between (OTU1, OTU2) and root
        # is considered observed
        actual = faith_pd([1, 1, 0, 0], otu_ids, tree)
        expected = 0.6
        self.assertAlmostEqual(actual, expected)

        # root node not observed, but branch between (OTU3, OTU4) and root
        # is considered observed
        actual = faith_pd([0, 0, 1, 1], otu_ids, tree)
        expected = 2.3
        self.assertAlmostEqual(actual, expected)

    def test_faith_pd_invalid_input(self):
        """Each malformed input raises the documented exception type."""
        # tree has duplicated tip ids
        t = TreeNode.read(
            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     '0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(DuplicateNodeError, faith_pd, counts, otu_ids,
                          t)

        # unrooted tree as input
        t = TreeNode.read(StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
                                   'OTU4:0.7);'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

        # otu_ids has duplicated ids
        t = TreeNode.read(
            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     '0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU2']
        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

        # len of vectors not equal
        t = TreeNode.read(
            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     '0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2']
        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

        # negative counts
        t = TreeNode.read(
            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     '0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, -3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

        # tree with no branch lengths
        t = TreeNode.read(
            StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

        # tree missing some branch lengths
        t = TreeNode.read(
            StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     '0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)

        # otu_ids not present in tree
        t = TreeNode.read(
            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     '0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU42']
        self.assertRaises(MissingNodeError, faith_pd, counts, otu_ids, t)
# run the test suite when executed directly
if __name__ == "__main__":
    main()
| gregcaporaso/scikit-bio | skbio/diversity/alpha/tests/test_faith_pd.py | Python | bsd-3-clause | 8,646 | [
"scikit-bio"
] | fee044def915af522e9ef2c110f12e70e84faf2cc0e386507f16d7faa12c53b6 |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def replace_vars(config,token):
    """Expand $(KEY) references in *token* using values from *config*.

    Expansion is iterative, so values may themselves contain $(KEY)
    references. Unknown keys stop the expansion, leaving the remaining
    text untouched.
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')',idx+2)
        if idx2 == -1: break
        key = token[idx+2:idx2]
        # FIX: dict.has_key() is Python 2 only; the `in` operator works in
        # both Python 2 and 3.
        if key not in config: break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig into a dict, expanding $(VAR) references."""
    config = {}
    raw = open(os.path.join(cwd,'titanium.xcconfig')).read()
    for entry in raw.splitlines(False):
        entry = entry.strip()
        # skip // comment lines
        if entry.startswith('//'):
            continue
        pos = entry.find('=')
        if pos > 0:
            name = entry[0:pos].strip()
            config[name] = replace_vars(config, entry[pos+1:].strip())
    return config
def generate_doc(config):
    # Render each markdown file under ./documentation to HTML using the
    # markdown package bundled with the Titanium SDK support dir.
    # Returns a list of {filename: html} dicts, or None when the
    # documentation directory does not exist.
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        print "Couldn't find documentation file at: %s" % docdir
        return None
    sdk = config['TITANIUM_SDK']
    support_dir = os.path.join(sdk,'module','support')
    # make the SDK's bundled markdown module importable
    sys.path.append(support_dir)
    import markdown
    documentation = []
    for file in os.listdir(docdir):
        # NOTE(review): relies on the module-level ignoreFiles list defined
        # later in this file; fine at call time since calls happen after
        # the module is fully loaded.
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    """Compile assets/__MODULE_ID__.js into the module assets class.

    Wraps the JS source in a function via the Titanium SDK compiler and
    splices it into ___PROJECTNAMEASIDENTIFIER___ModuleAssets.m, rewriting
    the file only when the generated content differs. No-op when the JS
    asset does not exist.
    """
    js_file = os.path.join(cwd,'assets','__MODULE_ID__.js')
    if not os.path.exists(js_file): return
    sdk = config['TITANIUM_SDK']
    iphone_dir = os.path.join(sdk,'iphone')
    # make the SDK's compiler module importable
    sys.path.insert(0,iphone_dir)
    from compiler import Compiler
    path = os.path.basename(js_file)
    metadata = Compiler.make_function_from_file(path,js_file)
    # FIX: removed unused local `eq` (path with dots replaced) left over
    # from an earlier revision.
    method = ' return %s;' % metadata['method']
    f = os.path.join(cwd,'Classes','___PROJECTNAMEASIDENTIFIER___ModuleAssets.m')
    c = open(f).read()
    # keep everything up to the generated `return` and append fresh body
    idx = c.find('return ')
    before = c[0:idx]
    after = """
}
@end
"""
    newc = before + method + after
    if newc!=c:
        x = open(f,'w')
        x.write(newc)
        x.close()
def die(msg):
    # print the error and abort packaging with a non-zero exit status
    print msg
    sys.exit(1)
def warn(msg):
    # non-fatal packaging warning, printed with a [WARN] prefix
    print "[WARN] %s" % msg
def validate_license():
    """Warn when the LICENSE file still contains the placeholder text."""
    text = open(os.path.join(cwd,'LICENSE')).read()
    if module_license_default in text:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Read and validate the module `manifest` file.

    Returns (manifest_dict, manifest_path). Dies when the file is missing
    or a required key is absent; warns when a placeholder default value is
    still in place.
    """
    path = os.path.join(cwd,'manifest')
    # FIX: check existence before opening; the original opened the file
    # first and would raise IOError instead of the friendly die() message.
    if not os.path.exists(path): die("missing %s" % path)
    f = open(path)
    manifest = {}
    for line in f.readlines():
        line = line.strip()
        if line[0:1]=='#': continue
        if line.find(':') < 0: continue
        # FIX: split only on the first ':' so values may themselves contain
        # colons (URLs, timestamps); the unbounded split raised ValueError.
        key,value = line.split(':',1)
        manifest[key.strip()]=value.strip()
    for key in required_module_keys:
        # FIX: dict.has_key() is Python 2 only; `in` works in both 2 and 3.
        if key not in manifest: die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest,path
# file and directory names excluded when zipping module contents
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README','__MODULE_ID__.js']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=None):
    """Recursively add *dir* to the open ZipFile *zf*.

    Paths are remapped so that *dir* becomes *basepath* inside the archive.
    Entries listed in the module-level ignoreDirs/ignoreFiles lists and
    compiled .pyc files are skipped.

    The *ignore* parameter is accepted for call compatibility but is not
    used by the body. FIX: its default was the mutable [] (a shared-state
    hazard); None is the safe equivalent.
    """
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name) # don't visit ignored directories
        for file in files:
            if file in ignoreFiles: continue
            e = os.path.splitext(file)
            if len(e)==2 and e[1]=='.pyc':continue
            from_ = os.path.join(root, file)
            to_ = from_.replace(dir, basepath, 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Return paths of static libraries produced by Release builds."""
    return [lib for lib in glob.glob('build/**/*.a') if 'Release-' in lib]
def build_module(manifest,config):
    """Build Release binaries for device and simulator, then merge the
    resulting static libraries into build/lib<moduleid>.a via lipo."""
    for sdk_name in ('iphoneos', 'iphonesimulator'):
        status = os.system("xcodebuild -sdk %s -configuration Release" % sdk_name)
        if status != 0:
            die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''.join('%s ' % libfile for libfile in glob_libfiles())
    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
    # Assemble the distributable <moduleid>-iphone-<version>.zip containing
    # the manifest, merged static library, rendered documentation, bundled
    # asset/example/platform dirs, the LICENSE and module.xcconfig.
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    # start from a clean archive
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        for doc in docs:
            for file, html in doc.iteritems():
                # markdown sources become .html entries inside the zip
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    for dn in ('assets','example','platform'):
        if os.path.exists(dn):
            zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
    zf.write('LICENSE','%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    zf.close()
if __name__ == '__main__':
    # full packaging pipeline: validate inputs, compile JS assets,
    # build the binaries, then produce the distributable zip
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
| arnaudsj/titanium_mobile | support/module/iphone/templates/build.py | Python | apache-2.0 | 5,924 | [
"VisIt"
] | 5e22d195b1cb3abff0461e2ab393108ce3d4c11a9303c67d0fb6288f433ba72b |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Program to compare models using different compute engines.
This program lets you compare results between OpenCL and DLL versions
of the code and between precision (half, fast, single, double, quad),
where fast precision is single precision using native functions for
trig, etc., and may not be completely IEEE 754 compliant. This lets
make sure that the model calculations are stable, or if you need to
tag the model as double precision only.
Run using "./sascomp -h" in the sasmodels root to see the command
line options. To run from from an installed version of sasmodels,
use "python -m sasmodels.compare -h".
Note that there is no way within sasmodels to select between an
OpenCL CPU device and a GPU device, but you can do so by setting the
SAS_OPENCL environment variable. Start a python interpreter and enter::
import pyopencl as cl
cl.create_some_context()
This will prompt you to select from the available OpenCL devices
and tell you which string to use for the SAS_OPENCL variable.
On Windows you will need to remove the quotes.
"""
from __future__ import print_function, division
import sys
import os
import math
import datetime
import traceback
import re
import numpy as np # type: ignore
from . import core
from . import weights
from . import kerneldll
from . import kernelcl
from . import kernelcuda
from .data import plot_theory, empty_data1D, empty_data2D, load_data
from .direct_model import DirectModel, get_mesh
from .generate import FLOAT_RE, set_integration_size
# pylint: disable=unused-import
from typing import Optional, Dict, Any, Callable, Tuple, List
from .modelinfo import ModelInfo, Parameter, ParameterSet
from .data import Data
try:
# With python 3.8+ we can indicate that calculator takes floats.
from typing import Protocol
class Calculator(Protocol):
"""Kernel calculator takes *par=value* keyword arguments."""
def __call__(self, **par: float) -> np.ndarray: ...
except ImportError:
#: Kernel calculator takes *par=value* keyword arguments.
Calculator = Callable[..., np.ndarray]
# pylint: enable=unused-import
USAGE = """
usage: sascomp model [options...] [key=val]
Generate and compare SAS models. If a single model is specified it shows
a plot of that model. Different models can be compared, or the same model
with different parameters. The same model with the same parameters can
be compared with different calculation engines to see the effects of precision
on the resultant values.
model or model1,model2 are the names of the models to compare (see below).
Options (* for default):
=== data generation ===
-data="path" uses q, dq from the data file
-noise=0 sets the measurement error dI/I
-res=0 sets the resolution width dQ/Q if calculating with resolution
-lowq*/-midq/-highq/-exq use q values up to 0.05, 0.2, 1.0, 10.0
-q=min:max alternative specification of qrange
-nq=128 sets the number of Q points in the data set
-1d*/-2d computes 1d or 2d data
-zero indicates that q=0 should be included
=== model parameters ===
-preset*/-random[=seed] preset or random parameters
-sets=n generates n random datasets with the seed given by -random=seed
-pars/-nopars* prints the parameter set or not
-sphere[=150] set up spherical integration over theta/phi using n points
-mono*/-poly suppress or allow polydispersity on generated parameters
-magnetic/-nonmagnetic* suppress or allow magnetism on generated parameters
-maxdim[=inf] limit randomly generate particle dimensions to maxdim
=== calculation options ===
-cutoff=1e-5* cutoff value for including a point in polydispersity
-accuracy=Low accuracy of the resolution calculation Low, Mid, High, Xhigh
-neval=1 sets the number of evals for more accurate timing
-ngauss=0 overrides the number of points in the 1-D gaussian quadrature
=== precision options ===
-engine=default uses the default calcution precision
-single/-double/-half/-fast sets an OpenCL calculation engine
-single!/-double!/-quad! sets an OpenMP calculation engine
=== plotting ===
-plot*/-noplot plots or suppress the plot of the model
-linear/-log*/-q4 intensity scaling on plots
-hist/-nohist* plot histogram of relative error
-abs/-rel* plot relative or absolute error
-title="note" adds note to the plot title, after the model name
-weights shows weights plots for the polydisperse parameters
-profile shows the sld profile if the model has a plottable sld profile
=== output options ===
-edit starts the parameter explorer
-help/-html shows the model docs instead of running the model
=== help ===
-h/-? print this help
-models[=all] show all builtin models of a given type:
all, py, c, double, single, opencl, 1d, 2d, magnetic
=== environment variables ===
-DSAS_MODELPATH=~/.sasmodels/custom_models sets path to custom models
-DSAS_WEIGHTS_PATH=~/.sasview/weights sets path to custom distributions
-DSAS_OPENCL=vendor:device|cuda:device|none sets the target GPU device
-DXDG_CACHE_HOME=~/.cache sets the pyopencl cache root (linux only)
-DSAS_COMPILER=tinycc|msvc|mingw|unix sets the DLL compiler
-DSAS_OPENMP=0 set to 1 to turn on OpenMP for the DLLs
-DSAS_DLL_PATH=~/.sasmodels/compiled_models sets the DLL cache
-DPYOPENCL_NO_CACHE=1 turns off the PyOpenCL cache
The interpretation of quad precision depends on architecture, and may
vary from 64-bit to 128-bit, with 80-bit floats being common (1e-19 precision).
On unix and mac you may need single quotes around the DLL computation
engines, such as -engine='single!,double!' since !, is treated as a history
expansion request in the shell.
Key=value pairs allow you to set specific values for the model parameters.
Key=value1,value2 to compare different values of the same parameter. The
value can be an expression including other parameters.
Items later on the command line override those that appear earlier.
Examples:
# compare single and double precision calculation for a barbell
sascomp barbell -engine=single,double
# generate 10 random lorentz models, with seed=27
sascomp lorentz -sets=10 -seed=27
# compare ellipsoid with R = R_polar = R_equatorial to sphere of radius R
sascomp sphere,ellipsoid radius_polar=radius radius_equatorial=radius
# model timing test requires multiple evals to perform the estimate
sascomp pringle -engine=single,double -timing=100,100 -noplot
"""
kerneldll.ALLOW_SINGLE_PRECISION_DLLS = True
def build_math_context():
# type: () -> Dict[str, Callable]
"""build dictionary of functions from math module"""
return dict((k, getattr(math, k))
for k in dir(math) if not k.startswith('_'))
#: list of math functions for use in evaluating parameters
MATH = build_math_context()
# CRUFT python 2.6
if not hasattr(datetime.timedelta, 'total_seconds'):
def delay(dt):
"""Return number date-time delta as number seconds"""
return dt.days * 86400 + dt.seconds + 1e-6 * dt.microseconds
else:
def delay(dt):
"""Return number date-time delta as number seconds"""
return dt.total_seconds()
class push_seed(object):
"""
Set the seed value for the random number generator.
When used in a with statement, the random number generator state is
restored after the with statement is complete.
:Parameters:
*seed* : int or array_like, optional
Seed for RandomState
:Example:
Seed can be used directly to set the seed::
>>> from numpy.random import randint
>>> push_seed(24)
<...push_seed object at...>
>>> print(randint(0,1000000,3))
[242082 899 211136]
Seed can also be used in a with statement, which sets the random
number generator state for the enclosed computations and restores
it to the previous state on completion::
>>> with push_seed(24):
... print(randint(0,1000000,3))
[242082 899 211136]
Using nested contexts, we can demonstrate that state is indeed
restored after the block completes::
>>> with push_seed(24):
... print(randint(0,1000000))
... with push_seed(24):
... print(randint(0,1000000,3))
... print(randint(0,1000000))
242082
[242082 899 211136]
899
The restore step is protected against exceptions in the block::
>>> with push_seed(24):
... print(randint(0,1000000))
... try:
... with push_seed(24):
... print(randint(0,1000000,3))
... raise Exception()
... except Exception:
... print("Exception raised")
... print(randint(0,1000000))
242082
[242082 899 211136]
Exception raised
899
"""
def __init__(self, seed=None):
# type: (Optional[int]) -> None
self._state = np.random.get_state()
np.random.seed(seed)
def __enter__(self):
# type: () -> None
pass
def __exit__(self, exc_type, exc_value, trace):
# type: (Any, BaseException, Any) -> None
np.random.set_state(self._state)
def tic():
# type: () -> Callable[[], float]
"""
Timer function.
Use "toc=tic()" to start the clock and "toc()" to measure
a time interval.
"""
then = datetime.datetime.now()
return lambda: delay(datetime.datetime.now() - then)
def set_beam_stop(data, radius, outer=None):
# type: (Data, float, float) -> None
"""
Add a beam stop of the given *radius*. If *outer*, make an annulus.
"""
if hasattr(data, 'qx_data'):
q = np.sqrt(data.qx_data**2 + data.qy_data**2)
data.mask = (q < radius)
if outer is not None:
data.mask |= (q >= outer)
else:
data.mask = (data.x < radius)
if outer is not None:
data.mask |= (data.x >= outer)
def parameter_range(p, v):
# type: (str, float) -> Tuple[float, float]
"""
Choose a parameter range based on parameter name and initial value.
"""
# process the polydispersity options
if p.endswith('_pd_n'):
return 0., 100.
elif p.endswith('_pd_nsigma'):
return 0., 5.
elif p.endswith('_pd_type'):
raise ValueError("Cannot return a range for a string value")
elif any(s in p for s in ('theta', 'phi', 'psi')):
# orientation in [-180,180], orientation pd in [0,45]
if p.endswith('_pd'):
return 0., 180.
else:
return -180., 180.
elif p.endswith('_pd'):
return 0., 1.
elif 'sld' in p:
return -0.5, 10.
elif p == 'background':
return 0., 10.
elif p == 'scale':
return 0., 1.e3
elif v < 0.:
return 2.*v, -2.*v
else:
return 0., (2.*v if v > 0. else 1.)
def _randomize_one(model_info, name, value):
# type: (ModelInfo, str, float) -> float
"""
Randomize a single parameter.
"""
# Set the amount of polydispersity/angular dispersion, but by default pd_n
# is zero so there is no polydispersity. This allows us to turn on/off
# pd by setting pd_n, and still have randomly generated values
if name.endswith('_pd'):
par = model_info.parameters[name[:-3]]
if par.type == 'orientation':
# Let oriention variation peak around 13 degrees; 95% < 42 degrees
return 180*np.random.beta(2.5, 20)
else:
# Let polydispersity peak around 15%; 95% < 0.4; max=100%
return np.random.beta(1.5, 7)
# pd is selected globally rather than per parameter, so set to 0 for no pd
# In particular, when multiple pd dimensions, want to decrease the number
# of points per dimension for faster computation
if name.endswith('_pd_n'):
return 0
# Don't mess with distribution type for now
if name.endswith('_pd_type'):
return 'gaussian'
# type-dependent value of number of sigmas; for gaussian use 3.
if name.endswith('_pd_nsigma'):
return 3.
# background in the range [0.01, 1]
if name == 'background':
return 10**np.random.uniform(-2, 0)
# scale defaults to 0.1% to 30% volume fraction
if name == 'scale':
return 10**np.random.uniform(-3, -0.5)
# If it is a list of choices, pick one at random with equal probability
par = model_info.parameters[name]
if par.choices: # choice list
return np.random.randint(len(par.choices))
# If it is a fixed range, pick from it with equal probability.
# For logarithmic ranges, the model will have to override.
if np.isfinite(par.limits).all():
return np.random.uniform(*par.limits)
# If the paramter is marked as an sld use the range of neutron slds
if par.type == 'sld':
return np.random.uniform(-0.5, 12)
# Limit magnetic SLDs to a smaller range, from zero to iron=5/A^2
if par.name.endswith('_M0'):
return np.random.uniform(0, 5)
# Guess at the random length/radius/thickness. In practice, all models
# are going to set their own reasonable ranges.
if par.type == 'volume':
if ('length' in par.name or
'radius' in par.name or
'thick' in par.name):
return 10**np.random.uniform(2, 4)
# In the absence of any other info, select a value in [0, 2v], or
# [-2|v|, 2|v|] if v is negative, or [0, 1] if v is zero. Mostly the
# model random parameter generators will override this default.
low, high = parameter_range(par.name, value)
limits = (max(par.limits[0], low), min(par.limits[1], high))
return np.random.uniform(*limits)
def _random_pd(model_info, pars, is2d):
    # type: (ModelInfo, Dict[str, float], bool) -> None
    """
    Generate a random dispersity distribution for the model.

    For shape (volume) parameters, roughly: 1% chance of no dispersity,
    65% chance of dispersity on one parameter, 33% on two and 1% on three
    (see the branch comments below for the exact thresholds).

    If the model is oriented and the data is 2D, dispersity is also added
    to theta with 80% probability, and to phi and psi with 10% probability
    each; orientation dispersity biases the selection toward fewer volume
    dispersity parameters and fewer steps.

    Updates *pars* in place by setting the various "*_pd_n" entries.
    The number and order of np.random calls is part of the contract:
    a fixed seed must always produce the same parameter set.
    """
    # Find the polydisperse parameters.
    pd = [p for p in model_info.parameters.kernel_parameters if p.polydisperse]
    # If the sample is oriented then add polydispersity to the orientation.
    oriented = any(p.type == 'orientation' for p in pd)
    num_oriented_pd = 0
    if oriented:
        if np.random.rand() < 0.8:
            # 80% chance of pd on long axis (20x cost)
            pars['theta_pd_n'] = 20
            num_oriented_pd += 1
        if np.random.rand() < 0.1:
            # 10% chance of pd on short axis (5x cost)
            pars['phi_pd_n'] = 5
            num_oriented_pd += 1
        if any(p.name == 'psi' for p in pd) and np.random.rand() < 0.1:
            # 10% chance of pd on spin axis (5x cost)
            #print("generating psi_pd_n")
            pars['psi_pd_n'] = 5
            num_oriented_pd += 1
    # Process non-orientation parameters
    pd = [p for p in pd if p.type != 'orientation']
    # Find the remaining pd parameters, which are all volume parameters.
    # Use the parameter value as the weight on the choice function for
    # the polydispersity parameter.  The I(Q) curve is more sensitive to
    # pd on larger dimensions, so they should be preferred.
    # TODO: choose better weights for parameters like num_pearls or num_disks.
    name = [] # type: List[str]  # name of the next volume parameter
    default = [] # type: List[float]  # default val for that volume parameter
    for p in pd:
        if p.length_control is not None:
            # Vector parameter whose length is set by another parameter;
            # expand one slot per element (rounded current length).
            slots = int(pars.get(p.length_control, 1) + 0.5)
            name.extend(p.name+str(k+1) for k in range(slots))
            default.extend(p.default for k in range(slots))
        elif p.length > 1:
            # Fixed-length vector parameter; one slot per element.
            slots = p.length
            name.extend(p.name+str(k+1) for k in range(slots))
            default.extend(p.default for k in range(slots))
        else:
            name.append(p.name)
            default.append(p.default)
    p = [pars.get(k, v) for k, v in zip(name, default)] # relative weight
    p = np.array(p)/sum(p) if p else [] # normalize to probability
    # Select number of pd parameters to use.  The selection is biased
    # toward fewer pd parameters if there is already orientational pd
    # (effectively allowing only one volume pd) and the number of pd steps
    # is scaled down.  Ignore oriented if it is not 2d data.
    if not is2d:
        num_oriented_pd = 0
    n = len(name)
    u = np.random.rand()
    if u < (1 - 1/(1+num_oriented_pd)):
        # if lots of orientation dispersity then reject shape dispersity
        pass
    elif u < 0.01 or n < 1:
        # 1% chance of no polydispersity (1x cost)
        pass
    elif u < 0.66 or n < 2:
        # 65% chance of pd on one value (35x cost)
        choice = np.random.choice(n, size=1, replace=False, p=p)
        pars[name[choice[0]]+"_pd_n"] = 35 // (1 + num_oriented_pd)
    elif u < 0.99 or n < 3:
        # 33% chance of pd on two values (250x cost)
        choice = np.random.choice(n, size=2, replace=False, p=p)
        pars[name[choice[0]]+"_pd_n"] = 25 // (1 + num_oriented_pd)
        pars[name[choice[1]]+"_pd_n"] = 10 // (1 + num_oriented_pd)
    else:
        # 1% chance of pd on three values (1250x cost)
        choice = np.random.choice(n, size=3, replace=False, p=p)
        pars[name[choice[0]]+"_pd_n"] = 25
        pars[name[choice[1]]+"_pd_n"] = 10
        pars[name[choice[2]]+"_pd_n"] = 5
    ## Show selected polydispersity
    #for name, value in pars.items():
    #    if name.endswith('_pd_n') and value > 0:
    #        print(name, value, pars.get(name[:-5], 0), pars.get(name[:-2], 0))
def randomize_pars(model_info, pars, maxdim=np.inf, is2d=False):
    # type: (ModelInfo, ParameterSet, float, bool) -> ParameterSet
    """
    Generate random values for all of the parameters.

    Valid ranges for the random number generator are guessed from the name
    of the parameter; this will not account for constraints such as cap
    radius greater than cylinder radius in the capped_cylinder model, so
    :func:`constrain_pars` needs to be called afterward.
    """
    # Sorting the keys fixes the order of the calls to the random number
    # generator, so a given seed always yields the same parameter set.
    random_pars = {key: _randomize_one(model_info, key, value)
                   for key, value in sorted(pars.items())}
    # Let the model override values where it supplies its own generator.
    if model_info.random is not None:
        random_pars.update(model_info.random())
    _random_pd(model_info, random_pars, is2d)
    limit_dimensions(model_info, random_pars, maxdim)
    return random_pars
def limit_dimensions(model_info, pars, maxdim):
    # type: (ModelInfo, ParameterSet, float) -> None
    """
    Limit parameters with units of Ang to at most *maxdim*.

    Any oversized value is replaced in place by a random value drawn on a
    log scale over the three decades below *maxdim*.
    """
    for par in model_info.parameters.call_parameters:
        current = pars[par.name]
        if par.units == 'Ang' and current > maxdim:
            # Redraw in [maxdim/1000, maxdim] with log-uniform density.
            pars[par.name] = maxdim*10**np.random.uniform(-3, 0)
def _swap_pars(pars, a, b):
    # type: (Dict[str, float], str, str) -> None
    """
    Swap parameters *a* and *b* in *pars*, along with their polydispersity
    and magnetism companions.

    Assume the parameters are of the same basic type (volume, sld, or
    other), so that if, for example, radius_pd is in pars but
    radius_bell_pd is not, then after the swap radius_bell_pd will be the
    old radius_pd and radius_pd will be removed.
    """
    suffixes = ("", "_pd", "_pd_n", "_pd_nsigma", "_pd_type",
                "_M0", "_mphi", "_mtheta")
    for suffix in suffixes:
        key_a, key_b = a + suffix, b + suffix
        have_a, have_b = key_a in pars, key_b in pars
        if have_a and have_b:
            pars[key_a], pars[key_b] = pars[key_b], pars[key_a]
        elif have_a:
            # Only one side present: move it across, leaving no stale entry.
            pars[key_b] = pars.pop(key_a)
        elif have_b:
            pars[key_a] = pars.pop(key_b)
def constrain_pars(model_info, pars):
    # type: (ModelInfo, ParameterSet) -> None
    """
    Restrict parameters to valid values.

    This includes model specific code for models such as capped_cylinder
    which need to support within model constraints (cap radius more than
    cylinder radius in this case).

    Warning: this updates the *pars* dictionary in place.
    """
    # TODO: move the model specific code to the individual models
    base = model_info.id
    # For a product model only the form factor needs constraining; none of
    # the structure factors require any constraints.
    if '*' in base:
        base = base.split('*')[0]
    # Suppress magnetism for python models (not yet implemented)
    if callable(model_info.Iq):
        pars.update(suppress_magnetism(pars))
    if base == 'barbell':
        if pars['radius_bell'] < pars['radius']:
            _swap_pars(pars, 'radius_bell', 'radius')
    elif base == 'capped_cylinder':
        if pars['radius_cap'] < pars['radius']:
            _swap_pars(pars, 'radius_cap', 'radius')
    elif base == 'guinier':
        # Limit guinier to an Rg such that Iq > 1e-30 (single precision cutoff)
        #    I(q) = A e^-(Rg^2 q^2/3) > e^-(30 ln 10)
        #    => ln A - (Rg^2 q^2/3) > -30 ln 10
        #    => Rg^2 q^2/3 < 30 ln 10 + ln A
        #    => Rg < sqrt(90 ln 10 + 3 ln A)/q
        #q_max = 0.2  # mid q maximum
        q_max = 1.0  # high q maximum
        rg_max = np.sqrt(90*np.log(10) + 3*np.log(pars['scale']))/q_max
        pars['rg'] = min(pars['rg'], rg_max)
    elif base == 'pearl_necklace':
        if pars['radius'] < pars['thick_string']:
            _swap_pars(pars, 'thick_string', 'radius')
    elif base == 'rpa':
        # Make sure phi sums to 1.0
        if pars['case_num'] < 2:
            pars['Phi1'] = 0.
            pars['Phi2'] = 0.
        elif pars['case_num'] < 5:
            pars['Phi1'] = 0.
        total = sum(pars['Phi' + c] for c in '1234')
        for c in '1234':
            pars['Phi' + c] /= total
def parlist(model_info, pars, is2d):
    # type: (ModelInfo, ParameterSet, bool) -> str
    """
    Format the parameter list for printing.

    Returns one line per parameter showing value, polydispersity and
    magnetism, plus a trailing line of spin parameters when any parameter
    is magnetic.
    """
    lines = []
    parameters = model_info.parameters
    magnetic = False
    magnetic_pars = []
    for p in parameters.user_parameters(pars, True):
        # Magnetic moment components are folded into the base parameter line.
        if any(p.id.endswith(x) for x in ('_M0', '_mtheta', '_mphi')):
            continue
        # Spin parameters are collected and shown on their own line below.
        if p.id in set(('up_frac_i', 'up_frac_f', 'up_angle', 'up_phi')):
            magnetic_pars.append("%s=%s"%(p.id, pars.get(p.id, p.default)))
            continue
        # Orientation angles are meaningless for 1D data.
        if not is2d and p.id in ('theta', 'phi', 'psi'):
            continue
        fields = dict(
            value=pars.get(p.id, p.default),
            pd=pars.get(p.id+"_pd", 0.),
            n=int(pars.get(p.id+"_pd_n", 0)),
            # Bug fix: the key was misspelled "_pd_nsgima", so a user-set
            # nsigma was silently ignored and 3 always displayed.
            nsigma=pars.get(p.id+"_pd_nsigma", 3.),
            pdtype=pars.get(p.id+"_pd_type", 'gaussian'),
            relative_pd=p.relative_pd,
            M0=pars.get(p.id+'_M0', 0.),
            mphi=pars.get(p.id+'_mphi', 0.),
            mtheta=pars.get(p.id+'_mtheta', 0.),
        )
        lines.append(_format_par(p.name, **fields))
        magnetic = magnetic or fields['M0'] != 0.
    if magnetic and magnetic_pars:
        lines.append(" ".join(magnetic_pars))
    return "\n".join(lines)
    #return "\n".join("%s: %s"%(p, v) for p, v in sorted(pars.items()))
def _format_par(name, value=0., pd=0., n=0, nsigma=3., pdtype='gaussian',
                relative_pd=False, M0=0., mphi=0., mtheta=0.):
    # type: (str, float, float, int, float, str, bool, float, float, float) -> str
    """
    Render one parameter as "name: value", appending polydispersity info
    when *pd* and *n* are both non-zero and magnetism info when *M0* is
    non-zero.
    """
    pieces = ["%s: %g"%(name, value)]
    if pd != 0. and n != 0:
        # Relative polydispersity is quoted as a fraction of the value.
        width = pd*value if relative_pd else pd
        pieces.append(" +/- %g (%d points in [-%g,%g] sigma %s)"
                      % (width, n, nsigma, nsigma, pdtype))
    if M0 != 0.:
        pieces.append(" M0:%.3f mtheta:%.1f mphi:%.1f" % (M0, mtheta, mphi))
    return "".join(pieces)
def suppress_pd(pars):
    # type: (ParameterSet) -> ParameterSet
    """
    Return a copy of *pars* with every polydispersity point count zeroed,
    so models can be evaluated quickly without dispersity loops.
    """
    suppressed = pars.copy()
    suppressed.update((key, 0) for key in pars if key.endswith("_pd_n"))
    return suppressed
def suppress_magnetism(pars):
    # type: (ParameterSet) -> ParameterSet
    """
    Return a copy of *pars* with every magnetic moment magnitude zeroed,
    so models can be evaluated quickly without magnetism.
    """
    suppressed = pars.copy()
    suppressed.update((key, 0) for key in pars if key.endswith("_M0"))
    return suppressed
def time_calculation(calculator: Calculator, pars: ParameterSet, evals: int=1):
    # not type: (Calculator, ParameterSet, int) -> Tuple[np.ndarray, float]
    """
    Compute the average calculation time over *evals* evaluations.

    An additional call is generated without polydispersity in order to
    initialize the calculation engine, and make the average more stable.
    Returns the last computed value and the average time in ms.
    """
    # Warm up the engine (kernel compile, caching) so timing is stable.
    if evals > 1:
        calculator(**suppress_pd(pars))
    toc = tic()
    value = None
    # Always perform at least one evaluation so there is a value to return.
    for _ in range(max(evals, 1)):
        value = calculator(**pars)
    average_time = toc()*1000. / evals
    #print("I(q)", value)
    return value, average_time
def make_data(opts):
    # type: (Dict[str, Any]) -> Tuple[Data, np.ndarray]
    """
    Generate an empty dataset, used with the model to set Q points
    and resolution.

    *opts* contains the options, with 'qmin', 'qmax', 'nq', 'res',
    'accuracy', 'is2d' and 'view' parsed from the command line.
    Returns the data object and the index of the points to plot.
    """
    qmin, qmax, nq, res = opts['qmin'], opts['qmax'], opts['nq'], opts['res']
    if opts['is2d']:
        # Square detector covering [-qmax, qmax] in both directions, with a
        # beam stop masking out q < qmin.
        q = np.linspace(-qmax, qmax, nq)  # type: np.ndarray
        data = empty_data2D(q, resolution=res)
        data.accuracy = opts['accuracy']
        set_beam_stop(data, qmin)
        return data, ~data.mask
    # 1D: log-spaced points unless plotting on a linear scale or q=0 is
    # requested (log spacing cannot include zero).
    if opts['view'] == 'log' and not opts['zero']:
        q = np.logspace(math.log10(qmin), math.log10(qmax), nq)
    else:
        q = np.linspace(qmin, qmax, nq)
    if opts['zero']:
        q = np.hstack((0, q))
    # TODO: provide command line control of lambda and Delta lambda/lambda
    #L, dLoL = 5, 0.14/np.sqrt(6)  # wavelength and 14% triangular FWHM
    L, dLoL = 0, 0
    data = empty_data1D(q, resolution=res, L=L, dL=L*dLoL)
    return data, slice(None, None)
def make_engine(
        model_info: ModelInfo, data: Data, dtype: str, cutoff: float, ngauss: int=0,
    ) -> Calculator:
    # not type: (ModelInfo, Data, str, float, int) -> Calculator
    """
    Generate the appropriate calculation engine for the given datatype.

    Datatypes with '!' appended are evaluated using external C DLLs rather
    than OpenCL.
    """
    if ngauss:
        set_integration_size(model_info, ngauss)
    # A GPU dtype was requested but no GPU driver is available.
    wants_gpu = dtype != "default" and not dtype.endswith('!')
    if wants_gpu and not (kernelcl.use_opencl() or kernelcuda.use_cuda()):
        raise RuntimeError("OpenCL not available " + kernelcl.OPENCL_ERROR)
    model = core.build_model(model_info, dtype=dtype, platform="ocl")
    calculator = DirectModel(data, model, cutoff=cutoff)
    # Tag the calculator with a label such as "OPENCL[32]" for display.
    kernel_model = calculator._model
    engine_type = kernel_model.__class__.__name__.replace('Model', '').upper()
    if getattr(kernel_model, 'fast', False):
        precision = "fast"
    else:
        precision = str(kernel_model.dtype.itemsize*8)
    calculator.engine = "%s[%s]" % (engine_type, precision)
    return calculator
def _show_invalid(data, theory):
    # type: (Data, np.ma.ndarray) -> None
    """
    Print the q values at which *theory* is non-finite (masked), if any.
    """
    if not theory.mask.any():
        return
    if not hasattr(data, 'x'):
        return
    bad = ["I(%g)=%g"%(q, iq)
           for q, iq in zip(data.x[theory.mask], theory[theory.mask])]
    print(" *** ", ", ".join(bad))
def compare(opts, limits=None, maxdim=None):
    # type: (Dict[str, Any], Optional[Tuple[float, float]], Optional[float]) -> Tuple[float, float]
    """
    Perform a comparison using options from the command line.

    *limits* are the display limits on the graph, either to set the y-axis
    for 1D or to set the colormap scale for 2D.  If None, then they are
    inferred from the data and returned.  When exploring using Bumps,
    the limits are set when the model is initially called, and maintained
    as the values are adjusted, making it easier to see the effects of the
    parameters.

    *maxdim* **DEPRECATED** Use opts['maxdim'] instead.
    """
    # CRUFT: remove maxdim parameter
    if maxdim is not None:
        opts['maxdim'] = maxdim
    # Run one comparison per requested parameter set; each set after the
    # first gets (and prints) its own random seed for reproducibility.
    for k in range(opts['sets']):
        if k > 0:
            # print a separate seed for each dataset for better reproducibility
            new_seed = np.random.randint(1000000) # type: int
            print("=== Set %d uses -random=%d ===" % (k+1, new_seed))
            np.random.seed(new_seed)
        # Build the (base, comparison) parameter pair for this set; None
        # signals a parameter error already reported to the user.
        opts['pars'] = parse_pars(opts, maxdim=maxdim)
        if opts['pars'] is None:
            return limits
        result = run_models(opts, verbose=True)
        if opts['plot']:
            # A fresh figure per set for 2D data; 1D sets overplot.
            if opts['is2d'] and k > 0:
                import matplotlib.pyplot as plt
                plt.figure()
            limits = plot_models(opts, result, limits=limits, setnum=k)
        if opts['show_weights']:
            base, _ = opts['engines']
            base_pars, _ = opts['pars']
            model_info = base._kernel.info
            dim = base._kernel.dim
            weights.plot_weights(model_info, get_mesh(model_info, base_pars, dim=dim))
        if opts['show_profile']:
            import pylab
            base, comp = opts['engines']
            base_pars, comp_pars = opts['pars']
            # Plot the comparison profile only when it would differ.
            have_base = base._kernel.info.profile is not None
            have_comp = (
                comp is not None
                and comp._kernel.info.profile is not None
                and base_pars != comp_pars
            )
            if have_base or have_comp:
                pylab.figure()
                if have_base:
                    plot_profile(base._kernel.info, **base_pars)
                if have_comp:
                    plot_profile(comp._kernel.info, label='comp', **comp_pars)
                pylab.legend()
    if opts['plot']:
        import matplotlib.pyplot as plt
        plt.show()
    return limits
def plot_profile(model_info, label='base', **args):
    # type: (ModelInfo, str, **float) -> None
    """
    Plot the profile returned by the model profile method.

    *model_info* defines model parameters, etc.

    *label* is the legend label for the plotted line.

    *args* are *parameter=value* pairs for the model profile function.
    """
    import pylab
    # Keep only the parameters the profile function accepts: drop
    # polydispersity attributes ("_pd"), magnetic parameters ("sld:..."),
    # and scattering-only parameters.  (The previous version also popped
    # 'scale' and 'background' from a copy afterwards, but the filter
    # below already excludes them, so those statements were dead code.)
    args = dict((k, v) for k, v in args.items()
                if "_pd" not in k
                and ":" not in k
                and k not in ("background", "scale", "theta", "phi", "psi"))
    z, rho = model_info.profile(**args)
    #pylab.interactive(True)
    pylab.plot(z, rho, '-', label=label)
    pylab.grid(True)
    #pylab.show()
def run_models(opts, verbose=False):
    # type: (Dict[str, Any], bool) -> Dict[str, Any]
    """
    Process a parameter set, return calculation results and times.

    Evaluates the base engine (and the comparison engine, if configured)
    on their respective parameter sets, masking non-finite values, and
    returns a dict with the values, timings, residuals and relative
    errors.  Entries are None for calculations that were not performed.
    """
    base, comp = opts['engines']
    base_n, comp_n = opts['count']
    base_pars, comp_pars = opts['pars']
    base_data, comp_data = opts['data']
    comparison = comp is not None
    base_time = comp_time = None
    base_value = comp_value = resid = relerr = None
    # Base calculation
    try:
        base_raw, base_time = time_calculation(base, base_pars, base_n)
        # Mask NaN/inf so statistics and plots skip invalid points.
        base_value = np.ma.masked_invalid(base_raw)
        if verbose:
            print("%s t=%.2f ms, intensity=%.0f"
                  % (base.engine, base_time, base_value.sum()))
        _show_invalid(base_data, base_value)
        #if base.results is not None: print(base.results())
    except ImportError:
        # NOTE(review): only ImportError is swallowed here (missing kernel
        # backend); other exceptions propagate — presumably deliberate so
        # genuine model bugs surface.  TODO confirm.
        traceback.print_exc()
    # Comparison calculation
    if comparison:
        try:
            comp_raw, comp_time = time_calculation(comp, comp_pars, comp_n)
            comp_value = np.ma.masked_invalid(comp_raw)
            if verbose:
                print("%s t=%.2f ms, intensity=%.0f"
                      % (comp.engine, comp_time, comp_value.sum()))
            _show_invalid(base_data, comp_value)
        except ImportError:
            traceback.print_exc()
    # Compare, but only if computing both forms
    if comparison:
        resid = (base_value - comp_value)
        # Guard against division by zero: points where comp is exactly 0
        # report the absolute residual instead.
        relerr = resid/np.where(comp_value != 0., abs(comp_value), 1.0)
        if verbose:
            _print_stats("|%s-%s|"
                         % (base.engine, comp.engine) + (" "*(3+len(comp.engine))),
                         resid)
            _print_stats("|(%s-%s)/%s|"
                         % (base.engine, comp.engine, comp.engine),
                         relerr)
    return dict(base_value=base_value, comp_value=comp_value,
                base_time=base_time, comp_time=comp_time,
                resid=resid, relerr=relerr)
def _print_stats(label, err):
    # type: (str, np.ma.ndarray) -> None
    """
    Print max/median/98th-percentile/rms/mean statistics of |err|,
    ignoring masked (invalid) entries.
    """
    # Work with the valid (unmasked) entries only, sorted by magnitude.
    magnitudes = np.sort(abs(err.compressed()))
    if magnitudes.size == 0:
        print(label + " no valid values")
        return
    last = len(magnitudes) - 1
    stats = [
        "max:%.3e" % magnitudes[-1],
        "median:%.3e" % magnitudes[int(last*0.50)],
        "98%%:%.3e" % magnitudes[int(last*0.98)],
        "rms:%.3e" % np.sqrt(np.mean(magnitudes**2)),
        "zero-offset:%+.3e" % np.mean(magnitudes),
    ]
    print(label + " " + " ".join(stats))
def plot_models(opts, result, limits=None, setnum=0):
    # type: (Dict[str, Any], Dict[str, Any], Optional[Tuple[float, float]], int) -> Tuple[float, float]
    """
    Plot the results from :func:`run_models`.

    *limits* gives the (vmin, vmax) display limits; when None they are
    derived from the computed values and returned so later calls can
    reuse the same scale.
    """
    import matplotlib.pyplot as plt
    base_value, comp_value = result['base_value'], result['comp_value']
    base_time, comp_time = result['base_time'], result['comp_time']
    resid, relerr = result['resid'], result['relerr']
    have_base, have_comp = (base_value is not None), (comp_value is not None)
    base, comp = opts['engines']
    base_data, comp_data = opts['data']
    # Overlay the measured data only when exactly one theory is plotted.
    use_data = (opts['datafile'] is not None) and (have_base ^ have_comp)

    # Plot if requested
    view = opts['view']
    #view = 'log'
    if limits is None:
        vmin, vmax = np.inf, -np.inf
        if have_base:
            vmin = min(vmin, base_value.min())
            vmax = max(vmax, base_value.max())
        if have_comp:
            vmin = min(vmin, comp_value.min())
            vmax = max(vmax, comp_value.max())
        limits = vmin, vmax

    if have_base:
        if have_comp:
            plt.subplot(131)
        plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits)
        plt.title("%s t=%.2f ms"%(base.engine, base_time))
        #cbar_title = "log I"
    if have_comp:
        if have_base:
            plt.subplot(132)
        # For 1D, overplot base on the comparison panel for easy eyeballing.
        if not opts['is2d'] and have_base:
            plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits)
        plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits)
        plt.title("%s t=%.2f ms"%(comp.engine, comp_time))
        #cbar_title = "log I"
    if have_base and have_comp:
        plt.subplot(133)
        if not opts['rel_err']:
            err, errstr, errview = resid, "abs err", "linear"
        else:
            err, errstr, errview = abs(relerr), "rel err", "log"
            if (err == 0.).all():
                errview = 'linear'
        if 0:  # 95% cutoff (debug toggle, intentionally disabled)
            sorted_err = np.sort(err.flatten())
            cutoff = sorted_err[int(sorted_err.size*0.95)]
            err[err > cutoff] = cutoff
        #err,errstr = base/comp,"ratio"
        # Note: base_data only since base and comp have same q values (though
        # perhaps different resolution), and we are plotting the difference
        # at each q
        plot_theory(base_data, None, resid=err, view=errview, use_data=use_data)
        plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear')
        plt.legend(['P%d'%(k+1) for k in range(setnum+1)], loc='best')
        plt.title("max %s = %.3g"%(errstr, abs(err).max()))
        #cbar_title = errstr if errview=="linear" else "log "+errstr
    #if is2D:
    #    h = plt.colorbar()
    #    h.ax.set_title(cbar_title)
    fig = plt.gcf()
    extra_title = ' '+opts['title'] if opts['title'] else ''
    fig.suptitle(":".join(opts['name']) + extra_title)

    if have_base and have_comp and opts['show_hist']:
        plt.figure()
        v = relerr
        # Replace exact zeros so log10 is finite for every point.
        v[v == 0] = 0.5*np.min(np.abs(v[v != 0]))
        # Bug fix: matplotlib removed the *normed* keyword in 3.1;
        # density=True is the equivalent normalization.
        plt.hist(np.log10(np.abs(v)), density=True, bins=50)
        plt.xlabel('log10(err), err = |(%s - %s) / %s|'
                   % (base.engine, comp.engine, comp.engine))
        plt.ylabel('P(err)')
        plt.title('Distribution of relative error between calculation engines')

    return limits
# ===========================================================================
#
# Set of command line options.
# Normal options such as -plot/-noplot are specified as 'name'.
# For options such as -nq=500 which require a value use 'name='.
#
OPTIONS = [
    # Plotting
    'plot', 'noplot',
    'weights', 'profile',
    'linear', 'log', 'q4',
    'rel', 'abs',
    'hist', 'nohist',
    'title=',
    # Data generation
    'data=', 'noise=', 'res=', 'nq=', 'q=',
    'lowq', 'midq', 'highq', 'exq', 'zero',
    '2d', '1d',
    # Parameter set
    'preset', 'random', 'random=', 'sets=',
    'nopars', 'pars',
    'sphere', 'sphere=',  # integrate over a sphere in 2d with n points
    'poly', 'mono',
    'magnetic', 'nonmagnetic',
    'maxdim=',
    # Calculation options
    'cutoff=', 'accuracy=', 'ngauss=',
    'neval=',  # for timing...
    # Precision options
    'engine=',
    'half', 'fast', 'single', 'double', 'single!', 'double!', 'quad!',
    # Output options
    'help', 'html', 'edit',
    # Help options
    'h', '?', 'models', 'models='
    ]

# Flag options (no value) and value options (trailing '=' stripped),
# derived from OPTIONS.  Comprehension loop variables do not leak in
# Python 3, so the immediately-invoked lambda wrappers previously used
# to hide them are unnecessary.
NAME_OPTIONS = {k for k in OPTIONS if not k.endswith('=')}
VALUE_OPTIONS = [k[:-1] for k in OPTIONS if k.endswith('=')]
def columnize(items, indent="", width=None):
    # type: (List[str], str, int) -> str
    """
    Format a list of strings into columns.

    Returns a string with carriage returns ready for printing.
    """
    # Use the columnize package (pycolumize) if it is available
    try:
        from columnize import columnize as _columnize, default_opts
        if width is None:
            width = default_opts['displaywidth']
        return _columnize(list(items), displaywidth=width, lineprefix=indent)
    except ImportError:
        pass
    # Otherwise roll our own.
    if width is None:
        width = 120
    column_width = max(len(w) for w in items) + 1
    # Guard against zero columns when width is smaller than one column.
    num_columns = max(1, (width - len(indent)) // column_width)
    # Bug fix: round the row count up.  The old floor division dropped the
    # trailing len(items) % num_columns items (and *all* items when there
    # were fewer items than columns).
    num_rows = -(-len(items) // num_columns)
    items = items + [""] * (num_rows * num_columns - len(items))
    columns = [items[k*num_rows:(k+1)*num_rows] for k in range(num_columns)]
    lines = [" ".join("%-*s"%(column_width, entry) for entry in row)
             for row in zip(*columns)]
    output = indent + ("\n"+indent).join(lines)
    return output
def get_pars(model_info):
    # type: (ModelInfo) -> ParameterSet
    """
    Extract default parameters from the model definition.

    Vector parameters (length > 1) expand to one entry per element named
    "<id><k>" for k in 1..length.  Polydisperse parameters additionally
    get "_pd", "_pd_n", "_pd_nsigma" and "_pd_type" companion entries.
    """
    # Get the default values for the parameters
    pars = {}
    for p in model_info.parameters.call_parameters:
        parts = [('', p.default)]
        if p.polydisperse:
            parts.append(('_pd', 0.0))
            parts.append(('_pd_n', 0))
            parts.append(('_pd_nsigma', 3.0))
            parts.append(('_pd_type', "gaussian"))
        for ext, val in parts:
            if p.length > 1:
                # Bug fix: the dict() built here was previously discarded,
                # so vector parameters never made it into the result.
                pars.update(("%s%d%s" % (p.id, k, ext), val)
                            for k in range(1, p.length+1))
            else:
                pars[p.id + ext] = val
    return pars
# Integers with an optional sign and no leading zeros.
# NOTE(review): "0" is assumed to be caught by FLOAT_RE — confirm.
INTEGER_RE = re.compile("^[+-]?[1-9][0-9]*$")
def isnumber(s):
    # type: (str) -> bool
    """Return True if string contains an int or float"""
    match = FLOAT_RE.match(s)
    isfloat = (match and not s[match.end():])
    # bool() so the function honours its documented return type rather
    # than leaking a truthy re.Match object (callers only test truthiness,
    # so this is backward-compatible).
    return bool(isfloat or INTEGER_RE.match(s))
def print_models(kind=None):
    # type: (Optional[str]) -> None
    """
    Print the list of available models in columns.
    """
    available = core.list_models(kind=kind)
    print(columnize(available, indent="  "))
# For distinguishing pairs of models for comparison
# key-value pair separator =
# shell characters  | & ; <> $ % ' " \ # `
# model and parameter names _
# parameter expressions - + * / . ( )
# path characters including tilde expansion and windows drive ~ / :
# not sure about brackets [] {}
# maybe one of the following @ ? ^ ! ,
# PAR_SPLIT separates base from comparison values, both in "model1,model2"
# and in "par=value1,value2" command line arguments.
PAR_SPLIT = ','
def parse_opts(argv):
    # type: (List[str]) -> Dict[str, Any]
    """
    Parse command line options.

    Returns the options dictionary (including the constructed data sets
    and calculation engines), or None when help/model listing was
    requested or the arguments were invalid.
    """
    # Partition arguments into flags ("-name"), value assignments
    # ("par=expr") and positional arguments (the model name).
    flags = [arg for arg in argv
             if arg.startswith('-')]
    values = [arg for arg in argv
              if not arg.startswith('-') and '=' in arg]
    positional_args = [arg for arg in argv
                       if not arg.startswith('-') and '=' not in arg]
    # First check if help requested anywhere on line
    if '-h' in flags or '-?' in flags:
        print(USAGE)
        return None
    # Next check that all flags are valid.
    invalid = [o[1:] for o in flags
               if not (o[1:] in NAME_OPTIONS
                       or any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)
                       or o.startswith('-D'))]
    if invalid:
        print("Invalid options: %s."%(", ".join(invalid)))
        print("usage: ./sasmodels [-?] [-models] model")
        return None
    # Check if requesting a list of models.  This is done after checking
    # that the flags are valid so we know it is -models or -models=.
    if any(v.startswith('-models') for v in flags):
        # grab last -models entry
        models = [v for v in flags if v.startswith('-models')][-1]
        if models == '-models':
            models = '-models=all'
        _, kind = models.split('=', 1)
        print_models(kind=kind)
        return None
    # Check that a model was given on the command line
    if not positional_args:
        print("usage: ./sascomp [-?] [-models] model")
        return None
    # Only the last model on the command line is used.
    name = positional_args[-1]
    # Interpret the flags
    # pylint: disable=bad-whitespace,C0321
    opts = {
        'plot'      : True,
        'view'      : 'log',
        'is2d'      : False,
        'qmin'      : None,
        'qmax'      : 0.05,
        'nq'        : 128,
        'res'       : '0.0',
        'noise'     : 0.0,
        'accuracy'  : 'Low',
        'cutoff'    : '0.0',
        'seed'      : -1,   # default to preset
        'mono'      : True,
        # default nonmagnetic; use -magnetic to enable
        'magnetic'  : False,
        'maxdim'    : np.inf,
        'show_pars' : False,
        'show_hist' : False,
        'rel_err'   : True,
        'explore'   : False,
        'zero'      : False,
        'html'      : False,
        'title'     : None,
        'datafile'  : None,
        'sets'      : 0,
        'engine'    : 'default',
        'count'     : '1',
        'show_weights' : False,
        'show_profile' : False,
        'sphere'    : 0,
        'ngauss'    : '0',
    }
    for arg in flags:
        if arg == '-noplot': opts['plot'] = False
        elif arg == '-plot': opts['plot'] = True
        elif arg == '-linear': opts['view'] = 'linear'
        elif arg == '-log': opts['view'] = 'log'
        elif arg == '-q4': opts['view'] = 'q4'
        elif arg == '-1d': opts['is2d'] = False
        elif arg == '-2d': opts['is2d'] = True
        elif arg == '-exq': opts['qmax'] = 10.0
        elif arg == '-highq': opts['qmax'] = 1.0
        elif arg == '-midq': opts['qmax'] = 0.2
        elif arg == '-lowq': opts['qmax'] = 0.05
        elif arg == '-zero': opts['zero'] = True
        elif arg.startswith('-nq='): opts['nq'] = int(arg[4:])
        elif arg.startswith('-q='):
            opts['qmin'], opts['qmax'] = [float(v) for v in arg[3:].split(':')]
        elif arg.startswith('-res='): opts['res'] = arg[5:]
        elif arg.startswith('-noise='): opts['noise'] = float(arg[7:])
        elif arg.startswith('-sets='): opts['sets'] = int(arg[6:])
        elif arg.startswith('-accuracy='): opts['accuracy'] = arg[10:]
        elif arg.startswith('-cutoff='): opts['cutoff'] = arg[8:]
        elif arg.startswith('-title='): opts['title'] = arg[7:]
        elif arg.startswith('-data='): opts['datafile'] = arg[6:]
        elif arg.startswith('-engine='): opts['engine'] = arg[8:]
        elif arg.startswith('-neval='): opts['count'] = arg[7:]
        elif arg.startswith('-ngauss='): opts['ngauss'] = arg[8:]
        elif arg.startswith('-random='):
            opts['seed'] = int(arg[8:])
            opts['sets'] = 0
        elif arg == '-random':
            opts['seed'] = np.random.randint(1000000)
            opts['sets'] = 0
        elif arg.startswith('-sphere'):
            # "-sphere" alone defaults to 150 points; "-sphere=n" uses n.
            opts['sphere'] = int(arg[8:]) if len(arg) > 7 else 150
            opts['is2d'] = True
        elif arg.startswith('-maxdim'):
            # NOTE(review): bare '-maxdim' (no "=value") raises ValueError
            # on float('') — confirm whether that is acceptable.
            opts['maxdim'] = float(arg[8:])
        elif arg == '-preset': opts['seed'] = -1
        elif arg == '-mono': opts['mono'] = True
        elif arg == '-poly': opts['mono'] = False
        elif arg == '-magnetic': opts['magnetic'] = True
        elif arg == '-nonmagnetic': opts['magnetic'] = False
        elif arg == '-pars': opts['show_pars'] = True
        elif arg == '-nopars': opts['show_pars'] = False
        elif arg == '-hist': opts['show_hist'] = True
        elif arg == '-nohist': opts['show_hist'] = False
        elif arg == '-rel': opts['rel_err'] = True
        elif arg == '-abs': opts['rel_err'] = False
        elif arg == '-half': opts['engine'] = 'half'
        elif arg == '-fast': opts['engine'] = 'fast'
        elif arg == '-single': opts['engine'] = 'single'
        elif arg == '-double': opts['engine'] = 'double'
        elif arg == '-single!': opts['engine'] = 'single!'
        elif arg == '-double!': opts['engine'] = 'double!'
        elif arg == '-quad!': opts['engine'] = 'quad!'
        elif arg == '-edit': opts['explore'] = True
        elif arg == '-weights': opts['show_weights'] = True
        elif arg == '-profile': opts['show_profile'] = True
        elif arg == '-html': opts['html'] = True
        # NOTE(review): '-help' currently behaves exactly like '-html';
        # confirm that aliasing is intended.
        elif arg == '-help': opts['html'] = True
        elif arg.startswith('-D'):
            var, val = arg[2:].split('=')
            os.environ[var] = val
    # pylint: enable=bad-whitespace,C0321
    # Magnetism forces 2D for now
    if opts['magnetic']:
        opts['is2d'] = True
    # Force random if sets is used
    if opts['sets'] >= 1 and opts['seed'] < 0:
        opts['seed'] = np.random.randint(1000000)
    if opts['sets'] == 0:
        opts['sets'] = 1
    # Create the computational engines
    if opts['qmin'] is None:
        opts['qmin'] = 0.001*opts['qmax']
    # A comma anywhere (model name, engine, counts, cutoffs, resolutions or
    # parameter values) switches on base-vs-comparison mode.
    comparison = any(PAR_SPLIT in v for v in values)
    if PAR_SPLIT in name:
        names = name.split(PAR_SPLIT, 2)
        comparison = True
    else:
        names = [name]*2
    try:
        model_info = [core.load_model_info(k) for k in names]
    except ImportError as exc:
        print(str(exc), "while loading", names)
        print("usage: ./sasmodels [-?] [-models] model")
        return None
    # Each of the following options may carry one value, or a comma-split
    # pair of (base, comparison) values; normalize all to two-item lists.
    if PAR_SPLIT in opts['ngauss']:
        opts['ngauss'] = [int(k) for k in opts['ngauss'].split(PAR_SPLIT, 2)]
        comparison = True
    else:
        opts['ngauss'] = [int(opts['ngauss'])]*2
    if PAR_SPLIT in opts['engine']:
        opts['engine'] = opts['engine'].split(PAR_SPLIT, 2)
        comparison = True
    else:
        opts['engine'] = [opts['engine']]*2
    if PAR_SPLIT in opts['count']:
        opts['count'] = [int(k) for k in opts['count'].split(PAR_SPLIT, 2)]
        comparison = True
    else:
        opts['count'] = [int(opts['count'])]*2
    if PAR_SPLIT in opts['cutoff']:
        opts['cutoff'] = [float(k) for k in opts['cutoff'].split(PAR_SPLIT, 2)]
        comparison = True
    else:
        opts['cutoff'] = [float(opts['cutoff'])]*2
    if PAR_SPLIT in opts['res']:
        opts['res'] = [float(k) for k in opts['res'].split(PAR_SPLIT, 2)]
        comparison = True
    else:
        opts['res'] = [float(opts['res'])]*2
    if opts['datafile'] is not None:
        data0 = load_data(os.path.expanduser(opts['datafile']))
        data = data0, data0
    else:
        # Hack around the fact that make_data doesn't take a pair of resolutions
        res = opts['res']
        opts['res'] = res[0]
        data0, _ = make_data(opts)
        if res[0] != res[1]:
            opts['res'] = res[1]
            data1, _ = make_data(opts)
        else:
            data1 = data0
        opts['res'] = res
        data = data0, data1
    base = make_engine(model_info[0], data[0], opts['engine'][0],
                       opts['cutoff'][0], opts['ngauss'][0])
    if comparison:
        comp = make_engine(model_info[1], data[1], opts['engine'][1],
                           opts['cutoff'][1], opts['ngauss'][1])
    else:
        comp = None
    # pylint: disable=bad-whitespace
    # Remember it all
    opts.update({
        'data'      : data,
        'name'      : names,
        'info'      : model_info,
        'engines'   : [base, comp],
        'values'    : values,
    })
    # pylint: enable=bad-whitespace
    # Set the integration parameters to the half sphere
    if opts['sphere'] > 0:
        set_spherical_integration_parameters(opts, opts['sphere'])
    return opts
def set_spherical_integration_parameters(opts, steps):
# type: (Dict[str, Any], int) -> None
"""
Set integration parameters for spherical integration over the entire
surface in theta-phi coordinates.
"""
# Set the integration parameters to the half sphere
opts['values'].extend([
#'theta=90',
'theta_pd=%g'%(90/np.sqrt(3)),
'theta_pd_n=%d'%steps,
'theta_pd_type=rectangle',
#'phi=0',
'phi_pd=%g'%(180/np.sqrt(3)),
'phi_pd_n=%d'%(2*steps),
'phi_pd_type=rectangle',
#'background=0',
])
if 'psi' in opts['info'][0].parameters:
opts['values'].extend([
#'psi=0',
'psi_pd=%g'%(180/np.sqrt(3)),
'psi_pd_n=%d'%(2*steps),
'psi_pd_type=rectangle',
])
def parse_pars(opts, maxdim=None):
# type: (Dict[str, Any], float) -> Tuple[Dict[str, float], Dict[str, float]]
"""
Generate parameter sets for base and comparison models.
Returns a pair of parameter dictionaries.
The default parameter values come from the model, or a randomized model
if a seed value is given. Next, evaluate any parameter expressions,
constraining the value of the parameter within and between models.
Note: When generating random parameters, **the seed must already be set**
with a call to *np.random.seed(opts['seed'])*.
*opts* controls the parameter generation::
opts = {
'info': (model_info 1, model_info 2),
'seed': -1, # if seed>=0 then randomize parameters
'mono': False, # force monodisperse random parameters
'magnetic': False, # force nonmagetic random parameters
'maxdim': np.inf, # limit particle size to maxdim for random pars
'values': ['par=expr', ...], # override parameter values in model
'show_pars': False, # Show parameter values
'is2d': False, # Show values for orientation parameters
}
The values of *par=expr* are evaluated approximately as::
import numpy as np
from math import *
from parameter_set import *
parameter_set.par = eval(expr)
That is, you can use arbitrary python math expressions including the
functions defined in the math library and the numpy library. You can
also use the existing parameter values, which will either be the model
defaults or the randomly generated values if seed is non-negative.
To compare different values of the same parameter, use *par=expr,expr*.
The first parameter set will have the values from the first expression
and the second parameter set will have the values from the second
expression. Note that the second expression is evaluated using the
values from the first expression, which allows things like::
length=2*radius,length+3
which will compare length to length+3 when length is set to 2*radius.
*maxdim* **DEPRECATED** Use *opts['maxdim']* instead.
"""
# CRUFT: maxdim parameter is deprecated
if maxdim is not None:
opts['maxdim'] = maxdim
model_info, model_info2 = opts['info']
# Get default parameters from model definition.
pars = get_pars(model_info)
pars2 = get_pars(model_info2)
pars2.update((k, v) for k, v in pars.items() if k in pars2)
# randomize parameters
#pars.update(set_pars) # set value before random to control range
if opts['seed'] > -1:
pars = randomize_pars(model_info, pars, maxdim=opts['maxdim'])
if model_info.id != model_info2.id:
pars2 = randomize_pars(model_info2, pars2, maxdim=opts['maxdim'])
# Share values for parameters with the same name
for k, v in pars.items():
if k in pars2:
pars2[k] = v
else:
pars2 = pars.copy()
constrain_pars(model_info, pars)
constrain_pars(model_info2, pars2)
# TODO: randomly contrast match a pair of SLDs with some probability
# Process -mono and -magnetic command line options.
if opts['mono']:
pars = suppress_pd(pars)
pars2 = suppress_pd(pars2)
if not opts['magnetic']:
pars = suppress_magnetism(pars)
pars2 = suppress_magnetism(pars2)
# Fill in parameters given on the command line
presets = {}
presets2 = {}
for arg in opts['values']:
k, v = arg.split('=', 1)
if k not in pars and k not in pars2:
# extract base name without polydispersity info
s = set(p.split('_pd')[0] for p in pars)
print("%r invalid; parameters are: %s"%(k, ", ".join(sorted(s))))
return None
v1, v2 = v.split(PAR_SPLIT, 2) if PAR_SPLIT in v else (v, v)
if v1 and k in pars:
presets[k] = float(v1) if isnumber(v1) else v1
if v2 and k in pars2:
presets2[k] = float(v2) if isnumber(v2) else v2
# If pd given on the command line, default pd_n to 35
for k, v in list(presets.items()):
if k.endswith('_pd'):
presets.setdefault(k+'_n', 35.)
for k, v in list(presets2.items()):
if k.endswith('_pd'):
presets2.setdefault(k+'_n', 35.)
# Evaluate preset parameter expressions
# Note: need to replace ':' with '_' in parameter names and expressions
# in order to support math on magnetic parameters.
context = MATH.copy()
context['np'] = np
context.update((k.replace(':', '_'), v) for k, v in pars.items())
context.update((k, v) for k, v in presets.items() if isinstance(v, float))
#for k,v in sorted(context.items()): print(k, v)
for k, v in presets.items():
if not isinstance(v, float) and not k.endswith('_type'):
presets[k] = eval(v.replace(':', '_'), context)
context.update(presets)
context.update((k.replace(':', '_'), v) for k, v in presets2.items() if isinstance(v, float))
for k, v in presets2.items():
if not isinstance(v, float) and not k.endswith('_type'):
presets2[k] = eval(v.replace(':', '_'), context)
# update parameters with presets
pars.update(presets) # set value after random to control value
pars2.update(presets2) # set value after random to control value
#import pprint; pprint.pprint(model_info)
# Hack to load user-defined distributions; run through all parameters
# and make sure any pd_type parameter is a defined distribution.
if (any(p.endswith('pd_type') and v not in weights.DISTRIBUTIONS
for p, v in pars.items())
or any(p.endswith('pd_type') and v not in weights.DISTRIBUTIONS
for p, v in pars2.items())):
weights.load_weights()
if opts['show_pars']:
if model_info.name != model_info2.name or pars != pars2:
print("==== %s ====="%model_info.name)
print(str(parlist(model_info, pars, opts['is2d'])))
print("==== %s ====="%model_info2.name)
print(str(parlist(model_info2, pars2, opts['is2d'])))
else:
print(str(parlist(model_info, pars, opts['is2d'])))
return pars, pars2
def show_docs(opts):
    # type: (Dict[str, Any]) -> None
    """
    Display the HTML documentation for the first model in *opts* in the
    wx-based viewer.
    """
    from .generate import make_html
    from . import rst2html
    model_info = opts['info'][0]
    doc_html = make_html(model_info)
    doc_dir = os.path.dirname(model_info.filename)
    # Strip the leading "./" style prefix and normalize separators so the
    # result forms a usable file:// base URL on windows as well.
    base_url = "file://" + doc_dir.replace("\\", "/")[2:] + "/"
    rst2html.view_html_wxapp(doc_html, base_url)
def explore(opts):
    # type: (Dict[str, Any]) -> None
    """
    Explore the model interactively using the bumps GUI.

    Builds an :class:`Explore` wrapper around *opts*, loads it into a bumps
    AppFrame with parameter sliders, and wires the toolbar "reset" tool to
    restore the starting parameter values.
    """
    import wx  # type: ignore
    from bumps.names import FitProblem  # type: ignore
    from bumps.gui.app_frame import AppFrame  # type: ignore
    from bumps.gui import signal
    is_mac = "cocoa" in wx.version()
    # Create an app if not running embedded
    app = wx.App() if wx.GetApp() is None else None
    model = Explore(opts)
    problem = FitProblem(model)
    frame = AppFrame(parent=None, title="explore", size=(1000, 700))
    # NOTE(review): on non-mac builds the frame is shown before the model is
    # loaded, on cocoa only afterwards — presumably a wx/cocoa layout
    # workaround; confirm before changing the ordering.
    if not is_mac:
        frame.Show()
    frame.panel.set_model(model=problem)
    frame.panel.Layout()
    frame.panel.aui.Split(0, wx.TOP)
    def _reset_parameters(event):
        # Toolbar callback: restore starting values and refresh the GUI.
        model.revert_values()
        signal.update_parameters(problem)
    frame.Bind(wx.EVT_TOOL, _reset_parameters, frame.ToolBar.GetToolByPos(1))
    if is_mac:
        frame.Show()
    # If running within an app, start the main loop
    if app:
        app.MainLoop()
class Explore(object):
    """
    Bumps wrapper for a SAS model comparison.

    Implements just enough of the bumps fit-problem protocol (parameters,
    numpoints, nllf, plot, ...) that the comparison can be loaded into the
    bumps GUI, with parameter sliders and plots updated on the fly.
    """
    def __init__(self, opts):
        # type: (Dict[str, Any]) -> None
        from bumps.cli import config_matplotlib  # type: ignore
        from . import bumps_model
        config_matplotlib()
        self.opts = opts
        opts['pars'] = list(opts['pars'])
        pars_1, pars_2 = opts['pars']
        info_1, info_2 = opts['info']
        # Only mirror parameter set 1 into set 2 when they start out equal.
        self.fix_p2 = info_1 != info_2 or pars_1 != pars_2
        pars, pd_types = bumps_model.create_parameters(info_1, **pars_1)
        # Attach fit ranges; for 1D data only the 1D user parameters get one.
        if opts['is2d']:
            for name, par in pars.items():
                par.range(*parameter_range(name, par.value))
        else:
            for p in info_1.parameters.user_parameters({}, is2d=False):
                for suffix in ('', '_pd', '_pd_n', '_pd_nsigma'):
                    name = p.name + suffix
                    par = pars.get(name, None)
                    if par is not None:
                        par.range(*parameter_range(name, par.value))
        self.pars = pars
        self.starting_values = {name: par.value for name, par in pars.items()}
        self.pd_types = pd_types
        self.limits = None
    def revert_values(self):
        # type: () -> None
        """
        Restore every parameter to its starting value.
        """
        for name, value in self.starting_values.items():
            self.pars[name].value = value
    def model_update(self):
        # type: () -> None
        """
        Respond to signal that model parameters have been changed.
        """
        pass
    def numpoints(self):
        # type: () -> int
        """
        Number of "data points": one per parameter plus one, so dof is 1.
        """
        return 1 + len(self.pars)
    def parameters(self):
        # type: () -> Any  # Dict/List hierarchy of parameters
        """
        Return the parameter dictionary for the fitter.
        """
        return self.pars
    def nllf(self):
        # type: () -> float
        """
        Cost function; there is no data being fit, so the cost is zero.
        """
        # pylint: disable=no-self-use
        return 0.  # No nllf
    def plot(self, view='log'):
        # type: (str) -> None
        """
        Plot the models using the current parameter values.
        """
        current = {name: par.value for name, par in self.pars.items()}
        current.update(self.pd_types)
        self.opts['pars'][0] = current
        if not self.fix_p2:
            self.opts['pars'][1] = current
        result = run_models(self.opts)
        limits = plot_models(self.opts, result, limits=self.limits)
        if self.limits is None:
            # First draw: freeze the y-axis limits and redraw with them.
            vmin, vmax = limits
            self.limits = vmax*1e-7, 1.3*vmax
            import pylab
            pylab.clf()
            plot_models(self.opts, result, limits=self.limits)
def main(*argv):
    # type: (*str) -> None
    """
    Command-line entry point: parse options, seed the RNG when requested,
    then dispatch to doc display, the explorer GUI, or a comparison run.
    """
    opts = parse_opts(argv)
    if opts is None:
        return
    if opts['seed'] > -1:
        print("Randomize using -random=%i"%opts['seed'])
        np.random.seed(opts['seed'])
    if opts['html']:
        show_docs(opts)
        return
    if opts['explore']:
        opts['pars'] = parse_pars(opts)
        if opts['pars'] is not None:
            explore(opts)
        return
    compare(opts)
# Script entry point: forward the command-line arguments (minus the program
# name) to main().
if __name__ == "__main__":
    main(*sys.argv[1:])
| SasView/sasmodels | sasmodels/compare.py | Python | bsd-3-clause | 62,003 | [
"Gaussian"
] | 75732db330232b971f5a0d228969683fed7d7cc8e0b5902e2794e822bf256c13 |
# -*- coding: utf-8 -*-
'''
diacamma.event package
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2016 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from shutil import rmtree
from lucterios.framework.test import LucteriosTest
from lucterios.framework.xfergraphic import XferContainerAcknowledge
from lucterios.framework.filetools import get_user_dir
from lucterios.CORE.models import Parameter
from lucterios.CORE.parameters import Params
from diacamma.member.test_tools import default_adherents, default_season,\
default_params, set_parameters
from diacamma.member.views import AdherentShow
from diacamma.event.views_conf import EventConf, DegreeTypeAddModify,\
DegreeTypeDel, SubDegreeTypeAddModify, SubDegreeTypeDel
from diacamma.event.views_degree import DegreeAddModify, DegreeDel
from diacamma.event.test_tools import default_event_params
class ConfigurationTest(LucteriosTest):
    """
    Regression tests for the event-module configuration screen:
    degree types, sub-degree types and the related parameters.
    """

    def setUp(self):
        # Reset the user directory and install the default season/params so
        # every test starts from the same known configuration.
        LucteriosTest.setUp(self)
        rmtree(get_user_dir(), True)
        default_season()
        default_params()
        set_parameters(["team", "activite", "age", "licence", "genre", 'numero', 'birth'])

    def test_degreetype(self):
        """Create a degree type through the UI, check it is listed, delete it."""
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('', 2 + 2 + 2 + 7)
        # Empty grid with an activity column (the 'activite' param is active).
        self.assert_grid_equal('degreetype', {'activity': "passion", 'name': "nom", 'level': "niveau"}, 0)
        self.factory.xfer = DegreeTypeAddModify()
        self.calljson('/diacamma.event/degreeTypeAddModify', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'degreeTypeAddModify')
        self.assert_count_equal('', 4)
        self.assert_attrib_equal('activity', "description", "passion")
        self.assert_select_equal('activity', 2)  # nb=2
        # Save a new degree type and verify it shows up in the grid.
        self.factory.xfer = DegreeTypeAddModify()
        self.calljson('/diacamma.event/degreeTypeAddModify',
                      {"SAVE": "YES", "activity": 1, "name": "abc", "level": "5"}, False)
        self.assert_observer('core.acknowledge', 'diacamma.event', 'degreeTypeAddModify')
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('degreetype', 1)
        self.assert_json_equal('', 'degreetype/@0/name', "abc")
        # Delete it again and verify the grid is empty.
        self.factory.xfer = DegreeTypeDel()
        self.calljson('/diacamma.event/degreeTypeDel',
                      {"CONFIRME": "YES", "degreetype": 1}, False)
        self.assert_observer('core.acknowledge', 'diacamma.event', 'degreeTypeDel')
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('degreetype', 0)

    def test_subdegreetype(self):
        """Create a sub-degree type, check it is listed, then delete it."""
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('', 2 + 2 + 2 + 7)
        self.assert_grid_equal('subdegreetype', {'name': "nom", 'level': "niveau"}, 0)
        self.factory.xfer = SubDegreeTypeAddModify()
        self.calljson('/diacamma.event/subDegreeTypeAddModify', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'subDegreeTypeAddModify')
        self.assert_count_equal('', 3)
        # Save a new sub-degree type and verify it shows up in the grid.
        self.factory.xfer = SubDegreeTypeAddModify()
        self.calljson('/diacamma.event/subDegreeTypeAddModify',
                      {"SAVE": "YES", "name": "uvw", "level": "10"}, False)
        self.assert_observer('core.acknowledge', 'diacamma.event', 'subDegreeTypeAddModify')
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('subdegreetype', 1)
        self.assert_json_equal('', 'subdegreetype/@0/name', "uvw")
        # Delete it again and verify the grid is empty.
        self.factory.xfer = SubDegreeTypeDel()
        self.calljson('/diacamma.event/subDegreeTypeDel',
                      {"CONFIRME": "YES", "subdegreetype": 1}, False)
        self.assert_observer('core.acknowledge', 'diacamma.event', 'subDegreeTypeDel')
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('subdegreetype', 0)

    def test_no_activity(self):
        """Without the activity parameter the activity column/field disappears."""
        set_parameters([])
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('', 2 + 2 + 2 + 7)
        # No 'activity' column in the grid this time.
        self.assert_grid_equal('degreetype', {'name': "nom", 'level': "niveau"}, 0)
        self.factory.xfer = DegreeTypeAddModify()
        self.calljson('/diacamma.event/degreeTypeAddModify', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'degreeTypeAddModify')
        self.assert_count_equal('', 3)

    def test_params(self):
        """Renaming/disabling degree parameters renames or removes the tabs."""
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('', 2 + 2 + 2 + 7)
        self.assert_json_equal('TAB', '__tab_1', 'Paramètres')
        self.assert_json_equal('TAB', '__tab_2', 'Diplôme')
        self.assert_json_equal('TAB', '__tab_3', 'Sous-diplôme')
        self.assertFalse('__tab_4' in self.json_data.keys(), self.json_data.keys())
        self.assert_json_equal('LABELFORM', 'event-degree-text', 'Diplôme')
        self.assert_json_equal('LABELFORM', 'event-subdegree-text', 'Sous-diplôme')
        # Rename both degree labels and check the tab titles follow.
        Parameter.change_value("event-degree-text", 'Grade')
        Parameter.change_value("event-subdegree-text", 'Barette')
        Params.clear()
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('', 2 + 2 + 2 + 7)
        self.assert_json_equal('TAB', '__tab_1', 'Paramètres')
        self.assert_json_equal('TAB', '__tab_2', 'Grade')
        self.assert_json_equal('TAB', '__tab_3', 'Barette')
        self.assertFalse('__tab_4' in self.json_data.keys(), self.json_data.keys())
        self.assert_json_equal('LABELFORM', 'event-degree-text', 'Grade')
        self.assert_json_equal('LABELFORM', 'event-subdegree-text', 'Barette')
        self.assert_json_equal('LABELFORM', 'event-subdegree-enable', 'Oui')
        self.assert_json_equal('LABELFORM', 'event-degree-enable', 'Oui')
        # Disabling sub-degrees removes the third tab.
        Parameter.change_value("event-subdegree-enable", 0)
        Params.clear()
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('', 2 + 2 + 7)
        self.assert_json_equal('TAB', '__tab_1', 'Paramètres')
        self.assert_json_equal('TAB', '__tab_2', 'Grade')
        self.assertFalse('__tab_3' in self.json_data.keys(), self.json_data.keys())
        self.assert_json_equal('LABELFORM', 'event-subdegree-enable', 'Non')
        self.assert_json_equal('LABELFORM', 'event-degree-enable', 'Oui')
        # Disabling degrees removes the second tab as well.
        Parameter.change_value("event-degree-enable", 0)
        Params.clear()
        self.factory.xfer = EventConf()
        self.calljson('/diacamma.event/eventConf', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'eventConf')
        self.assert_count_equal('', 2 + 7)
        self.assertFalse('__tab_2' in self.json_data.keys(), self.json_data.keys())
        self.assert_json_equal('TAB', '__tab_1', 'Paramètres')
        self.assert_json_equal('LABELFORM', 'event-subdegree-enable', 'Non')
        self.assert_json_equal('LABELFORM', 'event-degree-enable', 'Non')
class DegreeTest(LucteriosTest):
    """
    Regression tests for adding and removing degrees on an adherent record,
    with and without the activity and sub-degree features enabled.
    """

    def setUp(self):
        # Known configuration: default season/params/adherents plus the
        # event parameters (degree renamed 'Grade', sub-degree 'Barette').
        LucteriosTest.setUp(self)
        rmtree(get_user_dir(), True)
        default_season()
        default_params()
        default_adherents()
        default_event_params()
        set_parameters(["team", "activite", "age", "licence", "genre", 'numero', 'birth'])

    def test_degree(self):
        """Add a degree to an adherent, check the grid content, delete it."""
        self.factory.xfer = AdherentShow()
        self.calljson('/diacamma.member/adherentShow', {'adherent': 2}, False)
        self.assert_observer('core.custom', 'diacamma.member', 'adherentShow')
        self.assert_json_equal('LABELFORM', 'firstname', "Avrel")
        self.assert_json_equal('LABELFORM', 'lastname', "Dalton")
        self.assert_grid_equal('degrees', {'degree': "Grade", 'subdegree': "Barette", 'date': "date"}, 0)
        self.factory.xfer = DegreeAddModify()
        self.calljson('/diacamma.event/degreeAddModify', {}, False)
        self.assert_observer('core.custom', 'diacamma.event', 'degreeAddModify')
        self.assert_count_equal('', 5)
        # Save a degree; with activities enabled the degree label includes one.
        self.factory.xfer = DegreeAddModify()
        self.calljson('/diacamma.event/degreeAddModify',
                      {"SAVE": "YES", 'adherent': 2, "degree": "3", "subdegree": "2", "date": "2014-10-12"}, False)
        self.assert_observer('core.acknowledge', 'diacamma.event', 'degreeAddModify')
        self.factory.xfer = AdherentShow()
        self.calljson('/diacamma.member/adherentShow', {'adherent': 2}, False)
        self.assert_count_equal('degrees', 1)
        self.assert_json_equal('', 'degrees/@0/degree', "[activity1] level #1.3")
        self.assert_json_equal('', 'degrees/@0/subdegree', "sublevel #2")
        self.assert_json_equal('', 'degrees/@0/date', "2014-10-12")
        # Delete it again and verify the grid is empty.
        self.factory.xfer = DegreeDel()
        self.calljson('/diacamma.event/degreeDel', {"CONFIRME": "YES", "degrees": 1}, False)
        self.assert_observer('core.acknowledge', 'diacamma.event', 'degreeDel')
        self.factory.xfer = AdherentShow()
        self.calljson('/diacamma.member/adherentShow', {'adherent': 2}, False)
        self.assert_count_equal('degrees', 0)

    def test_no_activity(self):
        """Without activities, the degree label has no activity prefix."""
        set_parameters([])
        self.factory.xfer = DegreeAddModify()
        self.calljson('/diacamma.event/degreeAddModify',
                      {"SAVE": "YES", 'adherent': 2, "degree": "3", "subdegree": "2", "date": "2014-10-12"}, False)
        self.assert_observer('core.acknowledge', 'diacamma.event', 'degreeAddModify')
        self.factory.xfer = AdherentShow()
        self.calljson('/diacamma.member/adherentShow', {'adherent': 2}, False)
        self.assert_grid_equal('degrees', {'degree': 'Grade', 'subdegree': 'Barette', 'date': 'date'}, 1)  # nb=3
        self.assert_json_equal('', 'degrees/@0/degree', "level #1.3")
        self.assert_json_equal('', 'degrees/@0/subdegree', "sublevel #2")
        self.assert_json_equal('', 'degrees/@0/date', "2014-10-12")

    def test_no_subdegree(self):
        """With sub-degrees disabled, the grid has no sub-degree column."""
        Parameter.change_value("event-subdegree-enable", 0)
        Params.clear()
        self.factory.xfer = DegreeAddModify()
        self.calljson('/diacamma.event/degreeAddModify',
                      {"SAVE": "YES", 'adherent': 2, "degree": "3", "date": "2014-10-12"}, False)
        self.assert_observer('core.acknowledge', 'diacamma.event', 'degreeAddModify')
        self.factory.xfer = AdherentShow()
        self.calljson('/diacamma.member/adherentShow', {'adherent': 2}, False)
        self.assert_grid_equal('degrees', {'degree': 'Grade', 'date': 'date'}, 1)  # nb=2
        self.assert_json_equal('', 'degrees/@0/degree', "[activity1] level #1.3")
        self.assert_json_equal('', 'degrees/@0/date', "2014-10-12")
| Diacamma2/asso | diacamma/event/tests.py | Python | gpl-3.0 | 12,538 | [
"Dalton"
] | 771c40f0d4852a640c9780290855fcca414f4b1afd758add88cc64cfdf68b7dc |
#!/usr/bin/env python
"""This file holds the function run_mcmc, which takes a trained emulator and a set of truth data and runs
an MCMC analysis with a predefined number of steps and walkers."""
from time import time
from multiprocessing import cpu_count, Pool
import warnings
from itertools import izip
from os import path
from ast import literal_eval
import numpy as np
import emcee as mc
import dynesty as dyn
from functools import partial
from scipy.linalg import inv
import h5py
from pearce.emulator import OriginalRecipe, ExtraCrispy, SpicyBuffalo, NashvilleHot, LemonPepperWet
# liklihood functions need to be defined here because the emulator will be made global
def lnprior(theta, param_names, *args):
    """
    Flat prior over the emulator's parameter bounds.

    :param theta:
        The parameter values proposed by the sampler.
    :param param_names:
        Names identifying the entries of theta, used to look up bounds.
    :return:
        0 when every value is finite and inside the emulator bounds,
        -np.inf otherwise so the sampler rejects the proposal.
    """
    bounds_emu = _emus[0]
    for idx, name in enumerate(param_names):
        value = theta[idx]
        lower, upper = bounds_emu.get_param_bounds(name)
        if np.isnan(value) or value < lower or value > upper:
            return -np.inf
    return 0
def lnprior_unitcube(u, param_names):
    """
    Prior transform for nested samplers: map a point from the unit
    hypercube onto the flat prior defined by the emulator's bounds.

    The transform is done in place on *u*, and *u* is also returned as
    dynesty expects.

    :param u:
        Point in the unit hypercube proposed by the sampler.
    :param param_names:
        Names identifying each component of u.
    :return:
        u, rescaled into the emulator's parameter bounds.
    """
    bounds_emu = _emus[0]
    for idx, name in enumerate(param_names):
        lower, upper = bounds_emu.get_param_bounds(name)
        u[idx] = lower + (upper - lower) * u[idx]
    return u
def lnlike(theta, param_names, fixed_params, r_bin_centers, y, combined_inv_cov):
    """
    Gaussian log-likelihood (up to a constant) of the emulator prediction
    against the measured observables.

    :param theta:
        Proposed values for the sampled parameters.
    :param param_names:
        Names of the parameters in theta.
    :param fixed_params:
        Parameters needed for prediction but not being sampled over.
    :param r_bin_centers:
        Iterable of bin-center arrays, one per observable/emulator.
    :param y:
        Measured observables, stacked into a single vector.
    :param combined_inv_cov:
        Inverse of the combined (measurement + emulator) covariance for the
        stacked observables; independent of theta so it is precomputed.
    :return:
        -delta^T C^{-1} delta, where delta is prediction minus data.
    """
    full_params = dict(zip(param_names, theta))
    full_params.update(fixed_params)
    # Each emulator predicts log10 of its observable; undo the log before
    # stacking so the residual is taken in linear space.
    predictions = []
    for emu, centers in zip(_emus, r_bin_centers):
        log_pred = emu.emulate_wrt_r(full_params, centers)[0]
        predictions.append(10 ** log_pred)
    delta = np.hstack(predictions) - y
    return -np.dot(delta, np.dot(combined_inv_cov, delta))
def lnprob(theta, *args):
    """
    Posterior log-probability: flat prior plus Gaussian likelihood.

    :param theta:
        Parameters for the proposal.
    :param args:
        Forwarded unchanged to lnprior and lnlike.
    :return:
        Log posterior (float); -np.inf outside the prior support, in which
        case the likelihood is never evaluated.
    """
    prior = lnprior(theta, *args)
    if np.isfinite(prior):
        return prior + lnlike(theta, *args)
    return -np.inf
def _run_tests(y, cov, r_bin_centers, param_names, fixed_params, ncores):
    """
    Validate the inputs shared by the run_mcmc* drivers.

    Checks the requested core count, the consistency of the data vector with
    its covariance matrix and with the scale bins, and that param_names plus
    fixed_params fully specify the emulators' parameters.

    :params:
        Same as in run_mcmc. See docstring for details.
    :return: ncores, clamped to the number of available cores if needed.
    """
    # NOTE(review): assert-based validation is stripped under `python -O`.
    assert ncores == 'all' or ncores > 0
    if type(ncores) is not str:
        assert int(ncores) == ncores
    max_cores = cpu_count()
    if ncores == 'all':
        ncores = max_cores
    elif ncores > max_cores:
        warnings.warn('ncores invalid. Changing from %d to maximum %d.' % (ncores, max_cores))
        ncores = max_cores
    # else, we're good!
    print 'N cores', ncores
    # make sure all inputs are of consistent shape: cov is square and
    # matches the stacked data vector
    assert y.shape[0] == cov.shape[0] and cov.shape[1] == cov.shape[0]
    tot_bins = sum(len(rbc) for rbc in r_bin_centers)
    assert y.shape[0] == tot_bins, "Scale bins mismatch with data shape"
    # check we've defined all necessary params (the +1 accounts for r)
    assert all([ _emu.emulator_ndim <= len(fixed_params) + len(param_names) + 1 for _emu in _emus]) # for r
    tmp = param_names[:]
    assert not any([key in param_names for key in fixed_params]) # param names can't include the fixed ones
    tmp.extend(fixed_params.keys())
    assert _emus[0].check_param_names(tmp, ignore=['r'])
    return ncores
# TOOD make functions that save/restore a state, not just the chains.
def _resume_from_previous(resume_from_previous, nwalkers, num_params):
"""
Create initial guess by loading previous chain's last position.
:param resume_from_previous:
String giving the file name of the previous chain to use.
:param nwalkers:
Number of walkers to initiate. Must be the same as in resume_from_previous
:param num_params:
Number of params to initiate, must be the same as in resume_from_previous
:return: pos0, the initial position for each walker in the chain.
"""
# load a previous chain
raise NotImplementedError
# TODO add error messages here
old_chain = np.loadtxt(resume_from_previous)
if len(old_chain.shape) == 2:
c = old_chain.reshape((nwalkers, -1, num_params))
pos0 = c[:, -1, :]
else: # 3
pos0 = old_chain[:, -1, :]
return pos0
def _random_initial_guess(param_names, nwalkers, num_params):
"""
Create a random initial guess for the sampler. Creates a 3-sigma gaussian ball around the center of the prior space.
:param param_names:
The names of the parameters in the emulator
:param nwalkers:
Number of walkers to initiate. Must be the same as in resume_from_previous
:param num_params:
Number of params to initiate, must be the same as in resume_from_previous
:return: pos0, the initial position of each walker for the chain.
"""
pos0 = np.zeros((nwalkers, num_params))
for idx, pname in enumerate(param_names):
low, high = _emus[0].get_param_bounds(pname)
pos0[:, idx] = np.random.randn(nwalkers) * (np.abs(high - low) / 6.0) + (low + high) / 2.0
# TODO variable with of the initial guess
return pos0
def run_mcmc(emus, param_names, y, cov, r_bin_centers, fixed_params=None,
             resume_from_previous=None, nwalkers=1000, nsteps=100, nburn=20,
             ncores='all', return_lnprob=False):
    """
    Run an MCMC using emcee and the emulator(s). Includes some sanity checks
    and precomputes the combined inverse covariance.

    :param emus:
        A trained Emu instance, or a list of them for multiple observables.
    :param param_names:
        Names of the parameters to constrain.
    :param y:
        Data vector to constrain against (all observables stacked).
    :param cov:
        Measured covariance of y (square, matching y).
    :param r_bin_centers:
        Iterable of scale-bin-center arrays, one per observable.
    :param fixed_params:
        Values held fixed during emulation; default None means {}.
    :param resume_from_previous:
        Filename of a previous chain to resume from; None starts fresh.
    :param nwalkers:
        Number of walkers for the mcmc. Default is 1000.
    :param nsteps:
        Number of steps for the mcmc. Default is 100.
    :param nburn:
        Number of burn-in steps, default is 20 (must be 0 when resuming).
    :param ncores:
        Number of cores; 'all' (default) uses every available core.
    :param return_lnprob:
        If True also return the lnprob values of the samples.
    :return:
        chain of shape ((nsteps-nburn)*nwalkers, len(param_names)), plus the
        flattened lnprob values when return_lnprob is True.
    :raises AssertionError: when resuming with nburn != 0.
    """
    # Declare the global *before* assigning to it: assigning first and
    # declaring afterwards is a SyntaxError in python 3 and only worked by
    # accident (with a SyntaxWarning) in python 2.
    global _emus
    # None sentinel instead of a shared mutable default argument.
    if fixed_params is None:
        fixed_params = {}
    # make emus global so they can be accessed by the likelihood functions
    if type(emus) is not list:
        emus = [emus]
    _emus = emus
    ncores = _run_tests(y, cov, r_bin_centers, param_names, fixed_params, ncores)
    num_params = len(param_names)
    combined_inv_cov = inv(cov)
    sampler = mc.EnsembleSampler(nwalkers, num_params, lnprob, threads=ncores,
                                 args=(param_names, fixed_params, r_bin_centers,
                                       y, combined_inv_cov))
    if resume_from_previous is not None:
        # Burn-in has already happened in the chain being resumed.
        if nburn != 0:
            raise AssertionError("Cannot resume from previous chain with nburn != 0. Please change! ")
        pos0 = _resume_from_previous(resume_from_previous, nwalkers, num_params)
    else:
        pos0 = _random_initial_guess(param_names, nwalkers, num_params)
    # TODO turn this into a generator
    sampler.run_mcmc(pos0, nsteps)
    # Drop burn-in steps and flatten walkers into one long chain.
    chain = sampler.chain[:, nburn:, :].reshape((-1, num_params))
    if return_lnprob:
        lnprob_chain = sampler.lnprobability[:, nburn:].reshape((-1,))
        return chain, lnprob_chain
    return chain
def run_nested_mcmc(emus, param_names, y, cov, r_bin_centers, fixed_params=None,
                    resume_from_previous=None, nlive=1000, ncores='all', dlogz=0.1):
    """
    Run a nested-sampling analysis using dynesty and the emulator(s).

    This is a generator yielding a single array of shape
    (nsamples, len(param_names)+1): the posterior samples with the running
    log-evidence appended as the last column.

    :param emus:
        A trained Emu instance, or a list of them for multiple observables.
    :param param_names:
        Names of the parameters to constrain.
    :param y:
        Data vector to constrain against (all observables stacked).
    :param cov:
        Measured covariance of y (square, matching y).
    :param r_bin_centers:
        Iterable of scale-bin-center arrays, one per observable.
    :param fixed_params:
        Values held fixed during emulation; default None means {}.
    :param resume_from_previous:
        Not supported for nested sampling; raises NotImplementedError.
    :param nlive:
        Number of live points for the nested sampler. Default is 1000.
    :param ncores:
        Number of cores; 'all' (default) uses every available core.
    :param dlogz:
        Evidence tolerance used as the stopping criterion. Default is 0.1.
    """
    # Declare the global *before* assigning to it (SyntaxError in python 3
    # the other way round).
    global _emus
    # None sentinel instead of a shared mutable default argument.
    if fixed_params is None:
        fixed_params = {}
    # make emus global so they can be accessed by the likelihood functions
    if type(emus) is not list:
        emus = [emus]
    _emus = emus
    ncores = _run_tests(y, cov, r_bin_centers, param_names, fixed_params, ncores)
    num_params = len(param_names)
    combined_inv_cov = inv(cov)
    ll = partial(lnlike, param_names=param_names, fixed_params=fixed_params,
                 r_bin_centers=r_bin_centers, y=y, combined_inv_cov=combined_inv_cov)
    pi = partial(lnprior_unitcube, param_names=param_names)
    sampler = dyn.NestedSampler(ll, pi, num_params, nlive=nlive)
    if resume_from_previous is not None:
        # Was `raise NotImplemented(...)`: NotImplemented is a value, not an
        # exception type, so that line itself raised a TypeError.
        raise NotImplementedError("Haven't figured out reviving from dead points.")
    # run_nested's first positional argument is maxiter, so dlogz must be
    # passed by keyword; the old positional call silently set maxiter=dlogz.
    sampler.run_nested(dlogz=dlogz)
    res = sampler.results
    chain = res['samples']
    evidence = res['logz'].reshape((-1, 1))
    yield np.hstack([chain, evidence])
def run_mcmc_iterator(emus, param_names, y, cov, r_bin_centers, fixed_params=None,
                      pos0=None, nwalkers=1000, nsteps=100, nburn=20, ncores='all',
                      return_lnprob=False):
    """
    Run an MCMC using emcee and the emulator(s), yielding each step of the
    chain so the caller can write it to file or print progress.

    :param emus:
        A trained Emu instance, or a list of them for multiple observables.
    :param param_names:
        Names of the parameters to constrain.
    :param y:
        Data vector to constrain against (all observables stacked).
    :param cov:
        Measured covariance of y (square, matching y).
    :param r_bin_centers:
        Iterable of scale-bin-center arrays, one per observable.
    :param fixed_params:
        Values held fixed during emulation; default None means {}.
    :param pos0:
        Initial walker positions; None draws a random Gaussian ball.
    :param nwalkers:
        Number of walkers for the mcmc. Default is 1000.
    :param nsteps:
        Number of steps for the mcmc. Default is 100.
    :param nburn:
        Kept for interface compatibility; burn-in is not applied here since
        each raw step is yielded to the caller.
    :param ncores:
        Number of cores; 'all' (default) uses every available core.
    :param return_lnprob:
        If True yield (positions, lnprobs) per step instead of positions.
    :yield:
        The walker positions (nwalkers, len(param_names)) at each step, with
        their lnprob values when return_lnprob is True.
    """
    # Declare the global *before* assigning to it: assigning first and
    # declaring afterwards is a SyntaxError in python 3.
    global _emus
    # None sentinel instead of a shared mutable default argument.
    if fixed_params is None:
        fixed_params = {}
    if type(emus) is not list:
        emus = [emus]
    _emus = emus
    ncores = _run_tests(y, cov, r_bin_centers, param_names, fixed_params, ncores)
    pool = Pool(processes=ncores)
    num_params = len(param_names)
    combined_inv_cov = inv(cov)
    sampler = mc.EnsembleSampler(nwalkers, num_params, lnprob, pool=pool,
                                 args=(param_names, fixed_params, r_bin_centers,
                                       y, combined_inv_cov))
    if pos0 is None:
        pos0 = _random_initial_guess(param_names, nwalkers, num_params)
    for result in sampler.sample(pos0, iterations=nsteps, storechain=False):
        if return_lnprob:
            yield result[0], result[1]
        else:
            yield result[0]
def run_mcmc_config(config_fname, restart = False):
    """
    Run an MCMC from a config file generated from initialize_mcmc.
    Essentially, a re-skin of the above. However, this is the preferred
    method for using this module, because it guarantees the state space
    of the samples is explicitly saved with them.
    :param config_fname:
        An hdf5 filename prepared a la initialize_mcmc. Will have the chain added as a dataset
    :param restart:
        If True, resume the chain already stored in the file instead of
        starting a new one. Default is False.
    """
    # NOTE(review): this module is Python 2 code (print statement and xrange
    # below); it will not run unmodified under Python 3.
    assert path.isfile(config_fname), "Invalid config fname for chain"
    #print config_fname
    f = h5py.File(config_fname, 'r+')
    # TODO there's a better way to do this.
    #f.swmr_mode = True # enables the chains to be accessed while they're running
    # Map emu-type names stored in the config file to their classes.
    emu_type_dict = {'OriginalRecipe':OriginalRecipe,
                     'ExtraCrispy': ExtraCrispy,
                     'SpicyBuffalo': SpicyBuffalo,
                     'NashvilleHot': NashvilleHot,
                     'LemonPepperWet':LemonPepperWet}
    # --- Emulator setup parameters, read from the file's attributes ---
    fixed_params = f.attrs['fixed_params']
    fixed_params = {} if fixed_params is None else literal_eval(fixed_params)
    #metric = f.attrs['metric'] if 'metric' in f.attrs else {}
    emu_hps = f.attrs['emu_hps']
    emu_hps = {} if emu_hps is None else literal_eval(emu_hps)
    seed = f.attrs['seed']
    seed = int(time()) if seed is None else seed
    training_file = f.attrs['training_file']
    emu_type = f.attrs['emu_type']
    # Normalize scalar attrs to one-element lists so everything zips below.
    if type(training_file) is str:
        training_file = [training_file]
    if type(emu_type) is str:
        emu_type = [emu_type]
    assert len(emu_type) == len(training_file)
    fixed_params = {} if fixed_params is None else fixed_params
    if type(fixed_params) is dict:
        # One shared dict of fixed params: replicate it per emulator.
        fixed_params = [fixed_params for e in emu_type]
    else:
        assert len(fixed_params) == len(emu_type)
    # --- Observable configuration ---
    assert 'obs' in f.attrs.keys(), "No obs info in config file."
    obs_cfg = literal_eval(f.attrs['obs'])
    rbins = obs_cfg['rbins']
    obs = obs_cfg['obs']
    if type(obs) is str:
        obs = [obs]
    if type(rbins[0]) is list: # is list of list
        rbins = [np.array(r) for r in rbins] # to numpy array
        assert len(rbins) == len(obs), "not equal number of r_bins to obs"
    else:
        # One shared set of bins; replicate it per observable.
        rbins = np.array(rbins)
        rbins = [rbins for _ in xrange(len(obs))]
    # Bin centers: midpoints of adjacent bin edges.
    rpoints = [(rb[1:]+rb[:-1])/2.0 for rb in rbins]
    y = f['data'][()].flatten()
    cov = f['cov'][()]
    #print y.shape
    # --- Build one emulator per observable, trimming y and cov down to the
    #     scale bins each emulator actually covers ---
    emus = []
    _rp = []
    _y = []
    init_idx = 0
    np.random.seed(seed)
    #print len(emu_type), len(training_file), len(rpoints), len(fixed_params)
    for emu_idx, (et, tf, rp, fp) in enumerate(zip(emu_type, training_file, rpoints, fixed_params)): # TODO iterate over the others?
        # TODO how will cic work with rmin?
        emu = emu_type_dict[et](tf, fixed_params = fp,
                                **emu_hps)
        emus.append(emu)
        orig_n_bins = len(rp)
        # Number of leading bins present in the data but not in the emulator.
        cut_n_bins = orig_n_bins - emu.n_bins
        _rp.append(np.array(rp[-emu.n_bins:]))
        #assert np.all(np.isclose(_rp[-1], emu.scale_bin_centers))
        #print cut_n_bins
        _y.append(y[(orig_n_bins)*emu_idx+cut_n_bins:(orig_n_bins)*emu_idx+ orig_n_bins])
        cov_idxs = np.ones((cov.shape[0],), dtype = bool)
        cov_idxs[init_idx:init_idx+cut_n_bins] = False # deselect the bins we're cutting
        # print 'y', y
        #print cov_idxs.shape, cov_idxs
        cov = cov[cov_idxs]
        cov = cov[:, cov_idxs]
        init_idx+= emu.n_bins
    rpoints = _rp
    y = np.hstack(_y)
    # --- Sampler configuration ---
    mcmc_type = 'normal' if ('mcmc_type' not in f.attrs or f.attrs['mcmc_type'] == 'None') else f.attrs['mcmc_type']
    if mcmc_type == 'normal':
        nwalkers, nsteps = f.attrs['nwalkers'], f.attrs['nsteps']
    elif mcmc_type=='nested':
        nlive = f.attrs['nlive']
        dlogz = float(f.attrs['dlogz']) if 'dlogz' in f.attrs else 0.1
        if dlogz is None:
            dlogz = 0.1 # TODO will this break with restart?
    else:
        raise NotImplementedError("Only 'normal' and 'nested' mcmc_type is valid.")
    nburn, seed, fixed_params = f.attrs['nburn'], f.attrs['seed'], f.attrs['chain_fixed_params']
    nburn = 0 if nburn is None else nburn
    seed = int(time()) if seed is None else seed
    fixed_params = {} if fixed_params is None else fixed_params
    if type(fixed_params) is str:
        try:
            fixed_params = literal_eval(fixed_params)
        except ValueError: #malformed string, can't be eval'd
            pass
    # The strings 'HOD'/'cosmo' are shorthand for "hold that whole parameter
    # group fixed at the values recorded in the sim config".
    if fixed_params and type(fixed_params) is str:
        assert fixed_params in {'HOD', 'cosmo'}, "Invalied fixed parameter value."
        assert 'sim' in f.attrs.keys(), "No sim information in config file."
        sim_cfg = literal_eval(f.attrs['sim'])
        if fixed_params == 'HOD':
            fixed_params = sim_cfg['hod_params']
        else:
            assert 'cosmo_params' in sim_cfg, "Fixed cosmology requested, but the values of the cosmological\"" \
                                              "params were not specified. Please add them to the sim config."
            fixed_params = sim_cfg['cosmo_params']
    elif "HOD" in fixed_params:
        assert 'sim' in f.attrs.keys(), "No sim information in config file."
        sim_cfg = literal_eval(f.attrs['sim'])
        del fixed_params['HOD']
        fixed_params.update(sim_cfg['hod_params'])
        if 'logMmin' in fixed_params:
            del fixed_params['logMmin']
    elif "cosmo" in fixed_params:
        assert 'sim' in f.attrs.keys(), "No sim information in config file."
        sim_cfg = literal_eval(f.attrs['sim'])
        assert 'cosmo_params' in sim_cfg, "Fixed cosmology requested, but the values of the cosmological\"" \
                                          "params were not specified. Please add them to the sim config."
        del fixed_params['cosmo']
        fixed_params.update(sim_cfg['cosmo_params'])
    #TODO resume from previous, will need to access the written chain
    # Free parameters: whatever the (last constructed) emulator exposes that
    # has not been fixed above.
    param_names = [pname for pname in emu.get_param_names() if pname not in fixed_params]
    if 'param_names' not in f.attrs.keys():
        f.attrs['param_names'] = param_names
    # --- Prepare the resizable output datasets ---
    if 'chain' in f.keys() and not restart:
        del f['chain']#[:,:] = chain
    # TODO anyway to make sure all shpaes are right?
    #chain_dset = f['chain']
    if not restart:
        f.create_dataset('chain', (0, len(param_names)), chunks = True, compression = 'gzip', maxshape = (None, len(param_names)))
        #lnprob = np.zeros((nwalkers*nsteps,))
        if 'lnprob' in f.keys():
            del f['lnprob']#[:] = lnprob
        # TODO anyway to make sure all shpaes are right?
        #lnprob_dset = f['lnprob']
        if mcmc_type == 'normal':
            f.create_dataset('lnprob', (0,) , chunks = True, compression = 'gzip', maxshape = (None,))
        else:
            f.create_dataset('evidence', (0,) , chunks = True, compression = 'gzip', maxshape = (None,))
        pos0 = None
    else:
        # Resume: start the walkers from the last stored step.
        # NOTE(review): nwalkers is only bound when mcmc_type == 'normal';
        # restarting a 'nested' run would raise NameError here — confirm.
        pos0 = f['chain'][-nwalkers:]# get last step
        nsteps = nsteps - len(f['chain'])/nwalkers # don't add more steps to the end
        if nsteps<=0:
            return
        # TODO add a way to start a new chain from the end of an old one
        #print 'hi'
        print 'Resuming with nsteps=%d remaining'%nsteps
    f.close()
    np.random.seed(seed)
    # --- Run the sampler, appending each step's results to the hdf5 file.
    #     The file is reopened and closed every step so a partial chain
    #     survives a crash. ---
    if mcmc_type == 'normal':
        for step, pos in enumerate(run_mcmc_iterator(emus, param_names, y, cov, rpoints,\
                                                     fixed_params=fixed_params, nwalkers=nwalkers,\
                                                     nsteps=nsteps, nburn=nburn, return_lnprob=True, ncores = 16,
                                                     pos0=pos0)):
            f = h5py.File(config_fname, 'r+')
            #f.swmr_mode = True
            chain_dset, like_dset = f['chain'], f['lnprob']
            l = len(chain_dset)
            chain_dset.resize((l+nwalkers), axis = 0)
            like_dset.resize((l+nwalkers), axis = 0)
            chain_dset[-nwalkers:] = pos[0]
            like_dset[-nwalkers:] = pos[1]
            f.close()
    else:
        for step, pos in enumerate(run_nested_mcmc(emus, param_names, y, cov, rpoints,\
                                                   fixed_params=fixed_params, nlive=nlive,\
                                                   dlogz=dlogz, ncores = 16)):
            size = pos.shape[0]
            f = h5py.File(config_fname, 'r+')
            #f.swmr_mode = True
            chain_dset, ev_dset = f['chain'], f['evidence']
            l = len(chain_dset)
            chain_dset.resize((l + size), axis=0)
            ev_dset.resize((l + size), axis=0)
            #print pos.shape
            # Last column of each nested-sampling row is the evidence term.
            chain_dset[-size:] = pos[:, :-1]
            ev_dset[-size:] = pos[:,-1]
            f.close()
if __name__ == "__main__":
    # Command-line entry point: accepts either the prepared hdf5 config
    # directly, or a YAML file that points at it via its 'fname' key.
    import argparse
    parser = argparse.ArgumentParser(description='Run chains with a YAML or HDF5 file for the chain')
    parser.add_argument('fname', type=str, help='Config YAML File or output HDF5 file')
    parser.add_argument('--restart', action='store_true')
    args = vars(parser.parse_args())
    fname = args['fname']
    suffix = fname.split('.')[-1]
    restart = args['restart']
    if suffix == 'hdf5' or suffix == 'h5':
        pass  # already the hdf5 config; use as-is
    elif suffix == 'yaml': # parse yaml file
        import yaml
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; prefer yaml.safe_load for untrusted files.
        with open(fname, 'r') as ymlfile:
            cfg = yaml.load(ymlfile)
        # The YAML only carries the path of the real hdf5 config.
        filename = cfg['fname']
        fname = filename
    else:
        raise IOError("Invalid input filetype")
    run_mcmc_config(fname, restart=restart)
| mclaughlin6464/pearce | pearce/inference/run_mcmc.py | Python | mit | 26,088 | [
"Gaussian"
] | 0d6cf0a4c03cae33b8f16f1993d5b6482ff6e802acd7034f4e1d7d20a32b5db2 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing a string.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import StringOption
#-------------------------------------------------------------------------
#
# NoteOption class
#
#-------------------------------------------------------------------------
class NoteOption(StringOption):
    """
    This class describes an option that allows a note from the
    database to be selected.

    The option's value is the Gramps ID of the selected note
    (for example "n11"); it starts out as the empty string until
    a note has been chosen.
    """
    def __init__(self, label):
        """
        :param label: A friendly label to be applied to this option.
            Example: "Title Note"
        :type label: string
        :return: nothing
        """
        # Initial value is "" (no note selected yet).
        StringOption.__init__(self, label, "")
| pmghalvorsen/gramps_branch | gramps/gen/plug/menu/_note.py | Python | gpl-2.0 | 1,777 | [
"Brian"
] | 2b38d31c86a1156f0c9ea41b8853f99e977908e537ab2a94cc00e8a041d88092 |
#!/usr/bin/env python
########################################################################
# File : dirac-agent
# Author : Adria Casajus, Andrei Tsaregorodtsev, Stuart Paterson
########################################################################
"""
This is a script to launch DIRAC agents. Mostly internal.
"""
import sys
from DIRAC import gLogger
from DIRAC.Core.Base.AgentReactor import AgentReactor
from DIRAC.Core.Utilities.DErrno import includeExtensionErrors
from DIRAC.Core.Base.Script import Script
@Script()
def main():
    """Configure and launch the DIRAC agent named on the command line.

    Exits with status 1 when user configuration cannot be loaded, and
    with status 2 when the agent module fails to load.
    """
    Script.registerArgument(["Agent: specify which agent to run"])
    positional = Script.getPositionalArgs(group=True)
    cfg = Script.localCfg
    agent_name = positional[0]
    # Point the local configuration at this agent and set defaults.
    cfg.setConfigurationForAgent(agent_name)
    cfg.addMandatoryEntry("/DIRAC/Setup")
    for option, value in (
        ("/DIRAC/Security/UseServerCertificate", "yes"),
        ("LogLevel", "INFO"),
        ("LogColor", True),
    ):
        cfg.addDefaultEntry(option, value)
    load_result = cfg.loadUserData()
    if not load_result["OK"]:
        gLogger.error("There were errors when loading configuration", load_result["Message"])
        sys.exit(1)
    # Pull in error definitions from any installed DIRAC extensions.
    includeExtensionErrors()
    reactor = AgentReactor(positional[0])
    modules_result = reactor.loadAgentModules(positional)
    if not modules_result["OK"]:
        gLogger.error("Error while loading agent module", modules_result["Message"])
        sys.exit(2)
    reactor.go()


if __name__ == "__main__":
    main()
| DIRACGrid/DIRAC | src/DIRAC/Core/scripts/dirac_agent.py | Python | gpl-3.0 | 1,520 | [
"DIRAC"
] | bb5335a5da0676c83f9bb1a023111c05424bc3527768153d1ea72f5ed752a2ea |
# Cycles Mineways setup
# Version 1.3.0, 5/28/16
# Copyright © 2016
# Please send suggestions or report bugs at https://github.com/JMY1000/CyclesMineways/
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation under version 3 of the License.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details at http://www.gnu.org/licenses/gpl-3.0.en.html
# Distributed with Mineways, http://mineways.com
# To use the script within Blender, for use with the Cycles renderer:
# Open Blender and import the obj file created by Mineways.
# Change any window to the text editor.
# Alternatively, Go to the top of the window where it says "Default",
# click on the screen layout button left to the word "Default" and pick "Scripting".
# Click "Open" at the bottom of the text window.
# Go to the directory where this file, "CyclesMineways.py", is and select it.
# You should now see some text in the text window.
# Alternatively, you can click "new" then paste in the text.
# To apply this script, click on the "Run Script" button at the bottom of the text window.
# OPTIONAL: To see that the script's print output, you may want to turn on the terminal/console.
# It is not critical to see this window, but it might give you a warm and fuzzy feeling to know that the script has worked.
# It also helps provide debug info if something goes wrong.
# For Windows:
# From the upper left of your window select "Window" and then "Toggle System Console".
# For OS X:
# Find your application, right click it, hit "Show Package Contents".
# Navigate to Contents/MacOS/blender Launch blender this way, this will show the terminal.
# For Linux:
# Run Blender through the terminal.
#importing the Blender Python library
import bpy
print("Libraries imported")
# CONSTANTS

# PREFIX can stay as "" if you are importing into project that is not massive and has no other imported mineways worlds.
# If the .blend does not meet these requirements, you must set PREFIX to allow this script to know what it is working with.
# Set the PREFIX to the name of the file it uses (eg: a castle.obj file uses PREFIX = "castle")
PREFIX = ""

# USER_INPUT_SCENE controls what scenes Blender will apply this script's functionality to.
# If this list has scenes, the script only use those scenes to work with;
# otherwise, it will use all scenes
# example: USER_INPUT_SCENE = ["scene","scene2","randomScene123"]
USER_INPUT_SCENE = []

# WATER_SHADER_TYPE controls the water shader that will be used.
# Use 0 for a solid block shader.
# Use 1 for a semi-transparent flat shader.
# Use 2 for a small, sharp waves shader.
# Use 3 for a wavy shader.
# For a more detailed explanation with pictures of each water shader type, visit: https://github.com/JMY1000/CyclesMineways/wiki/Water-Shader-Types
WATER_SHADER_TYPE = 1

# TIME_OF_DAY controls the time of day.
# If TIME_OF_DAY is between 6.5 and 19.5 (crossing 12), the daytime shader will be used.
# If TIME_OF_DAY is between 19.5 and 6.5 (crossing 24), the nighttime shader will be used.
# NOTE: The decimal is not in minutes, and is a fraction (ex. 12:30 is 12.50).
# NOTE: This currently only handles day and night
TIME_OF_DAY = 12.00

# DISPLACE_WOOD controls whether virtual displacement (changes normals for illusion of roughness) for wooden plank blocks is used.
# NOTE: This currently only works for oak wood planks.
# NOTE: This can only be True or False
DISPLACE_WOOD = False

# STAINED_GLASS_COLOR controls how coloured the light that passed through stained glass is.
# 0 means light passed through unchanged
# 1 means all the light is changed to the glass's color (not recommended)
STAINED_GLASS_COLOR = 0.4

# The three lists below classify Minecraft block names so the script can pick
# the right shader (transparent, light-emitting, or both) for each material.
# List of transparent blocks
transparentBlocks = ["Acacia_Leaves","Dark_Oak_Leaves","Acacia_Door","Activator_Rail","Bed","Beetroot_Seeds","Birch_Door","Brewing_Stand","Cactus","Carrot","Carrots","Cauldron","Chorus_Flower","Chorus_Flower_Dead","Chorus_Plant","Cobweb",
    "Cocoa","Crops","Dandelion","Dark_Oak_Door","Dead_Bush","Detector_Rail","Enchantment_Table","Glass","Glass_Pane","Grass","Iron_Bars","Iron_Door","Iron_Trapdoor","Jack_o'Lantern","Jungle_Door","Large_Flowers",
    "Leaves","Melon_Stem","Monster_Spawner","Nether_Portal","Nether_Wart","Oak_Leaves","Oak_Sapling","Poppy","Potato","Potatoes","Powered_Rail","Pumpkin_Stem","Rail","Red_Mushroom",
    "Redstone_Comparator_(inactive)","Redstone_Torch_(inactive)","Repeater_(inactive)","Sapling","Spruce_Door","Stained_Glass_Pane","Sugar_Cane","Sunflower","Tall_Grass","Trapdoor","Vines","Wheat","Wooden_Door"]
# List of light emitting blocks
lightBlocks = ["Daylight_Sensor","End_Gateway","End_Portal","Ender_Chest","Flowing_Lava","Glowstone","Inverted_Daylight_Sensor","Lava","Magma_Block","Redstone_Lamp_(active)","Stationary_Lava","Sea_Lantern"]
# List of light emitting and transparent block
lightTransparentBlocks = ["Beacon","Brown_Mushroom","Dragon_Egg","Endframe","End_Rod","Fire","Powered_Rail_(active)","Redstone_Comparator_(active)","Redstone_Torch_(active)","Repeater_(active)","Torch"]
#SHADERS
def Setup_Node_Tree(material):
    """Switch *material* to node mode, purge its existing shader nodes,
    and hand back (nodes, node_tree) for the caller to build on."""
    material.use_nodes = True
    tree = material.node_tree
    node_collection = tree.nodes
    # Clear out whatever nodes the material came with.
    # NOTE(review): removing while iterating can skip entries on list-like
    # collections — behavior preserved from the original implementation.
    for existing in node_collection:
        node_collection.remove(existing)
    return node_collection, tree
def Normal_Shader(material, rgba_image):
    """Give *material* a plain opaque look: the RGBA texture feeds a
    diffuse BSDF which is wired straight into the material output."""
    nodes, node_tree = Setup_Node_Tree(material)
    # Texture node sampling the supplied RGBA image with no filtering.
    tex = nodes.new('ShaderNodeTexImage')
    tex.image = rgba_image
    tex.interpolation = 'Closest'
    tex.location = (-300, 300)
    tex.label = "RGBA"
    # Diffuse BSDF colored by the texture.
    bsdf = nodes.new('ShaderNodeBsdfDiffuse')
    bsdf.location = (0, 300)
    # Final material output.
    out = nodes.new('ShaderNodeOutputMaterial')
    out.location = (300, 300)
    # Wire: texture color -> diffuse -> surface output.
    graph_links = node_tree.links
    graph_links.new(tex.outputs["Color"], bsdf.inputs["Color"])
    graph_links.new(bsdf.outputs["BSDF"], out.inputs["Surface"])
def Transparent_Shader(material):
    """Diffuse/transparent mix shader: the texture's alpha channel decides
    per-pixel between a transparent BSDF and a diffuse BSDF (for blocks
    like glass, leaves and rails)."""
    nodes, node_tree = Setup_Node_Tree(material)
    #Create the output node
    output_node=nodes.new('ShaderNodeOutputMaterial')
    output_node.location=(300,300)
    #Create the mix shader
    mix_node=nodes.new('ShaderNodeMixShader')
    mix_node.location=(0,300)
    #Create the diffuse node
    diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
    diffuse_node.location=(-300,400)
    #Create the transparent node
    transparent_node=nodes.new('ShaderNodeBsdfTransparent')
    transparent_node.location=(-300,0)
    #Create the rgba node
    rgba_node=nodes.new('ShaderNodeTexImage')
    rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
    rgba_node.interpolation=('Closest')
    rgba_node.location=(-600,300)
    rgba_node.label = "RGBA"
    #Link the nodes: alpha drives the mix factor (0 = transparent, 1 = diffuse)
    links=node_tree.links
    links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
    links.new(rgba_node.outputs["Alpha"],mix_node.inputs["Fac"])
    links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
    links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
    links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Light_Emiting_Shader(material):
    """Emission shader for solid glowing blocks (glowstone, lava, ...).

    Direct (camera) rays see the texture color emitted as-is; indirect
    (diffuse) rays see a brightened, falloff-scaled emission so the block
    actually lights its surroundings.  A Light Path node picks between
    the two per ray type."""
    nodes, node_tree = Setup_Node_Tree(material)
    #Create the output node
    output_node=nodes.new('ShaderNodeOutputMaterial')
    output_node.location=(600,300)
    #Create the diffuse deciding mix node
    diffuse_mix_node=nodes.new('ShaderNodeMixShader')
    diffuse_mix_node.location=(300,300)
    #Create the Light Path Node
    light_path_node=nodes.new('ShaderNodeLightPath')
    light_path_node.location=(0,600)
    #Create the diffuse emission
    indirect_emission_node=nodes.new('ShaderNodeEmission')
    indirect_emission_node.location=(0,100)
    #Create the Light Falloff node for indirect emission
    light_falloff_node=nodes.new('ShaderNodeLightFalloff')
    light_falloff_node.location=(-300,0)
    light_falloff_node.inputs[0].default_value=200 #sets strength of light
    light_falloff_node.inputs[1].default_value=0.03 #sets smooth level of light
    #Create the HSV node to brighten the light
    hsv_node=nodes.new('ShaderNodeHueSaturation')
    hsv_node.location=(-300,200)
    hsv_node.inputs["Value"].default_value=3 # brightens the color for better lighting
    #Create the direct emission node
    direct_emission_node=nodes.new('ShaderNodeEmission')
    direct_emission_node.location=(0,300)
    #Create the rgba node
    rgba_node=nodes.new('ShaderNodeTexImage')
    rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
    rgba_node.interpolation=('Closest')
    rgba_node.location=(-600,300)
    rgba_node.label = "RGBA"
    #Link the nodes
    links=node_tree.links
    links.new(rgba_node.outputs["Color"],direct_emission_node.inputs["Color"])
    links.new(rgba_node.outputs["Color"],hsv_node.inputs["Color"])
    links.new(hsv_node.outputs["Color"],indirect_emission_node.inputs["Color"])
    links.new(light_falloff_node.outputs[0],indirect_emission_node.inputs[1]) #connects quadratic output to emission strength
    links.new(indirect_emission_node.outputs["Emission"],diffuse_mix_node.inputs[2])
    links.new(direct_emission_node.outputs["Emission"],diffuse_mix_node.inputs[1])
    links.new(light_path_node.outputs[2],diffuse_mix_node.inputs["Fac"]) #links "is diffuse ray" to factor of mix node
    links.new(diffuse_mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Transparent_Emiting_Shader(material):
    """Emission shader for glowing blocks with transparent texels (torches,
    end rods, ...).

    Direct rays get a texture-colored emission mixed with transparency by
    the alpha channel; indirect (diffuse) rays get a fixed warm-colored,
    falloff-scaled emission so the block lights its surroundings."""
    nodes, node_tree = Setup_Node_Tree(material)
    #Create the output node
    output_node=nodes.new('ShaderNodeOutputMaterial')
    output_node.location=(600,300)
    #Create the indirect-direct mix shader
    indirect_mix_node=nodes.new('ShaderNodeMixShader')
    indirect_mix_node.location=(300,300)
    #Create the mix shader
    mix_node=nodes.new('ShaderNodeMixShader')
    mix_node.location=(0,300)
    #Create the Light Path node to check if light is indirect
    light_path_node=nodes.new('ShaderNodeLightPath')
    light_path_node.location=(0,800)
    #Create the Light Falloff node for indirect emission
    light_falloff_node=nodes.new('ShaderNodeLightFalloff')
    light_falloff_node.location=(-300,600)
    light_falloff_node.inputs[0].default_value=80 #sets strength of light
    light_falloff_node.inputs[1].default_value=0.03 #sets smooth level of light
    #Create the indirect emission node
    indirect_emission_node=nodes.new('ShaderNodeEmission')
    indirect_emission_node.location=(0,500)
    indirect_emission_node.inputs["Color"].default_value = (1,1,0.56,1)
    #Only tested color on torches, needs testing on other transparent emitters to see if it looks weird
    #Create the direct emission node
    emission_node=nodes.new('ShaderNodeEmission')
    emission_node.location=(-300,400)
    #Create the transparent node
    transparent_node=nodes.new('ShaderNodeBsdfTransparent')
    transparent_node.location=(-300,0)
    #Create the rgba node
    rgba_node=nodes.new('ShaderNodeTexImage')
    rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
    rgba_node.interpolation=('Closest')
    rgba_node.location=(-600,300)
    rgba_node.label = "RGBA"
    #Link the nodes
    links=node_tree.links
    links.new(rgba_node.outputs["Color"],emission_node.inputs["Color"])
    links.new(rgba_node.outputs["Alpha"],mix_node.inputs["Fac"])
    links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
    links.new(emission_node.outputs["Emission"],mix_node.inputs[2])
    links.new(mix_node.outputs["Shader"],indirect_mix_node.inputs[1])
    links.new(light_falloff_node.outputs["Quadratic"],indirect_emission_node.inputs["Strength"])
    links.new(indirect_emission_node.outputs["Emission"],indirect_mix_node.inputs[2])
    links.new(light_path_node.outputs["Is Diffuse Ray"],indirect_mix_node.inputs["Fac"])
    links.new(indirect_mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Lily_Pad_Shader(material):
    """Overlay a green diffuse pad on top of an already-built water shader.

    Texels whose blue channel exceeds green keep the water look; greener
    texels (the pad itself) are rendered diffuse.  Must run after one of
    the Stationary_Water_Shader_* functions on the same material."""
    #A water setup shader should have ran before this
    #Set the variable node_tree to be the material's node tree and variable nodes to be the node tree's nodes
    node_tree=material.node_tree
    nodes=material.node_tree.nodes
    # Find the existing output and image-texture nodes left by the water shader.
    output = None
    image_node = None
    for node in nodes:
        if node.name=="Material Output":
            output=node
        if node.name=="Image Texture": #assumes only 1 image input
            image_node=node
    output.location = (600,300)
    # The shader currently feeding the output is the water shader's head node.
    water_output = output.inputs[0].links[0].from_node
    mix_node = nodes.new('ShaderNodeMixShader')
    mix_node.location=(300,500)
    diffuse_node = nodes.new('ShaderNodeBsdfDiffuse')
    diffuse_node.location=(0,500)
    RGB_splitter_node = nodes.new('ShaderNodeSeparateRGB')
    RGB_splitter_node.location=(-300,700)
    less_than_node = nodes.new('ShaderNodeMath')
    less_than_node.location=(0,700)
    less_than_node.operation="LESS_THAN"
    # Wire: G < B selects water (mix fac 1), otherwise the diffuse pad.
    links=node_tree.links
    links.new(mix_node.outputs[0],output.inputs[0])
    links.new(diffuse_node.outputs[0],mix_node.inputs[1])
    links.new(water_output.outputs[0],mix_node.inputs[2]) #making massive assumption that output of water is in first output
    links.new(less_than_node.outputs[0],mix_node.inputs[0])
    links.new(image_node.outputs[0],diffuse_node.inputs[0])
    links.new(RGB_splitter_node.outputs[2],less_than_node.inputs[1])
    links.new(RGB_splitter_node.outputs[1],less_than_node.inputs[0])
    links.new(image_node.outputs[0],RGB_splitter_node.inputs[0])
def Stained_Glass_Shader(material):
    """Stained-glass shader: a colored transparent BSDF mixed with diffuse
    by the texture's alpha.

    Shadow and diffuse rays passing through are tinted toward the glass
    color by STAINED_GLASS_COLOR (0 = untinted light, 1 = fully tinted)."""
    nodes, node_tree = Setup_Node_Tree(material)
    #Create the output node
    output_node=nodes.new('ShaderNodeOutputMaterial')
    output_node.location=(300,300)
    #Create the mix shader
    mix_node=nodes.new('ShaderNodeMixShader')
    mix_node.location=(0,300)
    #Create the transparent node
    transparent_node=nodes.new('ShaderNodeBsdfTransparent')
    transparent_node.location=(-300,400)
    #Create shadow(math)-color(HSV) mix node
    shadow_color_mix_node=nodes.new('ShaderNodeMixRGB')
    shadow_color_mix_node.location=(-600,400)
    shadow_color_mix_node.inputs[1].default_value=(1,1,1,0)
    #Create HSV node because for some reason color from the RGBA node in transparent nodes is super dark
    hsv_node=nodes.new('ShaderNodeHueSaturation')
    hsv_node.location=(-900,280)
    hsv_node.inputs[1].default_value=2
    hsv_node.inputs[2].default_value=8
    #Create math(multiply, clamped) node
    multiply_node=nodes.new('ShaderNodeMath')
    multiply_node.location=(-900,450)
    multiply_node.operation=('MULTIPLY')
    multiply_node.use_clamp=True
    multiply_node.inputs[1].default_value=STAINED_GLASS_COLOR
    #Create math(add, clamped) node
    add_node=nodes.new('ShaderNodeMath')
    add_node.location=(-1200,450)
    add_node.operation=('ADD')
    add_node.use_clamp=True
    #Create the lightpath node
    light_path_node=nodes.new('ShaderNodeLightPath')
    light_path_node.location=(-1500,450)
    #Create the diffuse node
    diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
    diffuse_node.location=(-900,0)
    #Create the rgba node
    rgba_node=nodes.new('ShaderNodeTexImage')
    rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
    rgba_node.interpolation=('Closest')
    rgba_node.location=(-1200,100)
    rgba_node.label = "RGBA"
    #Link the nodes
    links=node_tree.links
    links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
    links.new(rgba_node.outputs["Alpha"],mix_node.inputs["Fac"])
    links.new(rgba_node.outputs["Color"],hsv_node.inputs["Color"])
    links.new(light_path_node.outputs[1],add_node.inputs[0]) #connects Is Shadow Ray to add node
    links.new(light_path_node.outputs[2],add_node.inputs[1]) #connects Is Diffuse Ray to add node (original comment said Shadow; output index 2 is the diffuse flag)
    links.new(add_node.outputs[0],multiply_node.inputs[0])
    links.new(multiply_node.outputs["Value"],shadow_color_mix_node.inputs["Fac"])
    links.new(hsv_node.outputs["Color"],shadow_color_mix_node.inputs[2])
    links.new(shadow_color_mix_node.outputs["Color"],transparent_node.inputs["Color"])
    links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
    links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
    links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Stationary_Water_Shader_1(material):
    """Flat semi-transparent water (WATER_SHADER_TYPE == 1).

    A diffuse/transparent blend is mixed against a slightly rough glossy
    reflection using a Fresnel factor at water's IOR (1.33)."""
    nodes, node_tree = Setup_Node_Tree(material)
    #Create the output node
    output_node=nodes.new('ShaderNodeOutputMaterial')
    output_node.location=(300,300)
    #Create the fresnel mix shader
    fresnel_mix_node=nodes.new('ShaderNodeMixShader')
    fresnel_mix_node.location=(0,300)
    #Create Fresnel node ior=1.33
    fresnel_node=nodes.new('ShaderNodeFresnel')
    fresnel_node.location=(-300,400)
    fresnel_node.inputs[0].default_value=1.33
    #Create the transparency-diffuse mixer
    mix_node=nodes.new('ShaderNodeMixShader')
    mix_node.location=(-300,300)
    mix_node.inputs[0].default_value=0.4
    #Create the diffuse node
    diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
    diffuse_node.location=(-600,300)
    #Create the transparent node
    transparent_node=nodes.new('ShaderNodeBsdfTransparent')
    transparent_node.location=(-600,180)
    #Create the glossy shader
    glossy_node=nodes.new('ShaderNodeBsdfGlossy')
    glossy_node.location=(-600,100)
    glossy_node.inputs[1].default_value=0.02
    #Create the rgba node
    rgba_node=nodes.new('ShaderNodeTexImage')
    rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
    rgba_node.interpolation=('Closest')
    rgba_node.location=(-900,300)
    rgba_node.label = "RGBA"
    #Link the nodes
    links=node_tree.links
    links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
    links.new(rgba_node.outputs["Color"],glossy_node.inputs["Color"])
    links.new(transparent_node.outputs["BSDF"],mix_node.inputs[2])
    links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[1])
    links.new(fresnel_node.outputs["Fac"],fresnel_mix_node.inputs["Fac"])
    links.new(mix_node.outputs["Shader"],fresnel_mix_node.inputs[1])
    links.new(glossy_node.outputs["BSDF"],fresnel_mix_node.inputs[2])
    links.new(fresnel_mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Stationary_Water_Shader_2(material):
    """Small, sharp waves water (WATER_SHADER_TYPE == 2).

    Combines diffuse/transparent and refraction/glossy blends through a
    Fresnel mix, and drives the output's Displacement from two summed
    Voronoi textures to fake a rippled surface."""
    nodes, node_tree = Setup_Node_Tree(material)
    #Create the output node
    output_node=nodes.new('ShaderNodeOutputMaterial')
    output_node.location=(600,300)
    #Create the fresnel mix shader
    fresnel_mix_node=nodes.new('ShaderNodeMixShader')
    fresnel_mix_node.location=(300,300)
    #Create Fresnel node
    fresnel_node=nodes.new('ShaderNodeFresnel')
    fresnel_node.location=(0,500)
    fresnel_node.inputs[0].default_value=1.33
    #Create the mix+transparent mix shader
    mix_node_transparent_mix=nodes.new('ShaderNodeMixShader')
    mix_node_transparent_mix.location=(0,300)
    mix_node_transparent_mix.inputs[0].default_value=0.18
    #Create the refraction-glossy mix shader
    mix_node_ref_glossy=nodes.new('ShaderNodeMixShader')
    mix_node_ref_glossy.location=(-300,0)
    mix_node_ref_glossy.inputs[0].default_value=0.72
    #Create Diffuse-transparent mix shader
    diffuse_transparent_mix_shader=nodes.new('ShaderNodeMixShader')
    diffuse_transparent_mix_shader.location=(-300,450)
    diffuse_transparent_mix_shader.inputs["Fac"].default_value = 0.5
    #Create the transparent node
    transparent_node=nodes.new('ShaderNodeBsdfTransparent')
    transparent_node.location=(-600,400)
    #Create the diffuse node
    diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
    diffuse_node.location=(-600,550)
    #Create the glossy node
    glossy_node=nodes.new('ShaderNodeBsdfGlossy')
    glossy_node.location=(-600,0)
    glossy_node.inputs["Roughness"].default_value=0.005
    #Create the refraction node
    refraction_node=nodes.new('ShaderNodeBsdfRefraction')
    refraction_node.location=(-600,300)
    refraction_node.inputs[2].default_value=1.33
    #Create the rgba node
    rgba_node=nodes.new('ShaderNodeTexImage')
    rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
    rgba_node.interpolation=('Closest')
    rgba_node.location=(-900,300)
    rgba_node.label = "RGBA"
    #Create the first multiply node (scales the wave height fed to Displacement)
    multiply_node=nodes.new('ShaderNodeMath')
    multiply_node.location=(0,-300)
    multiply_node.operation=('MULTIPLY')
    multiply_node.inputs[1].default_value=0.05
    #Create the add node
    add_node=nodes.new('ShaderNodeMath')
    add_node.location=(-300,-300)
    add_node.operation=('ADD')
    #Create the first voronoi texture
    voronoi_node=nodes.new('ShaderNodeTexVoronoi')
    voronoi_node.location=(-600,-300)
    voronoi_node.inputs[1].default_value=20
    #Create the second multiply node
    multiply_node_two=nodes.new('ShaderNodeMath')
    multiply_node_two.location=(-600,-600)
    multiply_node_two.operation=('MULTIPLY')
    #Create the second voronoi texture
    voronoi_node_two=nodes.new('ShaderNodeTexVoronoi')
    voronoi_node_two.location=(-900,-600)
    voronoi_node_two.inputs[1].default_value=30
    #Create the texture coordinate node
    texture_coordinate_node=nodes.new('ShaderNodeTexCoord')
    texture_coordinate_node.location=(-1200,-300)
    #Link the nodes
    links=node_tree.links
    links.new(fresnel_mix_node.outputs["Shader"],output_node.inputs["Surface"])
    links.new(fresnel_node.outputs["Fac"],fresnel_mix_node.inputs[0])
    links.new(mix_node_transparent_mix.outputs["Shader"],fresnel_mix_node.inputs[1])
    links.new(diffuse_transparent_mix_shader.outputs["Shader"],mix_node_transparent_mix.inputs[1])
    links.new(diffuse_node.outputs["BSDF"],diffuse_transparent_mix_shader.inputs[1])
    links.new(transparent_node.outputs["BSDF"],diffuse_transparent_mix_shader.inputs[2])
    links.new(mix_node_ref_glossy.outputs["Shader"],mix_node_transparent_mix.inputs[2])
    links.new(mix_node_ref_glossy.outputs["Shader"],fresnel_mix_node.inputs[2])
    links.new(refraction_node.outputs["BSDF"],mix_node_ref_glossy.inputs[1])
    links.new(glossy_node.outputs["BSDF"],mix_node_ref_glossy.inputs[2])
    links.new(rgba_node.outputs["Color"],refraction_node.inputs["Color"])
    links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
    links.new(multiply_node.outputs["Value"],output_node.inputs["Displacement"])
    links.new(add_node.outputs["Value"],multiply_node.inputs[0])
    links.new(voronoi_node.outputs["Fac"],add_node.inputs[0])
    links.new(multiply_node_two.outputs["Value"],add_node.inputs[1])
    links.new(voronoi_node_two.outputs["Fac"],multiply_node_two.inputs[0])
    links.new(texture_coordinate_node.outputs["Object"],voronoi_node.inputs["Vector"])
    links.new(texture_coordinate_node.outputs["Object"],voronoi_node_two.inputs["Vector"])
def Stationary_Water_Shader_3(material):
    """Wavey water shader: a fresnel-weighted transparent/glossy mix with a
    wave-texture-driven displacement output.

    Selected in main() when WATER_SHADER_TYPE == 3 ("Use Wavey Water").
    """
    nodes, node_tree = Setup_Node_Tree(material)
    #Create the output node
    output_node=nodes.new('ShaderNodeOutputMaterial')
    output_node.location=(300,300)
    #Create the first mix shader node (transparent vs glossy, weighted by fresnel)
    mix_node=nodes.new('ShaderNodeMixShader')
    mix_node.location=(-300,300)
    #Create the clamped add node
    add_node=nodes.new('ShaderNodeMath')
    add_node.location=(-600,600)
    add_node.operation=('ADD')
    add_node.use_clamp=True # keep the mix factor within [0, 1]
    # NOTE(review): add_node.inputs[1] is never linked or set here, so the
    # second addend stays at the socket's default — confirm this is intended.
    #Create the fresnel node
    fresnel_node=nodes.new('ShaderNodeFresnel')
    fresnel_node.location=(-900,600)
    fresnel_node.inputs["IOR"].default_value=1.33 # index of refraction of water
    #Create the transparent shader node
    transparent_node=nodes.new('ShaderNodeBsdfTransparent')
    transparent_node.location=(-600,400)
    #Create the glossy shader node
    glossy_node=nodes.new('ShaderNodeBsdfGlossy')
    glossy_node.location=(-600,300)
    glossy_node.inputs["Roughness"].default_value=0.02
    #Create the rgb mix shader (tints the glossy colour towards white)
    rgbmix_node=nodes.new('ShaderNodeMixRGB')
    rgbmix_node.location=(-900,300)
    rgbmix_node.inputs["Fac"].default_value=0.3
    rgbmix_node.inputs["Color2"].default_value=(1,1,1,1)
    #Create the rgba node (the packed Mineways terrain texture)
    rgba_node=nodes.new('ShaderNodeTexImage')
    rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
    rgba_node.interpolation=('Closest')
    rgba_node.location=(-1200,300)
    rgba_node.label = "RGBA"
    #Create the wave texture node (drives the surface displacement)
    wave_node=nodes.new('ShaderNodeTexWave')
    wave_node.location=(-1200,0)
    wave_node.inputs["Scale"].default_value=1.7
    wave_node.inputs["Distortion"].default_value=34
    wave_node.inputs["Detail"].default_value=5
    wave_node.inputs["Detail Scale"].default_value=5
    #Create the multiply node
    multiply_node=nodes.new('ShaderNodeMath')
    multiply_node.location=(-600,0)
    multiply_node.operation=('MULTIPLY')
    # NOTE(review): multiply_node.inputs[1] is never set here, so the wave
    # output is scaled by the socket's default value — confirm intent.
    #Link the nodes
    links=node_tree.links
    links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
    links.new(add_node.outputs["Value"],mix_node.inputs["Fac"])
    links.new(fresnel_node.outputs["Fac"],add_node.inputs[0])
    links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
    links.new(glossy_node.outputs["BSDF"],mix_node.inputs[2])
    links.new(rgbmix_node.outputs["Color"],glossy_node.inputs["Color"])
    links.new(rgba_node.outputs["Color"],rgbmix_node.inputs["Color1"])
    links.new(multiply_node.outputs["Value"],output_node.inputs["Displacement"])
    links.new(wave_node.outputs["Fac"],multiply_node.inputs[0])
def Flowing_Water_Shader(material):
    """Placeholder: flowing water currently only enables node-based shading;
    no node graph is built for it (main() also skips it with `pass`)."""
    material.use_nodes=True
def Slime_Shader(material):
    """Translucent slime: image-driven diffuse blended with a transparent BSDF.

    The mix shader's Fac input is left unset (socket default).
    """
    nodes, node_tree = Setup_Node_Tree(material)
    # Texture lookup feeding the diffuse colour
    image_node = nodes.new('ShaderNodeTexImage')
    image_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
    image_node.interpolation = ('Closest')
    image_node.location = (-600,300)
    image_node.label = "RGBA"
    # Shader nodes
    diffuse_bsdf = nodes.new('ShaderNodeBsdfDiffuse')
    diffuse_bsdf.location = (-300,300)
    transparent_bsdf = nodes.new('ShaderNodeBsdfTransparent')
    transparent_bsdf.location = (-300,0)
    blend = nodes.new('ShaderNodeMixShader')
    blend.location = (0,300)
    material_out = nodes.new('ShaderNodeOutputMaterial')
    material_out.location = (300,300)
    # Wiring: texture -> diffuse, then transparent/diffuse -> mix -> output
    wire = node_tree.links.new
    wire(image_node.outputs["Color"], diffuse_bsdf.inputs["Color"])
    wire(transparent_bsdf.outputs["BSDF"], blend.inputs[1])
    wire(diffuse_bsdf.outputs["BSDF"], blend.inputs[2])
    wire(blend.outputs["Shader"], material_out.inputs["Surface"])
def Ice_Shader(material):
    """Ice: image-driven diffuse blended with a transparent BSDF.

    The mix shader's Fac input is left unset (socket default).
    NOTE(review): this body is currently identical to Slime_Shader — a shared
    helper would remove the duplication.
    """
    nodes, node_tree = Setup_Node_Tree(material)
    #Create the output node
    output_node=nodes.new('ShaderNodeOutputMaterial')
    output_node.location=(300,300)
    #Create the mix shader
    mix_node=nodes.new('ShaderNodeMixShader')
    mix_node.location=(0,300)
    #Create the diffuse node
    diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
    diffuse_node.location=(-300,300)
    #Create the transparent node
    transparent_node=nodes.new('ShaderNodeBsdfTransparent')
    transparent_node.location=(-300,0)
    #Create the rgba node (packed Mineways terrain texture)
    rgba_node=nodes.new('ShaderNodeTexImage')
    rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
    rgba_node.interpolation=('Closest')
    rgba_node.location=(-600,300)
    rgba_node.label = "RGBA"
    #Link the nodes
    links=node_tree.links
    links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
    links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
    links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
    links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Sky_Day_Shader(world):
    """Daytime sky: procedural sky texture, value-boosted, into the world background."""
    nodes, node_tree = Setup_Node_Tree(world)
    # Build the chain right-to-left: output <- background <- HSV <- sky texture
    world_out = nodes.new('ShaderNodeOutputWorld')
    world_out.location = (300,300)
    bg = nodes.new('ShaderNodeBackground')
    bg.location = (0,300)
    hsv = nodes.new('ShaderNodeHueSaturation')
    hsv.inputs["Value"].default_value = 1.6 # Corrects the color value to be the same as Minecraft's sky
    hsv.location = (-300,300)
    sky = nodes.new('ShaderNodeTexSky')
    sky.location = (-600,300)
    # Wiring
    link = node_tree.links.new
    link(bg.outputs["Background"], world_out.inputs["Surface"])
    link(sky.outputs["Color"], hsv.inputs["Color"])
    link(hsv.outputs["Color"], bg.inputs["Color"])
def Sky_Night_Shader(world):
    """Night sky: a voronoi-cell star field mixed faintly with the procedural
    sky texture; diffuse rays see only a dim solid colour so the star field
    does not light the scene."""
    nodes, node_tree = Setup_Node_Tree(world)
    #Add the output node
    output_node=nodes.new('ShaderNodeOutputWorld')
    output_node.location=(600,300)
    #Add solid color background for diffuse textures
    solid_background_node=nodes.new('ShaderNodeBackground')
    solid_background_node.location=(0,150)
    solid_background_node.inputs["Color"].default_value=(0.1,0.1,0.1,1)
    #Add Light Path Node to make sure solid colour is only used for diffuse shaders
    light_path_node=nodes.new('ShaderNodeLightPath')
    light_path_node.location=(0,600)
    #Add mix shader to add the diffuse-only background
    diffuse_mixer_node=nodes.new('ShaderNodeMixShader')
    diffuse_mixer_node.location=(300,300)
    #Add the first background node
    background_node=nodes.new('ShaderNodeBackground')
    background_node.location=(0,300)
    #Create the rgb mix shader (mostly star field, a hint of sky colour)
    rgbmix_node=nodes.new('ShaderNodeMixRGB')
    rgbmix_node.location=(-200,300)
    rgbmix_node.inputs["Fac"].default_value=0.01
    #Add the sky texture node
    sky_node=nodes.new('ShaderNodeTexSky')
    sky_node.location=(-600,0)
    #Add the colorramp node (constant ramp switching at 0.03)
    colorramp_node=nodes.new('ShaderNodeValToRGB')
    colorramp_node.location=(-600,300)
    colorramp_node.color_ramp.interpolation=('CONSTANT')
    colorramp_node.color_ramp.elements[1].position=0.03
    # NOTE(review): color components are normally 0-1 floats; the 255 values
    # below clamp to 1.0 — confirm that full white/black is the intent.
    colorramp_node.color_ramp.elements[1].color=(0,0,0,255)
    colorramp_node.color_ramp.elements[0].color=(255,255,255,255)
    #Add the voronoi texture (cell pattern used as the star mask)
    voronoi_node=nodes.new('ShaderNodeTexVoronoi')
    voronoi_node.location=(-900,300)
    voronoi_node.coloring=("CELLS")
    voronoi_node.inputs["Scale"].default_value=200
    #Link the nodes
    links=node_tree.links
    links.new(diffuse_mixer_node.outputs["Shader"],output_node.inputs["Surface"])
    links.new(solid_background_node.outputs["Background"],diffuse_mixer_node.inputs[2])
    links.new(light_path_node.outputs["Is Diffuse Ray"],diffuse_mixer_node.inputs[0]) # connects "Is Diffuse Ray" to factor
    links.new(background_node.outputs["Background"],diffuse_mixer_node.inputs[1])
    links.new(rgbmix_node.outputs["Color"],background_node.inputs["Color"])
    links.new(colorramp_node.outputs["Color"],rgbmix_node.inputs["Color1"])
    links.new(sky_node.outputs["Color"],rgbmix_node.inputs["Color2"])
    links.new(voronoi_node.outputs["Color"],colorramp_node.inputs["Fac"])
def Wood_Displacement_Texture(material,rgba_image):
    """Wooden planks: textured diffuse surface plus a procedural displacement
    built from two magic textures and a scaled noise texture.

    Displacement output = (magic1 + magic2 + 5*noise) / 10.
    """
    nodes, node_tree = Setup_Node_Tree(material)
    #Create the output node
    output_node=nodes.new('ShaderNodeOutputMaterial')
    output_node.location=(300,300)
    #Create the diffuse node
    diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
    diffuse_node.location=(0,300)
    diffuse_node.inputs[1].default_value=0.3 # inputs[1] of a Diffuse BSDF is Roughness; set to 0.3
    #Create the rgba node
    rgba_node=nodes.new('ShaderNodeTexImage')
    rgba_node.image = rgba_image
    rgba_node.interpolation=('Closest')
    rgba_node.location=(-300,300)
    rgba_node.label = "RGBA"
    #Create displacement node tree
    #Create magic node 1
    magic_node_one=nodes.new('ShaderNodeTexMagic')
    magic_node_one.location=(-900,200)
    magic_node_one.turbulence_depth=6 #sets depth to 6
    magic_node_one.inputs[1].default_value=5 #sets scale to 5
    magic_node_one.inputs[2].default_value=10 #sets distortion to 10
    #Create magic node 2
    magic_node_two=nodes.new('ShaderNodeTexMagic')
    magic_node_two.location=(-900,0)
    magic_node_two.turbulence_depth=5 #sets depth to 5
    magic_node_two.inputs[1].default_value=3.3 #sets scale to 3.3
    magic_node_two.inputs[2].default_value=2.7 #sets distortion to 2.7
    #Create Add node
    #Connects to magic node 1 and 2
    math_add_node_one=nodes.new('ShaderNodeMath')
    math_add_node_one.location=(-600,0)
    math_add_node_one.operation="ADD"
    #Create noise texture
    noise_node=nodes.new('ShaderNodeTexNoise')
    noise_node.location=(-900,-200)
    noise_node.inputs[1].default_value=6.9 #sets scale to 6.9
    noise_node.inputs[2].default_value=5 #set detail to 5
    noise_node.inputs[3].default_value=8 #sets distortion to 8
    #Create multiply
    #Connects to noise and 5
    math_multiply_node=nodes.new('ShaderNodeMath')
    math_multiply_node.location=(-600,-200)
    math_multiply_node.operation="MULTIPLY"
    math_multiply_node.inputs[1].default_value=5 #sets multiply value to 5
    #Create 2nd Add node
    #Connects to Add node and multiply node
    math_add_node_two=nodes.new('ShaderNodeMath')
    math_add_node_two.operation="ADD"
    math_add_node_two.location=(-300,0)
    #Create Divide node
    #Connect from 2nd add node and input [1] to 10
    #Connects to materials output
    math_divide_node=nodes.new('ShaderNodeMath')
    math_divide_node.location=(0,150)
    math_divide_node.operation="DIVIDE"
    math_divide_node.inputs[1].default_value=10 # scales the summed textures down
    #Link the nodes
    links=node_tree.links
    #link surface modifiers
    links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
    links.new(diffuse_node.outputs["BSDF"],output_node.inputs["Surface"])
    #link displacement modifiers
    links.new(magic_node_one.outputs["Fac"],math_add_node_one.inputs[0])
    links.new(magic_node_two.outputs["Fac"],math_add_node_one.inputs[1])
    links.new(math_add_node_one.outputs[0],math_add_node_two.inputs[0])
    links.new(noise_node.outputs["Fac"],math_multiply_node.inputs[0])
    links.new(math_multiply_node.outputs[0],math_add_node_two.inputs[1])
    links.new(math_add_node_two.outputs[0],math_divide_node.inputs[0])
    links.new(math_divide_node.outputs[0],output_node.inputs["Displacement"])
#MAIN
def main():
    """Set up Cycles materials, sky and textures for a Mineways export.

    Packs all external images into the .blend, auto-detects the Mineways
    texture PREFIX when none was supplied, assigns an appropriate shader to
    every filtered material, shades the sky according to TIME_OF_DAY, and
    finally removes duplicate and redundant textures.
    """
    print("Main started")
    # Pack all the files into one .blend
    print("Packing files")
    bpy.ops.file.pack_all()
    print("Files packed")
    # Find the PREFIX Mineways used when exporting the textures
    global PREFIX
    print("Gettting PREFIX ('"+PREFIX+"')")
    if PREFIX == "":
        print("No prefix found, finding best PREFIX")
        names={} # maps candidate prefix -> number of images that use it
        for img in bpy.data.images: # loops through all images in .blend file
            pos = max( # position of a recognised Mineways suffix, or -1 if absent
                img.name.rfind("-RGBA.png"),
                img.name.rfind("-RGB.png"),
                img.name.rfind("-Alpha.png"))
            # max() of the three rfind() results is -1 only when none of the
            # Mineways suffixes occurs in the image name
            print("Checking:",img.name,"(Position: ",pos,"Prefix: ",img.name[:pos]+")")
            if pos!=-1: # the name carries a Mineways suffix; count its prefix
                try:
                    names[img.name[:pos]]+=1
                except KeyError:
                    names[img.name[:pos]]=1 # first time this prefix is seen
        print("names: ",names)
        # BUGFIX: max(names) returned the lexicographically largest KEY, not
        # the most frequent one; key=names.get implements the intended
        # majority vote over the counted prefixes.
        PREFIX = max(names, key=names.get)
    print("Got PREFIX ('"+PREFIX+"')")
    #Setting the render engine to Cycles and filtering materials that will be processed
    print("Setting the render engine to Cycles and filtering materials that will be processed")
    materials = []
    #if the user doesn't provide any scenes, add all materials that exist to global "materials"
    if len(USER_INPUT_SCENE)==0:
        for scene in bpy.data.scenes:
            scene.render.engine = 'CYCLES'
        for material in bpy.data.materials:
            materials.append(material)
    #else for each scene provided
    else:
        for scene in bpy.data.scenes:
            print("Checking for:",scene.name)
            if scene.name in USER_INPUT_SCENE:
                print("Adding materials from scene:",scene.name)
                scene.render.engine='CYCLES'
                #check to see if it's related to Mineways by checking if it has an active material
                for object in scene.objects:
                    if object.active_material!=None: # NOTE: a weak heuristic — ideally check the
                        # assigned texture or object name to confirm it is a Mineways object
                        materials.append(object.active_material)
    print("Render engine set to Cycles for selected scenes")
    try:
        texture_rgba_image = bpy.data.images[PREFIX+"-RGBA.png"]
    except KeyError: # narrowed from a bare except: a missing image is the only expected failure
        print("Cannot find image. PREFIX is invalid.")
        return
    print("Setting up textures")
    #for every material
    for material in materials:
        if (material.active_texture and len(material.active_texture.name)>=2 and material.active_texture.name[0:2]=="Kd"):
            material_suffix = material.name[material.name.rfind("."):len(material.name)] # gets the .001 .002 .003 ... of the material
            try:
                int(material_suffix[1:])
            except ValueError: # narrowed from a bare except: a non-numeric tail means no ".NNN" suffix
                material_suffix=""
            #if the material is transparent use a special shader
            if any(material==bpy.data.materials.get(transparentBlock+material_suffix) for transparentBlock in transparentBlocks):
                print(material.name+" is transparent.")
                Transparent_Shader(material)
            #if the material is a light emmitting block use a special shader
            elif any(material==bpy.data.materials.get(lightBlock+material_suffix) for lightBlock in lightBlocks):
                print(material.name+" is light block.")
                Light_Emiting_Shader(material)
            #if the material is a light emmitting transparent block use a special shader
            # (loop variable renamed so it no longer shadows the global list name)
            elif any(material==bpy.data.materials.get(lightTransparentBlock+material_suffix) for lightTransparentBlock in lightTransparentBlocks):
                print(material.name+" is transparent light block.")
                Transparent_Emiting_Shader(material)
            #if the material is stained glass, use a special shader
            elif material==bpy.data.materials.get("Stained_Glass"+material_suffix):
                print(material.name+" is stained glass.")
                Stained_Glass_Shader(material)
            #if the material is stationary water or a lily pad, use a special shader
            elif material==bpy.data.materials.get("Stationary_Water"+material_suffix) or material==bpy.data.materials.get("Water"+material_suffix) or material==bpy.data.materials.get("Lily_Pad"+material_suffix):
                print(material.name+" is water or a lily pad.")
                print("Using shader type",WATER_SHADER_TYPE)
                if WATER_SHADER_TYPE==0:
                    Normal_Shader(material,texture_rgba_image)
                elif WATER_SHADER_TYPE==1:
                    Stationary_Water_Shader_1(material)
                elif WATER_SHADER_TYPE==2:
                    Stationary_Water_Shader_2(material)
                elif WATER_SHADER_TYPE==3:
                    Stationary_Water_Shader_3(material)
                else:
                    print("ERROR! COULD NOT SET UP WATER")
                    Normal_Shader(material,texture_rgba_image)
                if material==bpy.data.materials.get("Lily_Pad"+material_suffix):
                    Lily_Pad_Shader(material)
            #if the material is flowing water, use a special shader
            elif material==bpy.data.materials.get("Flowing_Water"+material_suffix):
                print(material.name+" is flowing water.")
                pass # no flowing-water shader is implemented yet
            #if the material is slime, use a special shader
            elif material==bpy.data.materials.get("Slime"+material_suffix):
                print(material.name+" is slime.")
                Slime_Shader(material)
            #if the material is ice, use a special shader
            elif material==bpy.data.materials.get("Ice"+material_suffix):
                print(material.name+" is ice.")
                Ice_Shader(material)
            #if the material is wood and DISPLACE_WOOD is True
            elif (material==bpy.data.materials.get("Oak_Wood_Planks"+material_suffix))and(DISPLACE_WOOD):
                print(material.name+" is displaced wooden planks.")
                Wood_Displacement_Texture(material,texture_rgba_image)
            #else use a normal shader
            else:
                print(material.name+" is normal.")
                Normal_Shader(material,texture_rgba_image)
    print("Finished setting up materials")
    #Set up the sky
    print("Started shading sky")
    for world in bpy.data.worlds:
        if 6.5<=TIME_OF_DAY<=19.5:
            Sky_Day_Shader(world)
        else:
            Sky_Night_Shader(world)
    print("Sky shaded")
    #Remove unnecessary textures
    print("Removing unnecessary textures")
    # BUGFIX: iterate over a snapshot — removing from bpy.data.images while
    # iterating it directly can skip entries.
    for img in list(bpy.data.images):
        _, dot, ext = img.name.rpartition(".") # split off a trailing ".NNN" duplicate suffix, if any
        if dot and ext.isdigit():
            # A numeric tail after a dot marks a Blender duplicate such as
            # "name.001". BUGFIX: requiring the dot fixes the old flaw where an
            # image whose whole name was a number was removed by mistake.
            print("Texture "+img.name+" removed for being a duplicate.")
            img.user_clear() # clears all the image's users so it can be removed
            bpy.data.images.remove(img) # removes image from .blend file
        elif (img.name==PREFIX+"-Alpha.png") or (img.name==PREFIX+"-RGB.png"):
            # the RGBA texture already carries this information
            print("Texture "+img.name+" removed for being redundant")
            img.user_clear() # clears all the image's users so it can be removed
            bpy.data.images.remove(img) # removes image from .blend file
        else:
            print("Texture "+img.name+" was not removed.") # only non-Mineways files get here, or PREFIX-RGBA.png
    print("Finished removing unnecessary textures")
### THE FOLLOWING CODE IS USED IN SETTING UP THE GUI, THIS FEATURE IS IN DEVELOPMENT.
### the following code makes buttons in the scenes tab that allow hotswitching between water types
class OBJECT_PT_water_changer(bpy.types.Panel): # Panel that draws the water-type buttons
    """Properties > Scene panel offering one button per water shader type."""
    bl_label = "Water Types" # heading shown on the panel
    bl_space_type = "PROPERTIES" # appears in the Properties editor
    bl_region_type = "WINDOW" # drawn in the editor's main window region
    bl_context = "scene" # under the Scene tab
    def draw(self, context): # called by blender when it wants to update the screen
        """Draw the four buttons; each tags the operator with its water type (0-3)."""
        self.layout.operator("object.water_changer", text='Use Solid Water').type="0" # draws water button 0
        self.layout.operator("object.water_changer", text='Use Transparent Water').type="1" # draws water button 1
        self.layout.operator("object.water_changer", text='Use Choppy Water').type="2" # draws water button 2
        self.layout.operator("object.water_changer", text='Use Wavey Water').type="3" # draws water button 3
class OBJECT_OT_water_changer(bpy.types.Operator): # Operator behind the water-type buttons
    bl_label = "Change Water Shader" # Used when pressing space on a viewport.
    # Currently broken, as all the water type buttons go to one button.
    bl_idname = "object.water_changer" # Identifier other scripts/buttons use to invoke this
    bl_description = "Change water shader" # Main text of the tool tip
    type = bpy.props.StringProperty() # Set per-button in OBJECT_PT_water_changer.draw()
    def execute(self, context):
        """Record the chosen water type globally and rerun the main script."""
        global WATER_SHADER_TYPE # the chosen type is stored module-wide
        print("self:",self.type,"len",len(self.type))
        print("selected object:",context.object)
        self.report({'INFO'}, "Set water to type "+self.type) # status-bar feedback in Blender
        # Dispatch table replaces the old if/elif chain; unknown types leave
        # WATER_SHADER_TYPE untouched, exactly as before.
        shader_types = {"0": 0, "1": 1, "2": 2, "3": 3}
        if self.type in shader_types:
            print("setting to type "+self.type)
            WATER_SHADER_TYPE = shader_types[self.type]
        main() # Rebuild all materials with the (possibly updated) setting
        return{'FINISHED'} # Required by Blender
def register():
    """Register this module's GUI classes (panel and operator) with Blender."""
    bpy.utils.register_module(__name__) # Needed to register the custom GUI components
def unregister():
    """Unregister this module's GUI classes (panel and operator) from Blender."""
    bpy.utils.unregister_module(__name__) # Needed to unregister the custom GUI components
### END OF GUI CODE
if __name__ == "__main__": # Standard python check: run directly, not imported as a module
    print("\nStarted Cycles Mineways import script.\n")
    main() # Runs the main script
    #register() # Sets up the GUI (in-development feature, disabled by default)
    print("\nCycles Mineways has finished.\n")
| JMY1000/CyclesMineways | CyclesMineways.py | Python | gpl-3.0 | 46,368 | [
"VisIt"
] | 7d316a43271c6b09b3e043d667c085be0d35833751c765b0632e076aaa33300d |
import os
from datetime import datetime
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.html import (
conditional_escape, escape, escapejs, format_html, html_safe, json_script,
linebreaks, smart_urlquote, strip_spaces_between_tags, strip_tags, urlize,
)
from django.utils.safestring import mark_safe
class TestUtilsHtml(SimpleTestCase):
    """Unit tests for the helper functions in django.utils.html."""
    def check_output(self, function, value, output=None):
        """
        function(value) equals output. If output is None, function(value)
        equals value.
        """
        if output is None:
            output = value
        self.assertEqual(function(value), output)
    def test_escape(self):
        """escape() entity-encodes each of the five HTML-special characters."""
        items = (
            ('&', '&amp;'),
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&#x27;'),
        )
        # Substitution patterns for testing the above items.
        patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
        for value, output in items:
            with self.subTest(value=value, output=output):
                for pattern in patterns:
                    with self.subTest(value=value, output=output, pattern=pattern):
                        self.check_output(escape, pattern % value, pattern % output)
                        self.check_output(escape, lazystr(pattern % value), pattern % output)
                # Check repeated values.
                self.check_output(escape, value * 2, output * 2)
        # Verify it doesn't double replace &.
        self.check_output(escape, '<&', '&lt;&amp;')
    def test_format_html(self):
        """format_html() escapes args/kwargs unless they are marked safe."""
        self.assertEqual(
            format_html(
                "{} {} {third} {fourth}",
                "< Dangerous >",
                mark_safe("<b>safe</b>"),
                third="< dangerous again",
                fourth=mark_safe("<i>safe again</i>"),
            ),
            "&lt; Dangerous &gt; <b>safe</b> &lt; dangerous again <i>safe again</i>"
        )
    def test_linebreaks(self):
        """linebreaks() wraps paragraphs in <p> and single newlines in <br>."""
        items = (
            ("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
            ("para1\nsub1\rsub2\n\npara2", "<p>para1<br>sub1<br>sub2</p>\n\n<p>para2</p>"),
            ("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br>sub1</p>\n\n<p>para4</p>"),
            ("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
        )
        for value, output in items:
            with self.subTest(value=value, output=output):
                self.check_output(linebreaks, value, output)
                self.check_output(linebreaks, lazystr(value), output)
    def test_strip_tags(self):
        """strip_tags() removes markup while leaving bare <, > and entities alone."""
        items = (
            ('<p>See: &#39;&eacute; is an apostrophe followed by e acute</p>',
             'See: &#39;&eacute; is an apostrophe followed by e acute'),
            ('<adf>a', 'a'),
            ('</adf>a', 'a'),
            ('<asdf><asdf>e', 'e'),
            ('hi, <f x', 'hi, <f x'),
            ('234<235, right?', '234<235, right?'),
            ('a4<a5 right?', 'a4<a5 right?'),
            ('b7>b2!', 'b7>b2!'),
            ('</fe', '</fe'),
            ('<x>b<y>', 'b'),
            ('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
            ('a<p a >b</p>c', 'abc'),
            ('d<a:b c:d>e</p>f', 'def'),
            ('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
            # caused infinite loop on Pythons not patched with
            # https://bugs.python.org/issue20288
            ('&gotcha&#;<>', '&gotcha&#;<>'),
            ('<sc<!-- -->ript>test<<!-- -->/script>', 'ript>test'),
            ('<script>alert()</script>&h', 'alert()h'),
        )
        for value, output in items:
            with self.subTest(value=value, output=output):
                self.check_output(strip_tags, value, output)
                self.check_output(strip_tags, lazystr(value), output)
    def test_strip_tags_files(self):
        """strip_tags() handles large real-world documents quickly (under 1s)."""
        # Test with more lengthy content (also catching performance regressions)
        for filename in ('strip_tags1.html', 'strip_tags2.txt'):
            with self.subTest(filename=filename):
                path = os.path.join(os.path.dirname(__file__), 'files', filename)
                with open(path, 'r') as fp:
                    content = fp.read()
                    start = datetime.now()
                    stripped = strip_tags(content)
                    elapsed = datetime.now() - start
                    self.assertEqual(elapsed.seconds, 0)
                    self.assertIn("Please try again.", stripped)
                    self.assertNotIn('<', stripped)
    def test_strip_spaces_between_tags(self):
        """strip_spaces_between_tags() removes whitespace only between adjacent tags."""
        # Strings that should come out untouched.
        items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
        for value in items:
            with self.subTest(value=value):
                self.check_output(strip_spaces_between_tags, value)
                self.check_output(strip_spaces_between_tags, lazystr(value))
        # Strings that have spaces to strip.
        items = (
            ('<d> </d>', '<d></d>'),
            ('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
            ('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
        )
        for value, output in items:
            with self.subTest(value=value, output=output):
                self.check_output(strip_spaces_between_tags, value, output)
                self.check_output(strip_spaces_between_tags, lazystr(value), output)
    def test_escapejs(self):
        """escapejs() hex-escapes characters unsafe inside JS string literals."""
        items = (
            ('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
            (r'\ : backslashes, too', '\\u005C : backslashes, too'),
            (
                'and lots of whitespace: \r\n\t\v\f\b',
                'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'
            ),
            (r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
            (
                'paragraph separator:\u2029and line separator:\u2028',
                'paragraph separator:\\u2029and line separator:\\u2028'
            ),
            ('`', '\\u0060'),
        )
        for value, output in items:
            with self.subTest(value=value, output=output):
                self.check_output(escapejs, value, output)
                self.check_output(escapejs, lazystr(value), output)
    def test_json_script(self):
        """json_script() emits a <script> tag with HTML-unsafe chars escaped in the JSON."""
        tests = (
            # "<", ">" and "&" are quoted inside JSON strings
            (('&<>', '<script id="test_id" type="application/json">"\\u0026\\u003C\\u003E"</script>')),
            # "<", ">" and "&" are quoted inside JSON objects
            (
                {'a': '<script>test&ing</script>'},
                '<script id="test_id" type="application/json">'
                '{"a": "\\u003Cscript\\u003Etest\\u0026ing\\u003C/script\\u003E"}</script>'
            ),
            # Lazy strings are quoted
            (lazystr('&<>'), '<script id="test_id" type="application/json">"\\u0026\\u003C\\u003E"</script>'),
            (
                {'a': lazystr('<script>test&ing</script>')},
                '<script id="test_id" type="application/json">'
                '{"a": "\\u003Cscript\\u003Etest\\u0026ing\\u003C/script\\u003E"}</script>'
            ),
        )
        for arg, expected in tests:
            with self.subTest(arg=arg):
                self.assertEqual(json_script(arg, 'test_id'), expected)
    def test_smart_urlquote(self):
        """smart_urlquote() IDNA-encodes hosts and percent-encodes unsafe path/query chars."""
        items = (
            ('http://öäü.com/', 'http://xn--4ca9at.com/'),
            ('http://öäü.com/öäü/', 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/'),
            # Everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered
            # safe as per RFC.
            ('http://example.com/path/öäü/', 'http://example.com/path/%C3%B6%C3%A4%C3%BC/'),
            ('http://example.com/%C3%B6/ä/', 'http://example.com/%C3%B6/%C3%A4/'),
            ('http://example.com/?x=1&y=2+3&z=', 'http://example.com/?x=1&y=2+3&z='),
            ('http://example.com/?x=<>"\'', 'http://example.com/?x=%3C%3E%22%27'),
            ('http://example.com/?q=http://example.com/?x=1%26q=django',
             'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
            ('http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango',
             'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
            ('http://.www.f oo.bar/', 'http://.www.f%20oo.bar/'),
        )
        # IDNs are properly quoted
        for value, output in items:
            with self.subTest(value=value, output=output):
                self.assertEqual(smart_urlquote(value), output)
    def test_conditional_escape(self):
        """conditional_escape() escapes plain strings but passes safe strings through."""
        s = '<h1>interop</h1>'
        self.assertEqual(conditional_escape(s), '&lt;h1&gt;interop&lt;/h1&gt;')
        self.assertEqual(conditional_escape(mark_safe(s)), s)
        self.assertEqual(conditional_escape(lazystr(mark_safe(s))), s)
    def test_html_safe(self):
        """@html_safe adds __html__ returning str(obj)."""
        @html_safe
        class HtmlClass:
            def __str__(self):
                return "<h1>I'm a html class!</h1>"
        html_obj = HtmlClass()
        self.assertTrue(hasattr(HtmlClass, '__html__'))
        self.assertTrue(hasattr(html_obj, '__html__'))
        self.assertEqual(str(html_obj), html_obj.__html__())
    def test_html_safe_subclass(self):
        """@html_safe on a subclass overrides an inherited __html__."""
        class BaseClass:
            def __html__(self):
                # defines __html__ on its own
                return 'some html content'
            def __str__(self):
                return 'some non html content'
        @html_safe
        class Subclass(BaseClass):
            def __str__(self):
                # overrides __str__ and is marked as html_safe
                return 'some html safe content'
        subclass_obj = Subclass()
        self.assertEqual(str(subclass_obj), subclass_obj.__html__())
    def test_html_safe_defines_html_error(self):
        """@html_safe rejects classes that already define __html__."""
        msg = "can't apply @html_safe to HtmlClass because it defines __html__()."
        with self.assertRaisesMessage(ValueError, msg):
            @html_safe
            class HtmlClass:
                def __html__(self):
                    return "<h1>I'm a html class!</h1>"
    def test_html_safe_doesnt_define_str(self):
        """@html_safe rejects classes that do not define __str__."""
        msg = "can't apply @html_safe to HtmlClass because it doesn't define __str__()."
        with self.assertRaisesMessage(ValueError, msg):
            @html_safe
            class HtmlClass:
                pass
    def test_urlize(self):
        """urlize() turns URLs and email addresses into anchor tags."""
        tests = (
            (
                'Search for google.com/?q=! and see.',
                'Search for <a href="http://google.com/?q=">google.com/?q=</a>! and see.'
            ),
            (
                lazystr('Search for google.com/?q=!'),
                'Search for <a href="http://google.com/?q=">google.com/?q=</a>!'
            ),
            ('foo@example.com', '<a href="mailto:foo@example.com">foo@example.com</a>'),
        )
        for value, output in tests:
            with self.subTest(value=value):
                self.assertEqual(urlize(value), output)
    def test_urlize_unchanged_inputs(self):
        """urlize() leaves non-linkable (and pathological) inputs untouched."""
        tests = (
            ('a' + '@a' * 50000) + 'a', # simple_email_re catastrophic test
            ('a' + '.' * 1000000) + 'a', # trailing_punctuation catastrophic test
            'foo@',
            '@foo.com',
            'foo@.example.com',
            'foo@localhost',
            'foo@localhost.',
        )
        for value in tests:
            with self.subTest(value=value):
                self.assertEqual(urlize(value), value)
| nesdis/djongo | tests/django_tests/tests/v22/tests/utils_tests/test_html.py | Python | agpl-3.0 | 11,720 | [
"ADF"
] | e71fdab3a5bc021e61386024fc8bbaa18e1c4a7038144047a58378845ca58bb3 |
import copy
import numpy as np
import numpy.random as rng
from utils import randh
from numba import jit
# How many parameters are there? (m, b, log_sigma — see from_prior below)
num_params = 3
# Load the data set; columns are age (years) and maximum vision distance
# (feet), matching the axis labels used for the plot below.
data = np.loadtxt("road.txt")
N = data.shape[0] # Number of data points
# Plot the data as a scatter plot (plt.show() opens a window)
import matplotlib.pyplot as plt
plt.plot(data[:,0], data[:,1], "o")
plt.xlabel("Age of person (years)")
plt.ylabel("Maximum vision distance (feet)")
plt.show()
# Typical Metropolis proposal step size for each parameter (m, b, log_sigma)
jump_sizes = np.array([1000.0, 1000.0, 20.0])
@jit
def from_prior():
    """
    Draw one parameter vector [m, b, log_sigma] from the prior:
    m, b ~ Normal(0, 1000) and log_sigma ~ Uniform(-10, 10).
    """
    slope = 1000.0*rng.randn()
    intercept = 1000.0*rng.randn()
    log_sigma = -10.0 + 20.0*rng.rand()
    return np.array([slope, intercept, log_sigma])
@jit
def log_prior(params):
    """
    Log prior density (up to an additive constant) for
    params = [m, b, log_sigma].
    """
    slope, intercept, log_sigma = params
    # Uniform(-10, 10) prior on log_sigma: zero density outside the box.
    if log_sigma < -10.0 or log_sigma > 10.0:
        return -np.Inf
    # Normal(0, 1000) priors on slope and intercept; the 2*pi normalising
    # terms are dropped because Metropolis only needs density ratios.
    return -0.5*(slope/1000.0)**2 - 0.5*(intercept/1000.0)**2
@jit
def log_likelihood(params):
    """
    Log-likelihood of the straight-line model with normal noise,
    evaluated over the module-level data array (N points).
    """
    # Unpack the parameter vector
    m, b, log_sigma = params
    # Noise scale
    sigma = np.exp(log_sigma)
    # Residuals about the straight line m*x + b
    residuals = data[:,1] - (m*data[:,0] + b)
    # Gaussian log-density summed over all N points
    return (-0.5*N*np.log(2*np.pi)
            - N*log_sigma
            - 0.5*np.sum(residuals**2/sigma**2))
@jit
def proposal(params):
    """
    Metropolis proposal: perturb exactly one randomly chosen parameter
    by its jump scale times a heavy-tailed step, leaving the rest alone.
    """
    # Work on a copy so the caller's vector is untouched
    candidate = copy.deepcopy(params)
    # Pick a single coordinate to move
    idx = rng.randint(num_params)
    candidate[idx] += jump_sizes[idx]*randh()
    return candidate
| eggplantbren/NSwMCMC | python/straightline.py | Python | gpl-2.0 | 1,954 | [
"Gaussian"
] | 5aad979a4ab8fa1847dfedb05a05cad5dc0df920fb62c7ff63cd116159e45b6c |
from threading import Lock
class ResponsibleGenerator(object):
    """Iterator wrapper that runs a cleanup callback when it is disposed of."""

    __slots__ = ["cleanup", "gen"]

    def __init__(self, gen, cleanup):
        # Keep the wrapped generator and the zero-argument cleanup callable.
        self.gen = gen
        self.cleanup = cleanup

    def __del__(self):
        # Fires when the wrapper is garbage-collected, even if the
        # caller abandoned the iteration early.
        self.cleanup()

    def __iter__(self):
        return self

    def __next__(self):
        # Delegate straight to the wrapped generator (StopIteration included).
        return self.gen.__next__()
class ConcurrentStore(object):
    """Make a triple store tolerant of writes issued during reads.

    While at least one ``triples()`` iteration is in flight, ``add`` and
    ``remove`` are buffered in pending lists instead of touching the
    underlying store; the last reader to finish flushes the buffers.
    NOTE(review): ``__visit_count`` is read without the lock in
    add/remove/triples, so this relies on CPython-style atomic reads --
    confirm before using on other interpreters.
    """

    def __init__(self, store):
        self.store = store
        # number of calls to visit still in progress
        self.__visit_count = 0
        # lock guarding __visit_count and the end-of-read flush
        self.__lock = Lock()
        # lists for keeping track of added and removed triples while
        # a read is in progress
        self.__pending_removes = []
        self.__pending_adds = []

    def add(self, triple):
        """Add a triple now, or buffer the add while readers are active."""
        (s, p, o) = triple
        if self.__visit_count == 0:
            self.store.add((s, p, o))
        else:
            self.__pending_adds.append((s, p, o))

    def remove(self, triple):
        """Remove a triple now, or buffer the removal while readers are active."""
        (s, p, o) = triple
        if self.__visit_count == 0:
            self.store.remove((s, p, o))
        else:
            self.__pending_removes.append((s, p, o))

    def triples(self, triple):
        """Yield triples matching the (s, p, o) pattern (None = wildcard).

        Results from the wrapped store are filtered against pending
        removals, then pending additions matching the pattern are appended.
        """
        (su, pr, ob) = triple
        g = self.store.triples((su, pr, ob))
        pending_removes = self.__pending_removes
        self.__begin_read()
        # ResponsibleGenerator invokes __end_read once the iteration is
        # garbage-collected, even if the caller stops consuming early.
        for s, p, o in ResponsibleGenerator(g, self.__end_read):
            if not (s, p, o) in pending_removes:
                yield s, p, o
        for (s, p, o) in self.__pending_adds:
            if (
                (su is None or su == s)
                and (pr is None or pr == p)
                and (ob is None or ob == o)
            ):
                yield s, p, o

    def __len__(self):
        # Length of the underlying store only; pending buffers not counted.
        return self.store.__len__()

    def __begin_read(self):
        # 'with' releases the lock even if the increment is interrupted,
        # unlike the previous bare acquire()/release() pair.
        with self.__lock:
            self.__visit_count += 1

    def __end_read(self):
        with self.__lock:
            self.__visit_count -= 1
            if self.__visit_count == 0:
                # Last reader out: flush buffered removals, then additions.
                pending_removes = self.__pending_removes
                while pending_removes:
                    (s, p, o) = pending_removes.pop()
                    try:
                        self.store.remove((s, p, o))
                    except Exception:
                        # Narrowed from a bare 'except:' so SystemExit and
                        # KeyboardInterrupt are no longer swallowed; the
                        # remove stays best-effort.
                        print(s, p, o, "Not in store to remove")
                pending_adds = self.__pending_adds
                while pending_adds:
                    (s, p, o) = pending_adds.pop()
                    self.store.add((s, p, o))
| RDFLib/rdflib | rdflib/plugins/stores/concurrent.py | Python | bsd-3-clause | 2,709 | [
"VisIt"
] | 9badfedfb8716ec1d997cbccb6e0dbd4fb4a01dc8762577229cc25023ac02b23 |
'''
Created on Aug 5, 2014
@author: gearsad
'''
import vtk
import numpy
from math import sin,cos
from SceneObject import SceneObject
class LIDAR(SceneObject):
    '''
    A template for drawing a LIDAR point cloud.

    Readings live on a regular (theta, phi) angular grid; UpdatePoints()
    converts each depth reading to a 3D point, and the mapper colours the
    points by depth between minDepth and maxDepth.
    Ref: http://stackoverflow.com/questions/7591204/how-to-display-point-cloud-in-vtk-in-different-colors
    '''
    # VTK structures holding the point cloud (class-level placeholders;
    # each instance rebuilds them in __init__)
    vtkPointCloudPolyData = None
    vtkPointCloudPoints = None
    vtkPointCloudDepth = None
    vtkPointCloudCells = None
    # The angular dimensions of the scan window
    numThetaReadings = None
    numPhiReadings = None
    thetaRange = [0, 0]
    phiRange = [0, 0]
    def __init__(self, renderer, minTheta, maxTheta, numThetaReadings, minPhi, maxPhi, numPhiReadings, minDepth, maxDepth, initialValue):
        '''
        Initialize the LIDAR point cloud.

        Angles are in degrees (converted to radians in UpdatePoints);
        initialValue is the depth every reading starts at; minDepth/maxDepth
        only set the colour-map scalar range on the mapper.
        '''
        # Call the parent constructor
        super(LIDAR,self).__init__(renderer)
        # Cache these
        self.numPhiReadings = numPhiReadings
        self.numThetaReadings = numThetaReadings
        self.thetaRange = [minTheta, maxTheta]
        self.phiRange = [minPhi, maxPhi]
        # Create a point cloud with the data
        self.vtkPointCloudPoints = vtk.vtkPoints()
        self.vtkPointCloudDepth = vtk.vtkDoubleArray()
        self.vtkPointCloudDepth.SetName("DepthArray")
        self.vtkPointCloudCells = vtk.vtkCellArray()
        self.vtkPointCloudPolyData = vtk.vtkPolyData()
        # Set up the structure: points + one vertex cell per point,
        # coloured by the "DepthArray" scalar array
        self.vtkPointCloudPolyData.SetPoints(self.vtkPointCloudPoints)
        self.vtkPointCloudPolyData.SetVerts(self.vtkPointCloudCells)
        self.vtkPointCloudPolyData.GetPointData().SetScalars(self.vtkPointCloudDepth)
        self.vtkPointCloudPolyData.GetPointData().SetActiveScalars("DepthArray")
        # Build the initial structure: one point + vertex cell per reading.
        # Coordinates are placeholders; real positions come from UpdatePoints.
        for x in xrange(0, self.numThetaReadings):
            for y in xrange(0, self.numPhiReadings):
                # Add the point
                point = [1, 1, 1]
                pointId = self.vtkPointCloudPoints.InsertNextPoint(point)
                self.vtkPointCloudDepth.InsertNextValue(1)
                self.vtkPointCloudCells.InsertNextCell(1)
                self.vtkPointCloudCells.InsertCellPoint(pointId)
        # Use the update method to initialize the points with a NumPy matrix
        initVals = numpy.ones((numThetaReadings, numPhiReadings)) * initialValue
        self.UpdatePoints(initVals)
        # Now build the mapper and actor.
        mapper = vtk.vtkPolyDataMapper()
        # NOTE(review): SetInput is the VTK 5 API; VTK 6+ requires SetInputData
        mapper.SetInput(self.vtkPointCloudPolyData)
        mapper.SetColorModeToDefault()
        mapper.SetScalarRange(minDepth, maxDepth)
        mapper.SetScalarVisibility(1)
        self.vtkActor.SetMapper(mapper)
    def UpdatePoints(self, points2DNPMatrix):
        '''Update the points with a 2D array that is numThetaReadings x numPhiReadings containing the depth from the source'''
        for x in xrange(0, self.numThetaReadings):
            # Interpolate theta across its range and convert degrees -> radians
            theta = (self.thetaRange[0] + float(x) * (self.thetaRange[1] - self.thetaRange[0]) / float(self.numThetaReadings)) / 180.0 * 3.14159
            for y in xrange(0, self.numPhiReadings):
                phi = (self.phiRange[0] + float(y) * (self.phiRange[1] - self.phiRange[0]) / float(self.numPhiReadings)) / 180.0 * 3.14159
                r = points2DNPMatrix[x, y]
                # Polar coordinates to Euclidean space
                point = [r * sin(theta) * cos(phi), r * sin(phi), r * cos(theta) * cos(phi)]
                # Point ids were allocated phi-major in __init__, so
                # reading (x, y) maps to id y + x*numPhiReadings
                pointId = y + x * self.numPhiReadings
                self.vtkPointCloudPoints.SetPoint(pointId, point)
        self.vtkPointCloudCells.Modified()
        self.vtkPointCloudPoints.Modified()
self.vtkPointCloudDepth.Modified() | GearsAD/semisorted_arnerve | sandbox/bot_vis_platform_post3b/scene/LIDAR.py | Python | mit | 3,811 | [
"VTK"
] | a42f29b85d56782e946c279a7912cf8cbc884f52105cba11251596c03508a4bc |
"""
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
# Student t with df=1.5 is heavy-tailed, i.e. strongly non-Gaussian --
# exactly the kind of source ICA can separate.
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]])  # Mixing matrix
X = np.dot(S, A.T)  # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X)  # Estimate the sources
# Normalise the recovered sources to unit standard deviation for plotting.
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
    """Scatter-plot the 2-D samples in S, optionally overlaying axis vectors.

    Each entry of axis_list is a 2x2 array of component vectors; it is
    normalised in place by its standard deviation before being drawn.
    """
    plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', color='steelblue',
                alpha=0.5, zorder=10)
    if axis_list is not None:
        # One colour per set of axes (PCA first, then ICA).
        for col, vec in zip(['orange', 'red'], axis_list):
            vec /= vec.std()
            ax_x, ax_y = vec
            # Short plain line first, so plt.legend has an entry to label.
            plt.plot(0.1 * ax_x, 0.1 * ax_y, linewidth=2, color=col)
            plt.quiver(0, 0, ax_x, ax_y, zorder=11, width=0.01, scale=6,
                       color=col)
    # Crosshair through the origin and a fixed viewport.
    plt.hlines(0, -3, 3)
    plt.vlines(0, -3, 3)
    plt.axis([-3, 3, -3, 3])
    plt.xlabel('x')
    plt.ylabel('y')
# 2x2 figure: true sources, mixed observations (with PCA/ICA axes overlaid),
# and the signals each method recovers.
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
# Column vectors to draw over the observation scatter.
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
# Keep the legend above the scatter points.
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/decomposition/plot_ica_vs_pca.py | Python | mit | 3,329 | [
"Gaussian"
] | f80535f5f58450012442742aaa57ec546ec732ed8e66083f8b75428ae4dd2fe9 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the table backing the new
        ArticleMetaDataMap model (an article <-> metadata join row with
        audit fields for who added it and how often it was validated)."""
        # Adding model 'ArticleMetaDataMap'
        db.create_table('neuroelectro_articlemetadatamap', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.Article'])),
            ('metadata', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.MetaData'])),
            ('date_mod', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('added_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.User'], null=True)),
            ('times_validated', self.gf('django.db.models.fields.IntegerField')(default=0, null=True)),
            ('note', self.gf('django.db.models.fields.CharField')(max_length=200, null=True)),
        ))
        # Let South emit the post-create signal for the new model.
        db.send_create_signal('neuroelectro', ['ArticleMetaDataMap'])
    def backwards(self, orm):
        """Reverse the migration: drop the ArticleMetaDataMap table."""
        # Deleting model 'ArticleMetaDataMap'
        db.delete_table('neuroelectro_articlemetadatamap')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'neuroelectro.article': {
'Meta': {'object_name': 'Article'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
'author_list_str': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Author']", 'null': 'True', 'symmetrical': 'False'}),
'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Journal']", 'null': 'True'}),
'metadata': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'to': "orm['neuroelectro.MetaData']", 'null': 'True', 'symmetrical': 'False'}),
'pmid': ('django.db.models.fields.IntegerField', [], {}),
'pub_year': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
'suggester': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['neuroelectro.User']", 'null': 'True'}),
'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.articlefulltext': {
'Meta': {'object_name': 'ArticleFullText'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'full_text_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'neuroelectro.articlefulltextstat': {
'Meta': {'object_name': 'ArticleFullTextStat'},
'article_full_text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.ArticleFullText']"}),
'data_table_ephys_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata_human_assigned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metadata_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'methods_tag_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'neuron_article_map_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'num_unique_ephys_concept_maps': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.articlemetadatamap': {
'Meta': {'object_name': 'ArticleMetaDataMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.MetaData']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'})
},
'neuroelectro.articlesummary': {
'Meta': {'object_name': 'ArticleSummary'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.author': {
'Meta': {'object_name': 'Author'},
'first': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'last': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'middle': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.contvalue': {
'Meta': {'object_name': 'ContValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_range': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'mean': ('django.db.models.fields.FloatField', [], {}),
'min_range': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'stderr': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'stdev': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'neuroelectro.datasource': {
'Meta': {'object_name': 'DataSource'},
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.UserSubmission']", 'null': 'True'}),
'user_upload': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.UserUpload']", 'null': 'True'})
},
'neuroelectro.datatable': {
'Meta': {'object_name': 'DataTable'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'needs_expert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'})
},
'neuroelectro.ephysconceptmap': {
'Meta': {'object_name': 'EphysConceptMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'neuroelectro.ephysprop': {
'Meta': {'object_name': 'EphysProp'},
'definition': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.EphysPropSyn']", 'symmetrical': 'False'}),
'units': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Unit']", 'null': 'True'})
},
'neuroelectro.ephyspropsummary': {
'Meta': {'object_name': 'EphysPropSummary'},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_mean_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_mean_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'neuroelectro.ephyspropsyn': {
'Meta': {'object_name': 'EphysPropSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'neuroelectro.institution': {
'Meta': {'object_name': 'Institution'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
'neuroelectro.journal': {
'Meta': {'object_name': 'Journal'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Publisher']", 'null': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.mailinglistentry': {
'Meta': {'object_name': 'MailingListEntry'},
'comments': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
'neuroelectro.meshterm': {
'Meta': {'object_name': 'MeshTerm'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.metadata': {
'Meta': {'object_name': 'MetaData'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}),
'cont_value': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.ContValue']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'added_by': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.neuronarticlemap': {
'Meta': {'object_name': 'NeuronArticleMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'num_mentions': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.neuronconceptmap': {
'Meta': {'object_name': 'NeuronConceptMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'neuroelectro.neuronephysdatamap': {
'Meta': {'object_name': 'NeuronEphysDataMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysConceptMap']"}),
'err': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'metadata': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MetaData']", 'symmetrical': 'False'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronConceptMap']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'val': ('django.db.models.fields.FloatField', [], {}),
'val_norm': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'neuroelectro.neuronephyssummary': {
'Meta': {'object_name': 'NeuronEphysSummary'},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_mean': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'neuroelectro.neuronsummary': {
'Meta': {'object_name': 'NeuronSummary'},
'cluster_xval': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'cluster_yval': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_ephysprops': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
'entrezid': ('django.db.models.fields.IntegerField', [], {}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.publisher': {
'Meta': {'object_name': 'Publisher'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['neuroelectro.BrainRegion']"}),
'val': ('django.db.models.fields.FloatField', [], {})
},
'neuroelectro.species': {
'Meta': {'object_name': 'Species'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.substance': {
'Meta': {'object_name': 'Substance'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.unit': {
'Meta': {'object_name': 'Unit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'neuroelectro.user': {
'Meta': {'object_name': 'User', '_ormbases': ['auth.User']},
'assigned_neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Institution']", 'null': 'True'}),
'is_curator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lab_head': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'lab_website_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'neuroelectro.usersubmission': {
'Meta': {'object_name': 'UserSubmission'},
'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']"})
},
'neuroelectro.userupload': {
'Meta': {'object_name': 'UserUpload'},
'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']"})
}
}
complete_apps = ['neuroelectro'] | neuroelectro/neuroelectro_org | neuroelectro/south_migrations/0061_auto__add_articlemetadatamap.py | Python | gpl-2.0 | 31,187 | [
"NEURON"
] | 8ea1459809ae5ff787e215297f558e0a732a24dbce2063d0d4ba696e2f19b908 |
#!/usr/bin/env python
#coding=utf-8
import urllib
import base64
import hmac
import time
from hashlib import sha1 as sha
import os
import md5
import StringIO
from threading import Thread
import threading
import ConfigParser
from oss_xml_handler import *
#LOG_LEVEL can be one of DEBUG INFO ERROR CRITICAL WARNING
LOG_LEVEL = "ERROR"
# Target cloud provider; the signing code below only depends on the
# self-defined header prefix chosen here.
PROVIDER = "OSS"
SELF_DEFINE_HEADER_PREFIX = "x-oss-"
if "AWS" == PROVIDER:
    # Amazon S3 compatibility: use the x-amz- header namespace instead.
    SELF_DEFINE_HEADER_PREFIX = "x-amz-"
def initlog(log_level = LOG_LEVEL):
    """Create the shared "oss" logger writing to ./log.txt.

    The log file rotates at 100 MB with 5 backups.  *log_level* is a
    case-insensitive level name; anything unrecognized falls back to
    ERROR.  Returns the configured logger instance.
    """
    import logging
    from logging.handlers import RotatingFileHandler

    logfile = os.path.join(os.getcwd(), 'log.txt')
    max_log_size = 100 * 1024 * 1024  # bytes
    backup_count = 5
    log_format = \
        "%(asctime)s %(levelname)-8s[%(filename)s:%(lineno)d(%(funcName)s)] %(message)s"
    handler = RotatingFileHandler(logfile,
                                  mode='a',
                                  maxBytes=max_log_size,
                                  backupCount=backup_count)
    handler.setFormatter(logging.Formatter(log_format))
    logger = logging.getLogger("oss")
    logger.addHandler(handler)
    # Map the level name to a logging constant; unknown names -> ERROR.
    level_by_name = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    logger.setLevel(level_by_name.get(log_level.upper(), logging.ERROR))
    return logger
# Module-level logger shared by every helper in this file.
log = initlog(LOG_LEVEL)
########## function for Authorization ##########
def _format_header(headers = None):
    '''
    Format the headers that self define.

    Returns a copy of *headers* in which every self-defined header key
    (the "x-oss-" / "x-amz-" prefix) is lower-cased; other keys are
    kept unchanged.  Unicode values are encoded to UTF-8 byte strings
    in place — note this mutates the caller's dict values (Python 2).
    '''
    if not headers:
        headers = {}
    tmp_headers = {}
    for k in headers.keys():
        # Python 2: coerce unicode header values to UTF-8 bytes.
        if isinstance(headers[k], unicode):
            headers[k] = headers[k].encode('utf-8')
        if k.lower().startswith(SELF_DEFINE_HEADER_PREFIX):
            # Self-defined headers must be lower-cased before signing.
            k_lower = k.lower()
            tmp_headers[k_lower] = headers[k]
        else:
            tmp_headers[k] = headers[k]
    return tmp_headers
def get_assign(secret_access_key, method, headers = None, resource="/", result = None):
    '''
    Create the authorization for OSS based on header input.
    You should put it into "Authorization" parameter of header.

    The signature is HMAC-SHA1 over the canonical string
    VERB \n Content-MD5 \n Content-Type \n Date \n canonicalized
    self-defined headers \n canonicalized resource, base64-encoded.
    If *result* is a list, the canonical string is appended to it
    (used by callers for debugging).
    '''
    if not headers:
        headers = {}
    if not result:
        result = []
    content_md5 = ""
    content_type = ""
    date = ""
    canonicalized_oss_headers = ""
    log.debug("secret_access_key: %s" % secret_access_key)
    # Header lookups are case/whitespace-insensitive.
    content_md5 = safe_get_element('Content-MD5', headers)
    content_type = safe_get_element('Content-Type', headers)
    date = safe_get_element('Date', headers)
    canonicalized_resource = resource
    tmp_headers = _format_header(headers)
    if len(tmp_headers) > 0:
        # Self-defined headers must appear sorted by (lower-cased) name.
        x_header_list = tmp_headers.keys()
        x_header_list.sort()
        for k in x_header_list:
            if k.startswith(SELF_DEFINE_HEADER_PREFIX):
                canonicalized_oss_headers += k + ":" + tmp_headers[k] + "\n"
    string_to_sign = method + "\n" + content_md5.strip() + "\n" + content_type + "\n" + date + "\n" + canonicalized_oss_headers + canonicalized_resource;
    result.append(string_to_sign)
    log.debug("\nmethod:%s\n content_md5:%s\n content_type:%s\n data:%s\n canonicalized_oss_headers:%s\n canonicalized_resource:%s\n" % (method, content_md5, content_type, date, canonicalized_oss_headers, canonicalized_resource))
    log.debug("\nstring_to_sign:%s\n \nstring_to_sign_size:%d\n" % (string_to_sign, len(string_to_sign)))
    h = hmac.new(secret_access_key, string_to_sign, sha)
    # base64 output ends with a newline; strip it before use.
    return base64.encodestring(h.digest()).strip()
def get_resource(params = None):
    '''
    Build the canonical sub-resource query string used for signing.

    Only the sub-resources / response-override parameters recognized by
    OSS are included, in sorted order.  Parameters with an empty value
    appear without "=value" (e.g. "?acl").  Keys are matched
    case-insensitively.  Returns "" when nothing matches.
    '''
    if not params:
        params = {}
    # Normalize incoming keys for case-insensitive lookup.
    tmp_headers = {}
    for k, v in params.items():
        tmp_headers[k.lower().strip()] = v
    override_response_list = ['response-content-type', 'response-content-language', \
                              'response-cache-control', 'logging', 'response-content-encoding', \
                              'acl', 'uploadId', 'uploads', 'partNumber', 'group', \
                              'delete', 'website', 'location',\
                              'response-expires', 'response-content-disposition']
    # The canonical string requires the sub-resources in sorted order.
    override_response_list.sort()
    resource = ""
    separator = "?"
    for i in override_response_list:
        # 'has_key' is Python-2-only; 'in' behaves identically and is
        # also valid on Python 3.
        if i.lower() in tmp_headers:
            resource += separator
            resource += i
            tmp_key = str(tmp_headers[i.lower()])
            if len(tmp_key) != 0:
                resource += "="
                resource += tmp_key
            separator = '&'
    return resource
def append_param(url, params):
    '''
    convert the parameters to query string of URI.

    Underscores in keys become dashes ("maxkeys" is special-cased to
    "max-keys").  Keys with an empty/None value — or the special "acl"
    key — are emitted without "=value".  Keys and values are
    percent-encoded (Python 2 urllib.quote).
    '''
    l = []
    for k,v in params.items():
        k = k.replace('_', '-')
        if k == 'maxkeys':
            k = 'max-keys'
        # Python 2: coerce unicode values to UTF-8 before quoting.
        if isinstance(v, unicode):
            v = v.encode('utf-8')
        if v is not None and v != '':
            l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))
        elif k == 'acl':
            l.append('%s' % (urllib.quote(k)))
        elif v is None or v == '':
            # Valueless parameter: emit the bare key.
            l.append('%s' % (urllib.quote(k)))
    if len(l):
        url = url + '?' + '&'.join(l)
    return url
############### Construct XML ###############
def create_object_group_msg_xml(part_msg_list = None):
    '''
    get information from part_msg_list and covert it to xml.
    part_msg_list has special format: each entry is a tuple/list of at
    least (part_number, part_name, md5sum, ...).

    Returns the <CreateFileGroup> XML body, or "" when an entry is
    malformed (an error is also printed).
    '''
    if not part_msg_list:
        part_msg_list = []
    xml_string = r'<CreateFileGroup>'
    for part in part_msg_list:
        if len(part) >= 3:
            # Python 2: part name may be unicode; encode for the XML body.
            if isinstance(part[1], unicode):
                file_path = part[1].encode('utf-8')
            else:
                file_path = part[1]
            xml_string += r'<Part>'
            xml_string += r'<PartNumber>' + str(part[0]) + r'</PartNumber>'
            xml_string += r'<PartName>' + str(file_path) + r'</PartName>'
            # OSS expects the ETag upper-cased and wrapped in quotes.
            xml_string += r'<ETag>"' + str(part[2]).upper() + r'"</ETag>'
            xml_string += r'</Part>'
        else:
            print "the ", part, " in part_msg_list is not as expected!"
            return ""
    xml_string += r'</CreateFileGroup>'
    return xml_string
def create_part_xml(part_msg_list = None):
    '''
    get information from part_msg_list and covert it to xml.
    part_msg_list has special format: each entry is a tuple/list of at
    least (part_number, part_name, md5sum, ...).

    Returns the <CompleteMultipartUpload> XML body, or "" when an
    entry is malformed (an error is also printed).
    '''
    if not part_msg_list:
        part_msg_list = []
    xml_string = r'<CompleteMultipartUpload>'
    for part in part_msg_list:
        if len(part) >= 3:
            # Python 2: part name may be unicode; encode before use.
            if isinstance(part[1], unicode):
                file_path = part[1].encode('utf-8')
            else:
                file_path = part[1]
            xml_string += r'<Part>'
            xml_string += r'<PartNumber>' + str(part[0]) + r'</PartNumber>'
            # OSS expects the ETag upper-cased and wrapped in quotes.
            xml_string += r'<ETag>"' + str(part[2]).upper() + r'"</ETag>'
            xml_string += r'</Part>'
        else:
            print "the ", part, " in part_msg_list is not as expected!"
            return ""
    xml_string += r'</CompleteMultipartUpload>'
    return xml_string
def create_delete_object_msg_xml(object_list = None, is_quiet = False, is_defult = False):
    '''
    covert object name list to xml.

    Builds the <Delete> body for the batch-delete API.  Unless
    *is_defult* is set, a <Quiet> element reflecting *is_quiet* is
    included.  (Parameter name "is_defult" is a historic typo kept for
    caller compatibility.)
    '''
    if not object_list:
        object_list = []
    xml_string = r'<Delete>'
    if not is_defult:
        if is_quiet:
            xml_string += r'<Quiet>true</Quiet>'
        else:
            xml_string += r'<Quiet>false</Quiet>'
    for object in object_list:
        key = object.strip()
        # Python 2: unicode names are UTF-8 encoded.
        # NOTE(review): the unicode branch overwrites the stripped value
        # without stripping — unicode keys keep surrounding whitespace;
        # looks inconsistent, confirm before relying on it.
        if isinstance(object, unicode):
            key = object.encode('utf-8')
        xml_string += r'<Object><Key>%s</Key></Object>' % key
    xml_string += r'</Delete>'
    return xml_string
############### operate OSS ###############
def clear_all_object_of_bucket(oss_instance, bucket):
    '''
    clean all objects in bucket, after that, it will delete this bucket.

    Backward-compatible alias for clear_all_objects_in_bucket().
    '''
    return clear_all_objects_in_bucket(oss_instance, bucket)
def clear_all_objects_in_bucket(oss_instance, bucket):
    '''
    it will clean all objects in bucket, after that, it will delete this bucket.
    Pending multipart uploads are cancelled as well.  Returns True on
    success, False on the first failure (which is also printed).

    example:
        from oss_api import *
        host = ""
        id = ""
        key = ""
        oss_instance = OssAPI(host, id, key)
        bucket = "leopublicreadprivatewrite"
        if clear_all_objects_in_bucket(oss_instance, bucket):
            pass
        else:
            print "clean Fail"
    '''
    # Step 1: delete every object.
    b = GetAllObjects()
    b.get_all_object_in_bucket(oss_instance, bucket)
    for i in b.object_list:
        res = oss_instance.delete_object(bucket, i)
        if (res.status / 100 != 2):
            print "clear_all_objects_in_bucket: delete object fail, ret is:", res.status, "object is: ", i
            return False
        else:
            pass
    # Step 2: cancel every pending multipart upload (paginated listing).
    marker = ""
    id_marker = ""
    count = 0
    while True:
        res = oss_instance.get_all_multipart_uploads(bucket, key_marker = marker, upload_id_marker=id_marker)
        if res.status != 200:
            break
        body = res.read()
        hh = GetMultipartUploadsXml(body)
        (fl, pl) = hh.list()
        for i in fl:
            count += 1
            object = i[0]
            # Python 2: object keys come back as unicode.
            if isinstance(i[0], unicode):
                object = i[0].encode('utf-8')
            res = oss_instance.cancel_upload(bucket, object, i[1])
            # 404 means the upload is already gone — not an error.
            if (res.status / 100 != 2 and res.status != 404):
                print "clear_all_objects_in_bucket: cancel upload fail, ret is:", res.status
            else:
                pass
        if hh.is_truncated:
            marker = hh.next_key_marker
            id_marker = hh.next_upload_id_marker
        else:
            break
        if len(marker) == 0:
            break
    # Step 3: delete the (now empty) bucket; 404 is treated as success.
    res = oss_instance.delete_bucket(bucket)
    if (res.status / 100 != 2 and res.status != 404):
        print "clear_all_objects_in_bucket: delete bucket fail, ret is: %s, request id is:%s" % (res.status, res.getheader("x-oss-request-id"))
        return False
    return True
def clean_all_bucket(oss_instance):
    '''
    it will clean all bucket, including the all objects in bucket.
    Returns True when every bucket was removed, False otherwise.
    '''
    res = oss_instance.get_service()
    if (res.status / 100) == 2:
        h = GetServiceXml(res.read())
        # NOTE(review): result of h.list() is unused; the loop below
        # iterates h.bucket_list directly.
        bucket_list = h.list()
        for b in h.bucket_list:
            if not clear_all_objects_in_bucket(oss_instance, b.name):
                print "clean bucket ", b.name, " failed! in clean_all_bucket"
                return False
        return True
    else:
        print "failed! get service in clean_all_bucket return ", res.status
        print res.read()
        print res.getheaders()
        return False
def delete_all_parts_of_object_group(oss, bucket, object_group_name):
    '''
    Delete every part object referenced by an object group's index.

    Returns True on success; False when the index cannot be fetched or
    any part deletion fails (the failing part is printed).
    '''
    res = oss.get_object_group_index(bucket, object_group_name)
    if res.status == 200:
        body = res.read()
        h = GetObjectGroupIndexXml(body)
        object_group_index = h.list()
        for i in object_group_index:
            # Each index entry is expected as a 4-tuple whose second
            # element is the part object name.
            if len(i) == 4 and len(i[1]) > 0:
                part_name = i[1].strip()
                res = oss.delete_object(bucket, part_name)
                if res.status != 204:
                    print "delete part ", part_name, " in bucket:", bucket, " failed!"
                    return False
    else:
        return False
    return True;
class GetAllObjects:
    # Accumulates every object key of a bucket into self.object_list by
    # walking the paginated get_bucket listing.
    def __init__(self):
        # Flat list of object keys collected so far (UTF-8 byte strings).
        self.object_list = []

    def get_object_in_bucket(self, oss, bucket="", marker="", prefix=""):
        '''
        Fetch one page (up to 1000 keys) of the bucket listing.

        Returns (object_list, next_marker); any exception during the
        request/parse is swallowed and yields ([], "") semantics for
        whatever was not reached.
        '''
        object_list = []
        maxkeys = 1000
        try:
            res = oss.get_bucket(bucket, prefix, marker, maxkeys=maxkeys)
            body = res.read()
            hh = GetBucketXml(body)
            (fl, pl) = hh.list()
            if len(fl) != 0:
                for i in fl:
                    # Python 2: keys from the XML parser are unicode.
                    # NOTE(review): non-unicode entries are skipped —
                    # presumably the parser always yields unicode; confirm.
                    if isinstance(i[0], unicode):
                        object = i[0].encode('utf-8')
                        object_list.append(object)
                marker = hh.nextmarker
        except:
            pass
        return (object_list, marker)

    def get_all_object_in_bucket(self, oss, bucket="", marker="", prefix=""):
        # NOTE(review): the *marker* parameter is ignored — pagination
        # always starts from "" via marker2; confirm callers expect that.
        marker2 = ""
        while True:
            (object_list, marker) = self.get_object_in_bucket(oss, bucket, marker2, prefix)
            marker2 = marker
            if len(object_list) != 0:
                self.object_list.extend(object_list)
            # Empty next-marker means the listing is complete.
            if len(marker) == 0:
                break
def get_all_buckets(oss):
    """Return the names of every bucket owned by the account.

    Names are stripped str values; a failed service call yields [].
    """
    names = []
    res = oss.get_service()
    if res.status == 200:
        service_xml = GetServiceXml(res.read())
        for entry in service_xml.bucket_list:
            names.append(str(entry.name).strip())
    return names
def get_object_list_marker_from_xml(body):
    '''
    Parse one get_bucket XML page.

    Returns ([(object_name, object_length, last_modify_time, etag)...],
    next_marker); next_marker is "" on the final page.
    '''
    #return ([(object_name, object_length, last_modify_time)...], marker)
    object_meta_list = []
    next_marker = ""
    hh = GetBucketXml(body)
    (fl, pl) = hh.list()
    if len(fl) != 0:
        for i in fl:
            # Python 2: encode unicode keys to UTF-8 byte strings.
            if isinstance(i[0], unicode):
                object = i[0].encode('utf-8')
            else:
                object = i[0]
            last_modify_time = i[1]
            length = i[3]
            etag = i[2]
            object_meta_list.append((object, length, last_modify_time, etag))
    if hh.is_truncated:
        next_marker = hh.nextmarker
    return (object_meta_list, next_marker)
def get_upload_id(oss, bucket, object, headers = None):
    '''
    get the upload id of object.

    Initiates a new multipart upload and returns its upload id.
    Returns:
        string — "" on failure (the error response is printed).
    '''
    if not headers:
        headers = {}
    upload_id = ""
    res = oss.init_multi_upload(bucket, object, headers)
    if res.status == 200:
        body = res.read()
        h = GetInitUploadIdXml(body)
        upload_id = h.upload_id
    else:
        print res.status
        print res.getheaders()
        print res.read()
    return upload_id
def get_all_upload_id_list(oss, bucket):
    '''
    get all upload id of bucket

    Walks the paginated multipart-uploads listing and collects every
    entry (whole tuples as parsed from the XML).
    Returns:
        list — possibly partial if a page request fails mid-walk.
    '''
    all_upload_id_list = []
    marker = ""
    id_marker = ""
    while True:
        res = oss.get_all_multipart_uploads(bucket, key_marker = marker, upload_id_marker=id_marker)
        if res.status != 200:
            return all_upload_id_list
        body = res.read()
        hh = GetMultipartUploadsXml(body)
        (fl, pl) = hh.list()
        for i in fl:
            all_upload_id_list.append(i)
        if hh.is_truncated:
            # Continue from the server-provided pagination markers.
            marker = hh.next_key_marker
            id_marker = hh.next_upload_id_marker
        else:
            break
        if len(marker) == 0 and len(id_marker) == 0:
            break
    return all_upload_id_list
def get_upload_id_list(oss, bucket, object):
    '''
    get all upload id list of one object.

    Uses *object* as the listing prefix, so ids of objects sharing the
    prefix may be included as well.
    Returns:
        list of upload-id strings
    '''
    upload_id_list = []
    marker = ""
    id_marker = ""
    while True:
        res = oss.get_all_multipart_uploads(bucket, prefix=object, key_marker = marker, upload_id_marker=id_marker)
        if res.status != 200:
            break
        body = res.read()
        hh = GetMultipartUploadsXml(body)
        (fl, pl) = hh.list()
        for i in fl:
            # Each entry is (key, upload_id, ...); keep the id only.
            upload_id_list.append(i[1])
        if hh.is_truncated:
            marker = hh.next_key_marker
            id_marker = hh.next_upload_id_marker
        else:
            break
        if len(marker) == 0:
            break
    return upload_id_list
def get_part_list(oss, bucket, object, upload_id, max_part=""):
    '''
    get uploaded part list of object.

    Walks the paginated list-parts API for *upload_id*.
    Returns:
        list of part tuples as parsed from the XML (possibly partial
        if a page request fails mid-walk).
    '''
    part_list = []
    marker = ""
    while True:
        res = oss.get_all_parts(bucket, object, upload_id, part_number_marker = marker, max_parts=max_part)
        if res.status != 200:
            break
        body = res.read()
        h = GetPartsXml(body)
        part_list.extend(h.list())
        if h.is_truncated:
            marker = h.next_part_number_marker
        else:
            break
        if len(marker) == 0:
            break
    return part_list
def get_part_xml(oss, bucket, object, upload_id):
    '''
    get uploaded part list of object.

    Fetches every uploaded part of *upload_id* and renders the
    <CompleteMultipartUpload> request body.
    Returns:
        string
    '''
    pieces = [r'<CompleteMultipartUpload>']
    for part in get_part_list(oss, bucket, object, upload_id):
        pieces.append(r'<Part>')
        pieces.append(r'<PartNumber>' + str(part[0]) + r'</PartNumber>')
        pieces.append(r'<ETag>' + part[1] + r'</ETag>')
        pieces.append(r'</Part>')
    pieces.append(r'</CompleteMultipartUpload>')
    return ''.join(pieces)
def get_part_map(oss, bucket, object, upload_id):
    '''Map part number (as a string) to its ETag for *upload_id*.'''
    return dict((str(part[0]), part[1])
                for part in get_part_list(oss, bucket, object, upload_id))
########## multi-thread ##########
class DeleteObjectWorker(Thread):
    # Worker thread that deletes its slice of object names in batches
    # of up to 1000 (the batch-delete API limit), retrying each batch.
    def __init__(self, oss, bucket, part_msg_list, retry_times=5):
        Thread.__init__(self)
        self.oss = oss
        self.bucket = bucket
        # List of object keys this worker is responsible for.
        self.part_msg_list = part_msg_list
        # Max attempts per batch before giving up (error is printed).
        self.retry_times = retry_times

    def run(self):
        bucket = self.bucket
        object_list = self.part_msg_list
        step = 1000  # batch-delete API accepts at most 1000 keys
        begin = 0
        end = 0
        total_length = len(object_list)
        remain_length = total_length
        while True:
            if remain_length > step:
                end = begin + step
            elif remain_length > 0:
                end = begin + remain_length
            else:
                break
            is_fail = True
            retry_times = self.retry_times
            while True:
                try:
                    if retry_times <= 0:
                        break
                    res = self.oss.delete_objects(bucket, object_list[begin:end])
                    if res.status / 100 == 2:
                        is_fail = False
                        break
                except:
                    # Transient failure: back off one second and retry.
                    retry_times = retry_times - 1
                    time.sleep(1)
            if is_fail:
                print "delete object_list[%s:%s] failed!, first is %s" % (begin, end, object_list[begin])
            begin = end
            remain_length = remain_length - step
class PutObjectGroupWorker(Thread):
    # Worker thread that uploads its slice of file parts as individual
    # objects (object-group upload), skipping parts whose remote ETag
    # already matches the local md5.
    def __init__(self, oss, bucket, file_path, part_msg_list, retry_times=5):
        Thread.__init__(self)
        self.oss = oss
        self.bucket = bucket
        # Entries of (part_number, object_name, md5sum, size, offset).
        self.part_msg_list = part_msg_list
        self.file_path = file_path
        self.retry_times = retry_times

    def run(self):
        for part in self.part_msg_list:
            if len(part) == 5:
                bucket = self.bucket
                file_name = part[1]
                # NOTE(review): this assigns 'filename' (unused), not
                # 'file_name' — the UTF-8 encoding result is discarded
                # and object_name below stays unicode; looks like a bug.
                if isinstance(file_name, unicode):
                    filename = file_name.encode('utf-8')
                object_name = file_name
                retry_times = self.retry_times
                is_skip = False
                # Probe the remote object; skip upload when its ETag
                # already matches this part's md5.
                # NOTE(review): retry_times only decrements on exception —
                # a non-matching 200 (or non-200) response appears to
                # loop without bound; confirm intended behavior.
                while True:
                    try:
                        if retry_times <= 0:
                            break
                        res = self.oss.head_object(bucket, object_name)
                        if res.status == 200:
                            header_map = convert_header2map(res.getheaders())
                            etag = safe_get_element("etag", header_map)
                            md5 = part[2]  # local name shadows the md5 module
                            if etag.replace('"', "").upper() == md5.upper():
                                is_skip = True
                                break
                    except:
                        retry_times = retry_times - 1
                        time.sleep(1)
                if is_skip:
                    continue
                partsize = part[3]
                offset = part[4]
                retry_times = self.retry_times
                # Upload this part's byte range, retrying on failure.
                while True:
                    try:
                        if retry_times <= 0:
                            break
                        res = self.oss.put_object_from_file_given_pos(bucket, object_name, self.file_path, offset, partsize)
                        if res.status != 200:
                            print "upload ", file_name, "failed!"," ret is:", res.status
                            print "headers", res.getheaders()
                            retry_times = retry_times - 1
                            time.sleep(1)
                        else:
                            break
                    except:
                        retry_times = retry_times - 1
                        time.sleep(1)
            else:
                print "ERROR! part", part , " is not as expected!"
class UploadPartWorker(Thread):
    # Worker thread that uploads its slice of multipart-upload parts,
    # skipping parts already present with a matching ETag.
    def __init__(self, oss, bucket, object, upoload_id, file_path, part_msg_list, uploaded_part_map, retry_times=5):
        # ("upoload_id" typo is part of the public signature; kept for
        # backward compatibility with positional/keyword callers.)
        Thread.__init__(self)
        self.oss = oss
        self.bucket = bucket
        self.object = object
        # Entries of (part_number, object_name, md5sum, size, offset).
        self.part_msg_list = part_msg_list
        self.file_path = file_path
        self.upload_id = upoload_id
        # part-number (str) -> ETag of parts already on the server.
        self.uploaded_part_map = uploaded_part_map
        self.retry_times = retry_times

    def run(self):
        for part in self.part_msg_list:
            part_number = str(part[0])
            if len(part) == 5:
                bucket = self.bucket
                object = self.object
                # Skip parts whose server-side ETag matches the local md5.
                if self.uploaded_part_map.has_key(part_number):
                    md5 = part[2]
                    if self.uploaded_part_map[part_number].replace('"', "").upper() == md5.upper():
                        continue
                partsize = part[3]
                offset = part[4]
                retry_times = self.retry_times
                while True:
                    try:
                        if retry_times <= 0:
                            break
                        res = self.oss.upload_part_from_file_given_pos(bucket, object, self.file_path, offset, partsize, self.upload_id, part_number)
                        if res.status != 200:
                            log.warn("Upload %s/%s from %s, failed! ret is:%s." %(bucket, object, self.file_path, res.status))
                            log.warn("headers:%s" % res.getheaders())
                            retry_times = retry_times - 1
                            time.sleep(1)
                        else:
                            log.info("Upload %s/%s from %s, OK! ret is:%s." % (bucket, object, self.file_path, res.status))
                            break
                    except:
                        # Transient failure: back off and retry.
                        retry_times = retry_times - 1
                        time.sleep(1)
            else:
                log.error("ERROR! part %s is not as expected!" % part)
class MultiGetWorker(Thread):
    # Worker thread that downloads one byte range [start, end] of an
    # object via an HTTP Range GET and writes it into *file* at the
    # same offset.  The file handle is flushed and closed by this
    # worker when it finishes.
    def __init__(self, oss, bucket, object, file, start, end, retry_times=5):
        Thread.__init__(self)
        self.oss = oss
        self.bucket = bucket
        self.object = object
        self.startpos = start
        self.endpos = end
        self.file = file
        # Total bytes this worker must read (range is inclusive).
        self.length = self.endpos - self.startpos + 1
        self.need_read = 0
        # Read the response in 10 MB chunks.
        self.get_buffer_size = 10*1024*1024
        self.retry_times = retry_times

    def run(self):
        if self.startpos >= self.endpos:
            return
        retry_times = 0
        while True:
            headers = {}
            # Rewind on every attempt so a partial write is overwritten.
            self.file.seek(self.startpos)
            headers['Range'] = 'bytes=%d-%d' % (self.startpos, self.endpos)
            try:
                res = self.oss.object_operation("GET", self.bucket, self.object, headers)
                # 206 Partial Content is the expected answer to a Range GET.
                if res.status == 206:
                    while self.need_read < self.length:
                        left_len = self.length - self.need_read
                        if left_len > self.get_buffer_size:
                            content = res.read(self.get_buffer_size)
                        else:
                            content = res.read(left_len)
                        if content:
                            self.need_read += len(content)
                            self.file.write(content)
                        else:
                            break
                    break
            except:
                pass
            retry_times += 1
            if retry_times > self.retry_times:
                print "ERROR, reach max retry times:%s when multi get /%s/%s" % (self.retry_times, self.bucket, self.object)
                break
        self.file.flush()
        self.file.close()
############### misc ###############
def split_large_file(file_path, object_prefix = "", max_part_num = 1000, part_size = 10 * 1024 * 1024, buffer_size = 10 * 1024):
    '''
    Plan the split of *file_path* into at most *max_part_num* parts.

    Nothing is written: the file is only read (in *buffer_size* chunks)
    to compute each part's md5.  Each planned part is described as
    (part_order, object_name, md5sum, real_part_size, offset), where
    object_name is "<md5-of-name>_<basename>_<order>" optionally
    prefixed by *object_prefix* + "/".  Returns [] when the file does
    not exist (an error is printed).  Python 2: relies on integer '/'.
    '''
    parts_list = []
    if os.path.isfile(file_path):
        file_size = os.path.getsize(file_path)
        if file_size > part_size * max_part_num:
            # Grow part_size so the file fits in max_part_num parts.
            part_size = (file_size + max_part_num - file_size % max_part_num) / max_part_num
        part_order = 1
        fp = open(file_path, 'rb')
        fp.seek(os.SEEK_SET)
        total_split_len = 0
        part_num = file_size / part_size
        if file_size % part_size != 0:
            part_num += 1
        for i in range(0, part_num):
            left_len = part_size
            real_part_size = 0
            m = md5.new()
            offset = part_size * i
            # Hash this part chunk-by-chunk; the final (short) part is
            # bounded by what read() actually returns.
            while True:
                read_size = 0
                if left_len <= 0:
                    break
                elif left_len < buffer_size:
                    read_size = left_len
                else:
                    read_size = buffer_size
                buffer_content = fp.read(read_size)
                m.update(buffer_content)
                real_part_size += len(buffer_content)
                left_len = left_len - read_size
            md5sum = m.hexdigest()
            temp_file_name = os.path.basename(file_path) + "_" + str(part_order)
            # Python 2: encode a unicode prefix once, in place.
            if isinstance(object_prefix, unicode):
                object_prefix = object_prefix.encode('utf-8')
            if len(object_prefix) == 0:
                file_name = sum_string(temp_file_name) + "_" + temp_file_name
            else:
                file_name = object_prefix + "/" + sum_string(temp_file_name) + "_" + temp_file_name
            part_msg = (part_order, file_name, md5sum, real_part_size, offset)
            total_split_len += real_part_size
            parts_list.append(part_msg)
            part_order += 1
        fp.close()
    else:
        print "ERROR! No file: ", file_path, ", please check."
    return parts_list
def sumfile(fobj):
    '''Returns an md5 hash (hex digest) for an object with read() method.

    Reads *fobj* to exhaustion in 8096-byte chunks; the caller is
    responsible for closing it.
    '''
    # hashlib replaces the Python-2-only 'md5' module; identical digests
    # and available on both Python 2 and 3.
    import hashlib
    m = hashlib.md5()
    while True:
        d = fobj.read(8096)
        if not d:
            break
        m.update(d)
    return m.hexdigest()
def md5sum(fname):
    '''Returns an md5 hash for file fname, or stdin if fname is "-".

    On an unreadable file the string 'Failed to open file' is returned
    instead of raising (historic behavior kept for callers).
    '''
    # BUG FIX: 'sys' was used here but never imported anywhere in the
    # module, so the stdin branch raised NameError.
    import sys
    if fname == '-':
        ret = sumfile(sys.stdin)
    else:
        try:
            # open() replaces the Python-2-only file() builtin and works
            # on both Python 2 and 3; IOError is the narrow failure here.
            f = open(fname, 'rb')
        except IOError:
            return 'Failed to open file'
        ret = sumfile(f)
        f.close()
    return ret
def md5sum2(filename, offset = 0, partsize = 0):
    '''Return the md5 hex digest of *partsize* bytes of *filename*
    starting at *offset*.

    An offset past the end of the file seeks to EOF (hashing nothing).
    '''
    # hashlib replaces the Python-2-only 'md5' module (same digests).
    import hashlib
    m = hashlib.md5()
    fp = open(filename, 'rb')
    if offset > os.path.getsize(filename):
        # NOTE: arguments look swapped vs seek(offset, whence), but
        # seek(0, SEEK_END) coincidentally lands at EOF as intended.
        fp.seek(os.SEEK_SET, os.SEEK_END)
    else:
        fp.seek(offset)
    left_len = partsize
    BufferSize = 8 * 1024
    while True:
        if left_len <= 0:
            break
        elif left_len < BufferSize:
            buffer_content = fp.read(left_len)
        else:
            buffer_content = fp.read(BufferSize)
        m.update(buffer_content)
        left_len = left_len - len(buffer_content)
    md5sum = m.hexdigest()
    # BUG FIX: the file handle was previously leaked.
    fp.close()
    return md5sum
def sum_string(content):
    '''Return the md5 hex digest of the byte string *content*.'''
    # Python 2 StringIO: wraps the string so sumfile() can read() it.
    f = StringIO.StringIO(content)
    md5sum = sumfile(f)
    f.close()
    return md5sum
def convert_header2map(header_list):
    '''Convert a list of (name, value) header pairs into a dict.

    Later duplicates of a name overwrite earlier ones, matching dict
    construction from pairs.
    '''
    return dict(header_list)
def safe_get_element(name, container):
    '''Look up *name* in dict *container*, ignoring case and
    surrounding whitespace on both sides.

    Returns the matching value, or "" when no key matches.
    '''
    wanted = name.strip().lower()
    for key, value in container.items():
        if key.strip().lower() == wanted:
            return value
    return ""
def get_content_type_by_filename(file_name):
    '''Guess the MIME type of *file_name* from its extension.

    mimetypes is consulted first; unknown extensions fall back to a
    built-in table, and finally to 'application/octet-stream'.
    '''
    suffix = ""
    name = os.path.basename(file_name)
    suffix = name.split('.')[-1]
    #http://www.iangraham.org/books/html4ed/appb/mimetype.html
    # Fallback table for extensions mimetypes may not know.
    # (renamed from 'map' to avoid shadowing the builtin)
    mime_map = {}
    mime_map['html'] = 'text/html'
    mime_map['htm'] = 'text/html'
    mime_map['asc'] = 'text/plain'
    mime_map['txt'] = 'text/plain'
    mime_map['c'] = 'text/plain'
    mime_map['c++'] = 'text/plain'
    mime_map['cc'] = 'text/plain'
    mime_map['cpp'] = 'text/plain'
    mime_map['h'] = 'text/plain'
    mime_map['rtx'] = 'text/richtext'
    mime_map['rtf'] = 'text/rtf'
    mime_map['sgml'] = 'text/sgml'
    mime_map['sgm'] = 'text/sgml'
    mime_map['tsv'] = 'text/tab-separated-values'
    mime_map['wml'] = 'text/vnd.wap.wml'
    mime_map['wmls'] = 'text/vnd.wap.wmlscript'
    mime_map['etx'] = 'text/x-setext'
    mime_map['xsl'] = 'text/xml'
    mime_map['xml'] = 'text/xml'
    mime_map['talk'] = 'text/x-speech'
    mime_map['css'] = 'text/css'
    mime_map['gif'] = 'image/gif'
    mime_map['xbm'] = 'image/x-xbitmap'
    mime_map['xpm'] = 'image/x-xpixmap'
    mime_map['png'] = 'image/png'
    mime_map['ief'] = 'image/ief'
    mime_map['jpeg'] = 'image/jpeg'
    mime_map['jpg'] = 'image/jpeg'
    mime_map['jpe'] = 'image/jpeg'
    mime_map['tiff'] = 'image/tiff'
    mime_map['tif'] = 'image/tiff'
    mime_map['rgb'] = 'image/x-rgb'
    mime_map['g3f'] = 'image/g3fax'
    mime_map['xwd'] = 'image/x-xwindowdump'
    mime_map['pict'] = 'image/x-pict'
    mime_map['ppm'] = 'image/x-portable-pixmap'
    mime_map['pgm'] = 'image/x-portable-graymap'
    mime_map['pbm'] = 'image/x-portable-bitmap'
    mime_map['pnm'] = 'image/x-portable-anymap'
    mime_map['bmp'] = 'image/bmp'
    mime_map['ras'] = 'image/x-cmu-raster'
    mime_map['pcd'] = 'image/x-photo-cd'
    mime_map['wi'] = 'image/wavelet'
    mime_map['dwg'] = 'image/vnd.dwg'
    mime_map['dxf'] = 'image/vnd.dxf'
    mime_map['svf'] = 'image/vnd.svf'
    mime_map['cgm'] = 'image/cgm'
    mime_map['djvu'] = 'image/vnd.djvu'
    mime_map['djv'] = 'image/vnd.djvu'
    mime_map['wbmp'] = 'image/vnd.wap.wbmp'
    mime_map['ez'] = 'application/andrew-inset'
    mime_map['cpt'] = 'application/mac-compactpro'
    mime_map['doc'] = 'application/msword'
    mime_map['msw'] = 'application/x-dox_ms_word'
    mime_map['oda'] = 'application/oda'
    mime_map['dms'] = 'application/octet-stream'
    mime_map['lha'] = 'application/octet-stream'
    mime_map['lzh'] = 'application/octet-stream'
    mime_map['class'] = 'application/octet-stream'
    mime_map['so'] = 'application/octet-stream'
    mime_map['dll'] = 'application/octet-stream'
    mime_map['pdf'] = 'application/pdf'
    mime_map['ai'] = 'application/postscript'
    mime_map['eps'] = 'application/postscript'
    mime_map['ps'] = 'application/postscript'
    mime_map['smi'] = 'application/smil'
    mime_map['smil'] = 'application/smil'
    mime_map['mif'] = 'application/vnd.mif'
    mime_map['xls'] = 'application/vnd.ms-excel'
    mime_map['xlc'] = 'application/vnd.ms-excel'
    mime_map['xll'] = 'application/vnd.ms-excel'
    mime_map['xlm'] = 'application/vnd.ms-excel'
    mime_map['xlw'] = 'application/vnd.ms-excel'
    mime_map['ppt'] = 'application/vnd.ms-powerpoint'
    mime_map['ppz'] = 'application/vnd.ms-powerpoint'
    mime_map['pps'] = 'application/vnd.ms-powerpoint'
    mime_map['pot'] = 'application/vnd.ms-powerpoint'
    mime_map['wbxml'] = 'application/vnd.wap.wbxml'
    mime_map['wmlc'] = 'application/vnd.wap.wmlc'
    mime_map['wmlsc'] = 'application/vnd.wap.wmlscriptc'
    mime_map['vcd'] = 'application/x-cdlink'
    mime_map['pgn'] = 'application/x-chess-pgn'
    mime_map['dcr'] = 'application/x-director'
    mime_map['dir'] = 'application/x-director'
    mime_map['dxr'] = 'application/x-director'
    mime_map['spl'] = 'application/x-futuresplash'
    mime_map['gtar'] = 'application/x-gtar'
    mime_map['tar'] = 'application/x-tar'
    mime_map['ustar'] = 'application/x-ustar'
    mime_map['bcpio'] = 'application/x-bcpio'
    mime_map['cpio'] = 'application/x-cpio'
    mime_map['shar'] = 'application/x-shar'
    mime_map['zip'] = 'application/zip'
    mime_map['hqx'] = 'application/mac-binhex40'
    mime_map['sit'] = 'application/x-stuffit'
    mime_map['sea'] = 'application/x-stuffit'
    mime_map['bin'] = 'application/octet-stream'
    mime_map['exe'] = 'application/octet-stream'
    mime_map['src'] = 'application/x-wais-source'
    mime_map['wsrc'] = 'application/x-wais-source'
    mime_map['hdf'] = 'application/x-hdf'
    mime_map['js'] = 'application/x-javascript'
    mime_map['sh'] = 'application/x-sh'
    mime_map['csh'] = 'application/x-csh'
    mime_map['pl'] = 'application/x-perl'
    mime_map['tcl'] = 'application/x-tcl'
    mime_map['skp'] = 'application/x-koan'
    mime_map['skd'] = 'application/x-koan'
    mime_map['skt'] = 'application/x-koan'
    mime_map['skm'] = 'application/x-koan'
    mime_map['nc'] = 'application/x-netcdf'
    mime_map['cdf'] = 'application/x-netcdf'
    mime_map['swf'] = 'application/x-shockwave-flash'
    mime_map['sv4cpio'] = 'application/x-sv4cpio'
    mime_map['sv4crc'] = 'application/x-sv4crc'
    mime_map['t'] = 'application/x-troff'
    mime_map['tr'] = 'application/x-troff'
    mime_map['roff'] = 'application/x-troff'
    mime_map['man'] = 'application/x-troff-man'
    mime_map['me'] = 'application/x-troff-me'
    mime_map['ms'] = 'application/x-troff-ms'
    mime_map['latex'] = 'application/x-latex'
    mime_map['tex'] = 'application/x-tex'
    mime_map['texinfo'] = 'application/x-texinfo'
    mime_map['texi'] = 'application/x-texinfo'
    mime_map['dvi'] = 'application/x-dvi'
    mime_map['xhtml'] = 'application/xhtml+xml'
    mime_map['xht'] = 'application/xhtml+xml'
    mime_map['au'] = 'audio/basic'
    mime_map['snd'] = 'audio/basic'
    mime_map['aif'] = 'audio/x-aiff'
    mime_map['aiff'] = 'audio/x-aiff'
    mime_map['aifc'] = 'audio/x-aiff'
    mime_map['wav'] = 'audio/x-wav'
    mime_map['mpa'] = 'audio/x-mpeg'
    mime_map['abs'] = 'audio/x-mpeg'
    mime_map['mpega'] = 'audio/x-mpeg'
    mime_map['mp2a'] = 'audio/x-mpeg2'
    mime_map['mpa2'] = 'audio/x-mpeg2'
    mime_map['mid'] = 'audio/midi'
    mime_map['midi'] = 'audio/midi'
    mime_map['kar'] = 'audio/midi'
    mime_map['mp2'] = 'audio/mpeg'
    mime_map['mp3'] = 'audio/mpeg'
    mime_map['m3u'] = 'audio/x-mpegurl'
    mime_map['ram'] = 'audio/x-pn-realaudio'
    mime_map['rm'] = 'audio/x-pn-realaudio'
    mime_map['rpm'] = 'audio/x-pn-realaudio-plugin'
    mime_map['ra'] = 'audio/x-realaudio'
    mime_map['pdb'] = 'chemical/x-pdb'
    mime_map['xyz'] = 'chemical/x-xyz'
    mime_map['igs'] = 'model/iges'
    mime_map['iges'] = 'model/iges'
    mime_map['msh'] = 'model/mesh'
    mime_map['mesh'] = 'model/mesh'
    mime_map['silo'] = 'model/mesh'
    mime_map['wrl'] = 'model/vrml'
    mime_map['vrml'] = 'model/vrml'
    mime_map['vrw'] = 'x-world/x-vream'
    mime_map['svr'] = 'x-world/x-svr'
    mime_map['wvr'] = 'x-world/x-wvr'
    mime_map['3dmf'] = 'x-world/x-3dmf'
    mime_map['p3d'] = 'application/x-p3d'
    mime_map['mpeg'] = 'video/mpeg'
    mime_map['mpg'] = 'video/mpeg'
    mime_map['mpe'] = 'video/mpeg'
    mime_map['mpv2'] = 'video/mpeg2'
    mime_map['mp2v'] = 'video/mpeg2'
    mime_map['qt'] = 'video/quicktime'
    mime_map['mov'] = 'video/quicktime'
    mime_map['avi'] = 'video/x-msvideo'
    mime_map['movie'] = 'video/x-sgi-movie'
    mime_map['vdo'] = 'video/vdo'
    mime_map['viv'] = 'video/viv'
    mime_map['mxu'] = 'video/vnd.mpegurl'
    mime_map['ice'] = 'x-conference/x-cooltalk'
    import mimetypes
    mimetypes.init()
    mime_type = ""
    try:
        mime_type = mimetypes.types_map["." + suffix]
    # BUG FIX: 'except Exception, e:' is Python-2-only syntax (and 'e'
    # was unused); the lookup can only raise KeyError, so catch exactly
    # that — valid on both Python 2 and 3.
    except KeyError:
        # 'has_key' replaced by 'in' (works on both Python 2 and 3).
        if suffix in mime_map:
            mime_type = mime_map[suffix]
        else:
            mime_type = 'application/octet-stream'
    return mime_type
def smart_code(input_stream):
    '''
    Best-effort decode of a Python 2 byte string to unicode.

    Tries utf-8, then gbk, then big5, then ascii; if every codec fails
    the original byte string is returned unchanged.  Non-str input is
    passed through untouched.
    '''
    if isinstance(input_stream, str):
        try:
            tmp = unicode(input_stream, 'utf-8')
        except UnicodeDecodeError:
            try:
                tmp = unicode(input_stream, 'gbk')
            except UnicodeDecodeError:
                try:
                    tmp = unicode(input_stream, 'big5')
                except UnicodeDecodeError:
                    try:
                        tmp = unicode(input_stream, 'ascii')
                    except:
                        # Undecodable: hand back the raw bytes.
                        tmp = input_stream
    else:
        tmp = input_stream
    return tmp
def is_ip(s):
    """Return True if *s* looks like an IPv4 address (optionally with a
    ':port' suffix) or is the literal 'localhost'; False otherwise.

    Examples: '127.0.0.1', 'localhost:8080' -> True; '1.2.3', '256.0.0.1',
    non-string input -> False.
    """
    try:
        # Drop any ':port' suffix before validating the host part.
        host = s.split(':')[0]
        if host == 'localhost':
            return True
        octets = host.split('.')
        if len(octets) != 4:
            return False
        for octet in octets:
            # int() raises ValueError on non-numeric octets, handled below.
            if int(octet) < 0 or int(octet) > 255:
                return False
    # Narrowed from a bare `except:`: AttributeError covers non-string input
    # (no .split), ValueError covers non-numeric octets.
    except (AttributeError, ValueError):
        return False
    return True
if __name__ == '__main__':
    # This module is intended to be imported as a utility library; it has no
    # standalone command-line behavior.
    pass
| matrixorz/justpic | justpic/third/oss/oss_util.py | Python | mit | 36,295 | [
"NetCDF"
] | 72b1c935d75386f248e53e632489e5c4a592e9b9026ddb9c94985446d478fdaf |
# -*- coding: utf-8 -*-
# MDclt.primary.amber.log.py
#
# Copyright (C) 2012-2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Classes for transfer of AMBER simulation logs to h5
"""
################################### MODULES ####################################
from __future__ import division, print_function
import os, sys
import numpy as np
from MDclt import Block, Block_Acceptor, primary
################################## FUNCTIONS ###################################
def add_parser(tool_subparsers, **kwargs):
    """
    Registers the 'log' subparser, used to load AMBER simulation logs,
    on a nascent argument parser.

    **Arguments:**
        :*tool_subparsers*: Argparse subparsers object on which to register
        :*\*\*kwargs*:      Passed through to the parser constructor

    .. todo:
        - Implement nested subparser (should be 'amber log', not just 'log')
    """
    from MDclt import overridable_defaults

    sub = primary.add_parser(tool_subparsers,
      name = "log",
      help = "Load AMBER logs")
    groups = {group.title: group for group in sub._action_groups}

    # Input options
    groups["input"].add_argument(
      "-frames_per_file",
      type     = int,
      required = False,
      help     = "Number of frames in each file; used to check if new data "
                 "is present")
    groups["input"].add_argument(
      "-start_time",
      type     = float,
      required = False,
      help     = "Time of first frame (ns) (optional)")

    # Output options
    groups["output"].add_argument(
      "-output",
      type     = str,
      required = True,
      nargs    = "+",
      action   = overridable_defaults(nargs = 2, defaults = {1: "/log"}),
      help     = "H5 file and optionally address in which to output data "
                 "(default address: /log)")

    # Route parsed arguments to this tool's entry point
    sub.set_defaults(analysis = command_line)
def command_line(n_cores = 1, **kwargs):
    """
    Provides command line functionality for this analysis

    **Arguments:**
        :*n_cores*:    Number of cores to use
        :*\*\*kwargs*: Passed to the block generator and block acceptor

    .. todo:
        - Figure out syntax to get this into MDclt.primary
    """
    from multiprocessing import Pool
    from MDclt import pool_director

    block_generator = AmberLog_Block_Generator(**kwargs)
    block_acceptor  = Block_Acceptor(outputs = block_generator.outputs,
                        **kwargs)
    if n_cores == 1:                 # Serial
        for block in block_generator:
            block()
            block_acceptor.send(block)
    else:                            # Parallel (processes)
        pool = Pool(n_cores)
        # BUG FIX: send() was previously outside this loop (the loop body was
        # `pass`), so only the last completed block reached the acceptor and
        # an empty generator raised NameError. Each completed block must be
        # forwarded, mirroring the serial branch above.
        for block in pool.imap_unordered(pool_director, block_generator):
            block_acceptor.send(block)
        pool.close()
        pool.join()
    block_acceptor.close()
################################### CLASSES ####################################
class AmberLog_Block_Generator(primary.Primary_Block_Generator):
    """
    Generator class that prepares blocks of analysis of AMBER mdout logs.

    Each yielded AmberLog_Block covers the frames of one input file and
    knows the slice of the output h5 dataset it should fill.
    """
    # (raw mdout field name, output dataset field name, units or None)
    fields = [("TIME(PS)", "time", "ns"),
              ("Etot", "total energy", "kcal mol-1"),
              ("EPtot", "potential energy", "kcal mol-1"),
              ("EKtot", "kinetic energy", "kcal mol-1"),
              ("BOND", "bond energy", "kcal mol-1"),
              ("ANGLE", "angle energy", "kcal mol-1"),
              ("DIHED", "dihedral energy", "kcal mol-1"),
              ("EELEC", "coulomb energy", "kcal mol-1"),
              ("1-4 EEL", "coulomb 1-4 energy", "kcal mol-1"),
              ("VDWAALS", "van der Waals energy", "kcal mol-1"),
              ("1-4 NB", "van der Waals 1-4 energy", "kcal mol-1"),
              ("EHBOND", "hydrogen bond energy", "kcal mol-1"),
              ("RESTRAINT", "position restraint energy", "kcal mol-1"),
              ("EKCMT", "center of mass motion kinetic energy",
                "kcal mol-1"),
              ("VIRIAL", "virial energy", "kcal mol-1"),
              ("EPOLZ", "polarization energy", "kcal mol-1"),
              ("TEMP(K)", "temperature", "K"),
              ("PRESS", "pressure", "bar"),
              ("VOLUME", "volume", "A3"),
              ("Density", "density", "g/cm3"),
              ("Dipole convergence: rms",
                "dipole convergence rms", None),
              ("iters", "dipole convergence iterations",
                None)]
    def __init__(self, infiles, output, frames_per_file = None, **kwargs):
        """
        Initializes generator

        **Arguments:**
            :*output*:          List including path to h5 file and
                                address within h5 file
            :*infiles*:         List of infiles
            :*frames_per_file*: Number of frames in each infile

        .. todo:
            - Intelligently break lists of infiles into blocks larger
              than 1
        """
        # Input
        self.infiles = infiles
        self.frames_per_file = frames_per_file
        self.infiles_per_block = 1
        # Output (2-tuple form used by get_dataset_format below; replaced
        # with a 3-tuple including the dataset shape at the end of __init__)
        self.outputs = [(output[0], os.path.normpath(output[1]))]
        # Adjust start time, if applicable
        self.get_time_offset(**kwargs)
        # Determine dtype of input data
        self.get_dataset_format(**kwargs)
        # NOTE(review): the superclass presumably sets self.start_index and
        # self.final_slice, which next() and cut_incomplete_infiles rely on
        # -- confirm against MDclt.primary.Primary_Block_Generator.
        super(AmberLog_Block_Generator, self).__init__(**kwargs)
        # Disregard last infile, if applicable
        self.cut_incomplete_infiles(**kwargs)
        # Output, now including the total dataset shape
        self.outputs = [(output[0], os.path.normpath(output[1]),
          (self.final_slice.stop - self.final_slice.start,))]
    def next(self):
        """
        Prepares and returns next Block of analysis

        Python 2 iterator protocol (would be __next__ in Python 3).
        """
        if len(self.infiles) == 0:
            raise StopIteration()
        else:
            # Consume the next infiles_per_block files and compute the slice
            # of the output dataset that this block will fill.
            block_infiles = self.infiles[:self.infiles_per_block]
            block_slice = slice(self.start_index,
              self.start_index + len(block_infiles) * self.frames_per_file, 1)
            self.infiles = self.infiles[self.infiles_per_block:]
            self.start_index += len(block_infiles) * self.frames_per_file
            # NOTE(review): self.attrs computed in get_dataset_format is not
            # forwarded here, so AmberLog_Block falls back to its default
            # attrs -- confirm whether attrs should be passed through.
            return AmberLog_Block(infiles = block_infiles,
              raw_keys = self.raw_keys,
              new_keys = self.new_keys,
              output = self.outputs[0],
              slc = block_slice,
              time_offset = self.time_offset,
              dtype = self.dtype)
    def get_time_offset(self, start_time = None, **kwargs):
        """
        Calculates time offset based on desired and actual time of first frame

        **Arguments:**
            :*start_time*: Desired time of first frame (ns); typically 0.001
        """
        from subprocess import Popen, PIPE
        if start_time is None:
            self.time_offset = 0
        else:
            # Extract the time (ps) of the first frame with a shell pipeline,
            # convert to ns and negate, then shift to the desired start time.
            with open(os.devnull, "w") as fnull:
                command = "cat {0} | ".format(self.infiles[0]) + \
                          "grep -m 1 'TIME(PS)' | " + \
                          "awk '{{print $6}}'"
                process = Popen(command,
                  stdout = PIPE,
                  stderr = fnull,
                  shell = True)
                result = process.stdout.read()
            self.time_offset = float(result) / -1000 + start_time
    def get_dataset_format(self, **kwargs):
        """
        Determines format (dtype, field names, attributes) of the output
        dataset, either from a preexisting dataset or from the first infile.
        """
        from h5py import File as h5
        out_path, out_address = self.outputs[0]
        with h5(out_path) as out_h5:
            if out_address in out_h5:
                # If dataset already exists, extract current dtype
                self.dtype = out_h5[out_address].dtype
                self.new_keys = list(self.dtype.names)
                self.raw_keys = []
                for key in self.new_keys:
                    self.raw_keys += [r for r, n, _ in self.fields if n == key]
                self.attrs = dict(out_h5[out_address].attrs)
            else:
                # Otherwise, determine fields present in infile by scanning
                # the first "NSTEP ..." record of the mdout file.
                raw_keys = []
                breaking = False
                with open(self.infiles[0], "r") as infile:
                    raw_text = [line.strip() for line in infile.readlines()]
                # Python 2 only: xrange does not exist in Python 3.
                for i in xrange(len(raw_text)):
                    if breaking: break
                    if raw_text[i].startswith("NSTEP"):
                        while True:
                            if raw_text[i].startswith("----------"):
                                breaking = True
                                break
                            # Field names sit between '=' signs; the first
                            # token of each chunk after the first is the
                            # previous field's value and is discarded.
                            for j, field in enumerate(
                              raw_text[i].split("=")[:-1]):
                                if j == 0:
                                    raw_keys += [field.strip()]
                                else:
                                    raw_keys += [" ".join(field.split()[1:])]
                            i += 1
                # Determine appropriate dtype of new data; "time" is always
                # first, remaining fields follow the order of self.fields.
                self.raw_keys = ["TIME(PS)"]
                self.new_keys = ["time"]
                self.dtype = [("time", "f4")]
                self.attrs = {"time units": "ns"}
                for raw_key, new_key, units in self.fields[1:]:
                    if raw_key in raw_keys:
                        self.raw_keys += [raw_key]
                        self.new_keys += [new_key]
                        self.dtype += [(new_key, "f4")]
                        if units is not None:
                            self.attrs[new_key + " units"] = units
    def cut_incomplete_infiles(self, **kwargs):
        """
        Checks if log of last infile is incomplete; if so removes from
        list of infiles and shrinks the final output slice accordingly.
        """
        from subprocess import Popen, PIPE
        if len(self.infiles) == 0:
            return
        with open(os.devnull, "w") as fnull:
            command = "tail -n 1 {0}".format(self.infiles[-1])
            process = Popen(command,
              stdout = PIPE,
              stderr = fnull,
              shell = True)
            result = process.stdout.read()
        # A completed AMBER run ends with a wall-time summary line.
        if not (result.startswith("| Total wall time:")           # pmemd.cuda
        or      result.startswith("| Master Total wall time:")):  # pmemd
            self.infiles.pop(-1)
            self.final_slice = slice(self.final_slice.start,
              self.final_slice.stop - self.frames_per_file, 1)
class AmberLog_Block(Block):
    """
    Independent block of analysis: parses one or more AMBER mdout files and
    fills its assigned slice of the output dataset.
    """
    def __init__(self, infiles, raw_keys, new_keys, output, dtype, slc,
          time_offset = 0, attrs = None, **kwargs):
        """
        Initializes block of analysis

        **Arguments:**
            :*infiles*:     List of infiles
            :*raw_keys*:    Original names of fields in Amber mdout
            :*new_keys*:    Desired names of fields in nascent dataset
            :*output*:      Path to h5 file and address within h5 file
            :*dtype*:       Data type of nascent dataset
            :*slc*:         Slice within dataset at which this block
                            will be stored
            :*time_offset*: Offset by which to adjust simulation time
            :*attrs*:       Attributes to add to dataset
        """
        super(AmberLog_Block, self).__init__(**kwargs)
        # Avoid a shared mutable default argument ({} was previously shared
        # across all instances).
        if attrs is None:
            attrs = {}
        self.infiles = infiles
        self.raw_keys = raw_keys
        self.new_keys = new_keys
        self.time_offset = time_offset
        self.output = output
        # Pre-allocate the output array for this block's slice.
        self.datasets = {self.output: dict(slc = slc, attrs = attrs,
          data = np.empty(slc.stop - slc.start, dtype))}
    def __call__(self, **kwargs):
        """
        Runs this block of analysis: parses each mdout record ("NSTEP" up to
        the next divider line) and copies the requested fields into the
        pre-allocated array.
        """
        # Load raw data from each infile
        print(self.infiles)
        raw_data = {raw_key: [] for raw_key in self.raw_keys}
        for infile in self.infiles:
            with open(infile, "r") as infile:
                raw_text = [line.strip() for line in infile.readlines()]
            i = 0
            while i < len(raw_text):
                # Stop before the trailing averages section of the log.
                if raw_text[i].startswith("A V E R A G E S"): break
                if raw_text[i].startswith("NSTEP"):
                    while True:
                        if raw_text[i].startswith("----------"): break
                        # Each 'FIELD = value' pair straddles two chunks of
                        # the '='-split line: the key is the tail of one
                        # chunk, its value the head of the next.
                        line = raw_text[i].split("=")
                        for j, field in enumerate(line[:-1]):
                            if j == 0:
                                raw_key = field.strip()
                            else:
                                raw_key = " ".join(field.split()[1:])
                            value = line[j+1].split()[0]
                            if raw_key in self.raw_keys:
                                raw_data[raw_key] += [value]
                        i += 1
                i += 1
        # Copy from raw_data to new_data; time is converted from ps to ns
        # and shifted by the configured offset.
        # BUG FIX: np.float (an alias of the builtin float) was removed in
        # NumPy 1.24; use float directly, which is what it always meant.
        self.datasets[self.output]["data"]["time"] = (np.array(
          raw_data["TIME(PS)"], float) / 1000) + self.time_offset
        for raw_key, new_key in zip(self.raw_keys[1:], self.new_keys[1:]):
            try:
                self.datasets[self.output]["data"][new_key] = np.array(
                  raw_data[raw_key])
            # Narrowed from a bare except; diagnostic output is printed and
            # the original exception is re-raised unchanged.
            except Exception:
                print(raw_data[raw_key])
                print(raw_key)
                raise
| KarlTDebiec/MDclt | primary/amber/log.py | Python | bsd-3-clause | 14,006 | [
"Amber"
] | 5bb9e215fb008c31708d60e32ebc7ebe577f03d41bb877645159cf7959048277 |
import os, sys, re
from Bio import pairwise2
from Bio.Blast import NCBIXML
from Bio.SubsMat.MatrixInfo import blosum62
import gzip
import optparse
import subprocess
from jaspar.settings import BASE_DIR
'''
The original code is available in motif_inferrer.py.
This is modified by Aziz Khan <azez.khan@gmail.com> on May 20, 2017 for Django based JASPAR portal.
'''
#---------------------#
# Default Options #
#---------------------#
class default_options():
    '''
    Default options for the motif inferrer; a plain configuration object
    whose attributes are consumed by motif_infer().
    '''
    def __init__(self):
        # BLAST path (blastpgp dwelling directory; default = ./src/)
        # NOTE(review): not referenced by the visible code (motif_infer
        # shells out to `blastall` found on PATH) -- confirm if still needed.
        self.blast_path = BASE_DIR+"/utils/motif_inferrer/src/"

        # Domains file (i.e. domains.txt from domains.py)
        self.domains_file = BASE_DIR+"/utils/motif_inferrer/domains.txt"

        # Dummy directory for temporary files (default = temp in BASE_DIR)
        self.dummy_dir = BASE_DIR+"/temp"

        # JASPAR file (i.e. jaspar.txt from domains.py; default = ./jaspar.txt)
        self.jaspar_file = BASE_DIR+"/utils/motif_inferrer/jaspar.txt"

        # N parameter for the Rost's curve (e.g. n=5 ensures 99% of correctly
        # assigned homologs; default = 0)
        self.n_parameter = 0

        # Database file (i.e. sequence.fa from domains.py;
        # default = ./sequences.fa)
        self.database_file = BASE_DIR+"/utils/motif_inferrer/sequences.fa"

        # Single mode (if True, returns profiles derived from a single TF;
        # default = False)
        self.single = False

        # Input file generated from the user-supplied sequence; filled in
        # by motif_infer() at run time.
        self.input_file = None
def parse_file(file_name, gz=False):
    """
    This function parses any file and yields lines one by one.

    @input:
        file_name {string}
        gz {boolean} if True, read the file as gzip-compressed
    @return:
        line {string} each line with its trailing newline stripped
    @raises: ValueError if the file does not exist (raised lazily, on the
             first iteration, since this is a generator)
    """
    if not os.path.exists(file_name):
        raise ValueError("Could not open file %s" % file_name)
    opener = gzip.open if gz else open
    # BUG FIX: previously the handle was closed only after full iteration;
    # an abandoned generator leaked it. `with` guarantees closure even on
    # early exit (GeneratorExit) or an exception in the consumer.
    with opener(file_name, "rt") as f:
        for line in f:
            yield line.strip("\n")
def parse_fasta_file(file_name, gz=False, clean=True):
    """
    This function parses any FASTA file and yields sequences as a tuple
    of the form (identifier, sequence).

    @input:
        file_name {string}
        gz {boolean} if True, read the file as gzip-compressed
        clean {boolean} if True, mask non-word and digit characters with "X"
    @return:
        {tuple} header, sequence
    """
    # Initialize #
    identifier = ""
    sequence = ""
    # For each line... #
    for line in parse_file(file_name, gz):
        # startswith() also tolerates blank lines (line[0] raised IndexError)
        if line.startswith(">"):
            # Header line: emit the previous record, if any #
            if sequence != "":
                yield (identifier, sequence)
            m = re.search("^>(.+)", line)
            identifier = m.group(1)
            sequence = ""
        else:
            # BUG FIX: the previous version appended the cleaned copy of the
            # line *in addition to* the upper-cased line (and appended header
            # lines into the sequence), duplicating/corrupting every record.
            # Upper-case first, then optionally mask non-residue characters.
            line = line.upper()
            if clean:
                line = re.sub(r"\W|\d", "X", line)
            sequence += line
    # Emit the final record; an entirely empty file yields nothing instead
    # of a spurious ("", "") record.
    if sequence != "":
        yield (identifier, sequence)
def write(file_name, content=None):
    """
    This function writes any {content} to a file. If the file already
    exists, it pushes the {content} at the bottom of the file. If
    {file_name} is None, {content} is written to stdout instead.

    @input:
        file_name {string} or None
        content {string}
    @raises: ValueError if the file cannot be opened for writing
    """
    if file_name is not None:
        try:
            # BUG FIX: mode was "w", which truncated on every call and
            # contradicted both the docstring and the callers that emit a
            # header followed by rows via repeated write() calls.
            with open(file_name, "a") as f:
                f.write("%s\n" % content)
        # Narrowed from a bare except to the errors open()/write() raise.
        except (IOError, OSError):
            raise ValueError("Could not create file %s" % file_name)
    else:
        sys.stdout.write("%s\n" % content)
def is_alignment_over_Rost_sequence_identity_curve(identities, align_length, parameter=0):
    """
    Decide whether an alignment lies on or above Rost's sequence identity
    curve, i.e. whether it has enough identities for its length to be
    considered a reliable homolog.

    @input:
        identities   {int}
        align_length {int}
        parameter    {int} N parameter in the curve (if > 0 more strict)
    @return: {boolean} True if on/above the curve, False otherwise
    """
    threshold = get_Rost_ID_threshold(align_length, n=parameter)
    return identities >= threshold
def get_Rost_ID_threshold(L, n=0):
    """
    Compute Rost's sequence identity threshold for an alignment of length
    *L*, shifted up by *n* percentage points.

    Formula: n + 480 * L ** (-0.32 * (1 + e ** (-L / 1000)))

    @input:
        L {int} alignment length
        n {int} N parameter in the curve (if > 0 more strict)
    @return: {float}
    """
    import math
    # The original wrapped constants in float(repr(...)); repr() round-trips
    # floats exactly, so the plain expression is numerically identical.
    exponent = -0.32 * (1 + pow(math.e, -L / 1000))
    return n + 480 * pow(L, exponent)
def get_alignment_identities(A, B):
    """
    Count positions at which two aligned sequences carry the same character
    (gap characters compare like any other). Returns None when the two
    strings have different lengths, since they then cannot be an alignment.

    @input:
        A {string} aligned sequence A (with residues and gaps)
        B {string} aligned sequence B (with residues and gaps)
    @return: {int} or None
    """
    if len(A) != len(B):
        return None
    return sum(1 for a, b in zip(A, B) if a == b)
#---------------------------------#
# Main motif inferrer function #
#---------------------------------#
def motif_infer(input_sequence):
    """
    Takes an input protein sequence and infers JASPAR matrix profiles for it
    by BLASTing against the JASPAR TF sequence database and checking DBD
    (DNA-binding domain) identity against per-TF thresholds.

    @input:
        input_sequence {string} FASTA-formatted sequence supplied by the user
    @return: {dict} mapping each query header to a list of
             [genename, matrix_id, evalue, max_DBD_identity] inferences
    """
    # Get default options #
    options = default_options()
    # Write the query to a pid-stamped temp file so concurrent requests do
    # not clobber each other.
    input_file = os.path.join(BASE_DIR, options.dummy_dir,"sequence_" + str(os.getpid()) + ".fa")
    if os.path.exists(input_file):
        os.remove(input_file)
    #write the sequence to file (normalize CRLF line endings first)
    write(input_file, input_sequence.replace('\r\n','\n'))
    options.input_file = input_file
    #options.input_file = "./utils/motif_inferrer/examples/MAX.fa"
    # Get current working directory #
    cwd = os.path.abspath(os.getcwd())
    # Load per-TF DBD sequences and identity thresholds, keyed by UniProt
    # accession; file format: uniacc;domain1,domain2,...;threshold
    domains = {}
    # For each line... #
    for line in parse_file(options.domains_file):
        if line.startswith("#"): continue
        line = line.split(";")
        domains.setdefault(line[0], {'domains': line[1].split(","), 'threshold': float(line[2])})
    # Load JASPAR matrix IDs and gene names, keyed by UniProt accession;
    # file format: uniacc;matrix_id;genename
    jaspar = {}
    # For each line... #
    for line in parse_file(options.jaspar_file):
        if line.startswith("#"): continue
        line = line.split(";")
        jaspar.setdefault(line[0], [])
        jaspar[line[0]].append([line[1], line[2]])
    # Initialize #
    inferences = {}
    database_file = os.path.abspath(options.database_file)
    # For each header, sequence... #
    for header, sequence in parse_fasta_file(options.input_file):
        # Per-query temp files, pid-stamped like the input file #
        fasta_file = os.path.join(options.dummy_dir, "query." + str(os.getpid()) + ".fa")
        blast_file = os.path.join(options.dummy_dir, "blast." + str(os.getpid()) + ".xml")
        inferences.setdefault(header, [])
        # Create FASTA file #
        if os.path.exists(fasta_file):
            os.remove(fasta_file)
        write(fasta_file, ">%s\n%s" % (header, sequence))
        # Exec BLAST #
        try:
            # Initialize #
            homologs = []
            # Exec process; -m 7 requests XML output for NCBIXML below.
            # NOTE(review): os.system interpolates file paths into a shell
            # command -- paths here are program-generated, but prefer
            # subprocess.run with an argument list if this ever changes.
            os.system("blastall -p blastp -i %s -d %s -o %s -m 7" % (fasta_file, database_file, blast_file))
            #process = subprocess.check_output(["Users/azizk/tools/blast-2.2.26/bin/blastall", "-p", "blastp", "-i", fasta_file, "-d", database_file, "-o", blast_file, "-m", "7"], stderr=subprocess.STDOUT)
            # Parse BLAST results #
            blast_records = NCBIXML.parse(open(blast_file))
            # For each blast record... #
            for blast_record in blast_records:
                for alignment in blast_record.alignments:
                    for hsp in alignment.hsps:
                        # Keep only hits above Rost's identity curve, i.e.
                        # likely true homologs; only the first qualifying
                        # HSP per alignment is retained (break below).
                        if is_alignment_over_Rost_sequence_identity_curve(hsp.identities, hsp.align_length, parameter=int(options.n_parameter)):
                            homologs.append((str(alignment.hit_def), float(hsp.expect), hsp.query, "%s-%s" % (hsp.query_start, hsp.query_end), hsp.sbjct, "%s-%s" % (hsp.sbjct_start, hsp.sbjct_end)))
                            break
        # NOTE(review): this except masks *any* failure (missing binary,
        # malformed XML, parse errors) behind one message -- consider
        # narrowing and chaining the original exception.
        except:
            raise ValueError("Could not exec blastpgp!!! Make sure it's on your path.")
        # Remove files #
        os.remove(blast_file)
        os.remove(fasta_file)
        # For each uniacc... #
        for uniacc, evalue, query_alignment, query_from_to, hit_alignment, hit_from_to in homologs:
            # Skip if uniacc does not have assigned domains... #
            if uniacc not in domains: continue
            # Fraction of each DBD recovered in a global alignment against
            # the full query sequence.
            identities = []
            # For each domain... #
            for domain in domains[uniacc]['domains']:
                for alignment in pairwise2.align.globalds(sequence, domain, blosum62, -11.0, -1):
                    identities.append(get_alignment_identities(alignment[0], alignment[1])/float(len(domain)))
            # If domain alignment passes threshold... #
            # NOTE(review): max() raises ValueError if the domains list is
            # empty or no alignment is produced -- confirm inputs guarantee
            # at least one domain per accession.
            if max(identities) >= domains[uniacc]['threshold']:
                # For each uniacc JASPAR matrix... #
                for matrix, genename in jaspar[uniacc]:
                    # If single mode, skip heterodimer profiles (X::Y) #
                    if options.single:
                        if "::" in genename: continue
                    # Infer matrix #
                    #inferences[header].append([genename, matrix, evalue, query_alignment, query_from_to, hit_alignment, hit_from_to, max(identities)])
                    inferences[header].append([genename, matrix, evalue, max(identities)])
    #delete the input file
    os.remove(input_file)
    return inferences
# Write output #
#write(options.output_file, "#Query,TF Name,TF Matrix,E-value,Query Alignment,Query Start-End,TF Alignment,TF Start-End,DBD %ID")
#for header in inferences:
# for inference in sorted(inferences[header], key=lambda x: x[-1], reverse=True):
# write(options.output_file, "%s,%s" % (header, ",".join(map(str, inference))))
| asntech/jaspar | utils/motif_inferrer/inferrer.py | Python | bsd-3-clause | 9,890 | [
"BLAST"
] | 8ffceed2e45f33254a1c90f55fe565979ca3fafd8592bc98a29f9ec1d783400f |
import argparse
import inspect
import logging
import json
import base64
from docstring_parser import parse
from flask import Flask, request
from flask_restx import Api, Resource, fields, abort
from flask_cors import CORS
from indra import get_config
from indra.sources import trips, reach, bel, biopax, eidos
from indra.databases import hgnc_client
from indra.statements import stmts_from_json, get_statement_by_name
from indra.assemblers.pysb import PysbAssembler
import indra.assemblers.pysb.assembler as pysb_assembler
from indra.assemblers.cx import CxAssembler
from indra.assemblers.graph import GraphAssembler
from indra.assemblers.cyjs import CyJSAssembler
from indra.assemblers.sif import SifAssembler
from indra.assemblers.english import EnglishAssembler
from indra.tools.assemble_corpus import *
from indra.databases import cbio_client
from indra.sources.indra_db_rest import get_statements
from indra.sources.ndex_cx.api import process_ndex_network
from indra.sources.reach.api import reach_nxml_url, reach_text_url
from indra.ontology.bio import bio_ontology
from indra.pipeline import AssemblyPipeline, pipeline_functions
from indra.preassembler.custom_preassembly import *
# Module-level wiring: logger, Flask app, flask-restx API, namespaces, and
# shared request/response models.
logger = logging.getLogger('rest_api')
logger.setLevel(logging.DEBUG)
# Create Flask app, api, namespaces, and models
app = Flask(__name__)
api = Api(
    app, title='INDRA REST API', description='REST API for INDRA webservice')
CORS(app)
preassembly_ns = api.namespace(
    'Preassembly', 'Preassemble INDRA Statements', path='/preassembly/')
sources_ns = api.namespace(
    'Sources', 'Get INDRA Statements from various sources', path='/')
assemblers_ns = api.namespace(
    'Assemblers', 'Assemble INDRA Statements into models', path='/assemblers/')
ndex_ns = api.namespace('NDEx', 'Use NDEx service', path='/')
indra_db_rest_ns = api.namespace(
    'INDRA DB REST', 'Use INDRA DB REST API', path='/indra_db_rest/')
databases_ns = api.namespace(
    'Databases', 'Access external databases', path='/databases/')
# Models that can be inherited and reused in different namespaces
dict_model = api.model('dict', {})
stmts_model = api.model('Statements', {
    'statements': fields.List(fields.Nested(dict_model), example=[{
        "id": "acc6d47c-f622-41a4-8ae9-d7b0f3d24a2f",
        "type": "Complex",
        "members": [
            {"db_refs": {"TEXT": "MEK", "FPLX": "MEK"}, "name": "MEK"},
            {"db_refs": {"TEXT": "ERK", "FPLX": "ERK"}, "name": "ERK"}
        ],
        "sbo": "https://identifiers.org/SBO:0000526",
        "evidence": [{"text": "MEK binds ERK", "source_api": "trips"}]
    }])})
bio_text_model = api.model('BioText', {
    'text': fields.String(example='GRB2 binds SHC.')})
jsonld_model = api.model('jsonld', {
    'jsonld': fields.String(example='{}')})
genes_model = api.model('Genes', {
    'genes': fields.List(fields.String, example=['BRAF', 'MAP2K1'])})
# Store the arguments by type; used by make_preassembly_model below to pick
# the right flask-restx field class for each assemble_corpus parameter.
int_args = ['poolsize', 'size_cutoff']
float_args = ['score_threshold', 'belief_cutoff']
boolean_args = [
    'do_rename', 'use_adeft', 'do_methionine_offset', 'do_orthology_mapping',
    'do_isoform_mapping', 'use_cache', 'return_toplevel', 'flatten_evidence',
    'normalize_equivalences', 'normalize_opposites', 'invert', 'remove_bound',
    'specific_only', 'allow_families', 'match_suffix', 'update_belief']
list_args = [
    'gene_list', 'name_list', 'values', 'source_apis', 'uuids', 'curations',
    'correct_tags', 'ignores', 'deletions']
dict_args = [
    'grounding_map', 'misgrounding_map', 'whitelist', 'mutations']
def _return_stmts(stmts):
if stmts:
stmts_json = stmts_to_json(stmts)
res = {'statements': stmts_json}
else:
res = {'statements': []}
return res
def _stmts_from_proc(proc):
if proc and proc.statements:
stmts = stmts_to_json(proc.statements)
res = {'statements': stmts}
else:
res = {'statements': []}
return res
# Create Resources in Preassembly Namespace
# Manually add preassembly resources not based on assembly corpus functions
# Request model for /preassembly/pipeline: the Statements payload plus an
# ordered list of pipeline step descriptors.
pipeline_model = api.inherit('Pipeline', stmts_model, {
    'pipeline': fields.List(fields.Nested(dict_model), example=[
        {'function': 'filter_grounded_only'},
        {'function': 'run_preassembly', 'kwargs': {'return_toplevel': False}}
    ])
})
# There's an extra blank line between parameters here and in all the following
# docstrings for better visualization in Swagger
@preassembly_ns.expect(pipeline_model)
@preassembly_ns.route('/pipeline')
class RunPipeline(Resource):
    """Endpoint that runs an arbitrary AssemblyPipeline over a list of
    Statements."""
    @api.doc(False)
    def options(self):
        # Empty response, presumably to satisfy CORS preflight requests --
        # TODO confirm (same pattern on every Resource in this module).
        return {}
    def post(self):
        """Run an assembly pipeline for a list of Statements.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to run the pipeline.

        pipeline : list[dict]
            A list of dictionaries representing steps in the pipeline. Each
            step should have a 'function' key and, if appropriate, 'args' and
            'kwargs' keys. For more documentation and examples, see
            https://indra.readthedocs.io/en/latest/modules/pipeline.html

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            The list of INDRA Statements resulting from running the pipeline
            on the list of input Statements.
        """
        args = request.json
        # Deserialize, run every configured step in order, re-serialize.
        stmts = stmts_from_json(args.get('statements'))
        pipeline_steps = args.get('pipeline')
        ap = AssemblyPipeline(pipeline_steps)
        stmts_out = ap.run(stmts)
        return _return_stmts(stmts_out)
# Dynamically generate resources for assembly corpus functions
class PreassembleStatements(Resource):
    """Parent Resource for Preassembly resources.

    Subclasses (generated dynamically below) set func_name to the name of an
    assemble_corpus function; post() deserializes the request, converts JSON
    arguments into the Python objects the function expects, and runs it.
    """
    # Name of the pipeline function this endpoint runs; set by subclasses.
    func_name = None
    def process_args(self, args_json):
        # Convert JSON-encoded argument values in place into the objects the
        # assemble_corpus functions expect.
        for arg in args_json:
            if arg == 'stmt_type':
                # Statement class name -> Statement class
                args_json[arg] = get_statement_by_name(args_json[arg])
            elif arg in ['matches_fun', 'refinement_fun']:
                # Function name -> registered pipeline function
                args_json[arg] = pipeline_functions[args_json[arg]]
            elif arg == 'belief_scorer':
                # Here we could handle various string values of args_json[arg]
                # but there currently aren't any specific options
                args_json[arg] = None
            elif arg == 'ontology':
                # Here we could handle various string values of args_json[arg]
                # but there currently aren't any specific options
                args_json[arg] = bio_ontology
            elif arg == 'whitelist' or arg == 'mutations':
                # JSON lists -> tuples, as expected for modification specs
                args_json[arg] = {
                    gene: [tuple(mod) for mod in mods]
                    for gene, mods in args_json[arg].items()}
        return args_json
    @api.doc(False)
    def options(self):
        # Empty response, presumably for CORS preflight -- TODO confirm.
        return {}
    def post(self):
        # All non-statement keys are forwarded as keyword arguments to the
        # configured assemble_corpus function.
        args = self.process_args(request.json)
        stmts = stmts_from_json(args.pop('statements'))
        stmts_out = pipeline_functions[self.func_name](stmts, **args)
        return _return_stmts(stmts_out)
def make_preassembly_model(func):
    """Create new Flask model with function arguments.

    Inspects *func*'s signature and builds a flask-restx request model for
    it: the plain Statements model when the function takes only statements
    (plus **kwargs), otherwise a model inheriting from it with one field per
    extra argument, typed via the int/float/boolean/list/dict_args lists
    defined at module level.
    """
    args = inspect.signature(func).parameters
    # We can reuse Statements model if only stmts_in or stmts and **kwargs
    # are arguments of the function
    if ((len(args) == 1 and ('stmts_in' in args or 'stmts' in args)) or
            (len(args) == 2 and 'kwargs' in args and
                ('stmts_in' in args or 'stmts' in args))):
        return stmts_model
    # Inherit a model if there are other arguments
    model_fields = {}
    for arg in args:
        if arg != 'stmts_in' and arg != 'stmts' and arg != 'kwargs':
            # Carry over the function's declared default, if any.
            default = None
            if args[arg].default is not inspect.Parameter.empty:
                default = args[arg].default
            # Need to use default for boolean and example for other types
            if arg in boolean_args:
                model_fields[arg] = fields.Boolean(default=default)
            elif arg in int_args:
                model_fields[arg] = fields.Integer(example=default)
            elif arg in float_args:
                # NOTE(review): the example is hard-coded to 0.7 rather than
                # the declared default -- presumably because the float args'
                # defaults are None and would render no example; confirm.
                model_fields[arg] = fields.Float(example=0.7)
            elif arg in list_args:
                if arg == 'curations':
                    model_fields[arg] = fields.List(
                        fields.Nested(dict_model),
                        example=[{'pa_hash': '1234', 'source_hash': '2345',
                                  'tag': 'wrong_relation'}])
                else:
                    model_fields[arg] = fields.List(
                        fields.String, example=default)
            elif arg in dict_args:
                model_fields[arg] = fields.Nested(dict_model)
            else:
                # Fallback: expose anything unclassified as a string field.
                model_fields[arg] = fields.String(example=default)
    new_model = api.inherit(
        ('%s_input' % func.__name__), stmts_model, model_fields)
    return new_model
def update_docstring(func):
    """Rewrite *func*'s docstring for display in the REST API docs.

    Parses the original numpydoc-style docstring with docstring_parser,
    renames statement parameters to 'statements', replaces Python-object
    parameter descriptions (scorer, ontology, functions, curations) with
    their JSON-level equivalents, and appends a standard Returns section.

    Returns a (short_description, full_docstring) tuple.
    """
    doc = func.__doc__
    docstring = parse(doc)
    new_doc = docstring.short_description + '\n\n'
    if docstring.long_description:
        new_doc += (docstring.long_description + '\n\n')
    new_doc += ('Parameters\n----------\n')
    for param in docstring.params:
        # File-saving options are not exposed through the web service.
        if param.arg_name in ['save', 'save_unique']:
            continue
        elif param.arg_name in ['stmts', 'stmts_in']:
            param.arg_name = 'statements'
            param.type_name = 'list[indra.statements.Statement.to_json()]'
        elif param.arg_name == 'belief_scorer':
            param.type_name = 'Optional[str] or None'
            param.description = (
                'Type of BeliefScorer to use in calculating Statement '
                'probabilities. If None is provided (default), then the '
                'default scorer is used (good for biology use case). '
                'For WorldModelers use case belief scorer should be set '
                'to "wm".')
        elif param.arg_name == 'ontology':
            param.type_name = 'Optional[str] or None'
            param.description = (
                'Type of ontology to use for preassembly ("bio" or "wm"). '
                'If None is provided (default), then the bio ontology is used.'
                'For WorldModelers use case ontology should be set to "wm".')
        elif param.arg_name in ['matches_fun', 'refinement_fun']:
            # Functions are referenced by registered name over the wire.
            param.type_name = 'str'
        elif param.arg_name == 'curations':
            param.type_name = 'list[dict]'
            param.description = (
                'A list of dictionaries representing curations. Each '
                'dictionary must have "pa_hash" (preassembled statement hash)'
                ', "source_hash", (evidence hash) and "tag" (e.g. "correct", '
                '"wrong_relation", etc.) keys.')
        new_doc += (param.arg_name + ' : ' + param.type_name + '\n' +
                    param.description + '\n\n')
    new_doc += 'Returns\n----------\n'
    new_doc += 'statements : list[indra.statements.Statement.to_json()]\n'
    new_doc += 'A list of processed INDRA Statements'
    return docstring.short_description, new_doc
# Create resources for each of assembly_corpus functions: one POST endpoint
# per registered pipeline function defined in indra.tools.assemble_corpus.
for func_name, func in pipeline_functions.items():
    if func.__module__ == 'indra.tools.assemble_corpus':
        doc = ''
        short_doc = ''
        # Get the function description from docstring
        if func.__doc__:
            short_doc, doc = update_docstring(func)
        new_model = make_preassembly_model(func)
        @preassembly_ns.expect(new_model)
        @preassembly_ns.route(('/%s' % func_name),
                              doc={'summary': short_doc})
        class NewFunction(PreassembleStatements):
            # Class bodies do not create a closure: the right-hand side here
            # resolves to the module-level loop variable at class-creation
            # time, so each generated class captures the current value.
            func_name = func_name
            def post(self):
                return super().post()
            # Attach the rewritten docstring so Swagger shows per-endpoint
            # documentation.
            post.__doc__ = doc
# Create resources for Sources namespace
# REACH request models: free text, raw REACH JSON, and PMC article ID.
reach_text_model = api.inherit('ReachText', bio_text_model, {
    'offline': fields.Boolean(default=False),
    'url': fields.String(example=reach_text_url)
})
reach_json_model = api.model('ReachJSON', {'json': fields.String(example='{}')})
reach_pmc_model = api.model('ReachPMC', {
    'pmcid': fields.String(example='PMC3717945'),
    'offline': fields.Boolean(default=False),
    'url': fields.String(example=reach_nxml_url)
})
@sources_ns.expect(reach_text_model)
@sources_ns.route('/reach/process_text')
class ReachProcessText(Resource):
    """Endpoint that reads free text with REACH."""
    @api.doc(False)
    def options(self):
        # Empty response, presumably for CORS preflight -- TODO confirm.
        return {}
    def post(self):
        """Process text with REACH and return INDRA Statements.

        Parameters
        ----------
        text : str
            The text to be processed.

        offline : Optional[bool]
            If set to True, the REACH system is run offline via a JAR file.
            Otherwise (by default) the web service is called. Default: False

        url : Optional[str]
            URL for a REACH web service instance, which is used for reading if
            provided. If not provided but offline is set to False (its default
            value), REACH_TEXT_URL set in configuration will be used. If not
            provided in configuration, the Arizona REACH web service is called
            (http://agathon.sista.arizona.edu:8080/odinweb/api/help).
            Default: None

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        text = args.get('text')
        offline = True if args.get('offline') else False
        given_url = args.get('url')
        config_url = get_config('REACH_TEXT_URL', failure_ok=True)
        # Order: URL given as an explicit argument in the request. Then any URL
        # set in the configuration. Then, unless offline is set, use the
        # default REACH web service URL.
        if 'url' in args:  # This is to take None if explicitly given
            url = given_url
        elif config_url:
            url = config_url
        elif not offline:
            url = reach_text_url
        else:
            url = None
        # If a URL is set, prioritize it over the offline setting
        if url:
            offline = False
        rp = reach.process_text(text, offline=offline, url=url)
        return _stmts_from_proc(rp)
@sources_ns.expect(reach_json_model)
@sources_ns.route('/reach/process_json')
class ReachProcessJson(Resource):
    """Endpoint that converts previously produced REACH JSON output into
    INDRA Statements."""
    @api.doc(False)
    def options(self):
        # Empty response, presumably for CORS preflight -- TODO confirm.
        return {}
    def post(self):
        """Process REACH json and return INDRA Statements.

        Parameters
        ----------
        json : str
            The json string to be processed.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        json_str = args.get('json')
        rp = reach.process_json_str(json_str)
        return _stmts_from_proc(rp)
@sources_ns.expect(reach_pmc_model)
@sources_ns.route('/reach/process_pmc')
class ReachProcessPmc(Resource):
    """Endpoint that reads a PubMedCentral article with REACH."""
    @api.doc(False)
    def options(self):
        # Empty response, presumably for CORS preflight -- TODO confirm.
        return {}
    def post(self):
        """Process PubMedCentral article and return INDRA Statements.

        Parameters
        ----------
        pmcid : str
            The ID of a PubmedCentral article. The string may start with PMC
            but passing just the ID also works.
            Examples: 3717945, PMC3717945
            https://www.ncbi.nlm.nih.gov/pmc/

        offline : Optional[bool]
            If set to True, the REACH system is run offline via a JAR file.
            Otherwise (by default) the web service is called. Default: False

        url : Optional[str]
            URL for a REACH web service instance, which is used for reading if
            provided. If not provided but offline is set to False (its default
            value), REACH_NXML_URL set in configuration will be used. If not
            provided in configuration, the Arizona REACH web service is called
            (http://agathon.sista.arizona.edu:8080/odinweb/api/help).
            Default: None

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        pmcid = args.get('pmcid')
        offline = True if args.get('offline') else False
        given_url = args.get('url')
        config_url = get_config('REACH_NXML_URL', failure_ok=True)
        # Order: URL given as an explicit argument in the request. Then any URL
        # set in the configuration. Then, unless offline is set, use the
        # default REACH web service URL.
        if 'url' in args:  # This is to take None if explicitly given
            url = given_url
        elif config_url:
            url = config_url
        elif not offline:
            url = reach_nxml_url
        else:
            url = None
        # If a URL is set, prioritize it over the offline setting
        if url:
            offline = False
        rp = reach.process_pmc(pmcid, offline=offline, url=url)
        return _stmts_from_proc(rp)
# TRIPS
# Request schema for endpoints that accept a raw TRIPS EKB XML payload.
xml_model = api.model('XML', {'xml_str': fields.String})
@sources_ns.expect(bio_text_model)
@sources_ns.route('/trips/process_text')
class TripsProcessText(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Process text with TRIPS and return INDRA Statements.

        Parameters
        ----------
        text : str
            The text to be processed.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        payload = request.json
        processor = trips.process_text(payload.get('text'))
        return _stmts_from_proc(processor)
@sources_ns.expect(xml_model)
@sources_ns.route('/trips/process_xml')
class TripsProcessXml(Resource):
    # Renamed from TripsProcessText: the original name collided with the
    # /trips/process_text resource class defined just above, shadowing it at
    # module level. The route itself is registered by the decorator, so the
    # rename is invisible to API clients.
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Process TRIPS EKB XML and return INDRA Statements.

        Parameters
        ----------
        xml_string : str
            A TRIPS extraction knowledge base (EKB) string to be processed.
            http://trips.ihmc.us/parser/api.html

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        xml_str = args.get('xml_str')
        tp = trips.process_xml(xml_str)
        return _stmts_from_proc(tp)
# Eidos
# Request schema: biology text plus the address of an Eidos reader web service.
eidos_text_model = api.inherit('EidosText', bio_text_model, {
    'webservice': fields.String
})
# Hide docs until webservice is available
@sources_ns.expect(eidos_text_model)
@sources_ns.route('/eidos/process_text', doc=False)
class EidosProcessText(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Process text with EIDOS and return biology INDRA Statements.

        Parameters
        ----------
        text : str
            The text to be processed.
        webservice : str
            An Eidos reader web service URL to send the request to.
            Required by this endpoint: a request without a web service
            address is rejected with HTTP 400. (Reading with the local
            Eidos JAR is not supported via this API.)

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        args = request.json
        text = args.get('text')
        webservice = args.get('webservice')
        if not webservice:
            # JAR-based reading is not available here, so a web service
            # address is mandatory.
            abort(400, 'No web service address provided.')
        ep = eidos.process_text_bio(text, webservice=webservice)
        return _stmts_from_proc(ep)
@sources_ns.expect(jsonld_model)
@sources_ns.route('/eidos/process_jsonld')
class EidosProcessJsonld(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Process an EIDOS JSON-LD and return biology INDRA Statements.

        Parameters
        ----------
        jsonld : str
            The JSON-LD string to be processed.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        payload = request.json
        # The JSON-LD arrives as a string; parse it before handing off.
        parsed = json.loads(payload.get('jsonld'))
        processor = eidos.process_json_bio(parsed)
        return _stmts_from_proc(processor)
# BEL
# Request schema for endpoints that accept a BEL/RDF string payload.
bel_rdf_model = api.model('BelRdf', {'belrdf': fields.String})
@sources_ns.expect(genes_model)
@sources_ns.route('/bel/process_pybel_neighborhood')
class BelProcessNeighborhood(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Process BEL Large Corpus neighborhood and return INDRA Statements.

        Parameters
        ----------
        genes : list[str]
            A list of entity names (e.g., gene names) which will be used as the
            basis of filtering the result. If any of the Agents of an extracted
            INDRA Statement has a name appearing in this list, the Statement is
            retained in the result.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        payload = request.json
        processor = bel.process_pybel_neighborhood(payload.get('genes'))
        return _stmts_from_proc(processor)
@sources_ns.expect(bel_rdf_model)
@sources_ns.route('/bel/process_belrdf')
class BelProcessBelRdf(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Process BEL RDF and return INDRA Statements.

        Parameters
        ----------
        belrdf : str
            A BEL/RDF string to be processed. This will usually come from
            reading a .rdf file.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        payload = request.json
        processor = bel.process_belrdf(payload.get('belrdf'))
        return _stmts_from_proc(processor)
# BioPax
# Request schema for path queries between a set of source genes and a set of
# target genes (HGNC symbols).
source_target_model = api.model('SourceTarget', {
    'source': fields.List(fields.String, example=['BRAF', 'RAF1', 'ARAF']),
    'target': fields.List(fields.String, example=['MAP2K1', 'MAP2K2'])
})
@sources_ns.expect(genes_model)
@sources_ns.route('/biopax/process_pc_pathsbetween')
class BiopaxPathsBetween(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """
        Process PathwayCommons paths between genes, return INDRA Statements.

        Parameters
        ----------
        genes : list
            A list of HGNC gene symbols to search for paths between.
            Examples: ['BRAF', 'MAP2K1']

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        payload = request.json
        processor = biopax.process_pc_pathsbetween(payload.get('genes'))
        return _stmts_from_proc(processor)
@sources_ns.expect(source_target_model)
@sources_ns.route('/biopax/process_pc_pathsfromto')
class BiopaxPathsFromTo(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """
        Process PathwayCommons paths from-to genes, return INDRA Statements.

        Parameters
        ----------
        source : list
            A list of HGNC gene symbols that are the sources of paths being
            searched for.
            Examples: ['BRAF', 'RAF1', 'ARAF']
        target : list
            A list of HGNC gene symbols that are the targets of paths being
            searched for.
            Examples: ['MAP2K1', 'MAP2K2']

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        payload = request.json
        processor = biopax.process_pc_pathsfromto(payload.get('source'),
                                                  payload.get('target'))
        return _stmts_from_proc(processor)
@sources_ns.expect(genes_model)
@sources_ns.route('/biopax/process_pc_neighborhood')
class BiopaxNeighborhood(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Process PathwayCommons neighborhood, return INDRA Statements.

        Parameters
        ----------
        genes : list
            A list of HGNC gene symbols to search the neighborhood of.
            Examples: ['BRAF'], ['BRAF', 'MAP2K1']

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of extracted INDRA Statements.
        """
        payload = request.json
        processor = biopax.process_pc_neighborhood(payload.get('genes'))
        return _stmts_from_proc(processor)
# Create resources for Assemblers namespace
# Request schema: INDRA Statements plus the desired PySB export format.
pysb_stmts_model = api.inherit('PysbStatements', stmts_model, {
    'export_format': fields.String(example='kappa')
})
@assemblers_ns.expect(pysb_stmts_model)
@assemblers_ns.route('/pysb')
class AssemblePysb(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Assemble INDRA Statements and return PySB model string.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.
        export_format : str
            The format to export into, for instance "kappa", "bngl",
            "sbml", "matlab", "mathematica", "potterswheel". See
            http://pysb.readthedocs.io/en/latest/modules/export/index.html
            for a list of supported formats. In addition to the formats
            supported by PySB itself, this method also provides "sbgn"
            output.

        Returns
        -------
        image or model
            Assembled exported model. If export_format is kappa_im or kappa_cm,
            image is returned. Otherwise model string is returned.
        """
        args = request.json
        stmts_json = args.get('statements')
        export_format = args.get('export_format')
        stmts = stmts_from_json(stmts_json)
        pa = PysbAssembler()
        pa.add_statements(stmts)
        pa.make_model()
        try:
            for m in pa.model.monomers:
                pysb_assembler.set_extended_initial_condition(pa.model, m, 0)
        except Exception as e:
            logger.exception(e)

        if not export_format:
            model_str = pa.print_model()
        elif export_format in ('kappa_im', 'kappa_cm'):
            # export_model writes the rendered network image to fname; we
            # read it back and return it as a base64 data URI. (An unused
            # `root` path and the ignored return value of export_model were
            # removed here.)
            fname = 'model_%s.png' % export_format
            pa.export_model(format=export_format, file_name=fname)
            with open(fname, 'rb') as fh:
                data = 'data:image/png;base64,%s' % \
                       base64.b64encode(fh.read()).decode()
            return {'image': data}
        else:
            try:
                model_str = pa.export_model(format=export_format)
            except Exception as e:
                # Export failures are logged and an empty model returned
                # rather than surfacing a 500 to the client.
                logger.exception(e)
                model_str = ''
        res = {'model': model_str}
        return res
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/cx')
class AssembleCx(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Assemble INDRA Statements and return CX network json.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        model
            Assembled model string.
        """
        payload = request.json
        statements = stmts_from_json(payload.get('statements'))
        assembler = CxAssembler(statements)
        return {'model': assembler.make_model()}
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/graph')
class AssembleGraph(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Assemble INDRA Statements and return Graphviz graph dot string.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        model
            Assembled model string.
        """
        payload = request.json
        statements = stmts_from_json(payload.get('statements'))
        assembler = GraphAssembler(statements)
        return {'model': assembler.make_model()}
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/cyjs')
class AssembleCyjs(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Assemble INDRA Statements and return Cytoscape JS network.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        json_model : dict
            Json dictionary containing graph information.
        """
        payload = request.json
        statements = stmts_from_json(payload.get('statements'))
        assembler = CyJSAssembler(statements)
        assembler.make_model(grouping=True)
        # The assembler emits a JSON string; decode it so Flask returns a
        # proper JSON object rather than a quoted string.
        return json.loads(assembler.print_cyjs_graph())
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/english')
class AssembleEnglish(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Assemble each statement into English sentence.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        sentences : dict
            Dictionary mapping Statement UUIDs with English sentences.
        """
        payload = request.json
        statements = stmts_from_json(payload.get('statements'))
        sentences = {}
        # Assemble one statement at a time so each UUID maps to exactly
        # one sentence.
        for statement in statements:
            assembler = EnglishAssembler()
            assembler.add_statements([statement])
            sentences[statement.uuid] = assembler.make_model()
        return {'sentences': sentences}
@assemblers_ns.expect(stmts_model)
@assemblers_ns.route('/sif/loopy')
class AssembleLoopy(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Assemble INDRA Statements into a Loopy model using SIF Assembler.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        loopy_url : str
            Assembled Loopy model string.
        """
        payload = request.json
        statements = stmts_from_json(payload.get('statements'))
        assembler = SifAssembler(statements)
        assembler.make_model(use_name_as_key=True)
        return {'loopy_url': assembler.print_loopy(as_url=True)}
# Create resources for NDEx namespace
# Request schema for endpoints that operate on an existing NDEx network.
network_model = api.model('Network', {'network_id': fields.String})
@ndex_ns.expect(stmts_model)
@ndex_ns.route('/share_model_ndex')
class ShareModelNdex(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Upload the model to NDEX.

        Parameters
        ----------
        statements : list[indra.statements.Statement.to_json()]
            A list of INDRA Statements to assemble.

        Returns
        -------
        network_id : str
            ID of uploaded NDEx network.
        """
        args = request.json
        stmts_json = args.get('statements')
        stmts = stmts_from_json(stmts_json)
        ca = CxAssembler(stmts)
        # NOTE(review): every key of the request payload — including the
        # 'statements' list itself — is stored as a string network
        # attribute here. Presumably intentional so the network can be
        # rebuilt later (see fetch_model_ndex); confirm before changing.
        for n, v in args.items():
            ca.cx['networkAttributes'].append({'n': n, 'v': v, 'd': 'string'})
        ca.make_model()
        # Networks are uploaded publicly.
        network_id = ca.upload_model(private=False)
        return {'network_id': network_id}
@ndex_ns.expect(network_model)
@ndex_ns.route('/fetch_model_ndex')
class FetchModelNdex(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Download model and associated pieces from NDEX.

        Parameters
        ----------
        network_id : str
            ID of NDEx network to fetch.

        Returns
        -------
        stored_data : dict
            Dictionary representing the network.
        """
        payload = request.json
        cx = process_ndex_network(payload.get('network_id'))
        # Locate the networkAttributes aspect among the CX aspects.
        attr_aspects = [aspect for aspect in cx.cx
                        if aspect.get('networkAttributes')]
        attributes = attr_aspects[0]['networkAttributes']
        keep_keys = ['txt_input', 'parser',
                     'model_elements', 'preset_pos', 'stmts',
                     'sentences', 'evidence', 'cell_line', 'mrna', 'mutations']
        # Keep only the attributes this API knows how to interpret.
        stored_data = {entry['n']: entry['v'] for entry in attributes
                       if entry['n'] in keep_keys}
        return stored_data
# Create resources for INDRA DB REST namespace
# Request schema: a single INDRA Statement JSON under the 'statement' key.
stmt_model = api.model('Statement', {'statement': fields.Nested(dict_model)})
@indra_db_rest_ns.expect(stmt_model)
@indra_db_rest_ns.route('/get_evidence')
class GetEvidence(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Get all evidence for a given INDRA statement.

        Parameters
        ----------
        statements : indra.statements.Statement.to_json()
            An INDRA Statement to get evidence for.

        Returns
        -------
        statements : list[indra.statements.Statement.to_json()]
            A list of retrieved INDRA Statements with evidence.
        """
        args = request.json
        stmt_json = args.get('statement')
        stmt = Statement._from_json(stmt_json)

        def _get_agent_ref(agent):
            """Get the preferred ref for an agent for db web api."""
            if agent is None:
                return None
            ag_hgnc_id = hgnc_client.get_hgnc_id(agent.name)
            if ag_hgnc_id is not None:
                return ag_hgnc_id + "@HGNC"
            db_refs = agent.db_refs
            for namespace in ['HGNC', 'FPLX', 'CHEBI', 'TEXT']:
                if namespace in db_refs.keys():
                    return '%s@%s' % (db_refs[namespace], namespace)
            # Fall back to the plain agent name as a TEXT reference.
            return '%s@%s' % (agent.name, 'TEXT')

        def _get_matching_stmts(stmt_ref):
            """Query the DB for statements matching the reference's type/agents."""
            # Filter by statement type.
            stmt_type = stmt_ref.__class__.__name__
            agent_name_list = [
                _get_agent_ref(ag) for ag in stmt_ref.agent_list()]
            non_binary_statements = (Complex, SelfModification, ActiveForm)
            # TODO: We should look at more than just the agent name.
            # Doing so efficiently may require changes to the web api.
            if isinstance(stmt_ref, non_binary_statements):
                agent_list = [ag_name for ag_name in agent_name_list
                              if ag_name is not None]
                kwargs = {}
            else:
                agent_list = []
                kwargs = {k: v for k, v in zip(['subject', 'object'],
                                               agent_name_list)}
                # Binary statement with no resolvable subject or object:
                # nothing to query for.
                if not any(kwargs.values()):
                    return []
            ip = get_statements(agents=agent_list, stmt_type=stmt_type,
                                **kwargs)
            return ip.statements

        # Removed a leftover debug print of the agent list, and the
        # redundant `stmts_out = stmts = ...` double assignment.
        stmts_out = _get_matching_stmts(stmt)
        agent_name_list = [ag.name for ag in stmt.agent_list()]
        stmts_out = filter_concept_names(
            stmts_out, agent_name_list, 'all')
        return _return_stmts(stmts_out)
# Create resources for Databases namespace
# Request schema for cBioPortal CCLE lookups: genes plus cell lines.
cbio_model = api.model('Cbio', {
    'gene_list': fields.List(fields.String, example=["FOSL1", "GRB2"]),
    'cell_lines': fields.List(fields.String, example=['COLO679_SKIN'])
})
@databases_ns.expect(cbio_model)
@databases_ns.route('/cbio/get_ccle_mrna')
class CbioMrna(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Get CCLE mRNA amounts using cBioClient

        Parameters
        ----------
        gene_list : list[str]
            A list of HGNC gene symbols to get mRNA amounts for.
        cell_lines : list[str]
            A list of CCLE cell line names to get mRNA amounts for.

        Returns
        -------
        mrna_amounts : dict[dict[float]]
            A dict keyed to cell lines containing a dict keyed to genes
            containing float
        """
        payload = request.json
        amounts = cbio_client.get_ccle_mrna(payload.get('gene_list'),
                                            payload.get('cell_lines'))
        return {'mrna_amounts': amounts}
@databases_ns.expect(cbio_model)
@databases_ns.route('/cbio/get_ccle_cna')
class CbioCna(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Get CCLE CNA

        -2 = homozygous deletion

        -1 = hemizygous deletion

        0 = neutral / no change

        1 = gain

        2 = high level amplification

        Parameters
        ----------
        gene_list : list[str]
            A list of HGNC gene symbols to get mutations in.
        cell_lines : list[str]
            A list of CCLE cell line names to get mutations for.

        Returns
        -------
        cna : dict[dict[int]]
            A dict keyed to cases containing a dict keyed to genes
            containing int
        """
        payload = request.json
        copy_numbers = cbio_client.get_ccle_cna(payload.get('gene_list'),
                                                payload.get('cell_lines'))
        return {'cna': copy_numbers}
@databases_ns.expect(cbio_model)
@databases_ns.route('/cbio/get_ccle_mutations')
class CbioMutations(Resource):
    @api.doc(False)
    def options(self):
        # CORS preflight handler; no content needed.
        return {}

    def post(self):
        """Get CCLE mutations

        Parameters
        ----------
        gene_list : list[str]
            A list of HGNC gene symbols to get mutations in
        cell_lines : list[str]
            A list of CCLE cell line names to get mutations for.

        Returns
        -------
        mutations : dict
            The result from cBioPortal as a dict in the format
            {cell_line : {gene : [mutation1, mutation2, ...] }}
        """
        payload = request.json
        found = cbio_client.get_ccle_mutations(payload.get('gene_list'),
                                               payload.get('cell_lines'))
        return {'mutations': found}
if __name__ == '__main__':
    # Command-line entry point for running the API server directly.
    parser = argparse.ArgumentParser('Run the INDRA REST API')
    parser.add_argument('--host', default='0.0.0.0')
    parser.add_argument('--port', default=8080, type=int)
    cli_args = parser.parse_args()
    app.run(host=cli_args.host, port=cli_args.port)
| johnbachman/indra | rest_api/api.py | Python | bsd-2-clause | 39,694 | [
"Cytoscape"
] | 6346c02f370f7d9eaf532dbbde371c84ea3d5dd3c70a81cdde8ad3c07c754a96 |
from openmm_systems.test_systems import (
LennardJonesPair,
LysozymeImplicit,
)
import simtk.openmm.app as omma
import simtk.openmm as omm
import simtk.unit as unit
from wepy.runners.openmm import gen_sim_state
import time
def create_sim():
    """Build a CPU-platform OpenMM Simulation of lysozyme in implicit solvent."""
    system_spec = LysozymeImplicit()

    # Langevin dynamics at 300 K, 1/ps friction, 2 fs timestep.
    langevin = omm.LangevinIntegrator(300.0*unit.kelvin,
                                      1/unit.picosecond,
                                      0.002*unit.picoseconds)

    state = gen_sim_state(system_spec.positions, system_spec.system, langevin)

    cpu_platform = omm.Platform.getPlatformByName('CPU')

    sim = omma.Simulation(
        system_spec.topology,
        system_spec.system,
        langevin,
        platform=cpu_platform,
    )
    sim.context.setState(state)

    return sim
def run_sim(sim, steps):
    """Advance the simulation by `steps` integrator steps and return it."""
    sim.integrator.step(steps)
    return sim
def main():
    """Create several simulations, then run them back to back, timing each."""
    num_sims = 2
    steps = 5000

    simulations = [create_sim() for _ in range(num_sims)]

    for i, sim in enumerate(simulations):
        start = time.time()
        run_sim(sim, steps)
        end = time.time()
        print(f"Sim {i} took: {end - start}")
if __name__ == "__main__":
    # Guard the entry point so importing this module does not trigger the
    # full benchmark run (previously these statements executed on import).
    start = time.time()
    main()
    end = time.time()
    print(f"Took {end - start} seconds")
| ADicksonLab/wepy | jigs/trio_mapper/source/sync_openmm.py | Python | mit | 1,288 | [
"OpenMM"
] | 4165b3ae598bfb555f85e9b45100d96f06a8ccd926eb84c2445328656ca6145e |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import session, msgprint
from webnotes.utils import today,add_days,cint,nowdate,formatdate
sql = webnotes.conn.sql
from utilities.transaction_base import TransactionBase
class DocType(TransactionBase):
    """Controller for the Customer Issue doctype."""

    def __init__(self, doc, doclist=None):
        self.doc = doc
        # Avoid the shared mutable default argument ([]) the original used;
        # a fresh list is created per instance instead.
        self.doclist = doclist if doclist is not None else []

    def validate(self):
        """Require a customer for logged-in users; stamp resolution info on close."""
        if session['user'] != 'Guest' and not self.doc.customer:
            msgprint("Please select Customer from whom issue is raised",
                raise_exception=True)

        # Record resolution details only on the transition into 'Closed'.
        if self.doc.status=="Closed" and \
            webnotes.conn.get_value("Customer Issue", self.doc.name, "status")!="Closed":
            self.doc.resolution_date = today()
            self.doc.resolved_by = webnotes.session.user

    def on_cancel(self):
        """Block cancellation when an active Maintenance Visit references this issue."""
        # Parameterized query; the original interpolated self.doc.name with
        # %-formatting, which is an SQL injection risk.
        lst = sql("""select t1.name from `tabMaintenance Visit` t1,
            `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name
            and t2.prevdoc_docname = %s and t1.docstatus!=2""",
            (self.doc.name,))
        if lst:
            lst1 = ','.join([x[0] for x in lst])
            msgprint("Maintenance Visit No. "+lst1+" already created against this customer issue. So can not be Cancelled")
            raise Exception
        else:
            webnotes.conn.set(self.doc, 'status', 'Cancelled')

    def on_update(self):
        pass
@webnotes.whitelist()
def make_maintenance_visit(source_name, target_doclist=None):
    """Map a Customer Issue into a new Maintenance Visit doclist.

    Returns nothing if a fully-completed, submitted visit already exists
    for the issue; otherwise returns the mapped document fields.
    """
    from webnotes.model.mapper import get_mapped_doclist

    completed_visit = webnotes.conn.sql("""select t1.name
        from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
        where t2.parent=t1.name and t2.prevdoc_docname=%s
        and t1.docstatus=1 and t1.completion_status='Fully Completed'""",
        source_name)

    if completed_visit:
        return

    field_map = {
        "complaint": "description",
        "doctype": "prevdoc_doctype",
        "name": "prevdoc_docname",
    }
    doclist = get_mapped_doclist("Customer Issue", source_name, {
        "Customer Issue": {
            "doctype": "Maintenance Visit",
            "field_map": field_map,
        }
    }, target_doclist)

    return [d.fields for d in doclist]
@webnotes.whitelist()
def get_warranty_code_details(warranty_code):
    """Look up serial number, customer and warranty end date for a code.

    Returns a single-element list of details when the code matches a
    Serial No record, otherwise returns None (implicitly), matching the
    original behavior.
    """
    # Parameterized queries; the original interpolated user input with
    # %-formatting (SQL injection risk) and left debug=1 / errprint debug
    # calls in place.
    customer_details = webnotes.conn.sql(
        """select item_code, name, coalesce(customer, '') as customer
           from `tabSerial No` where warranty_code=%s""",
        (warranty_code,), as_dict=1)
    if customer_details:
        warranty_period = webnotes.conn.sql(
            """select end_customer_warranty_period from `tabItem`
               where name=%s""",
            (customer_details[0]['item_code'],), as_dict=1)
        if warranty_period:
            # cint(None) is 0, so a missing period yields today's date.
            final_date = add_days(
                nowdate(),
                cint(warranty_period[0]['end_customer_warranty_period']))
        else:
            final_date = nowdate()
        return [{
            "item_code": customer_details[0]['item_code'],
            "serial_no": customer_details[0]['name'],
            "end_date": final_date,
            "customer": customer_details[0]['customer']
        }]
| gangadhar-kadam/sapphire_app | support/doctype/customer_issue/customer_issue.py | Python | agpl-3.0 | 3,002 | [
"VisIt"
] | 06076fffe95317533a114c001eb777f6cee22616a8fba9dbaece22b4100efa91 |
"""
Loads synthetic reaction datasets from USPTO.
This file contains loaders for synthetic reaction datasets from the US Patenent Office. http://nextmovesoftware.com/blog/2014/02/27/unleashing-over-a-million-reactions-into-the-wild/.
"""
import os
import csv
import logging
import deepchem
import numpy as np
from deepchem.data import DiskDataset
logger = logging.getLogger(__name__)
DEFAULT_DIR = deepchem.utils.data_utils.get_data_dir()
USPTO_URL = "https://bitbucket.org/dan2097/patent-reaction-extraction/downloads/2008-2011_USPTO_reactionSmiles_filtered.zip"
def load_uspto(featurizer="plain",
               split=None,
               num_to_load=10000,
               reload=True,
               verbose=False,
               data_dir=None,
               save_dir=None,
               **kwargs):
  """Load USPTO dataset.

  For now, only loads the subset of data for 2008-2011 reactions.

  See https://figshare.com/articles/Chemical_reactions_from_US_patents_1976-Sep2016_/5104873
  for more details. The full dataset contains some 400K reactions. This causes
  an out-of-memory error on development laptop if full dataset is featurized.
  For now, return a truncated subset of dataset.

  At most `num_to_load` reactions are parsed (previously `num_to_load + 1`
  rows were read due to an off-by-one in the loop bound).

  Reloading is not entirely supported for this dataset.
  """
  if data_dir is None:
    data_dir = DEFAULT_DIR
  if save_dir is None:
    save_dir = DEFAULT_DIR

  # Most reaction dataset ML tasks train the prediction of products from
  # ractants. Both of these are contained in the rxn object that is output,
  # so there is no "tasks" field.
  uspto_tasks = []
  if split is not None:
    raise ValueError("Train/valid/test not yet supported.")
  # Download USPTO dataset
  if reload:
    save_folder = os.path.join(save_dir, "uspto-featurized", str(featurizer))
    if featurizer == "smiles2img":
      img_spec = kwargs.get("img_spec", "std")
      save_folder = os.path.join(save_folder, img_spec)
    save_folder = os.path.join(save_folder, str(split))

    loaded, all_dataset, transformers = deepchem.utils.data_utils.load_dataset_from_disk(
        save_folder)
    if loaded:
      return uspto_tasks, all_dataset, transformers

  dataset_file = os.path.join(data_dir,
                              "2008-2011_USPTO_reactionSmiles_filtered.zip")
  if not os.path.exists(dataset_file):
    deepchem.utils.data_utils.download_url(url=USPTO_URL, dest_dir=data_dir)

  # Unzip
  unzip_dir = os.path.join(data_dir, "2008-2011_USPTO_reactionSmiles_filtered")
  if not os.path.exists(unzip_dir):
    deepchem.utils.data_utils.unzip_file(dataset_file, dest_dir=unzip_dir)

  # Unzipped file is a tap seperated values file (despite the .txt)
  filename = os.path.join(unzip_dir,
                          "2008-2011_USPTO_reactionSmiles_filtered.txt")
  rxns = []
  from rdkit.Chem import rdChemReactions
  with open(filename) as tsvfile:
    reader = csv.reader(tsvfile, delimiter="\t")
    for ind, row in enumerate(reader):
      # Stop once num_to_load reactions have been collected; the original
      # `ind > num_to_load` bound loaded one extra row.
      if ind >= num_to_load:
        break
      if verbose:
        # Use the module logger rather than a bare print.
        logger.info("Loading reaction %d", ind)
      # The first element in the row is the reaction smarts
      smarts = row[0]
      # Sometimes smarts have extraneous information at end of form "
      # |f:0" that causes parsing to fail. Not sure what this information
      # is, but just ignoring for now.
      smarts = smarts.split(" ")[0]
      rxn = rdChemReactions.ReactionFromSmarts(smarts)
      rxns.append(rxn)
  rxn_array = np.array(rxns)
  # Make up dummy labels since DiskDataset.from_numpy doesn't allow
  # creation from just features for now.
  y = np.ones(len(rxn_array))
  # TODO: This dataset isn't saved to disk so reload doesn't happen.
  rxn_dataset = DiskDataset.from_numpy(rxn_array, y)
  transformers = []
  return uspto_tasks, (rxn_dataset, None, None), transformers
| lilleswing/deepchem | deepchem/molnet/load_function/uspto_datasets.py | Python | mit | 3,783 | [
"RDKit"
] | f645712ea5bad992601456acedee6efd2a72c942a869c5b3699a1c94968f4ee1 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from functools import reduce
try:
# New Py>=3.5 import
from math import gcd
except ImportError:
# Deprecated import from Py3.5 onwards.
from fractions import gcd
import math
import itertools
import logging
import warnings
import numpy as np
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage, fcluster
from monty.fractions import lcm
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure, Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import in_coord_list
from pymatgen.analysis.structure_matcher import StructureMatcher
"""
This module implements representations of slabs and surfaces, as well as
algorithms for generating them. If you use this module, please consider
citing the following work::
R. Tran, Z. Xu, B. Radhakrishnan, D. Winston, W. Sun, K. A. Persson,
S. P. Ong, "Surface Energies of Elemental Crystals", Scientific Data,
2016, 3:160080, doi: 10.1038/sdata.2016.80.
as well as::
Sun, W.; Ceder, G. Efficient creation and convergence of surface slabs,
Surface Science, 2013, 617, 53–59, doi:10.1016/j.susc.2013.05.016.
"""
__author__ = "Richard Tran, Wenhao Sun, Zihan Xu, Shyue Ping Ong"
__copyright__ = "Copyright 2014, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "6/10/14"
logger = logging.getLogger(__name__)
class Slab(Structure):
"""
Subclass of Structure representing a Slab. Implements additional
attributes pertaining to slabs, but the init method does not
actually implement any algorithm that creates a slab. This is a
DUMMY class who's init method only holds information about the
slab. Also has additional methods that returns other information
about a slab such as the surface area, normal, and atom adsorption.
Note that all Slabs have the surface normal oriented in the c-direction.
This means the lattice vectors a and b are in the surface plane and the c
vector is out of the surface plane (though not necessary perpendicular to
the surface.)
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: scale_factor
Final computed scale factor that brings the parent cell to the
surface cell.
.. attribute:: shift
The shift value in Angstrom that indicates how much this
slab has been shifted.
"""
    def __init__(self, lattice, species, coords, miller_index,
                 oriented_unit_cell, shift, scale_factor, reorient_lattice=True,
                 validate_proximity=False, to_unit_cell=False,
                 coords_are_cartesian=False, site_properties=None, energy=None):
        """
        Makes a Slab structure, a structure object with additional information
        and methods pertaining to slabs.

        Args:
            lattice (Lattice/3x3 array): The lattice, either as a
                :class:`pymatgen.core.lattice.Lattice` or
                simply as any 2D array. Each row should correspond to a lattice
                vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
                lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
            species ([Specie]): Sequence of species on each site. Can take in
                flexible input, including:

                i.  A sequence of element / specie specified either as string
                    symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
                    e.g., (3, 56, ...) or actual Element or Specie objects.

                ii. List of dict of elements/species and occupancies, e.g.,
                    [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
                    disordered structures.
            coords (Nx3 array): list of fractional/cartesian coordinates of
                each species.
            miller_index ([h, k, l]): Miller index of plane parallel to
                surface. Note that this is referenced to the input structure. If
                you need this to be based on the conventional cell,
                you should supply the conventional structure.
            oriented_unit_cell (Structure): The oriented_unit_cell from which
                this Slab is created (by scaling in the c-direction).
            shift (float): The shift in the c-direction applied to get the
                termination.
            scale_factor (array): scale_factor Final computed scale factor
                that brings the parent cell to the surface cell.
            reorient_lattice (bool): reorients the lattice parameters such that
                the c direction is the third vector of the lattice matrix
            validate_proximity (bool): Whether to check if there are sites
                that are less than 0.01 Ang apart. Defaults to False.
            coords_are_cartesian (bool): Set to True if you are providing
                coordinates in cartesian coordinates. Defaults to False.
            site_properties (dict): Properties associated with the sites as a
                dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
                have to be the same length as the atomic species and
                fractional_coords. Defaults to None for no properties.
            energy (float): A value for the energy.
        """
        # Record slab-specific metadata before delegating to Structure's
        # constructor; the miller index is normalized to a tuple so it is
        # hashable and immutable.
        self.oriented_unit_cell = oriented_unit_cell
        self.miller_index = tuple(miller_index)
        self.shift = shift
        self.scale_factor = scale_factor
        self.energy = energy
        self.reorient_lattice = reorient_lattice
        # When reorienting, rebuild the lattice from its scalar parameters,
        # which produces a standard orientation of the lattice matrix
        # (c as the third vector) while preserving lengths and angles.
        lattice = Lattice.from_parameters(lattice.a, lattice.b, lattice.c,
                                          lattice.alpha, lattice.beta,
                                          lattice.gamma) \
            if self.reorient_lattice else lattice
        super(Slab, self).__init__(
            lattice, species, coords, validate_proximity=validate_proximity,
            to_unit_cell=to_unit_cell,
            coords_are_cartesian=coords_are_cartesian,
            site_properties=site_properties)
def get_orthogonal_c_slab(self):
"""
This method returns a Slab where the normal (c lattice vector) is
"forced" to be exactly orthogonal to the surface a and b lattice
vectors. **Note that this breaks inherent symmetries in the slab.**
It should be pointed out that orthogonality is not required to get good
surface energies, but it can be useful in cases where the slabs are
subsequently used for postprocessing of some kind, e.g. generating
GBs or interfaces.
"""
a, b, c = self.lattice.matrix
new_c = np.cross(a, b)
new_c /= np.linalg.norm(new_c)
new_c = np.dot(c, new_c) * new_c
new_latt = Lattice([a, b, new_c])
return Slab(lattice=new_latt, species=self.species,
coords=self.cart_coords, miller_index=self.miller_index,
oriented_unit_cell=self.oriented_unit_cell,
shift=self.shift, scale_factor=self.scale_factor,
coords_are_cartesian=True, energy=self.energy,
reorient_lattice=self.reorient_lattice)
    def get_tasker2_slabs(self, tol=0.01, same_species_only=True):
        """
        Get a list of slabs that have been Tasker 2 corrected.
        Args:
            tol (float): Tolerance to determine if atoms are within same plane.
                This is a fractional tolerance, not an absolute one.
            same_species_only (bool): If True, only that are of the exact same
                species as the atom at the outermost surface are considered for
                moving. Otherwise, all atoms regardless of species that is
                within tol are considered for moving. Default is True (usually
                the desired behavior).
        Returns:
            ([Slab]) List of tasker 2 corrected slabs.
        """
        sites = list(self.sites)
        slabs = []
        sortedcsites = sorted(sites, key=lambda site: site.c)
        # Determine what fraction the slab is of the total cell size in the
        # c direction. Round to nearest rational number.
        nlayers_total = int(round(self.lattice.c /
                                  self.oriented_unit_cell.lattice.c))
        nlayers_slab = int(round((sortedcsites[-1].c - sortedcsites[0].c)
                                 * nlayers_total))
        slab_ratio = nlayers_slab / nlayers_total
        a = SpacegroupAnalyzer(self)
        symm_structure = a.get_symmetrized_structure()

        def equi_index(site):
            # Map a site to the index of its symmetry-equivalence class.
            for i, equi_sites in enumerate(symm_structure.equivalent_sites):
                if site in equi_sites:
                    return i
            raise ValueError("Cannot determine equi index!")

        # Handle both terminations: move bottom-surface atoms up by
        # slab_ratio, and top-surface atoms down by the same amount.
        for surface_site, shift in [(sortedcsites[0], slab_ratio),
                                    (sortedcsites[-1], -slab_ratio)]:
            # Split sites into surface-plane candidates for moving vs fixed.
            tomove = []
            fixed = []
            for site in sites:
                if abs(site.c - surface_site.c) < tol and (
                        (not same_species_only) or
                        site.species_and_occu == surface_site.species_and_occu):
                    tomove.append(site)
                else:
                    fixed.append(site)
            # Sort and group the sites by the species and symmetry equivalence
            tomove = sorted(tomove, key=lambda s: equi_index(s))
            grouped = [list(sites) for k, sites in itertools.groupby(
                tomove, key=lambda s: equi_index(s))]
            # Each equivalence group must split evenly between the two
            # surfaces; otherwise a Tasker 2 correction is impossible here.
            if len(tomove) == 0 or any([len(g) % 2 != 0 for g in grouped]):
                warnings.warn("Odd number of sites to divide! Try changing "
                              "the tolerance to ensure even division of "
                              "sites or create supercells in a or b directions "
                              "to allow for atoms to be moved!")
                continue
            combinations = []
            for g in grouped:
                combinations.append(
                    [c for c in itertools.combinations(g, int(len(g) / 2))])
            for selection in itertools.product(*combinations):
                species = [site.species_and_occu for site in fixed]
                fcoords = [site.frac_coords for site in fixed]
                for s in tomove:
                    species.append(s.species_and_occu)
                    for group in selection:
                        if s in group:
                            fcoords.append(s.frac_coords)
                            break
                    else:
                        # Move unselected atom to the opposite surface.
                        fcoords.append(s.frac_coords + [0, 0, shift])
                # sort by species to put all similar species together.
                sp_fcoord = sorted(zip(species, fcoords), key=lambda x: x[0])
                species = [x[0] for x in sp_fcoord]
                fcoords = [x[1] for x in sp_fcoord]
                slab = Slab(self.lattice, species, fcoords, self.miller_index,
                            self.oriented_unit_cell, self.shift,
                            self.scale_factor, energy=self.energy,
                            reorient_lattice=self.reorient_lattice)
                slabs.append(slab)
        # Deduplicate structurally identical slabs.
        s = StructureMatcher()
        unique = [ss[0] for ss in s.group_structures(slabs)]
        return unique
def is_symmetric(self, symprec=0.1):
"""
Checks if slab is symmetric, i.e., contains inversion symmetry.
Args:
symprec (float): Symmetry precision used for SpaceGroup analyzer.
Returns:
(bool) Whether slab contains inversion symmetry.
"""
sg = SpacegroupAnalyzer(self, symprec=symprec)
return sg.is_laue()
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species. Note that Slab has to override this
because of the different __init__ args.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
s = Structure.from_sites(sites)
return Slab(s.lattice, s.species_and_occu, s.frac_coords,
self.miller_index, self.oriented_unit_cell, self.shift,
self.scale_factor, site_properties=s.site_properties,
reorient_lattice=self.reorient_lattice)
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
sorted by electronegativity, (ii) a LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
return Slab(self.lattice, self.species_and_occu, self.frac_coords,
self.miller_index, self.oriented_unit_cell, self.shift,
self.scale_factor, site_properties=props,
reorient_lattice=self.reorient_lattice)
@property
def dipole(self):
"""
Calculates the dipole of the Slab in the direction of the surface
normal. Note that the Slab must be oxidation state-decorated for this
to work properly. Otherwise, the Slab will always have a dipole of 0.
"""
dipole = np.zeros(3)
mid_pt = np.sum(self.cart_coords, axis=0) / len(self)
normal = self.normal
for site in self:
charge = sum([getattr(sp, "oxi_state", 0) * amt
for sp, amt in site.species_and_occu.items()])
dipole += charge * np.dot(site.coords - mid_pt, normal) * normal
return dipole
def is_polar(self, tol_dipole_per_unit_area=1e-3):
"""
Checks whether the surface is polar by computing the dipole per unit
area. Note that the Slab must be oxidation state-decorated for this
to work properly. Otherwise, the Slab will always be non-polar.
Args:
tol_dipole_per_unit_area (float): A tolerance. If the dipole
magnitude per unit area is less than this value, the Slab is
considered non-polar. Defaults to 1e-3, which is usually
pretty good. Normalized dipole per unit area is used as it is
more reliable than using the total, which tends to be larger for
slabs with larger surface areas.
"""
dip_per_unit_area = self.dipole / self.surface_area
return np.linalg.norm(dip_per_unit_area) > tol_dipole_per_unit_area
@property
def normal(self):
"""
Calculates the surface normal vector of the slab
"""
normal = np.cross(self.lattice.matrix[0], self.lattice.matrix[1])
normal /= np.linalg.norm(normal)
return normal
@property
def surface_area(self):
"""
Calculates the surface area of the slab
"""
m = self.lattice.matrix
return np.linalg.norm(np.cross(m[0], m[1]))
@property
def center_of_mass(self):
"""
Calculates the center of mass of the slab
"""
weights = [s.species_and_occu.weight for s in self]
center_of_mass = np.average(self.frac_coords,
weights=weights, axis=0)
return center_of_mass
def add_adsorbate_atom(self, indices, specie, distance):
"""
Gets the structure of single atom adsorption.
slab structure from the Slab class(in [0, 0, 1])
Args:
indices ([int]): Indices of sites on which to put the absorbate.
Absorbed atom will be displaced relative to the center of
these sites.
specie (Specie/Element/str): adsorbed atom species
distance (float): between centers of the adsorbed atom and the
given site in Angstroms.
"""
# Let's do the work in cartesian coords
center = np.sum([self[i].coords for i in indices], axis=0) / len(
indices)
coords = center + self.normal * distance / np.linalg.norm(self.normal)
self.append(specie, coords, coords_are_cartesian=True)
def __str__(self):
comp = self.composition
outs = [
"Slab Summary (%s)" % comp.formula,
"Reduced Formula: %s" % comp.reduced_formula,
"Miller index: %s" % (self.miller_index, ),
"Shift: %.4f, Scale Factor: %s" % (self.shift,
self.scale_factor.__str__())]
to_s = lambda x: "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10)
for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10)
for i in self.lattice.angles]))
outs.append("Sites ({i})".format(i=len(self)))
for i, site in enumerate(self):
outs.append(" ".join([str(i + 1), site.species_string,
" ".join([to_s(j).rjust(12)
for j in site.frac_coords])]))
return "\n".join(outs)
def as_dict(self):
d = super(Slab, self).as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["oriented_unit_cell"] = self.oriented_unit_cell.as_dict()
d["miller_index"] = self.miller_index
d["shift"] = self.shift
d["scale_factor"] = self.scale_factor
d["energy"] = self.energy
return d
@classmethod
def from_dict(cls, d):
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return Slab(
lattice=lattice,
species=s.species_and_occu, coords=s.frac_coords,
miller_index=d["miller_index"],
oriented_unit_cell=Structure.from_dict(d["oriented_unit_cell"]),
shift=d["shift"], scale_factor=d["scale_factor"],
site_properties=s.site_properties, energy=d["energy"]
)
    def get_surface_sites(self, tag=False):
        """
        Returns the surface sites and their indices in a dictionary. The
        oriented unit cell of the slab will determine the coordination number
        of a typical site. We use VoronoiCoordFinder to determine the
        coordination number of bulk sites and slab sites. Due to the
        pathological error resulting from some surface sites in the
        VoronoiCoordFinder, we assume any site that has this error is a surface
        site as well. This will work for elemental systems only for now. Useful
        for analysis involving broken bonds and for finding adsorption sites.
        Args:
            tag (bool): Option to adds site attribute "is_surf_site" (bool) to
                all sites of slab. Defaults to False
        Returns:
            A dictionary grouping sites on top and bottom of the slab together.
            {"top": [sites with indices], "bottom": [sites with indices}
        TODO:
            Is there a way to determine site equivalence between sites in a slab
            and bulk system? This would allow us get the coordination number of a
            specific site for multi-elemental systems or systems with more than one
            unequivalent sites. This will allow us to use this for compound systems.
        """
        from pymatgen.analysis.structure_analyzer import VoronoiCoordFinder

        # Get a dictionary of coordination numbers
        # for each distinct site in the structure
        a = SpacegroupAnalyzer(self.oriented_unit_cell)
        ucell = a.get_symmetrized_structure()
        cn_dict = {}
        v = VoronoiCoordFinder(ucell)
        # One representative index per symmetry-equivalence class.
        unique_indices = [equ[0] for equ in ucell.equivalent_indices]
        for i in unique_indices:
            el = ucell[i].species_string
            if el not in cn_dict.keys():
                cn_dict[el] = []
            # Since this will get the cn as a result of the weighted polyhedra, the
            # slightest difference in cn will indicate a different environment for a
            # species, eg. bond distance of each neighbor or neighbor species. The
            # decimal place to get some cn to be equal.
            cn = v.get_coordination_number(i)
            # Truncate to 5 decimals so near-identical environments compare equal.
            cn = float('%.5f' %(round(cn, 5)))
            if cn not in cn_dict[el]:
                cn_dict[el].append(cn)
        v = VoronoiCoordFinder(self)
        surf_sites_dict, properties = {"top": [], "bottom": []}, []
        for i, site in enumerate(self):
            # Determine if site is closer to the top or bottom of the slab
            top = True if site.frac_coords[2] > self.center_of_mass[2] else False
            try:
                # A site is a surface site, if its environment does
                # not fit the environment of other sites
                cn = float('%.5f' %(round(v.get_coordination_number(i), 5)))
                if cn < min(cn_dict[site.species_string]):
                    properties.append(True)
                    key = "top" if top else "bottom"
                    surf_sites_dict[key].append([site, i])
                else:
                    properties.append(False)
            except RuntimeError:
                # or if pathological error is returned, indicating a surface site
                properties.append(True)
                key = "top" if top else "bottom"
                surf_sites_dict[key].append([site, i])
        if tag:
            self.add_site_property("is_surf_site", properties)
        return surf_sites_dict
    def have_equivalent_surfaces(self):
        """
        Check if we have same number of equivalent sites on both surfaces.
        This is an alternative to checking Laue symmetry (is_symmetric())
        if we want to ensure both surfaces in the slab are the same
        Returns:
            (bool) True if every symmetry-equivalence class contributes an
            equal number of surface sites to the top and the bottom surface.
        """
        # tag the sites as either surface sites or not
        surf_sites_dict = self.get_surface_sites(tag=True)
        a = SpacegroupAnalyzer(self)
        symm_structure = a.get_symmetrized_structure()
        # ensure each site on one surface has a
        # corresponding equivalent site on the other
        equal_surf_sites = []
        for equ in symm_structure.equivalent_sites:
            # Top and bottom are arbitrary, we will just determine
            # if one site is on one side of the slab or the other
            top, bottom = 0, 0
            for s in equ:
                # NOTE(review): reads the "is_surf_site" property (added by
                # get_surface_sites above) via attribute access; assumes sites
                # expose site properties as attributes — confirm against the
                # Site implementation in use.
                if s.is_surf_site:
                    if s.frac_coords[2] > self.center_of_mass[2]:
                        top += 1
                    else:
                        bottom += 1
            # Check to see if the number of equivalent sites
            # on one side of the slab are equal to the other
            equal_surf_sites.append(top == bottom)
        return all(equal_surf_sites)
class SlabGenerator(object):
    """
    This class generates different slabs using shift values determined by where
    a unique termination can be found along with other criterias such as where a
    termination doesn't break a polyhedral bond. The shift value then indicates
    where the slab layer will begin and terminate in the slab-vacuum system.
    .. attribute:: oriented_unit_cell
        A unit cell of the parent structure with the miller
        index of plane parallel to surface
    .. attribute:: parent
        Parent structure from which Slab was derived.
    .. attribute:: lll_reduce
        Whether or not the slabs will be orthogonalized
    .. attribute:: center_slab
        Whether or not the slabs will be centered between
        the vacuum layer
    .. attribute:: slab_scale_factor
        Final computed scale factor that brings the parent cell to the
        surface cell.
    .. attribute:: miller_index
        Miller index of plane parallel to surface.
    .. attribute:: min_slab_size
        Minimum size in angstroms of layers containing atoms
    .. attribute:: min_vac_size
        Minimize size in angstroms of layers containing vacuum
    """

    def __init__(self, initial_structure, miller_index, min_slab_size,
                 min_vacuum_size, lll_reduce=False, center_slab=False,
                 primitive=True, max_normal_search=None, reorient_lattice=True):
        """
        Calculates the slab scale factor and uses it to generate a unit cell
        of the initial structure that has been oriented by its miller index.
        Also stores the initial information needed later on to generate a slab.
        Args:
            initial_structure (Structure): Initial input structure. Note that to
                ensure that the miller indices correspond to usual
                crystallographic definitions, you should supply a conventional
                unit cell structure.
            miller_index ([h, k, l]): Miller index of plane parallel to
                surface. Note that this is referenced to the input structure. If
                you need this to be based on the conventional cell,
                you should supply the conventional structure.
            min_slab_size (float): In Angstroms
            min_vacuum_size (float): In Angstroms
            lll_reduce (bool): Whether to perform an LLL reduction on the
                eventual structure.
            center_slab (bool): Whether to center the slab in the cell with
                equal vacuum spacing from the top and bottom.
            primitive (bool): Whether to reduce any generated slabs to a
                primitive cell (this does **not** mean the slab is generated
                from a primitive cell, it simply means that after slab
                generation, we attempt to find shorter lattice vectors,
                which lead to less surface area and smaller cells).
            max_normal_search (int): If set to a positive integer, the code will
                conduct a search for a normal lattice vector that is as
                perpendicular to the surface as possible by considering
                multiples linear combinations of lattice vectors up to
                max_normal_search. This has no bearing on surface energies,
                but may be useful as a preliminary step to generating slabs
                for absorption and other sizes. It is typical that this will
                not be the smallest possible cell for simulation. Normality
                is not guaranteed, but the oriented cell will have the c
                vector as normal as possible (within the search range) to the
                surface. A value of up to the max absolute Miller index is
                usually sufficient.
            reorient_lattice (bool): reorients the lattice parameters such that
                the c direction is the third vector of the lattice matrix
        """
        latt = initial_structure.lattice
        miller_index = reduce_vector(miller_index)
        # Calculate the surface normal using the reciprocal lattice vector.
        recp = latt.reciprocal_lattice_crystallographic
        normal = recp.get_cartesian_coords(miller_index)
        normal /= np.linalg.norm(normal)
        slab_scale_factor = []
        non_orth_ind = []
        # BUGFIX: use the builtin int as dtype. np.int was merely a
        # deprecated alias of the builtin (deprecated in NumPy 1.20, removed
        # in 1.24), so np.eye(3, dtype=np.int) fails on modern NumPy.
        eye = np.eye(3, dtype=int)
        for i, j in enumerate(miller_index):
            if j == 0:
                # Lattice vector is perpendicular to surface normal, i.e.,
                # in plane of surface. We will simply choose this lattice
                # vector as one of the basis vectors.
                slab_scale_factor.append(eye[i])
            else:
                # Calculate projection of lattice vector onto surface normal.
                d = abs(np.dot(normal, latt.matrix[i])) / latt.abc[i]
                non_orth_ind.append((i, d))
        # We want the vector that has maximum magnitude in the
        # direction of the surface normal as the c-direction.
        # Results in a more "orthogonal" unit cell.
        c_index, _ = max(non_orth_ind, key=lambda t: t[1])
        if len(non_orth_ind) > 1:
            lcm_miller = lcm(*[miller_index[i] for i, d in non_orth_ind])
            for (i, di), (j, dj) in itertools.combinations(non_orth_ind, 2):
                # In-plane basis vector: a combination of two non-orthogonal
                # lattice vectors whose Miller components cancel out.
                scale_vec = [0, 0, 0]
                scale_vec[i] = -int(round(lcm_miller / miller_index[i]))
                scale_vec[j] = int(round(lcm_miller / miller_index[j]))
                slab_scale_factor.append(scale_vec)
                if len(slab_scale_factor) == 2:
                    break
        if max_normal_search is None:
            slab_scale_factor.append(eye[c_index])
        else:
            # Search integer multiples of lattice vectors (up to
            # max_normal_search) for the candidate most parallel to the
            # surface normal; prefer shorter vectors among equal cosines.
            index_range = sorted(
                reversed(range(-max_normal_search, max_normal_search + 1)),
                key=lambda x: abs(x))
            candidates = []
            for uvw in itertools.product(index_range, index_range,
                                         index_range):
                # Skip the zero vector and any uvw coplanar with the two
                # in-plane basis vectors chosen above.
                if (not any(uvw)) or abs(
                        np.linalg.det(slab_scale_factor + [uvw])) < 1e-8:
                    continue
                vec = latt.get_cartesian_coords(uvw)
                vec_length = np.linalg.norm(vec)
                cosine = abs(np.dot(vec, normal) / vec_length)
                candidates.append((uvw, cosine, vec_length))
                if abs(abs(cosine) - 1) < 1e-8:
                    # If cosine of 1 is found, no need to search further.
                    break
            # We want the indices with the maximum absolute cosine,
            # but smallest possible length.
            uvw, cosine, vec_length = max(candidates,
                                          key=lambda x: (x[1], -x[2]))
            slab_scale_factor.append(uvw)
        slab_scale_factor = np.array(slab_scale_factor)
        # Let's make sure we have a left-handed crystallographic system
        if np.linalg.det(slab_scale_factor) < 0:
            slab_scale_factor *= -1
        # Make sure the slab_scale_factor is reduced to avoid
        # unnecessarily large slabs
        reduced_scale_factor = [reduce_vector(v) for v in slab_scale_factor]
        slab_scale_factor = np.array(reduced_scale_factor)
        single = initial_structure.copy()
        single.make_supercell(slab_scale_factor)
        self.oriented_unit_cell = Structure.from_sites(single,
                                                       to_unit_cell=True)
        self.parent = initial_structure
        self.lll_reduce = lll_reduce
        self.center_slab = center_slab
        self.slab_scale_factor = slab_scale_factor
        self.miller_index = miller_index
        self.min_vac_size = min_vacuum_size
        self.min_slab_size = min_slab_size
        self.primitive = primitive
        self._normal = normal
        a, b, c = self.oriented_unit_cell.lattice.matrix
        # Height of one oriented unit cell measured along the surface
        # normal; used later to convert Angstrom sizes into layer counts.
        self._proj_height = abs(np.dot(normal, c))
        self.reorient_lattice = reorient_lattice
    def get_slab(self, shift=0, tol=0.1, energy=None):
        """
        This method takes in shift value for the c lattice direction and
        generates a slab based on the given shift. You should rarely use this
        method. Instead, it is used by other generation algorithms to obtain
        all slabs.
        Arg:
            shift (float): A shift value in Angstrom that determines how much a
                slab should be shifted.
            tol (float): Tolerance to determine primitive cell.
            energy (float): An energy to assign to the slab.
        Returns:
            (Slab) A Slab object with a particular shifted oriented unit cell.
        """
        h = self._proj_height
        # Number of oriented-unit-cell repeats needed for the requested
        # slab and vacuum thicknesses.
        nlayers_slab = int(math.ceil(self.min_slab_size / h))
        nlayers_vac = int(math.ceil(self.min_vac_size / h))
        nlayers = nlayers_slab + nlayers_vac
        species = self.oriented_unit_cell.species_and_occu
        props = self.oriented_unit_cell.site_properties
        # Replicate per-site properties once per slab layer.
        props = {k: v * nlayers_slab for k, v in props.items()}
        frac_coords = self.oriented_unit_cell.frac_coords
        # Apply the termination shift along c, then wrap back into [0, 1).
        frac_coords = np.array(frac_coords) +\
                      np.array([0, 0, -shift])[None, :]
        frac_coords -= np.floor(frac_coords)
        a, b, c = self.oriented_unit_cell.lattice.matrix
        # New cell is nlayers (slab + vacuum) tall along c; rescale the
        # fractional c coordinates accordingly.
        new_lattice = [a, b, nlayers * c]
        frac_coords[:, 2] = frac_coords[:, 2] / nlayers
        all_coords = []
        for i in range(nlayers_slab):
            fcoords = frac_coords.copy()
            fcoords[:, 2] += i / nlayers
            all_coords.extend(fcoords)
        slab = Structure(new_lattice, species * nlayers_slab, all_coords,
                         site_properties=props)
        scale_factor = self.slab_scale_factor
        # Whether or not to orthogonalize the structure
        if self.lll_reduce:
            lll_slab = slab.copy(sanitize=True)
            # Keep scale_factor consistent with the LLL-reduced lattice.
            mapping = lll_slab.lattice.find_mapping(slab.lattice)
            scale_factor = np.dot(mapping[2], scale_factor)
            slab = lll_slab
        # Whether or not to center the slab layer around the vacuum
        if self.center_slab:
            avg_c = np.average([c[2] for c in slab.frac_coords])
            slab.translate_sites(list(range(len(slab))), [0, 0, 0.5 - avg_c])
        if self.primitive:
            prim = slab.get_primitive_structure(tolerance=tol)
            if energy is not None:
                # Scale the assigned energy by the primitive/original
                # volume ratio.
                energy = prim.volume / slab.volume * energy
            slab = prim
        return Slab(slab.lattice, slab.species_and_occu,
                    slab.frac_coords, self.miller_index,
                    self.oriented_unit_cell, shift,
                    scale_factor, site_properties=slab.site_properties,
                    energy=energy, reorient_lattice=self.reorient_lattice)
def _calculate_possible_shifts(self, tol=0.1):
frac_coords = self.oriented_unit_cell.frac_coords
n = len(frac_coords)
if n == 1:
# Clustering does not work when there is only one data point.
shift = frac_coords[0][2] + 0.5
return [shift - math.floor(shift)]
# We cluster the sites according to the c coordinates. But we need to
# take into account PBC. Let's compute a fractional c-coordinate
# distance matrix that accounts for PBC.
dist_matrix = np.zeros((n, n))
h = self._proj_height
# Projection of c lattice vector in
# direction of surface normal.
for i, j in itertools.combinations(list(range(n)), 2):
if i != j:
cdist = frac_coords[i][2] - frac_coords[j][2]
cdist = abs(cdist - round(cdist)) * h
dist_matrix[i, j] = cdist
dist_matrix[j, i] = cdist
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
clusters = fcluster(z, tol, criterion="distance")
# Generate dict of cluster# to c val - doesn't matter what the c is.
c_loc = {c: frac_coords[i][2] for i, c in enumerate(clusters)}
# Put all c into the unit cell.
possible_c = [c - math.floor(c) for c in sorted(c_loc.values())]
# Calculate the shifts
nshifts = len(possible_c)
shifts = []
for i in range(nshifts):
if i == nshifts - 1:
# There is an additional shift between the first and last c
# coordinate. But this needs special handling because of PBC.
shift = (possible_c[0] + 1 + possible_c[i]) * 0.5
if shift > 1:
shift -= 1
else:
shift = (possible_c[i] + possible_c[i + 1]) * 0.5
shifts.append(shift - math.floor(shift))
shifts = sorted(shifts)
return shifts
    def _get_c_ranges(self, bonds):
        """
        Return the set of fractional-c intervals spanned by the given bonds
        in the oriented unit cell. A termination shift falling inside any of
        these ranges would cut through a bond.
        """
        c_ranges = set()
        # Normalize the species keys to Element/Specie objects.
        bonds = {(get_el_sp(s1), get_el_sp(s2)): dist for (s1, s2), dist in
                 bonds.items()}
        for (sp1, sp2), bond_dist in bonds.items():
            for site in self.oriented_unit_cell:
                if sp1 in site.species_and_occu:
                    for nn, d in self.oriented_unit_cell.get_neighbors(
                            site, bond_dist):
                        if sp2 in nn.species_and_occu:
                            c_range = tuple(sorted([site.frac_coords[2],
                                                    nn.frac_coords[2]]))
                            if c_range[1] > 1:
                                # Takes care of PBC when c coordinate of site
                                # goes beyond the upper boundary of the cell
                                c_ranges.add((c_range[0], 1))
                                c_ranges.add((0, c_range[1] - 1))
                            elif c_range[0] < 0:
                                # Takes care of PBC when c coordinate of site
                                # is below the lower boundary of the unit cell
                                c_ranges.add((0, c_range[1]))
                                c_ranges.add((c_range[0] + 1, 1))
                            elif c_range[0] != c_range[1]:
                                c_ranges.add(c_range)
        return c_ranges
    def get_slabs(self, bonds=None, tol=0.1, max_broken_bonds=0,
                  symmetrize=False, repair=False):
        """
        This method returns a list of slabs that are generated using the list of
        shift values from the method, _calculate_possible_shifts(). Before the
        shifts are used to create the slabs however, if the user decides to take
        into account whether or not a termination will break any polyhedral
        structure (bonds is not None), this method will filter out any shift
        values that do so.
        Args:
            bonds ({(specie1, specie2): max_bond_dist}: bonds are
                specified as a dict of tuples: float of specie1, specie2
                and the max bonding distance. For example, PO4 groups may be
                defined as {("P", "O"): 3}.
            tol (float): Threshold parameter in fcluster in order to check
                if two atoms are lying on the same plane. Default thresh set
                to 0.1 Angstrom in the direction of the surface normal.
            max_broken_bonds (int): Maximum number of allowable broken bonds
                for the slab. Use this to limit # of slabs (some structures
                may have a lot of slabs). Defaults to zero, which means no
                defined bonds must be broken.
            symmetrize (bool): Whether or not to ensure the surfaces of the
                slabs are equivalent.
            repair (bool): Whether to repair terminations with broken bonds
                or just omit them. Set to False as repairing terminations can
                lead to many possible slabs as oppose to just omitting them.
        Returns:
            ([Slab]) List of all possible terminations of a particular surface.
            Slabs are sorted by the # of bonds broken.
        """
        c_ranges = set() if bonds is None else self._get_c_ranges(bonds)
        slabs = []
        for shift in self._calculate_possible_shifts(tol=tol):
            # Count how many bonded c-ranges this termination cuts through.
            bonds_broken = 0
            for r in c_ranges:
                if r[0] <= shift <= r[1]:
                    bonds_broken += 1
            # The broken-bond count doubles as the slab "energy" for sorting.
            slab = self.get_slab(shift, tol=tol, energy=bonds_broken)
            if bonds_broken <= max_broken_bonds:
                slabs.append(slab)
            elif repair:
                # If the number of broken bonds is exceeded,
                # we repair the broken bonds on the slab
                slabs.append(self.repair_broken_bonds(slab, bonds))
        # Further filters out any surfaces made that might be the same
        m = StructureMatcher(ltol=tol, stol=tol, primitive_cell=False,
                             scale=False)
        new_slabs = []
        original_formula = str(self.parent.composition.reduced_formula)
        for g in m.group_structures(slabs):
            # For each unique termination, symmetrize the
            # surfaces by removing sites from the bottom.
            if symmetrize:
                slab = self.nonstoichiometric_symmetrized_slab(g[0])
                if original_formula != str(slab.composition.reduced_formula):
                    warnings.warn("WARNING: Stoichiometry is no longer the "
                                  "same due to symmetrization")
                new_slabs.append(slab)
            else:
                new_slabs.append(g[0])
        return sorted(new_slabs, key=lambda s: s.energy)
    def repair_broken_bonds(self, slab, bonds):
        """
        This method will find undercoordinated atoms due to slab
        cleaving specified by the bonds parameter and move them
        to the other surface to make sure the bond is kept intact.
        In a future release of surface.py, the ghost_sites will be
        used to tell us how the repair bonds should look like.
        Arg:
            slab (structure): A structure object representing a slab.
            bonds ({(specie1, specie2): max_bond_dist}: bonds are
                specified as a dict of tuples: float of specie1, specie2
                and the max bonding distance. For example, PO4 groups may be
                defined as {("P", "O"): 3}.
        Returns:
            (Slab) A Slab object with a particular shifted oriented unit cell.
        """
        for pair in bonds.keys():
            blength = bonds[pair]
            # First lets determine which element should be the
            # reference (center element) to determine broken bonds.
            # e.g. P for a PO4 bond. Find integer coordination
            # numbers of the pair of elements wrt to each other
            cn_dict = {}
            for i, el in enumerate(pair):
                cnlist = []
                for site in self.oriented_unit_cell:
                    poly_coord = 0
                    if site.species_string == el:
                        for nn in self.oriented_unit_cell.get_neighbors(site,
                                                                        blength):
                            # pair[i-1] picks the *other* element of the pair
                            # (works for both i=0 and i=1).
                            if nn[0].species_string == pair[i-1]:
                                poly_coord += 1
                        cnlist.append(poly_coord)
                cn_dict[el] = cnlist
            # We make the element with the higher coordination our reference
            if max(cn_dict[pair[0]]) > max(cn_dict[pair[1]]):
                element1, element2 = pair
            else:
                element2, element1 = pair
            for i, site in enumerate(slab):
                # Determine the coordination of our reference
                if site.species_string == element1:
                    poly_coord = 0
                    for neighbor in slab.get_neighbors(site, blength):
                        poly_coord += 1 if neighbor[0].species_string == element2 else 0
                    # suppose we find an undercoordinated reference atom
                    if poly_coord not in cn_dict[element1]:
                        # We get the reference atom of the broken bonds
                        # (undercoordinated), move it to the other surface
                        slab = self.move_to_other_side(slab, [i])
                        # find its NNs with the corresponding
                        # species it should be coordinated with
                        neighbors = slab.get_neighbors(slab[i], blength,
                                                       include_index=True)
                        tomove = [nn[2] for nn in neighbors if
                                  nn[0].species_string == element2]
                        tomove.append(i)
                        # and then move those NNs along with the central
                        # atom back to the other side of the slab again
                        slab = self.move_to_other_side(slab, tomove)
        return slab
    def move_to_other_side(self, init_slab, index_of_sites):
        """
        This method will Move a set of sites to the
        other side of the slab (opposite surface).
        Arg:
            init_slab (structure): A structure object representing a slab.
            index_of_sites (list of ints): The list of indices representing
                the sites we want to move to the other side.
        Returns:
            (Slab) A Slab object with a particular shifted oriented unit cell.
        """
        slab = init_slab.copy()
        # Determine what fraction the slab is of the total cell size
        # in the c direction. Round to nearest rational number.
        h = self._proj_height
        nlayers_slab = int(math.ceil(self.min_slab_size / h))
        nlayers_vac = int(math.ceil(self.min_vac_size / h))
        nlayers = nlayers_slab + nlayers_vac
        slab_ratio = nlayers_slab / nlayers
        # Sort the index of sites based on which side they are on
        top_site_index = [ i for i in index_of_sites if
                           slab[i].frac_coords[2] > slab.center_of_mass[2]]
        bottom_site_index = [ i for i in index_of_sites if
                              slab[i].frac_coords[2] < slab.center_of_mass[2]]
        # Translate sites to the opposite surfaces
        slab.translate_sites(top_site_index, [0, 0, slab_ratio])
        slab.translate_sites(bottom_site_index, [0, 0, -slab_ratio])
        # Rebuild as a Slab, carrying over the original slab's metadata.
        return Slab(init_slab.lattice, slab.species, slab.frac_coords,
                    init_slab.miller_index, init_slab.oriented_unit_cell,
                    init_slab.shift, init_slab.scale_factor,
                    energy=init_slab.energy)
def nonstoichiometric_symmetrized_slab(self, slab, tol=1e-3):
"""
This method checks whether or not the two surfaces of the slab are
equivalent. If the point group of the slab has an inversion symmetry (
ie. belong to one of the Laue groups), then it is assumed that the
surfaces should be equivalent. Otherwise, sites at the bottom of the
slab will be removed until the slab is symmetric. Note the removal of sites
can destroy the stoichiometry of the slab. For non-elemental
structures, the chemical potential will be needed to calculate surface energy.
Arg:
slab (Structure): A single slab structure
tol (float): Tolerance for SpaceGroupanalyzer.
Returns:
Slab (structure): A symmetrized Slab object.
"""
sg = SpacegroupAnalyzer(slab, symprec=tol)
if sg.is_laue():
return slab
else:
asym = True
while asym or (len(slab) < len(self.parent)):
# Keep removing sites from the bottom one by one until both
# surfaces are symmetric or the number of sites removed has
# exceeded 10 percent of the original slab
c_dir = [site[2] for i, site in enumerate(slab.frac_coords)]
slab.remove_sites([c_dir.index(min(c_dir))])
# Check if the altered surface is symmetric
sg = SpacegroupAnalyzer(slab, symprec=tol)
if sg.is_laue():
asym = False
if len(slab) < len(self.parent):
warnings.warn("Too many sites removed, please use a larger slab "
"size.")
return slab
def get_recp_symmetry_operation(structure, symprec=0.01):
    """
    Find the symmetry operations of the reciprocal lattice,
    to be used for hkl transformations.

    Args:
        structure (Structure): conventional unit cell
        symprec (float): symmetry-finding tolerance; default is 0.01

    Returns:
        list of SymmOp operations of the reciprocal lattice.
    """
    recp_lattice = structure.lattice.reciprocal_lattice_crystallographic
    # get symmetry operations from input conventional unit cell
    # Need to make sure recp lattice is big enough, otherwise symmetry
    # determination will fail. We set the overall volume to 1.
    recp_lattice = recp_lattice.scale(1)
    # Dummy single-atom structure: only the lattice symmetry matters here.
    recp = Structure(recp_lattice, ["H"], [[0, 0, 0]])
    # Creates a function that uses the symmetry operations in the
    # structure to find Miller indices that might give repetitive slabs
    analyzer = SpacegroupAnalyzer(recp, symprec=symprec)
    recp_symmops = analyzer.get_symmetry_operations()
    return recp_symmops
def get_symmetrically_distinct_miller_indices(structure, max_index):
    """
    Returns all symmetrically distinct indices below a certain max-index for
    a given structure. Analysis is based on the symmetry of the reciprocal
    lattice of the structure.

    Args:
        structure (Structure): input structure.
        max_index (int): The maximum index. For example, a max_index of 1
            means that (100), (110), and (111) are returned for the cubic
            structure. All other indices are equivalent to one of these.
    """
    symm_ops = get_recp_symmetry_operation(structure)
    unique_millers = []

    def is_already_analyzed(miller_index):
        # True when some symmetry image of miller_index is already recorded.
        for op in symm_ops:
            if in_coord_list(unique_millers, op.operate(miller_index)):
                return True
        return False

    # Iterate from +max_index down so the "positive" representative is kept.
    r = list(range(-max_index, max_index + 1))
    r.reverse()
    for miller in itertools.product(r, r, r):
        if any([i != 0 for i in miller]):
            # Reduce e.g. (2, 2, 0) to the primitive index (1, 1, 0).
            d = abs(reduce(gcd, miller))
            miller = tuple([int(i / d) for i in miller])
            if not is_already_analyzed(miller):
                unique_millers.append(miller)
    return unique_millers
def generate_all_slabs(structure, max_index, min_slab_size, min_vacuum_size,
                       bonds=None, tol=1e-3, max_broken_bonds=0,
                       lll_reduce=False, center_slab=False, primitive=True,
                       max_normal_search=None, symmetrize=False, repair=False):
    """
    A function that finds all different slabs up to a certain miller index.
    Slabs oriented under certain Miller indices that are equivalent to other
    slabs in other Miller indices are filtered out using symmetry operations
    to get rid of any repetitive slabs. For example, under symmetry operations,
    CsCl has equivalent slabs in the (0,0,1), (0,1,0), and (1,0,0) direction.

    Args:
        structure (Structure): Initial input structure. Note that to
            ensure that the miller indices correspond to usual
            crystallographic definitions, you should supply a conventional
            unit cell structure.
        max_index (int): The maximum Miller index to go up to.
        min_slab_size (float): In Angstroms
        min_vacuum_size (float): In Angstroms
        bonds ({(specie1, specie2): max_bond_dist}: bonds are
            specified as a dict of tuples: float of specie1, specie2
            and the max bonding distance. For example, PO4 groups may be
            defined as {("P", "O"): 3}.
        tol (float): Threshold parameter in fcluster in order to check
            if two atoms are lying on the same plane. Default thresh set
            to 0.1 Angstrom in the direction of the surface normal.
        max_broken_bonds (int): Maximum number of allowable broken bonds
            for the slab. Use this to limit # of slabs (some structures
            may have a lot of slabs). Defaults to zero, which means no
            defined bonds must be broken.
        lll_reduce (bool): Whether to perform an LLL reduction on the
            eventual structure.
        center_slab (bool): Whether to center the slab in the cell with
            equal vacuum spacing from the top and bottom.
        primitive (bool): Whether to reduce any generated slabs to a
            primitive cell (this does **not** mean the slab is generated
            from a primitive cell, it simply means that after slab
            generation, we attempt to find shorter lattice vectors,
            which lead to less surface area and smaller cells).
        max_normal_search (int): If set to a positive integer, the code will
            conduct a search for a normal lattice vector that is as
            perpendicular to the surface as possible by considering
            multiples linear combinations of lattice vectors up to
            max_normal_search. This has no bearing on surface energies,
            but may be useful as a preliminary step to generating slabs
            for absorption and other sizes. It is typical that this will
            not be the smallest possible cell for simulation. Normality
            is not guaranteed, but the oriented cell will have the c
            vector as normal as possible (within the search range) to the
            surface. A value of up to the max absolute Miller index is
            usually sufficient.
        symmetrize (bool): Whether or not to ensure the surfaces of the
            slabs are equivalent.
        repair (bool): Whether to repair terminations with broken bonds
            or just omit them

    Returns:
        list of Slab objects across all distinct Miller indices.
    """
    all_slabs = []
    # One SlabGenerator per symmetrically distinct orientation.
    for miller in get_symmetrically_distinct_miller_indices(structure,
                                                            max_index):
        gen = SlabGenerator(structure, miller, min_slab_size,
                            min_vacuum_size, lll_reduce=lll_reduce,
                            center_slab=center_slab, primitive=primitive,
                            max_normal_search=max_normal_search)
        slabs = gen.get_slabs(bonds=bonds, tol=tol, symmetrize=symmetrize,
                              max_broken_bonds=max_broken_bonds, repair=repair)
        if len(slabs) > 0:
            logger.debug("%s has %d slabs... " % (miller, len(slabs)))
            all_slabs.extend(slabs)
    return all_slabs
def reduce_vector(vector):
    """Scale an integer vector down by the gcd of its components.

    E.g. ``(2, 4, 6)`` becomes ``(1, 2, 3)``; signs are preserved.
    Returns a tuple of ints.
    """
    divisor = abs(reduce(gcd, vector))
    return tuple(int(component / divisor) for component in vector)
| matk86/pymatgen | pymatgen/core/surface.py | Python | mit | 55,689 | [
"pymatgen"
] | 6f2c3212a24b5087b890fd4ef8085f46e86c726c27f4a5f71dcb24a838db53e0 |
""" core implementation of testing process: init, session, runtest loop. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import os
import pkgutil
import sys
import warnings
import attr
import py
import six
import _pytest._code
from _pytest import nodes
from _pytest.config import directory_arg
from _pytest.config import hookimpl
from _pytest.config import UsageError
from _pytest.deprecated import PYTEST_CONFIG_GLOBAL
from _pytest.outcomes import exit
from _pytest.runner import collect_one_node
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
EXIT_NOTESTSCOLLECTED = 5
def pytest_addoption(parser):
    """Register ini settings and command line options controlling test
    collection and the run loop.

    :param parser: the pytest ``Parser`` used to declare ini values,
        option groups and options.
    """
    parser.addini(
        "norecursedirs",
        "directory patterns to avoid for recursion",
        type="args",
        default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"],
    )
    parser.addini(
        "testpaths",
        "directories to search for tests when no files or directories are given in the "
        "command line.",
        type="args",
        default=[],
    )
    # parser.addini("dirpatterns",
    #     "patterns specifying possible locations of test files",
    #     type="linelist", default=["**/test_*.txt",
    #             "**/test_*.py", "**/*_test.py"]
    # )
    group = parser.getgroup("general", "running and selection options")
    # NOTE: the original source had stray trailing commas after two of the
    # calls below, turning the statements into discarded one-element tuples;
    # they have been removed (no behavior change, just dead syntax).
    group._addoption(
        "-x",
        "--exitfirst",
        action="store_const",
        dest="maxfail",
        const=1,
        help="exit instantly on first error or failed test.",
    )
    group._addoption(
        "--maxfail",
        metavar="num",
        action="store",
        type=int,
        dest="maxfail",
        default=0,
        help="exit after first num failures or errors.",
    )
    group._addoption(
        "--strict",
        action="store_true",
        help="marks not registered in configuration file raise errors.",
    )
    group._addoption(
        "-c",
        metavar="file",
        type=str,
        dest="inifilename",
        help="load configuration from `file` instead of trying to locate one of the implicit "
        "configuration files.",
    )
    group._addoption(
        "--continue-on-collection-errors",
        action="store_true",
        default=False,
        dest="continue_on_collection_errors",
        help="Force test execution even if collection errors occur.",
    )
    group._addoption(
        "--rootdir",
        action="store",
        dest="rootdir",
        help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
        "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
        "'$HOME/root_dir'.",
    )

    group = parser.getgroup("collect", "collection")
    group.addoption(
        "--collectonly",
        "--collect-only",
        action="store_true",
        help="only collect tests, don't execute them.",
    )
    group.addoption(
        "--pyargs",
        action="store_true",
        help="try to interpret all arguments as python packages.",
    )
    group.addoption(
        "--ignore",
        action="append",
        metavar="path",
        help="ignore path during collection (multi-allowed).",
    )
    group.addoption(
        "--deselect",
        action="append",
        metavar="nodeid_prefix",
        help="deselect item during collection (multi-allowed).",
    )
    # when changing this to --conf-cut-dir, config.py Conftest.setinitial
    # needs upgrading as well
    group.addoption(
        "--confcutdir",
        dest="confcutdir",
        default=None,
        metavar="dir",
        type=functools.partial(directory_arg, optname="--confcutdir"),
        help="only load conftest.py's relative to specified dir.",
    )
    group.addoption(
        "--noconftest",
        action="store_true",
        dest="noconftest",
        default=False,
        help="Don't load any conftest.py files.",
    )
    group.addoption(
        "--keepduplicates",
        "--keep-duplicates",
        action="store_true",
        dest="keepduplicates",
        default=False,
        help="Keep duplicate tests.",
    )
    group.addoption(
        "--collect-in-virtualenv",
        action="store_true",
        dest="collect_in_virtualenv",
        default=False,
        help="Don't ignore tests in a local virtualenv directory",
    )

    group = parser.getgroup("debugconfig", "test session debugging and configuration")
    group.addoption(
        "--basetemp",
        dest="basetemp",
        default=None,
        metavar="dir",
        help=(
            "base temporary directory for this test run."
            "(warning: this directory is removed if it exists)"
        ),
    )
class _ConfigDeprecated(object):
    """Proxy around a ``Config`` that emits a deprecation warning on every
    attribute read or write, used for the legacy ``pytest.config`` global."""

    def __init__(self, config):
        # Store via __dict__ directly to bypass our own warning __setattr__.
        self.__dict__["_config"] = config

    def __getattr__(self, attr):
        warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
        return getattr(self._config, attr)

    def __setattr__(self, attr, val):
        warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)
        return setattr(self._config, attr, val)

    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, self._config)
def pytest_configure(config):
    """Install the deprecated ``pytest.config`` global for backward
    compatibility; every access through it warns via _ConfigDeprecated."""
    import pytest

    pytest.config = _ConfigDeprecated(config)  # compatibility
def wrap_session(config, doit):
    """Skeleton command line program: configure, run *doit*, map exceptions
    to exit codes, and always tear the session down.

    :param config: the pytest Config object.
    :param doit: callable ``(config, session) -> exitstatus``.
    :returns: the integer session exit status.
    """
    session = Session(config)
    session.exitstatus = EXIT_OK
    # initstate tracks how far startup got, so teardown only undoes
    # what actually happened: 0 = nothing, 1 = configured, 2 = session started.
    initstate = 0
    try:
        try:
            config._do_configure()
            initstate = 1
            config.hook.pytest_sessionstart(session=session)
            initstate = 2
            session.exitstatus = doit(config, session) or 0
        except UsageError:
            # command-line usage errors propagate to the caller unchanged
            raise
        except Failed:
            session.exitstatus = EXIT_TESTSFAILED
        except (KeyboardInterrupt, exit.Exception):
            excinfo = _pytest._code.ExceptionInfo.from_current()
            exitstatus = EXIT_INTERRUPTED
            if initstate <= 2 and isinstance(excinfo.value, exit.Exception):
                sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg))
                if excinfo.value.returncode is not None:
                    # pytest.exit() may carry an explicit return code
                    exitstatus = excinfo.value.returncode
            config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
            session.exitstatus = exitstatus
        except:  # noqa
            # any other exception is an internal pytest error
            excinfo = _pytest._code.ExceptionInfo.from_current()
            config.notify_exception(excinfo, config.option)
            session.exitstatus = EXIT_INTERNALERROR
            if excinfo.errisinstance(SystemExit):
                sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
    finally:
        excinfo = None  # Explicitly break reference cycle.
        session.startdir.chdir()
        if initstate >= 2:
            config.hook.pytest_sessionfinish(
                session=session, exitstatus=session.exitstatus
            )
        config._ensure_unconfigure()
    return session.exitstatus
def pytest_cmdline_main(config):
    """Entry point for the command line: run _main inside the session wrapper
    and return the resulting exit status."""
    return wrap_session(config, _main)
def _main(config, session):
    """ default command line protocol for initialization, session,
    running tests and reporting. """
    config.hook.pytest_collection(session=session)
    config.hook.pytest_runtestloop(session=session)

    # Returning None means EXIT_OK to wrap_session's ``or 0``.
    if session.testsfailed:
        return EXIT_TESTSFAILED
    elif session.testscollected == 0:
        return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
    """Default implementation of the collection hook: delegate to the
    session's perform_collect."""
    return session.perform_collect()
def pytest_runtestloop(session):
    """Run every collected item in order, honoring stop/fail requests.

    Each item is run via the ``pytest_runtest_protocol`` hook together with
    the following item (or None for the last one) so plugins can prepare
    the next test ahead of time.
    """
    if session.testsfailed and not session.config.option.continue_on_collection_errors:
        raise session.Interrupted("%d errors during collection" % session.testsfailed)

    if session.config.option.collectonly:
        return True

    items = session.items
    # Pair each item with its successor; the final item pairs with None.
    for item, nextitem in zip(items, items[1:] + [None]):
        item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
        if session.shouldfail:
            raise session.Failed(session.shouldfail)
        if session.shouldstop:
            raise session.Interrupted(session.shouldstop)
    return True
def _in_venv(path):
    """Attempts to detect if ``path`` is the root of a Virtual Environment by
    checking for the existence of the appropriate activate script"""
    script_dir = path.join("Scripts" if sys.platform.startswith("win") else "bin")
    if not script_dir.isdir():
        return False
    known_activate_scripts = (
        "activate",
        "activate.csh",
        "activate.fish",
        "Activate",
        "Activate.bat",
        "Activate.ps1",
    )
    return any(
        entry.basename in known_activate_scripts for entry in script_dir.listdir()
    )
def pytest_ignore_collect(path, config):
    """Return True when *path* should be skipped during collection.

    Paths are ignored when they appear in the conftest ``collect_ignore``
    list or the ``--ignore`` option, or when they live inside a virtualenv
    (unless ``--collect-in-virtualenv`` was given).
    """
    ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath())
    ignore_paths = ignore_paths or []
    excludeopt = config.getoption("ignore")
    if excludeopt:
        ignore_paths.extend([py.path.local(x) for x in excludeopt])

    if py.path.local(path) in ignore_paths:
        return True

    allow_in_venv = config.getoption("collect_in_virtualenv")
    if not allow_in_venv and _in_venv(path):
        return True

    return False
def pytest_collection_modifyitems(items, config):
    """Drop collected items whose node id starts with a ``--deselect``
    prefix, notifying plugins about the deselected ones."""
    prefixes = tuple(config.getoption("deselect") or [])
    if not prefixes:
        return

    kept, dropped = [], []
    for item in items:
        target = dropped if item.nodeid.startswith(prefixes) else kept
        target.append(item)

    if dropped:
        config.hook.pytest_deselected(items=dropped)
        items[:] = kept
@contextlib.contextmanager
def _patched_find_module():
    """Patch bug in pkgutil.ImpImporter.find_module

    When using pkgutil.find_loader on python<3.4 it removes symlinks
    from the path due to a call to os.path.realpath. This is not consistent
    with actually doing the import (in these versions, pkgutil and __import__
    did not share the same underlying code). This can break conftest
    discovery for pytest where symlinks are involved.

    The only supported python<3.4 by pytest is python 2.7.
    """
    if six.PY2:  # python 3.4+ uses importlib instead

        def find_module_patched(self, fullname, path=None):
            # Note: we ignore 'path' argument since it is only used via meta_path
            subname = fullname.split(".")[-1]
            if subname != fullname and self.path is None:
                return None
            if self.path is None:
                path = None
            else:
                # original: path = [os.path.realpath(self.path)]
                # the realpath() call is the bug being worked around here
                path = [self.path]
            try:
                file, filename, etc = pkgutil.imp.find_module(subname, path)
            except ImportError:
                return None
            return pkgutil.ImpLoader(fullname, file, filename, etc)

        # monkey-patch for the duration of the with-block only
        old_find_module = pkgutil.ImpImporter.find_module
        pkgutil.ImpImporter.find_module = find_module_patched
        try:
            yield
        finally:
            # always restore the original implementation
            pkgutil.ImpImporter.find_module = old_find_module
    else:
        yield
class FSHookProxy(object):
    """Hook caller restricted to the conftest plugins that apply to one
    filesystem path (``remove_mods`` are excluded from dispatch)."""

    def __init__(self, fspath, pm, remove_mods):
        self.fspath = fspath
        self.pm = pm
        self.remove_mods = remove_mods

    def __getattr__(self, name):
        # Build the subset hook caller lazily, then memoize it on the
        # instance so __getattr__ is not hit again for the same name.
        x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
        self.__dict__[name] = x
        return x
class NoMatch(Exception):
    """ raised if matching cannot locate matching names. """
class Interrupted(KeyboardInterrupt):
    """ signals an interrupted test run. """

    # masquerade as a builtin so tracebacks read "builtins.Interrupted"
    __module__ = "builtins"  # for py3
class Failed(Exception):
    """ signals a stop as failed test run. """
@attr.s
class _bestrelpath_cache(dict):
    """Memoizing map from a path to its best relative path from ``path``."""

    # reference directory against which relative paths are computed
    path = attr.ib()

    def __missing__(self, path):
        # compute once and cache: bestrelpath is a quite slow function
        r = self.path.bestrelpath(path)
        self[path] = r
        return r
class Session(nodes.FSCollector):
    """Root collector of a test run.

    Drives argument parsing, filesystem collection, node matching and item
    generation; it also registers itself as the ``session`` plugin so its
    ``pytest_*`` hook implementations fire during the run.
    """

    Interrupted = Interrupted
    Failed = Failed

    def __init__(self, config):
        nodes.FSCollector.__init__(
            self, config.rootdir, parent=None, config=config, session=self, nodeid=""
        )
        # counters/flags updated from report hooks below
        self.testsfailed = 0
        self.testscollected = 0
        # either False or a string describing why the run should stop
        self.shouldstop = False
        self.shouldfail = False
        self.trace = config.trace.root.get("collection")
        self._norecursepatterns = config.getini("norecursedirs")
        self.startdir = py.path.local()
        self._initialpaths = frozenset()
        # Keep track of any collected nodes in here, so we don't duplicate fixtures
        self._node_cache = {}
        self._bestrelpathcache = _bestrelpath_cache(config.rootdir)
        # Dirnames of pkgs with dunder-init files.
        self._pkg_roots = {}

        self.config.pluginmanager.register(self, name="session")

    def _node_location_to_relpath(self, node_path):
        # bestrelpath is a quite slow function
        return self._bestrelpathcache[node_path]

    @hookimpl(tryfirst=True)
    def pytest_collectstart(self):
        # honour stop/fail requests raised while collection is in progress
        if self.shouldfail:
            raise self.Failed(self.shouldfail)
        if self.shouldstop:
            raise self.Interrupted(self.shouldstop)

    @hookimpl(tryfirst=True)
    def pytest_runtest_logreport(self, report):
        """Count failures (excluding xfail) and arm --maxfail early exit."""
        if report.failed and not hasattr(report, "wasxfail"):
            self.testsfailed += 1
            maxfail = self.config.getvalue("maxfail")
            if maxfail and self.testsfailed >= maxfail:
                self.shouldfail = "stopping after %d failures" % (self.testsfailed)

    # collection errors are counted exactly like test failures
    pytest_collectreport = pytest_runtest_logreport

    def isinitpath(self, path):
        # True when *path* was given directly on the command line
        return path in self._initialpaths

    def gethookproxy(self, fspath):
        """Return a hook caller scoped to the conftests relevant for *fspath*."""
        # check if we have the common case of running
        # hooks with all conftest.py files
        pm = self.config.pluginmanager
        my_conftestmodules = pm._getconftestmodules(fspath)
        remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
        if remove_mods:
            # one or more conftests are not in use at this fspath
            proxy = FSHookProxy(fspath, pm, remove_mods)
        else:
            # all plugins are active for this fspath
            proxy = self.config.hook
        return proxy

    def perform_collect(self, args=None, genitems=True):
        """Collect items for *args* (defaults to the configured arguments),
        let plugins modify the list, and always fire pytest_collection_finish."""
        hook = self.config.hook
        try:
            items = self._perform_collect(args, genitems)
            self.config.pluginmanager.check_pending()
            hook.pytest_collection_modifyitems(
                session=self, config=self.config, items=items
            )
        finally:
            hook.pytest_collection_finish(session=self)
        self.testscollected = len(items)
        return items

    def _perform_collect(self, args, genitems):
        # Parse each command-line argument into (path, name, ...) parts and
        # remember the initial paths before the actual collection pass.
        if args is None:
            args = self.config.args
        self.trace("perform_collect", self, args)
        self.trace.root.indent += 1
        self._notfound = []
        initialpaths = []
        self._initialparts = []
        self.items = items = []
        for arg in args:
            parts = self._parsearg(arg)
            self._initialparts.append(parts)
            initialpaths.append(parts[0])
        self._initialpaths = frozenset(initialpaths)
        rep = collect_one_node(self)
        self.ihook.pytest_collectreport(report=rep)
        self.trace.root.indent -= 1
        if self._notfound:
            errors = []
            for arg, exc in self._notfound:
                line = "(no name %r in any of %r)" % (arg, exc.args[0])
                errors.append("not found: %s\n%s" % (arg, line))
            # XXX: test this
            raise UsageError(*errors)
        if not genitems:
            return rep.result
        else:
            if rep.passed:
                for node in rep.result:
                    self.items.extend(self.genitems(node))
            return items

    def collect(self):
        # Top-level collect: process each parsed command-line argument,
        # recording NoMatch errors instead of raising (we're inside a
        # make_report hook so we cannot directly pass through the exception).
        for initialpart in self._initialparts:
            arg = "::".join(map(str, initialpart))
            self.trace("processing argument", arg)
            self.trace.root.indent += 1
            try:
                for x in self._collect(arg):
                    yield x
            except NoMatch:
                # we are inside a make_report hook so
                # we cannot directly pass through the exception
                self._notfound.append((arg, sys.exc_info()[1]))
            self.trace.root.indent -= 1

    def _collect(self, arg):
        """Yield collectors/items for one command-line argument."""
        from _pytest.python import Package

        names = self._parsearg(arg)
        argpath = names.pop(0)

        # Start with a Session root, and delve to argpath item (dir or file)
        # and stack all Packages found on the way.
        # No point in finding packages when collecting doctests
        if not self.config.option.doctestmodules:
            pm = self.config.pluginmanager
            for parent in reversed(argpath.parts()):
                if pm._confcutdir and pm._confcutdir.relto(parent):
                    break
                if parent.isdir():
                    pkginit = parent.join("__init__.py")
                    if pkginit.isfile():
                        if pkginit not in self._node_cache:
                            col = self._collectfile(pkginit, handle_dupes=False)
                            if col:
                                if isinstance(col[0], Package):
                                    self._pkg_roots[parent] = col[0]
                                # always store a list in the cache, matchnodes expects it
                                self._node_cache[col[0].fspath] = [col[0]]

        # If it's a directory argument, recurse and look for any Subpackages.
        # Let the Package collector deal with subnodes, don't collect here.
        if argpath.check(dir=1):
            assert not names, "invalid arg %r" % (arg,)
            if six.PY2:

                def filter_(f):
                    return f.check(file=1) and not f.strpath.endswith("*.pyc")

            else:

                def filter_(f):
                    return f.check(file=1)

            seen_dirs = set()
            for path in argpath.visit(
                fil=filter_, rec=self._recurse, bf=True, sort=True
            ):
                dirpath = path.dirpath()
                if dirpath not in seen_dirs:
                    # Collect packages first.
                    seen_dirs.add(dirpath)
                    pkginit = dirpath.join("__init__.py")
                    if pkginit.exists():
                        for x in self._collectfile(pkginit):
                            yield x
                            if isinstance(x, Package):
                                self._pkg_roots[dirpath] = x
                if dirpath in self._pkg_roots:
                    # Do not collect packages here.
                    continue
                for x in self._collectfile(path):
                    key = (type(x), x.fspath)
                    if key in self._node_cache:
                        yield self._node_cache[key]
                    else:
                        self._node_cache[key] = x
                        yield x
        else:
            assert argpath.check(file=1)
            if argpath in self._node_cache:
                col = self._node_cache[argpath]
            else:
                collect_root = self._pkg_roots.get(argpath.dirname, self)
                col = collect_root._collectfile(argpath)
                if col:
                    self._node_cache[argpath] = col
            m = self.matchnodes(col, names)
            # If __init__.py was the only file requested, then the matched node will be
            # the corresponding Package, and the first yielded item will be the __init__
            # Module itself, so just use that. If this special case isn't taken, then all
            # the files in the package will be yielded.
            if argpath.basename == "__init__.py":
                yield next(m[0].collect())
                return
            for y in m:
                yield y

    def _collectfile(self, path, handle_dupes=True):
        # Returns a (possibly empty) sequence of collectors for *path*;
        # respects ignore hooks and --keepduplicates.
        ihook = self.gethookproxy(path)
        if not self.isinitpath(path):
            if ihook.pytest_ignore_collect(path=path, config=self.config):
                return ()
        if handle_dupes:
            keepduplicates = self.config.getoption("keepduplicates")
            if not keepduplicates:
                duplicate_paths = self.config.pluginmanager._duplicatepaths
                if path in duplicate_paths:
                    return ()
                else:
                    duplicate_paths.add(path)
        return ihook.pytest_collect_file(path=path, parent=self)

    def _recurse(self, dirpath):
        # Decide whether directory traversal should descend into *dirpath*.
        if dirpath.basename == "__pycache__":
            return False
        ihook = self.gethookproxy(dirpath.dirpath())
        if ihook.pytest_ignore_collect(path=dirpath, config=self.config):
            return False
        for pat in self._norecursepatterns:
            if dirpath.check(fnmatch=pat):
                return False
        ihook = self.gethookproxy(dirpath)
        ihook.pytest_collect_directory(path=dirpath, parent=self)
        return True

    def _tryconvertpyarg(self, x):
        """Convert a dotted module name to path."""
        try:
            with _patched_find_module():
                loader = pkgutil.find_loader(x)
        except ImportError:
            return x
        if loader is None:
            return x
        # This method is sometimes invoked when AssertionRewritingHook, which
        # does not define a get_filename method, is already in place:
        try:
            with _patched_find_module():
                path = loader.get_filename(x)
        except AttributeError:
            # Retrieve path from AssertionRewritingHook:
            path = loader.modules[x][0].co_filename
        if loader.is_package(x):
            path = os.path.dirname(path)
        return path

    def _parsearg(self, arg):
        """ return (fspath, names) tuple after checking the file exists. """
        parts = str(arg).split("::")
        if self.config.option.pyargs:
            parts[0] = self._tryconvertpyarg(parts[0])
        relpath = parts[0].replace("/", os.sep)
        path = self.config.invocation_dir.join(relpath, abs=True)
        if not path.check():
            if self.config.option.pyargs:
                raise UsageError(
                    "file or package not found: " + arg + " (missing __init__.py?)"
                )
            raise UsageError("file not found: " + arg)
        parts[0] = path.realpath()
        return parts

    def matchnodes(self, matching, names):
        """Resolve *names* (the ``::``-separated parts) against *matching*
        collectors; raises NoMatch when nothing is found."""
        self.trace("matchnodes", matching, names)
        self.trace.root.indent += 1
        nodes = self._matchnodes(matching, names)
        num = len(nodes)
        self.trace("matchnodes finished -> ", num, "nodes")
        self.trace.root.indent -= 1
        if num == 0:
            raise NoMatch(matching, names[:1])
        return nodes

    def _matchnodes(self, matching, names):
        if not matching or not names:
            return matching
        name = names[0]
        assert name
        nextnames = names[1:]
        resultnodes = []
        for node in matching:
            if isinstance(node, nodes.Item):
                # Items cannot be descended into further.
                if not names:
                    resultnodes.append(node)
                continue
            assert isinstance(node, nodes.Collector)
            key = (type(node), node.nodeid)
            if key in self._node_cache:
                rep = self._node_cache[key]
            else:
                rep = collect_one_node(node)
                self._node_cache[key] = rep
            if rep.passed:
                has_matched = False
                for x in rep.result:
                    # TODO: remove parametrized workaround once collection structure contains parametrization
                    if x.name == name or x.name.split("[")[0] == name:
                        resultnodes.extend(self.matchnodes([x], nextnames))
                        has_matched = True
                # XXX accept IDs that don't have "()" for class instances
                if not has_matched and len(rep.result) == 1 and x.name == "()":
                    nextnames.insert(0, name)
                    resultnodes.extend(self.matchnodes([x], nextnames))
            else:
                # report collection failures here to avoid failing to run some test
                # specified in the command line because the module could not be
                # imported (#134)
                node.ihook.pytest_collectreport(report=rep)
        return resultnodes

    def genitems(self, node):
        """Recursively yield all Items reachable from *node*."""
        self.trace("genitems", node)
        if isinstance(node, nodes.Item):
            node.ihook.pytest_itemcollected(item=node)
            yield node
        else:
            assert isinstance(node, nodes.Collector)
            rep = collect_one_node(node)
            if rep.passed:
                for subnode in rep.result:
                    for x in self.genitems(subnode):
                        yield x
            node.ihook.pytest_collectreport(report=rep)
| hackebrot/pytest | src/_pytest/main.py | Python | mit | 25,480 | [
"VisIt"
] | 111fe939298b33c18d54f44b9d328e22b9a1bf92aa940a38cf016ee8921e40e3 |
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010-2012 Gary Burton
# GraphvizSvgParser is based on the Gramps XML import
# DotSvgGenerator is based on the relationship graph
# report.
# Mouse panning is derived from the pedigree view
# Copyright (C) 2012 Mathieu MD
# Copyright (C) 2015- Serge Noiraud
# Copyright (C) 2016- Ivan Komaritsyn
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# $Id$
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
import logging
from re import MULTILINE, findall
from xml.parsers.expat import ParserCreate
import string
from subprocess import Popen, PIPE
from io import StringIO
from threading import Thread
from math import sqrt, pow
from html import escape
from collections import abc, deque
import gi
from gi.repository import Gtk, Gdk, GdkPixbuf, GLib, Pango
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen import datehandler
from gramps.gen.config import config
from gramps.gen.constfunc import win
from gramps.gen.db import DbTxn
from gramps.gen.display.name import displayer
from gramps.gen.display.place import displayer as place_displayer
from gramps.gen.errors import WindowActiveError
from gramps.gen.lib import (Person, Family, ChildRef, Name, Surname,
ChildRefType, EventType, EventRoleType)
from gramps.gen.utils.alive import probably_alive
from gramps.gen.utils.callback import Callback
from gramps.gen.utils.db import (get_birth_or_fallback, get_death_or_fallback,
find_children, find_parents, preset_name,
find_witnessed_people)
from gramps.gen.utils.file import search_for, media_path_full, find_file
from gramps.gen.utils.libformatting import FormattingHelper
from gramps.gen.utils.thumbnails import get_thumbnail_path
from gramps.gui.dialog import (OptionDialog, ErrorDialog, QuestionDialog2,
WarningDialog)
from gramps.gui.display import display_url
from gramps.gui.editors import EditPerson, EditFamily, EditTagList
from gramps.gui.utils import (color_graph_box, color_graph_family,
rgb_to_hex, hex_to_rgb_float,
process_pending_events)
from gramps.gui.views.navigationview import NavigationView
from gramps.gui.views.bookmarks import PersonBookmarks
from gramps.gui.views.tags import OrganizeTagsDialog
from gramps.gui.widgets import progressdialog as progressdlg
from gramps.gui.widgets.menuitem import add_menuitem
from gramps.gen.utils.symbols import Symbols
from gramps.gui.pluginmanager import GuiPluginManager
from gramps.gen.plug import CATEGORY_QR_PERSON, CATEGORY_QR_FAMILY
from gramps.gui.plug.quick import run_report
from gramps.gen.const import GRAMPS_LOCALE as glocale
# Translator setup: prefer the addon's own catalog, fall back to the
# main Gramps translation when no addon translator exists.
try:
    _trans = glocale.get_addon_translator(__file__)
except ValueError:
    _trans = glocale.translation
_ = _trans.gettext

if win():
    # subprocess creation flag used when spawning dot on Windows
    DETACHED_PROCESS = 8

# Probe for GooCanvas 3.0 first, then 2.0; stop at the first that loads.
for goo_ver in ('3.0', '2.0'):
    try:
        gi.require_version('GooCanvas', goo_ver)
        from gi.repository import GooCanvas
        _GOO = True
        break
    except (ImportError, ValueError):
        _GOO = False
if not _GOO:
    raise Exception("Goocanvas 2 or 3 (http://live.gnome.org/GooCanvas) is "
                    "required for this view to work")

# The view shells out to Graphviz' dot; refuse to load without it.
if os.sys.platform == "win32":
    _DOT_FOUND = search_for("dot.exe")
else:
    _DOT_FOUND = search_for("dot")

if not _DOT_FOUND:
    raise Exception("GraphViz (http://www.graphviz.org) is "
                    "required for this view to work")

# dot 'splines' attribute value per spline-style config index
SPLINE = {0: 'false', 1: 'true', 2: 'ortho'}

WIKI_PAGE = 'https://gramps-project.org/wiki/index.php?title=Graph_View'

# gtk version
gtk_version = float("%s.%s" % (Gtk.MAJOR_VERSION, Gtk.MINOR_VERSION))
#-------------------------------------------------------------------------
#
# GraphView modules
#
#-------------------------------------------------------------------------
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from search_widget import SearchWidget, Popover, ListBoxRow, get_person_tooltip
from avatars import Avatars
#-------------------------------------------------------------------------
#
# GraphView
#
#-------------------------------------------------------------------------
class GraphView(NavigationView):
"""
View for pedigree tree.
Displays the ancestors and descendants of a selected individual.
"""
# default settings in the config file
CONFIGSETTINGS = (
('interface.graphview-show-images', True),
('interface.graphview-show-avatars', True),
('interface.graphview-avatars-style', 1),
('interface.graphview-avatars-male', ''), # custom avatar
('interface.graphview-avatars-female', ''), # custom avatar
('interface.graphview-show-full-dates', False),
('interface.graphview-show-places', False),
('interface.graphview-place-format', 0),
('interface.graphview-show-lines', 1),
('interface.graphview-show-tags', False),
('interface.graphview-highlight-home-person', True),
('interface.graphview-home-path-color', '#000000'),
('interface.graphview-descendant-generations', 10),
('interface.graphview-ancestor-generations', 3),
('interface.graphview-show-animation', True),
('interface.graphview-animation-speed', 3),
('interface.graphview-animation-count', 4),
('interface.graphview-search-all-db', True),
('interface.graphview-search-show-images', True),
('interface.graphview-search-marked-first', True),
('interface.graphview-ranksep', 5),
('interface.graphview-nodesep', 2),
('interface.graphview-person-theme', 0),
('interface.graphview-font', ['', 14]),
('interface.graphview-show-all-connected', False))
    def __init__(self, pdata, dbstate, uistate, nav_group=0):
        """
        Create the Graph View page.

        :param pdata: plugin registration data
        :param dbstate: database state object
        :param uistate: user interface state object
        :param nav_group: navigation group used for history handling
        """
        NavigationView.__init__(self, _('Graph View'), pdata, dbstate, uistate,
                                PersonBookmarks, nav_group)
        # cache frequently used config values as plain attributes
        self.show_images = self._config.get('interface.graphview-show-images')
        self.show_full_dates = self._config.get(
            'interface.graphview-show-full-dates')
        self.show_places = self._config.get('interface.graphview-show-places')
        self.show_tag_color = self._config.get('interface.graphview-show-tags')
        self.highlight_home_person = self._config.get(
            'interface.graphview-highlight-home-person')
        self.home_path_color = self._config.get(
            'interface.graphview-home-path-color')
        self.descendant_generations = self._config.get(
            'interface.graphview-descendant-generations')
        self.ancestor_generations = self._config.get(
            'interface.graphview-ancestor-generations')
        self.dbstate = dbstate
        self.uistate = uistate
        self.graph_widget = None  # created later in build_widget()
        self.dbstate.connect('database-changed', self.change_db)
        # dict {handle, tooltip_str} of tooltips in markup format
        self.tags_tooltips = {}
        # for disable animation options in config dialog
        self.ani_widgets = []
        # for disable custom avatar options in config dialog
        self.avatar_widgets = []
        self.additional_uis.append(self.additional_ui)
        self.define_print_actions()
        self.uistate.connect('font-changed', self.font_changed)
    def on_delete(self):
        """
        Method called on shutdown.
        See PageView class (../gramps/gui/views/pageview.py).
        """
        super().on_delete()
        # stop search to allow close app properly
        self.graph_widget.search_widget.stop_search()

    def font_changed(self):
        """
        Called on the uistate 'font-changed' signal: redraw the graph
        of the active person using the new font.
        """
        self.graph_widget.font_changed(self.get_active())
        #self.goto_handle(None)

    def define_print_actions(self):
        """
        Associate the print button to the PrintView action.
        """
        self._add_action('PrintView', self.printview, "<PRIMARY><SHIFT>P")
        self._add_action('PRIMARY-J', self.jump, '<PRIMARY>J')

    def _connect_db_signals(self):
        """
        Set up callbacks for changes to person and family nodes.
        """
        self.callman.add_db_signal('person-update', self.goto_handle)
        self.callman.add_db_signal('family-update', self.goto_handle)
        self.callman.add_db_signal('event-update', self.goto_handle)
def change_db(self, _db):
"""
Set up callback for changes to the database.
"""
self._change_db(_db)
self.graph_widget.scale = 1
if self.active:
if self.get_active() != "":
self.graph_widget.populate(self.get_active())
self.graph_widget.set_available(True)
else:
self.graph_widget.set_available(False)
else:
self.dirty = True
self.graph_widget.set_available(False)
    def get_stock(self):
        """
        The category stock icon.
        :return: icon name string
        """
        return 'gramps-pedigree'

    def get_viewtype_stock(self):
        """
        Type of view in category.
        :return: icon name string
        """
        return 'gramps-pedigree'

    def build_widget(self):
        """
        Builds the widget with canvas and controls.
        :return: top-level container widget of the graph widget
        """
        self.graph_widget = GraphWidget(self, self.dbstate, self.uistate)
        return self.graph_widget.get_widget()
def build_tree(self):
"""
There is no separate step to fill the widget with data.
The data is populated as part of canvas widget construction.
It can be called to rebuild tree.
"""
if self.active:
if self.get_active() != "":
self.graph_widget.populate(self.get_active())
additional_ui = [ # Defines the UI string for UIManager
'''
<placeholder id="CommonGo">
<section>
<item>
<attribute name="action">win.Back</attribute>
<attribute name="label" translatable="yes">_Back</attribute>
</item>
<item>
<attribute name="action">win.Forward</attribute>
<attribute name="label" translatable="yes">_Forward</attribute>
</item>
</section>
<section>
<item>
<attribute name="action">win.HomePerson</attribute>
<attribute name="label" translatable="yes">_Home</attribute>
</item>
</section>
</placeholder>
''',
'''
<section id='CommonEdit' groups='RW'>
<item>
<attribute name="action">win.PrintView</attribute>
<attribute name="label" translatable="yes">_Print...</attribute>
</item>
</section>
''', # Following are the Toolbar items
'''
<placeholder id='CommonNavigation'>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-previous</property>
<property name="action-name">win.Back</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the previous object in the history</property>
<property name="label" translatable="yes">_Back</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-next</property>
<property name="action-name">win.Forward</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the next object in the history</property>
<property name="label" translatable="yes">_Forward</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-home</property>
<property name="action-name">win.HomePerson</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the default person</property>
<property name="label" translatable="yes">_Home</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
</placeholder>
''',
'''
<placeholder id='BarCommonEdit'>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">document-print</property>
<property name="action-name">win.PrintView</property>
<property name="tooltip_text" translatable="yes">"Save the dot file '''
'''for a later print.\nThis will save a .gv file and a svg file.\n'''
'''You must select a .gv file"</property>
<property name="label" translatable="yes">_Print...</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
</placeholder>
''']
    def navigation_type(self):
        """
        The type of forward and backward navigation to perform.
        :return: navigation category name ('Person')
        """
        return 'Person'
def goto_handle(self, handle):
"""
Go to a named handle.
"""
if self.active:
if self.get_active() != "":
self.graph_widget.populate(self.get_active())
self.graph_widget.set_available(True)
else:
self.dirty = True
self.graph_widget.set_available(False)
    def change_active_person(self, _menuitem=None, person_handle=''):
        """
        Change active person.
        :param person_handle: handle of the person to make active;
            empty string is a no-op
        """
        if person_handle:
            self.change_active(person_handle)

    def can_configure(self):
        """
        See :class:`~gui.views.pageview.PageView
        :return: bool
        """
        return True
    def cb_update_show_images(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the images setting.
        :param entry: new value, delivered by the config system as the
            string 'True'/'False'
        """
        self.show_images = entry == 'True'
        self.graph_widget.populate(self.get_active())

    def cb_update_show_avatars(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the avatars setting.
        """
        self.show_avatars = entry == 'True'
        self.graph_widget.populate(self.get_active())

    def cb_update_avatars_style(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the avatars setting.
        """
        # custom-avatar file choosers are only relevant for style '0'
        for widget in self.avatar_widgets:
            widget.set_visible(entry == '0')
        self.graph_widget.populate(self.get_active())

    def cb_on_combo_show(self, combobox):
        """
        Called when the configuration menu show combobox widget for avatars.
        Used to hide custom avatars settings.
        """
        for widget in self.avatar_widgets:
            widget.set_visible(combobox.get_active() == 0)

    def cb_male_avatar_set(self, file_chooser_button):
        """
        Called when the configuration menu changes the male avatar.
        """
        self._config.set('interface.graphview-avatars-male',
                         file_chooser_button.get_filename())
        self.graph_widget.populate(self.get_active())

    def cb_female_avatar_set(self, file_chooser_button):
        """
        Called when the configuration menu changes the female avatar.
        """
        self._config.set('interface.graphview-avatars-female',
                         file_chooser_button.get_filename())
        self.graph_widget.populate(self.get_active())

    def cb_update_show_full_dates(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the date setting.
        """
        self.show_full_dates = entry == 'True'
        self.graph_widget.populate(self.get_active())

    def cb_update_show_places(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the place setting.
        """
        self.show_places = entry == 'True'
        self.graph_widget.populate(self.get_active())

    def cb_update_place_fmt(self, _client, _cnxn_id, _entry, _data):
        """
        Called when the configuration menu changes the place setting.
        """
        self.graph_widget.populate(self.get_active())

    def cb_update_show_tag_color(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the show tags setting.
        """
        self.show_tag_color = entry == 'True'
        self.graph_widget.populate(self.get_active())

    def cb_update_show_lines(self, _client, _cnxn_id, _entry, _data):
        """
        Called when the configuration menu changes the line setting.
        """
        self.graph_widget.populate(self.get_active())

    def cb_update_highlight_home_person(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the highlight home
        person setting.
        """
        self.highlight_home_person = entry == 'True'
        self.graph_widget.populate(self.get_active())

    def cb_update_home_path_color(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the path person color.
        """
        self.home_path_color = entry
        self.graph_widget.populate(self.get_active())

    def cb_update_desc_generations(self, _client, _cnxd_id, entry, _data):
        """
        Called when the configuration menu changes the descendant generation
        count setting.
        """
        self.descendant_generations = entry
        self.graph_widget.populate(self.get_active())

    def cb_update_ancestor_generations(self, _client, _cnxd_id, entry, _data):
        """
        Called when the configuration menu changes the ancestor generation
        count setting.
        """
        self.ancestor_generations = entry
        self.graph_widget.populate(self.get_active())
def cb_update_show_animation(self, _client, _cnxd_id, entry, _data):
"""
Called when the configuration menu changes the show animation
setting.
"""
if entry == 'True':
self.graph_widget.animation.show_animation = True
# enable animate options
for widget in self.ani_widgets:
widget.set_sensitive(True)
else:
self.graph_widget.animation.show_animation = False
# diable animate options
for widget in self.ani_widgets:
widget.set_sensitive(False)
    def cb_update_animation_count(self, _client, _cnxd_id, entry, _data):
        """
        Called when the configuration menu changes the animation count
        setting.
        """
        # one "count" is a back-and-forth, hence the factor 2
        self.graph_widget.animation.max_count = int(entry) * 2

    def cb_update_animation_speed(self, _client, _cnxd_id, entry, _data):
        """
        Called when the configuration menu changes the animation speed
        setting.
        """
        # speed stored as 1..5; convert to a milliseconds-style factor
        self.graph_widget.animation.speed = 50 * int(entry)

    def cb_update_search_all_db(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the search setting.
        """
        value = entry == 'True'
        self.graph_widget.search_widget.set_options(search_all_db=value)

    def cb_update_search_show_images(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the search setting.
        """
        value = entry == 'True'
        self.graph_widget.search_widget.set_options(show_images=value)
        self.graph_widget.show_images_option = value

    def cb_update_search_marked_first(self, _client, _cnxn_id, entry, _data):
        """
        Called when the configuration menu changes the search setting.
        """
        value = entry == 'True'
        self.graph_widget.search_widget.set_options(marked_first=value)

    def cb_update_spacing(self, _client, _cnxd_id, _entry, _data):
        """
        Called when the ranksep or nodesep setting changed.
        """
        self.graph_widget.populate(self.get_active())

    def cb_update_person_theme(self, _client, _cnxd_id, _entry, _data):
        """
        Called when person theme setting changed.
        """
        self.graph_widget.populate(self.get_active())

    def cb_show_all_connected(self, _client, _cnxd_id, _entry, _data):
        """
        Called when show all connected setting changed.
        """
        value = _entry == 'True'
        self.graph_widget.all_connected_btn.set_active(value)
        self.graph_widget.populate(self.get_active())

    def config_change_font(self, font_button):
        """
        Called when font is change.
        Stores the chosen family/size and repopulates the graph.
        """
        font_family = font_button.get_font_family()
        if font_family is not None:
            font_name = font_family.get_name()
        else:
            font_name = ''
        # apply Pango.SCALE=1024 to font size
        font_size = int(font_button.get_font_size() / 1024)
        self._config.set('interface.graphview-font', [font_name, font_size])
        self.graph_widget.retest_font = True
        self.graph_widget.populate(self.get_active())
    def config_connect(self):
        """
        Overwriten from :class:`~gui.views.pageview.PageView method
        This method will be called after the ini file is initialized,
        use it to monitor changes in the ini file.
        """
        self._config.connect('interface.graphview-show-images',
                             self.cb_update_show_images)
        self._config.connect('interface.graphview-show-avatars',
                             self.cb_update_show_avatars)
        self._config.connect('interface.graphview-avatars-style',
                             self.cb_update_avatars_style)
        self._config.connect('interface.graphview-show-full-dates',
                             self.cb_update_show_full_dates)
        self._config.connect('interface.graphview-show-places',
                             self.cb_update_show_places)
        self._config.connect('interface.graphview-place-format',
                             self.cb_update_place_fmt)
        self._config.connect('interface.graphview-show-tags',
                             self.cb_update_show_tag_color)
        self._config.connect('interface.graphview-show-lines',
                             self.cb_update_show_lines)
        self._config.connect('interface.graphview-highlight-home-person',
                             self.cb_update_highlight_home_person)
        self._config.connect('interface.graphview-home-path-color',
                             self.cb_update_home_path_color)
        self._config.connect('interface.graphview-descendant-generations',
                             self.cb_update_desc_generations)
        self._config.connect('interface.graphview-ancestor-generations',
                             self.cb_update_ancestor_generations)
        self._config.connect('interface.graphview-show-animation',
                             self.cb_update_show_animation)
        self._config.connect('interface.graphview-animation-speed',
                             self.cb_update_animation_speed)
        self._config.connect('interface.graphview-animation-count',
                             self.cb_update_animation_count)
        self._config.connect('interface.graphview-search-all-db',
                             self.cb_update_search_all_db)
        self._config.connect('interface.graphview-search-show-images',
                             self.cb_update_search_show_images)
        self._config.connect('interface.graphview-search-marked-first',
                             self.cb_update_search_marked_first)
        self._config.connect('interface.graphview-ranksep',
                             self.cb_update_spacing)
        self._config.connect('interface.graphview-nodesep',
                             self.cb_update_spacing)
        self._config.connect('interface.graphview-person-theme',
                             self.cb_update_person_theme)
        self._config.connect('interface.graphview-show-all-connected',
                             self.cb_show_all_connected)

    def _get_configure_page_funcs(self):
        """
        Return a list of functions that create gtk elements to use in the
        notebook pages of the Configure dialog.
        :return: list of functions
        """
        return [self.layout_config_panel,
                self.theme_config_panel,
                self.animation_config_panel,
                self.search_config_panel]
    def layout_config_panel(self, configdialog):
        """
        Function that builds the widget in the configuration dialog.
        See "gramps/gui/configure.py" for details.
        :return: (tab title, grid widget) tuple
        """
        grid = Gtk.Grid()
        grid.set_border_width(12)
        grid.set_column_spacing(6)
        grid.set_row_spacing(6)
        row = 0
        configdialog.add_checkbox(
            grid, _('Show images'), row, 'interface.graphview-show-images')
        row += 1
        configdialog.add_checkbox(
            grid, _('Show avatars'), row, 'interface.graphview-show-avatars')
        row += 1
        configdialog.add_checkbox(
            grid, _('Highlight the home person'),
            row, 'interface.graphview-highlight-home-person')
        row += 1
        configdialog.add_checkbox(
            grid, _('Show full dates'),
            row, 'interface.graphview-show-full-dates')
        row += 1
        configdialog.add_checkbox(
            grid, _('Show places'), row, 'interface.graphview-show-places')
        row += 1
        # Place format:
        p_fmts = [(0, _("Default"))]
        for (indx, fmt) in enumerate(place_displayer.get_formats()):
            p_fmts.append((indx + 1, fmt.name))
        active = self._config.get('interface.graphview-place-format')
        # guard against a stored index beyond the current format list
        if active >= len(p_fmts):
            active = 1
        configdialog.add_combo(grid, _('Place format'), row,
                               'interface.graphview-place-format',
                               p_fmts, setactive=active)
        row += 1
        configdialog.add_checkbox(
            grid, _('Show tags'), row, 'interface.graphview-show-tags')
        return _('Layout'), grid

    def theme_config_panel(self, configdialog):
        """
        Function that builds the widget in the configuration dialog.
        See "gramps/gui/configure.py" for details.
        :return: (tab title, grid widget) tuple
        """
        grid = Gtk.Grid()
        grid.set_border_width(12)
        grid.set_column_spacing(6)
        grid.set_row_spacing(6)
        p_themes = DotSvgGenerator(self.dbstate, self).get_person_themes()
        themes_list = []
        for t in p_themes:
            themes_list.append((t[0], t[1]))
        row = 0
        configdialog.add_combo(grid, _('Person theme'), row,
                               'interface.graphview-person-theme',
                               themes_list)
        row += 1
        configdialog.add_color(grid,
                               _('Path color to home person'),
                               row, 'interface.graphview-home-path-color',
                               col=1)
        row += 1
        font_lbl = Gtk.Label(label=_('Font:'), xalign=0)
        grid.attach(font_lbl, 1, row, 1, 1)
        font = self._config.get('interface.graphview-font')
        font_str = '%s, %d' % (font[0], font[1])
        font_btn = Gtk.FontButton.new_with_font(font_str)
        font_btn.set_show_style(False)
        grid.attach(font_btn, 2, row, 1, 1)
        font_btn.connect('font-set', self.config_change_font)
        # offer only "regular" faces in the font chooser
        font_btn.set_filter_func(self.font_filter_func)
        # Avatars options
        # ===================================================================
        row += 1
        avatars = Avatars(self._config)
        combo = configdialog.add_combo(grid, _('Avatars style'), row,
                                       'interface.graphview-avatars-style',
                                       avatars.get_styles_list())
        # hide/show custom-avatar rows depending on selected style
        combo.connect('show', self.cb_on_combo_show)
        file_filter = Gtk.FileFilter()
        file_filter.set_name(_('PNG files'))
        file_filter.add_pattern("*.png")
        self.avatar_widgets.clear()
        row += 1
        lbl = Gtk.Label(label=_('Male avatar:'), halign=Gtk.Align.END)
        FCB_male = Gtk.FileChooserButton.new(_('Choose male avatar'),
                                             Gtk.FileChooserAction.OPEN)
        FCB_male.add_filter(file_filter)
        FCB_male.set_filename(
            self._config.get('interface.graphview-avatars-male'))
        FCB_male.connect('file-set', self.cb_male_avatar_set)
        grid.attach(lbl, 1, row, 1, 1)
        grid.attach(FCB_male, 2, row, 1, 1)
        self.avatar_widgets.append(lbl)
        self.avatar_widgets.append(FCB_male)
        row += 1
        lbl = Gtk.Label(label=_('Female avatar:'), halign=Gtk.Align.END)
        FCB_female = Gtk.FileChooserButton.new(_('Choose female avatar'),
                                               Gtk.FileChooserAction.OPEN)
        FCB_female.connect('file-set', self.cb_female_avatar_set)
        FCB_female.add_filter(file_filter)
        FCB_female.set_filename(
            self._config.get('interface.graphview-avatars-female'))
        grid.attach(lbl, 1, row, 1, 1)
        grid.attach(FCB_female, 2, row, 1, 1)
        self.avatar_widgets.append(lbl)
        self.avatar_widgets.append(FCB_female)
        # ===================================================================
        return _('Themes'), grid

    def animation_config_panel(self, configdialog):
        """
        Function that builds the widget in the configuration dialog.
        See "gramps/gui/configure.py" for details.
        :return: (tab title, grid widget) tuple
        """
        grid = Gtk.Grid()
        grid.set_border_width(12)
        grid.set_column_spacing(6)
        grid.set_row_spacing(6)
        configdialog.add_checkbox(
            grid, _('Show animation'),
            0, 'interface.graphview-show-animation')
        self.ani_widgets.clear()
        widget = configdialog.add_spinner(
            grid, _('Animation speed (1..5 and 5 is the slower)'),
            1, 'interface.graphview-animation-speed', (1, 5))
        self.ani_widgets.append(widget)
        widget = configdialog.add_spinner(
            grid, _('Animation count (0..8 use 0 to turn off)'),
            2, 'interface.graphview-animation-count', (0, 8))
        self.ani_widgets.append(widget)
        # disable animate options if needed
        if not self.graph_widget.animation.show_animation:
            for widget in self.ani_widgets:
                widget.set_sensitive(False)
        return _('Animation'), grid

    def search_config_panel(self, configdialog):
        """
        Function that builds the widget in the configuration dialog.
        See "gramps/gui/configure.py" for details.
        :return: (tab title, grid widget) tuple
        """
        grid = Gtk.Grid()
        grid.set_border_width(12)
        grid.set_column_spacing(6)
        grid.set_row_spacing(6)
        row = 0
        widget = configdialog.add_checkbox(
            grid, _('Search in all database'), row,
            'interface.graphview-search-all-db')
        widget.set_tooltip_text(_("Also apply search by all database."))
        row += 1
        widget = configdialog.add_checkbox(
            grid, _('Show person images'), row,
            'interface.graphview-search-show-images')
        widget.set_tooltip_text(
            _("Show persons thumbnails in search result list."))
        row += 1
        widget = configdialog.add_checkbox(
            grid, _('Show bookmarked first'), row,
            'interface.graphview-search-marked-first')
        widget.set_tooltip_text(
            _("Show bookmarked persons first in search result list."))
        return _('Search'), grid
def font_filter_func(self, _family, face):
"""
Filter function to display only regular fonts.
"""
desc = face.describe()
stretch = desc.get_stretch()
if stretch != Pango.Stretch.NORMAL:
return False # avoid Condensed or Expanded
sty = desc.get_style()
if sty != Pango.Style.NORMAL:
return False # avoid italic etc.
weight = desc.get_weight()
if weight != Pango.Weight.NORMAL:
return False # avoid Bold
return True
#-------------------------------------------------------------------------
#
# Printing functionalities
#
#-------------------------------------------------------------------------
def printview(self, *obj):
"""
Save the dot file for a later printing with an appropriate tool.
"""
# ask for the dot file name
filter1 = Gtk.FileFilter()
filter1.set_name("dot files")
filter1.add_pattern("*.gv")
dot = Gtk.FileChooserDialog(title=_("Select a dot file name"),
action=Gtk.FileChooserAction.SAVE,
transient_for=self.uistate.window)
dot.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
dot.add_button(_('_Apply'), Gtk.ResponseType.OK)
mpath = config.get('paths.report-directory')
dot.set_current_folder(os.path.dirname(mpath))
dot.set_filter(filter1)
dot.set_current_name("Graphview.gv")
status = dot.run()
if status == Gtk.ResponseType.OK:
val = dot.get_filename()
(spath, _ext) = os.path.splitext(val)
val = spath + ".gv" # used to avoid filename without extension
# selected path is an existing file and we need a file
if os.path.isfile(val):
aaa = OptionDialog(_('File already exists'), # parent-OK
_('You can choose to either overwrite the '
'file, or change the selected filename.'),
_('_Overwrite'), None,
_('_Change filename'), None,
parent=dot)
if aaa.get_response() == Gtk.ResponseType.YES:
dot.destroy()
self.printview(obj)
return
svg = val.replace('.gv', '.svg')
# both dot_data and svg_data are bytes, already utf-8 encoded
# just write them as binary
try:
with open(val, 'wb') as __g, open(svg, 'wb') as __s:
__g.write(self.graph_widget.dot_data)
__s.write(self.graph_widget.svg_data)
except IOError as msg:
msg2 = _("Could not create %s") % (val + ', ' + svg)
ErrorDialog(msg2, str(msg), parent=dot)
dot.destroy()
#-------------------------------------------------------------------------
#
# GraphWidget
#
#-------------------------------------------------------------------------
class GraphWidget(object):
"""
Define the widget with controls and canvas that displays the graph.
"""
    def __init__(self, view, dbstate, uistate):
        """
        Build the toolbar, canvas and helper objects of the graph widget.

        :type view: GraphView
        :param dbstate: database state object
        :param uistate: user interface state object
        """
        # variables for drag and scroll
        self._last_x = 0
        self._last_y = 0
        self._in_move = False
        self.view = view
        self.dbstate = dbstate
        self.uistate = uistate
        self.parser = None
        self.active_person_handle = None
        # actions object dispatches graph context-menu commands
        self.actions = Actions(dbstate, uistate, self.view.bookmarks)
        self.actions.connect('rebuild-graph', self.view.build_tree)
        self.actions.connect('active-changed', self.populate)
        self.actions.connect('focus-person-changed', self.set_person_to_focus)
        self.dot_data = None
        self.svg_data = None
        scrolled_win = Gtk.ScrolledWindow()
        scrolled_win.set_shadow_type(Gtk.ShadowType.IN)
        self.hadjustment = scrolled_win.get_hadjustment()
        self.vadjustment = scrolled_win.get_vadjustment()
        self.canvas = GooCanvas.Canvas()
        self.canvas.connect("scroll-event", self.scroll_mouse)
        self.canvas.props.units = Gtk.Unit.POINTS
        self.canvas.props.resolution_x = 72
        self.canvas.props.resolution_y = 72
        scrolled_win.add(self.canvas)
        self.vbox = Gtk.Box(homogeneous=False, spacing=4,
                            orientation=Gtk.Orientation.VERTICAL)
        self.vbox.set_border_width(4)
        self.toolbar = Gtk.Box(homogeneous=False, spacing=4,
                               orientation=Gtk.Orientation.HORIZONTAL)
        self.vbox.pack_start(self.toolbar, False, False, 0)
        # add zoom-in button
        self.zoom_in_btn = Gtk.Button.new_from_icon_name('zoom-in',
                                                         Gtk.IconSize.MENU)
        self.zoom_in_btn.set_tooltip_text(_('Zoom in'))
        self.toolbar.pack_start(self.zoom_in_btn, False, False, 1)
        self.zoom_in_btn.connect("clicked", self.zoom_in)
        # add zoom-out button
        self.zoom_out_btn = Gtk.Button.new_from_icon_name('zoom-out',
                                                          Gtk.IconSize.MENU)
        self.zoom_out_btn.set_tooltip_text(_('Zoom out'))
        self.toolbar.pack_start(self.zoom_out_btn, False, False, 1)
        self.zoom_out_btn.connect("clicked", self.zoom_out)
        # add original zoom button
        self.orig_zoom_btn = Gtk.Button.new_from_icon_name('zoom-original',
                                                           Gtk.IconSize.MENU)
        self.orig_zoom_btn.set_tooltip_text(_('Zoom to original'))
        self.toolbar.pack_start(self.orig_zoom_btn, False, False, 1)
        self.orig_zoom_btn.connect("clicked", self.set_original_zoom)
        # add best fit button
        self.fit_btn = Gtk.Button.new_from_icon_name('zoom-fit-best',
                                                     Gtk.IconSize.MENU)
        self.fit_btn.set_tooltip_text(_('Zoom to best fit'))
        self.toolbar.pack_start(self.fit_btn, False, False, 1)
        self.fit_btn.connect("clicked", self.fit_to_page)
        # add 'go to active person' button
        self.goto_active_btn = Gtk.Button.new_from_icon_name('go-jump',
                                                             Gtk.IconSize.MENU)
        self.goto_active_btn.set_tooltip_text(_('Go to active person'))
        self.toolbar.pack_start(self.goto_active_btn, False, False, 1)
        self.goto_active_btn.connect("clicked", self.goto_active)
        # add 'go to bookmark' button
        self.goto_other_btn = Gtk.Button(label=_('Go to bookmark'))
        self.goto_other_btn.set_tooltip_text(
            _('Center view on selected bookmark'))
        self.toolbar.pack_start(self.goto_other_btn, False, False, 1)
        self.bkmark_popover = Popover(_('Bookmarks for current graph'),
                                      _('Other Bookmarks'),
                                      ext_panel=self.build_bkmark_ext_panel())
        self.bkmark_popover.set_relative_to(self.goto_other_btn)
        self.goto_other_btn.connect("clicked", self.show_bkmark_popup)
        self.goto_other_btn.connect("key-press-event",
                                    self.goto_other_btn_key_press_event)
        self.bkmark_popover.connect('item-activated', self.activate_popover)
        self.show_images_option = self.view._config.get(
            'interface.graphview-search-show-images')
        # add search widget
        self.search_widget = SearchWidget(self.dbstate,
                                          self.get_person_image,
                                          bookmarks=self.view.bookmarks)
        search_box = self.search_widget.get_widget()
        self.toolbar.pack_start(search_box, True, True, 1)
        self.search_widget.set_options(
            search_all_db=self.view._config.get(
                'interface.graphview-search-all-db'),
            show_images=self.show_images_option)
        self.search_widget.connect('item-activated', self.activate_popover)
        # add accelerator (Ctrl+F) to focus search entry
        accel_group = Gtk.AccelGroup()
        self.uistate.window.add_accel_group(accel_group)
        search_box.add_accelerator('grab-focus', accel_group, Gdk.KEY_f,
                                   Gdk.ModifierType.CONTROL_MASK,
                                   Gtk.AccelFlags.VISIBLE)
        # add spinners for quick generations change
        gen_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        box = self.build_spinner('go-up-symbolic', 0, 50,
                                 _('Ancestor generations'),
                                 'interface.graphview-ancestor-generations')
        gen_box.add(box)
        box = self.build_spinner('go-down-symbolic', 0, 50,
                                 _('Descendant generations'),
                                 'interface.graphview-descendant-generations')
        gen_box.add(box)
        # pack generation spinners to popover
        gen_btn = Gtk.Button(label=_('Generations'))
        self.add_popover(gen_btn, gen_box)
        self.toolbar.pack_start(gen_btn, False, False, 1)
        # add spiner for generation (vertical) spacing
        spacing_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        box = self.build_spinner('object-flip-vertical', 1, 50,
                                 _('Vertical spacing between generations'),
                                 'interface.graphview-ranksep')
        spacing_box.add(box)
        # add spiner for node (horizontal) spacing
        box = self.build_spinner('object-flip-horizontal', 1, 50,
                                 _('Horizontal spacing between generations'),
                                 'interface.graphview-nodesep')
        spacing_box.add(box)
        # pack spacing spinners to popover
        spacing_btn = Gtk.Button(label=_('Spacings'))
        self.add_popover(spacing_btn, spacing_box)
        self.toolbar.pack_start(spacing_btn, False, False, 1)
        # add button to show all connected persons
        self.all_connected_btn = Gtk.ToggleButton(label=_('All connected'))
        self.all_connected_btn.set_tooltip_text(
            _("Show all connected persons limited by generation restrictions.\n"
              "Works slow, so don't set large generation values."))
        self.all_connected_btn.set_active(
            self.view._config.get('interface.graphview-show-all-connected'))
        self.all_connected_btn.connect('clicked', self.toggle_all_connected)
        self.toolbar.pack_start(self.all_connected_btn, False, False, 1)
        self.vbox.pack_start(scrolled_win, True, True, 0)
        # if we have graph lager than graphviz paper size
        # this coef is needed
        self.transform_scale = 1
        self.scale = 1
        self.animation = CanvasAnimation(self.view, self.canvas, scrolled_win)
        self.search_widget.set_items_list(self.animation.items_list)
        # person that will focus (once) after graph rebuilding
        self.person_to_focus = None
        # for detecting double click
        self.click_events = []
        # for timeout on changing settings by spinners
        self.timeout_event = False
        # Gtk style context for scrollwindow to operate with theme colors
        self.sw_style_context = scrolled_win.get_style_context()
        # used for popup menu, prevent destroy menu as local variable
        self.menu = None
        self.retest_font = True  # flag indicates need to resize font
        self.bold_size = self.norm_size = 0  # font sizes to send to dot
def add_popover(self, widget, container):
"""
Add popover for button.
"""
popover = Gtk.Popover()
popover.set_relative_to(widget)
popover.add(container)
widget.connect("clicked", self.spinners_popup, popover)
container.show_all()
def build_spinner(self, icon, start, end, tooltip, conf_const):
"""
Build spinner with icon and pack it into box.
Chenges apply to config with delay.
"""
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
img = Gtk.Image.new_from_icon_name(icon, Gtk.IconSize.MENU)
box.pack_start(img, False, False, 1)
spinner = Gtk.SpinButton.new_with_range(start, end, 1)
spinner.set_tooltip_text(tooltip)
spinner.set_value(self.view._config.get(conf_const))
spinner.connect("value-changed", self.apply_spinner_delayed,
conf_const)
box.pack_start(spinner, False, False, 1)
return box
    def toggle_all_connected(self, widget):
        """
        Change state for "Show all connected" setting.
        :param widget: the toggle button whose state is persisted
        """
        self.view._config.set('interface.graphview-show-all-connected',
                              widget.get_active())
def spinners_popup(self, _widget, popover):
"""
Popover for generations and spacing params.
Different popup depending on gtk version.
"""
if gtk_version >= 3.22:
popover.popup()
else:
popover.show()
    def set_available(self, state):
        """
        Set state for GraphView.
        :param state: False clears the canvas and greys out the toolbar
        """
        if not state:
            # if no database is opened
            self.clear()
        self.toolbar.set_sensitive(state)

    def font_changed(self, active):
        """
        Re-read the symbol font setting, notify the parser and
        repopulate the graph for *active*.
        """
        self.sym_font = config.get('utf8.selected-font')
        if self.parser:
            self.parser.font_changed()
        self.populate(active)

    def set_person_to_focus(self, handle):
        """
        Set person that will focus (once) after graph rebuilding.
        """
        self.person_to_focus = handle
def goto_other_btn_key_press_event(self, _widget, event):
"""
Handle 'Esc' key on bookmarks button to hide popup.
"""
key = event.keyval
if event.keyval == Gdk.KEY_Escape:
self.hide_bkmark_popover()
elif key == Gdk.KEY_Down:
self.bkmark_popover.grab_focus()
return True
    def activate_popover(self, _widget, person_handle):
        """
        Called when some item(person)
        in search or bookmarks popup(popover) is activated.
        """
        self.hide_bkmark_popover()
        self.search_widget.hide_search_popover()
        # move view to person with animation
        self.move_to_person(None, person_handle, True)

    def apply_spinner_delayed(self, widget, conf_const):
        """
        Set params by spinners (generations, spacing).
        Use timeout for better interface responsiveness.
        :param conf_const: config key the spinner value is stored under
        """
        value = int(widget.get_value())
        # try to remove planed event (changing setting)
        if self.timeout_event and \
                not self.timeout_event.is_destroyed():
            GLib.source_remove(self.timeout_event.get_id())
        # timeout saving setting for better interface responsiveness
        event_id = GLib.timeout_add(300, self.view._config.set,
                                    conf_const, value)
        # keep the GLib.Source so a newer change can cancel this one
        context = GLib.main_context_default()
        self.timeout_event = context.find_source_by_id(event_id)
    def build_bkmark_ext_panel(self):
        """
        Build bookmark popover extended panel.

        :returns: a horizontal Gtk.Box with the "Add active person"
                  and "Edit" buttons.
        """
        btn_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
        # add button to add active person to bookmarks
        # tooltip will be changed in "self.load_bookmarks"
        self.add_bkmark = Gtk.Button(label=_('Add active person'))
        self.add_bkmark.connect("clicked", self.add_active_to_bkmarks)
        btn_box.pack_start(self.add_bkmark, True, True, 2)
        # add button to call bookmarks manager
        manage_bkmarks = Gtk.Button(label=_('Edit'))
        manage_bkmarks.set_tooltip_text(_('Call the bookmark editor'))
        manage_bkmarks.connect("clicked", self.edit_bookmarks)
        btn_box.pack_start(manage_bkmarks, True, True, 2)
        return btn_box
    def load_bookmarks(self):
        """
        Load bookmarks in Popover (goto_other_btn).

        Splits bookmarked persons into two panels: "main" for persons
        drawn in the current graph, "other" for the rest.  Also toggles
        the "Add active person" button depending on whether the active
        person is already bookmarked.
        """
        # remove all old items from popup
        self.bkmark_popover.clear_items()
        active = self.view.get_active()
        active_in_bkmarks = False
        found = False           # some bookmark is present in the graph
        found_other = False     # some bookmark is NOT in the graph
        count = 0
        count_other = 0
        bookmarks = self.view.bookmarks.get_bookmarks().bookmarks
        for bkmark in bookmarks:
            if active == bkmark:
                active_in_bkmarks = True
            person = self.dbstate.db.get_person_from_handle(bkmark)
            if person:
                name = displayer.display_name(person.get_primary_name())
                # not None when this person is drawn on the current canvas
                present = self.animation.get_item_by_title(bkmark)
                hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL,
                               spacing=10)
                # add person ID
                label = Gtk.Label("[%s]" % person.gramps_id, xalign=0)
                hbox.pack_start(label, False, False, 2)
                # add person name
                label = Gtk.Label(name, xalign=0)
                hbox.pack_start(label, True, True, 2)
                # add person image if needed
                if self.show_images_option:
                    person_image = self.get_person_image(person, 32, 32)
                    if person_image:
                        hbox.pack_start(person_image, False, True, 2)
                row = ListBoxRow(person_handle=bkmark, label=name,
                                 db=self.dbstate.db)
                row.add(hbox)
                if present is not None:
                    found = True
                    count += 1
                    self.bkmark_popover.main_panel.add_to_panel(row)
                else:
                    found_other = True
                    count_other += 1
                    self.bkmark_popover.other_panel.add_to_panel(row)
                row.show_all()
        if not found and not found_other:
            # no bookmarks at all: show a hint in the main panel
            self.bkmark_popover.show_other_panel(False)
            row = ListBoxRow()
            row.add(Gtk.Label(_("You don't have any bookmarks yet...\n"
                                "Try to add some frequently used persons "
                                "to speedup navigation.")))
            self.bkmark_popover.main_panel.add_to_panel(row)
            row.show_all()
        else:
            # fill the empty panel(s) with placeholder rows
            if not found:
                row = ListBoxRow()
                row.add(Gtk.Label(_('No bookmarks for this graph...')))
                self.bkmark_popover.main_panel.add_to_panel(row)
                row.show_all()
            if not found_other:
                row = ListBoxRow()
                row.add(Gtk.Label(_('No other bookmarks...')))
                self.bkmark_popover.other_panel.add_to_panel(row)
                row.show_all()
            self.bkmark_popover.show_other_panel(True)
        self.bkmark_popover.main_panel.set_progress(0, _('found: %s') % count)
        self.bkmark_popover.other_panel.set_progress(
            0, _('found: %s') % count_other)
        # set tooltip for "add_bkmark" button
        self.add_bkmark.hide()
        if active and not active_in_bkmarks:
            person = self.dbstate.db.get_person_from_handle(active)
            if person:
                name = displayer.display_name(person.get_primary_name())
                val_to_display = "[%s] %s" % (person.gramps_id, name)
                self.add_bkmark.set_tooltip_text(
                    _('Add active person to bookmarks\n'
                      '%s') % val_to_display)
                self.add_bkmark.show()
    def get_person_image(self, person, width=-1, height=-1, kind='image'):
        """
        kind - 'image', 'path', 'both'
        Returns default person image and path or None.

        Only the first media object of the person is considered; it must
        have an image MIME type and its thumbnail must exist on disk.
        """
        # see if we have an image to use for this person
        image_path = None
        media_list = person.get_media_list()
        if media_list:
            media_handle = media_list[0].get_reference_handle()
            media = self.dbstate.db.get_media_from_handle(media_handle)
            media_mime_type = media.get_mime_type()
            if media_mime_type[0:5] == "image":
                rectangle = media_list[0].get_rectangle()
                path = media_path_full(self.dbstate.db, media.get_path())
                image_path = get_thumbnail_path(path, rectangle=rectangle)
                # test if thumbnail actually exists in thumbs
                # (import of data means media files might not be present
                image_path = find_file(image_path)
        if image_path:
            if kind == 'path':
                return image_path
            # get and scale image, preserving the aspect ratio
            person_image = GdkPixbuf.Pixbuf.new_from_file_at_scale(
                filename=image_path,
                width=width, height=height,
                preserve_aspect_ratio=True)
            person_image = Gtk.Image.new_from_pixbuf(person_image)
            if kind == 'image':
                return person_image
            elif kind == 'both':
                return person_image, image_path
        return None
    def add_active_to_bkmarks(self, _widget):
        """
        Add active person to bookmarks and refresh the bookmarks popup.
        """
        self.view.add_bookmark(None)
        self.load_bookmarks()
    def edit_bookmarks(self, _widget):
        """
        Call the bookmark editor, then refresh the bookmarks popup.
        """
        self.view.edit_bookmarks(None)
        self.load_bookmarks()
    def show_bkmark_popup(self, _widget):
        """
        Show bookmark popup (reloading its content first).
        """
        self.load_bookmarks()
        self.bkmark_popover.popup()
    def hide_bkmark_popover(self, _widget=None, _event=None):
        """
        Hide bookmark popup.

        Extra arguments allow use as a Gtk signal handler.
        """
        self.bkmark_popover.popdown()
def goto_active(self, button=None):
"""
Go to active person.
"""
# check if animation is needed
animation = bool(button)
self.animation.move_to_person(self.active_person_handle, animation)
    def move_to_person(self, _menuitem, handle, animate=False):
        """
        Move to specified person (by handle).
        If person not present in the current graphview tree,
        show dialog to change active person.

        :returns: False when the handle resolves to no person;
                  otherwise the result of the taken branch.
        """
        self.person_to_focus = None
        if self.animation.get_item_by_title(handle):
            self.animation.move_to_person(handle, animate)
        else:
            person = self.dbstate.db.get_person_from_handle(handle)
            if not person:
                return False
            # person exists but is outside the drawn tree: ask before
            # changing the active person (which rebuilds the graph)
            quest = (_('Person <b><i>%s</i></b> is not in the current view.\n'
                       'Do you want to set it active and rebuild view?')
                     % escape(displayer.display(person)))
            dialog = QuestionDialog2(_("Change active person?"), quest,
                                     _("Yes"), _("No"),
                                     self.uistate.window)
            if dialog.run():
                self.view.change_active(handle)
def scroll_mouse(self, _canvas, event):
"""
Zoom by mouse wheel.
"""
if event.direction == Gdk.ScrollDirection.UP:
self.zoom_in()
elif event.direction == Gdk.ScrollDirection.DOWN:
self.zoom_out()
# stop the signal of scroll emission
# to prevent window scrolling
return True
    def populate(self, active_person):
        """
        Populate the graph with widgets derived from Graphviz.

        :param active_person: handle of the person the graph is built
                              around.
        """
        # set the busy cursor, so the user knows that we are working
        self.uistate.set_busy_cursor(True)
        if self.uistate.window.get_window().is_visible():
            process_pending_events()
        self.clear()
        self.active_person_handle = active_person
        # fit the text to boxes
        self.bold_size, self.norm_size = self.fit_text()
        self.search_widget.hide_search_popover()
        self.hide_bkmark_popover()
        # generate DOT and SVG data
        dot = DotSvgGenerator(self.dbstate, self.view,
                              bold_size=self.bold_size,
                              norm_size=self.norm_size)
        graph_data = dot.build_graph(active_person)
        del dot
        if not graph_data:
            # something went wrong when building the all-connected tree,
            # so turn off this feature
            self.view._config.set('interface.graphview-show-all-connected',
                                  False)
            return
        self.dot_data = graph_data[0]
        self.svg_data = graph_data[1]
        parser = GraphvizSvgParser(self, self.view)
        parser.parse(self.svg_data)
        self.animation.update_items(parser.items_list)
        # save transform scale
        self.transform_scale = parser.transform_scale
        self.set_zoom(self.scale)
        # focus on edited person if possible
        if not self.animation.move_to_person(self.person_to_focus, False):
            self.goto_active()
        self.person_to_focus = None
        # update the status bar
        self.view.change_page()
        self.uistate.set_busy_cursor(False)
def zoom_in(self, _button=None):
"""
Increase zoom scale.
"""
scale_coef = self.scale * 1.1
self.set_zoom(scale_coef)
def zoom_out(self, _button=None):
"""
Decrease zoom scale.
"""
scale_coef = self.scale * 0.9
if scale_coef < 0.01:
scale_coef = 0.01
self.set_zoom(scale_coef)
    def set_original_zoom(self, _button):
        """
        Set original zoom scale = 1.
        """
        self.set_zoom(1)
def fit_to_page(self, _button):
"""
Calculate scale and fit tree to page.
"""
# get the canvas size
bounds = self.canvas.get_root_item().get_bounds()
height_canvas = bounds.y2 - bounds.y1
width_canvas = bounds.x2 - bounds.x1
# get scroll window size
width = self.hadjustment.get_page_size()
height = self.vadjustment.get_page_size()
# prevent division by zero
if height_canvas == 0:
height_canvas = 1
if width_canvas == 0:
width_canvas = 1
# calculate minimum scale
scale_h = (height / height_canvas)
scale_w = (width / width_canvas)
if scale_h > scale_w:
scale = scale_w
else:
scale = scale_h
scale = scale * self.transform_scale
# set scale if it needed, else restore it to default
if scale < 1:
self.set_zoom(scale)
else:
self.set_zoom(1)
    def clear(self):
        """
        Clear the graph by creating a new root item.
        """
        # remove root item (with all children)
        self.canvas.get_root_item().remove()
        self.canvas.set_root_item(GooCanvas.CanvasGroup())
    def get_widget(self):
        """
        Return the graph display widget that includes the drawing canvas.
        """
        return self.vbox
    def button_press(self, item, _target, event):
        """
        Enter in scroll mode when left or middle mouse button pressed
        on background.

        :returns: True when the event was fully handled (context menu),
                  False to allow further processing.
        """
        self.search_widget.hide_search_popover()
        self.hide_bkmark_popover()
        # only react to plain presses on the background (root item)
        if not (event.type == getattr(Gdk.EventType, "BUTTON_PRESS") and
                item == self.canvas.get_root_item()):
            return False
        button = event.get_button()[1]
        if button == 1 or button == 2:
            # left/middle button: start drag-scroll mode
            window = self.canvas.get_parent().get_window()
            window.set_cursor(Gdk.Cursor.new(Gdk.CursorType.FLEUR))
            self._last_x = event.x_root
            self._last_y = event.y_root
            self._in_move = True
            self.animation.stop_animation()
            return False
        if button == 3:
            # right button: background context menu
            self.menu = PopupMenu(self, kind='background')
            self.menu.show_menu(event)
            return True
        return False
    def button_release(self, item, target, event):
        """
        Exit from scroll mode when button release.
        """
        button = event.get_button()[1]
        if((button == 1 or button == 2) and
                event.type == getattr(Gdk.EventType, "BUTTON_RELEASE")):
            # apply the final drag position before leaving scroll mode
            self.motion_notify_event(item, target, event)
            self.canvas.get_parent().get_window().set_cursor(None)
            self._in_move = False
            return True
        return False
    def motion_notify_event(self, _item, _target, event):
        """
        Function for motion notify events for drag and scroll mode.

        Scrolls by the pointer delta since the press, compensated by the
        canvas scale.
        """
        if self._in_move and (event.type == Gdk.EventType.MOTION_NOTIFY or
                              event.type == Gdk.EventType.BUTTON_RELEASE):
            # scale coefficient for prevent flicking when drag
            scale_coef = self.canvas.get_scale()
            new_x = (self.hadjustment.get_value() -
                     (event.x_root - self._last_x) * scale_coef)
            self.hadjustment.set_value(new_x)
            new_y = (self.vadjustment.get_value() -
                     (event.y_root - self._last_y) * scale_coef)
            self.vadjustment.set_value(new_y)
            return True
        return False
    def set_zoom(self, value):
        """
        Set value for zoom of the canvas widget and apply it.

        The stored SVG transform scale is divided out so that `value`
        refers to the user-visible zoom level.
        """
        self.scale = value
        self.canvas.set_scale(value / self.transform_scale)
    def select_node(self, item, target, event):
        """
        Perform actions when a node is clicked.
        If middle mouse was clicked then try to set scroll mode.

        Double-click opens the person/family editor; single left click
        changes the active person (via a delayed event so a double click
        can cancel it); right click shows the context menu.
        """
        self.search_widget.hide_search_popover()
        self.hide_bkmark_popover()
        handle = item.title
        node_class = item.description
        button = event.get_button()[1]
        self.person_to_focus = None
        # perform double click on node by left mouse button
        if event.type == getattr(Gdk.EventType, "DOUBLE_BUTTON_PRESS"):
            # Remove all single click events
            for click_item in self.click_events:
                if not click_item.is_destroyed():
                    GLib.source_remove(click_item.get_id())
            self.click_events.clear()
            if button == 1 and node_class == 'node':
                GLib.idle_add(self.actions.edit_person, None, handle)
                return True
            elif button == 1 and node_class == 'familynode':
                GLib.idle_add(self.actions.edit_family, None, handle)
                return True
        if event.type != getattr(Gdk.EventType, "BUTTON_PRESS"):
            return False
        if button == 1 and node_class == 'node':            # left mouse
            if handle == self.active_person_handle:
                # Find a parent of the active person so that they can become
                # the active person, if no parents then leave as the current
                # active person
                parent_handle = self.find_a_parent(handle)
                if parent_handle:
                    handle = parent_handle
                else:
                    return True
            # redraw the graph based on the selected person
            # schedule after because double click can occur
            click_event_id = GLib.timeout_add(200, self.view.change_active,
                                              handle)
            # add single click events to list, it will be removed if necessary
            context = GLib.main_context_default()
            self.click_events.append(context.find_source_by_id(click_event_id))
        elif button == 3 and node_class:                    # right mouse
            if node_class == 'node':
                self.menu = PopupMenu(self, 'person', handle)
                self.menu.show_menu(event)
            elif node_class == 'familynode':
                self.menu = PopupMenu(self, 'family', handle)
                self.menu.show_menu(event)
        elif button == 2:                                   # middle mouse
            # to enter in scroll mode (we should change "item" to root item)
            item = self.canvas.get_root_item()
            self.button_press(item, target, event)
        return True
def find_a_parent(self, handle):
"""
Locate a parent from the first family that the selected person is a
child of. Try and find the father first, then the mother.
Either will be OK.
"""
person = self.dbstate.db.get_person_from_handle(handle)
try:
fam_handle = person.get_parent_family_handle_list()[0]
if fam_handle:
family = self.dbstate.db.get_family_from_handle(fam_handle)
if family and family.get_father_handle():
handle = family.get_father_handle()
elif family and family.get_mother_handle():
handle = family.get_mother_handle()
except IndexError:
handle = None
return handle
    def update_lines_type(self, _menu_item, lines_type, constant):
        """
        Save the lines type setting.

        :param lines_type: value to store
        :param constant: config key to store it under
        """
        self.view._config.set(constant, lines_type)
    def update_setting(self, menu_item, constant):
        """
        Save changed setting.
        menu_item should be Gtk.CheckMenuItem.
        """
        self.view._config.set(constant, menu_item.get_active())
    def fit_text(self):
        """
        Fit the text to the boxes more exactly. Works by trying some sample
        text, measuring the results, and trying an increasing size of font
        sizes to some sample nodes to see which one will fit the expected
        text size.
        In other words we are telling dot to use different font sizes than
        we are actually displaying, since dot doesn't do a good job of
        determining the text size.

        :returns: (bold_size, norm_size) font sizes to pass to dot.
        """
        if not self.retest_font:  # skip this unless font changed.
            return self.bold_size, self.norm_size
        text = "The quick Brown Fox jumped over the Lazy Dogs 1948-01-01."
        dot_test = DotSvgGenerator(self.dbstate, self.view)
        dot_test.init_dot()
        # These are at the desired font sizes.
        dot_test.add_node('test_bold', '<B>%s</B>' % text, shape='box')
        dot_test.add_node('test_norm', text, shape='box')
        # now add nodes at increasing font sizes (35%..138% of base size)
        for scale in range(35, 140, 2):
            f_size = dot_test.fontsize * scale / 100.0
            dot_test.add_node(
                'test_bold' + str(scale),
                '<FONT POINT-SIZE="%(bsize)3.1f"><B>%(text)s</B></FONT>' %
                {'text': text, 'bsize': f_size}, shape='box')
            dot_test.add_node(
                'test_norm' + str(scale),
                text, shape='box', fontsize=("%3.1f" % f_size))
        # close the graphviz dot code with a brace
        dot_test.write('}\n')
        # get DOT and generate SVG data by Graphviz
        dot_data = dot_test.dot.getvalue().encode('utf8')
        svg_data = dot_test.make_svg(dot_data)
        svg_data = svg_data.decode('utf8')
        # now let's find the box sizes, and font sizes for the generated svg.
        points_a = findall(r'points="(.*)"', svg_data, MULTILINE)
        font_fams = findall(r'font-family="(.*)" font-weight',
                            svg_data, MULTILINE)
        font_sizes = findall(r'font-size="(.*)" fill', svg_data, MULTILINE)
        box_w = []
        for points in points_a:
            box_pts = points.split()
            x_1 = box_pts[0].split(',')[0]
            x_2 = box_pts[1].split(',')[0]
            box_w.append(float(x_1) - float(x_2) - 16)  # adjust for margins
        text_font = font_fams[0] + ", " + font_sizes[0] + 'px'
        font_desc = Pango.FontDescription.from_string(text_font)
        # lets measure the bold text on our canvas at desired font size
        c_text = GooCanvas.CanvasText(parent=self.canvas.get_root_item(),
                                      text='<b>' + text + '</b>',
                                      x=100,
                                      y=100,
                                      anchor=GooCanvas.CanvasAnchorType.WEST,
                                      use_markup=True,
                                      font_desc=font_desc)
        bold_b = c_text.get_bounds()
        # and measure the normal text on our canvas at desired font size
        c_text.props.text = text
        norm_b = c_text.get_bounds()
        # now scan through test boxes, finding the smallest that will hold
        # the actual text as measured. And record the dot font that was used.
        # (bold nodes occupy odd indexes, normal nodes even indexes)
        for indx in range(3, len(font_sizes), 2):
            bold_size = float(font_sizes[indx - 1])
            if box_w[indx] > bold_b.x2 - bold_b.x1:
                break
        for indx in range(4, len(font_sizes), 2):
            norm_size = float(font_sizes[indx - 1])
            if box_w[indx] > norm_b.x2 - norm_b.x1:
                break
        self.retest_font = False  # we don't do this again until font changes
        # return the adjusted font size to tell dot to use.
        return bold_size, norm_size
#-------------------------------------------------------------------------
#
# GraphvizSvgParser
#
#-------------------------------------------------------------------------
class GraphvizSvgParser(object):
    """
    Parses SVG produced by Graphviz and adds the elements to a GooCanvas.

    Driven by an expat SAX parser: each supported SVG tag has a pair of
    start/stop handlers registered in `func_map`.
    """

    def __init__(self, widget, view):
        """
        Initialise the GraphvizSvgParser class.

        :param widget: the graph widget owning the canvas
        :param view: the GraphView page (provides configuration)
        """
        self.func = None
        self.widget = widget
        self.canvas = widget.canvas
        self.view = view
        self.highlight_home_person = self.view._config.get(
            'interface.graphview-highlight-home-person')
        scheme = config.get('colors.scheme')
        self.home_person_color = config.get('colors.home-person')[scheme]
        self.font_size = self.view._config.get('interface.graphview-font')[1]
        # accumulated character data for the current tag
        self.tlist = []
        self.text_attrs = None
        self.func_list = []
        # handle of the person/family for the <g> group being parsed
        self.handle = None
        self.func_map = {"g": (self.start_g, self.stop_g),
                         "svg": (self.start_svg, self.stop_svg),
                         "polygon": (self.start_polygon, self.stop_polygon),
                         "path": (self.start_path, self.stop_path),
                         "image": (self.start_image, self.stop_image),
                         "text": (self.start_text, self.stop_text),
                         "ellipse": (self.start_ellipse, self.stop_ellipse),
                         "title": (self.start_title, self.stop_title)}
        self.text_anchor_map = {"start": GooCanvas.CanvasAnchorType.WEST,
                                "middle": GooCanvas.CanvasAnchorType.CENTER,
                                "end": GooCanvas.CanvasAnchorType.EAST}
        # This list is used as a LIFO stack so that the SAX parser knows
        # which Goocanvas object to link the next object to.
        self.item_hier = []
        # list of persons items, used for animation class
        self.items_list = []
        self.transform_scale = 1

    def parse(self, ifile):
        """
        Parse an SVG file produced by Graphviz.

        :param ifile: the SVG data (string) to parse
        """
        self.item_hier.append(self.canvas.get_root_item())
        parser = ParserCreate()
        parser.StartElementHandler = self.start_element
        parser.EndElementHandler = self.end_element
        parser.CharacterDataHandler = self.characters
        parser.Parse(ifile)

        # break reference cycles between parser state and bound methods
        for key in list(self.func_map.keys()):
            del self.func_map[key]
        del self.func_map
        del self.func_list
        del parser

    def start_g(self, attrs):
        """
        Parse <g> tags.
        """
        # The class attribute defines the group type. There should be one
        # graph type <g> tag which defines the transform for the whole graph.
        if attrs.get('class') == 'graph':
            self.items_list.clear()
            transform = attrs.get('transform')
            item = self.canvas.get_root_item()
            transform_list = transform.split(') ')
            scale = transform_list[0].split()
            scale_x = float(scale[0].lstrip('scale('))
            scale_y = float(scale[1])
            self.transform_scale = scale_x
            if scale_x > scale_y:
                self.transform_scale = scale_y
            # scale should be (0..1)
            # fix graphviz issue from version > 2.40.1
            if self.transform_scale > 1:
                self.transform_scale = 1 / self.transform_scale

            item.set_simple_transform(self.bounds[1],
                                      self.bounds[3],
                                      self.transform_scale,
                                      0)
            item.connect("button-press-event", self.widget.button_press)
            item.connect("button-release-event", self.widget.button_release)
            item.connect("motion-notify-event",
                         self.widget.motion_notify_event)
        else:
            item = GooCanvas.CanvasGroup(parent=self.current_parent())
            item.connect("button-press-event", self.widget.select_node)
            self.items_list.append(item)
        item.description = attrs.get('class')
        self.item_hier.append(item)

    def stop_g(self, _tag):
        """
        Parse </g> tags.
        """
        item = self.item_hier.pop()
        # the <title> inside the group supplied the handle
        item.title = self.handle

    def start_svg(self, attrs):
        """
        Parse <svg> tags.

        Sets the canvas bounds from the SVG viewBox.
        """
        GooCanvas.CanvasGroup(parent=self.current_parent())

        view_box = attrs.get('viewBox').split()
        v_left = float(view_box[0])
        v_top = float(view_box[1])
        v_right = float(view_box[2])
        v_bottom = float(view_box[3])
        self.canvas.set_bounds(v_left, v_top, v_right, v_bottom)
        self.bounds = (v_left, v_top, v_right, v_bottom)

    def stop_svg(self, tag):
        """
        Parse </svg> tags.  Nothing to do.
        """
        pass

    def start_title(self, attrs):
        """
        Parse <title> tags.  Nothing to do on open.
        """
        pass

    def stop_title(self, tag):
        """
        Parse </title> tags.
        Stripping off underscore prefix added to fool Graphviz.
        """
        self.handle = tag.lstrip("_")

    def start_polygon(self, attrs):
        """
        Parse <polygon> tags.
        Polygons define the boxes around individuals on the graph.
        """
        coord_string = attrs.get('points')
        coord_count = 5
        points = GooCanvas.CanvasPoints.new(coord_count)
        nnn = 0
        for i in coord_string.split():
            coord = i.split(",")
            coord_x = float(coord[0])
            coord_y = float(coord[1])
            points.set_point(nnn, coord_x, coord_y)
            nnn += 1
        style = attrs.get('style')
        if style:
            # old graphviz versions put colors into a style attribute
            p_style = self.parse_style(style)
            stroke_color = p_style['stroke']
            fill_color = p_style['fill']
        else:
            stroke_color = attrs.get('stroke')
            fill_color = attrs.get('fill')

        if self.handle == self.widget.active_person_handle:
            line_width = 3  # thick box
        else:
            line_width = 1  # thin box

        tooltip = self.view.tags_tooltips.get(self.handle)

        # highlight the home person
        # stroke_color is not '#...' when tags are drawing, so we check this
        # maybe this is not good solution to check for tags but it works
        if self.highlight_home_person and stroke_color[:1] == '#':
            home_person = self.widget.dbstate.db.get_default_person()
            if home_person and home_person.handle == self.handle:
                fill_color = self.home_person_color

        item = GooCanvas.CanvasPolyline(parent=self.current_parent(),
                                        points=points,
                                        close_path=True,
                                        fill_color=fill_color,
                                        line_width=line_width,
                                        stroke_color=stroke_color,
                                        tooltip=tooltip)
        # turn on tooltip show if have it
        if tooltip:
            item_canvas = item.get_canvas()
            item_canvas.set_has_tooltip(True)
        self.item_hier.append(item)

    def stop_polygon(self, _tag):
        """
        Parse </polygon> tags.
        """
        self.item_hier.pop()

    def start_ellipse(self, attrs):
        """
        Parse <ellipse> tags.
        These define the family nodes of the graph.
        """
        center_x = float(attrs.get('cx'))
        center_y = float(attrs.get('cy'))
        radius_x = float(attrs.get('rx'))
        radius_y = float(attrs.get('ry'))
        style = attrs.get('style')

        if style:
            p_style = self.parse_style(style)
            stroke_color = p_style['stroke']
            fill_color = p_style['fill']
        else:
            stroke_color = attrs.get('stroke')
            fill_color = attrs.get('fill')

        tooltip = self.view.tags_tooltips.get(self.handle)

        item = GooCanvas.CanvasEllipse(parent=self.current_parent(),
                                       center_x=center_x,
                                       center_y=center_y,
                                       radius_x=radius_x,
                                       radius_y=radius_y,
                                       fill_color=fill_color,
                                       stroke_color=stroke_color,
                                       line_width=1,
                                       tooltip=tooltip)
        if tooltip:
            item_canvas = item.get_canvas()
            item_canvas.set_has_tooltip(True)
        # mark the enclosing group as a family node for click handling
        self.current_parent().description = 'familynode'
        self.item_hier.append(item)

    def stop_ellipse(self, _tag):
        """
        Parse </ellipse> tags.
        """
        self.item_hier.pop()

    def start_path(self, attrs):
        """
        Parse <path> tags.
        These define the links between nodes.
        Solid lines represent birth relationships and dashed lines are used
        when a child has a non-birth relationship to a parent.
        """
        p_data = attrs.get('d')
        line_width = attrs.get('stroke-width')
        if line_width is None:
            line_width = 1
        line_width = float(line_width)
        style = attrs.get('style')

        if style:
            p_style = self.parse_style(style)
            stroke_color = p_style['stroke']
            is_dashed = 'stroke-dasharray' in p_style
        else:
            stroke_color = attrs.get('stroke')
            is_dashed = attrs.get('stroke-dasharray')

        if is_dashed:
            line_dash = GooCanvas.CanvasLineDash.newv([5.0, 5.0])
            item = GooCanvas.CanvasPath(parent=self.current_parent(),
                                        data=p_data,
                                        stroke_color=stroke_color,
                                        line_width=line_width,
                                        line_dash=line_dash)
        else:
            item = GooCanvas.CanvasPath(parent=self.current_parent(),
                                        data=p_data,
                                        stroke_color=stroke_color,
                                        line_width=line_width)
        self.item_hier.append(item)

    def stop_path(self, _tag):
        """
        Parse </path> tags.
        """
        self.item_hier.pop()

    def start_text(self, attrs):
        """
        Parse <text> tags.

        Only stores the attributes; the text itself arrives as character
        data and is handled in stop_text().
        """
        self.text_attrs = attrs

    def stop_text(self, tag):
        """
        Parse </text> tags.
        The text tag contains some textual data.
        """
        tag = escape(tag)
        pos_x = float(self.text_attrs.get('x'))
        pos_y = float(self.text_attrs.get('y'))
        anchor = self.text_attrs.get('text-anchor')
        style = self.text_attrs.get('style')

        # does the following always work with symbols?
        if style:
            p_style = self.parse_style(style)
            font_family = p_style['font-family']
            text_font = font_family + ", " + p_style['font-size'] + 'px'
        else:
            font_family = self.text_attrs.get('font-family')
            text_font = font_family + ", " + str(self.font_size) + 'px'

        font_desc = Pango.FontDescription.from_string(text_font)

        # set bold text using PangoMarkup
        if self.text_attrs.get('font-weight') == 'bold':
            tag = '<b>%s</b>' % tag

        # text color
        fill_color = self.text_attrs.get('fill')

        GooCanvas.CanvasText(parent=self.current_parent(),
                             text=tag,
                             x=pos_x,
                             y=pos_y,
                             anchor=self.text_anchor_map[anchor],
                             use_markup=True,
                             font_desc=font_desc,
                             fill_color=fill_color)

    def start_image(self, attrs):
        """
        Parse <image> tags.
        """
        pos_x = float(attrs.get('x'))
        pos_y = float(attrs.get('y'))
        # strip unit suffixes (e.g. "px") before converting
        width = float(attrs.get('width').rstrip(string.ascii_letters))
        height = float(attrs.get('height').rstrip(string.ascii_letters))
        pixbuf = GdkPixbuf.Pixbuf.new_from_file(attrs.get('xlink:href'))

        item = GooCanvas.CanvasImage(parent=self.current_parent(),
                                     x=pos_x,
                                     y=pos_y,
                                     height=height,
                                     width=width,
                                     pixbuf=pixbuf)
        self.item_hier.append(item)

    def stop_image(self, _tag):
        """
        Parse </image> tags.
        """
        self.item_hier.pop()

    def start_element(self, tag, attrs):
        """
        Generic parsing function for opening tags.
        """
        self.func_list.append((self.func, self.tlist))
        self.tlist = []

        try:
            start_function, self.func = self.func_map[tag]
            if start_function:
                start_function(attrs)
        except KeyError:
            # unknown tag: remember it so we don't retry, and ignore it
            self.func_map[tag] = (None, None)
            self.func = None

    def end_element(self, _tag):
        """
        Generic parsing function for closing tags.
        """
        if self.func:
            self.func(''.join(self.tlist))
        self.func, self.tlist = self.func_list.pop()

    def characters(self, data):
        """
        Generic parsing function for tag data.
        """
        if self.func:
            self.tlist.append(data)

    def current_parent(self):
        """
        Returns the Goocanvas object which should be the parent of any new
        Goocanvas objects.
        """
        return self.item_hier[len(self.item_hier) - 1]

    def parse_style(self, style):
        """
        Parse style attributes for Graphviz version < 2.24.

        :returns: dict mapping css property name to value
        """
        style = style.rstrip(';')
        return dict([i.split(':') for i in style.split(';')])
#------------------------------------------------------------------------
#
# DotSvgGenerator
#
#------------------------------------------------------------------------
class DotSvgGenerator(object):
"""
Generator of graphing instructions in dot format and svg data by Graphviz.
"""
    def __init__(self, dbstate, view, bold_size=0, norm_size=0):
        """
        Initialise the DotSvgGenerator class.

        :param dbstate: database state object
        :param view: the GraphView page (provides config and uistate)
        :param bold_size: font size for bold text to tell dot to use
                          (0 = use the configured font size)
        :param norm_size: font size for normal text to tell dot to use
        """
        self.bold_size = bold_size
        self.norm_size = norm_size
        self.dbstate = dbstate
        self.uistate = view.uistate
        self.database = dbstate.db
        self.view = view

        self.dot = None  # will be StringIO()

        # This dictionary contains person handle as the index and the value is
        # the number of families in which the person is a parent. From this
        # dictionary is obtained a list of person handles sorted in decreasing
        # value order which is used to keep multiple spouses positioned
        # together.
        self.person_handles_dict = {}
        self.person_handles = []

        # list of persons on path to home person
        self.current_list = list()
        self.home_person = None

        # Gtk style context for scrollwindow
        self.context = self.view.graph_widget.sw_style_context

        # font if we use genealogical symbols
        self.sym_font = None

        self.avatars = Avatars(self.view._config)
    def __del__(self):
        """
        Free stream file on destroy.
        """
        if self.dot:
            self.dot.close()
    def init_dot(self):
        """
        Init/reinit stream for dot file.
        Load and write config data to start of dot file.
        """
        if self.dot:
            self.dot.close()
        self.dot = StringIO()

        self.current_list.clear()
        self.person_handles_dict.clear()

        # cache the view settings used while generating the graph
        self.show_images = self.view._config.get(
            'interface.graphview-show-images')
        self.show_avatars = self.view._config.get(
            'interface.graphview-show-avatars')
        self.show_full_dates = self.view._config.get(
            'interface.graphview-show-full-dates')
        self.show_places = self.view._config.get(
            'interface.graphview-show-places')
        self.place_format = self.view._config.get(
            'interface.graphview-place-format') - 1
        self.show_tag_color = self.view._config.get(
            'interface.graphview-show-tags')
        spline = self.view._config.get('interface.graphview-show-lines')
        self.spline = SPLINE.get(int(spline))
        self.descendant_generations = self.view._config.get(
            'interface.graphview-descendant-generations')
        self.ancestor_generations = self.view._config.get(
            'interface.graphview-ancestor-generations')
        self.person_theme_index = self.view._config.get(
            'interface.graphview-person-theme')
        self.show_all_connected = self.view._config.get(
            'interface.graphview-show-all-connected')

        # spacing settings are stored as integers, used in tenths
        ranksep = self.view._config.get('interface.graphview-ranksep')
        ranksep = ranksep * 0.1
        nodesep = self.view._config.get('interface.graphview-nodesep')
        nodesep = nodesep * 0.1

        self.avatars.update_current_style()

        # get background color from gtk theme and convert it to hex
        # else use white background
        bg_color = self.context.lookup_color('theme_bg_color')
        if bg_color[0]:
            bg_rgb = (bg_color[1].red, bg_color[1].green, bg_color[1].blue)
            bg_color = rgb_to_hex(bg_rgb)
        else:
            bg_color = '#ffffff'

        # get font color from gtk theme and convert it to hex
        # else use black font
        font_color = self.context.lookup_color('theme_fg_color')
        if font_color[0]:
            fc_rgb = (font_color[1].red, font_color[1].green,
                      font_color[1].blue)
            font_color = rgb_to_hex(fc_rgb)
        else:
            font_color = '#000000'

        # get colors from config
        home_path_color = self.view._config.get(
            'interface.graphview-home-path-color')

        # set of colors
        self.colors = {'link_color': font_color,
                       'home_path_color': home_path_color}

        self.arrowheadstyle = 'none'
        self.arrowtailstyle = 'none'

        dpi = 72

        # use font from config if needed
        font = self.view._config.get('interface.graphview-font')
        fontfamily = self.resolve_font_name(font[0])
        self.fontsize = font[1]
        if not self.bold_size:
            self.bold_size = self.norm_size = font[1]

        pagedir = "BL"
        rankdir = "TB"
        ratio = "compress"
        # as we are not using paper,
        # choose a large 'page' size with no margin
        sizew = 100
        sizeh = 100
        xmargin = 0.00
        ymargin = 0.00

        # write the graphviz digraph preamble
        self.write('digraph GRAMPS_graph\n')
        self.write('{\n')
        self.write('  bgcolor="%s";\n' % bg_color)
        self.write('  center="false"; \n')
        self.write('  charset="utf8";\n')
        self.write('  concentrate="false";\n')
        self.write('  dpi="%d";\n' % dpi)
        self.write('  graph [fontsize=%3.1f];\n' % self.fontsize)
        self.write('  margin="%3.2f,%3.2f"; \n' % (xmargin, ymargin))
        self.write('  mclimit="99";\n')
        self.write('  nodesep="%.2f";\n' % nodesep)
        self.write('  outputorder="edgesfirst";\n')
        self.write('  pagedir="%s";\n' % pagedir)
        self.write('  rankdir="%s";\n' % rankdir)
        self.write('  ranksep="%.2f";\n' % ranksep)
        self.write('  ratio="%s";\n' % ratio)
        self.write('  searchsize="100";\n')
        self.write('  size="%3.2f,%3.2f"; \n' % (sizew, sizeh))
        self.write('  splines=%s;\n' % self.spline)
        self.write('\n')
        self.write('  edge [style=solid fontsize=%d];\n' % self.fontsize)

        if fontfamily:
            self.write('  node [style=filled fontname="%s" '
                       'fontsize=%3.1f fontcolor="%s"];\n'
                       % (fontfamily, self.norm_size, font_color))
        else:
            self.write('  node [style=filled fontsize=%3.1f fontcolor="%s"];\n'
                       % (self.norm_size, font_color))
        self.write('\n')
        # NOTE(review): this connect happens on every init_dot() call —
        # it may accumulate duplicate 'font-changed' handlers; verify
        # whether uistate.connect deduplicates.
        self.uistate.connect('font-changed', self.font_changed)
        self.symbols = Symbols()
        self.font_changed()
def resolve_font_name(self, font_name):
"""
Helps to resolve font by graphviz.
"""
# Sometimes graphviz have problem with font resolving.
font_family_map = {"Times New Roman": "Times",
"Times Roman": "Times",
"Times-Roman": "Times",
}
font = font_family_map.get(font_name)
if font is None:
font = font_name
return font
    def font_changed(self):
        """
        Refresh the birth/death symbols using the currently selected
        genealogical-symbols font.
        """
        dth_idx = self.uistate.death_symbol
        if self.uistate.symbols:
            self.bth = self.symbols.get_symbol_for_string(
                self.symbols.SYMBOL_BIRTH)
            self.dth = self.symbols.get_death_symbol_for_char(dth_idx)
        else:
            # symbols disabled: use plain-text fallbacks
            self.bth = self.symbols.get_symbol_fallback(
                self.symbols.SYMBOL_BIRTH)
            self.dth = self.symbols.get_death_symbol_fallback(dth_idx)
        # make sure to display in selected symbols font
        self.sym_font = config.get('utf8.selected-font')
        self.bth = '<FONT FACE="%s">%s</FONT>' % (self.sym_font, self.bth)
        self.dth = '<FONT FACE="%s">%s</FONT>' % (self.sym_font, self.dth)
    def build_graph(self, active_person):
        """
        Build a GraphViz tree based on the active person.

        :param active_person: handle of the person to build the graph for
        :returns: tuple (dot_data, svg_data) -- utf-8 encoded DOT source
                  and the SVG rendered from it by Graphviz
        """
        # reinit dot file stream (write starting graphviz dot code to file)
        self.init_dot()
        if active_person:
            self.home_person = self.dbstate.db.get_default_person()
            # record the path between active and home person in both
            # directions; used later for bold "home path" highlighting
            self.set_current_list(active_person)
            self.set_current_list_desc(active_person)
            if self.show_all_connected:
                self.person_handles_dict.update(
                    self.find_connected(active_person))
            else:
                self.person_handles_dict.update(
                    self.find_descendants(active_person))
                self.person_handles_dict.update(
                    self.find_ancestors(active_person))
            if self.person_handles_dict:
                # sort so people with the most families come first -- see
                # add_persons_and_families for why the order matters
                self.person_handles = sorted(
                    self.person_handles_dict,
                    key=self.person_handles_dict.__getitem__,
                    reverse=True)
                self.add_persons_and_families()
                self.add_child_links_to_families()
        # close the graphviz dot code with a brace
        self.write('}\n')
        # get DOT and generate SVG data by Graphviz
        dot_data = self.dot.getvalue().encode('utf8')
        svg_data = self.make_svg(dot_data)
        return (dot_data, svg_data)
def make_svg(self, dot_data):
"""
Make SVG data by Graphviz.
"""
if win():
svg_data = Popen(['dot', '-Tsvg'],
creationflags=DETACHED_PROCESS,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE).communicate(input=dot_data)[0]
else:
svg_data = Popen(['dot', '-Tsvg'],
stdin=PIPE,
stdout=PIPE).communicate(input=dot_data)[0]
return svg_data
    def set_current_list(self, active_person, recurs_list=None):
        """
        Get the path from the active person to the home person.
        Select ancestors.

        Walks up through the parent families of active_person; when a
        branch reaches the home person, each person/family handle on the
        way back up is appended to self.current_list.

        :param active_person: person handle to start from
        :param recurs_list: set of visited handles (recursion guard)
        :returns: True when a path to the home person was found
        """
        if not active_person:
            return False
        person = self.database.get_person_from_handle(active_person)
        if recurs_list is None:
            recurs_list = set()  # make a recursion check list (actually a set)
        # see if we have a recursion (database loop)
        elif active_person in recurs_list:
            logging.warning(_("Relationship loop detected"))
            return False
        recurs_list.add(active_person)  # record where we have been for check
        if person == self.home_person:
            self.current_list.append(active_person)
            return True
        else:
            # try the father's line first, then the mother's
            for fam_handle in person.get_parent_family_handle_list():
                family = self.database.get_family_from_handle(fam_handle)
                if self.set_current_list(family.get_father_handle(),
                                         recurs_list=recurs_list):
                    self.current_list.append(active_person)
                    self.current_list.append(fam_handle)
                    return True
                if self.set_current_list(family.get_mother_handle(),
                                         recurs_list=recurs_list):
                    self.current_list.append(active_person)
                    self.current_list.append(fam_handle)
                    return True
        return False
    def set_current_list_desc(self, active_person, recurs_list=None):
        """
        Get the path from the active person to the home person.
        Select children.

        Descendant counterpart of set_current_list: walks down through the
        children of active_person's own families, recording the path
        handles in self.current_list once the home person is reached.

        :param active_person: person handle to start from
        :param recurs_list: set of visited handles (recursion guard)
        :returns: True when a path to the home person was found
        """
        if not active_person:
            return False
        person = self.database.get_person_from_handle(active_person)
        if recurs_list is None:
            recurs_list = set()  # make a recursion check list (actually a set)
        # see if we have a recursion (database loop)
        elif active_person in recurs_list:
            logging.warning(_("Relationship loop detected"))
            return False
        recurs_list.add(active_person)  # record where we have been for check
        if person == self.home_person:
            self.current_list.append(active_person)
            return True
        else:
            for fam_handle in person.get_family_handle_list():
                family = self.database.get_family_from_handle(fam_handle)
                for child in family.get_child_ref_list():
                    if self.set_current_list_desc(child.ref,
                                                  recurs_list=recurs_list):
                        self.current_list.append(active_person)
                        self.current_list.append(fam_handle)
                        return True
        return False
def find_connected(self, active_person):
"""
Spider the database from the active person.
"""
person = self.database.get_person_from_handle(active_person)
person_handles = {}
self.add_connected(person, self.descendant_generations,
self.ancestor_generations, person_handles)
return person_handles
def add_connected(self, person, num_desc, num_anc, person_handles):
"""
Include all connected to active in the list of people to graph.
Recursive algorithm is not used becasue some trees have been found
that exceed the standard python recursive depth.
"""
# list of work to do, handles with generation delta,
# add to right and pop from left
todo = deque([(person, 0)])
while todo:
# check for person count
if len(person_handles) > 1000:
w_msg = _("You try to build graph containing more then 1000 "
"persons. Not all persons will be shown in the graph."
)
WarningDialog(_("Incomplete graph"), w_msg)
return
person, delta_gen = todo.popleft()
if not person:
continue
# check generation restrictions
if (delta_gen > num_desc) or (delta_gen < -num_anc):
continue
# check if handle is not already processed
if person.handle not in person_handles:
spouses_list = person.get_family_handle_list()
person_handles[person.handle] = len(spouses_list)
else:
continue
# add descendants
for family_handle in spouses_list:
family = self.database.get_family_from_handle(family_handle)
# add every child recursively
if num_desc >= (delta_gen + 1): # generation restriction
for child_ref in family.get_child_ref_list():
if (child_ref.ref in person_handles
or child_ref.ref in todo):
continue
todo.append(
(self.database.get_person_from_handle(child_ref.ref),
delta_gen+1))
# add person spouses
for sp_handle in (family.get_father_handle(),
family.get_mother_handle()):
if sp_handle and (sp_handle not in person_handles
and sp_handle not in todo):
todo.append(
(self.database.get_person_from_handle(sp_handle),
delta_gen))
# add ancestors
if -num_anc <= (delta_gen - 1): # generation restriction
for family_handle in person.get_parent_family_handle_list():
family = self.database.get_family_from_handle(family_handle)
# add every ancestor's spouses
for sp_handle in (family.get_father_handle(),
family.get_mother_handle()):
if sp_handle and (sp_handle not in person_handles
and sp_handle not in todo):
todo.append(
(self.database.get_person_from_handle(sp_handle),
delta_gen-1))
def find_descendants(self, active_person):
"""
Spider the database from the active person.
"""
person = self.database.get_person_from_handle(active_person)
person_handles = {}
self.add_descendant(person, self.descendant_generations,
person_handles)
return person_handles
def add_descendant(self, person, num_generations, person_handles):
"""
Include a descendant in the list of people to graph.
"""
if not person:
return
# check if handle is not already processed
# and add self and spouses
if person.handle not in person_handles:
spouses_list = person.get_family_handle_list()
person_handles[person.handle] = len(spouses_list)
self.add_spouses(person, person_handles)
else:
return
if num_generations <= 0:
return
# add every child recursively
for family_handle in spouses_list:
family = self.database.get_family_from_handle(family_handle)
for child_ref in family.get_child_ref_list():
self.add_descendant(
self.database.get_person_from_handle(child_ref.ref),
num_generations - 1, person_handles)
def add_spouses(self, person, person_handles):
"""
Add spouses to the list.
"""
if not person:
return
for family_handle in person.get_family_handle_list():
sp_family = self.database.get_family_from_handle(family_handle)
for sp_handle in (sp_family.get_father_handle(),
sp_family.get_mother_handle()):
if sp_handle and sp_handle not in person_handles:
# add only spouse (num_generations = 0)
self.add_descendant(
self.database.get_person_from_handle(sp_handle),
0, person_handles)
def find_ancestors(self, active_person):
"""
Spider the database from the active person.
"""
person = self.database.get_person_from_handle(active_person)
person_handles = {}
self.add_ancestor(person, self.ancestor_generations, person_handles)
return person_handles
    def add_ancestor(self, person, num_generations, person_handles):
        """
        Include an ancestor in the list of people to graph.

        :param person: Person object (may be None)
        :param num_generations: remaining ancestor generations to climb
        :param person_handles: dict {handle: family count} filled in place
        """
        if not person:
            return
        # add self if handle is not already processed
        if person.handle not in person_handles:
            person_handles[person.handle] = len(person.get_family_handle_list())
        else:
            return
        if num_generations <= 0:
            return
        for family_handle in person.get_parent_family_handle_list():
            family = self.database.get_family_from_handle(family_handle)
            # add parents
            # NOTE: only parents not yet in person_handles are collected
            # into sp_persons, so add_spouses below runs only for newly
            # added parents
            sp_persons = []
            for sp_handle in (family.get_father_handle(),
                              family.get_mother_handle()):
                if sp_handle and sp_handle not in person_handles:
                    sp_person = self.database.get_person_from_handle(sp_handle)
                    self.add_ancestor(sp_person,
                                      num_generations - 1,
                                      person_handles)
                    sp_persons.append(sp_person)
            # add every other spouses for parents
            for sp_person in sp_persons:
                self.add_spouses(sp_person, person_handles)
def add_child_links_to_families(self):
"""
Returns string of GraphViz edges linking parents to families or
children.
"""
for person_handle in self.person_handles:
person = self.database.get_person_from_handle(person_handle)
for fam_handle in person.get_parent_family_handle_list():
family = self.database.get_family_from_handle(fam_handle)
father_handle = family.get_father_handle()
mother_handle = family.get_mother_handle()
for child_ref in family.get_child_ref_list():
if child_ref.ref == person_handle:
frel = child_ref.frel
mrel = child_ref.mrel
break
if((father_handle in self.person_handles) or
(mother_handle in self.person_handles)):
# link to the family node if either parent is in graph
self.add_family_link(person_handle, family, frel, mrel)
def add_family_link(self, p_id, family, frel, mrel):
"""
Links the child to a family.
"""
style = 'solid'
adopted = ((int(frel) != ChildRefType.BIRTH) or
(int(mrel) != ChildRefType.BIRTH))
# if birth relation to father is NONE, meaning there is no father and
# if birth relation to mother is BIRTH then solid line
if((int(frel) == ChildRefType.NONE) and
(int(mrel) == ChildRefType.BIRTH)):
adopted = False
if adopted:
style = 'dotted'
self.add_link(family.handle, p_id, style,
self.arrowheadstyle, self.arrowtailstyle,
color=self.colors['home_path_color'],
bold=self.is_in_path_to_home(p_id))
def add_parent_link(self, p_id, parent_handle, rel):
"""
Links the child to a parent.
"""
style = 'solid'
if int(rel) != ChildRefType.BIRTH:
style = 'dotted'
self.add_link(parent_handle, p_id, style,
self.arrowheadstyle, self.arrowtailstyle,
color=self.colors['home_path_color'],
bold=self.is_in_path_to_home(p_id))
    def add_persons_and_families(self):
        """
        Adds nodes for persons and their families.

        Subgraphs are used to indicate to Graphviz that parents of families
        should be positioned together. The person_handles list is sorted so
        that people with the largest number of spouses are at the start of
        the list. As families are only processed once, this means people
        with multiple spouses will have their additional spouses included
        in their subgraph.
        """
        # variable to communicate with get_person_label
        url = ""
        # The list of families for which we have output the node,
        # so we don't do it twice
        # use set() as it little faster then list()
        family_nodes_done = set()
        family_links_done = set()
        for person_handle in self.person_handles:
            person = self.database.get_person_from_handle(person_handle)
            # Output the person's node
            label = self.get_person_label(person)
            (shape, style, color, fill) = self.get_gender_style(person)
            self.add_node(person_handle, label, shape, color, style, fill, url)
            # Output family nodes where person is a parent
            family_list = person.get_family_handle_list()
            for fam_handle in family_list:
                if fam_handle not in family_nodes_done:
                    family_nodes_done.add(fam_handle)
                    self.__add_family_node(fam_handle)
            # Output family links where person is a parent; the subgraph
            # is only opened lazily, when the first unprocessed family
            # link for this person is found
            subgraph_started = False
            family_list = person.get_family_handle_list()
            for fam_handle in family_list:
                if fam_handle not in family_links_done:
                    family_links_done.add(fam_handle)
                    if not subgraph_started:
                        subgraph_started = True
                        self.start_subgraph(person_handle)
                    self.__add_family_links(fam_handle)
            if subgraph_started:
                self.end_subgraph()
def is_in_path_to_home(self, f_handle):
"""
Is the current person in the path to the home person?
"""
if f_handle in self.current_list:
return True
return False
def __add_family_node(self, fam_handle):
"""
Add a node for a family.
"""
fam = self.database.get_family_from_handle(fam_handle)
fill, color = color_graph_family(fam, self.dbstate)
style = "filled"
label = self.get_family_label(fam)
self.add_node(fam_handle, label, "ellipse", color, style, fill)
def __add_family_links(self, fam_handle):
"""
Add the links for spouses.
"""
fam = self.database.get_family_from_handle(fam_handle)
f_handle = fam.get_father_handle()
m_handle = fam.get_mother_handle()
if f_handle in self.person_handles:
self.add_link(f_handle,
fam_handle, "",
self.arrowheadstyle,
self.arrowtailstyle,
color=self.colors['home_path_color'],
bold=self.is_in_path_to_home(f_handle))
if m_handle in self.person_handles:
self.add_link(m_handle,
fam_handle, "",
self.arrowheadstyle,
self.arrowtailstyle,
color=self.colors['home_path_color'],
bold=self.is_in_path_to_home(m_handle))
def get_gender_style(self, person):
"""
Return gender specific person style.
"""
gender = person.get_gender()
shape = "box"
style = "solid, filled"
# get alive status of person to get box color
try:
alive = probably_alive(person, self.dbstate.db)
except RuntimeError:
alive = False
fill, color = color_graph_box(alive, gender)
return(shape, style, color, fill)
def get_tags_and_table(self, obj):
"""
Return html tags table for obj (person or family).
"""
tag_table = ''
tags = []
for tag_handle in obj.get_tag_list():
tags.append(self.dbstate.db.get_tag_from_handle(tag_handle))
# prepare html table of tags
if tags:
tag_table = ('<TABLE BORDER="0" CELLBORDER="0" '
'CELLPADDING="5"><TR>')
for tag in tags:
rgba = Gdk.RGBA()
rgba.parse(tag.get_color())
value = '#%02x%02x%02x' % (int(rgba.red * 255),
int(rgba.green * 255),
int(rgba.blue * 255))
tag_table += '<TD BGCOLOR="%s"></TD>' % value
tag_table += '</TR></TABLE>'
return tags, tag_table
    def get_person_themes(self, index=-1):
        """
        Person themes: Graphviz HTML-like label templates.

        Each theme is a tuple (id, translated name, template string); the
        template is filled with the dict built in get_person_label.
        If index == -1 return list of themes.
        If index out of range return default theme.
        """
        person_themes = [
            (0, _('Default'),
             '<TABLE '
             'BORDER="0" CELLSPACING="2" CELLPADDING="0" CELLBORDER="0">'
             '<TR><TD>%(img)s</TD></TR>'
             '<TR><TD><FONT POINT-SIZE="%(bsize)3.1f"><B>%(name)s</B>'
             '</FONT></TD></TR>'
             '<TR><TD ALIGN="LEFT">%(birth_str)s</TD></TR>'
             '<TR><TD ALIGN="LEFT">%(death_str)s</TD></TR>'
             '<TR><TD>%(tags)s</TD></TR>'
             '</TABLE>'
            ),
            (1, _('Image on right side'),
             '<TABLE '
             'BORDER="0" CELLSPACING="5" CELLPADDING="0" CELLBORDER="0">'
             '<tr>'
             '<td colspan="2"><FONT POINT-SIZE="%(bsize)3.1f"><B>%(name)s'
             '</B></FONT></td>'
             '</tr>'
             '<tr>'
             '<td ALIGN="LEFT" BALIGN="LEFT" CELLPADDING="5">%(birth_wraped)s'
             '</td>'
             '<td rowspan="2">%(img)s</td>'
             '</tr>'
             '<tr>'
             '<td ALIGN="LEFT" BALIGN="LEFT" CELLPADDING="5">%(death_wraped)s'
             '</td>'
             '</tr>'
             '<tr>'
             ' <td colspan="2">%(tags)s</td>'
             '</tr>'
             '</TABLE>'
            ),
            (2, _('Image on left side'),
             '<TABLE '
             'BORDER="0" CELLSPACING="5" CELLPADDING="0" CELLBORDER="0">'
             '<tr>'
             '<td colspan="2"><FONT POINT-SIZE="%(bsize)3.1f"><B>%(name)s'
             '</B></FONT></td>'
             '</tr>'
             '<tr>'
             '<td rowspan="2">%(img)s</td>'
             '<td ALIGN="LEFT" BALIGN="LEFT" CELLPADDING="5">%(birth_wraped)s'
             '</td>'
             '</tr>'
             '<tr>'
             '<td ALIGN="LEFT" BALIGN="LEFT" CELLPADDING="5">%(death_wraped)s'
             '</td>'
             '</tr>'
             '<tr>'
             ' <td colspan="2">%(tags)s</td>'
             '</tr>'
             '</TABLE>'
            ),
            (3, _('Normal'),
             '<TABLE '
             'BORDER="0" CELLSPACING="2" CELLPADDING="0" CELLBORDER="0">'
             '<TR><TD>%(img)s</TD></TR>'
             '<TR><TD><FONT POINT-SIZE="%(bsize)3.1f"><B>%(name)s'
             '</B></FONT></TD></TR>'
             '<TR><TD ALIGN="LEFT" BALIGN="LEFT">%(birth_wraped)s</TD></TR>'
             '<TR><TD ALIGN="LEFT" BALIGN="LEFT">%(death_wraped)s</TD></TR>'
             '<TR><TD>%(tags)s</TD></TR>'
             '</TABLE>'
            )]
        if index < 0:
            return person_themes
        if index < len(person_themes):
            return person_themes[index]
        else:
            return person_themes[0]
    def get_person_label(self, person):
        """
        Return person label string (with tags).

        Builds the Graphviz HTML-like label: image/avatar, escaped name,
        birth/death strings (full or compact form), tag table, then fills
        the theme template chosen by self.person_theme_index.
        """
        # Start an HTML table.
        # Remember to close the table afterwards!
        #
        # This isn't a free-form HTML format here...just a few keywords that
        # happen to be similar to keywords commonly seen in HTML.
        # For additional information on what is allowed, see:
        #
        # http://www.graphviz.org/info/shapes.html#html
        #
        # Will use html.escape to avoid '&', '<', '>' in the strings.
        # FIRST get all strings: img, name, dates, tags
        # see if we have an image to use for this person
        image = ''
        if self.show_images:
            image = self.view.graph_widget.get_person_image(person,
                                                            kind='path')
            if not image and self.show_avatars:
                image = self.avatars.get_avatar(gender=person.gender)
            if image is not None:
                image = '<IMG SRC="%s"/>' % image
            else:
                image = ''
        # get the person's name
        name = displayer.display_name(person.get_primary_name())
        # name string should not be empty
        name = escape(name) if name else ' '
        # birth, death is a lists [date, place]
        birth, death = self.get_date_strings(person)
        birth_str = ''
        death_str = ''
        birth_wraped = ''
        death_wraped = ''
        # There are two ways of displaying dates:
        # 1) full and on two lines:
        # b. 1890-12-31 - BirthPlace
        # d. 1960-01-02 - DeathPlace
        if self.show_full_dates or self.show_places:
            # add symbols
            if birth[0]:
                birth[0] = '%s %s' % (self.bth, birth[0])
                birth_wraped = birth[0]
                birth_str = birth[0]
                if birth[1]:
                    # wrapped form breaks before the place
                    birth_wraped += '<BR/>'
                    birth_str += ' '
            elif birth[1]:
                # only a place is known: symbol + place
                birth_wraped = _('%s ') % self.bth
                birth_str = _('%s ') % self.bth
            birth_wraped += birth[1]
            birth_str += birth[1]
            if death[0]:
                death[0] = '%s %s' % (self.dth, death[0])
                death_wraped = death[0]
                death_str = death[0]
                if death[1]:
                    death_wraped += '<BR/>'
                    death_str += ' '
            elif death[1]:
                death_wraped = _('%s ') % self.dth
                death_str = _('%s ') % self.dth
            death_wraped += death[1]
            death_str += death[1]
        # 2) simple and on one line:
        # (1890 - 1960)
        else:
            if birth[0] or death[0]:
                birth_str = '(%s - %s)' % (birth[0], death[0])
            # add symbols
            if image:
                if birth[0]:
                    birth_wraped = '%s %s' % (self.bth, birth[0])
                if death[0]:
                    death_wraped = '%s %s' % (self.dth, death[0])
            else:
                birth_wraped = birth_str
        # get tags table for person and add tooltip for node
        tag_table = ''
        if self.show_tag_color:
            tags, tag_table = self.get_tags_and_table(person)
            if tag_table:
                self.add_tags_tooltip(person.handle, tags)
        # apply theme to person label
        if(image or self.person_theme_index == 0 or
           self.person_theme_index == 3):
            p_theme = self.get_person_themes(self.person_theme_index)
        else:
            # use default theme if no image
            p_theme = self.get_person_themes(3)
        # p_theme[2] is the HTML template -- see get_person_themes
        label = p_theme[2] % {'img': image,
                              'name': name,
                              'birth_str': birth_str,
                              'death_str': death_str,
                              'birth_wraped': birth_wraped,
                              'death_wraped': death_wraped,
                              'tags': tag_table,
                              'bsize' : self.bold_size}
        return label
    def get_family_label(self, family):
        """
        Return family label string (with tags).

        The label shows the marriage event date/place (if any) and,
        optionally, a table of the family's tag colors.
        """
        # start main html table
        label = ('<TABLE '
                 'BORDER="0" CELLSPACING="2" CELLPADDING="0" CELLBORDER="0">')
        # add dates strtings to table
        event_str = ['', '']
        # pick the first marriage event where the event role is FAMILY
        # or PRIMARY
        for event_ref in family.get_event_ref_list():
            event = self.database.get_event_from_handle(event_ref.ref)
            if (event.type == EventType.MARRIAGE and
                    (event_ref.get_role() == EventRoleType.FAMILY or
                     event_ref.get_role() == EventRoleType.PRIMARY)):
                event_str = self.get_event_string(event)
                break
        # collapse [date, place] into a single display string
        if event_str[0] and event_str[1]:
            event_str = '%s<BR/>%s' % (event_str[0], event_str[1])
        elif event_str[0]:
            event_str = event_str[0]
        elif event_str[1]:
            event_str = event_str[1]
        else:
            event_str = ''
        label += '<TR><TD>%s</TD></TR>' % event_str
        # add tags table for family and add tooltip for node
        if self.show_tag_color:
            tags, tag_table = self.get_tags_and_table(family)
            if tag_table:
                label += '<TR><TD>%s</TD></TR>' % tag_table
                self.add_tags_tooltip(family.handle, tags)
        # close main table
        label += '</TABLE>'
        return label
def get_date_strings(self, person):
"""
Returns tuple of birth/christening and death/burying date strings.
"""
birth_event = get_birth_or_fallback(self.database, person)
if birth_event:
birth = self.get_event_string(birth_event)
else:
birth = ['', '']
death_event = get_death_or_fallback(self.database, person)
if death_event:
death = self.get_event_string(death_event)
else:
death = ['', '']
return (birth, death)
    def get_event_string(self, event):
        """
        Return string for an event label.

        Based on the data availability and preferences, we select one
        of the following for a given event:
        year only
        complete date
        place name
        empty string

        :returns: list [date_string, place_string], either may be empty
        """
        if event:
            place_title = place_displayer.display_event(self.database, event,
                                                        fmt=self.place_format)
            date_object = event.get_date_object()
            date = ''
            place = ''
            # shall we display full date
            # or do we have a valid year to display only year
            # NOTE: 'and' binds tighter than 'or' here, so this reads as
            # (show_full_dates and get_text()) or get_year_valid()
            if(self.show_full_dates and date_object.get_text() or
               date_object.get_year_valid()):
                if self.show_full_dates:
                    date = '%s' % datehandler.get_date(event)
                else:
                    date = '%i' % date_object.get_year()
                # shall we add the place?
                if self.show_places and place_title:
                    place = place_title
                return [escape(date), escape(place)]
            else:
                if place_title and self.show_places:
                    return ['', escape(place_title)]
        return ['', '']
def add_link(self, id1, id2, style="", head="", tail="", comment="",
bold=False, color=""):
"""
Add a link between two nodes.
Gramps handles are used as nodes but need to be prefixed
with an underscore because Graphviz does not like IDs
that begin with a number.
"""
self.write(' _%s -> _%s' % (id1, id2))
boldok = False
if id1 in self.current_list:
if id2 in self.current_list:
boldok = True
self.write(' [')
if style:
self.write(' style=%s' % style)
if head:
self.write(' arrowhead=%s' % head)
if tail:
self.write(' arrowtail=%s' % tail)
if bold and boldok:
self.write(' penwidth=%d' % 5)
if color:
self.write(' color="%s"' % color)
else:
# if not path to home than set default color of link
self.write(' color="%s"' % self.colors['link_color'])
self.write(' ]')
self.write(';')
if comment:
self.write(' // %s' % comment)
self.write('\n')
def add_node(self, node_id, label, shape="", color="",
style="", fillcolor="", url="", fontsize=""):
"""
Add a node to this graph.
Nodes can be different shapes like boxes and circles.
Gramps handles are used as nodes but need to be prefixed with an
underscore because Graphviz does not like IDs that begin with a number.
"""
text = '[margin="0.11,0.08"'
if shape:
text += ' shape="%s"' % shape
if color:
text += ' color="%s"' % color
if fillcolor:
color = hex_to_rgb_float(fillcolor)
yiq = (color[0] * 299 + color[1] * 587 + color[2] * 114)
fontcolor = "#ffffff" if yiq < 500 else "#000000"
text += ' fillcolor="%s" fontcolor="%s"' % (fillcolor, fontcolor)
if style:
text += ' style="%s"' % style
if fontsize:
text += ' fontsize="%s"' % fontsize
# note that we always output a label -- even if an empty string --
# otherwise GraphViz uses the node ID as the label which is unlikely
# to be what the user wants to see in the graph
text += ' label=<%s>' % label
if url:
text += ' URL="%s"' % url
text += " ]"
self.write(' _%s %s;\n' % (node_id, text))
def add_tags_tooltip(self, handle, tag_list):
"""
Add tooltip to dict {handle, tooltip}.
"""
tooltip_str = _('<b>Tags:</b>')
for tag in tag_list:
tooltip_str += ('\n<span background="%s"> </span> - %s'
% (tag.get_color(), tag.get_name()))
self.view.tags_tooltips[handle] = tooltip_str
def start_subgraph(self, graph_id):
"""
Opens a subgraph which is used to keep together related nodes
on the graph.
"""
self.write('\n subgraph cluster_%s\n' % graph_id)
self.write(' {\n')
# no border around subgraph (#0002176)
self.write(' style="invis";\n')
def end_subgraph(self):
"""
Closes a subgraph section.
"""
self.write(' }\n\n')
def write(self, text):
"""
Write text to the dot file.
"""
if self.dot:
self.dot.write(text)
#-------------------------------------------------------------------------
#
# CanvasAnimation
#
#-------------------------------------------------------------------------
class CanvasAnimation(object):
    """
    Produce animation for operations with canvas: smooth scrolling to a
    node and "shaking" a node to draw attention to it.
    """

    def __init__(self, view, canvas, scroll_window):
        """
        We need canvas and window in which it placed.
        And view to get config.
        """
        self.view = view
        self.canvas = canvas
        self.hadjustment = scroll_window.get_hadjustment()
        self.vadjustment = scroll_window.get_vadjustment()
        self.items_list = []
        self.in_motion = False
        self.max_count = self.view._config.get(
            'interface.graphview-animation-count')
        self.max_count = self.max_count * 2  # must be modulo 2
        self.show_animation = self.view._config.get(
            'interface.graphview-show-animation')
        # delay between steps in microseconds
        self.speed = self.view._config.get(
            'interface.graphview-animation-speed')
        self.speed = 50 * int(self.speed)
        # length of step
        self.step_len = 10
        # separated counter and direction of shaking
        # for each item that in shake procedure
        self.counter = {}
        self.shake = {}
        self.in_shake = []

    def update_items(self, items_list):
        """
        Update list of items for current graph.
        """
        self.items_list.clear()
        self.items_list.extend(items_list)
        self.in_shake.clear()
        # clear counters and shakes - items not exists anymore
        self.counter.clear()
        self.shake.clear()

    def stop_animation(self):
        """
        Stop move_to animation.
        And wait while thread is finished.
        """
        self.in_motion = False
        try:
            self.thread.join()
        except (AttributeError, RuntimeError):
            # AttributeError: no animation thread was ever started;
            # RuntimeError: thread not started or joining current thread
            pass

    def stop_shake_animation(self, item, stoped):
        """
        Processing of 'animation-finished' signal.
        Stop or keep shaking item depending on counter for item.
        """
        counter = self.counter.get(item.title)
        shake = self.shake.get(item.title)
        if (not stoped) and counter and shake and counter < self.max_count:
            # reverse direction and keep going for another step
            self.shake[item.title] = (-1) * self.shake[item.title]
            self.counter[item.title] += 1
            item.animate(0, self.shake[item.title], 1, 0, False,
                         self.speed, 10, 0)
        else:
            item.disconnect_by_func(self.stop_shake_animation)
            try:
                self.counter.pop(item.title)
                self.shake.pop(item.title)
            except KeyError:
                # entries may already be gone (e.g. after update_items)
                pass

    def shake_person(self, person_handle):
        """
        Shake person node to help to see it.
        Use build-in function of CanvasItem.
        """
        item = self.get_item_by_title(person_handle)
        if item:
            self.shake_item(item)

    def shake_item(self, item):
        """
        Shake item to help to see it.
        Use build-in function of CanvasItem.
        """
        if item and self.show_animation and self.max_count > 0:
            if not self.counter.get(item.title):
                self.in_shake.append(item)
                self.counter[item.title] = 1
                self.shake[item.title] = 10
                item.connect('animation-finished', self.stop_shake_animation)
                item.animate(0, self.shake[item.title], 1, 0, False,
                             self.speed, 10, 0)

    def get_item_by_title(self, handle):
        """
        Find item by title.
        """
        if handle:
            for item in self.items_list:
                if item.title == handle:
                    return item
        return None

    def move_to_person(self, handle, animated):
        """
        Move graph to specified person by handle.

        :returns: True when the person's item was found
        """
        self.stop_animation()
        item = self.get_item_by_title(handle)
        if item:
            bounds = item.get_bounds()
            # calculate middle of node coordinates
            xxx = (bounds.x2 - (bounds.x2 - bounds.x1) / 2)
            yyy = (bounds.y1 - (bounds.y1 - bounds.y2) / 2)
            self.move_to(item, (xxx, yyy), animated)
            return True
        return False

    def get_trace_to(self, destination):
        """
        Return next point to destination from current position.
        """
        # get current position (left-top corner) with scale
        start_x = self.hadjustment.get_value() / self.canvas.get_scale()
        start_y = self.vadjustment.get_value() / self.canvas.get_scale()
        x_delta = destination[0] - start_x
        y_delta = destination[1] - start_y
        # calculate step count depending on length of the trace
        trace_len = sqrt(pow(x_delta, 2) + pow(y_delta, 2))
        steps_count = int(trace_len / self.step_len * self.canvas.get_scale())
        # prevent division by 0
        if steps_count > 0:
            x_step = x_delta / steps_count
            y_step = y_delta / steps_count
            point = (start_x + x_step, start_y + y_step)
        else:
            point = destination
        return point

    def scroll_canvas(self, point):
        """
        Scroll window to point on canvas.
        """
        self.canvas.scroll_to(point[0], point[1])

    def animation(self, item, destination):
        """
        Animate scrolling to destination point in thread.
        Dynamically get points to destination one by one
        and try to scroll to them.
        """
        self.in_motion = True
        while self.in_motion:
            # correct destination to window centre
            h_offset = self.hadjustment.get_page_size() / 2
            v_offset = self.vadjustment.get_page_size() / 3
            # apply the scaling factor so the offset is adjusted to the scale
            h_offset = h_offset / self.canvas.get_scale()
            v_offset = v_offset / self.canvas.get_scale()
            dest = (destination[0] - h_offset,
                    destination[1] - v_offset)
            # get maximum scroll of window
            max_scroll_x = ((self.hadjustment.get_upper() -
                             self.hadjustment.get_page_size()) /
                            self.canvas.get_scale())
            max_scroll_y = ((self.vadjustment.get_upper() -
                             self.vadjustment.get_page_size()) /
                            self.canvas.get_scale())
            # fix destination to fit in max scroll
            if dest[0] > max_scroll_x:
                dest = (max_scroll_x, dest[1])
            if dest[0] < 0:
                dest = (0, dest[1])
            if dest[1] > max_scroll_y:
                dest = (dest[0], max_scroll_y)
            if dest[1] < 0:
                dest = (dest[0], 0)
            cur_pos = (self.hadjustment.get_value() / self.canvas.get_scale(),
                       self.vadjustment.get_value() / self.canvas.get_scale())
            # finish if we already at destination
            if dest == cur_pos:
                break
            # get next point to destination
            point = self.get_trace_to(dest)
            GLib.idle_add(self.scroll_canvas, point)
            GLib.usleep(20 * self.speed)
            # finish if we try to goto destination point
            if point == dest:
                break
        self.in_motion = False
        # shake item after scroll to it
        self.shake_item(item)

    def move_to(self, item, destination, animated):
        """
        Move graph to specified position.
        If 'animated' is True then movement will be animated.
        It works with 'canvas.scroll_to' in thread.
        """
        # if animated is True than run thread with animation
        # else - just scroll_to immediately
        if animated and self.show_animation:
            self.thread = Thread(target=self.animation,
                                 args=[item, destination])
            self.thread.start()
        else:
            # correct destination to screen centre
            h_offset = self.hadjustment.get_page_size() / 2
            v_offset = self.vadjustment.get_page_size() / 3
            # apply the scaling factor so the offset is adjusted to the scale
            h_offset = h_offset / self.canvas.get_scale()
            v_offset = v_offset / self.canvas.get_scale()
            destination = (destination[0] - h_offset,
                           destination[1] - v_offset)
            self.scroll_canvas(destination)
            # shake item after scroll to it
            self.shake_item(item)
#-------------------------------------------------------------------------
#
# Popup menu widget
#
#-------------------------------------------------------------------------
class PopupMenu(Gtk.Menu):
    """
    Produce popup widget for right-click menu.
    """
    def __init__(self, graph_widget, kind=None, handle=None):
        """
        graph_widget: GraphWidget
        kind: 'person', 'family', 'background'
        handle: person or family handle
        """
        Gtk.Menu.__init__(self)
        self.set_reserve_toggle_size(False)
        self.graph_widget = graph_widget
        self.view = graph_widget.view
        self.dbstate = graph_widget.dbstate
        self.actions = graph_widget.actions
        # build the menu matching the clicked element
        if kind == 'background':
            self.background_menu()
        elif handle is not None:
            if kind == 'person':
                self.person_menu(handle)
            elif kind == 'family':
                self.family_menu(handle)
def show_menu(self, event=None):
"""
Show popup menu.
"""
if (Gtk.MAJOR_VERSION >= 3) and (Gtk.MINOR_VERSION >= 22):
# new from gtk 3.22:
self.popup_at_pointer(event)
else:
if event:
self.popup(None, None, None, None,
event.get_button()[1], event.time)
else:
self.popup(None, None, None, None,
0, Gtk.get_current_event_time())
#self.popup(None, None, None, None, 0, 0)
def background_menu(self):
"""
Popup menu on background.
"""
menu_item = Gtk.CheckMenuItem(_('Show images'))
menu_item.set_active(
self.view._config.get('interface.graphview-show-images'))
menu_item.connect("activate", self.graph_widget.update_setting,
'interface.graphview-show-images')
menu_item.show()
self.append(menu_item)
menu_item = Gtk.CheckMenuItem(_('Highlight the home person'))
menu_item.set_active(
self.view._config.get('interface.graphview-highlight-home-person'))
menu_item.connect("activate", self.graph_widget.update_setting,
'interface.graphview-highlight-home-person')
menu_item.show()
self.append(menu_item)
menu_item = Gtk.CheckMenuItem(_('Show full dates'))
menu_item.set_active(
self.view._config.get('interface.graphview-show-full-dates'))
menu_item.connect("activate", self.graph_widget.update_setting,
'interface.graphview-show-full-dates')
menu_item.show()
self.append(menu_item)
menu_item = Gtk.CheckMenuItem(_('Show places'))
menu_item.set_active(
self.view._config.get('interface.graphview-show-places'))
menu_item.connect("activate", self.graph_widget.update_setting,
'interface.graphview-show-places')
menu_item.show()
self.append(menu_item)
menu_item = Gtk.CheckMenuItem(_('Show tags'))
menu_item.set_active(
self.view._config.get('interface.graphview-show-tags'))
menu_item.connect("activate", self.graph_widget.update_setting,
'interface.graphview-show-tags')
menu_item.show()
self.append(menu_item)
self.add_separator()
menu_item = Gtk.CheckMenuItem(_('Show animation'))
menu_item.set_active(
self.view._config.get('interface.graphview-show-animation'))
menu_item.connect("activate", self.graph_widget.update_setting,
'interface.graphview-show-animation')
menu_item.show()
self.append(menu_item)
# add sub menu for line type setting
menu_item, sub_menu = self.add_submenu(label=_('Lines type'))
spline = self.view._config.get('interface.graphview-show-lines')
entry = Gtk.RadioMenuItem(label=_('Direct'))
entry.connect("activate", self.graph_widget.update_lines_type,
0, 'interface.graphview-show-lines')
if spline == 0:
entry.set_active(True)
entry.show()
sub_menu.append(entry)
entry = Gtk.RadioMenuItem(label=_('Curves'))
entry.connect("activate", self.graph_widget.update_lines_type,
1, 'interface.graphview-show-lines')
if spline == 1:
entry.set_active(True)
entry.show()
sub_menu.append(entry)
entry = Gtk.RadioMenuItem(label=_('Ortho'))
entry.connect("activate", self.graph_widget.update_lines_type,
2, 'interface.graphview-show-lines')
if spline == 2:
entry.set_active(True)
entry.show()
sub_menu.append(entry)
# add help menu
self.add_separator()
self.append_help_menu_entry()
def person_menu(self, handle):
"""
Popup menu for person node.
"""
person = self.dbstate.db.get_person_from_handle(handle)
if person:
add_menuitem(self, _('Edit'),
handle, self.actions.edit_person)
add_menuitem(self, _('Copy'),
handle, self.actions.copy_person_to_clipboard)
add_menuitem(self, _('Delete'),
person, self.actions.remove_person)
self.add_separator()
# build tag submenu
item, tag_menu = self.add_submenu(label=_("Tags"))
add_menuitem(tag_menu, _('Select tags for person'),
[handle, 'person'], self.actions.edit_tag_list)
add_menuitem(tag_menu, _('Organize Tags...'),
[handle, 'person'], self.actions.organize_tags)
# go over spouses and build their menu
item, sp_menu = self.add_submenu(label=_("Spouses"))
add_menuitem(sp_menu, _('Add new family'),
handle, self.actions.add_spouse)
self.add_separator(sp_menu)
fam_list = person.get_family_handle_list()
for fam_id in fam_list:
family = self.dbstate.db.get_family_from_handle(fam_id)
if family.get_father_handle() == person.get_handle():
sp_id = family.get_mother_handle()
else:
sp_id = family.get_father_handle()
if not sp_id:
continue
spouse = self.dbstate.db.get_person_from_handle(sp_id)
if not spouse:
continue
self.add_menuitem(sp_menu, displayer.display(spouse),
self.graph_widget.move_to_person,
sp_id, True)
# go over siblings and build their menu
item, sib_menu = self.add_submenu(label=_("Siblings"))
pfam_list = person.get_parent_family_handle_list()
siblings = []
step_siblings = []
for f_h in pfam_list:
fam = self.dbstate.db.get_family_from_handle(f_h)
sib_list = fam.get_child_ref_list()
for sib_ref in sib_list:
sib_id = sib_ref.ref
if sib_id == person.get_handle():
continue
siblings.append(sib_id)
# collect a list of per-step-family step-siblings
for parent_h in [fam.get_father_handle(),
fam.get_mother_handle()]:
if not parent_h:
continue
parent = self.dbstate.db.get_person_from_handle(
parent_h)
other_families = [
self.dbstate.db.get_family_from_handle(fam_id)
for fam_id in parent.get_family_handle_list()
if fam_id not in pfam_list]
for step_fam in other_families:
fam_stepsiblings = [
sib_ref.ref for sib_ref in
step_fam.get_child_ref_list()
if not sib_ref.ref == person.get_handle()]
if fam_stepsiblings:
step_siblings.append(fam_stepsiblings)
# add siblings sub-menu with a bar between each siblings group
if siblings or step_siblings:
sibs = [siblings] + step_siblings
for sib_group in sibs:
for sib_id in sib_group:
sib = self.dbstate.db.get_person_from_handle(
sib_id)
if not sib:
continue
if find_children(self.dbstate.db, sib):
label = Gtk.Label(
label='<b><i>%s</i></b>'
% escape(displayer.display(sib)))
else:
label = Gtk.Label(
label=escape(displayer.display(sib)))
sib_item = Gtk.MenuItem()
label.set_use_markup(True)
label.show()
label.set_alignment(0, 0)
sib_item.add(label)
sib_item.connect("activate",
self.graph_widget.move_to_person,
sib_id, True)
sib_item.show()
sib_menu.append(sib_item)
if sibs.index(sib_group) < len(sibs) - 1:
self.add_separator(sib_menu)
else:
item.set_sensitive(0)
self.add_children_submenu(person=person)
# Go over parents and build their menu
item, par_menu = self.add_submenu(label=_("Parents"))
no_parents = True
par_list = find_parents(self.dbstate.db, person)
for par_id in par_list:
if not par_id:
continue
par = self.dbstate.db.get_person_from_handle(par_id)
if not par:
continue
if no_parents:
no_parents = False
if find_parents(self.dbstate.db, par):
label = Gtk.Label(label='<b><i>%s</i></b>'
% escape(displayer.display(par)))
else:
label = Gtk.Label(label=escape(displayer.display(par)))
par_item = Gtk.MenuItem()
label.set_use_markup(True)
label.show()
label.set_halign(Gtk.Align.START)
par_item.add(label)
par_item.connect("activate", self.graph_widget.move_to_person,
par_id, True)
par_item.show()
par_menu.append(par_item)
if no_parents:
# add button to add parents
add_menuitem(par_menu, _('Add parents'), handle,
self.actions.add_parents_to_person)
# go over related persons and build their menu
item, per_menu = self.add_submenu(label=_("Related"))
no_related = True
for p_id in find_witnessed_people(self.dbstate.db, person):
per = self.dbstate.db.get_person_from_handle(p_id)
if not per:
continue
if no_related:
no_related = False
self.add_menuitem(per_menu, displayer.display(per),
self.graph_widget.move_to_person,
p_id, True)
if no_related:
item.set_sensitive(0)
self.add_separator()
add_menuitem(self, _('Set as home person'),
handle, self.actions.set_home_person)
# check if we have person in bookmarks
marks = self.graph_widget.view.bookmarks.get_bookmarks().bookmarks
if handle in marks:
add_menuitem(self, _('Remove from bookmarks'), handle,
self.actions.remove_from_bookmarks)
else:
add_menuitem(self, _('Add to bookmarks'), [handle, person],
self.actions.add_to_bookmarks)
# QuickReports and WebConnect section
self.add_separator()
q_exists = self.add_quickreport_submenu(CATEGORY_QR_PERSON, handle)
w_exists = self.add_web_connect_submenu(handle)
if q_exists or w_exists:
self.add_separator()
self.append_help_menu_entry()
def add_quickreport_submenu(self, category, handle):
"""
Adds Quick Reports menu.
"""
def make_quick_report_callback(pdata, category, dbstate, uistate,
handle, track=[]):
return lambda x: run_report(dbstate, uistate, category, handle,
pdata, track=track)
# select the reports to show
showlst = []
pmgr = GuiPluginManager.get_instance()
for pdata in pmgr.get_reg_quick_reports():
if pdata.supported and pdata.category == category:
showlst.append(pdata)
showlst.sort(key=lambda x: x.name)
if showlst:
menu_item, quick_menu = self.add_submenu(_("Quick View"))
for pdata in showlst:
callback = make_quick_report_callback(
pdata, category, self.view.dbstate, self.view.uistate,
handle)
self.add_menuitem(quick_menu, pdata.name, callback)
return True
return False
def add_web_connect_submenu(self, handle):
"""
Adds Web Connect menu if some installed.
"""
def flatten(L):
"""
Flattens a possibly nested list. Removes None results, too.
"""
retval = []
if isinstance(L, (list, tuple)):
for item in L:
fitem = flatten(item)
if fitem is not None:
retval.extend(fitem)
elif L is not None:
retval.append(L)
return retval
# select the web connects to show
pmgr = GuiPluginManager.get_instance()
plugins = pmgr.process_plugin_data('WebConnect')
nav_group = self.view.navigation_type()
try:
connections = [plug(nav_group) if isinstance(plug, abc.Callable) else
plug for plug in plugins]
except BaseException:
import traceback
traceback.print_exc()
connections = []
connections = flatten(connections)
connections.sort(key=lambda plug: plug.name)
if connections:
menu_item, web_menu = self.add_submenu(_("Web Connection"))
for connect in connections:
callback = connect(self.view.dbstate, self.view.uistate,
nav_group, handle)
self.add_menuitem(web_menu, connect.name, callback)
return True
return False
def family_menu(self, handle):
"""
Popup menu for family node.
"""
family = self.dbstate.db.get_family_from_handle(handle)
if family:
add_menuitem(self, _('Edit'),
handle, self.actions.edit_family)
add_menuitem(self, _('Delete'),
family, self.actions.remove_family)
self.add_separator()
# build tag submenu
_item, tag_menu = self.add_submenu(label=_("Tags"))
add_menuitem(tag_menu, _('Select tags for family'),
[handle, 'family'], self.actions.edit_tag_list)
add_menuitem(tag_menu, _('Organize Tags...'),
[handle, 'family'], self.actions.organize_tags)
# build spouses menu
_item, sp_menu = self.add_submenu(label=_("Spouses"))
f_handle = family.get_father_handle()
m_handle = family.get_mother_handle()
if f_handle:
spouse = self.dbstate.db.get_person_from_handle(f_handle)
self.add_menuitem(sp_menu, displayer.display(spouse),
self.graph_widget.move_to_person,
f_handle, True)
else:
add_menuitem(sp_menu, _('Add father'), [family, 'father'],
self.actions.add_spouse_to_family)
if m_handle:
spouse = self.dbstate.db.get_person_from_handle(m_handle)
self.add_menuitem(sp_menu, displayer.display(spouse),
self.graph_widget.move_to_person,
m_handle, True)
else:
add_menuitem(sp_menu, _('Add mother'), [family, 'mother'],
self.actions.add_spouse_to_family)
self.add_children_submenu(family=family)
# QuickReports section
self.add_separator()
q_exists = self.add_quickreport_submenu(CATEGORY_QR_FAMILY, handle)
if q_exists:
self.add_separator()
self.append_help_menu_entry()
def add_children_submenu(self, person=None, family=None):
"""
Go over children and build their menu.
"""
item, child_menu = self.add_submenu(_("Children"))
no_child = True
childlist = []
if family:
for child_ref in family.get_child_ref_list():
childlist.append(child_ref.ref)
# allow to add a child to this family
add_menuitem(child_menu, _('Add child to family'),
family.get_handle(), self.actions.add_child_to_family)
self.add_separator(child_menu)
no_child = False
elif person:
childlist = find_children(self.dbstate.db, person)
for child_handle in childlist:
child = self.dbstate.db.get_person_from_handle(child_handle)
if not child:
continue
if no_child:
no_child = False
if find_children(self.dbstate.db, child):
label = Gtk.Label(label='<b><i>%s</i></b>'
% escape(displayer.display(child)))
else:
label = Gtk.Label(label=escape(displayer.display(child)))
child_item = Gtk.MenuItem()
label.set_use_markup(True)
label.show()
label.set_halign(Gtk.Align.START)
child_item.add(label)
child_item.connect("activate", self.graph_widget.move_to_person,
child_handle, True)
child_item.show()
child_menu.append(child_item)
if no_child:
item.set_sensitive(0)
def add_menuitem(self, menu, label, func, *args):
"""
Adds menu item.
"""
item = Gtk.MenuItem(label=label)
item.connect("activate", func, *args)
item.show()
menu.append(item)
return item
def add_submenu(self, label):
"""
Adds submenu.
"""
item = Gtk.MenuItem(label=label)
item.set_submenu(Gtk.Menu())
item.show()
self.append(item)
submenu = item.get_submenu()
submenu.set_reserve_toggle_size(False)
return item, submenu
def add_separator(self, menu=None):
"""
Adds separator to menu.
"""
if menu is None:
menu = self
menu_item = Gtk.SeparatorMenuItem()
menu_item.show()
menu.append(menu_item)
def append_help_menu_entry(self):
"""
Adds help (about) menu entry.
"""
item = Gtk.MenuItem(label=_("About Graph View"))
item.connect("activate", self.actions.on_help_clicked)
item.show()
self.append(item)
class Actions(Callback):
    """
    Define actions.

    Emits the signals below so GraphView can refocus/rebuild the graph
    after an edit.
    """
    __signals__ = {
        'focus-person-changed' : (str, ),
        'active-changed' : (str, ),
        'rebuild-graph' : None,
        }

    def __init__(self, dbstate, uistate, bookmarks):
        """
        bookmarks - person bookmarks from GraphView(NavigationView).
        """
        Callback.__init__(self)
        self.dbstate = dbstate
        self.uistate = uistate
        self.bookmarks = bookmarks

    def on_help_clicked(self, widget):
        """
        Display the relevant portion of Gramps manual.
        """
        display_url(WIKI_PAGE)

    def add_spouse(self, obj):
        """
        Add spouse to person (create new family to person).
        See: gramps/plugins/view/relview.py (add_spouse)
        """
        handle = obj.get_data()
        family = Family()
        person = self.dbstate.db.get_person_from_handle(handle)
        if not person:
            return
        # put the selected person on the matching side of the new family
        if person.gender == Person.MALE:
            family.set_father_handle(person.handle)
        else:
            family.set_mother_handle(person.handle)
        try:
            EditFamily(self.dbstate, self.uistate, [], family)
        except WindowActiveError:
            pass
        # set edited person to scroll on it after rebuilding graph
        self.emit('focus-person-changed', (handle, ))

    def add_spouse_to_family(self, obj):
        """
        Adds spouse to existing family.
        See: editfamily.py
        """
        family, kind = obj.get_data()
        try:
            dialog = EditFamily(self.dbstate, self.uistate, [], family)
            if kind == 'mother':
                dialog.add_mother_clicked(None)
            if kind == 'father':
                dialog.add_father_clicked(None)
        except WindowActiveError:
            pass

    def edit_person(self, obj, person_handle=None):
        """
        Start a person editor for the selected person.
        """
        if not (obj or person_handle):
            return False
        if person_handle:
            handle = person_handle
        else:
            handle = obj.get_data()
        person = self.dbstate.db.get_person_from_handle(handle)
        try:
            EditPerson(self.dbstate, self.uistate, [], person)
        except WindowActiveError:
            pass
        # set edited person to scroll on it after rebuilding graph
        self.emit('focus-person-changed', (handle, ))

    def set_home_person(self, obj):
        """
        Set the home person for database and make it active.
        """
        handle = obj.get_data()
        person = self.dbstate.db.get_person_from_handle(handle)
        if person:
            self.dbstate.db.set_default_person_handle(handle)
            self.emit('active-changed', (handle, ))

    def edit_family(self, obj, family_handle=None):
        """
        Start a family editor for the selected family.
        """
        if not (obj or family_handle):
            return False
        if family_handle:
            handle = family_handle
        else:
            handle = obj.get_data()
        family = self.dbstate.db.get_family_from_handle(handle)
        try:
            EditFamily(self.dbstate, self.uistate, [], family)
        except WindowActiveError:
            pass
        # set edited family person to scroll on it after rebuilding graph
        # (prefer the father, fall back to the mother)
        f_handle = family.get_father_handle()
        if f_handle:
            self.emit('focus-person-changed', (f_handle, ))
        else:
            m_handle = family.get_mother_handle()
            if m_handle:
                self.emit('focus-person-changed', (m_handle, ))

    def copy_person_to_clipboard(self, obj):
        """
        Renders the person data into some lines of text
        and puts that into the clipboard.
        """
        person_handle = obj.get_data()
        person = self.dbstate.db.get_person_from_handle(person_handle)
        if person:
            _cb = Gtk.Clipboard.get_for_display(Gdk.Display.get_default(),
                                                Gdk.SELECTION_CLIPBOARD)
            format_helper = FormattingHelper(self.dbstate)
            # 11 is the format passed to FormattingHelper.format_person;
            # NOTE(review): meaning of this constant not visible here
            _cb.set_text(format_helper.format_person(person, 11), -1)
            return True
        return False

    def edit_tag_list(self, obj):
        """
        Edit tag list for person or family.
        """
        handle, otype = obj.get_data()
        if otype == 'person':
            target = self.dbstate.db.get_person_from_handle(handle)
            self.emit('focus-person-changed', (handle, ))
        elif otype == 'family':
            target = self.dbstate.db.get_family_from_handle(handle)
            # refocus on one of the family's parents after rebuild
            f_handle = target.get_father_handle()
            if f_handle:
                self.emit('focus-person-changed', (f_handle, ))
            else:
                m_handle = target.get_mother_handle()
                if m_handle:
                    self.emit('focus-person-changed', (m_handle, ))
        else:
            return False

        if target:
            tag_list = []
            for tag_handle in target.get_tag_list():
                tag = self.dbstate.db.get_tag_from_handle(tag_handle)
                if tag:
                    tag_list.append((tag_handle, tag.get_name()))
            all_tags = []
            for tag_handle in self.dbstate.db.get_tag_handles(
                    sort_handles=True):
                tag = self.dbstate.db.get_tag_from_handle(tag_handle)
                all_tags.append((tag.get_handle(), tag.get_name()))
            try:
                editor = EditTagList(tag_list, all_tags, self.uistate, [])
                if editor.return_list is not None:
                    tag_list = editor.return_list
                    # Save tags to target object.
                    # Make the dialog modal so that the user can't start
                    # another database transaction while the one setting
                    # tags is still running.
                    pmon = progressdlg.ProgressMonitor(
                        progressdlg.GtkProgressDialog,
                        ("", self.uistate.window, Gtk.DialogFlags.MODAL),
                        popup_time=2)
                    status = progressdlg.LongOpStatus(msg=_("Adding Tags"),
                                                      total_steps=1,
                                                      # NOTE(review): 1 // 20
                                                      # == 0; presumably a
                                                      # "total_steps // 20"
                                                      # pattern - confirm
                                                      interval=1 // 20)
                    pmon.add_op(status)
                    target.set_tag_list([item[0] for item in tag_list])
                    if otype == 'person':
                        msg = _('Adding Tags to person (%s)') % handle
                        with DbTxn(msg, self.dbstate.db) as trans:
                            self.dbstate.db.commit_person(target, trans)
                            status.heartbeat()
                    else:
                        msg = _('Adding Tags to family (%s)') % handle
                        with DbTxn(msg, self.dbstate.db) as trans:
                            self.dbstate.db.commit_family(target, trans)
                            status.heartbeat()
                    status.end()
            except WindowActiveError:
                pass

    def organize_tags(self, obj):
        """
        Display the Organize Tags dialog.
        see: .gramps.gui.view.tags
        """
        handle, otype = obj.get_data()
        if otype == 'person':
            target = self.dbstate.db.get_person_from_handle(handle)
            self.emit('focus-person-changed', (handle, ))
        elif otype == 'family':
            target = self.dbstate.db.get_family_from_handle(handle)
            # refocus on one of the family's parents after rebuild
            f_handle = target.get_father_handle()
            if f_handle:
                self.emit('focus-person-changed', (f_handle, ))
            else:
                m_handle = target.get_mother_handle()
                if m_handle:
                    self.emit('focus-person-changed', (m_handle, ))

        OrganizeTagsDialog(self.dbstate.db, self.uistate, [])
        self.emit('rebuild-graph')

    def add_parents_to_person(self, obj):
        """
        Open dialog to add parents to person.
        """
        person_handle = obj.get_data()
        family = Family()
        childref = ChildRef()
        childref.set_reference_handle(person_handle)
        family.add_child_ref(childref)
        try:
            EditFamily(self.dbstate, self.uistate, [], family)
        except WindowActiveError:
            return
        # set edited person to scroll on it after rebuilding graph
        self.emit('focus-person-changed', (person_handle, ))

    def add_child_to_family(self, obj):
        """
        Open person editor to create and add child to family.
        """
        family_handle = obj.get_data()
        callback = lambda x: self.__callback_add_child(x, family_handle)
        person = Person()
        name = Name()
        # the editor requires a surname
        name.add_surname(Surname())
        name.set_primary_surname(0)
        family = self.dbstate.db.get_family_from_handle(family_handle)
        # try to get father (to preset the child's surname from his)
        father_handle = family.get_father_handle()
        if father_handle:
            father = self.dbstate.db.get_person_from_handle(father_handle)
            if father:
                preset_name(father, name)
        person.set_primary_name(name)
        try:
            EditPerson(self.dbstate, self.uistate, [], person,
                       callback=callback)
        except WindowActiveError:
            pass

    def __callback_add_child(self, person, family_handle):
        """
        Write data to db.
        Callback from self.add_child_to_family().
        """
        ref = ChildRef()
        ref.ref = person.get_handle()
        family = self.dbstate.db.get_family_from_handle(family_handle)
        family.add_child_ref(ref)
        with DbTxn(_("Add Child to Family"), self.dbstate.db) as trans:
            # add parentref to child
            person.add_parent_family_handle(family_handle)
            # default relationship is used
            self.dbstate.db.commit_person(person, trans)
            # add child to family
            self.dbstate.db.commit_family(family, trans)

    def remove_person(self, obj):
        """
        Remove a person from the database.
        see: libpersonview.py
        """
        person = obj.get_data()
        msg1 = _('Delete %s?') % displayer.display(person)
        msg2 = (_('Deleting the person [%s] will remove it '
                  'from the database.') % person.gramps_id)
        dialog = QuestionDialog2(msg1, msg2,
                                 _("Yes"), _("No"),
                                 self.uistate.window)
        if dialog.run():
            # set the busy cursor, so the user knows that we are working
            self.uistate.set_busy_cursor(True)
            # create the transaction
            with DbTxn('', self.dbstate.db) as trans:
                # create description to save
                description = (_("Delete Person (%s)")
                               % displayer.display(person))
                # delete the person from the database
                # Above will emit person-delete signal
                self.dbstate.db.delete_person_from_database(person, trans)
                trans.set_description(description)
            self.uistate.set_busy_cursor(False)

    def remove_family(self, obj):
        """
        Remove a family from the database.
        see: familyview.py
        """
        family = obj.get_data()
        msg1 = _('Delete family [%s]?') % family.gramps_id
        msg2 = _('Deleting the family will remove it from the database.')
        dialog = QuestionDialog2(msg1, msg2,
                                 _("Yes"), _("No"),
                                 self.uistate.window)
        if dialog.run():
            # set the busy cursor, so the user knows that we are working
            self.uistate.set_busy_cursor(True)
            # create the transaction
            with DbTxn('', self.dbstate.db) as trans:
                # create description to save
                description = _("Delete Family [%s]") % family.gramps_id
                # delete the family from the database
                self.dbstate.db.remove_family_relationships(family.handle,
                                                            trans)
                trans.set_description(description)
            self.uistate.set_busy_cursor(False)

    def add_to_bookmarks(self, obj):
        """
        Adds bookmark for person.
        See: navigationview.py and bookmarks.py
        """
        handle, person = obj.get_data()
        self.bookmarks.add(handle)
        name = displayer.display(person)
        self.uistate.push_message(self.dbstate,
                                  _("%s has been bookmarked") % name)

    def remove_from_bookmarks(self, obj):
        """
        Remove person from the list of bookmarked people.
        See: bookmarks.py
        """
        handle = obj.get_data()
        self.bookmarks.remove_handles([handle])
| gramps-project/addons-source | GraphView/graphview.py | Python | gpl-2.0 | 167,934 | [
"FLEUR"
] | 3f99aca903a0b50b1d80f3debf66565020209cefc2608e51100384479ca9e83f |
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
"""
LacewingServer.mfx
Lacewing Server - Jamie McLaughlin (http://www.aquadasoft.com)
Copyright 2007-2010 Jamie McLaughlin
This extension is a full implementation of the Lacewing networking
protocol, acting as a server. More information is available at
http://lacewing.aquadasoft.com
Ported to Python by Mathias Kaerlev
"""
from mmfparser.player.extensions.common import UserExtension, HiddenObject
from mmfparser.player.event.actions.common import Action
from mmfparser.player.event.conditions.common import Condition
from mmfparser.player.event.expressions.common import Expression
# Actions
class Action0(Action):
    """
    Lacewing server->Host

    Parameters:
    0: Port (default 6121) (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        requested = self.evaluate_index(0)
        # fall back to the default Lacewing port when no port is given
        instance.objectPlayer.host(requested if requested else 6121)


class Action1(Action):
    """
    Lacewing server->Stop hosting
    """
    def execute(self, instance):
        instance.objectPlayer.stop()


class FactoryAction(Action):
    """
    Base for actions that operate on the server factory.
    Subclasses override handle_action().
    """
    def execute(self, instance):
        factory = instance.objectPlayer.factory
        if factory is not None:
            self.handle_action(factory)

    def handle_action(self, factory):
        raise NotImplementedError()


class Action2(FactoryAction):
    """
    Set welcome message

    Parameters:
    0: Welcome message (EXPSTRING, ExpressionParameter)
    """
    def handle_action(self, factory):
        factory.welcomeMessage = self.evaluate_index(0)


class EnableInteractiveAction(Action):
    """
    Switch the condition handler named by the subclass into
    interactive mode.
    """
    def execute(self, instance):
        handler = instance.objectPlayer.handlers[self.name]
        handler.set_interactive()


class EnablePassiveAction(Action):
    """
    Switch the condition handler named by the subclass into
    passive mode.
    """
    def execute(self, instance):
        handler = instance.objectPlayer.handlers[self.name]
        handler.set_passive()
# Each interactive/passive pair below switches one server-side condition
# handler between interactive mode (the event can be denied or modified)
# and passive mode (faster, fire-and-forget).  The 'name' class attribute
# keys into objectPlayer.handlers (see EnableInteractiveAction /
# EnablePassiveAction).

class Action3(EnableInteractiveAction):
    """
    Enable conditions->On connect request->Interactive
    """
    name = 'OnConnectRequest'

class Action4(EnablePassiveAction):
    """
    Enable conditions->On connect request->Passive (faster)
    """
    name = 'OnConnectRequest'

class Action5(EnableInteractiveAction):
    """
    Enable conditions->On disconnect->Interactive
    """
    name = 'OnDisconnect'

class Action6(EnablePassiveAction):
    """
    Enable conditions->On disconnect->Passive (faster)
    """
    name = 'OnDisconnect'

class Action7(EnableInteractiveAction):
    """
    Enable conditions->On message to server->Interactive
    """
    name = 'OnServerMessage'

class Action8(EnablePassiveAction):
    """
    Enable conditions->On message to server->Passive (faster)
    """
    name = 'OnServerMessage'

class Action9(EnableInteractiveAction):
    """
    Enable conditions->On message to channel->Interactive
    """
    name = 'OnChannelMessage'

class Action10(EnablePassiveAction):
    """
    Enable conditions->On message to channel->Passive (faster)
    """
    name = 'OnChannelMessage'

class Action11(EnableInteractiveAction):
    """
    Enable conditions->On message to peer->Interactive
    """
    name = 'OnPeerMessage'

class Action12(EnablePassiveAction):
    """
    Enable conditions->On message to peer->Passive (faster)
    """
    name = 'OnPeerMessage'

class Action13(EnableInteractiveAction):
    """
    Enable conditions->On channel join request->Interactive
    """
    name = 'OnChannelJoinRequest'

class Action14(EnablePassiveAction):
    """
    Enable conditions->On channel join request->Passive (faster)
    """
    name = 'OnChannelJoinRequest'

class Action15(EnableInteractiveAction):
    """
    Enable conditions->On channel leave request->Interactive
    """
    name = 'OnChannelLeaveRequest'

class Action16(EnablePassiveAction):
    """
    Enable conditions->On channel leave request->Passive (faster)
    """
    name = 'OnChannelLeaveRequest'

class Action17(EnableInteractiveAction):
    """
    Enable conditions->On set name request->Interactive
    """
    name = 'OnSetNameRequest'

class Action18(EnablePassiveAction):
    """
    Enable conditions->On set name request->Passive (faster)
    """
    name = 'OnSetNameRequest'
class Action19(Action):
    """
    On interactive condition->Deny (for on [..] request)
    """
    def execute(self, instance):
        # flag inspected by the interactive condition currently being
        # handled; causes the pending request to be refused
        instance.objectPlayer.denyValue = True

class Action20(Action):
    """
    On interactive condition->Change name (for name set/change request)
    Parameters:
    0: New name (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        # stores the replacement value for the pending interactive event
        instance.objectPlayer.eventValue = self.evaluate_expression(
            self.get_parameter(0))

class Action21(Action):
    """
    On interactive condition->Change channel name (for channel join request)
    Parameters:
    0: New name (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        # identical mechanism to Action20, exposed under a different
        # menu entry for channel join requests
        instance.objectPlayer.eventValue = self.evaluate_expression(
            self.get_parameter(0))
# The remaining server actions have not been ported from the original
# extension yet: each one raises NotImplementedError when triggered.

class Action22(Action):
    """
    Channel->Close channel
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action23(Action):
    """
    Channel->Select the channel master
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action24(Action):
    """
    Channel->Select by name
    Parameters:
    0: Name (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        # NOTE(review): unlike the surrounding stubs this one is a silent
        # no-op rather than raising - confirm this is intentional
        return

class Action25(Action):
    """
    Channel->Loop all channels
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action26(Action):
    """
    Client->Disconnect
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action27(Action):
    """
    Client->Loop client's channels
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action28(Action):
    """
    Client->Select by name
    Parameters:
    0: Name (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action29(Action):
    """
    Client->Select by ID
    Parameters:
    0: ID (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action30(Action):
    """
    Client->Loop all clients
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action31(Action):
    """
    Send->Text->To client
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    1: Text to send (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action32(Action):
    """
    Send->Text->To channel
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    1: Text to send (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action33(Action):
    """
    Send->Number->To client
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    1: Number to send (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action34(Action):
    """
    Send->Number->To channel
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    1: Number to send (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action35(Action):
    """
    Send->Stack->To client
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action36(Action):
    """
    Send->Stack->To channel
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action37(Action):
    """
    Blast->Text->To client
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    1: Text to send (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action38(Action):
    """
    Blast->Text->To channel
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    1: Text to send (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action39(Action):
    """
    Blast->Number->To client
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    1: Number to send (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action40(Action):
    """
    Blast->Number->To channel
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    1: Number to send (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action41(Action):
    """
    Blast->Stack->To client
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action42(Action):
    """
    Blast->Stack->To channel
    Parameters:
    0: Subchannel (0-255) (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action43(Action):
    """
    Send stack->Push byte->ASCII character
    Parameters:
    0: Byte (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action44(Action):
    """
    Send stack->Push byte->Integer value
    Parameters:
    0: Byte (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action45(Action):
    """
    Send stack->Push short
    Parameters:
    0: Short (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action46(Action):
    """
    Send stack->Push integer
    Parameters:
    0: Integer (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action47(Action):
    """
    Send stack->Push float
    Parameters:
    0: Float (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action48(Action):
    """
    Send stack->Push string->Without null terminator
    Parameters:
    0: String (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action49(Action):
    """
    Send stack->Push string->With null terminator
    Parameters:
    0: String (EXPSTRING, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action50(Action):
    """
    Send stack->Push binary
    Parameters:
    0: Address (EXPRESSION, ExpressionParameter)
    1: Size (EXPRESSION, ExpressionParameter)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action51(Action):
    """
    Send stack->Push file
    Parameters:
    0: File to push (FILENAME, Filename)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))

class Action52(Action):
    """
    Send stack->Compress (ZLIB)
    """
    def execute(self, instance):
        raise NotImplementedError('%s not implemented' % (
            str(self)))
class Action53(Action):
"""
Send stack->Clear
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action54(Action):
"""
Received stack->Save to a file
Parameters:
0: Position (EXPRESSION, ExpressionParameter)
1: Size (EXPRESSION, ExpressionParameter)
2: Filename (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action55(Action):
"""
Received stack->Append to a file
Parameters:
0: Position (EXPRESSION, ExpressionParameter)
1: Size (EXPRESSION, ExpressionParameter)
2: Filename (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action56(Action):
"""
Received stack->Uncompress (ZLIB)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action57(Action):
"""
Channel->Loop clients
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action58(Action):
"""
On interactive condition->Drop message (for on message to channel/peer)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action59(Action):
"""
Client->Select sender (for "on message to peer")
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action60(Action):
"""
Client->Select receiver (for "on message to peer")
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action61(Action):
"""
Channel->Loop all channels (with loop name)
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action62(Action):
"""
Client->Loop all clients (with loop name)
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action63(Action):
"""
Client->Loop client's channels (with loop name)
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action64(Action):
"""
Flash Player policy server->Host
Parameters:
0: XML policy file (FILENAME, Filename)
1: - ((unknown -256))
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action65(Action):
"""
Flash Player policy server->Stop hosting
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action66(Action):
"""
Client->Set local client data
Parameters:
0: Key (EXPSTRING, ExpressionParameter)
1: Value (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action67(Action):
"""
Received stack->Move cursor
Parameters:
0: Position (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action68(Action):
"""
Channel->Set local channel data
Parameters:
0: Key (EXPSTRING, ExpressionParameter)
1: Value (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action69(Action):
"""
Build #17 (DLL)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
# Conditions
class Condition0(Condition):
"""
On error
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition1(Condition):
"""
Connection->On connect request
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition2(Condition):
"""
Connection->On disconnect
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition3(Condition):
"""
Channel->On join request
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition4(Condition):
"""
Channel->On leave request
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition5(Condition):
"""
Channel->On all channels loop
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition6(Condition):
"""
Channel->On client's channels loop
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition7(Condition):
"""
Client->On all clients loop
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition8(Condition):
"""
Client->On channel clients loop
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition9(Condition):
"""
Client->Client is the channel master
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition10(Condition):
"""
Client->On name set request
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition11(Condition):
"""
Message->Sent->On text message to server
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition12(Condition):
"""
Message->Sent->On number message to server
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition13(Condition):
"""
Message->Sent->On stack message to server
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition14(Condition):
"""
Message->Sent->On any message to server
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition15(Condition):
"""
Message->Sent->On text message to channel
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition16(Condition):
"""
Message->Sent->On number message to channel
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition17(Condition):
"""
Message->Sent->On stack message to channel
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition18(Condition):
"""
Message->Sent->On any message to channel
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition19(Condition):
"""
Message->Sent->On text message to peer
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition20(Condition):
"""
Message->Sent->On number message to peer
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition21(Condition):
"""
Message->Sent->On stack message to peer
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition22(Condition):
"""
Message->Sent->On any message to peer
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition23(Condition):
"""
Message->Blasted->On text message to server
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition24(Condition):
"""
Message->Blasted->On number message to server
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition25(Condition):
"""
Message->Blasted->On stack message to server
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition26(Condition):
"""
Message->Blasted->On any message to server
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition27(Condition):
"""
Message->Blasted->On text message to channel
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition28(Condition):
"""
Message->Blasted->On number message to channel
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition29(Condition):
"""
Message->Blasted->On stack message to channel
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition30(Condition):
"""
Message->Blasted->On any message to channel
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition31(Condition):
"""
Message->Blasted->On text message to peer
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition32(Condition):
"""
Message->Blasted->On number message to peer
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition33(Condition):
"""
Message->Blasted->On stack message to peer
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition34(Condition):
"""
Message->Blasted->On any message to peer
Parameters:
0: Subchannel (-1 for any) (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition35(Condition):
"""
Channel->[With loop name] On all channels loop
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition36(Condition):
"""
Channel->[With loop name] On client's channels loop
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition37(Condition):
"""
Client->[With loop name] On all clients loop
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition38(Condition):
"""
Client->[With loop name] On channel clients loop
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition39(Condition):
"""
Client->[With loop name] On channel clients loop finished
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition40(Condition):
"""
Channel->[With loop name] On all channels loop finished
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition41(Condition):
"""
Channel->[With loop name] On client's channels loop finished
Parameters:
0: Loop name (EXPSTRING, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition42(Condition):
"""
Client->On channel clients loop finished
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition43(Condition):
"""
Channel->On all channels loop finished
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition44(Condition):
"""
Client->[With loop name] On all clients loop finished
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition45(Condition):
"""
Channel->On client's channels loop finished
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition46(Condition):
"""
Lacewing server is hosting
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition47(Condition):
"""
Flash Player policy server is hosting
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition48(Condition):
"""
Channel->Channel is hidden from the channel list
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition49(Condition):
"""
Channel->Channel is set to close automatically
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
# Expressions
class Expression0(Expression):
"""
Error string (for on error)
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression1(Expression):
"""
Lacewing version string
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression2(Expression):
"""
Send stack size
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression3(Expression):
"""
Requested name (for name set/change request)
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression4(Expression):
"""
Requested channel name (for channel join request)
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression5(Expression):
"""
Channel->Name
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression6(Expression):
"""
Channel->Client count
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression7(Expression):
"""
Client->Name
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression8(Expression):
"""
Client->ID
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression9(Expression):
"""
Client->IP address
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression10(Expression):
"""
Client->Connection time
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression11(Expression):
"""
Client->Channel count
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression12(Expression):
"""
Received->Get text
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression13(Expression):
"""
Received->Get number
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression14(Expression):
"""
Received->Get stack size
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression15(Expression):
"""
Received->Get stack memory address
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression16(Expression):
"""
Received->Get stack data->Byte->ASCII character
Parameters:
0: Index (Int)
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression17(Expression):
"""
Received->Get stack data->Byte->Integer value->Unsigned
Parameters:
0: Index (Int)
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression18(Expression):
"""
Received->Get stack data->Byte->Integer value->Signed
Parameters:
0: Index (Int)
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression19(Expression):
"""
Received->Get stack data->Short->Unsigned
Parameters:
0: Index (Int)
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression20(Expression):
"""
Received->Get stack data->Short->Signed
Parameters:
0: Index (Int)
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression21(Expression):
"""
Received->Get stack data->Integer->Unsigned
Parameters:
0: Index (Int)
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression22(Expression):
"""
Received->Get stack data->Integer->Signed
Parameters:
0: Index (Int)
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression23(Expression):
"""
Received->Get stack data->Float
Parameters:
0: Index (Int)
Return type: Float
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression24(Expression):
"""
Received->Get stack data->String->With size
Parameters:
0: Index (Int)
1: Size (Int)
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression25(Expression):
"""
Received->Get stack data->String->Null terminated
Parameters:
0: Index (Int)
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression26(Expression):
"""
Received->Get subchannel
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression27(Expression):
"""
Channel->Number of channels on the server
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression28(Expression):
"""
Client->Get local client data
Parameters:
0: Key (String)
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression29(Expression):
"""
Received->Get stack data (with cursor)->Byte->ASCII character
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression30(Expression):
"""
Received->Get stack data (with cursor)->Byte->Integer value->Unsigned
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression31(Expression):
"""
Received->Get stack data (with cursor)->Byte->Integer value->Signed
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression32(Expression):
"""
Received->Get stack data (with cursor)->Short->Unsigned
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression33(Expression):
"""
Received->Get stack data (with cursor)->Short->Signed
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression34(Expression):
"""
Received->Get stack data (with cursor)->Integer->Unsigned
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression35(Expression):
"""
Received->Get stack data (with cursor)->Integer->Signed
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression36(Expression):
"""
Received->Get stack data (with cursor)->Float
Return type: Float
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression37(Expression):
"""
Received->Get stack data (with cursor)->String->With size
Parameters:
0: Size (Int)
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression38(Expression):
"""
Received->Get stack data (with cursor)->String->Null terminated
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression39(Expression):
"""
Client->Get client protocol implementation
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression40(Expression):
"""
Channel->Get local channel data
Parameters:
0: Key (String)
Return type: String
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
from twisted.internet import reactor, protocol
from twisted.protocols import basic
from lacewing.server import (ServerProtocol, ServerDatagram,
ServerFactory)
class ExtensionProtocol(ServerProtocol):
pass
class ExtensionFactory(ServerFactory):
protocol = ExtensionProtocol
class FlashPolicyProtocol(basic.LineReceiver):
delimiter = '\x00'
MAX_LENGTH = 64
def lineReceived(self, request):
if request != '<policy-file-request/>':
self.transport.loseConnection()
return
self.transport.write(self.factory.policyData)
class FlashPolicyFactory(protocol.ServerFactory):
protocol = FlashPolicyProtocol
policyData = None
def __init__(self, data):
self.policyData = data + '\x00'
class HandlerOption(object):
passive = interactive = False
def set_passive(self):
self.interactive = False
self.passive = True
def set_interactive(self):
self.passive = False
self.interactive = True
def __bool__(self):
return self.passive or self.interactive
class DefaultObject(HiddenObject):
clearStack = None
isGlobal = None
subApplicationGlobal = None
globalIdentifier = None
factory = None
port = None
udpPort = None
policyPort = None
# event stuff
handlers = None
denyValue = False
eventValue = None
def created(self, data):
self.clearStack = bool(data.readByte())
self.isGlobal = bool(data.readByte())
self.subApplicationGlobal = bool(data.readByte())
self.globalIdentifier = data.readString()
self.handlers = {
'OnConnectRequest' : HandlerOption(),
'OnDisconnect' : HandlerOption(),
'OnServerMessage' : HandlerOption(),
'OnChannelMessage' : HandlerOption(),
'OnPeerMessage' : HandlerOption(),
'OnChannelJoinRequest' : HandlerOption(),
'OnSetNameRequest' : HandlerOption()
}
def host(self, port = 6121):
self.factory = newFactory = ExtensionFactory()
self.port = reactor.listenTCP(port, newFactory)
self.udpPort = reactor.listenUDP(port, ServerDatagram(newFactory))
reactor.run()
def stop(self):
if self.port is None:
return
self.port.stopListening()
self.udpPort.stopListening()
self.factory = self.port = self.udpPort = None
def host_policy(self, data):
self.policyPort = reactor.listenTCP(843, FlashPolicyFactory(data))
reactor.run()
def stop_policy(self):
if self.policyPort is None:
return
self.policyPort.stopListening()
self.policyPort = None
def on_detach(self):
reactor.callFromThread(reactor.stop)
class LacewingServer(UserExtension):
objectPlayer = DefaultObject
actions = {
0 : Action0,
1 : Action1,
2 : Action2,
3 : Action3,
4 : Action4,
5 : Action5,
6 : Action6,
7 : Action7,
8 : Action8,
9 : Action9,
10 : Action10,
11 : Action11,
12 : Action12,
13 : Action13,
14 : Action14,
15 : Action15,
16 : Action16,
17 : Action17,
18 : Action18,
19 : Action19,
20 : Action20,
21 : Action21,
25 : Action22,
26 : Action23,
27 : Action24,
28 : Action25,
29 : Action26,
30 : Action27,
31 : Action28,
32 : Action29,
33 : Action30,
34 : Action31,
35 : Action32,
36 : Action33,
37 : Action34,
38 : Action35,
39 : Action36,
40 : Action37,
41 : Action38,
42 : Action39,
43 : Action40,
44 : Action41,
45 : Action42,
46 : Action43,
47 : Action44,
48 : Action45,
49 : Action46,
50 : Action47,
51 : Action48,
52 : Action49,
53 : Action50,
54 : Action51,
55 : Action52,
56 : Action53,
57 : Action54,
58 : Action55,
59 : Action56,
60 : Action57,
61 : Action58,
62 : Action59,
63 : Action60,
64 : Action61,
65 : Action62,
66 : Action63,
68 : Action64,
69 : Action65,
70 : Action66,
71 : Action67,
72 : Action68,
-1 : Action69,
}
conditions = {
0 : Condition0,
1 : Condition1,
2 : Condition2,
3 : Condition3,
4 : Condition4,
5 : Condition5,
6 : Condition6,
7 : Condition7,
8 : Condition8,
9 : Condition9,
10 : Condition10,
12 : Condition11,
13 : Condition12,
14 : Condition13,
15 : Condition14,
16 : Condition15,
17 : Condition16,
18 : Condition17,
19 : Condition18,
20 : Condition19,
21 : Condition20,
22 : Condition21,
23 : Condition22,
24 : Condition23,
25 : Condition24,
26 : Condition25,
27 : Condition26,
28 : Condition27,
29 : Condition28,
30 : Condition29,
31 : Condition30,
32 : Condition31,
33 : Condition32,
34 : Condition33,
35 : Condition34,
36 : Condition35,
37 : Condition36,
38 : Condition37,
39 : Condition38,
40 : Condition39,
41 : Condition40,
43 : Condition41,
44 : Condition42,
45 : Condition43,
46 : Condition44,
47 : Condition45,
48 : Condition46,
49 : Condition47,
50 : Condition48,
51 : Condition49,
}
expressions = {
0 : Expression0,
1 : Expression1,
2 : Expression2,
3 : Expression3,
4 : Expression4,
5 : Expression5,
6 : Expression6,
7 : Expression7,
8 : Expression8,
9 : Expression9,
10 : Expression10,
11 : Expression11,
12 : Expression12,
13 : Expression13,
14 : Expression14,
15 : Expression15,
16 : Expression16,
17 : Expression17,
18 : Expression18,
19 : Expression19,
20 : Expression20,
21 : Expression21,
22 : Expression22,
23 : Expression23,
24 : Expression24,
25 : Expression25,
26 : Expression26,
27 : Expression27,
28 : Expression28,
29 : Expression29,
30 : Expression30,
31 : Expression31,
32 : Expression32,
33 : Expression33,
34 : Expression34,
35 : Expression35,
36 : Expression36,
37 : Expression37,
38 : Expression38,
39 : Expression39,
40 : Expression40,
}
extension = LacewingServer()
def get_extension():
return extension
| joaormatos/anaconda | mmfparser/player/extensions/LacewingServer/__init__.py | Python | gpl-3.0 | 47,484 | [
"BLAST"
] | abd126338ea562f2ac6fe152ebe1e10190b49e61b108b1f8f54023bf84c0e832 |
''' CacheFeederAgent
This agent feeds the Cache tables with the outputs of the cache commands.
'''
from DIRAC import S_OK
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Command import CommandCaller
from DIRAC.ResourceStatusSystem.Utilities import Utils
ResourceManagementClient = getattr( Utils.voimport( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient' ), 'ResourceManagementClient' )
__RCSID__ = '$Id: $'
AGENT_NAME = 'ResourceStatus/CacheFeederAgent'
class CacheFeederAgent( AgentModule ):
'''
The CacheFeederAgent feeds the cache tables for the client and the accounting.
It runs periodically a set of commands, and stores it's results on the
tables.
'''
# Too many public methods
# pylint: disable-msg=R0904
def __init__( self, *args, **kwargs ):
AgentModule.__init__( self, *args, **kwargs )
self.commands = {}
self.clients = {}
self.cCaller = None
self.rmClient = None
def initialize( self ):
self.am_setOption( 'shifterProxy', 'DataManager' )
self.rmClient = ResourceManagementClient()
self.commands[ 'Downtime' ] = [ { 'Downtime' : {} } ]
self.commands[ 'SpaceTokenOccupancy' ] = [ { 'SpaceTokenOccupancy' : {} } ]
# PilotsCommand
# self.commands[ 'Pilots' ] = [
# { 'PilotsWMS' : { 'element' : 'Site', 'siteName' : None } },
# { 'PilotsWMS' : { 'element' : 'Resource', 'siteName' : None } }
# ]
# FIXME: do not forget about hourly vs Always ...etc
# AccountingCacheCommand
# self.commands[ 'AccountingCache' ] = [
# {'SuccessfullJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# {'FailedJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# {'SuccessfullPilotsBySiteSplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'FailedPilotsBySiteSplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'SuccessfullPilotsByCESplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'FailedPilotsByCESplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'RunningJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# # {'RunningJobsBySiteSplitted' :{'hours' :168, 'plotType' :'Job' }},
# # {'RunningJobsBySiteSplitted' :{'hours' :720, 'plotType' :'Job' }},
# # {'RunningJobsBySiteSplitted' :{'hours' :8760, 'plotType' :'Job' }},
# ]
# VOBOXAvailability
# self.commands[ 'VOBOXAvailability' ] = [
# { 'VOBOXAvailability' : {} }
#
# Reuse clients for the commands
self.clients[ 'GOCDBClient' ] = GOCDBClient()
self.clients[ 'ReportGenerator' ] = RPCClient( 'Accounting/ReportGenerator' )
self.clients[ 'ReportsClient' ] = ReportsClient()
self.clients[ 'ResourceStatusClient' ] = ResourceStatusClient()
self.clients[ 'ResourceManagementClient' ] = ResourceManagementClient()
self.clients[ 'WMSAdministrator' ] = RPCClient( 'WorkloadManagement/WMSAdministrator' )
self.cCaller = CommandCaller
return S_OK()
def loadCommand( self, commandModule, commandDict ):
commandName = commandDict.keys()[ 0 ]
commandArgs = commandDict[ commandName ]
commandTuple = ( '%sCommand' % commandModule, '%sCommand' % commandName )
commandObject = self.cCaller.commandInvocation( commandTuple, pArgs = commandArgs,
clients = self.clients )
if not commandObject[ 'OK' ]:
self.log.error( 'Error initializing %s' % commandName )
return commandObject
commandObject = commandObject[ 'Value' ]
# Set master mode
commandObject.masterMode = True
self.log.info( '%s/%s' % ( commandModule, commandName ) )
return S_OK( commandObject )
def execute( self ):
for commandModule, commandList in self.commands.items():
self.log.info( '%s module initialization' % commandModule )
for commandDict in commandList:
commandObject = self.loadCommand( commandModule, commandDict )
if not commandObject[ 'OK' ]:
self.log.error( commandObject[ 'Message' ] )
continue
commandObject = commandObject[ 'Value' ]
results = commandObject.doCommand()
if not results[ 'OK' ]:
self.log.error( 'Failed to execute command', '%s: %s' % ( commandModule, results[ 'Message' ] ) )
continue
results = results[ 'Value' ]
if not results:
self.log.info( 'Empty results' )
continue
self.log.verbose( 'Command OK Results' )
self.log.verbose( results )
return S_OK()
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| vmendez/DIRAC | ResourceStatusSystem/Agent/CacheFeederAgent.py | Python | gpl-3.0 | 5,763 | [
"DIRAC"
] | e115257f90adcfe146e690a56844951d8473ef1d4f140af1e8552564c186dcff |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import struct
def getrec(reclabelarray, verbose=False):
    """Read the binary CFOUR files JAINDX and JOBARC from the current
    working directory and return the contents of each record named in
    *reclabelarray*.

    :param reclabelarray: iterable of 8-character, space-padded record
        labels to extract (e.g. ``'NATOMS  '``)
    :param verbose: if True, print the JAINDX layout and each record read
    :return: dict mapping each requested label found in JAINDX to the
        tuple of values unpacked from JOBARC
    :raises ValueError: if JAINDX has an unrecognized length, i.e. the
        integer/record-marker sizes cannot be determined
    """
    # Record label -> storage type for every label this reader understands.
    knownlabels = {
        "AU_LENGT": 'DOUBLE',
        "CHARGE_E": 'DOUBLE',
        "AMU     ": 'DOUBLE',
        "NUC_MAGN": 'DOUBLE',
        "MASS_ELE": 'DOUBLE',
        "MASS_PRO": 'DOUBLE',
        "HBAR    ": 'DOUBLE',
        "AU_MASSP": 'DOUBLE',
        "SP_LIGHT": 'DOUBLE',
        "AU_EV   ": 'DOUBLE',
        "AVOGADRO": 'DOUBLE',
        "AU_ENERG": 'DOUBLE',
        "AU_CM-1 ": 'DOUBLE',
        "CM-1_KCA": 'DOUBLE',
        "CM-1_KJ ": 'DOUBLE',
        "AU_DIPOL": 'DOUBLE',
        "AU_VELOC": 'DOUBLE',
        "AU_TIME ": 'DOUBLE',
        "EL_GFACT": 'DOUBLE',
        "EA_IRREP": 'INTEGER',
        "UHFRHF  ": 'INTEGER',
        "IFLAGS  ": 'INTEGER',
        "IFLAGS2 ": 'INTEGER',
        "OCCUPYA ": 'INTEGER',
        "NUMDROPA": 'INTEGER',
        "JODAFLAG": 'INTEGER',
        "TITLE   ": 'CHARACTER',
        "NCNSTRNT": 'INTEGER',
        "ICNSTRNT": 'INTEGER',
        "VCNSTRNT": 'DOUBLE',
        "NMPROTON": 'INTEGER',
        "NREALATM": 'INTEGER',
        "COORDINT": 'DOUBLE',
        "VARNAINT": 'DOUBLE',
        "COORD000": 'DOUBLE',
        "ROTCONST": 'DOUBLE',
        "ORIENT2 ": 'DOUBLE',  # input orientation into interial frame
        "LINEAR  ": 'INTEGER',
        "NATOMS  ": 'INTEGER',
        "COORD   ": 'DOUBLE',
        "ORIENTMT": 'DOUBLE',  # input orientation from ZMAT (mostly useful for Cartesians) to Cfour standard orientation
        "ATOMMASS": 'DOUBLE',
        "ORIENT3 ": 'DOUBLE',
        "FULLPTGP": 'CHARACTER',
        "FULLORDR": 'INTEGER',
        "FULLNIRR": 'INTEGER',
        "FULLNORB": 'INTEGER',
        "FULLSYOP": 'DOUBLE',
        "FULLPERM": 'INTEGER',
        "FULLMEMB": 'INTEGER',
        "FULLPOPV": 'INTEGER',
        "FULLCLSS": 'INTEGER',
        "FULLSTGP": 'CHARACTER',
        "ZMAT2MOL": 'INTEGER',
        "COMPPTGP": 'CHARACTER',
        "COMPORDR": 'INTEGER',
        "COMPNIRR": 'INTEGER',
        "COMPNORB": 'INTEGER',
        "COMPSYOP": 'DOUBLE',
        "COMPPERM": 'INTEGER',
        "COMPMEMB": 'INTEGER',
        "COMPPOPV": 'INTEGER',
        "COMPCLSS": 'INTEGER',
        "COMPSTGP": 'CHARACTER',
        "BMATRIX ": 'DOUBLE',
        "NUCREP  ": 'DOUBLE',
        "TIEDCORD": 'INTEGER',
        "MPVMZMAT": 'INTEGER',
        "ATOMCHRG": 'INTEGER',
        "NTOTSHEL": 'INTEGER',
        "NTOTPRIM": 'INTEGER',
        "BASISEXP": 'DOUBLE',
        "BASISCNT": 'DOUBLE',
        "SHELLSIZ": 'INTEGER',
        "SHELLPRM": 'INTEGER',
        "SHELLANG": 'INTEGER',
        "SHELLLOC": 'INTEGER',
        "SHOFFSET": 'INTEGER',
        "SHELLORB": 'INTEGER',
        "PROFFSET": 'INTEGER',
        "PRIMORBT": 'INTEGER',
        "FULSHLNM": 'INTEGER',
        "FULSHLTP": 'INTEGER',
        "FULSHLSZ": 'INTEGER',
        "FULSHLAT": 'INTEGER',
        "JODAOUT ": 'INTEGER',
        "NUMIIII ": 'INTEGER',
        "NUMIJIJ ": 'INTEGER',
        "NUMIIJJ ": 'INTEGER',
        "NUMIJKL ": 'INTEGER',
        "NBASTOT ": 'INTEGER',
        "NAOBASFN": 'INTEGER',
        "NUMBASIR": 'INTEGER',
        "FAOBASIR": 'DOUBLE',
        "AO2SO   ": 'DOUBLE',
        "FULLSOAO": 'DOUBLE',
        "FULLAOSO": 'DOUBLE',
        "AO2SOINV": 'DOUBLE',
        "CART3CMP": 'DOUBLE',
        "CART2CMP": 'DOUBLE',
        "CMP3CART": 'DOUBLE',
        "CMP2CART": 'DOUBLE',
        "ANGMOMBF": 'INTEGER',
        "NBASATOM": 'INTEGER',
        "NAOBFORB": 'INTEGER',
        "MAP2ZMAT": 'INTEGER',
        "CENTERBF": 'INTEGER',
        "CNTERBF0": 'INTEGER',
        "ANMOMBF0": 'INTEGER',
        "CMP2ZMAT": 'DOUBLE',
        "ZMAT2CMP": 'DOUBLE',
        "OVERLAP ": 'DOUBLE',
        "ONEHAMIL": 'DOUBLE',
        "AOOVRLAP": 'DOUBLE',
        "SHALFMAT": 'DOUBLE',
        "SCFEVCA0": 'DOUBLE',
        "RPPBMAT ": 'DOUBLE',
        "OCCUPYA0": 'INTEGER',
        "SYMPOPOA": 'INTEGER',
        "SYMPOPVA": 'INTEGER',
        "SCFEVLA0": 'DOUBLE',
        "SCFDENSA": 'DOUBLE',
        "FOCKA   ": 'DOUBLE',
        "SMHALF  ": 'DOUBLE',
        "EVECOAOA": 'DOUBLE',
        "ONEHMOA ": 'DOUBLE',
        "NOCCORB ": 'INTEGER',
        "NVRTORB ": 'INTEGER',
        "SCFENEG ": 'DOUBLE',
        "TOTENERG": 'DOUBLE',
        "IRREPALP": 'INTEGER',
        "OMEGA_A ": 'DOUBLE',
        "EVECAOXA": 'DOUBLE',
        "EVALORDR": 'DOUBLE',
        "EVECAO_A": 'DOUBLE',
        "EVCSYMAF": 'CHARACTER',
        "EVCSYMAC": 'CHARACTER',
        "TESTVECT": 'DOUBLE',
        "MODROPA ": 'INTEGER',
        "VRHARMON": 'DOUBLE',
        "NEWRECRD": 'INTEGER',
        "VRCORIOL": 'DOUBLE',
        "VRQUADRA": 'DOUBLE',
        "VRANHARM": 'DOUBLE',
        "REFINERT": 'DOUBLE',
        "DIDQ    ": 'DOUBLE',
        "REFCOORD": 'DOUBLE',
        "REFDIPOL": 'DOUBLE',
        "REFGRADI": 'DOUBLE',
        "REFDIPDR": 'DOUBLE',
        "REFNORMC": 'DOUBLE',
        "REFD2EZ ": 'DOUBLE',
        "REFFREQS": 'DOUBLE',
        "REFORIEN": 'DOUBLE',
        "NUSECORD": 'INTEGER',
        "NZMATANH": 'INTEGER',
        "ISELECTQ": 'INTEGER',
        "NEXTGEOM": 'DOUBLE',
        "NEXTGEO1": 'DOUBLE',
        "FCMDISPL": 'DOUBLE',
        "GRDDISPL": 'DOUBLE',
        "DPMDISPL": 'DOUBLE',
        "DIPDISPL": 'DOUBLE',
        "NMRDISPL": 'DOUBLE',
        "SRTDISPL": 'DOUBLE',
        "CHIDISPL": 'DOUBLE',
        "POLDISPL": 'DOUBLE',
        "EFGDISPL": 'DOUBLE',
        "THEDISPL": 'DOUBLE',
        "JFCDISPL": 'DOUBLE',
        "JSDDISPL": 'DOUBLE',
        "JSODISPL": 'DOUBLE',
        "JDSODISP": 'DOUBLE',
        "CUBCOUNT": 'INTEGER',
        "FCMMAPER": 'DOUBLE',
        "QPLSMINS": 'INTEGER',
        "CUBCOORD": 'INTEGER',
        "PASS1   ": 'INTEGER',
        "REFFORDR": 'INTEGER',
        "REFFSYOP": 'DOUBLE',
        "REFFPERM": 'INTEGER',
        "REFNUMIC": 'INTEGER',
        "REFAMAT ": 'DOUBLE',
        "REFTTEN ": 'DOUBLE',
        "REFLINER": 'INTEGER',
        "DIPOLMOM": 'DOUBLE',
        "POLARTEN": 'DOUBLE',
        "CHITENSO": 'DOUBLE',
        "EFGTENSO": 'DOUBLE',
        "IRREPPOP": 'INTEGER',
        "REORDERA": 'INTEGER',
        "IRREPBET": 'INTEGER',
        "SCFEVLB0": 'DOUBLE',
        "SCFEVCB0": 'DOUBLE',
        "IRREPCOU": 'INTEGER',
        "IDROPA  ": 'INTEGER',
        "OCCSCF  ": 'INTEGER',
        "VRTSCF  ": 'INTEGER',
        "SCFEVECA": 'DOUBLE',
        "NCOMPA  ": 'INTEGER',
        "NBASCOMP": 'INTEGER',
        "SCFEVALA": 'DOUBLE',
        "SCFEVALB": 'DOUBLE',
        "SVAVA0  ": 'INTEGER',
        "SVAVA0X ": 'INTEGER',
        "SVAVA0I ": 'INTEGER',
        "SVBVB0  ": 'INTEGER',
        "SVBVB0X ": 'INTEGER',
        "SVBVB0I ": 'INTEGER',
        "SOAOA0  ": 'INTEGER',
        "SOAOA0X ": 'INTEGER',
        "SOAOA0I ": 'INTEGER',
        "SOBOB0  ": 'INTEGER',
        "SOBOB0X ": 'INTEGER',
        "SOBOB0I ": 'INTEGER',
        "SVAVA1  ": 'INTEGER',
        "SVAVA1X ": 'INTEGER',
        "SVAVA1I ": 'INTEGER',
        "SVBVB1  ": 'INTEGER',
        "SVBVB1X ": 'INTEGER',
        "SVBVB1I ": 'INTEGER',
        "SOAOA1  ": 'INTEGER',
        "SOAOA1X ": 'INTEGER',
        "SOAOA1I ": 'INTEGER',
        "SOBOB1  ": 'INTEGER',
        "SOBOB1X ": 'INTEGER',
        "SOBOB1I ": 'INTEGER',
        "SVAOA2  ": 'INTEGER',
        "SVAOA2X ": 'INTEGER',
        "SVAOA2I ": 'INTEGER',
        "SVBOB2  ": 'INTEGER',
        "SVBOB2X ": 'INTEGER',
        "SVBOB2I ": 'INTEGER',
        "SOBVA2  ": 'INTEGER',
        "SOBVA2X ": 'INTEGER',
        "SOBVA2I ": 'INTEGER',
        "SVBOA2  ": 'INTEGER',
        "SVBOA2X ": 'INTEGER',
        "SVBOA2I ": 'INTEGER',
        "SVAVB2  ": 'INTEGER',
        "SVAVB2X ": 'INTEGER',
        "SVAVB2I ": 'INTEGER',
        "SOAOB2  ": 'INTEGER',
        "SOAOB2X ": 'INTEGER',
        "SOAOB2I ": 'INTEGER',
        "SOAVA2  ": 'INTEGER',
        "SOAVA2X ": 'INTEGER',
        "SOAVA2I ": 'INTEGER',
        "SOBVB2  ": 'INTEGER',
        "SOBVB2X ": 'INTEGER',
        "SOBVB2I ": 'INTEGER',
        "SOAVB2  ": 'INTEGER',
        "SOAVB2X ": 'INTEGER',
        "SOAVB2I ": 'INTEGER',
        "SVAVA2  ": 'INTEGER',
        "SVAVA2X ": 'INTEGER',
        "SVAVA2I ": 'INTEGER',
        "SVBVB2  ": 'INTEGER',
        "SVBVB2X ": 'INTEGER',
        "SVBVB2I ": 'INTEGER',
        "SOAOA2  ": 'INTEGER',
        "SOAOA2X ": 'INTEGER',
        "SOAOA2I ": 'INTEGER',
        "SOBOB2  ": 'INTEGER',
        "SOBOB2X ": 'INTEGER',
        "SOBOB2I ": 'INTEGER',
        "SYMPOPOB": 'INTEGER',
        "SYMPOPVB": 'INTEGER',
        "T2NORM  ": 'DOUBLE',
        "MOIOVEC ": 'INTEGER',
        "MOIOWRD ": 'INTEGER',
        "MOIOSIZ ": 'INTEGER',
        "MOIODIS ": 'INTEGER',
        "MOIOFIL ": 'INTEGER',
        "ISYMTYP ": 'INTEGER',
        "TOTRECMO": 'INTEGER',
        "TOTWRDMO": 'INTEGER',
        "RELDENSA": 'DOUBLE',
        "IINTERMA": 'DOUBLE',
        "OCCNUM_A": 'DOUBLE',
        "SCRATCH ": 'DOUBLE',
        "SETUP2  ": 'INTEGER',
        "MOLHES2 ": 'INTEGER',
        "GRAD2   ": 'INTEGER',
        "COORDMAS": 'INTEGER',
        "NUCMULT ": 'INTEGER',
        "SYMCOORD": 'DOUBLE',
        "SYMCOOR2": 'DOUBLE',
        "SYMCOOR3": 'DOUBLE',
        "SYMMLENG": 'INTEGER',
        "SKIP    ": 'INTEGER',
        "NSYMPERT": 'INTEGER',
        "NPERTB  ": 'INTEGER',
        "TRANSINV": 'INTEGER',
        "IBADNUMB": 'INTEGER',
        "IBADINDX": 'INTEGER',
        "IBADIRRP": 'INTEGER',
        "IBADPERT": 'INTEGER',
        "IBADSPIN": 'INTEGER',
        "TREATPER": 'INTEGER',
        "MAXAODSZ": 'INTEGER',
        "PERTINFO": 'INTEGER',
        "GRADIENT": 'DOUBLE',
        "HESSIANM": 'DOUBLE',
        "GRDZORDR": 'DOUBLE',
        "D2EZORDR": 'DOUBLE',
        "REALCORD": 'DOUBLE',
        "DUMSTRIP": 'INTEGER',
        "BMATRIXC": 'DOUBLE',
        "REALATOM": 'INTEGER',
        "NORMCORD": 'DOUBLE',
        "DIPDERIV": 'DOUBLE',
        "I4CDCALC": 'DOUBLE',
        "FREQUENC": 'DOUBLE',
        "RATMMASS": 'DOUBLE',
        "RATMPOSN": 'INTEGER',
        "DEGENERT": 'INTEGER',
        "REFSHILD": 'DOUBLE',
        "CORIZETA": 'DOUBLE',
        "NMPOINTX": 'INTEGER',
        "REFD3EDX": 'DOUBLE',
        "BPPTOB  ": 'DOUBLE',
        "BPTOB   ": 'DOUBLE',
        "BSRTOB  ": 'DOUBLE',
        "BARTOB  ": 'DOUBLE',
        "VRTOTAL ": 'DOUBLE',
        "D2DIPOLE": 'DOUBLE',
        "D3DIPOLE": 'DOUBLE',
        "D1DIPOLE": 'DOUBLE',
        "REFNORM2": 'DOUBLE',
        "NUSECOR2": 'INTEGER',
        "FCMDISP2": 'DOUBLE',
        "RGTDISPL": 'DOUBLE',
        "CUBCOOR1": 'INTEGER',
        "CUBCOOR2": 'INTEGER',
        "REFFPEM2": 'INTEGER',
        "RGTTENSO": 'DOUBLE',
        "REFFPER2": 'INTEGER',
        "REFD4EDX": 'DOUBLE',
        "ZPE_ANHA": 'DOUBLE',
        "OPENSLOT": 'INTEGER',
        "BOLTZMAN": 'DOUBLE',
        "MRCCOCC ": 'INTEGER',
        "ABELPTGP": 'CHARACTER',
        "ABELORDR": 'INTEGER',
        "ABELNIRR": 'INTEGER',
        "ABELNORB": 'INTEGER',
        "ABELSYOP": 'DOUBLE',
        "ABELPERM": 'INTEGER',
        "ABELMEMB": 'INTEGER',
        "ABELPOPV": 'INTEGER',
        "ABELCLSS": 'INTEGER',
        "ABELSTGP": 'CHARACTER',
        "REALCHRG": 'INTEGER',  # atom/mol? charge taking into acct edp
        "NSOSCF  ": 'INTEGER',  # whether is spin orbital calc?
        "SCFVCFLA": 'DOUBLE',  # scf vector expanded from sph to cart basis for symm anal - determin orb sym
        "EFG_SYM1": 'INTEGER',  # symmetry property of components of electric field gradient integrals
        "EFG_SYM2": 'INTEGER',  # symm prop of comp of EFG
        "DCTDISPL": 'DOUBLE',
        "DANGERUS": 'INTEGER',  #?
        "FULLCHAR": 'CHARACTER',  #?
        "FULLDEGN": 'CHARACTER',  #?
        "FULLLABL": 'CHARACTER',  #?
        "FULLNIRX": 'CHARACTER',  #?
        "COMPCHAR": 'CHARACTER',  #?
        "COMPDEGN": 'CHARACTER',  #?
        "COMPLABL": 'CHARACTER',  #?
        "COMPNIRX": 'CHARACTER',  #?
        "ROTVECX ": 'CHARACTER',  #?
        "ROTVECY ": 'CHARACTER',  #?
        "ROTVECZ ": 'CHARACTER',  #?
        "COMPNSYQ": 'CHARACTER',  #?
        "COMPSYQT": 'CHARACTER',  #?
        "COMPSYMQ": 'CHARACTER',  #?
        "TRAVECX ": 'CHARACTER',  #?
        "TRAVECY ": 'CHARACTER',  #?
        "TRAVECZ ": 'CHARACTER',  #?
        "NVIBSYM ": 'CHARACTER',  #?
        "NUMVIBRT": 'CHARACTER',  #?
        "SBGRPSYM": 'CHARACTER',  #?
        "ORDERREF": 'CHARACTER',  #?
        "OPERSREF": 'CHARACTER',  #?
        "NVIBSYMF": 'CHARACTER',  #?
        "FULLNSYQ": 'CHARACTER',  #?
        "FULLSYQT": 'CHARACTER',  #?
        "FULLSYMQ": 'CHARACTER',  #?
        "INVPSMAT": 'CHARACTER',  #?
        "FDCOORDS": 'CHARACTER',  #?
        "FDCALCTP": 'CHARACTER',  #?
        "NUMPOINT": 'CHARACTER',  #?
        "NPTIRREP": 'CHARACTER',  #?
        "GRDPOINT": 'CHARACTER',  #?
        "DIPPOINT": 'CHARACTER',  #?
        "ENGPOINT": 'CHARACTER',  #?
        "PASS1FIN": 'CHARACTER',  #?
        "REFENERG": 'CHARACTER',  #?
        "NEXTCALC": 'CHARACTER',  #?
        "PRINSPIN": 'CHARACTER',  #?
        "PRINFROM": 'CHARACTER',  #?
        "PRININTO": 'CHARACTER',  #?
        "NEXTGEOF": 'CHARACTER',  #?
        "ZPE_HARM": 'DOUBLE',  #?
        "NDROPPED": 'INTEGER',
        "REFCPTGP": 'INTEGER',  #?
        "REFFPTGP": 'INTEGER',  #?
    }

    with open('JAINDX', mode='rb') as file:  # b is important -> binary
        fileContent = file.read()

    # The total length of JAINDX fixes both the integer size used for the
    # index arrays (srcints) and the Fortran record-marker size (srcrecs).
    fileLength = len(fileContent)
    if fileLength == 16012:
        srcints = 4
        srcrecs = 4
    elif fileLength == 16020:
        srcints = 4
        srcrecs = 8
    elif fileLength == 24016:
        srcints = 8
        srcrecs = 4
    elif fileLength == 24024:
        srcints = 8
        srcrecs = 8
    else:
        # Previously an unknown length fell through and crashed later with
        # a NameError on srcints; fail early with a useful message instead.
        raise ValueError('Unrecognized JAINDX file length: %d' % fileLength)

    # fixed number of slots for options
    nopt = 1000

    type2len = {
        'DOUBLE': 8,
        'INTEGER': srcints,
        'CHARACTER': 1,
    }
    # struct format codes with fixed sizes: 'i' is 4 bytes and 'q' (long
    # long) is 8 bytes on all supported platforms. The previous 'l' has a
    # platform-dependent native size (4 bytes on Windows, 8 on most Unix).
    intlen2format = {
        4: 'i',
        8: 'q',
    }
    type2format = {
        'DOUBLE': 'd',
        'INTEGER': intlen2format[type2len['INTEGER']],
        'CHARACTER': 'c',
    }

    if verbose:
        print('\n<<< JAINDX >>>\n')

    # Leading record marker.
    posf = srcrecs
    istr = intlen2format[srcrecs]
    jastart = struct.unpack(istr, fileContent[:posf])
    if verbose:
        print('%10s%10d%10d' % ('start', 0, posf))

    # nopt 8-character record labels. struct.unpack('8s', ...) yields bytes
    # on Python 3, so decode (latin-1: lossless byte->char) to plain str for
    # comparison with the str labels in knownlabels and reclabelarray.
    poss = posf
    posf = poss + 8 * nopt
    istr = '8s' * nopt
    jaindx = [label.decode('latin-1')
              for label in struct.unpack(istr, fileContent[poss:posf])]
    if verbose:
        print('%10s%10d%10d' % ('jaindx', poss, posf))

    # Per-record word addresses.
    poss = posf
    posf = poss + srcints * nopt
    istr = intlen2format[srcints] * nopt
    jaindx2 = struct.unpack(istr, fileContent[poss:posf])
    if verbose:
        print('%10s%10d%10d' % ('jaindx2', poss, posf))

    # Per-record lengths (number of stored values).
    poss = posf
    posf = poss + srcints * nopt
    istr = intlen2format[srcints] * nopt
    jaindx3 = struct.unpack(istr, fileContent[poss:posf])
    if verbose:
        print('%10s%10d%10d' % ('jaindx3', poss, posf))

    poss = posf
    posf = poss + srcints
    istr = intlen2format[srcints]
    jamid = struct.unpack(istr, fileContent[poss:posf])
    if verbose:
        print('%10s%10d%10d' % ('mid', poss, posf))

    # Trailing record marker.
    poss = posf
    posf = poss + srcrecs
    istr = intlen2format[srcrecs]
    jaend = struct.unpack(istr, fileContent[poss:posf])
    if verbose:
        print('%10s%10d%10d' % ('end', poss, posf))

    nrecs = jaindx.index('OPENSLOT')  # number of active records

    if verbose:
        print('\n')
        print('%20s%10d' % ('File Length:', fileLength))
        print('%20s%10d' % ('srcints Int Length:', srcints))
        print('%20s%10d' % ('srcrecs Int Length:', srcrecs))
        print('%20s%10d' % ('First Rec:', jastart[0]))
        print('%20s%10d' % ('Second Rec:', jamid[0]))
        print('%20s%10d' % ('Last Rec:', jaend[0]))
        print('%20s%10d' % ('Full Records:', nrecs))
        print('\n')
        print('\n<<< JOBARC >>>\n')

    with open('JOBARC', mode='rb') as file:  # b is important -> binary
        fileContent = file.read()

    returnRecords = {}
    poss = 0
    for item in range(nrecs):
        label = jaindx[item]
        dtype = knownlabels[label]
        posf = poss + type2len[dtype] * jaindx3[item]
        istr = type2format[dtype] * jaindx3[item]
        if dtype == 'CHARACTER':
            # NOTE(review): character records are sized as count * 8 bytes
            # here, presumably 8 characters per stored word -- confirm
            # against the CFOUR JOBARC layout.
            bound = type2len[dtype] * jaindx3[item] * 8
            posf = poss + bound
            istr = str(bound) + 's'
        jobarc = struct.unpack(istr, fileContent[poss:posf])
        if verbose:
            if jaindx3[item] < 120:
                print(label, jaindx2[item], jaindx3[item], jobarc)
        poss = posf
        if label in reclabelarray:
            returnRecords[label] = jobarc
    return returnRecords
#if __name__ == "__main__":
# want = ['NATOMS ', 'AU_LENGT', 'COORD ', 'HBAR ', 'ATOMCHRG']
## got = get_jajo_record(want)
# got = getrec(want)
# for item in got.keys():
# print item, got[item]
| psi4/psi4 | psi4/driver/qcdb/jajo.py | Python | lgpl-3.0 | 17,941 | [
"Avogadro",
"CFOUR",
"Psi4"
] | 512bd1ce1f0e12904193b7288c716d912d79cf2b995eb5f68bf4708a5f694c9c |
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Polypeptide-related classes (construction and representation).
Simple example with multiple chains,
>>> from Bio.PDB.PDBParser import PDBParser
>>> from Bio.PDB.Polypeptide import PPBuilder
>>> structure = PDBParser().get_structure('2BEG', 'PDB/2BEG.pdb')
>>> ppb=PPBuilder()
>>> for pp in ppb.build_peptides(structure):
... print pp.get_sequence()
LVFFAEDVGSNKGAIIGLMVGGVVIA
LVFFAEDVGSNKGAIIGLMVGGVVIA
LVFFAEDVGSNKGAIIGLMVGGVVIA
LVFFAEDVGSNKGAIIGLMVGGVVIA
LVFFAEDVGSNKGAIIGLMVGGVVIA
Example with non-standard amino acids using HETATM lines in the PDB file,
in this case selenomethionine (MSE):
>>> from Bio.PDB.PDBParser import PDBParser
>>> from Bio.PDB.Polypeptide import PPBuilder
>>> structure = PDBParser().get_structure('1A8O', 'PDB/1A8O.pdb')
>>> ppb=PPBuilder()
>>> for pp in ppb.build_peptides(structure):
... print pp.get_sequence()
DIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNW
TETLLVQNANPDCKTILKALGPGATLEE
TACQG
If you want to, you can include non-standard amino acids in the peptides:
>>> for pp in ppb.build_peptides(structure, aa_only=False):
... print pp.get_sequence()
... print pp.get_sequence()[0], pp[0].get_resname()
... print pp.get_sequence()[-7], pp[-7].get_resname()
... print pp.get_sequence()[-6], pp[-6].get_resname()
MDIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNWMTETLLVQNANPDCKTILKALGPGATLEEMMTACQG
M MSE
M MSE
M MSE
In this case the selenomethionines (the first and also seventh and sixth from
last residues) have been shown as M (methionine) by the get_sequence method.
"""
import warnings
from Bio.Alphabet import generic_protein
from Bio.Seq import Seq
from Bio.SCOP.Raf import to_one_letter_code
from Bio.PDB.PDBExceptions import PDBException
from Bio.PDB.Residue import Residue, DisorderedResidue
from Bio.PDB.Vector import calc_dihedral, calc_angle
# The twenty standard amino acids in canonical order: position i of aa1
# (one-letter codes) corresponds to position i of aa3 (three-letter codes).
standard_aa_names=["ALA", "CYS", "ASP", "GLU", "PHE", "GLY", "HIS", "ILE", "LYS",
                   "LEU", "MET", "ASN", "PRO", "GLN", "ARG", "SER", "THR", "VAL",
                   "TRP", "TYR"]
aa1="ACDEFGHIKLMNPQRSTVWY"
aa3=standard_aa_names
# Forward and reverse lookup tables between the codes and their 0..19 index.
d1_to_index={}
dindex_to_1={}
d3_to_index={}
dindex_to_3={}
for idx, (one, three) in enumerate(zip(aa1, aa3)):
    d1_to_index[one]=idx
    dindex_to_1[idx]=one
    d3_to_index[three]=idx
    dindex_to_3[idx]=three
def index_to_one(index):
    """Return the one-letter amino acid code for a standard AA index.

    Raises KeyError for indices outside 0..19.

    >>> index_to_one(0)
    'A'
    >>> index_to_one(19)
    'Y'
    """
    return dindex_to_1[index]
def one_to_index(s):
    """Return the 0..19 index of a standard one-letter amino acid code.

    Raises KeyError for codes not in the standard twenty.

    >>> one_to_index('A')
    0
    >>> one_to_index('Y')
    19
    """
    return d1_to_index[s]
def index_to_three(i):
    """Return the three-letter amino acid code for a standard AA index.

    Raises KeyError for indices outside 0..19.

    >>> index_to_three(0)
    'ALA'
    >>> index_to_three(19)
    'TYR'
    """
    return dindex_to_3[i]
def three_to_index(s):
    """Return the 0..19 index of a standard three-letter amino acid code.

    Raises KeyError for codes not in the standard twenty.

    >>> three_to_index('ALA')
    0
    >>> three_to_index('TYR')
    19
    """
    return d3_to_index[s]
def three_to_one(s):
    """Translate a standard three-letter code into its one-letter code.

    >>> three_to_one('ALA')
    'A'
    >>> three_to_one('TYR')
    'Y'

    For non-standard amino acids, you get a KeyError:

    >>> three_to_one('MSE')
    Traceback (most recent call last):
       ...
    KeyError: 'MSE'
    """
    # Chain the two lookups; an unknown code raises KeyError immediately.
    return dindex_to_1[d3_to_index[s]]
def one_to_three(s):
    """Translate a standard one-letter code into its three-letter code.

    >>> one_to_three('A')
    'ALA'
    >>> one_to_three('Y')
    'TYR'
    """
    # Chain the two lookups; an unknown code raises KeyError immediately.
    return dindex_to_3[d1_to_index[s]]
def is_aa(residue, standard=False):
    """Return True if residue object/string is an amino acid.

    @param residue: a L{Residue} object OR a three letter amino acid code
    @type residue: L{Residue} or string

    @param standard: flag to check for the 20 AA (default false)
    @type standard: boolean

    >>> is_aa('ALA')
    True

    Known three letter codes for modified amino acids are supported,

    >>> is_aa('FME')
    True
    >>> is_aa('FME', standard=True)
    False
    """
    #TODO - What about special cases like XXX, can they appear in PDB files?
    if not isinstance(residue, basestring):
        # A Residue object was passed in; test its three-letter name.
        residue = residue.get_resname()
    residue = residue.upper()
    if standard:
        return residue in d3_to_index
    # Full table also covers known modified amino acids (e.g. MSE, FME).
    return residue in to_one_letter_code
class Polypeptide(list):
    """A polypeptide is simply a list of L{Residue} objects."""
    def get_ca_list(self):
        """Get list of C-alpha atoms in the polypeptide.

        Raises KeyError if any residue lacks a CA atom.

        @return: the list of C-alpha atoms
        @rtype: [L{Atom}, L{Atom}, ...]
        """
        ca_list=[]
        for res in self:
            ca=res["CA"]
            ca_list.append(ca)
        return ca_list
    def get_phi_psi_list(self):
        """Return the list of phi/psi dihedral angles.

        One (phi, psi) tuple per residue; either entry is None when the
        angle cannot be computed (missing atoms, or at the chain termini).
        Also stores the angles in each residue's xtra dict under "PHI"/"PSI".
        """
        ppl=[]
        lng=len(self)
        for i in range(0, lng):
            res=self[i]
            try:
                # The three backbone atoms of this residue define both angles.
                n=res['N'].get_vector()
                ca=res['CA'].get_vector()
                c=res['C'].get_vector()
            # NOTE(review): bare except -- also swallows KeyboardInterrupt etc.;
            # kept as-is because narrowing it would change behavior.
            except:
                # Some atoms are missing
                # Phi/Psi cannot be calculated for this residue
                ppl.append((None, None))
                res.xtra["PHI"]=None
                res.xtra["PSI"]=None
                continue
            # Phi: needs C of the *previous* residue (C(-1), N, CA, C).
            if i>0:
                rp=self[i-1]
                try:
                    cp=rp['C'].get_vector()
                    phi=calc_dihedral(cp, n, ca, c)
                except:
                    phi=None
            else:
                # No phi for residue 0!
                phi=None
            # Psi: needs N of the *next* residue (N, CA, C, N(+1)).
            if i<(lng-1):
                rn=self[i+1]
                try:
                    nn=rn['N'].get_vector()
                    psi=calc_dihedral(n, ca, c, nn)
                except:
                    psi=None
            else:
                # No psi for last residue!
                psi=None
            ppl.append((phi, psi))
            # Add Phi/Psi to xtra dict of residue
            res.xtra["PHI"]=phi
            res.xtra["PSI"]=psi
        return ppl
    def get_tau_list(self):
        """List of tau torsions angles for all 4 consecutive Calpha atoms.

        Each tau is also stored in the xtra dict (key "TAU") of the third
        residue of its quadruple. Returns an empty list for short chains.
        """
        ca_list=self.get_ca_list()
        tau_list=[]
        for i in range(0, len(ca_list)-3):
            atom_list = (ca_list[i], ca_list[i+1], ca_list[i+2], ca_list[i+3])
            v1, v2, v3, v4 = [a.get_vector() for a in atom_list]
            tau=calc_dihedral(v1, v2, v3, v4)
            tau_list.append(tau)
            # Put tau in xtra dict of residue
            res=ca_list[i+2].get_parent()
            res.xtra["TAU"]=tau
        return tau_list
    def get_theta_list(self):
        """List of theta angles for all 3 consecutive Calpha atoms.

        Each theta is also stored in the xtra dict (key "THETA") of the
        middle residue of its triple. Returns an empty list for short chains.
        """
        theta_list=[]
        ca_list=self.get_ca_list()
        for i in range(0, len(ca_list)-2):
            atom_list = (ca_list[i], ca_list[i+1], ca_list[i+2])
            v1, v2, v3 = [a.get_vector() for a in atom_list]
            theta=calc_angle(v1, v2, v3)
            theta_list.append(theta)
            # Put tau in xtra dict of residue
            res=ca_list[i+1].get_parent()
            res.xtra["THETA"]=theta
        return theta_list
    def get_sequence(self):
        """Return the AA sequence as a Seq object.

        Residue names not in the lookup table are rendered as 'X'.

        @return: polypeptide sequence
        @rtype: L{Seq}
        """
        s=""
        for res in self:
            # Unknown/unmapped residue names fall back to 'X'.
            s += to_one_letter_code.get(res.get_resname(), 'X')
        seq=Seq(s, generic_protein)
        return seq
    def __repr__(self):
        """Return string representation of the polypeptide.

        Return <Polypeptide start=START end=END>, where START
        and END are sequence identifiers of the outer residues.
        """
        start=self[0].get_id()[1]
        end=self[-1].get_id()[1]
        s="<Polypeptide start=%s end=%s>" % (start, end)
        return s
class _PPBuilder:
    """Base class to extract polypeptides.

    It checks if two consecutive residues in a chain are connected.
    The connectivity test is implemented by a subclass (_is_connected).
    This assumes you want both standard and non-standard amino acids.
    """
    def __init__(self, radius):
        """
        @param radius: distance cutoff used by the connectivity test
        @type radius: float
        """
        self.radius=radius
    def _accept(self, residue, standard_aa_only):
        """Check if the residue is an amino acid (PRIVATE)."""
        if is_aa(residue, standard=standard_aa_only):
            return True
        elif not standard_aa_only and "CA" in residue.child_dict:
            #It has an alpha carbon...
            #We probably need to update the hard coded list of
            #non-standard residues, see function is_aa for details.
            warnings.warn("Assuming residue %s is an unknown modified "
                          "amino acid" % residue.get_resname())
            return True
        else:
            # not a standard AA so skip
            return False
    def build_peptides(self, entity, aa_only=1):
        """Build and return a list of Polypeptide objects.

        @param entity: polypeptides are searched for in this object
        @type entity: L{Structure}, L{Model} or L{Chain}

        @param aa_only: if 1, the residue needs to be a standard AA
        @type aa_only: int
        """
        is_connected=self._is_connected
        accept=self._accept
        level=entity.get_level()
        # Decide which entity we are dealing with and collect its chains.
        if level=="S":
            model=entity[0]
            chain_list=model.get_list()
        elif level=="M":
            chain_list=entity.get_list()
        elif level=="C":
            chain_list=[entity]
        else:
            raise PDBException("Entity should be Structure, Model or Chain.")
        pp_list=[]
        for chain in chain_list:
            chain_it=iter(chain)
            # NOTE(review): .next() is the Python 2 iterator protocol,
            # consistent with the rest of this (Python 2) module.
            try:
                prev_res = chain_it.next()
                # Skip leading residues that are not (accepted) amino acids.
                while not accept(prev_res, aa_only):
                    prev_res = chain_it.next()
            except StopIteration:
                #No interesting residues at all in this chain
                continue
            pp=None
            # Walk the chain pairwise, growing the current peptide while
            # consecutive residues are accepted and spatially connected.
            for next_res in chain_it:
                if accept(prev_res, aa_only) \
                and accept(next_res, aa_only) \
                and is_connected(prev_res, next_res):
                    if pp is None:
                        # Start a new peptide with the pair's first residue.
                        pp=Polypeptide()
                        pp.append(prev_res)
                        pp_list.append(pp)
                    pp.append(next_res)
                else:
                    #Either too far apart, or one of the residues is unwanted.
                    #End the current peptide
                    pp=None
                prev_res=next_res
        return pp_list
class CaPPBuilder(_PPBuilder):
    """Find polypeptides by measuring the CA--CA distance of consecutive residues."""

    def __init__(self, radius=4.3):
        # Default CA--CA cutoff used by the connectivity test.
        _PPBuilder.__init__(self, radius)

    def _is_connected(self, prev_res, next_res):
        """Return True if the two residues have CA atoms within the cutoff (PRIVATE)."""
        # Without both alpha carbons there is nothing to measure.
        if not (prev_res.has_id("CA") and next_res.has_id("CA")):
            return False
        ca_prev = prev_res["CA"]
        ca_next = next_res["CA"]
        # A disordered atom contributes every alternate location;
        # an ordered atom contributes just itself.
        if ca_next.is_disordered():
            next_positions = ca_next.disordered_get_list()
        else:
            next_positions = [ca_next]
        if ca_prev.is_disordered():
            prev_positions = ca_prev.disordered_get_list()
        else:
            prev_positions = [ca_prev]
        # Connected as soon as any pair of positions is close enough.
        for cand_next in next_positions:
            for cand_prev in prev_positions:
                if (cand_next - cand_prev) < self.radius:
                    return True
        return False
class PPBuilder(_PPBuilder):
    """Use C--N distance to find polypeptides."""
    def __init__(self, radius=1.8):
        # Default cutoff for the C--N peptide-bond distance test.
        _PPBuilder.__init__(self, radius)
    def _is_connected(self, prev_res, next_res):
        """Return True if prev_res C and next_res N form a peptide bond (PRIVATE).

        Side effect: on success, selects the bonded altloc on disordered atoms
        via disordered_select().
        """
        # A peptide bond needs the C of the previous residue and the N of
        # the next one; without both atoms there is nothing to measure.
        if not prev_res.has_id("C"):
            return False
        if not next_res.has_id("N"):
            return False
        test_dist=self._test_dist
        c=prev_res["C"]
        n=next_res["N"]
        # Test all disordered atom positions!
        if c.is_disordered():
            clist=c.disordered_get_list()
        else:
            clist=[c]
        if n.is_disordered():
            nlist=n.disordered_get_list()
        else:
            nlist=[n]
        for nn in nlist:
            for cc in clist:
                # To form a peptide bond, N and C must be
                # within radius and have the same altloc
                # identifier or one altloc blank
                n_altloc=nn.get_altloc()
                c_altloc=cc.get_altloc()
                if n_altloc==c_altloc or n_altloc==" " or c_altloc==" ":
                    if test_dist(nn, cc):
                        # Select the disordered atoms that
                        # are indeed bonded
                        if c.is_disordered():
                            c.disordered_select(c_altloc)
                        if n.is_disordered():
                            n.disordered_select(n_altloc)
                        return True
        return False
    def _test_dist(self, c, n):
        """Return 1 if distance between atoms<radius (PRIVATE)."""
        # Atom subtraction yields the inter-atomic distance.
        if (c-n)<self.radius:
            return 1
        else:
            return 0
if __name__=="__main__":
    # Ad-hoc manual test (Python 2 print statements): parse the PDB file
    # named on the command line and print the peptides found by both
    # builders, plus the phi/psi angles from the C--N builder.
    import sys
    from Bio.PDB.PDBParser import PDBParser
    p=PDBParser(PERMISSIVE=True)
    s=p.get_structure("scr", sys.argv[1])
    ppb=PPBuilder()
    print "C-N"
    # Same peptides extracted at Structure, Model and Chain level.
    for pp in ppb.build_peptides(s):
        print pp.get_sequence()
    for pp in ppb.build_peptides(s[0]):
        print pp.get_sequence()
    for pp in ppb.build_peptides(s[0]["A"]):
        print pp.get_sequence()
    for pp in ppb.build_peptides(s):
        for phi, psi in pp.get_phi_psi_list():
            print phi, psi
    ppb=CaPPBuilder()
    print "CA-CA"
    for pp in ppb.build_peptides(s):
        print pp.get_sequence()
    for pp in ppb.build_peptides(s[0]):
        print pp.get_sequence()
    for pp in ppb.build_peptides(s[0]["A"]):
        print pp.get_sequence()
| bryback/quickseq | genescript/Bio/PDB/Polypeptide.py | Python | mit | 14,402 | [
"Biopython"
] | f67968d3f43e9e44dc0c942d4d39cf962b096d09afba6829431d2a046caf6095 |
"""
TESTS is a dict with all you tests.
Keys for this will be categories' names.
Each test is dict with
"input" -- input data for user function
"answer" -- your right answer
"explanation" -- not necessary key, it's using for additional info in animation.
"""
TESTS = {
"Basics": [
{
"input": [['Doreen', 'Fred', 'Yolanda'], [['Doreen', 'Fred']]],
"answer": [0,
[['Doreen', 'Fred', 'Yolanda'], [['Doreen', 'Fred']]]],
},
{
"input": [['Nelson', 'Kaitlin', 'Amelia', 'Jack'],
[['Kaitlin', 'Jack'], ['Nelson', 'Amelia']]],
"answer": [2,
[['Nelson', 'Kaitlin', 'Amelia', 'Jack'],
[['Kaitlin', 'Jack'], ['Nelson', 'Amelia']]]],
},
{
"input": [['Allison', 'Robin', 'Petra', 'Curtis', 'Bobbie', 'Kelly'],
[['Allison', 'Curtis'], ['Robin', 'Kelly']]],
"answer": [4,
[['Allison', 'Robin', 'Petra', 'Curtis', 'Bobbie', 'Kelly'],
[['Allison', 'Curtis'], ['Robin', 'Kelly']]]],
},
{
"input": [['Melisa', 'Dee', 'Annmarie', 'Gerald', 'Rafael'],
[['Melisa', 'Gerald'], ['Rafael', 'Annmarie']]],
"answer": [2,
[['Melisa', 'Dee', 'Annmarie', 'Gerald', 'Rafael'],
[['Melisa', 'Gerald'], ['Rafael', 'Annmarie']]]],
},
{
"input": [['Ricardo', 'Eugene', 'Delia', 'Delores', 'Ella', 'Kurt'],
[['Eugene', 'Ella'], ['Delores', 'Kurt'], ['Ricardo', 'Delia']]],
"answer": [4,
[['Ricardo', 'Eugene', 'Delia', 'Delores', 'Ella', 'Kurt'],
[['Eugene', 'Ella'], ['Delores', 'Kurt'], ['Ricardo', 'Delia']]]],
},
{
"input": [
['Loraine', 'Leah', 'Jenifer', 'Russell', 'Benjamin', 'Todd', 'Maryanne', 'Penny',
'Matthew'], [['Loraine', 'Benjamin'], ['Leah', 'Matthew'], ['Todd', 'Jenifer']]],
"answer": [6,
[['Loraine', 'Leah', 'Jenifer', 'Russell', 'Benjamin', 'Todd', 'Maryanne',
'Penny', 'Matthew'],
[['Loraine', 'Benjamin'], ['Leah', 'Matthew'], ['Todd', 'Jenifer']]]],
},
],
"Extra": [
{
"input": [['Alex', 'Monique', 'Tim', 'Robert', 'Joseph', 'Kitty', 'Eugenia', 'Tamika',
'Rene', 'Maggie'],
[['Kitty', 'Robert'], ['Tamika', 'Tim'], ['Joseph', 'Maggie'],
['Alex', 'Eugenia'], ['Monique', 'Rene']]],
"answer": [8,
[['Alex', 'Monique', 'Tim', 'Robert', 'Joseph', 'Kitty', 'Eugenia',
'Tamika', 'Rene', 'Maggie'],
[['Kitty', 'Robert'], ['Tamika', 'Tim'], ['Joseph', 'Maggie'],
['Alex', 'Eugenia'], ['Monique', 'Rene']]]],
},
{
"input": [['Dorothea', 'Vincent', 'Irene', 'Lula', 'Paulette', 'Bill', 'Virginia'],
[]],
"answer": [6,
[['Dorothea', 'Vincent', 'Irene', 'Lula', 'Paulette', 'Bill', 'Virginia'],
[]]],
},
{
"input": [
['Winnie', 'Stella', 'Estela', 'Gordon', 'Jacklyn', 'Lela', 'Barbra', 'Lavonne',
'Maurice'], [['Maurice', 'Lela']]],
"answer": [7,
[['Winnie', 'Stella', 'Estela', 'Gordon', 'Jacklyn', 'Lela', 'Barbra',
'Lavonne', 'Maurice'], [['Maurice', 'Lela']]]],
},
{
"input": [
['Carl', 'Esperanza', 'Tabitha', 'Fred', 'Dixie', 'Delores', 'Erica', 'Samuel',
'Erin', 'Amber'], [['Carl', 'Erica'], ['Delores', 'Fred']]],
"answer": [7,
[['Carl', 'Esperanza', 'Tabitha', 'Fred', 'Dixie', 'Delores', 'Erica',
'Samuel', 'Erin', 'Amber'], [['Carl', 'Erica'], ['Delores', 'Fred']]]],
},
{
"input": [
['Louis', 'Theodore', 'Eleanor', 'Sondra', 'David', 'Herbert', 'Fay', 'Alexandria',
'Meghan', 'Nettie', 'Autumn', 'June', 'Jane', 'Jeffery', 'Herminia', 'Jeannie',
'Lynnette'], [['Theodore', 'Meghan'], ['Herbert', 'Eleanor'], ['Louis', 'Autumn'],
['Nettie', 'David'], ['Jeffery', 'Fay']]],
"answer": [14,
[['Louis', 'Theodore', 'Eleanor', 'Sondra', 'David', 'Herbert', 'Fay',
'Alexandria', 'Meghan', 'Nettie', 'Autumn', 'June', 'Jane', 'Jeffery',
'Herminia', 'Jeannie', 'Lynnette'],
[['Theodore', 'Meghan'], ['Herbert', 'Eleanor'], ['Louis', 'Autumn'],
['Nettie', 'David'], ['Jeffery', 'Fay']]]],
},
]
}
| Bryukh-Checkio-Tasks/checkio-mission-family-gifts | verification/tests.py | Python | mit | 5,048 | [
"Amber"
] | ff7be966cb866c0045324dbe55ed68151b1f4427c2554f6a3e0b59c2d02c489f |
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
import os
import sys
import glob
import numpy
import string
from collections import Counter
import anvio
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__maintainer__ = "A. Murat Eren"
__email__ = "a.murat.eren@gmail.com"
__status__ = "Development"
# these are the atomic data that are generated for each contig profiled
# based on read recruitment results. anvio/contigops.py has the details:
essential_data_fields_for_anvio_profiles = ['std_coverage',
'mean_coverage',
'mean_coverage_Q2Q3',
'detection',
'abundance',
'variability']
# this is to distinguish fields that are often useless for clustering ops
# and other purposes
def IS_ESSENTIAL_FIELD(f):
    """Return True if `f` is a data field worth using in clustering/analysis.

    Filters out the auxiliary double-underscore fields and the bookkeeping
    columns ('contig', 'GC_content', 'length') that describe the contig
    rather than the read recruitment results.
    """
    # (was a lambda assignment; PEP 8 E731 prefers a named def for this)
    return (not f.startswith('__')) and (f not in ["contig", "GC_content", "length"])
default_pdb_database_path = os.path.join(os.path.dirname(anvio.__file__), 'data/misc/PDB.db')
default_modeller_database_dir = os.path.join(os.path.dirname(anvio.__file__), 'data/misc/MODELLER/db')
default_modeller_scripts_dir = os.path.join(os.path.dirname(anvio.__file__), 'data/misc/MODELLER/scripts')
default_interacdome_data_path = os.path.join(os.path.dirname(anvio.__file__), 'data/misc/Interacdome')
clustering_configs_dir = os.path.join(os.path.dirname(anvio.__file__), 'data/clusterconfigs')
clustering_configs = {}
default_scgs_taxonomy_data_dir = os.path.join(os.path.dirname(anvio.__file__), 'data/misc/SCG_TAXONOMY')
default_scgs_for_taxonomy = ['Ribosomal_S2',
'Ribosomal_S3_C',
'Ribosomal_S6',
'Ribosomal_S7',
'Ribosomal_S8',
'Ribosomal_S9',
'Ribosomal_S11',
'Ribosomal_S20p',
'Ribosomal_L1',
'Ribosomal_L2',
'Ribosomal_L3',
'Ribosomal_L4',
'Ribosomal_L6',
'Ribosomal_L9_C',
'Ribosomal_L13',
'Ribosomal_L16',
'Ribosomal_L17',
'Ribosomal_L20',
'Ribosomal_L21p',
'Ribosomal_L22',
'ribosomal_L24',
'Ribosomal_L27A']
default_hmm_source_for_scg_taxonomy = set(["Bacteria_71"])
default_trna_taxonomy_data_dir = os.path.join(os.path.dirname(anvio.__file__), 'data/misc/TRNA_TAXONOMY')
default_anticodons_for_taxonomy = ['AAA', 'AAC', 'AAG', 'AAT', 'ACA', 'ACC', 'ACG', 'ACT', 'AGA', 'AGC',
'AGG', 'AGT', 'ATA', 'ATC', 'ATG', 'ATT', 'CAA', 'CAC', 'CAG', 'CAT',
'CCA', 'CCC', 'CCG', 'CCT', 'CGA', 'CGC', 'CGG', 'CGT', 'CTC', 'CTG',
'CTT', 'GAA', 'GAC', 'GAG', 'GAT', 'GCA', 'GCC', 'GCG', 'GCT', 'GGA',
'GGC', 'GGG', 'GGT', 'GTA', 'GTC', 'GTG', 'GTT', 'TAA', 'TAC', 'TAG',
'TAT', 'TCC', 'TCG', 'TCT', 'TGA', 'TGC', 'TGG', 'TGT', 'TTC', 'TTG',
'TTT']
default_hmm_source_for_trna_genes = set(["Transfer_RNAs"])
# The following block of constants are used in the tRNA-seq workflow.
TRNA_FEATURE_NAMES = ['trna_his_position_0',
'acceptor_stem',
'fiveprime_acceptor_stem_sequence',
'position_8',
'position_9',
'd_arm',
'd_stem',
'fiveprime_d_stem_sequence',
'd_loop',
'threeprime_d_stem_sequence',
'position_26',
'anticodon_arm',
'anticodon_stem',
'fiveprime_anticodon_stem_sequence',
'anticodon_loop',
'threeprime_anticodon_stem_sequence',
'v_loop',
't_arm',
't_stem',
'fiveprime_t_stem_sequence',
't_loop',
'threeprime_t_stem_sequence',
'threeprime_acceptor_stem_sequence',
'discriminator',
'threeprime_terminus']
TRNA_SEED_FEATURE_THRESHOLD_CHOICES = TRNA_FEATURE_NAMES[TRNA_FEATURE_NAMES.index('acceptor_stem'): TRNA_FEATURE_NAMES.index('anticodon_loop') + 1]
TRNASEQ_CHECKPOINTS = ('profile', 'normalize', 'map_fragments', 'substitutions', 'indels')
default_port_number = int(os.environ['ANVIO_PORT']) if 'ANVIO_PORT' in os.environ else 8080
blank_default = "tnf"
single_default = "tnf"
merged_default = "tnf-cov"
pan_default = "presence-absence"
trnaseq_default = "cov"
default_gene_caller = "prodigal"
# see https://github.com/merenlab/anvio/issues/1358
gene_call_types = {'CODING': 1,
'NONCODING': 2,
'UNKNOWN': 3}
max_num_items_for_hierarchical_clustering = 20000
# max coverage depth to read from BAM files using pysam.
# this parameter also can be set later using command line parameters
# we use uint16 as dtype for numpy arrays when we work on & store coverages
# which has limit of 65536, so this constant needs to be smaller than that.
# If you change this value please change all dtypes.
# (This does not apply to the tRNA-seq workflow, which stores coverages as uint32.)
max_depth_for_coverage = 60000
# default methods for hierarchical cluster analyses
distance_metric_default = 'euclidean'
linkage_method_default = 'ward'
# The purpose of the `fetch_filters` dictionary below is to filter reads as they are
# read from BAM files especially during anvi'o profiling (the primary client of this
# dictionary is `anvio/bamops.py`). Essentially, any combination of the following
# properties defined in the `read` object returned by the `fetch` function of pysam
# can be added to this dictionary to create new filters that are then globally applied
# to 'fetched' reads during profiling to exclude those that return `false`:
#
# >>> 'aend', 'alen', 'aligned_pairs', 'bin', 'blocks', 'cigar', 'cigarstring', 'cigartuples',
# 'compare', 'flag', 'from_dict', 'fromstring', 'get_aligned_pairs', 'get_blocks', 'get_cigar_stats',
# 'get_forward_qualities', 'get_forward_sequence', 'get_overlap', 'get_reference_positions',
# 'get_reference_sequence', 'get_tag', 'get_tags', 'has_tag', 'header', 'infer_query_length',
# 'infer_read_length', 'inferred_length', 'is_duplicate', 'is_paired', 'is_proper_pair',
# 'is_qcfail', 'is_read1', 'is_read2', 'is_reverse', 'is_secondary', 'is_supplementary',
# 'is_unmapped', 'isize', 'mapping_quality', 'mapq', 'mate_is_reverse', 'mate_is_unmapped', 'mpos',
# 'mrnm', 'next_reference_id', 'next_reference_name', 'next_reference_start', 'opt', 'overlap', 'pnext',
# 'pos', 'positions', 'qend', 'qlen', 'qname', 'qqual', 'qstart', 'qual', 'query', 'query_alignment_end',
# 'query_alignment_length', 'query_alignment_qualities', 'query_alignment_sequence',
# 'query_alignment_start', 'query_length', 'query_name', 'query_qualities', 'query_sequence',
# 'reference_end', 'reference_id', 'reference_length', 'reference_name', 'reference_start', 'rlen',
# 'rname', 'rnext', 'seq', 'setTag', 'set_tag', 'set_tags', 'tags', 'template_length', 'tid', 'tlen',
# 'to_dict', 'to_string', 'tostring'
#
# Please note that these variable names may change across versions of pysam. See anvio/bamops.py for most
# up-to-date usage of these filters since we are terrible at updating comments elsewhere in the code after
# making significant changes to our modules :/
# Read-level filters applied while fetching reads from BAM files (see the
# explanatory comment block above and anvio/bamops.py for usage).
fetch_filters = {None: None,
                 # both mates aligned on the forward strand:
                 'double-forwards': lambda x: x.is_paired and not x.is_reverse and not x.mate_is_reverse and not x.mate_is_unmapped,
                 # both mates aligned on the reverse strand:
                 'double-reverses': lambda x: x.is_paired and x.is_reverse and x.mate_is_reverse and not x.mate_is_unmapped,
                 # NOTE(review): parentheses added around the two pair-orientation
                 # clauses. Previously `A or B and C` parsed as `A or (B and C)`,
                 # so the 2 kb template-length cutoff was applied only to the
                 # double-reverse case and every double-forward pair matched
                 # regardless of distance.
                 'inversions': lambda x: ((x.is_paired and not x.is_reverse and not x.mate_is_reverse and not x.mate_is_unmapped) or
                                          (x.is_paired and x.is_reverse and x.mate_is_reverse and not x.mate_is_unmapped)) and abs(x.tlen) < 2000,
                 # reads whose mate did not align anywhere:
                 'single-mapped-reads': lambda x: x.mate_is_unmapped,
                 # proper-ish pairs whose template length exceeds 1 kb:
                 'distant-pairs-1K': lambda x: x.is_paired and not x.mate_is_unmapped and abs(x.tlen) > 1000}
# Whether a cigarstring operation consumes the read, reference, or both
#
#Here are the possible bam operations.
#
# M BAM_CMATCH 0
# I BAM_CINS 1
# D BAM_CDEL 2
# N BAM_CREF_SKIP 3
# S BAM_CSOFT_CLIP 4
# H BAM_CHARD_CLIP 5
# P BAM_CPAD 6
# = BAM_CEQUAL 7
# X BAM_CDIFF 8
#
#Notes
#=====
#- A description of what possible cigar operations are possible, see
# https://imgur.com/a/fiQZXNg, which comes from here:
# https://samtools.github.io/hts-specs/SAMv1.pdf
cigar_consumption = numpy.array([
(1, 1),
(1, 0),
(0, 1),
(0, 1),
(1, 0),
(0, 0),
(0, 0),
(1, 1),
(1, 1),
])
# this is to have a common language across multiple modules when genomes (whether they are MAGs,
# SAGs, or isolate genomes):
essential_genome_info = ['gc_content', 'num_contigs', 'num_splits', 'total_length', 'num_genes', 'percent_completion', 'percent_redundancy',
'genes_are_called', 'avg_gene_length', 'num_genes_per_kb', ]
levels_of_taxonomy = ["t_domain", "t_phylum", "t_class", "t_order", "t_family", "t_genus", "t_species"]
levels_of_taxonomy_unknown = {"t_domain": 'Unknown_domains',
"t_phylum": 'Unknown_phyla',
"t_class": 'Unknown_classes',
"t_order": 'Unknown_orders',
"t_family": 'Unknown_families',
"t_genus": 'Unknown_genera',
"t_species": 'Unknown_species'}
# Sanity check at import time: every run type declared above must ship with
# its default clustering configuration file.
for run_type, default_config in [('single', single_default),
                                 ('merged', merged_default),
                                 ('trnaseq', trnaseq_default),
                                 ('blank', blank_default)]:
    if not os.path.exists(os.path.join(clustering_configs_dir, run_type, default_config)):
        print()
        print(f"Error: Although there is a run type defined in the anvi'o constants for \n"
              f"       '{run_type}', the default clustering configuration file for it, namely \n"
              f"       '{default_config}', is missing from the 'anvio/data/clusterconfigs' dir. \n"
              f"       If you are a developer and getting this error, please make sure the file \n"
              f"       is in anvi'o distribution. If you are a user and getting this error, it \n"
              f"       means something went terribly wrong with your installation :(\n")
        # Exit with a non-zero status so scripts and CI can detect the failure
        # (a bare sys.exit() previously reported success with exit code 0);
        # also fixed the dropped word ("it [means] something went ...").
        sys.exit(1)
# Discover every clustering config shipped with anvi'o, keyed first by the
# run-type directory name and then by the config file's base name.
# NOTE(review): the loop variable was renamed from `dir`, which shadowed the
# `dir()` builtin at module scope.
for config_dir_name in [d.strip('/').split('/')[-1] for d in glob.glob(os.path.join(clustering_configs_dir, '*/'))]:
    clustering_configs[config_dir_name] = {}
    for config in glob.glob(os.path.join(clustering_configs_dir, config_dir_name, '*')):
        clustering_configs[config_dir_name][os.path.basename(config)] = config
allowed_chars = string.ascii_letters + string.digits + '_' + '-' + '.'
digits = string.digits
complements = str.maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
unambiguous_nucleotides = set(list('ATCG'))
nucleotides = sorted(list(unambiguous_nucleotides)) + ['N']
WC_BASE_PAIRS = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
# In tRNA, wobble base pairing, including G/U, is common
WC_PLUS_WOBBLE_BASE_PAIRS = {
'A': ('T', ),
'T': ('A', 'G'),
'C': ('G', ),
'G': ('C', 'T')
}
AA_atomic_composition = {'Ala': Counter({"C":3, "H":7, "N":1, "O":2, "S":0}),
'Arg': Counter({"C":6, "H":14, "N":4, "O":2, "S":0}),
'Asn': Counter({"C":4, "H":8, "N":2, "O":3, "S":0}),
'Asp': Counter({"C":4, "H":7, "N":1, "O":4, "S":0}),
'Cys': Counter({"C":3, "H":7, "N":1, "O":2, "S":1}),
'Gln': Counter({"C":5, "H":10, "N":2, "O":3, "S":0}),
'Glu': Counter({"C":5, "H":9, "N":1, "O":4, "S":0}),
'Gly': Counter({"C":2, "H":5, "N":1, "O":2, "S":0}),
'His': Counter({"C":6, "H":9, "N":3, "O":2, "S":0}),
'Ile': Counter({"C":6, "H":13, "N":1, "O":2, "S":0}),
'Leu': Counter({"C":6, "H":13, "N":1, "O":2, "S":0}),
'Lys': Counter({"C":6, "H":14, "N":2, "O":2, "S":0}),
'Met': Counter({"C":5, "H":11, "N":1, "O":2, "S":1}),
'Phe': Counter({"C":9, "H":11, "N":1, "O":2, "S":0}),
'Pro': Counter({"C":5, "H":9, "N":1, "O":2, "S":0}),
'Ser': Counter({"C":3, "H":7, "N":1, "O":3, "S":0}),
'Thr': Counter({"C":4, "H":9, "N":1, "O":3, "S":0}),
'Trp': Counter({"C":11, "H":12, "N":2, "O":2, "S":0}),
'Tyr': Counter({"C":9, "H":11, "N":1, "O":3, "S":0}),
'Val': Counter({"C":5, "H":11, "N":1, "O":2, "S":0})}
# taken from http://prowl.rockefeller.edu/aainfo/volume.htm
# volume reference: A.A. Zamyatin, Protein Volume in Solution, Prog. Biophys. Mol. Biol. 24(1972)107-123.
# surface area reference: C. Chotia, The Nature of the Accessible and Buried Surfaces in Proteins, J. Mol. Biol., 105(1975)1-14.
AA_geometry = Counter({'Ala': {"volume":88.6, "area":115},
'Arg': {"volume":173.4, "area":225},
'Asn': {"volume":111.1, "area":150},
'Asp': {"volume":114.1, "area":160},
'Cys': {"volume":108.5, "area":135},
'Gln': {"volume":138.4, "area":190},
'Glu': {"volume":143.8, "area":180},
'Gly': {"volume":60.1, "area":75},
'His': {"volume":153.2, "area":195},
'Ile': {"volume":166.7, "area":175},
'Leu': {"volume":166.7, "area":170},
'Lys': {"volume":168.6, "area":200},
'Met': {"volume":162.9, "area":185},
'Phe': {"volume":189.9, "area":210},
'Pro': {"volume":112.7, "area":145},
'Ser': {"volume":89.0, "area":115},
'Thr': {"volume":116.1, "area":140},
'Trp': {"volume":227.8, "area":255},
'Tyr': {"volume":193.6, "area":230},
'Val': {"volume":140.0, "area":155}})
AA_to_codons = Counter({'Ala': ['GCA', 'GCC', 'GCG', 'GCT'],
'Arg': ['AGA', 'AGG', 'CGA', 'CGC', 'CGG', 'CGT'],
'Asn': ['AAC', 'AAT'],
'Asp': ['GAC', 'GAT'],
'Cys': ['TGC', 'TGT'],
'Gln': ['CAA', 'CAG'],
'Glu': ['GAA', 'GAG'],
'Gly': ['GGA', 'GGC', 'GGG', 'GGT'],
'His': ['CAC', 'CAT'],
'Ile': ['ATA', 'ATC', 'ATT'],
'Leu': ['CTA', 'CTC', 'CTG', 'CTT', 'TTA', 'TTG'],
'Lys': ['AAA', 'AAG'],
'Met': ['ATG'],
'Phe': ['TTC', 'TTT'],
'Pro': ['CCA', 'CCC', 'CCG', 'CCT'],
'STP': ['TAA', 'TAG', 'TGA'],
'Ser': ['AGC', 'AGT', 'TCA', 'TCC', 'TCG', 'TCT'],
'Thr': ['ACA', 'ACC', 'ACG', 'ACT'],
'Trp': ['TGG'],
'Tyr': ['TAC', 'TAT'],
'Val': ['GTA', 'GTC', 'GTG', 'GTT']})
AA_to_anticodons = Counter({'Ala': ['AGC', 'CGC', 'GGC', 'TGC'],
'Arg': ['ACG', 'CCG', 'CCT', 'GCG', 'TCG', 'TCT'],
'Asn': ['ATT', 'GTT'],
'Asp': ['ATC', 'GTC'],
'Cys': ['ACA', 'GCA'],
'Gln': ['CTG', 'TTG'],
'Glu': ['CTC', 'TTC'],
'Gly': ['ACC', 'CCC', 'GCC', 'TCC'],
'His': ['ATG', 'GTG'],
'Ile': ['AAT', 'GAT', 'TAT'],
'Leu': ['AAG', 'CAA', 'CAG', 'GAG', 'TAA', 'TAG'],
'Lys': ['CTT', 'TTT'],
'Met': ['CAT'],
'Phe': ['AAA', 'GAA'],
'Pro': ['AGG', 'CGG', 'GGG', 'TGG'],
'STP': ['CTA', 'TCA', 'TTA'],
'Ser': ['ACT', 'AGA', 'CGA', 'GCT', 'GGA', 'TGA'],
'Thr': ['AGT', 'CGT', 'GGT', 'TGT'],
'Trp': ['CCA'],
'Tyr': ['ATA', 'GTA'],
'Val': ['AAC', 'CAC', 'GAC', 'TAC']})
AA_to_single_letter_code = Counter({'Ala': 'A', 'Arg': 'R', 'Asn': 'N', 'Asp': 'D',
'Cys': 'C', 'Gln': 'Q', 'Glu': 'E', 'Gly': 'G',
'His': 'H', 'Ile': 'I', 'Leu': 'L', 'Lys': 'K',
'Met': 'M', 'Phe': 'F', 'Pro': 'P', 'STP': '*',
'Ser': 'S', 'Thr': 'T', 'Trp': 'W', 'Tyr': 'Y',
'Val': 'V'})
amino_acids = sorted(list(AA_to_single_letter_code.keys()))
codon_to_AA = Counter({'ATA': 'Ile', 'ATC': 'Ile', 'ATT': 'Ile', 'ATG': 'Met',
'ACA': 'Thr', 'ACC': 'Thr', 'ACG': 'Thr', 'ACT': 'Thr',
'AAC': 'Asn', 'AAT': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys',
'AGC': 'Ser', 'AGT': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg',
'CTA': 'Leu', 'CTC': 'Leu', 'CTG': 'Leu', 'CTT': 'Leu',
'CCA': 'Pro', 'CCC': 'Pro', 'CCG': 'Pro', 'CCT': 'Pro',
'CAC': 'His', 'CAT': 'His', 'CAA': 'Gln', 'CAG': 'Gln',
'CGA': 'Arg', 'CGC': 'Arg', 'CGG': 'Arg', 'CGT': 'Arg',
'GTA': 'Val', 'GTC': 'Val', 'GTG': 'Val', 'GTT': 'Val',
'GCA': 'Ala', 'GCC': 'Ala', 'GCG': 'Ala', 'GCT': 'Ala',
'GAC': 'Asp', 'GAT': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu',
'GGA': 'Gly', 'GGC': 'Gly', 'GGG': 'Gly', 'GGT': 'Gly',
'TCA': 'Ser', 'TCC': 'Ser', 'TCG': 'Ser', 'TCT': 'Ser',
'TTC': 'Phe', 'TTT': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',
'TAC': 'Tyr', 'TAT': 'Tyr', 'TAA': 'STP', 'TAG': 'STP',
'TGC': 'Cys', 'TGT': 'Cys', 'TGA': 'STP', 'TGG': 'Trp'})
anticodon_to_AA = Counter({'AAA': 'Phe', 'AAC': 'Val', 'AAG': 'Leu', 'AAT': 'Ile',
'ACA': 'Cys', 'ACC': 'Gly', 'ACG': 'Arg', 'ACT': 'Ser',
'AGA': 'Ser', 'AGC': 'Ala', 'AGG': 'Pro', 'AGT': 'Thr',
'ATA': 'Tyr', 'ATC': 'Asp', 'ATG': 'His', 'ATT': 'Asn',
'CAA': 'Leu', 'CAC': 'Val', 'CAG': 'Leu', 'CAT': 'Met',
'CCA': 'Trp', 'CCC': 'Gly', 'CCG': 'Arg', 'CCT': 'Arg',
'CGA': 'Ser', 'CGC': 'Ala', 'CGG': 'Pro', 'CGT': 'Thr',
'CTA': 'STP', 'CTC': 'Glu', 'CTG': 'Gln', 'CTT': 'Lys',
'GAA': 'Phe', 'GAC': 'Val', 'GAG': 'Leu', 'GAT': 'Ile',
'GCA': 'Cys', 'GCC': 'Gly', 'GCG': 'Arg', 'GCT': 'Ser',
'GGA': 'Ser', 'GGC': 'Ala', 'GGG': 'Pro', 'GGT': 'Thr',
'GTA': 'Tyr', 'GTC': 'Asp', 'GTG': 'His', 'GTT': 'Asn',
'TAA': 'Leu', 'TAC': 'Val', 'TAG': 'Leu', 'TAT': 'Ile',
'TCA': 'STP', 'TCC': 'Gly', 'TCG': 'Arg', 'TCT': 'Arg',
'TGA': 'Ser', 'TGC': 'Ala', 'TGG': 'Pro', 'TGT': 'Thr',
'TTA': 'STP', 'TTC': 'Glu', 'TTG': 'Gln', 'TTT': 'Lys'})
codon_to_codon_RC = Counter({'AAA': 'TTT', 'AAC': 'GTT', 'AAG': 'CTT', 'AAT': 'ATT',
'ACA': 'TGT', 'ACC': 'GGT', 'ACG': 'CGT', 'ACT': 'AGT',
'AGA': 'TCT', 'AGC': 'GCT', 'AGG': 'CCT', 'AGT': 'ACT',
'ATA': 'TAT', 'ATC': 'GAT', 'ATG': 'CAT', 'ATT': 'AAT',
'CAA': 'TTG', 'CAC': 'GTG', 'CAG': 'CTG', 'CAT': 'ATG',
'CCA': 'TGG', 'CCC': 'GGG', 'CCG': 'CGG', 'CCT': 'AGG',
'CGA': 'TCG', 'CGC': 'GCG', 'CGG': 'CCG', 'CGT': 'ACG',
'CTA': 'TAG', 'CTC': 'GAG', 'CTG': 'CAG', 'CTT': 'AAG',
'GAA': 'TTC', 'GAC': 'GTC', 'GAG': 'CTC', 'GAT': 'ATC',
'GCA': 'TGC', 'GCC': 'GGC', 'GCG': 'CGC', 'GCT': 'AGC',
'GGA': 'TCC', 'GGC': 'GCC', 'GGG': 'CCC', 'GGT': 'ACC',
'GTA': 'TAC', 'GTC': 'GAC', 'GTG': 'CAC', 'GTT': 'AAC',
'TAA': 'TTA', 'TAC': 'GTA', 'TAG': 'CTA', 'TAT': 'ATA',
'TCA': 'TGA', 'TCC': 'GGA', 'TCG': 'CGA', 'TCT': 'AGA',
'TGA': 'TCA', 'TGC': 'GCA', 'TGG': 'CCA', 'TGT': 'ACA',
'TTA': 'TAA', 'TTC': 'GAA', 'TTG': 'CAA', 'TTT': 'AAA'})
# Amino acid groupings used for conserved-substitution logic. The first seven
# entries are chemical families; 'B', 'Z' and 'J' cover the IUPAC ambiguity
# codes, and 'None' is the catch-all for residues with no family.
conserved_amino_acid_groups = {
    'Nonpolar': ['L', 'V', 'I', 'M', 'C', 'H', 'A'],
    'Aromatic': ['F', 'W', 'Y'],
    'Bases': ['K', 'R', 'H'],
    'Neutral Amines': ['Q', 'N'],
    'Acids': ['D', 'E'],
    'Polar and Nonpolar': ['H', 'Y'],
    'Mostly nonpolar': ['S', 'T'],
    'B': ['B', 'N', 'D'],
    'Z': ['Z', 'Q', 'E'],
    'J': ['J', 'L', 'I'],
    'None': []
}

# Derived groups: each single-letter entry extends a chemical family with the
# ambiguity code that can stand in for it.
conserved_amino_acid_groups['N'] = conserved_amino_acid_groups['Neutral Amines'] + ['B']
conserved_amino_acid_groups['D'] = conserved_amino_acid_groups['Acids'] + ['B']
conserved_amino_acid_groups['Q'] = conserved_amino_acid_groups['Neutral Amines'] + ['Z']
conserved_amino_acid_groups['E'] = conserved_amino_acid_groups['Acids'] + ['Z']
conserved_amino_acid_groups['LI'] = conserved_amino_acid_groups['Nonpolar'] + ['J']

# Reverse mapping: single-letter residue code -> name of the group whose
# members count as conservative replacements for it.
amino_acid_property_group = {}
for _group_name, _members in [('Nonpolar', ['A', 'V', 'M', 'C']),
                              ('Aromatic', ['F', 'W']),
                              ('Bases', ['K', 'R']),
                              ('Polar and Nonpolar', ['H', 'Y']),
                              ('Mostly nonpolar', ['S', 'T']),
                              ('None', ['G', 'P', 'X'])]:
    for _aa in _members:
        amino_acid_property_group[_aa] = _group_name
# Ambiguity codes and residues with dedicated derived groups map to the
# group named after themselves.
for _aa in ['B', 'Z', 'J', 'N', 'D', 'Q', 'E']:
    amino_acid_property_group[_aa] = _aa
# Leucine and isoleucine share the 'LI' group (both are 'J'-ambiguous).
amino_acid_property_group['L'] = 'LI'
amino_acid_property_group['I'] = 'LI'
# All 64 codons in sorted order, and the 61 that actually code for an
# amino acid (i.e. everything but the three stop codons).
codons = sorted(set(codon_to_AA.keys()))
coding_codons = [codon for codon in codons if codon_to_AA[codon] != "STP"]

# is_synonymous[c1][c2] is True when both coding codons translate to the
# same amino acid.
is_synonymous = {c1: {c2: codon_to_AA[c1] == codon_to_AA[c2] for c2 in coding_codons}
                 for c1 in coding_codons}
# Optional mapping of internal keys to human-readable display names; keys
# without an entry are shown verbatim.
pretty_names = {}

def get_pretty_name(key):
    """Return the display name registered for `key`, or `key` itself."""
    return pretty_names.get(key, key)
def get_nt_to_num_lookup(d):
    """Build a uint8 array mapping ord(nucleotide) -> its ordinal in `d`.

    Positions that do not correspond to any character in `d` hold 5, the
    sentinel for "not a recognized nucleotide".
    """
    ascii_to_order = {ord(nt): order for nt, order in d.items()}
    lookup = numpy.full(max(ascii_to_order) + 1, 5, dtype=numpy.uint8)
    for ascii_code, order in ascii_to_order.items():
        lookup[ascii_code] = order
    return lookup
def get_codon_to_num_lookup(reverse_complement=False):
    """Build a 3-D uint8 array mapping (ord(nt1), ord(nt2), ord(nt3)) -> codon index.

    The index refers to the position of the codon (or, when
    `reverse_complement` is True, of its reverse complement) in the sorted
    module-level `codons` list. Cells that do not spell a codon hold 64.
    """
    nt_ascii = sorted(ord(nt) for nt in unambiguous_nucleotides)
    dim = max(nt_ascii) + 1
    lookup = numpy.full((dim, dim, dim), 64, dtype=numpy.uint8)
    num_to_codon = dict(enumerate(codons))
    if reverse_complement:
        num_to_codon = {num: codon_to_codon_RC[codon] for num, codon in num_to_codon.items()}
    ascii_triplet_to_num = {tuple(ord(nt) for nt in codon): num
                            for num, codon in num_to_codon.items()}
    for first in nt_ascii:
        for second in nt_ascii:
            for third in nt_ascii:
                lookup[first, second, third] = ascii_triplet_to_num[(first, second, third)]
    return lookup
# See utils.nt_seq_to_codon_num_array etc. for utilization of these lookup arrays
# Single-nucleotide lookups: index by ord(char) to get the nucleotide's
# ordinal (forward: A/C/G/T -> 0..3; RC: complemented ordinals), with N -> 4
# and any unrecognized character -> 5.
nt_to_num_lookup = get_nt_to_num_lookup({'A': 0, 'C': 1, 'G': 2, 'T': 3, 'N': 4})
nt_to_RC_num_lookup = get_nt_to_num_lookup({'A': 3, 'C': 2, 'G': 1, 'T': 0, 'N': 4})
# Codon lookups: index by (ord(nt1), ord(nt2), ord(nt3)) to get the codon's
# index in `codons` (or its reverse complement's index); non-codon cells
# hold the sentinel 64.
codon_to_num_lookup = get_codon_to_num_lookup(reverse_complement=False)
codon_to_RC_num_lookup = get_codon_to_num_lookup(reverse_complement=True)
# anvi'o news stuff
anvio_news_url = "https://raw.githubusercontent.com/merenlab/anvio/master/NEWS.md"
| meren/anvio | anvio/constants.py | Python | gpl-3.0 | 25,794 | [
"pysam"
] | 5ad9a220852e715dd3a58d35fac6f94ec81326bed043bbc3a0e1664bbcb408f6 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`openlyricsexport` module provides the functionality for exporting
songs from the database to the OpenLyrics format.
"""
import logging
import os
from lxml import etree
from openlp.core.lib import Registry, check_directory_exists, translate
from openlp.core.utils import clean_filename
from openlp.plugins.songs.lib.xml import OpenLyrics
log = logging.getLogger(__name__)
class OpenLyricsExport(object):
    """
    This provides the Openlyrics export: one OpenLyrics XML file is written
    per song into the chosen directory.
    """
    def __init__(self, parent, songs, save_path):
        """
        Initialise the export.

        ``parent`` is the export wizard widget (supplies the progress bar,
        the stop flag and the plugin's song manager), ``songs`` the list of
        song objects to export, and ``save_path`` the target directory,
        which is created if it does not exist.
        """
        log.debug('initialise OpenLyricsExport')
        self.parent = parent
        self.manager = parent.plugin.manager
        self.songs = songs
        self.save_path = save_path
        check_directory_exists(self.save_path)

    def do_export(self):
        """
        Export the songs. Returns ``False`` if the user aborted the export,
        ``True`` once all songs have been written.
        """
        log.debug('started OpenLyricsExport')
        open_lyrics = OpenLyrics(self.manager)
        self.parent.progress_bar.setMaximum(len(self.songs))
        for song in self.songs:
            self.application.process_events()
            if self.parent.stop_export_flag:
                return False
            self.parent.increment_progress_bar(translate('SongsPlugin.OpenLyricsExport', 'Exporting "%s"...') %
                song.title)
            xml = open_lyrics.song_to_xml(song)
            tree = etree.ElementTree(etree.fromstring(xml.encode()))
            filename = '%s (%s)' % (song.title, ', '.join([author.display_name for author in song.authors]))
            filename = clean_filename(filename)
            # Ensure the filename isn't too long for some filesystems
            filename = '%s.xml' % filename[0:250 - len(self.save_path)]
            # Pass a file object, because lxml does not cope with some special
            # characters in the path (see lp:757673 and lp:744337).
            # NOTE(review): the file object is now closed deterministically via
            # a context manager; previously the handle was left open for the
            # garbage collector to reclaim.
            with open(os.path.join(self.save_path, filename), 'wb') as xml_file:
                tree.write(xml_file, encoding='utf-8', xml_declaration=True, pretty_print=True)
        return True

    def _get_application(self):
        """
        Adds the openlp to the class dynamically.
        Windows needs to access the application in a dynamic manner.
        """
        if os.name == 'nt':
            # Windows: always fetch a fresh reference from the registry.
            return Registry().get('application')
        else:
            # Elsewhere a cached reference is fine.
            if not hasattr(self, '_application'):
                self._application = Registry().get('application')
            return self._application

    application = property(_get_application)
| marmyshev/item_title | openlp/plugins/songs/lib/openlyricsexport.py | Python | gpl-2.0 | 4,641 | [
"Brian"
] | 8e68a0e2a3f63e9a63004412ce1d0c8ce0e6141c692df6755f337f9e0a13f132 |
''' Test_RSS_Policy_JobRunningMatchedRatioPolicy
'''
import unittest
import DIRAC.ResourceStatusSystem.Policy.JobRunningMatchedRatioPolicy as moduleTested
################################################################################
class JobRunningMatchedRatioPolicy_TestCase( unittest.TestCase ):
  """Shared fixture: exposes the tested module and the policy class."""

  def setUp( self ):
    """Bind the module under test and the policy class for each test."""
    self.moduleTested = moduleTested
    self.testClass = self.moduleTested.JobRunningMatchedRatioPolicy

  def tearDown( self ):
    """Drop the references created in setUp."""
    del self.moduleTested
    del self.testClass
################################################################################
class JobRunningMatchedRatioPolicy_Success( JobRunningMatchedRatioPolicy_TestCase ):
  # Exercises the happy paths of JobRunningMatchedRatioPolicy._evaluate for
  # every branch: error propagation, missing/empty payloads, and the
  # Banned/Degraded/Active thresholds on the Running/(Running+Matched+
  # Received+Checking) ratio.
  def test_instantiate( self ):
    ''' tests that we can instantiate one object of the tested class
    '''
    module = self.testClass()
    self.assertEqual( 'JobRunningMatchedRatioPolicy', module.__class__.__name__ )
  def test_evaluate( self ):
    ''' tests the method _evaluate
    '''
    module = self.testClass()
    # A failed command result is surfaced as an 'Error' status with the
    # original message as the reason.
    res = module._evaluate( { 'OK' : False, 'Message' : 'Bo!' } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Error', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Bo!', res[ 'Value' ][ 'Reason' ] )
    # No payload at all: the policy cannot take a decision.
    res = module._evaluate( { 'OK' : True, 'Value' : None } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
    # An empty result list is treated the same as no payload.
    res = module._evaluate( { 'OK' : True, 'Value' : [] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
    # A list with an empty dict also carries no usable values.
    res = module._evaluate( { 'OK' : True, 'Value' : [{}] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
    # All counters zero: too few jobs to judge the site.
    res = module._evaluate( { 'OK' : True, 'Value' : [{'Running' : 0, 'Matched' : 0,
                                                      'Received': 0, 'Checking' : 0 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Not enough jobs to take a decision', res[ 'Value' ][ 'Reason' ] )
    # Two jobs in total is still below the decision threshold.
    res = module._evaluate( { 'OK' : True, 'Value' : [{'Running' : 1, 'Matched' : 1,
                                                      'Received': 0, 'Checking' : 0 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Not enough jobs to take a decision', res[ 'Value' ][ 'Reason' ] )
    # Ratio 10/20 = 0.50: low enough to ban the site.
    res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Running' : 10, 'Matched' : 10,
                                                       'Received': 0, 'Checking' : 0 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Banned', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Job Running / Matched ratio of 0.50', res[ 'Value' ][ 'Reason' ] )
    # Ratio 7/10 = 0.70: degraded but not banned.
    res = module._evaluate( { 'OK' : True, 'Value' : [{'Running' : 7, 'Matched' : 1,
                                                      'Received': 1, 'Checking' : 1 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Degraded', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Job Running / Matched ratio of 0.70', res[ 'Value' ][ 'Reason' ] )
    # Ratio 70/70 = 1.00: everything running, the site is active.
    res = module._evaluate( { 'OK' : True, 'Value' : [{'Running' : 70, 'Matched' : 0,
                                                      'Received': 0, 'Checking' : 0 }] } )
    self.assertTrue(res['OK'])
    self.assertEqual( 'Active', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'Job Running / Matched ratio of 1.00', res[ 'Value' ][ 'Reason' ] )
################################################################################
if __name__ == '__main__':
  # Build one suite from the fixture base class (which contributes no test
  # methods of its own) plus the Success class holding the actual tests,
  # then run it with verbose output.
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( JobRunningMatchedRatioPolicy_TestCase )
  suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( JobRunningMatchedRatioPolicy_Success ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| Andrew-McNab-UK/DIRAC | ResourceStatusSystem/Policy/test/Test_RSS_Policy_JobRunningMatchedRatioPolicy.py | Python | gpl-3.0 | 4,283 | [
"DIRAC"
] | f4c21000ca49684c76fde1b0aacc70e062d6c9a7da3bd9d1404ecdf6775adb05 |
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Physicochemical constants in atomic units
These are the physical constants defined in this module (in atomic units):
'''
# Physicochemical constants expressed in atomic units.
boltzmann = 3.1668154051341965e-06
avogadro = 6.0221415e23
lightspeed = 137.03599975303575
planck = 6.2831853071795864769

# automatically spice up the docstrings: append a reST table listing every
# float constant defined above to the module docstring.
lines = [
    ' ================ ==================',
    ' Name Value ',
    ' ================ ==================',
]
# NOTE(review): `.items()` replaces the Python-2-only `.iteritems()`. Under
# Python 2 `sorted()` consumes either identically; under Python 3 `iteritems`
# no longer exists and would break the import.
for key, value in sorted(globals().items()):
    if not isinstance(value, float):
        continue
    lines.append(' %16s %.10e' % (key, value))
lines.append(' ================ ==================')
__doc__ += '\n'.join(lines)
| QuantumElephant/horton | horton/constants.py | Python | gpl-3.0 | 1,499 | [
"Avogadro"
] | 70f086f8f1643bee145f8ad88bb70d4d07c7084ee555aa8b5570d6ad15fad31c |
# File: subst.py
# Author: Brian A. Vanderburg II
# Purpose: A generic SCons file substitution mechanism
# Copyright: This file is placed in the public domain.
##############################################################################
# Requirements
##############################################################################
import re
from SCons.Script import *
import SCons.Errors
# Helper/core functions
##############################################################################
# Do the substitution
def _subst_file(target, source, env, pattern, replace):
    """Substitute matches of `pattern` in `source` and write the result to `target`.

    `replace(env, match_object)` is called for every match and must return a
    string; anything else raises a UserError.
    """
    # Read the whole source file at once; substitution templates are small.
    # NOTE(review): file handles are now managed with `with`, and the mode
    # changed from "rU" to "r" -- the "U" flag is a no-op on Python 3 and
    # was removed entirely in 3.11.
    with open(source, "r") as f:
        contents = f.read()

    # Substitute, making sure each replacement result is a string.
    def subfn(mo):
        value = replace(env, mo)
        if not SCons.Util.is_String(value):
            raise SCons.Errors.UserError("Substitution must be a string.")
        return value
    contents = re.sub(pattern, subfn, contents)

    # Write the substituted contents back out.
    with open(target, "wt") as f:
        f.write(contents)
# Determine which keys are used
def _subst_keys(source, pattern):
    """Return the non-empty `key` group values matched by `pattern` in `source`.

    Keys appear in file order; an empty key (e.g. the "@@" escape) is skipped.
    """
    # NOTE(review): "rU" replaced by "r" (the "U" mode flag was removed in
    # Python 3.11), and the file handle is managed with `with`.
    with open(source, "r") as f:
        contents = f.read()

    # re.finditer walks the same non-overlapping matches re.sub would have
    # visited, without building a throwaway substituted string as the old
    # side-effecting re.sub(callback) approach did.
    return [mo.group("key") for mo in re.finditer(pattern, contents) if mo.group("key")]
# Get the value of a key as a string, or None if it is not in the environment
def _subst_value(env, key):
    """Return env's value for `key` rendered as a string, or None if absent."""
    # Presence is probed by indexing rather than `in`, because construction
    # environments behave oddly with membership tests here (original note:
    # '"if key in env" results in "KeyError: 0:"').
    try:
        env[key]
    except KeyError:
        return None
    else:
        # env.subst renders the value as a string even when it is stored as
        # a number, e.g. env['HAVE_XYZ'] = 1.
        return env.subst("${%s}" % key)
# Builder related functions
##############################################################################
# Builder action
def _subst_action(target, source, env):
    """Builder action: run the substitution for every (target, source) pair."""
    pattern, replace = env["SUBST_PATTERN"], env["SUBST_REPLACE"]
    for tgt, src in zip(target, source):
        _subst_file(str(tgt), str(src), env, pattern, replace)
    # 0 signals success to SCons.
    return 0
# Builder message
def _subst_message(target, source, env):
    """Builder message: one descriptive line per (target, source) pair."""
    return "\n".join("Substituting vars from %s to %s" % (src, tgt)
                     for tgt, src in zip(target, source))
# Builder dependency emitter
def _subst_emitter(target, source, env):
    """Builder emitter: make each target depend on the env values it uses."""
    pattern = env["SUBST_PATTERN"]
    for tgt, src in zip(target, source):
        # With variant dirs and source duplication enabled, the duplicated
        # source may not exist yet when the emitter runs, so the original
        # source node must be inspected instead.
        path = src.srcnode().abspath
        # Collect the env values for every key referenced in the source.
        value_map = {}
        for key in _subst_keys(path, pattern):
            value = _subst_value(env, key)
            if value is not None:
                value_map[key] = value
        # Only this particular target depends on this value dictionary.
        Depends(tgt, SCons.Node.Python.Value(value_map))
    return target, source
# Replace @key@ with the value of that key, and @@ with a single @
##############################################################################
_SubstFile_pattern = "@(?P<key>\w*?)@"
def _SubstFile_replace(env, mo):
    """Replacement callback for SubstFile: expand @key@ to its value.

    An empty key (the "@@" escape) yields a literal "@"; an unknown key
    raises an SCons UserError.
    """
    key = mo.group("key")
    if key:
        value = _subst_value(env, key)
        if value is None:
            raise SCons.Errors.UserError("Error: key %s does not exist" % key)
        return value
    return "@"
def SubstFile(env, target, source):
    """Pseudo-builder: substitute @key@ patterns from *source* into *target*."""
    return env.SubstGeneric(
        target, source,
        SUBST_PATTERN=_SubstFile_pattern,
        SUBST_REPLACE=_SubstFile_replace)
# A substitutor similar to config.h header substitution
# Supported patterns are:
#
# Pattern: #define @key@
# Found: #define key value
# Missing: /* #define key */
#
# Pattern: #define @key@ default
# Found: #define key value
# Missing: #define key default
#
# Pattern: #undef @key@
# Found: #define key value
# Missing: #undef key
#
# The "@" is used to that these defines can be used in addition to
# other defines that you do not desire to be replaced.
##############################################################################
_SubstHeader_pattern = "(?m)^(?P<space>\\s*?)(?P<type>#define|#undef)\\s+?@(?P<key>\w+?)@(?P<ending>.*?)$"
def _SubstHeader_replace(env, mo):
    """Replacement callback for SubstHeader.

    Rewrites a "#define @key@ [default]" or "#undef @key@" line depending
    on whether *key* is defined in the environment (see the pattern table
    in the comment block above).
    """
    space = mo.group("space")
    directive = mo.group("type")  # renamed: "type" shadows the builtin
    key = mo.group("key")
    ending = mo.group("ending")
    value = _subst_value(env, key)
    if value is not None:  # idiomatic form of "not value is None"
        # If found it is always "#define key value".
        return "%s#define %s %s" % (space, key, value)
    # Not found.
    if directive == "#define":
        defval = ending.strip()
        if defval:
            # There is a default value.
            return "%s#define %s %s" % (space, key, defval)
        else:
            # There is no default value.
            return "%s/* #define %s */" % (space, key)
    # It was "#undef".
    return "%s#undef %s" % (space, key)
def SubstHeader(env, target, source):
    """Pseudo-builder: config.h-style substitution of #define/#undef lines."""
    return env.SubstGeneric(
        target, source,
        SUBST_PATTERN=_SubstHeader_pattern,
        SUBST_REPLACE=_SubstHeader_replace)
# Create builders
##############################################################################
def TOOL_SUBST(env):
    """Install the substitution builders into the construction environment."""
    # The generic builder, parameterized by SUBST_PATTERN / SUBST_REPLACE.
    generic_action = SCons.Action.Action(_subst_action, _subst_message)
    env['BUILDERS']['SubstGeneric'] = Builder(action=generic_action,
                                              emitter=_subst_emitter)
    # Convenience wrappers built on top of SubstGeneric.
    env.AddMethod(SubstFile)
    env.AddMethod(SubstHeader)
| salilab/mdt | tools/subst.py | Python | gpl-2.0 | 5,887 | [
"Brian"
] | 80ee8e2a82d64704fd80050c6802603d0bc9fb625ddbed2fadb7434fe51627da |
# global imports
import cython
import logging
import itertools
import sys
# local imports
try:
import openbabel
except:
pass
from rdkit import Chem
from .molecule import Atom, Bond, Molecule
from .pathfinder import compute_atom_distance
from .util import partition, agglomerate, generate_combo
import rmgpy.molecule.adjlist as adjlist
import rmgpy.molecule.inchi as inchiutil
import rmgpy.molecule.resonance as resonance
# global variables:
#: This dictionary is used to shortcut lookups of a molecule's SMILES string from its chemical formula.
# Shortcut table used by toSMILES(): molecular formula -> canonical SMILES
# for common closed-shell molecules, avoiding a full RDKit/OpenBabel round
# trip.  Keys must be unambiguous formulas (exactly one structure).
_known_smiles_molecules = {
    'N2': 'N#N',
    'CH4': 'C',
    'H2O': 'O',
    'C2H6': 'CC',
    'H2': '[H][H]',
    'H2O2': 'OO',
    'C3H8': 'CCC',
    'Ar': '[Ar]',
    'He': '[He]',
    'CH4O': 'CO',
    'CO2': 'O=C=O',
    'CO': '[C-]#[O+]',
    'C2H4': 'C=C',
    'O2': 'O=O'
}
# Shortcut table used by toSMILES() for radical species: molecular formula
# -> SMILES.  Formulas whose radical structure is ambiguous are deliberately
# left out (see the commented entries below).
_known_smiles_radicals = {
    'CH3': '[CH3]',
    'HO': '[OH]',
    'C2H5': 'C[CH2]',
    'O': '[O]',
    'HO2': '[O]O',
    'CH': '[CH]',
    'H': '[H]',
    'C': '[C]',
    #'CO2': it could be [O][C][O] or O=[C][O]
    #'CO': '[C]=O', could also be [C][O]
    #'C2H4': could be [CH3][CH] or [CH2][CH2]
    'O2': '[O][O]',
}
def toInChI(mol):
    """
    Convert a molecular structure to an InChI string.

    Tries `RDKit <http://rdkit.org/>`_ first (perceives aromaticity); if
    the RDKit conversion fails for any reason, falls back to
    `OpenBabel <http://openbabel.org/>`_.
    """
    try:
        if not Chem.inchi.INCHI_AVAILABLE:
            return "RDKitInstalledWithoutInChI"
        rdkitmol = toRDKitMol(mol)
        return Chem.inchi.MolToInchi(rdkitmol, options='-SNon')
    except Exception:
        # Narrowed from a bare "except:" so that SystemExit and
        # KeyboardInterrupt are not swallowed; any RDKit failure falls
        # through to the OpenBabel path below.
        pass
    obmol = toOBMol(mol)
    obConversion = openbabel.OBConversion()
    obConversion.SetOutFormat('inchi')
    obConversion.SetOptions('w', openbabel.OBConversion.OUTOPTIONS)
    return obConversion.WriteString(obmol).strip()
def create_U_layer(mol, auxinfo):
    """
    Creates a string with the positions of the atoms that bear unpaired electrons. The string
    can be used to complement the InChI with an additional layer that allows for the differentiation
    between structures with multiple unpaired electrons.

    The string is composed of a prefix ('u') followed by the positions of each of the unpaired electrons,
    sorted in numerical order.

    Example:
    - methyl radical ([CH3]) : u1
    - triplet methylene biradical ([CH2]) : u1,1
    - ethane-1,2-diyl biradical ([CH2][CH2]): u1,2

    When the molecule does not bear any unpaired electrons, None is returned.

    :param mol: the Molecule to analyze
    :param auxinfo: InChI auxiliary information string; its E-layer lists
        atoms the InChI algorithm cannot distinguish
    :return: u-layer string, or None for closed-shell molecules
    """
    cython.declare(
        minmol=Molecule,
        #rdkitmol=,
        u_layer=list,
        i=int,
        at=Atom,
        equivalent_atoms=list,
    )
    if mol.getRadicalCount() == 0:
        # Closed-shell molecule: no u-layer needed.
        return None
    elif mol.getFormula() == 'H':
        # Special case: atomic hydrogen is itself a radical at position 1.
        return inchiutil.U_LAYER_PREFIX + '1'
    # find the resonance isomer with the lowest u index:
    minmol = generate_minimum_resonance_isomer(mol)
    # create preliminary u-layer: one 1-based index per unpaired electron
    # (an atom carrying two unpaired electrons appears twice)
    u_layer = []
    for i, at in enumerate(minmol.atoms):
        u_layer.extend([i+1] * at.radicalElectrons)
    # extract equivalent atom pairs from E-layer of auxiliary info:
    equivalent_atoms = inchiutil.parse_E_layer(auxinfo)
    if equivalent_atoms:
        # select the numerically lowest u-layer among permutations of
        # equivalent atoms:
        u_layer = find_lowest_u_layer(minmol, u_layer, equivalent_atoms)
    return (inchiutil.U_LAYER_PREFIX + ','.join(map(str, u_layer)))
def toAugmentedInChI(mol):
    """
    This function generates the augmented InChI canonical identifier, and that allows for the differentiation
    between structures with spin states and multiple unpaired electrons.

    Two additional layers are added to the InChI:
    - unpaired electrons layer (u-layer): the position of the unpaired electrons in the molecule
    - lone pairs layer (p-layer): atoms with an unexpected number of lone pairs
    """
    cython.declare(
        inchi=str,
        ulayer=str,
        aug_inchi=str,
    )
    # NOTE: toInChI is called before create_augmented_layers; toInChI goes
    # through toRDKitMol, which sorts mol's atoms in place, so the call
    # order here matters.
    inchi = toInChI(mol)
    ulayer, player = create_augmented_layers(mol)
    aug_inchi = inchiutil.compose_aug_inchi(inchi, ulayer, player)
    return aug_inchi
def toInChIKey(mol):
    """
    Convert a molecular structure to an InChI Key string.

    Tries `RDKit <http://rdkit.org/>`_ first; if that fails for any
    reason, falls back to `OpenBabel <http://openbabel.org/>`_.

    Removes the check-sum dash (-) and character so that only
    the 14 + 9 characters remain.
    """
    try:
        if not Chem.inchi.INCHI_AVAILABLE:
            return "RDKitInstalledWithoutInChI"
        inchi = toInChI(mol)
        return Chem.inchi.InchiToInchiKey(inchi)[:-2]
    except Exception:
        # Narrowed from a bare "except:" so that SystemExit and
        # KeyboardInterrupt are not swallowed; any RDKit failure falls
        # through to the OpenBabel path below.
        pass
    obmol = toOBMol(mol)
    obConversion = openbabel.OBConversion()
    obConversion.SetOutFormat('inchi')
    obConversion.SetOptions('w', openbabel.OBConversion.OUTOPTIONS)
    obConversion.SetOptions('K', openbabel.OBConversion.OUTOPTIONS)
    return obConversion.WriteString(obmol).strip()[:-2]
def toAugmentedInChIKey(mol):
    """
    Adds additional layers to the InChIKey,
    generating the "augmented" InChIKey.
    """
    cython.declare(
        key=str,
        ulayer=str
    )
    # NOTE: toInChIKey is called before create_augmented_layers; its RDKit
    # path sorts mol's atoms in place (via toRDKitMol), so the call order
    # here matters.
    key = toInChIKey(mol)
    ulayer, player = create_augmented_layers(mol)
    return inchiutil.compose_aug_inchi_key(key, ulayer, player)
def toSMARTS(mol):
    """
    Convert a molecular structure to a SMARTS string via
    `RDKit <http://rdkit.org/>`_.  Aromaticity is perceived and hydrogen
    atoms are removed during the conversion.
    """
    return Chem.MolToSmarts(toRDKitMol(mol))
def toSMILES(mol):
    """
    Convert a molecular structure to a SMILES string.

    If a nitrogen atom is present the conversion is done with
    `OpenBabel <http://openbabel.org/>`_ and the SMILES may or may not be
    canonical.  Otherwise `RDKit <http://rdkit.org/>`_ is used, giving
    canonical SMILES.  Converting to an RDKit molecule perceives
    aromaticity and removes hydrogen atoms.
    """
    # If we're going to have to check the formula anyway,
    # we may as well shortcut a few small known molecules.
    # Dictionary lookups are O(1) so this should be fast:
    # The dictionaries are defined at the top of this file.
    cython.declare(
        atom=Atom,
        # obmol=,
        # rdkitmol=,
    )
    try:
        if mol.isRadical():
            return _known_smiles_radicals[mol.getFormula()]
        else:
            return _known_smiles_molecules[mol.getFormula()]
    except KeyError:
        # It wasn't in the shortcut tables; do a real conversion.
        pass
    for atom in mol.vertices:
        if atom.isNitrogen():
            obmol = toOBMol(mol)
            # The original wrapped the three lines below in "try: ...
            # except: pass", which could only mask the real error: had the
            # setup ever failed, SMILEwriter would have been unbound and
            # the WriteString call would raise NameError anyway.
            SMILEwriter = openbabel.OBConversion()
            SMILEwriter.SetOutFormat('smi')
            # Turn off isomer and stereochemistry information (the @ signs!)
            SMILEwriter.SetOptions("i", SMILEwriter.OUTOPTIONS)
            return SMILEwriter.WriteString(obmol).strip()
    rdkitmol = toRDKitMol(mol, sanitize=False)
    if not mol.isAromatic():
        return Chem.MolToSmiles(rdkitmol, kekuleSmiles=True)
    return Chem.MolToSmiles(rdkitmol)
def toOBMol(mol):
    """
    Convert a molecular structure to an OpenBabel OBMol object, using
    `OpenBabel <http://openbabel.org/>`_.
    """
    atoms = mol.vertices
    obmol = openbabel.OBMol()
    # Create one OB atom per vertex, copying element and formal charge.
    for atom in atoms:
        obatom = obmol.NewAtom()
        obatom.SetAtomicNum(atom.number)
        obatom.SetFormalCharge(atom.charge)
    # OpenBabel encodes bond order numerically; 5 marks aromatic bonds.
    bond_orders = {'S': 1, 'D': 2, 'T': 3, 'B': 5}
    for atom1 in mol.vertices:
        for atom2, bond in atom1.edges.iteritems():
            index1 = atoms.index(atom1)
            index2 = atoms.index(atom2)
            # Each edge appears twice (once per endpoint); add it only
            # once, when seen from the lower-indexed atom.  OBMol atom
            # indices are 1-based, hence the +1.
            if index1 < index2:
                obmol.AddBond(index1 + 1, index2 + 1, bond_orders[bond.order])
    obmol.AssignSpinMultiplicity(True)
    return obmol
def debugRDKitMol(rdmol, level=logging.INFO):
    """
    Takes an rdkit molecule object and logs some debugging information
    equivalent to calling rdmol.Debug() but uses our logging framework.
    Default logging level is INFO but can be controlled with the `level` parameter.
    Also returns the message as a string, should you want it for something.
    """
    import tempfile
    import os
    # rdmol.Debug() writes to the C-level stdout, which cannot be captured
    # by reassigning sys.stdout; instead the stdout file *descriptor* is
    # temporarily redirected into a temp file and read back.
    my_temp_file = tempfile.NamedTemporaryFile()
    try:
        old_stdout_file_descriptor = os.dup(sys.stdout.fileno())
    except:
        # sys.stdout may have been replaced by an object without a real
        # file descriptor (IDEs, notebooks); fall back to plain Debug().
        message = "Can't access the sys.stdout file descriptor, so can't capture RDKit debug info"
        print message
        rdmol.Debug()
        return message
    os.dup2(my_temp_file.fileno(), sys.stdout.fileno())
    rdmol.Debug()
    # Restore the original stdout file descriptor.
    os.dup2(old_stdout_file_descriptor, sys.stdout.fileno())
    my_temp_file.file.seek(0)
    message = my_temp_file.file.read()
    message = "RDKit Molecule debugging information:\n" + message
    logging.log(level, message)
    return message
def toRDKitMol(mol, removeHs=True, returnMapping=False, sanitize=True):
    """
    Convert a molecular structure to a RDKit rdmol object. Uses
    `RDKit <http://rdkit.org/>`_ to perform the conversion.
    Perceives aromaticity and, unless removeHs==False, removes Hydrogen atoms.

    If returnMapping==True then it also returns a dictionary mapping the
    atoms to RDKit's atom indices.

    Note: this sorts mol's atoms in place as a side effect.
    """
    # Sort the atoms before converting to ensure output is consistent
    # between different runs
    mol.sortAtoms()
    atoms = mol.vertices
    rdAtomIndices = {} # dictionary of RDKit atom indices
    rdkitmol = Chem.rdchem.EditableMol(Chem.rdchem.Mol())
    for index, atom in enumerate(mol.vertices):
        rdAtom = Chem.rdchem.Atom(atom.element.symbol)
        rdAtom.SetNumRadicalElectrons(atom.radicalElectrons)
        # Singlet carbene special case: a carbon with one lone pair in a
        # singlet molecule is given two radical electrons instead —
        # presumably so RDKit does not fill the open valence with implicit
        # hydrogens; confirm against RDKit's valence model.
        if atom.element.symbol == 'C' and atom.lonePairs == 1 and mol.multiplicity == 1: rdAtom.SetNumRadicalElectrons(2)
        rdkitmol.AddAtom(rdAtom)
        # NOTE(review): this reads atom.symbol while the code above reads
        # atom.element.symbol — presumably equivalent accessors; verify.
        if removeHs and atom.symbol == 'H':
            pass
        else:
            rdAtomIndices[atom] = index
    rdBonds = Chem.rdchem.BondType
    orders = {'S': rdBonds.SINGLE, 'D': rdBonds.DOUBLE, 'T': rdBonds.TRIPLE, 'B': rdBonds.AROMATIC}
    # Add the bonds
    for atom1 in mol.vertices:
        for atom2, bond in atom1.edges.iteritems():
            index1 = atoms.index(atom1)
            index2 = atoms.index(atom2)
            # Each edge appears twice (once per endpoint); add it once.
            if index1 < index2:
                order = orders[bond.order]
                rdkitmol.AddBond(index1, index2, order)
    # Make editable mol into a mol and rectify the molecule
    rdkitmol = rdkitmol.GetMol()
    if sanitize:
        Chem.SanitizeMol(rdkitmol)
    if removeHs:
        rdkitmol = Chem.RemoveHs(rdkitmol, sanitize=sanitize)
    if returnMapping:
        return rdkitmol, rdAtomIndices
    return rdkitmol
def is_valid_combo(combo, mol, distances):
    """
    Check if the combination of atom indices refers to atoms that are
    adjacent in the molecule, by comparing the inter-atom distances of the
    combination against the reference *distances*.
    """
    cython.declare(
        agglomerates=list,
        new_distances=list,
        orig_dist=dict,
        new_dist=dict,
    )
    # Compute the shortest paths between the atoms of this combination.
    agglomerates = agglomerate(combo)
    new_distances = compute_agglomerate_distance(agglomerates, mol)
    # The combo is valid only if its distances match the reference set,
    # comparing the distance values (not the atom keys) of each dict.
    if len(new_distances) != len(distances):
        return False
    return all(
        sorted(orig_dist.values()) == sorted(new_dist.values())
        for orig_dist, new_dist in zip(distances, new_distances)
    )
def find_lowest_u_layer(mol, u_layer, equivalent_atoms):
    """
    Searches for the "minimum" combination of indices of atoms that bear unpaired electrons.

    It does so by using the information on equivalent atoms to permute equivalent atoms to
    obtain a combination of atoms that is the (numerically) lowest possible combination.

    Each possible combination is valid if and only if the distances between the atoms of the
    combination is identical to the distances between the original combination.

    First, the algorithm partitions equivalent atoms that bear an unpaired electron.
    Next, the combinations are generated, and for each combination it is verified whether
    it pertains to a "valid" combination.

    Returns a list of indices corresponding to the lowest combination of atom indices bearing
    unpaired electrons.
    """
    cython.declare(
        new_u_layer=list,
        grouped_electrons=list,
        corresponding_E_layers=list,
        group=list,
        e_layer=list,
        combos=list,
        orig_agglomerates=list,
        orig_distances=list,
        selected_group=list,
        combo=list,
    )
    if not equivalent_atoms:
        return u_layer
    new_u_layer = []
    grouped_electrons, corresponding_E_layers = partition(u_layer, equivalent_atoms)
    # don't process atoms that do not belong to an equivalence layer;
    # iterate over shallow copies ([:]) because the two lists are mutated
    # (remove) inside the loop body
    for group, e_layer in zip(grouped_electrons[:], corresponding_E_layers[:]):
        if not e_layer:
            new_u_layer.extend(group)
            grouped_electrons.remove(group)
            corresponding_E_layers.remove(e_layer)
    combos = generate_combo(grouped_electrons, corresponding_E_layers)
    # compute original distance:
    orig_agglomerates = agglomerate(grouped_electrons)
    orig_distances = compute_agglomerate_distance(orig_agglomerates, mol)
    # deflate the list of lists to be able to numerically compare them
    selected_group = sorted(itertools.chain.from_iterable(grouped_electrons))
    # see if any of the combos is valid and results in a lower numerical combination than the original
    for combo in combos:
        if is_valid_combo(combo, mol, orig_distances):
            combo = sorted(itertools.chain.from_iterable(combo))
            if combo < selected_group:
                selected_group = combo
    # add the minimized unpaired electron positions to the u-layer:
    new_u_layer.extend(selected_group)
    return sorted(new_u_layer)
def generate_minimum_resonance_isomer(mol):
    """
    Select the resonance isomer that is isomorphic to the parameter isomer
    and has the lowest unpaired-electron descriptor.

    All isomorphic resonance isomers are generated, then the candidate
    whose metric — the sorted list of indices of atoms bearing an unpaired
    electron — is numerically smallest is returned.
    """
    cython.declare(
        candidates=list,
    )
    candidates = resonance.generate_isomorphic_isomers(mol)
    # min() returns the first element with the minimal key, matching the
    # original loop's strict "<" comparison on ties.
    return min(candidates, key=get_unpaired_electrons)
def get_unpaired_electrons(mol):
    """
    Return a sorted list of the indices of the atoms that bear one or
    more unpaired electrons.
    """
    cython.declare(
        locations=list,
        index=int,
        at=Atom,
    )
    locations = [index for index, at in enumerate(mol.atoms)
                 if at.radicalElectrons >= 1]
    return sorted(locations)
def compute_agglomerate_distance(agglomerates, mol):
    """
    For each list of atom indices in *agglomerates*, compute the distances
    between its atoms; return the resulting list of distance dicts.
    """
    cython.declare(
        distances=list,
        agglomerate=list,
    )
    distances = [compute_atom_distance(agglomerate, mol)
                 for agglomerate in agglomerates]
    return distances
def has_unexpected_lone_pairs(mol):
    """
    Iterates over the atoms of the Molecule and returns whether
    at least one atom bears an unexpected number of lone pairs.

    E.g.
    carbon with > 0 lone pairs
    nitrogen with > 1 lone pairs
    oxygen with > 2 lone pairs

    The expected number of lone pairs of an element is taken from
    adjlist.PeriodicSystem.lone_pairs.

    :raises Exception: if an atom's element is not in the periodic table
        mapping.
    """
    for at in mol.atoms:
        try:
            exp = adjlist.PeriodicSystem.lone_pairs[at.symbol]
        except KeyError:
            raise Exception("Unrecognized element: {}".format(at.symbol))
        # Compare against the value fetched above; the original looked the
        # element up a second time and left `exp` unused.
        if at.lonePairs != exp:
            return True
    return False
def create_augmented_layers(mol):
    """
    Returns the (u_layer, p_layer) pair for the augmented InChI; either
    element may be None.

    The indices in the string refer to the atom indices in the molecule, according to the atom order
    obtained by sorting the atoms using the InChI canonicalization algorithm.

    First a deep copy is created of the original molecule and hydrogen atoms are removed from the molecule.
    Next, the molecule is converted into an InChI string, and the auxiliary information of the inchification
    procedure is retrieved.

    The N-layer is parsed and used to sort the atoms of the original order according
    to the order in the InChI. In case, the molecule contains atoms that cannot be distinguished
    with the InChI algorithm ('equivalent atoms'), the position of the unpaired electrons is changed
    as to ensure the atoms with the lowest indices are used to compose the string.
    """
    if mol.getRadicalCount() == 0 and not has_unexpected_lone_pairs(mol):
        # Nothing to encode: closed shell and no unusual lone pairs.
        return None, None
    elif mol.getFormula() == 'H':
        # Special case: atomic hydrogen is a radical at position 1.
        return inchiutil.U_LAYER_PREFIX + '1', None
    else:
        molcopy = mol.copy(deep=True)
        # Remove explicit hydrogens; InChI numbering covers heavy atoms.
        # NOTE(review): this relies on filter() returning a list (Python 2
        # semantics); under Python 3 it would be a lazy iterator consumed
        # while the molecule is being mutated — confirm before porting.
        hydrogens = filter(lambda at: at.number == 1, molcopy.atoms)
        [molcopy.removeAtom(h) for h in hydrogens]
        rdkitmol = toRDKitMol(molcopy)
        _, auxinfo = Chem.MolToInchiAndAuxInfo(rdkitmol, options='-SNon')# suppress stereo warnings
        # extract the atom numbers from N-layer of auxiliary info:
        atom_indices = inchiutil.parse_N_layer(auxinfo)
        # invert the mapping: position of each 1-based atom number
        atom_indices = [atom_indices.index(i + 1) for i, atom in enumerate(molcopy.atoms)]
        # sort the atoms based on the order of the atom indices
        molcopy.atoms = [x for (y,x) in sorted(zip(atom_indices, molcopy.atoms), key=lambda pair: pair[0])]
        ulayer = create_U_layer(molcopy, auxinfo)
        player = create_P_layer(molcopy, auxinfo)
        return ulayer, player
def create_P_layer(mol, auxinfo):
    """
    Creates a string with the positions of the atoms that bear an unexpected number of lone pairs. The string
    can be used to complement the InChI with an additional layer that allows for the differentiation
    between structures with lone pairs.

    The string is composed of a prefix ('P_LAYER_PREFIX') followed by the positions of each of the atoms with an
    unexpected number of lone pairs, sorted in numerical order.

    Example:
    - singlet methylene biradical ([CH2]) : 'P_LAYER_PREFIX'1

    When the molecule does not bear any atoms with an unexpected number of lone pairs,
    None is returned.
    """
    # TODO: find the resonance isomer with the lowest p index:
    minmol = mol
    # create preliminary p-layer:
    p_layer = []
    for i, at in enumerate(mol.atoms):
        try:
            exp = adjlist.PeriodicSystem.lone_pairs[at.symbol]
        except KeyError:
            raise Exception("Unrecognized element: {}".format(at.symbol))
        else:
            if at.lonePairs != adjlist.PeriodicSystem.lone_pairs[at.symbol]:
                if at.lonePairs == 0:
                    # NOTE(review): uses the 0-based index `i` here but the
                    # 1-based `i+1` below — looks inconsistent; confirm
                    # which numbering the p-layer is meant to use.
                    p_layer.append('{}{}'.format(i, '(0)'))
                else:
                    p_layer.extend([i+1] * at.lonePairs)
    # extract equivalent atom pairs from E-layer of auxiliary info:
    equivalent_atoms = inchiutil.parse_E_layer(auxinfo)
    if equivalent_atoms:
        # select lowest u-layer:
        # NOTE(review): the result is assigned to `u_layer` and never used,
        # so p_layer is NOT minimized here.  find_lowest_p_layer is
        # currently a stub that returns its first argument (a Molecule),
        # so assigning to p_layer instead would be wrong too — leave as is
        # until the helper is implemented.
        u_layer = find_lowest_p_layer(minmol, p_layer, equivalent_atoms)
    if p_layer:
        return (inchiutil.P_LAYER_PREFIX + inchiutil.P_LAYER_SEPARATOR.join(map(str, p_layer)))
    else:
        return None
def find_lowest_p_layer(minmol, p_layer, equivalent_atoms):
    """
    Permute the equivalent atoms and return the combination with the
    lowest p-layer.

    TODO: The presence of unpaired electrons complicates stuff.

    NOTE(review): currently a stub — p_layer and equivalent_atoms are
    ignored and minmol is returned unchanged.
    """
    return minmol
"RDKit"
] | f7a9dd043a7e4214bf1c07384e85e40770d59afba3fe54c747bb5b7ae40fee73 |
"""
Defines the plugin to take storage space information given by WLCG Accounting Json
https://twiki.cern.ch/twiki/bin/view/LCG/AccountingTaskForce#Storage_Space_Accounting
https://twiki.cern.ch/twiki/pub/LCG/AccountingTaskForce/storage_service_v4.txt
https://docs.google.com/document/d/1yzCvKpxsbcQC5K9MyvXc-vBF1HGPBk4vhjw3MEXoXf8
When this is used, the OccupancyLFN has to be the full path on the storage, and not just the LFN
"""
import json
import os
import tempfile
import shutil
import errno
import gfal2 # pylint: disable=import-error
from DIRAC import S_OK, S_ERROR
class WLCGAccountingJson(object):
    """.. class:: WLCGAccountingJson

    Occupancy plugin to return the space information given by the WLCG
    Accounting Json.
    """

    def __init__(self, se):
        self.se = se
        self.log = se.log.getSubLogger("WLCGAccountingJson")
        self.name = self.se.name

    def _downloadJsonFile(self, occupancyLFN, filePath):
        """Download the json file at the location.

        Tries each storage of the SE in turn and returns after the first
        successful copy; failures are logged at debug level.

        :param occupancyLFN: lfn for the file
        :param filePath: destination path for the file
        """
        for storage in self.se.storages:
            # Initialize so the except clause below cannot hit an unbound
            # name when the failure happens before updateURL succeeds.
            occupancyURL = None
            try:
                ctx = gfal2.creat_context()
                params = ctx.transfer_parameters()
                params.overwrite = True
                res = storage.updateURL(occupancyLFN)
                if not res["OK"]:
                    continue
                occupancyURL = res["Value"]
                ctx.filecopy(params, occupancyURL, "file://" + filePath)
                return
            except gfal2.GError as e:
                detailMsg = "Failed to copy file %s to destination url %s: [%d] %s" % (
                    occupancyURL,
                    filePath,
                    e.code,
                    e.message,
                )
                self.log.debug("Exception while copying", detailMsg)
                continue

    def getOccupancy(self, **kwargs):
        """Returns the space information given by WLCG Accounting Json.

        :returns: S_OK with dict (keys: SpaceReservation, Total, Free),
                  or S_ERROR
        """
        occupancyLFN = kwargs["occupancyLFN"]
        if not occupancyLFN:
            return S_ERROR("Failed to get occupancyLFN")

        tmpDirName = tempfile.mkdtemp()
        try:
            filePath = os.path.join(tmpDirName, os.path.basename(occupancyLFN))
            self._downloadJsonFile(occupancyLFN, filePath)
            if not os.path.isfile(filePath):
                return S_ERROR("No WLCGAccountingJson file of %s is downloaded." % (self.name))
            with open(filePath, "r") as occupancyFile:
                occupancyDict = json.load(occupancyFile)
        finally:
            # Always delete the temp dir; the original leaked it on the
            # early return above and whenever json.load raised.
            shutil.rmtree(tmpDirName)

        try:
            storageShares = occupancyDict["storageservice"]["storageshares"]
        except KeyError as e:
            return S_ERROR(
                errno.ENOMSG, "Issue finding storage shares. %s in %s at %s." % (repr(e), occupancyLFN, self.name)
            )

        spaceReservation = self.se.options.get("SpaceReservation")
        # Locate the storage share described by the accounting file.
        storageSharesSR = None
        if spaceReservation:
            for storageshare in storageShares:
                if storageshare.get("name") == spaceReservation:
                    storageSharesSR = storageshare
                    break
        else:
            self.log.debug(
                "Could not find SpaceReservation in CS, and get storageShares and spaceReservation from WLCGAccoutingJson."
            )
            # Fall back to the share whose path shares the longest common
            # prefix with one of the storages' base paths.
            shareLen = []
            for storage in self.se.storages:
                basePath = storage.getParameters()["Path"]
                for share in storageShares:
                    shareLen.append((share, len(os.path.commonprefix([share["path"][0], basePath]))))
            storageSharesSR = max(shareLen, key=lambda x: x[1])[0]
            spaceReservation = storageSharesSR.get("name")

        if storageSharesSR is None:
            # The configured SpaceReservation is absent from the accounting
            # file; the original crashed with a TypeError further down.
            return S_ERROR(
                errno.ENOMSG,
                "No storage share matching space reservation %s at %s." % (spaceReservation, self.name),
            )

        sTokenDict = {}
        sTokenDict["SpaceReservation"] = spaceReservation
        try:
            sTokenDict["Total"] = storageSharesSR["totalsize"]
            # "freesize" is optional; derive it from used space when absent.
            sTokenDict["Free"] = storageSharesSR.get("freesize", sTokenDict["Total"] - storageSharesSR["usedsize"])
        except KeyError as e:
            return S_ERROR(
                errno.ENOMSG,
                "Issue finding Total or Free space left. %s in %s storageshares." % (repr(e), spaceReservation),
            )
        return S_OK(sTokenDict)
| DIRACGrid/DIRAC | src/DIRAC/Resources/Storage/OccupancyPlugins/WLCGAccountingJson.py | Python | gpl-3.0 | 4,466 | [
"DIRAC"
] | c3db8d71eefba20e8da0feacef044190224e169d8fa5c49b1aaceb1b5ac0c61e |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, John D. Chodera
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
"""Load an md.Topology from tripos mol2 files.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
import itertools
import re
from mdtraj.utils import import_
from mdtraj.utils.six.moves import cStringIO as StringIO
from mdtraj.formats.registry import _FormatRegistry
__all__ = ['load_mol2', "mol2_to_dataframes"]
@_FormatRegistry.register_loader('.mol2')
def load_mol2(filename):
    """Load a TRIPOS mol2 file from disk.

    Parameters
    ----------
    filename : str
        Path to the mol2 file on disk.

    Returns
    -------
    traj : md.Trajectory
        A single-frame trajectory built from the file's coordinates and
        topology.

    Notes
    -----
    This function should work on GAFF and sybyl style MOL2 files, but has
    been primarily tested on GAFF mol2 files.
    This function does NOT accept multi-structure MOL2 files!!!
    The elements are guessed using GAFF atom types or via the atype string.

    Examples
    --------
    >>> traj = md.load_mol2('mysystem.mol2')
    """
    from mdtraj.core.trajectory import Trajectory
    from mdtraj.core.topology import Topology
    atoms, bonds = mol2_to_dataframes(filename)
    atoms_mdtraj = atoms[["name", "resName"]].copy()
    atoms_mdtraj["serial"] = atoms.index
    # Figure out 1 letter element names
    # IF this is a GAFF mol2, this line should work without issues
    atoms_mdtraj["element"] = atoms.atype.map(gaff_elements)
    # If this is a sybyl mol2, there should be NAN (null) values
    if atoms_mdtraj.element.isnull().any():
        # If this is a sybyl mol2, I think this works generally.
        # Sybyl types look like "C.3" or "N.ar": take the leading character.
        # NOTE(review): taking [0] truncates two-letter elements (e.g. "Cl"
        # would become "C") — confirm whether that matters for sybyl input.
        atoms_mdtraj["element"] = atoms.atype.apply(lambda x: x.strip(".")[0])
    # All atoms are placed in a single residue and chain.
    atoms_mdtraj["resSeq"] = np.ones(len(atoms), 'int')
    atoms_mdtraj["chainID"] = np.ones(len(atoms), 'int')
    bonds_mdtraj = bonds[["id0", "id1"]].values
    # Shift the file's atom numbering down to 0-based indices.
    offset = bonds_mdtraj.min()  # Should this just be 1???
    bonds_mdtraj -= offset
    top = Topology.from_dataframe(atoms_mdtraj, bonds_mdtraj)
    xyzlist = np.array([atoms[["x", "y", "z"]].values])
    xyzlist /= 10.0  # Convert from angstrom to nanometer
    traj = Trajectory(xyzlist, top)
    return traj
def mol2_to_dataframes(filename):
    """Convert a GAFF (or sybyl) mol2 file to a pair of pandas dataframes.

    Parameters
    ----------
    filename : str
        Name of mol2 filename

    Returns
    -------
    atoms_frame : pd.DataFrame
        DataFrame containing atom information
    bonds_frame : pd.DataFrame
        DataFrame containing bond information

    Notes
    -----
    These dataframes may contain force field information as well as the
    information necessary for constructing the coordinates and molecular
    topology. This function has been tested for GAFF and sybyl-style
    mol2 files but has been primarily tested on GAFF mol2 files.
    This function does NOT accept multi-structure MOL2 files!!!

    See Also
    --------
    If you just need the coordinates and bonds, use load_mol2(filename)
    to get a Trajectory object.
    """
    pd = import_('pandas')
    with open(filename) as f:
        # Group the file's lines by their most recent @<TRIPOS> header.
        data = dict((key, list(grp)) for key, grp in itertools.groupby(f, _parse_mol2_sections))
    # Mol2 can have "status bits" at the end of the bond lines. We don't care
    # about these, but they interfere with using pd_read_table because it looks
    # like one line has too many columns. So we just regex out the offending
    # text.
    # Regexes are raw strings so backslashes are not parsed as string
    # escapes (invalid escapes are a DeprecationWarning since Python 3.6).
    status_bit_regex = r"BACKBONE|DICT|INTERRES|\|"
    data["@<TRIPOS>BOND\n"] = [re.sub(status_bit_regex, lambda _: "", s)
                               for s in data["@<TRIPOS>BOND\n"]]
    csv = StringIO()
    csv.writelines(data["@<TRIPOS>BOND\n"][1:])
    csv.seek(0)
    bonds_frame = pd.read_table(csv, names=["bond_id", "id0", "id1", "bond_type"],
                                index_col=0, header=None, sep=r"\s*", engine='python')
    csv = StringIO()
    csv.writelines(data["@<TRIPOS>ATOM\n"][1:])
    csv.seek(0)
    atoms_frame = pd.read_csv(csv, sep=r"\s*", engine='python', header=None,
                              names=["serial", "name", "x", "y", "z",
                                     "atype", "code", "resName", "charge"])
    return atoms_frame, bonds_frame
def _parse_mol2_sections(x):
"""Helper function for parsing a section in a MOL2 file."""
if x.startswith('@<TRIPOS>'):
_parse_mol2_sections.key = x
return _parse_mol2_sections.key
# Maps lowercase GAFF atom types to element symbols; used by load_mol2 to
# assign elements when a mol2 file uses GAFF atom typing.
gaff_elements = {
    'br': 'Br',
    'c': 'C',
    'c1': 'C',
    'c2': 'C',
    'c3': 'C',
    'ca': 'C',
    'cc': 'C',
    'cd': 'C',
    'ce': 'C',
    'cf': 'C',
    'cg': 'C',
    'ch': 'C',
    'cl': 'Cl',
    'cp': 'C',
    'cq': 'C',
    'cu': 'C',
    'cv': 'C',
    'cx': 'C',
    'cy': 'C',
    'cz': 'C',
    'f': 'F',
    'h1': 'H',
    'h2': 'H',
    'h3': 'H',
    'h4': 'H',
    'h5': 'H',
    'ha': 'H',
    'hc': 'H',
    'hn': 'H',
    'ho': 'H',
    'hp': 'H',
    'hs': 'H',
    'hw': 'H',
    'hx': 'H',
    'i': 'I',
    'n': 'N',
    'n1': 'N',
    'n2': 'N',
    'n3': 'N',
    'n4': 'N',
    'na': 'N',
    'nb': 'N',
    'nc': 'N',
    'nd': 'N',
    'ne': 'N',
    'nf': 'N',
    'nh': 'N',
    'no': 'N',
    'o': 'O',
    'oh': 'O',
    'os': 'O',
    'ow': 'O',
    'p2': 'P',
    'p3': 'P',
    'p4': 'P',
    'p5': 'P',
    'pb': 'P',
    'px': 'P',
    'py': 'P',
    's': 'S',
    's2': 'S',
    's4': 'S',
    's6': 'S',
    'sh': 'S',
    'ss': 'S',
    'sx': 'S',
    'sy': 'S'}
| daviddesancho/mdtraj | mdtraj/formats/mol2.py | Python | lgpl-2.1 | 7,865 | [
"MDTraj",
"OpenMM"
] | be67b84cdb3386ff4242a90072c388b8279d073369798d72033050bd6c94ab0d |
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs SpecCPU2006.
From SpecCPU2006's documentation:
The SPEC CPU2006 benchmark is SPEC's industry-standardized, CPU-intensive
benchmark suite, stressing a system's processor, memory subsystem and compiler.
SpecCPU2006 homepage: http://www.spec.org/cpu2006/
"""
import logging
import posixpath
import re
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
FLAGS = flags.FLAGS
flags.DEFINE_enum('benchmark_subset', 'int', ['int', 'fp', 'all'],
'specify a subset of benchmarks to run: int, fp, all')
flags.DEFINE_string('runspec_config', 'linux64-x64-gcc47.cfg',
'name of the cpu2006 configuration to use (runspec --config'
' argument)')
flags.DEFINE_integer('runspec_iterations', 3,
'number of benchmark iterations to execute - default 3 '
'(runspec --iterations argument)')
flags.DEFINE_string('runspec_define', '',
'optional comma separated list of preprocessor macros: '
'SYMBOL[=VALUE] - e.g. numa,smt,sse=SSE4.2 (runspec '
'--define arguments)')
flags.DEFINE_boolean('runspec_enable_32bit', default=False,
help='setting this flag will result in installation of '
'multilib packages to enable use of 32-bit cpu2006 '
'binaries (useful when running on memory constrained '
'instance types where 64-bit execution may be problematic '
' - i.e. < 1.5-2GB/core)')
flags.DEFINE_boolean('runspec_keep_partial_results', False,
'speccpu will report an aggregate score even if some of '
'the component tests failed with a "NR" status. If this '
'flag is set to true, save the available results and '
'mark metadata with partial=true. If unset, partial '
'failures are treated as errors.')
BENCHMARK_NAME = 'speccpu2006'
BENCHMARK_CONFIG = """
speccpu2006:
description: Run Spec CPU2006
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
"""
SPECCPU2006_TAR = 'cpu2006v1.2.tgz'
SPECCPU2006_DIR = 'cpu2006'
def GetConfig(user_config):
  """Return the benchmark config merged with any user-supplied overrides."""
  merged = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  return merged
def CheckPrerequisites():
  """Verify that the SpecCPU2006 tarball is available locally.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  # ResourcePath raises ResourceNotFound when the tarball cannot be located;
  # the return value itself is not needed here.
  data.ResourcePath(SPECCPU2006_TAR)
def Prepare(benchmark_spec):
  """Install SpecCPU2006 on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vm = benchmark_spec.vms[0]
  logging.info('prepare SpecCPU2006 on %s', vm)

  # Toolchain required to build the SPEC binaries from source.
  for package in ('wget', 'build_tools', 'fortran'):
    vm.Install(package)
  if FLAGS.runspec_enable_32bit:
    vm.Install('multilib')
  vm.Install('numactl')

  try:
    local_tar_file_path = data.ResourcePath(SPECCPU2006_TAR)
  except data.ResourceNotFound as e:
    # The SPEC tarball is licensed and must be supplied by the user.
    logging.error('Please provide %s under perfkitbenchmarker/data directory '
                  'before running SpecCPU2006 benchmark.', SPECCPU2006_TAR)
    raise errors.Benchmarks.PrepareException(str(e))

  scratch_dir = vm.GetScratchDir()
  # Remember remote paths on the vm object for Run()/Cleanup().
  vm.tar_file_path = posixpath.join(scratch_dir, SPECCPU2006_TAR)
  vm.spec_dir = posixpath.join(scratch_dir, SPECCPU2006_DIR)
  vm.RemoteCommand('chmod 777 %s' % scratch_dir)
  vm.PushFile(local_tar_file_path, scratch_dir)
  vm.RemoteCommand('cd %s && tar xvfz %s' % (scratch_dir, SPECCPU2006_TAR))
def ExtractScore(stdout, vm, keep_partial_results):
  """Extract the Spec (int|fp) score from stdout.

  Args:
    stdout: stdout from running RemoteCommand.
    vm: The vm instance where Spec CPU2006 was run.
    keep_partial_results: A boolean indicating whether partial results should
        be extracted in the event that not all benchmarks were successfully
        run. See the "runspec_keep_partial_results" flag for more info.

  Sample input for SPECint:
      ...
      ...
      =============================================
      400.perlbench    9770        417       23.4 *
      401.bzip2        9650        565       17.1 *
      403.gcc          8050        364       22.1 *
      429.mcf          9120        364       25.1 *
      445.gobmk       10490        499       21.0 *
      456.hmmer        9330        491       19.0 *
      458.sjeng       12100        588       20.6 *
      462.libquantum  20720        468       44.2 *
      464.h264ref     22130        700       31.6 *
      471.omnetpp      6250        349       17.9 *
      473.astar        7020        482       14.6 *
      483.xalancbmk    6900        248       27.8 *
       Est. SPECint(R)_base2006              22.7

  (SPECfp output has the same shape, ending in Est. SPECfp(R)_base2006.)

  Returns:
    A list of sample.Sample objects.

  Raises:
    errors.Benchmarks.RunError: If no result section is present in stdout,
        or if results are missing and keep_partial_results is unset.
  """
  results = []

  re_begin_section = re.compile('^={1,}')
  re_end_section = re.compile(r'Est. (SPEC.*_base2006)\s*(\S*)')
  result_section = []
  in_result_section = False
  # Pre-initialize so that a log with no summary section yields a clear
  # RunError below instead of an UnboundLocalError (and so a None spec_name
  # can never reach ','.join on missing_results).
  spec_name = None
  spec_score = None

  # Extract the summary section: everything between the '====' ruler and
  # the 'Est. SPEC...' aggregate-score line.
  for line in stdout.splitlines():
    if in_result_section:
      result_section.append(line)

    # search for begin of result section
    if re_begin_section.search(line):
      assert not in_result_section
      in_result_section = True
      continue

    # search for end of result section
    match = re_end_section.search(line)
    if match:
      assert in_result_section
      spec_name = str(match.group(1))
      try:
        spec_score = float(match.group(2))
      except ValueError:
        # Partial results may get reported as '--' instead of a number.
        spec_score = None
      in_result_section = False

      # remove the final SPEC(int|fp) score, which has only 2 columns.
      result_section.pop()

  if spec_name is None:
    # The log never contained an 'Est. SPEC..._base2006' line.
    raise errors.Benchmarks.RunError(
        'speccpu2006: no result section found in output')

  metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus}
  missing_results = []

  for benchmark in result_section:
    # Skip over failed runs, but count them since they make the overall
    # result invalid.
    if 'NR' in benchmark:
      logging.warning('SpecCPU2006 missing result: %s', benchmark)
      missing_results.append(str(benchmark.split()[0]))
      continue

    # name, ref_time, time, score, misc
    name, _, _, score, _ = benchmark.split()
    results.append(sample.Sample(str(name), float(score), '', metadata))

  if spec_score is None:
    missing_results.append(spec_name)

  if missing_results:
    if keep_partial_results:
      metadata['partial'] = 'true'
      metadata['missing_results'] = ','.join(missing_results)
    else:
      raise errors.Benchmarks.RunError(
          'speccpu2006: results missing, see log: ' + ','.join(missing_results))

  if spec_score is not None:
    results.append(sample.Sample(spec_name, spec_score, '', metadata))
  return results
def ParseOutput(vm):
  """Parses the output from Spec CPU2006.

  Args:
    vm: The vm instance where Spec CPU2006 was run.

  Returns:
    A list of samples to be published (in the same format as Run() returns).
  """
  # FIXME(liquncheng): Only reference runs generate SPEC scores. The log
  # id is hardcoded as 001, which might change with different runspec
  # parameters. Spec CPU 2006 will generate different logs for build, test
  # run, training run and ref run.
  subset = FLAGS.benchmark_subset
  log_files = []
  if subset in ('int', 'all'):
    log_files.append('CINT2006.001.ref.txt')
  if subset in ('fp', 'all'):
    log_files.append('CFP2006.001.ref.txt')

  samples = []
  for log in log_files:
    stdout, _ = vm.RemoteCommand('cat %s/result/%s' % (vm.spec_dir, log),
                                 should_log=True)
    samples.extend(ExtractScore(stdout, vm, FLAGS.runspec_keep_partial_results))
  return samples
def Run(benchmark_spec):
  """Run SpecCPU2006 on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vm = benchmark_spec.vms[0]
  logging.info('SpecCPU2006 running on %s', vm)
  num_cpus = vm.num_cpus

  # Only pass --iterations when it differs from runspec's default of 3.
  if FLAGS.runspec_iterations != 3:
    iterations = ' --iterations=' + repr(FLAGS.runspec_iterations)
  else:
    iterations = ''

  # Expand the comma-separated macro list into repeated --define arguments.
  if FLAGS.runspec_define != '':
    defines = ' --define ' + ' --define '.join(FLAGS.runspec_define.split(','))
  else:
    defines = ''

  cmd = ('cd %s; . ./shrc; ./bin/relocate; . ./shrc; rm -rf result; '
         'runspec --config=%s --tune=base '
         '--size=ref --noreportable --rate %s%s%s %s'
         % (vm.spec_dir, FLAGS.runspec_config, num_cpus, iterations,
            defines, FLAGS.benchmark_subset))
  vm.RobustRemoteCommand(cmd)
  logging.info('SpecCPU2006 Results:')
  return ParseOutput(vm)
def Cleanup(benchmark_spec):
  """Cleanup SpecCPU2006 on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vm = benchmark_spec.vms[0]
  # Remove the unpacked SPEC tree and the uploaded tarball (paths stashed
  # on the vm by Prepare()).
  vm.RemoteCommand('rm -rf %s' % vm.spec_dir)
  vm.RemoteCommand('rm -f %s' % vm.tar_file_path)
| syed/PerfKitBenchmarker | perfkitbenchmarker/benchmarks/speccpu2006_benchmark.py | Python | apache-2.0 | 10,962 | [
"GAMESS",
"Gromacs",
"NAMD"
] | 43ef63c6f1f7fce6f8b0251566421cf90a11f260df345d446a3a3f51db4208e1 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
from decimal import Decimal
from kiwi.currency import currency
from stoqlib.database.queryexecuter import DateQueryState, DateIntervalQueryState
from stoqlib.domain.sale import SalesPersonSalesView, Sale
from stoqlib.gui.search.searchcolumns import SearchColumn, Column
from stoqlib.gui.search.searchdialog import SearchDialog
from stoqlib.gui.search.searchfilters import DateSearchFilter
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class SalesPersonSalesSearch(SearchDialog):
    """Search dialog showing aggregate sales totals per salesperson."""

    title = _("Salesperson Total Sales")
    search_spec = SalesPersonSalesView
    size = (-1, 450)
    text_field_columns = [SalesPersonSalesView.name]
    branch_filter_column = Sale.branch_id

    #
    # SearchDialog Hooks
    #

    def create_filters(self):
        # Route queries through executer_query so we can honor the date
        # filter when fetching the result set.
        self.search.set_query(self.executer_query)
        self.date_filter = DateSearchFilter(_('Date:'))
        self.search.add_filter(self.date_filter)

    def get_columns(self):
        columns = [
            SearchColumn('name', title=_('Name'), data_type=str,
                         expand=True, sorted=True),
            Column('total_quantity', title=_('Sold items'),
                   data_type=Decimal),
            Column('total_sales', title=_('Total sales'),
                   data_type=Decimal),
            Column('total_amount', title=_('Total amount'),
                   data_type=currency),
            # Column('paid_value', title=_('Paid'),
            #        data_type=currency, visible=True),
        ]
        return columns

    def setup_widgets(self):
        # Show a bold grand total of the 'total_amount' column.
        self.search.set_summary_label('total_amount', label=_(u'Total:'),
                                      format='<b>%s</b>')

    # TODO: Maybe this can be removed
    def executer_query(self, store):
        # Normalize the filter state into either a single date or a
        # (start, end) tuple before delegating to the view.
        date = self.date_filter.get_state()
        if isinstance(date, DateQueryState):
            date = date.date
        elif isinstance(date, DateIntervalQueryState):
            date = (date.start, date.end)
        return self.search_spec.find_by_date(store, date)
| andrebellafronte/stoq | stoqlib/gui/search/salespersonsearch.py | Python | gpl-2.0 | 3,025 | [
"VisIt"
] | fb9010329b369c15d127ccfc0007475c04765cb81026a0c7513f269de1016077 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.