| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12-1.05M | stringlengths 5-86 | stringlengths 4-191 | stringclasses 1 value | stringclasses 15 values | int32 12-1.05M | listlengths 1-23 | stringlengths 64-64 |
#!/usr/bin/env python
# CREATED:2013-03-08 15:25:18 by Brian McFee <brm2132@columbia.edu>
# unit tests for librosa.filters
#
# This test suite verifies that librosa core routines match (numerically) the output
# of various DPWE matlab implementations on a broad range of input parameters.
#
# All test data is generated by the Matlab script "makeTestData.m".
# Each test loads in a .mat file which contains the input and desired output for a given
# function. The test then runs the librosa implementation and verifies the results
# against the desired output, typically via numpy.allclose().
#
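#
# A minimal sketch of the pattern each test below follows (the .mat file
# name and keys here are illustrative, not a spec):
#
#   DATA = scipy.io.loadmat('data/feature-hz_to_mel-001.mat')
#   z = librosa.hz_to_mel(DATA['f'], DATA['htk'])
#   assert np.allclose(z, DATA['result'])
#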
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
pass
import glob
import numpy as np
import scipy.io
import scipy.signal
from nose.tools import eq_, raises
import warnings
import librosa
warnings.resetwarnings()
warnings.simplefilter('always')
# -- utilities --#
def files(pattern):
test_files = glob.glob(pattern)
test_files.sort()
return test_files
def load(infile):
DATA = scipy.io.loadmat(infile, chars_as_strings=True)
return DATA
# -- --#
# -- Tests --#
def test_hz_to_mel():
def __test_to_mel(infile):
DATA = load(infile)
z = librosa.hz_to_mel(DATA['f'], DATA['htk'])
assert np.allclose(z, DATA['result'])
for infile in files(os.path.join('data', 'feature-hz_to_mel-*.mat')):
yield (__test_to_mel, infile)
pass
def test_mel_to_hz():
def __test_to_hz(infile):
DATA = load(infile)
z = librosa.mel_to_hz(DATA['f'], DATA['htk'])
assert np.allclose(z, DATA['result'])
for infile in files(os.path.join('data', 'feature-mel_to_hz-*.mat')):
yield (__test_to_hz, infile)
pass
def test_hz_to_octs():
def __test_to_octs(infile):
DATA = load(infile)
z = librosa.hz_to_octs(DATA['f'])
assert np.allclose(z, DATA['result'])
for infile in files(os.path.join('data', 'feature-hz_to_octs-*.mat')):
yield (__test_to_octs, infile)
pass
def test_melfb():
def __test_default_norm(infile):
DATA = load(infile)
wts = librosa.filters.mel(DATA['sr'][0, 0],
DATA['nfft'][0, 0],
n_mels=DATA['nfilts'][0, 0],
fmin=DATA['fmin'][0, 0],
fmax=DATA['fmax'][0, 0],
htk=DATA['htk'][0, 0])
# Our version only returns the real-valued part.
# Pad out.
wts = np.pad(wts, [(0, 0),
(0, int(DATA['nfft'][0]//2 - 1))],
mode='constant')
eq_(wts.shape, DATA['wts'].shape)
assert np.allclose(wts, DATA['wts'])
for infile in files(os.path.join('data', 'feature-melfb-*.mat')):
yield (__test_default_norm, infile)
def __test_with_norm(infile):
DATA = load(infile)
# if DATA['norm'] is empty, pass None.
if DATA['norm'].shape[-1] == 0:
norm = None
else:
norm = DATA['norm'][0, 0]
wts = librosa.filters.mel(DATA['sr'][0, 0],
DATA['nfft'][0, 0],
n_mels=DATA['nfilts'][0, 0],
fmin=DATA['fmin'][0, 0],
fmax=DATA['fmax'][0, 0],
htk=DATA['htk'][0, 0],
norm=norm)
# Pad out.
wts = np.pad(wts, [(0, 0),
(0, int(DATA['nfft'][0]//2 - 1))],
mode='constant')
eq_(wts.shape, DATA['wts'].shape)
assert np.allclose(wts, DATA['wts'])
for infile in files(os.path.join('data', 'feature-melfbnorm-*.mat')):
yield (__test_with_norm, infile)
def test_mel_gap():
# This configuration should trigger some empty filters
sr = 44100
n_fft = 1024
fmin = 0
fmax = 2000
n_mels = 128
htk = True
warnings.resetwarnings()
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as out:
librosa.filters.mel(sr, n_fft, n_mels=n_mels,
fmin=fmin, fmax=fmax, htk=htk)
assert len(out) > 0
assert out[0].category is UserWarning
assert 'empty filters' in str(out[0].message).lower()
def test_chromafb():
def __test(infile):
DATA = load(infile)
octwidth = DATA['octwidth'][0, 0]
if octwidth == 0:
octwidth = None
wts = librosa.filters.chroma(DATA['sr'][0, 0],
DATA['nfft'][0, 0],
DATA['nchroma'][0, 0],
A440=DATA['a440'][0, 0],
ctroct=DATA['ctroct'][0, 0],
octwidth=octwidth,
norm=2,
base_c=False)
# Our version only returns the real-valued part.
# Pad out.
wts = np.pad(wts, [(0, 0),
(0, int(DATA['nfft'][0, 0]//2 - 1))],
mode='constant')
eq_(wts.shape, DATA['wts'].shape)
assert np.allclose(wts, DATA['wts'])
for infile in files(os.path.join('data', 'feature-chromafb-*.mat')):
yield (__test, infile)
def test__window():
def __test(n, window):
wdec = librosa.filters.__float_window(window)
if n == int(n):
n = int(n)
assert np.allclose(wdec(n), window(n))
else:
wf = wdec(n)
fn = int(np.floor(n))
assert not np.any(wf[fn:])
for n in [16, 16.0, 16.25, 16.75]:
for window_name in ['barthann', 'bartlett', 'blackman',
'blackmanharris', 'bohman', 'boxcar', 'cosine',
'flattop', 'hamming', 'hann', 'hanning',
'nuttall', 'parzen', 'triang']:
window = getattr(scipy.signal.windows, window_name)
yield __test, n, window
def test_constant_q():
def __test(sr, fmin, n_bins, bins_per_octave, tuning, filter_scale,
pad_fft, norm):
F, lengths = librosa.filters.constant_q(sr,
fmin=fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
pad_fft=pad_fft,
norm=norm)
assert np.all(lengths <= F.shape[1])
eq_(len(F), n_bins)
if not pad_fft:
return
eq_(np.mod(np.log2(F.shape[1]), 1.0), 0.0)
# Check for vanishing negative frequencies
F_fft = np.abs(np.fft.fft(F, axis=1))
# Normalize by row-wise peak
F_fft = F_fft / np.max(F_fft, axis=1, keepdims=True)
assert not np.any(F_fft[:, -F_fft.shape[1]//2:] > 1e-4)
sr = 11025
# Try to make a cq basis too close to nyquist
yield (raises(librosa.ParameterError)(__test), sr, sr/2.0, 1, 12, 0, 1, True, 1)
# with negative fmin
yield (raises(librosa.ParameterError)(__test), sr, -60, 1, 12, 0, 1, True, 1)
# with negative bins_per_octave
yield (raises(librosa.ParameterError)(__test), sr, 60, 1, -12, 0, 1, True, 1)
# with negative bins
yield (raises(librosa.ParameterError)(__test), sr, 60, -1, 12, 0, 1, True, 1)
# with negative filter_scale
yield (raises(librosa.ParameterError)(__test), sr, 60, 1, 12, 0, -1, True, 1)
# with negative norm
yield (raises(librosa.ParameterError)(__test), sr, 60, 1, 12, 0, 1, True, -1)
for fmin in [None, librosa.note_to_hz('C3')]:
for n_bins in [12, 24]:
for bins_per_octave in [12, 24]:
for tuning in [0, 0.25]:
for filter_scale in [1, 2]:
for norm in [1, 2]:
for pad_fft in [False, True]:
yield (__test, sr, fmin, n_bins,
bins_per_octave, tuning,
filter_scale, pad_fft,
norm)
def test_window_bandwidth():
eq_(librosa.filters.window_bandwidth('hann'),
librosa.filters.window_bandwidth(scipy.signal.hann))
def test_window_bandwidth_dynamic():
# Test with a window constructor guaranteed to not exist in
# the dictionary.
# should behave like a box filter, which has enbw == 1
eq_(librosa.filters.window_bandwidth(lambda n: np.ones(n)), 1)
@raises(ValueError)
def test_window_bandwidth_missing():
librosa.filters.window_bandwidth('made up window name')
def binstr(m):
out = []
for row in m:
line = [' '] * len(row)
for i in np.flatnonzero(row):
line[i] = '.'
out.append(''.join(line))
return '\n'.join(out)
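# Example: binstr(np.eye(3)) marks nonzero entries with '.', producing
# '.  \n . \n  .' (one row per line, a dot in each nonzero column).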
def test_cq_to_chroma():
def __test(n_bins, bins_per_octave, n_chroma, fmin, base_c, window):
# Fake up a cqt matrix with the corresponding midi notes
if fmin is None:
midi_base = 24 # C2
else:
midi_base = librosa.hz_to_midi(fmin)
midi_notes = np.linspace(midi_base,
midi_base + n_bins * 12.0 / bins_per_octave,
endpoint=False,
num=n_bins)
# We don't care past 2 decimals here.
# the log2 inside hz_to_midi can cause problems though.
midi_notes = np.around(midi_notes, decimals=2)
C = np.diag(midi_notes)
cq2chr = librosa.filters.cq_to_chroma(n_input=C.shape[0],
bins_per_octave=bins_per_octave,
n_chroma=n_chroma,
fmin=fmin,
base_c=base_c,
window=window)
chroma = cq2chr.dot(C)
for i in range(n_chroma):
v = chroma[i][chroma[i] != 0]
v = np.around(v, decimals=2)
if base_c:
resid = np.mod(v, 12)
else:
resid = np.mod(v - 9, 12)
resid = np.round(resid * n_chroma / 12.0)
assert np.allclose(np.mod(i - resid, 12), 0.0), i-resid
for n_octaves in [2, 3, 4]:
for semitones in [1, 3]:
for n_chroma in 12 * np.arange(1, 1 + semitones):
for fmin in [None] + list(librosa.midi_to_hz(range(48, 61))):
for base_c in [False, True]:
for window in [None, [1]]:
bins_per_octave = 12 * semitones
n_bins = n_octaves * bins_per_octave
if np.mod(bins_per_octave, n_chroma) != 0:
tf = raises(librosa.ParameterError)(__test)
else:
tf = __test
yield (tf, n_bins, bins_per_octave,
n_chroma, fmin, base_c, window)
@raises(librosa.ParameterError)
def test_get_window_fail():
librosa.filters.get_window(None, 32)
def test_get_window():
def __test(window):
w1 = librosa.filters.get_window(window, 32)
w2 = scipy.signal.get_window(window, 32)
assert np.allclose(w1, w2)
for window in ['hann', u'hann', 4.0, ('kaiser', 4.0)]:
yield __test, window
def test_get_window_func():
w1 = librosa.filters.get_window(scipy.signal.boxcar, 32)
w2 = scipy.signal.get_window('boxcar', 32)
assert np.allclose(w1, w2)
def test_get_window_pre():
def __test(pre_win):
win = librosa.filters.get_window(pre_win, len(pre_win))
assert np.allclose(win, pre_win)
yield __test, scipy.signal.hann(16)
yield __test, list(scipy.signal.hann(16))
yield __test, [1, 1, 1]
def test_semitone_filterbank():
# We test against the Chroma Toolbox's elliptical semitone filterbank
# load data from chroma toolbox
gt_fb = scipy.io.loadmat(os.path.join('data', 'filter-muliratefb-MIDI_FB_ellip_pitch_60_96_22050_Q25'),
squeeze_me=True)['h']
# standard parameters reproduce settings from chroma toolbox
mut_ft, mut_srs = librosa.filters.semitone_filterbank()
for cur_filter_id in range(len(mut_ft)):
cur_filter_gt = gt_fb[cur_filter_id + 23]
cur_filter_mut = mut_ft[cur_filter_id]
cur_a_gt = cur_filter_gt[0]
cur_b_gt = cur_filter_gt[1]
cur_a_mut = cur_filter_mut[1]
cur_b_mut = cur_filter_mut[0]
# we deviate from the Chroma Toolbox for pitches 94 and 95
# (filters 70 and 71) by processing them with a higher samplerate
if (cur_filter_id != 70) and (cur_filter_id != 71):
assert np.allclose(cur_a_gt, cur_a_mut)
assert np.allclose(cur_b_gt, cur_b_mut, atol=1e-4)
|
r9y9/librosa
|
tests/test_filters.py
|
Python
|
isc
| 13,357
|
[
"Brian"
] |
9268f101eb27fa946edccb06b3a58cf26c255179c7086fe45347b1ee3e9905df
|
#!/usr/bin/python
'''
Command module for mrMeshPy viewer
Commands and data are passed here from matlab via the mrMeshPyServer.
Matlab sends a string in one transaction giving a command to the
visualisation module. This command either performs an explicit
function in the viewer (e.g. rotate the camera 90 degrees) or the
command describes the configuration/content of a large data chunk that
will be sent in the subsequent transaction, so that we know how to
unpack the data and how to process it (e.g. 70,000 floating point
numbers which are scalar values to show as an amplitude map).
Each command string is interpreted by the mp_commandInterpret module.
N.B. - currently command strings have a maximum length of 1024 bytes.
Commands are specifically ordered, semi-colon separated strings which are
unpacked to describe what the user is trying to do / send from matlab.
Commands have a MINIMUM LENGTH of 6 arguments and have the following
structure and item order (zero-indexed)
0 - "cmd" -- always this, identifies it as a cmd :)
1-3 - place holders
4 - commandName - should match a command in mp_Commands file
5 - theMeshInstance - integer pointing to the mesh window that we
want to operate on
6 onwards - commandArgs - a list of comma-separated pairs of arguments
to characterise the processing of the incoming data
blob or apply some settings to the viewport -
CAN BE EMPTY but must be set to []
Andre' Gouws 2017
'''
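# Illustrative only -- the field values below are hypothetical, but a
# conforming command string per the docstring above would look like:
#   "cmd;0;0;0;loadNewMesh;1;[]"
# fields 0-3 are the "cmd" tag and place holders, commandName sits at
# index 4, the mesh instance id at index 5, and commandArgs from index 6.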
import vtk
import vtk.util.numpy_support
import scipy.io  # so we can read .mat files
from numpy import *
#local modules
from mp_setupVTKWindow import mrMeshVTKWindow
from mp_VTKRoutines import *
from mp_SendFunctions import *
debug = True
# master command handler
def run_mp_command(commandName, commandArgs, theMeshInstance, mainWindowUI, the_TCPserver):
if commandName == 'loadNewMesh':
mainWindowUI.statusbar.showMessage(' ... attempting to load new mesh ...')
# TODO - index will now be a new entry at the end of the existing .ui.vtkInstances list
newIndex = len(mainWindowUI.vtkInstances) #could be zero
# create an entry in the vtkDict to link the unique mesh ID to where it is stored
# in the vtkInstances list
mainWindowUI.vtkDict[theMeshInstance] = newIndex
# add a new tab with a new wVTK window
mrMeshVTKWindow(mainWindowUI, theMeshInstance, 'None')
mainWindowUI.tabWidget.setCurrentIndex(newIndex) #zero indexed
mainWindowUI.tabWidget.update()
#load data and generate the mesh
loadNewMesh(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver)
mainWindowUI.statusbar.showMessage(' ... New mesh Loaded ...')
#the_TCPserver.socket.write(str('send useful message back here TODO'))
the_TCPserver.socket.write(str('1001'))
if debug: print mainWindowUI.vtkDict
elif commandName == 'smoothMesh':
mainWindowUI.statusbar.showMessage(' ... attempting to smooth mesh with id %s ...' %(theMeshInstance))
#load data and generate the mesh
err = smoothMesh(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver)
if err == 0:
mainWindowUI.statusbar.showMessage(' ... Finished smoothing mesh with id %s ...' %(theMeshInstance))
the_TCPserver.socket.write(str('Mesh smooth complete'))
else:
mainWindowUI.statusbar.showMessage(' ... Error trying to smooth mesh with id %s ...' %(theMeshInstance))
the_TCPserver.socket.write(str('Mesh smooth failed'))
elif commandName == 'updateMeshData':
mainWindowUI.statusbar.showMessage(' ... updating mesh with id %s with current View settings ...' %(theMeshInstance))
#load data and send to the mesh
err = updateMeshData(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver)
if err == 0:
mainWindowUI.statusbar.showMessage(' ... Finished: updated data for mesh id %s ...' %(theMeshInstance))
the_TCPserver.socket.write(str('Mesh update complete'))
else:
mainWindowUI.statusbar.showMessage(' ... Error trying to update mesh with id %s ...' %(theMeshInstance))
the_TCPserver.socket.write(str('Mesh update failed'))
elif commandName == 'checkMeshROI':
mainWindowUI.statusbar.showMessage(' ... MATLAB requested an ROI from mesh id %s ...' %(theMeshInstance))
#get roi data (if exists) and send to matlab
error = sendROIInfo(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver) #returns 1 or 0
if error == 0:
mainWindowUI.statusbar.showMessage(' ... ROI ready to send to MATLAB from mesh id %s...' %(theMeshInstance))
else:
mainWindowUI.statusbar.showMessage(' ... No ROI to send to MATLAB from mesh id %s...' %(theMeshInstance))
the_TCPserver.socket.write(str('send useful message back here TODO'))
elif commandName == 'sendROIVertices':
mainWindowUI.statusbar.showMessage(' ... MATLAB requested an ROI from mesh id %s ...' %(theMeshInstance))
#get roi data (if exists) and send to matlab
error = sendROIVertices(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver) #returns 1 or 0
if error == 0:
mainWindowUI.statusbar.showMessage(' ... ROI ready to send to MATLAB from mesh id %s...' %(theMeshInstance))
else:
mainWindowUI.statusbar.showMessage(' ... No ROI to send to MATLAB from mesh id %s...' %(theMeshInstance))
the_TCPserver.socket.write(str('send useful message back here TODO'))
elif commandName == 'rotateMeshAnimation':
mainWindowUI.statusbar.showMessage(' ... doing rotation animation ...')
#really just for testing initially
rotateMeshAnimation(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver)
mainWindowUI.statusbar.showMessage(' ... Finished rotation animation ...')
the_TCPserver.socket.write(str('send useful message back here TODO'))
else:
print('mrMeshPy received a command it did not recognise')
the_TCPserver.socket.write(str('send cmd error message back here TODO'))
|
andregouws/mrMeshPy
|
mp_Commands.py
|
Python
|
mit
| 6,287
|
[
"VTK"
] |
a6ed7a93fef3fa9b396ac4ece7cf6a613740ece576b6cd42a33f6875b2ec22cb
|
# -*- coding: utf-8 -*-
#
# test_status.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Test if Set/GetStatus work properly
"""
import unittest
import nest
@nest.ll_api.check_stack
class StatusTestCase(unittest.TestCase):
"""Tests of Get/SetStatus, Get/SetDefaults, and Get/SetKernelStatus via get/set"""
def test_kernel_attributes(self):
"""Test nest attribute access of kernel attributes"""
nest.ResetKernel()
self.assertEqual(nest.GetKernelStatus(), nest.kernel_status)
self.assertEqual(nest.GetKernelStatus("resolution"), nest.resolution)
nest.resolution = 0.4
self.assertEqual(0.4, nest.resolution)
self.assertRaises(AttributeError, setattr, nest, "network_size", 120)
def test_GetKernelStatus(self):
"""GetKernelStatus"""
nest.ResetKernel()
kernel_status = nest.GetKernelStatus()
self.assertIsInstance(kernel_status, dict)
self.assertGreater(len(kernel_status), 1)
self.assertRaises(KeyError, nest.GetKernelStatus, "nonexistent_status_key")
test_keys = ("resolution", ) * 3
kernel_status = nest.GetKernelStatus(test_keys)
self.assertEqual(len(kernel_status), len(test_keys))
self.assertRaises(TypeError, nest.GetKernelStatus, 42)
def test_SetKernelStatus(self):
"""SetKernelStatus"""
nest.ResetKernel()
nest.SetKernelStatus({})
nest.SetKernelStatus({'resolution': 0.2})
self.assertRaises(ValueError, nest.SetKernelStatus, {'nonexistent_status_key': 0})
# Readonly check
self.assertRaises(ValueError, nest.SetKernelStatus, {'network_size': 120})
def test_GetDefaults(self):
"""GetDefaults"""
nest.ResetKernel()
for model in nest.Models():
model_status = nest.GetDefaults(model)
self.assertIsInstance(model_status, dict)
self.assertGreater(len(model_status), 1)
self.assertRaises(TypeError, nest.GetDefaults, model, 42)
if "V_m" in model_status:
test_value = nest.GetDefaults(model, "V_m")
self.assertIsInstance(test_value, float)
test_keys = ("V_m", ) * 3
model_status = nest.GetDefaults(model, test_keys)
self.assertEqual(len(model_status), len(test_keys))
def test_SetDefaults(self):
"""SetDefaults"""
nest.ResetKernel()
for m in nest.Models():
if 'V_m' in nest.GetDefaults(m):
v_m = nest.GetDefaults(m)['V_m']
nest.SetDefaults(m, {'V_m': -1.})
self.assertEqual(nest.GetDefaults(m, 'V_m'), -1.)
nest.SetDefaults(m, 'V_m', v_m)
self.assertEqual(nest.GetDefaults(m, 'V_m'), v_m)
self.assertRaisesRegex(
nest.kernel.NESTError, "DictError",
nest.SetDefaults, m, 'nonexistent_status_key', 0)
def test_GetStatus(self):
"""GetStatus"""
for m in nest.Models():
if 'V_m' in nest.GetDefaults(m):
nest.ResetKernel()
n = nest.Create(m)
d = nest.GetStatus(n)
self.assertIsInstance(d, tuple)
self.assertIsInstance(d[0], dict)
self.assertGreater(len(d[0]), 1)
v1 = nest.GetStatus(n)[0]['V_m']
v2 = nest.GetStatus(n, 'V_m')[0]
self.assertEqual(v1, v2)
n = nest.Create(m, 10)
d = nest.GetStatus(n, 'V_m')
self.assertEqual(len(d), len(n))
self.assertIsInstance(d[0], float)
test_keys = ("V_m", ) * 3
d = nest.GetStatus(n, test_keys)
self.assertEqual(len(d), len(n))
self.assertEqual(len(d[0]), len(test_keys))
def test_SetStatus(self):
"""SetStatus with dict"""
for m in nest.Models():
if 'V_m' in nest.GetDefaults(m):
nest.ResetKernel()
n = nest.Create(m)
nest.SetStatus(n, {'V_m': 1.})
self.assertEqual(nest.GetStatus(n, 'V_m')[0], 1.)
def test_SetStatusList(self):
"""SetStatus with list"""
for m in nest.Models():
if 'V_m' in nest.GetDefaults(m):
nest.ResetKernel()
n = nest.Create(m)
nest.SetStatus(n, [{'V_m': 2.}])
self.assertEqual(nest.GetStatus(n, 'V_m')[0], 2.)
def test_SetStatusParam(self):
"""SetStatus with parameter"""
for m in nest.Models():
if 'V_m' in nest.GetDefaults(m):
nest.ResetKernel()
n = nest.Create(m)
nest.SetStatus(n, 'V_m', 3.)
self.assertEqual(nest.GetStatus(n, 'V_m')[0], 3.)
def test_SetStatusVth_E_L(self):
"""SetStatus of reversal and threshold potential """
excluded = ['a2eif_cond_exp_HW', 'mat2_psc_exp', 'amat2_psc_exp']
models = [m for m in nest.Models() if m not in excluded]
for m in models:
if all(key in nest.GetDefaults(m) for key in ('V_th', 'E_L')):
nest.ResetKernel()
neuron1 = nest.Create(m)
neuron2 = nest.Create(m)
# must not depend on the order
new_EL = -90.
new_Vth = -10.
if 'V_reset' in nest.GetDefaults(m):
nest.SetStatus(neuron1 + neuron2, {'V_reset': new_EL})
nest.SetStatus(neuron1, {'E_L': new_EL})
nest.SetStatus(neuron2, {'V_th': new_Vth})
nest.SetStatus(neuron1, {'V_th': new_Vth})
nest.SetStatus(neuron2, {'E_L': new_EL})
vth1, vth2 = nest.GetStatus(neuron1 + neuron2, 'V_th')
self.assertEqual(vth1, vth2)
def test_SetStatusV_th_smaller_V_reset(self):
"""SetStatus of reversal and threshold potential
check if error is raised if V_reset > V_th"""
for m in nest.Models():
if all(key in nest.GetDefaults(m) for key in ('V_th', 'V_reset')):
nest.ResetKernel()
neuron = nest.Create(m)
# should raise exception
self.assertRaisesRegex(
nest.kernel.NESTError, "BadProperty",
nest.SetStatus, neuron,
{'V_reset': 10., 'V_th': 0.}
)
def suite():
suite = unittest.makeSuite(StatusTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
sdiazpier/nest-simulator
|
testsuite/pytests/test_status.py
|
Python
|
gpl-2.0
| 7,391
|
[
"NEURON"
] |
bdf684337259fd40d25b8add0316bda917fd45b3e68e603c56a1ed33068ba747
|
#!/usr/bin/env python2
setpoints = []
values = []
POINTS = False
FIELD = False
with open("postProcessing/sets/modulwinkel/10000/plane_T.vtk", "rb") as f:
for row in f:
row = row.strip()
if row.startswith("POINTS"):
print "p"
POINTS = True
elif row.startswith("POINT_DATA"):
print "-p"
POINTS = False
elif row.startswith("T"):
print "t"
FIELD = True
elif POINTS:
setpoints.append([round(float(x), 6) for x in row.strip().split()])
# setpoints.append(row)
elif FIELD:
print "yes"
values = [round(float(x), 6) for x in row.strip().split()]
print setpoints
print values
POINTS = False
FIELD = False
surfpoints = []
with open("postProcessing/surfaces/10000/T_plane.vtk", "rb") as f:
for row in f:
if row.startswith("POINTS"):
POINTS = True
elif row.startswith("POLYGONS") or not row.strip():
POINTS = False
break
elif POINTS:
surfpoints.append([round(float(x), 6) for x in row.strip().split()])
# surfpoints.append(row)
print surfpoints
for point in surfpoints:
if point in setpoints:
print values[setpoints.index(point)]
|
zagl/of24x
|
etc/scripts/reorder.py
|
Python
|
gpl-3.0
| 1,369
|
[
"VTK"
] |
44f38ef0f913472687e123a16186e965daa968920ab098316b60552ab44a556d
|
'''
Functions for handling star formation rates
'''
import time
import numpy as np
# --- local ---
import util as UT
def LogSFR_sfms(logMstar, z_in, sfms_dict=None):
''' Wrapper for SFMS star formation rates
'''
if sfms_dict['name'] == 'constant_offset':
# the offset from the average SFMS is preserved throughout the redshift
logsfr = AverageLogSFR_sfms(logMstar, z_in, sfms_dict=sfms_dict['sfms']) + \
sfms_dict['dsfr']
elif sfms_dict['name'] == 'no_scatter':
# SFR is just the average SFMS
logsfr = AverageLogSFR_sfms(logMstar, z_in, sfms_dict=sfms_dict['sfms'])
elif sfms_dict['name'] == 'random_step':
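# pick, for each galaxy, the index of the latest step time in 'tshift'
# that does not exceed the current cosmic time t, then apply the
# corresponding random SFR offset from 'amp'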
t = UT.t_from_z(z_in)
ishift = np.abs(sfms_dict['tshift'] -
np.tile(t, (sfms_dict['tshift'].shape[1],1)).T).argmin(axis=1)
ishift[np.where((sfms_dict['tshift'])[range(len(ishift)), ishift] > t)] -= 1
dsfr = sfms_dict['amp'][range(len(ishift)), ishift]
logsfr = AverageLogSFR_sfms(logMstar, z_in, sfms_dict=sfms_dict['sfms']) + dsfr
return logsfr
def LogSFR_Q(t, logSFR_Q=None, tau_Q=None, t_Q=None):
''' Wrapper for SFR of quenching galaxies
'''
SFRQ = np.power(10, logSFR_Q)
logsfr = np.log10(SFRQ * np.exp( (t_Q - t) / tau_Q ) )
return logsfr
def AverageLogSFR_sfms(mstar, z_in, sfms_dict=None):
''' Model for the average SFR of the SFMS as a function of M* at redshift z_in.
The model takes the functional form of
log(SFR) = A * log M* + B * z + C
'''
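# As implemented below, with explicit pivots (this restatement is just
# algebra, not a new model):
#   mu_SFR = A * (log M* - 10.5) + B * (z - 0.0502) + C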
if sfms_dict is None:
raise ValueError
if sfms_dict['name'] == 'linear':
# mass slope
A_highmass = 0.53
A_lowmass = 0.53
try:
mslope = np.repeat(A_highmass, len(mstar))
except TypeError:
mstar = np.array([mstar])
mslope = np.repeat(A_highmass, len(mstar))
# z slope
zslope = sfms_dict['zslope'] # 0.76, 1.1
# offset
offset = np.repeat(-0.11, len(mstar))
elif sfms_dict['name'] == 'kinked': # Kinked SFMS
# mass slope
A_highmass = 0.53
A_lowmass = sfms_dict['mslope_lowmass']
try:
mslope = np.repeat(A_highmass, len(mstar))
except TypeError:
mstar = np.array([mstar])
mslope = np.repeat(A_highmass, len(mstar))
lowmass = np.where(mstar < 9.5)
mslope[lowmass] = A_lowmass
# z slope
zslope = sfms_dict['zslope'] # 0.76, 1.1
# offset
offset = np.repeat(-0.11, len(mstar))
offset[lowmass] += A_lowmass - A_highmass
mu_SFR = mslope * (mstar - 10.5) + zslope * (z_in-0.0502) + offset
return mu_SFR
def ScatterLogSFR_sfms(mstar, z_in, sfms_dict=None):
''' Scatter of the SFMS logSFR as a function of M* and
redshift z_in. Hardcoded at 0.3
'''
if sfms_dict is None:
raise ValueError
return 0.3
def integSFR(logsfr, mass0, t0, tf, mass_dict=None):
''' Integrate the star formation rate over time to obtain stellar mass, via Euler or RK4 integration
M* = M*0 + f_retain * Int(SFR(t) dt, t0, tf)
Parameters
----------
logsfr : function
SFR function that accepts mass and t_cosmic as inputs
mass0 : ndarray
initial stellar mass
t0 : ndarray
initial cosmic time
tf : ndarray
final cosmic time
f_retain :
fraction of stellar mass not lost from SNe and winds from Wetzel Paper
'''
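# One forward-Euler step below, written out (delt is in Gyr, SFR in
# M_sun/yr, hence the factor of 10^9):
#   M*(t_n) = M*(t_{n-1}) + f_retain * 10^9 * delt * SFR(t_{n-1})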
type = mass_dict['type'] # Euler or RK4
f_retain = mass_dict['f_retain'] # Retain fraction
delt = mass_dict['t_step'] # maximum t resolution of the integral
niter = int(np.ceil( (tf.max()-t0.min())/delt ))
niters = np.ceil( (tf - t0) / delt).astype('int')
t_n_1 = t0
t_n = t_n_1
logSFR_n_1 = logsfr(mass0, t0)
logM_n_1 = mass0
#print niter, ' ', type, ' iterations'
#print 'f_reatin = ', f_retain
if niter > 0:
for i in xrange(niter):
iter_time = time.time()
keep = np.where(niters > i)
t_n[keep] = t_n_1[keep] + delt
if type == 'euler': # Forward Euler Method
logM_n_1[keep] = np.log10(
(10. ** logM_n_1[keep]) +
delt * 10.**9. * f_retain * (10.** logSFR_n_1[keep])
)
elif type == 'rk4': # Runge Kutta
k1 = (10.0 ** logSFR_n_1)
k2_sfr = logsfr(
np.log10(10.0**logM_n_1 + (10**9 * delt)/2.0 * k1),
t_n_1 + delt/2.0)
k2 = (10.0 ** k2_sfr)
k3_sfr = logsfr(
np.log10(10.0**logM_n_1 + (10**9 * delt)/2.0 * k2),
t_n_1 + delt/2.0)
k3 = (10.0 ** k3_sfr)
k4_sfr = logsfr(
np.log10(10.0**logM_n_1 + (10**9 * delt) * k3),
t_n_1 + delt)
k4 = (10.0 ** k4_sfr)
logM_n_1[keep] = np.log10(10.0 ** logM_n_1[keep] + f_retain/6.0 * (delt * 10**9) * (k1[keep] + 2.0*k2[keep] + 2.0*k3[keep] + k4[keep]))
else:
raise NotImplementedError
if np.sum(np.isnan(logM_n_1)) > 0:
raise ValueError('There are NaNs')
# update log(SFR), and t from step n-1
logSFR_n_1[keep] = logsfr(logM_n_1, t_n)[keep]
t_n_1 = t_n
# sanity check
if np.min(logM_n_1 - mass0) < 0.0:
if np.min(logM_n_1 - mass0) > -0.001:
pass
else:
raise ValueError("integrated mass cannot decrease over cosmic time")
return logM_n_1, logSFR_n_1
def ODE_Euler(dydt, init_cond, t_arr, delt, **func_args):
'''
'''
# t where we will evaluate
t_eval = np.arange(t_arr.min(), t_arr.max()+delt, delt)
t_eval[-1] = t_arr[-1]
indices = []
for tt in t_arr[1:-1]:
idx = np.argmin(np.abs(t_eval - tt))
t_eval[idx] = tt
indices.append(idx)
indices.append(len(t_eval) - 1)
dts = t_eval[1:] - t_eval[:-1]
y = init_cond.copy()
y_out = [init_cond.copy()]
for it in range(len(dts)):
dy = dts[it] * dydt(y, t_eval[it], **func_args)
y += dy
if it+1 in indices:
y_out.append(y.copy())
return np.array(y_out)
def ODE_RK4(dydt, init_cond, t_arr, delt, **func_args):
'''
'''
# t where we will evaluate
t_eval = np.arange(t_arr.min(), t_arr.max()+delt, delt)
t_eval[-1] = t_arr[-1]
indices = []
for tt in t_arr[1:-1]:
idx = np.argmin(np.abs(t_eval - tt))
t_eval[idx] = tt
indices.append(idx)
indices.append(len(t_eval) - 1)
dts = t_eval[1:] - t_eval[:-1]
y = init_cond.copy()
y_out = [init_cond.copy()]
for it in range(len(dts)):
k1 = dts[it] * dydt(y, t_eval[it], **func_args)
k2 = dts[it] * dydt(y + 0.5 * k1, t_eval[it] + 0.5 * dts[it], **func_args)
k3 = dts[it] * dydt(y + 0.5 * k2, t_eval[it] + 0.5 * dts[it], **func_args)
k4 = dts[it] * dydt(y + k3, t_eval[it] + dts[it], **func_args)
y += (k1 + 2.*k2 + 2.*k3 + k4)/6.
if it+1 in indices:
y_out.append(y.copy())
return np.array(y_out)
def dlogMdt_MS(logMstar, t, t_initial=None, t_final=None, f_retain=None, zfromt=None, sfh_kwargs=None):
''' Integrand d(logM)/dt for solving the ODE
d(logM)/dt = SFR'(logM, t) * 10^9/(M ln(10))
SFR'(t) = SFR(M*, t+t_offset)
or
= 0 if t > tf - t_offset
'''
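# The constant 0.3622157 subtracted below is log10(ln 10) ~ log10(2.302585),
# the term that converts dM/dt into d(log10 M)/dt in log space.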
dlogMdt = np.zeros(len(logMstar))
within = np.where((t <= t_final) & (t >= t_initial) )
if len(within[0]) > 0:
try:
dsfr = dSFR_MS(t, sfh_kwargs)[within]
except TypeError:
dsfr = dSFR_MS(t, sfh_kwargs)
tmp = AverageLogSFR_sfms(
logMstar[within],
zfromt(t),
sfms_dict=sfh_kwargs['sfms']) + dsfr + \
9. - \
logMstar[within] + \
np.log10(f_retain) - \
0.3622157
dlogMdt[within] = np.power(10, tmp)
return dlogMdt
def dSFR_MS(t, sfh_kwargs):
'''
'''
if sfh_kwargs['name'] == 'constant_offset':
dsfr = sfh_kwargs['dsfr']
elif sfh_kwargs['name'] == 'no_scatter':
dsfr = 0.
elif sfh_kwargs['name'] in ['random_step']:
ishift = np.abs(sfh_kwargs['tshift'] - t).argmin(axis=1)
ishift[np.where((sfh_kwargs['tshift'])[range(len(ishift)), ishift] > t)] -= 1
dsfr = sfh_kwargs['amp'][range(len(ishift)), ishift]
return dsfr
def dlogMdt_Q(logMstar, t, logSFR_Q=None, tau_Q=None, t_Q=None, f_retain=None, t_final=None):
''' dlogM/dt for quenching galaxies. Note that this is derived from dM/dt.
dlogM/dt quenching = SFR(M_Q, t_Q)/(M ln10) * exp( (t_Q - t) / tau_Q )
'''
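# The '+ 9.' below converts the SFR from M_sun/yr to M_sun/Gyr (a factor
# of 10^9) so the time unit matches t, which is in Gyr.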
dlogMdt = np.zeros(len(logMstar))
within = np.where((t <= t_final) & (t >= t_Q))
if len(within[0]) > 0:
SFRQ = np.power(10, logSFR_Q[within] + 9. - logMstar[within])
dlogMdt[within] = f_retain * SFRQ * \
np.exp( (t_Q[within] - t) / tau_Q[within] ) / np.log(10)
return dlogMdt
def logSFRt_MS(mstar, t, method_kwargs=None):
''' log SFR(t) for different methods
'''
if method_kwargs['name'] == 'constant_offset':
# the offset from the average SFMS is preserved throughout the redshift
mu_logsfr = AverageLogSFR_sfms(mstar, UT.z_from_t(t), sfms_dict=method_kwargs['sfms'])
return mu_logsfr + method_kwargs['dsfr']
elif method_kwargs['name'] == 'no_scatter':
# SFR is just the average SFMS
mu_logsfr = AverageLogSFR_sfms(mstar, UT.z_from_t(t), sfms_dict=method_kwargs['sfms'])
return mu_logsfr
def logSFRt_Q(MQ, t, tQ=None, tau_dict=None, method_kwargs=None):
''' log SFR(t) after tQ to tf for quenching galaxies (NOTE THAT THIS IS VERY SPECIFIC)
log(SFR)_quenching = np.log10( np.exp( -(t_f - t_Q)/tau) )
'''
if method_kwargs['name'] == 'constant_offset':
mu_logsfr = AverageLogSFR_sfms(MQ, UT.z_from_t(tQ), sfms_dict=method_kwargs['sfms'])
tauQ = getTauQ(MQ, tau_dict=tau_dict)
dlogsfrq = np.log10( np.exp( (tQ - t) / tauQ ) )
return mu_logsfr + method_kwargs['dsfr'] + dlogsfrq
elif method_kwargs['name'] == 'no_scatter':
mu_logsfr = AverageLogSFR_sfms(MQ, UT.z_from_t(tQ), sfms_dict=method_kwargs['sfms'])
tauQ = getTauQ(MQ, tau_dict=tau_dict)
dlogsfrq = np.log10( np.exp( (tQ - t) / tauQ ) )
return mu_logsfr + dlogsfrq
def getTauQ(mstar, tau_dict=None):
''' Return quenching e-fold time based on stellar mass of galaxy, Tau(M*).
'''
type = tau_dict['name']
if type == 'constant': # constant tau
n_arr = len(mstar)
tau = np.array([0.5 for i in xrange(n_arr)])
elif type == 'linear': # linear tau(mass)
tau = -(0.8 / 1.67) * ( mstar - 9.5) + 1.0
#if np.min(tau) < 0.1: # tau[ tau < 0.1 ] = 0.1
elif type == 'instant': # instant quenching
n_arr = len(mstar)
tau = np.array([0.001 for i in range(n_arr)])
elif type == 'discrete':
# param will give 4 discrete tau at the center of mass bins
masses = np.array([9.75, 10.25, 10.75, 11.25])
param = tau_dict.get('param')  # assumed key holding the 4 discrete tau values
if param is None:
raise ValueError("tau_dict must provide 'param' with the discrete tau values")
tau = np.interp(mstar, masses, param)
tau[ tau < 0.05 ] = 0.05
elif type == 'line':
# param will give slope and yint of pivoted tau line
tau = tau_dict['slope'] * (mstar - tau_dict['fid_mass']) + tau_dict['yint']
try:
if np.min(tau) < 0.001:
tau[np.where( tau < 0.001 )] = 0.001
except ValueError:
pass
elif type == 'satellite': # quenching e-fold of satellite
tau = -0.57 * ( mstar - 9.78) + 0.8
if np.min(tau) < 0.001:
tau[np.where( tau < 0.001 )] = 0.001
elif type == 'long': # long quenching (for qa purposes)
n_arr = len(mstar)
tau = np.array([2.0 for i in xrange(n_arr)])
else:
raise NotImplementedError("unrecognized tau type: %s" % type)
return tau
|
changhoonhahn/centralMS
|
centralms/tests/sfrs.py
|
Python
|
mit
| 12,560
|
[
"Galaxy"
] |
a3211810e90662fb6453f57a1fcdb7740dc7c790cd6071ce94076f10ff4ad11a
|
import warnings as _warnings
import numpy as np
import pandas as _pd
from atmPy.general import timeseries as _timeseries
from atmPy.tools import array_tools as _arry_tools
from atmPy.tools import plt_tools as _plt_tools
import matplotlib.pylab as _plt
from atmPy.aerosols.instruments.AMS import AMS as _AMS
from atmPy.aerosols.size_distribution import sizedistribution as _sizedistribution
try:
from netCDF4 import Dataset
except ModuleNotFoundError:
_warnings.warn('netCDF4 not installed. You might encounter some functionality limitations.')
class Data_Quality(object):
def __init__(self, parent, availability, availability_type, flag_info = None):
self.parent = parent
self.availability = availability
self.availability_type = availability_type
self.flag_info = flag_info
self.__flag_matrix = None
self.__flag_matrix_good_int_bad = None
self.__flag_matrix_good_int_bad_either_or = None
@property
def flag_matrix(self):
if type(self.__flag_matrix).__name__ == 'NoneType':
self.__flag_matrix = self._get_flag_matrix()
return self.__flag_matrix
@flag_matrix.setter
def flag_matrix(self, data):
self.__flag_matrix = data
self.__flag_matrix_good_int_bad = None
self.__flag_matrix_good_int_bad_either_or = None
@property
def flag_matrix_good_int_bad(self):
if type(self.__flag_matrix_good_int_bad).__name__ == 'NoneType':
self.__flag_matrix_good_int_bad = self._get_flag_matrix_good_int_bad()
return self.__flag_matrix_good_int_bad
@property
def flag_matrix_good_int_bad_either_or(self):
if type(self.__flag_matrix_good_int_bad_either_or).__name__ == 'NoneType':
self.__flag_matrix_good_int_bad_either_or = self._get_flag_matrix_good_int_bad_either_or()
return self.__flag_matrix_good_int_bad_either_or
def _get_flag_matrix_good_int_bad_either_or(self):
flag_matrix_good_int_bad_either_or = self.flag_matrix_good_int_bad.copy()
flag_matrix_good_int_bad_either_or['intermediate'][flag_matrix_good_int_bad_either_or['bad'] == True] = False
return flag_matrix_good_int_bad_either_or
def _get_flag_matrix_good_int_bad(self):
intermediate_flags = self.flag_info[self.flag_info.quality == 'Indeterminate'].index
bad_flags = self.flag_info[self.flag_info.quality == 'Bad'].index
flag_matrix_good_int_bad = _pd.DataFrame(index=self.availability.index, dtype=bool)
flag_matrix_good_int_bad['intermediate'] = _pd.Series(self.flag_matrix.loc[:, intermediate_flags].sum(axis=1).astype(bool), dtype=bool)
flag_matrix_good_int_bad['bad'] = _pd.Series(self.flag_matrix.loc[:, bad_flags].sum(axis=1).astype(bool), dtype=bool)
flag_matrix_good_int_bad['good'] = ~ (flag_matrix_good_int_bad['intermediate'] | flag_matrix_good_int_bad['bad'])
return flag_matrix_good_int_bad
def _get_flag_matrix(self):
av = self.availability
flag_matrix = np.zeros((av.shape[0], self.flag_info.shape[0]))
good = np.zeros(flag_matrix.shape[0])
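# Each availability entry is an integer bit field: render it as a
# fixed-width binary string, reverse it so bit 0 (the least significant
# bit) lines up with the first flag column, and flag rows with no bits
# set as 'good'.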
for e,flag in enumerate(av.iloc[:,0]):
b_l = list('{:0{}b}'.format(flag, self.flag_info.shape[0]))
b_l.reverse()
bs = np.array(b_l).astype(bool)
flag_matrix[e] = bs
if bs.sum() == 0:
good[e] = 1
flag_matrix = _pd.DataFrame(flag_matrix, dtype = bool,
# index = self.parent.size_distribution.data.index,
index= av.index,
columns=self.flag_info.index)
flag_matrix[0] = _pd.Series((good != 0), index = av.index)
return flag_matrix
def plot_stacked_bars(self, which='or', ax = None, resample=(6, 'H'), width=0.25, lw=0, show_missing = None, label = 'short', colormap = None, kwargs_leg = {}):
"""
Args:
which:
ax: mpl axis instance
resample:
width:
lw:
show_missing: float
This adds a gray background up to show_missing. Therefore show_missing should be the expected number of points!
kwargs_leg: dict
kwargs passed to legend, e.g. loc, title
label: string
if 'short' bits are given in legend
if 'long' flag description is given
Returns:
figure, axis
"""
if which == 'or':
fmrs = self.flag_matrix_good_int_bad_either_or
elif which == 'and':
fmrs = self.flag_matrix_good_int_bad
elif which == 'all':
fmrs = self.flag_matrix
else:
raise ValueError('{} is not an option for which'.format(which))
if resample:
fmrs = self._get_resampled(fmrs, resample)
if not ax:
f, a = _plt.subplots()
else:
a = ax
f = a.get_figure()
bars = []
labels = []
if which == 'all':
if not colormap:
colormap = _plt.cm.Accent
bottom = np.zeros(fmrs.shape[0])
for e,flag in enumerate(fmrs.columns):
# b = a.bar(fmrs.index, fmrs[flag], width=width, color=_plt_tools.color_cycle[e % len(_plt_tools.color_cycle)], linewidth=lw, edgecolor='w')
b = a.bar(fmrs.index, fmrs[flag], bottom = bottom, width=width, color=colormap(e/fmrs.columns.max()), linewidth=lw, edgecolor='w')
bottom += fmrs[flag].values
bars.append(b)
if label == 'short':
labels.append(flag)
elif label == 'long':
if flag == 0:
label_st = 'no flag'
else:
label_st = self.flag_info.loc[flag, 'description']
labels.append(label_st)
else:
raise ValueError()
# if show_missing:
# a.legend(bars, labels, title = 'flag')
else:
b_g = a.bar(fmrs.index, fmrs['good'], width=width, color=_plt_tools.color_cycle[2], linewidth=lw, edgecolor='w')
b_i = a.bar(fmrs.index, fmrs['intermediate'], width=width, bottom=fmrs['good'], color=_plt_tools.color_cycle[0], linewidth=lw, edgecolor='w')
b_b = a.bar(fmrs.index, fmrs['bad'], width=width, bottom=fmrs['good'] + fmrs['intermediate'], color=_plt_tools.color_cycle[1], linewidth=lw, edgecolor='w')
bars = [b_b, b_i, b_g]
labels = ['bad', 'intermediate', 'good']
# a.legend((b_b, b_i, b_g), ('bad', 'intermediate', 'good'))
if show_missing:
x = self.availability.index
y = np.ones(x.shape) * show_missing
cg = 0.9
fb = a.fill_between(x, y, color=[cg, cg, cg])
bars.append(fb)
labels.append('missing')
a.legend(bars, labels, **kwargs_leg)
a.set_ylabel('Number of data points')
a.set_xlabel('Timestamp')
f.autofmt_xdate()
return f, a
def plot(self, which='or', ax = None, resample=(6, 'H'), show_missing = None, label = 'short', colormap = None, kwargs_leg = {}):
"""
Args:
which:
ax: mpl axis instance
resample:
show_missing: float
This adds a gray background up to show_missing. Therefore show_missing should be the expected number of points!
kwargs_leg: dict
kwargs passed to legend, e.g. loc, title
label: string
if 'short' bits are given in legend
if 'long' flag description is given
Returns:
figure, axis
"""
if which == 'or':
fmrs = self.flag_matrix_good_int_bad_either_or
elif which == 'and':
fmrs = self.flag_matrix_good_int_bad
elif which == 'all':
fmrs = self.flag_matrix
else:
raise ValueError('{} is not an option for which'.format(which))
if resample:
fmrs = self._get_resampled(fmrs, resample)
if not ax:
f, a = _plt.subplots()
else:
a = ax
f = a.get_figure()
bars = []
labels = []
if which == 'all':
if not colormap:
colormap = _plt.cm.Accent
for e,flag in enumerate(fmrs.columns):
# b = a.bar(fmrs.index, fmrs[flag], width=width, color=_plt_tools.color_cycle[e % len(_plt_tools.color_cycle)], linewidth=lw, edgecolor='w')
g, = a.plot(fmrs.index, fmrs[flag], color=colormap(e/fmrs.columns.max()))
if label == 'short':
labels.append(flag)
elif label == 'long':
if flag == 0:
label_st = 'no flag'
else:
label_st = self.flag_info.loc[flag, 'description']
labels.append(label_st)
else:
raise ValueError()
# if show_missing:
# a.legend(bars, labels, title = 'flag')
else:
b_g, = a.plot(fmrs.index, fmrs['good'], color=_plt_tools.color_cycle[2])
b_i, = a.plot(fmrs.index, fmrs['intermediate'], color=_plt_tools.color_cycle[0])
b_b, = a.plot(fmrs.index, fmrs['bad'], color=_plt_tools.color_cycle[1])
bars = [b_b, b_i, b_g]
labels = ['bad', 'intermediate', 'good']
# a.legend((b_b, b_i, b_g), ('bad', 'intermediate', 'good'))
if show_missing:
x = self.availability.index
y = np.ones(x.shape) * show_missing
cg = 0.9
fb = a.fill_between(x, y, color=[cg, cg, cg])
bars.append(fb)
labels.append('missing')
a.legend(bars, labels, **kwargs_leg)
a.set_ylabel('Number of data points')
a.set_xlabel('Timestamp')
f.autofmt_xdate()
return f, a
@staticmethod
def _get_resampled(which, period=(6, 'H')):
return which.resample(period, label='left').sum()
class ArmDataset(object):
def __init__(self, fname, data_quality = 'good', data_quality_flag_max = None, error_bad_file = True):
# self._data_period = None
self._error_bad_file = error_bad_file
self._fname = fname
if fname:
self.netCDF = Dataset(fname)
self.data_quality_flag_max = data_quality_flag_max
self.data_quality = data_quality
if type(self.time_stamps).__name__ != 'OverflowError':
self._parsing_error = False
self._parse_netCDF()
else:
self._parsing_error = True
def _concat(self, arm_data_objs, close_gaps = True):
for att in self._concatable:
first_object = getattr(arm_data_objs[0], att)
which_type = type(first_object).__name__
data_period = first_object._data_period
if which_type == 'TimeSeries_2D':
value = _timeseries.TimeSeries_2D(_pd.concat([getattr(i, att).data for i in arm_data_objs]))
elif which_type == 'TimeSeries':
value = _timeseries.TimeSeries(
_pd.concat([getattr(i, att).data for i in arm_data_objs]))
elif which_type == 'AMS_Timeseries_lev01':
value = _AMS.AMS_Timeseries_lev01(
_pd.concat([getattr(i, att).data for i in arm_data_objs]))
elif which_type == 'SizeDist_TS':
# value = _AMS.AMS_Timeseries_lev01(pd.concat([getattr(i, att).data for i in arm_data_objs]))
data = _pd.concat([getattr(i, att).data for i in arm_data_objs])
value = _sizedistribution.SizeDist_TS(data, getattr(arm_data_objs[0], att).bins, 'dNdlogDp',
ignore_data_gap_error=True,)
elif which_type == 'TimeSeries_3D':
value = _timeseries.TimeSeries_3D(_pd.concat([getattr(i, att).data for i in arm_data_objs]))
else:
raise TypeError(
'%s is not an allowed type here (TimeSeries_2D, TimeSeries)' % which_type)
if hasattr(first_object, 'availability'):
try:
avail_concat = _pd.concat([getattr(i, att).availability.availability for i in arm_data_objs])
avail = Data_Quality(None, avail_concat, None , first_object.flag_info)
value.availability = avail
except:
_warnings.warn('availability could not be concatenated; make sure you converted it to a pandas frame at some point!')
value._data_period = data_period
if close_gaps:
setattr(self, att, value.close_gaps())
else:
setattr(self, att, value)
@property
def time_stamps(self):
if '__time_stamps' in dir(self):
return self.__time_stamps
else:
bt = self.netCDF.variables['base_time']
toff = self.netCDF.variables['time_offset']
try:
self.__time_stamps = _pd.to_datetime(0) + _pd.to_timedelta(bt[:].flatten()[0], unit ='s') + _pd.to_timedelta(toff[:], unit ='s')
except OverflowError as e:
txt = str(e) + ' This probably means the netcdf file is badly shaped.'
if self._error_bad_file:
raise OverflowError(txt + ' Consider setting the kwarg error_bad_file to False.')
else:
_warnings.warn(txt)
return e
self.__time_stamps.name = 'Time'
# self._time_offset = (60, 'm')
if self._time_offset:
self.__time_stamps += np.timedelta64(int(self._time_offset[0]), self._time_offset[1])
return self.__time_stamps
@time_stamps.setter
def time_stamps(self,timesamps):
self.__time_stamps = timesamps
def _data_quality_control(self):
return
def _read_variable(self, variable, reverse_qc_flag = False):
"""Reads the particular variable and replaces all masked data with NaN.
Note, if quality flag is given only values larger than the quality_control
variable are replaced with NaN.
Parameters
----------
variable: str
Variable name as defined in the netCDF file
reverse_qc_flag: bool or int
Set to the number of bits when reversal is desired.
If the indeterminate (patchy) bits are on the wrong end of the qc bit
string it might make sense to reverse the bit string.
Returns
-------
ndarray
Examples
--------
self.temp = self.read_variable(ti"""
var = self.netCDF.variables[variable]
data = var[:]
variable_qc = "qc_" + variable
availability = np.zeros(data.shape)
availability_type = None
if variable_qc in self.netCDF.variables.keys():
var_qc = self.netCDF.variables["qc_" + variable]
data_qc = var_qc[:]
availability = data_qc.copy()
availability_type = 'qc'
if reverse_qc_flag:
if type(reverse_qc_flag) != int:
raise TypeError('reverse_qc_flag should either be False or of type integer giving the number of bits')
data_qc = _arry_tools.reverse_binary(data_qc, reverse_qc_flag)
data_qc[data_qc <= self.data_quality_flag_max] = 0
data_qc[data_qc > self.data_quality_flag_max] = 1
# if hasattr(self, 'data_quality_max_intermediat'):
# print('has it!!!!')
if data.shape != data_qc.shape:
dt = np.zeros(data.shape)
dt[data_qc == 1, : ] = 1
data_qc = dt
data = np.ma.array(data, mask = data_qc, fill_value= -9999)
elif 'missing_data' in var.ncattrs():
fill_value = var.missing_data
data = np.ma.masked_where(data == fill_value, data)
# else:
# print('no quality flag found')
if type(data).__name__ == 'MaskedArray':
try:
data.data[data.mask] = np.nan
except ValueError:
data = data.astype(float)
data.data[data.mask] = np.nan
data = data.data
# data.availability = availability
# data.availability_type = availability_type
out = {}
out['data'] = data
out['availability'] = availability
out['availability_type'] = availability_type
return out
def _read_variable2timeseries(self, variable, column_name = False, reverse_qc_flag = False):
"""
Reads the specified variables and puts them into a timeseries.
Parameters
----------
variable: string or list of strings
variable names
column_name: bool or string
this is a chance to give unites. This will also be the y-label if data
is plotted
Returns
-------
pandas.DataFrame
"""
if type(variable).__name__ == 'str':
variable = [variable]
df = _pd.DataFrame(index = self.time_stamps)
for var in variable:
variable_out = self._read_variable(var, reverse_qc_flag = reverse_qc_flag)
# if var == 'ratio_85by40_Bbs_R_10um_2p':
# import pdb
# pdb.set_trace()
df[var] = _pd.Series(variable_out['data'], index = self.time_stamps)
if column_name:
df.columns.name = column_name
out = _timeseries.TimeSeries(df)
if column_name:
out._y_label = column_name
out._data_period = self._data_period
out.availability = Data_Quality(self, variable_out['availability'], variable_out['availability_type'])
# out.availability_type = variable_out['availability_type']
return out
def _get_variable_info(self):
for v in self.netCDF.variables.keys():
var = self.netCDF.variables[v]
print(v)
print(var.long_name)
print(var.shape)
print('--------')
def _close(self):
self.netCDF.close()
delattr(self, 'netCDF')
def _parse_netCDF(self):
self._data_quality_control()
return
|
hagne/atm-py
|
atmPy/data_archives/arm/file_io/_netCDF.py
|
Python
|
mit
| 18,771
|
[
"NetCDF"
] |
121698c71146e47fc9700edda129ef71e2034eef4c6bc3e30d1da40af58b8e84
|
"""
This module implements specific error handlers for VASP runs. These handlers
try to detect common errors in vasp runs and attempt to fix them on the fly
by modifying the input files.
"""
import datetime
import logging
import operator
import os
import re
import shutil
import time
import warnings
from collections import Counter
from functools import reduce
import numpy as np
from monty.dev import deprecated
from monty.os.path import zpath
from monty.serialization import loadfn
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, VaspInput
from pymatgen.io.vasp.outputs import Oszicar, Outcar, Vasprun
from pymatgen.io.vasp.sets import MPScanRelaxSet
from pymatgen.transformations.standard_transformations import SupercellTransformation
from custodian.ansible.actions import FileActions
from custodian.ansible.interpreter import Modder
from custodian.custodian import ErrorHandler
from custodian.utils import backup
from custodian.vasp.interpreter import VaspModder
__author__ = "Shyue Ping Ong, William Davidson Richards, Anubhav Jain, Wei Chen, Stephen Dacek, Andrew Rosen"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__status__ = "Beta"
__date__ = "2/4/13"
VASP_BACKUP_FILES = {
"INCAR",
"KPOINTS",
"POSCAR",
"OUTCAR",
"CONTCAR",
"OSZICAR",
"vasprun.xml",
"vasp.out",
"std_err.txt",
}
class VaspErrorHandler(ErrorHandler):
"""
Master VaspErrorHandler class that handles a number of common errors
that occur during VASP runs.
"""
is_monitor = True
error_msgs = {
"tet": [
"Tetrahedron method fails",
"tetrahedron method fails",
"Fatal error detecting k-mesh",
"Fatal error: unable to match k-point",
"Routine TETIRR needs special values",
"Tetrahedron method fails (number of k-points < 4)",
"BZINTS",
],
"inv_rot_mat": ["rotation matrix was not found (increase SYMPREC)"],
"brmix": ["BRMIX: very serious problems"],
"subspacematrix": ["WARNING: Sub-Space-Matrix is not hermitian in DAV"],
"tetirr": ["Routine TETIRR needs special values"],
"incorrect_shift": ["Could not get correct shifts"],
"real_optlay": ["REAL_OPTLAY: internal error", "REAL_OPT: internal ERROR"],
"rspher": ["ERROR RSPHER"],
"dentet": ["DENTET"],
"too_few_bands": ["TOO FEW BANDS"],
"triple_product": ["ERROR: the triple product of the basis vectors"],
"rot_matrix": ["Found some non-integer element in rotation matrix", "SGRCON"],
"brions": ["BRIONS problems: POTIM should be increased"],
"pricel": ["internal error in subroutine PRICEL"],
"zpotrf": ["LAPACK: Routine ZPOTRF failed"],
"amin": ["One of the lattice vectors is very long (>50 A), but AMIN"],
"zbrent": ["ZBRENT: fatal internal in", "ZBRENT: fatal error in bracketing"],
"pssyevx": ["ERROR in subspace rotation PSSYEVX"],
"eddrmm": ["WARNING in EDDRMM: call to ZHEGV failed"],
"edddav": ["Error EDDDAV: Call to ZHEGV failed"],
"algo_tet": ["ALGO=A and IALGO=5X tend to fail"],
"grad_not_orth": ["EDWAV: internal error, the gradient is not orthogonal"],
"nicht_konv": ["ERROR: SBESSELITER : nicht konvergent"],
"zheev": ["ERROR EDDIAG: Call to routine ZHEEV failed!"],
"elf_kpar": ["ELF: KPAR>1 not implemented"],
"elf_ncl": ["WARNING: ELF not implemented for non collinear case"],
"rhosyg": ["RHOSYG"],
"posmap": ["POSMAP"],
"point_group": ["group operation missing"],
"symprec_noise": ["determination of the symmetry of your systems shows a strong"],
"dfpt_ncore": ["PEAD routines do not work for NCORE", "remove the tag NPAR from the INCAR file"],
"bravais": ["Inconsistent Bravais lattice"],
"nbands_not_sufficient": ["number of bands is not sufficient"],
"hnform": ["HNFORM: k-point generating"],
}
def __init__(
self,
output_filename="vasp.out",
natoms_large_cell=None,
errors_subset_to_catch=None,
vtst_fixes=False,
):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stdout for vasp
is being redirected. The error messages that are checked are
present in the stdout. Defaults to "vasp.out", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
natoms_large_cell (int): Number of atoms threshold to treat cell
as large. Affects the correction of certain errors. Defaults to
None (not used). Deprecated.
errors_subset_to_catch (list): A subset of errors to catch. The
default is None, which means all supported errors are detected.
Use this to catch only a subset of supported errors.
E.g., ["eddrmm", "zheev"] will only catch the eddrmm and zheev
errors, and not others. If you wish to exclude only one or
two of the errors, you can create this list by the following
lines:
```
subset = list(VaspErrorHandler.error_msgs.keys())
subset.remove("eddrmm")
handler = VaspErrorHandler(errors_subset_to_catch=subset)
```
vtst_fixes (bool): Whether to consider VTST optimizers. Defaults to
False for compatibility purposes.
"""
self.output_filename = output_filename
self.errors = set()
self.error_count = Counter()
# threshold of number of atoms to treat the cell as large.
self.natoms_large_cell = natoms_large_cell # (deprecated)
if self.natoms_large_cell:
warnings.warn(
"natoms_large_cell is deprecated and currently does nothing.",
DeprecationWarning,
)
self.errors_subset_to_catch = errors_subset_to_catch or list(VaspErrorHandler.error_msgs.keys())
self.vtst_fixes = vtst_fixes
self.logger = logging.getLogger(self.__class__.__name__)
def check(self):
"""
Check for error.
"""
incar = Incar.from_file("INCAR")
self.errors = set()
error_msgs = set()
with open(self.output_filename) as file:
text = file.read()
for err in self.errors_subset_to_catch:
for msg in self.error_msgs[err]:
if text.find(msg) != -1:
# this checks if we want to run a charged
# computation (e.g., defects) if yes we don't
# want to kill it because there is a change in
# e-density (brmix error)
if err == "brmix" and "NELECT" in incar:
continue
self.errors.add(err)
error_msgs.add(msg)
for msg in error_msgs:
self.logger.error(msg, extra={"incar": incar.as_dict()})
return len(self.errors) > 0
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
actions = []
vi = VaspInput.from_directory(".")
if self.errors.intersection(["tet", "dentet"]):
if vi["INCAR"].get("KSPACING"):
# decrease KSPACING by 20% in each direction (approximately double no. of kpoints)
actions.append(
{
"dict": "INCAR",
"action": {"_set": {"KSPACING": vi["INCAR"].get("KSPACING") * 0.8}},
}
)
else:
actions.append({"dict": "INCAR", "action": {"_set": {"ISMEAR": 0, "SIGMA": 0.05}}})
if "inv_rot_mat" in self.errors:
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC": 1e-8}}})
if "brmix" in self.errors:
# If there is not a valid OUTCAR already, increment
# error count to 1 to skip first fix
if self.error_count["brmix"] == 0:
try:
assert Outcar(zpath(os.path.join(os.getcwd(), "OUTCAR"))).is_stopped is False
except Exception:
self.error_count["brmix"] += 1
if self.error_count["brmix"] == 0:
# Valid OUTCAR - simply rerun the job and increment
# error count for next time
actions.append({"dict": "INCAR", "action": {"_set": {"ISTART": 1}}})
self.error_count["brmix"] += 1
elif self.error_count["brmix"] == 1:
# Use Kerker mixing w/default values for other parameters
actions.append({"dict": "INCAR", "action": {"_set": {"IMIX": 1}}})
self.error_count["brmix"] += 1
elif self.error_count["brmix"] == 2 and vi["KPOINTS"].style == Kpoints.supported_modes.Gamma:
actions.append(
{
"dict": "KPOINTS",
"action": {"_set": {"generation_style": "Monkhorst"}},
}
)
actions.append({"dict": "INCAR", "action": {"_unset": {"IMIX": 1}}})
self.error_count["brmix"] += 1
elif self.error_count["brmix"] in [2, 3] and vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
actions.append(
{
"dict": "KPOINTS",
"action": {"_set": {"generation_style": "Gamma"}},
}
)
actions.append({"dict": "INCAR", "action": {"_unset": {"IMIX": 1}}})
self.error_count["brmix"] += 1
if vi["KPOINTS"].num_kpts < 1:
all_kpts_even = all(n % 2 == 0 for n in vi["KPOINTS"].kpts[0])
if all_kpts_even:
new_kpts = (tuple(n + 1 for n in vi["KPOINTS"].kpts[0]),)
actions.append(
{
"dict": "KPOINTS",
"action": {"_set": {"kpoints": new_kpts}},
}
)
else:
actions.append({"dict": "INCAR", "action": {"_set": {"ISYM": 0}}})
if vi["KPOINTS"] is not None:
if vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
actions.append(
{
"dict": "KPOINTS",
"action": {"_set": {"generation_style": "Gamma"}},
}
)
# Based on VASP forum's recommendation, you should delete the
# CHGCAR and WAVECAR when dealing with this error.
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.append(
{
"file": "CHGCAR",
"action": {"_file_delete": {"mode": "actual"}},
}
)
actions.append(
{
"file": "WAVECAR",
"action": {"_file_delete": {"mode": "actual"}},
}
)
if "zpotrf" in self.errors:
# Usually caused by short bond distances. If on the first step,
# volume needs to be increased. Otherwise, it was due to a step
# being too big and POTIM should be decreased. If a static run
# try turning off symmetry.
try:
oszicar = Oszicar("OSZICAR")
nsteps = len(oszicar.ionic_steps)
except Exception:
nsteps = 0
if nsteps >= 1:
potim = round(vi["INCAR"].get("POTIM", 0.5) / 2.0, 2)
actions.append({"dict": "INCAR", "action": {"_set": {"ISYM": 0, "POTIM": potim}}})
elif vi["INCAR"].get("NSW", 0) == 0 or vi["INCAR"].get("ISIF", 0) in range(3):
actions.append({"dict": "INCAR", "action": {"_set": {"ISYM": 0}}})
else:
s = vi["POSCAR"].structure
s.apply_strain(0.2)
actions.append({"dict": "POSCAR", "action": {"_set": {"structure": s.as_dict()}}})
# Based on VASP forum's recommendation, you should delete the
# CHGCAR and WAVECAR when dealing with this error.
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.append({"file": "CHGCAR", "action": {"_file_delete": {"mode": "actual"}}})
actions.append({"file": "WAVECAR", "action": {"_file_delete": {"mode": "actual"}}})
if self.errors.intersection(["subspacematrix"]):
if self.error_count["subspacematrix"] == 0:
actions.append({"dict": "INCAR", "action": {"_set": {"LREAL": False}}})
elif self.error_count["subspacematrix"] == 1:
actions.append({"dict": "INCAR", "action": {"_set": {"PREC": "Accurate"}}})
self.error_count["subspacematrix"] += 1
if self.errors.intersection(["rspher", "real_optlay", "nicht_konv"]):
if vi["INCAR"].get("LREAL", False) is not False:
actions.append({"dict": "INCAR", "action": {"_set": {"LREAL": False}}})
if self.errors.intersection(["tetirr", "incorrect_shift"]):
if vi["KPOINTS"] is not None:
if vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
actions.append(
{
"dict": "KPOINTS",
"action": {"_set": {"generation_style": "Gamma"}},
}
)
if "rot_matrix" in self.errors:
if vi["KPOINTS"] is not None:
if vi["KPOINTS"].style == Kpoints.supported_modes.Monkhorst:
actions.append(
{
"dict": "KPOINTS",
"action": {"_set": {"generation_style": "Gamma"}},
}
)
else:
actions.append({"dict": "INCAR", "action": {"_set": {"ISYM": 0}}})
if "amin" in self.errors:
actions.append({"dict": "INCAR", "action": {"_set": {"AMIN": "0.01"}}})
if "triple_product" in self.errors:
s = vi["POSCAR"].structure
trans = SupercellTransformation(((1, 0, 0), (0, 0, 1), (0, 1, 0)))
new_s = trans.apply_transformation(s)
actions.append(
{
"dict": "POSCAR",
"action": {"_set": {"structure": new_s.as_dict()}},
"transformation": trans.as_dict(),
}
)
if "pricel" in self.errors:
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC": 1e-8, "ISYM": 0}}})
if "brions" in self.errors:
# Copy CONTCAR to POSCAR so we do not lose our progress.
actions.append({"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}})
# By default, increase POTIM per the VASP error message. But if that does not work,
# we should try IBRION = 2 since it is less sensitive to POTIM.
potim = round(vi["INCAR"].get("POTIM", 0.5) + 0.1, 2)
if self.error_count["brions"] == 1 and vi["INCAR"].get("IBRION", 0) == 1:
# Reset POTIM to default value and switch to IBRION = 2
actions.append({"dict": "INCAR", "action": {"_set": {"IBRION": 2, "POTIM": 0.5}}})
else:
# Increase POTIM
actions.append({"dict": "INCAR", "action": {"_set": {"POTIM": potim}}})
self.error_count["brions"] += 1
if "zbrent" in self.errors:
# ZBRENT is caused by numerical noise in the forces, often near the PES minimum
# This is often a severe problem for systems with many atoms, flexible
# structures (e.g. zeolites, MOFs), and surfaces with adsorbates present. It is
# a tricky one to resolve and generally occurs with IBRION = 2, which is otherwise
# a fairly robust optimization algorithm.
#
# VASP recommends moving CONTCAR to POSCAR and tightening EDIFF to improve the forces.
# That is our first option, along with setting NELMIN to 8 to ensure the forces are
# high quality. Our backup option if this does not help is to switch to IBRION = 1.
#
# If the user has specified vtst_fixes = True, we instead switch right away to FIRE, which is known
# to be much more robust near the PES minimum. It is not the default because it requires
# VTST to be installed.
ediff = vi["INCAR"].get("EDIFF", 1e-4)
# Copy CONTCAR to POSCAR. This should always be done so we don't lose our progress.
actions.append({"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}})
            # Tighten EDIFF per the VASP warning message. If EDIFF is > 1e-6, set it to 1e-6;
            # otherwise tighten it by a factor of 10, stopping once EDIFF reaches 1e-8.
if ediff > 1e-8:
if ediff > 1e-6:
actions.append({"dict": "INCAR", "action": {"_set": {"EDIFF": 1e-6}}})
else:
actions.append({"dict": "INCAR", "action": {"_set": {"EDIFF": ediff / 10}}})
# Set NELMIN to 8 to further ensure we have accurate forces. NELMIN of 4 to 8 is also
# recommended if IBRION = 1 is set anyway.
if vi["INCAR"].get("NELMIN", 2) < 8:
actions.append({"dict": "INCAR", "action": {"_set": {"NELMIN": 8}}})
# FIRE almost always resolves this issue but requires VTST to be installed. We provide
# it as a non-default option for the user. It is also not very sensitive to POTIM, unlike
# IBRION = 1. FIRE requires accurate forces but is unlikely to run into the zbrent issue.
# Since accurate forces are required for FIRE, we also need EDIFF to be tight and NELMIN
# to be set, e.g. to 8. This was already done above.
if self.vtst_fixes:
if vi["INCAR"].get("IOPT", 0) != 7:
actions.append({"dict": "INCAR", "action": {"_set": {"IOPT": 7, "IBRION": 3, "POTIM": 0}}})
else:
# By default, we change IBRION to 1 if the first CONTCAR to POSCAR swap did not work.
# We do not do this right away because IBRION = 1 is very sensitive to POTIM, which may
# cause a brions error downstream. We want to avoid the loop condition of zbrent -->
# switch to IBRION = 1 --> brions --> increase POTIM --> brions --> switch back to IBRION = 2
# --> zbrent --> and so on. The best way to avoid this is trying to get it to converge in the
# first place without switching IBRION to 1.
if self.error_count["zbrent"] == 1:
actions.append({"dict": "INCAR", "action": {"_set": {"IBRION": 1}}})
self.error_count["zbrent"] += 1
if "too_few_bands" in self.errors:
if "NBANDS" in vi["INCAR"]:
nbands = vi["INCAR"]["NBANDS"]
else:
with open("OUTCAR") as f:
for line in f:
if "NBANDS" in line:
try:
d = line.split("=")
nbands = int(d[-1].strip())
break
except (IndexError, ValueError):
pass
actions.append({"dict": "INCAR", "action": {"_set": {"NBANDS": int(1.1 * nbands)}}})
if "pssyevx" in self.errors:
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Normal"}}})
if "eddrmm" in self.errors:
# RMM algorithm is not stable for this calculation
if vi["INCAR"].get("ALGO", "Normal").lower() in ["fast", "veryfast"]:
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Normal"}}})
else:
potim = round(vi["INCAR"].get("POTIM", 0.5) / 2.0, 2)
actions.append({"dict": "INCAR", "action": {"_set": {"POTIM": potim}}})
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.append({"file": "CHGCAR", "action": {"_file_delete": {"mode": "actual"}}})
actions.append({"file": "WAVECAR", "action": {"_file_delete": {"mode": "actual"}}})
if "edddav" in self.errors:
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.append({"file": "CHGCAR", "action": {"_file_delete": {"mode": "actual"}}})
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "All"}}})
if "algo_tet" in self.errors:
# ALGO=All/Damped / IALGO=5X often fails with ISMEAR < 0. There are two options VASP
# suggests: 1) Use ISMEAR = 0 (and a small sigma) to get the SCF to converge.
# 2) Use ALGO = Damped but only *after* an ISMEAR = 0 run where the wavefunction
# has been stored and read in for the subsequent run.
#
# For simplicity, we go with Option 1 here, but if the user wants high-quality
# DOS then they should consider running a subsequent job with ISMEAR = -5 and
# ALGO = Damped, provided the wavefunction has been stored.
if vi["INCAR"].get("ISMEAR", 1) < 0:
actions.append({"dict": "INCAR", "action": {"_set": {"ISMEAR": 0, "SIGMA": 0.05}}})
if vi["INCAR"].get("NEDOS") or vi["INCAR"].get("EMIN") or vi["INCAR"].get("EMAX"):
warnings.warn(
"This looks like a DOS run. You may want to follow-up this job with ALGO = Damped"
" and ISMEAR = -5, using the wavefunction from the current job.",
UserWarning,
)
if "grad_not_orth" in self.errors:
            # Often coincides with algo_tet, in which case the algo_tet error handler will also resolve grad_not_orth.
# When not present alongside algo_tet, the grad_not_orth error is due to how VASP is compiled.
# Depending on the optimization flag and choice of compiler, the ALGO = All and Damped algorithms
# may not work. The only fix is either to change ALGO or to recompile VASP. Since meta-GGAs/hybrids
# are often used with ALGO = All (and hybrids are incompatible with ALGO = VeryFast/Fast and slow with
# ALGO = Normal), we do not adjust ALGO in these cases.
if vi["INCAR"].get("METAGGA", "none") == "none" and not vi["INCAR"].get("LHFCALC", False):
if vi["INCAR"].get("ALGO", "Normal").lower() in ["all", "damped"]:
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Fast"}}})
elif 53 <= vi["INCAR"].get("IALGO", 38) <= 58:
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Fast"}, "_unset": {"IALGO": 38}}})
if "algo_tet" not in self.errors:
warnings.warn(
"EDWAV error reported by VASP without a simultaneous algo_tet error. You may wish to consider "
"recompiling VASP with the -O1 optimization if you used -O2 and this error keeps cropping up."
)
if "zheev" in self.errors:
if vi["INCAR"].get("ALGO", "Normal").lower() != "exact":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Exact"}}})
if "elf_kpar" in self.errors:
actions.append({"dict": "INCAR", "action": {"_set": {"KPAR": 1}}})
if "rhosyg" in self.errors:
if vi["INCAR"].get("SYMPREC", 1e-4) == 1e-4:
actions.append({"dict": "INCAR", "action": {"_set": {"ISYM": 0}}})
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC": 1e-4}}})
if "posmap" in self.errors:
            # VASP advises decreasing or increasing SYMPREC by an order of magnitude;
            # the default SYMPREC value is 1e-5.
if self.error_count["posmap"] == 0:
# first, reduce by 10x
orig_symprec = vi["INCAR"].get("SYMPREC", 1e-5)
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC": orig_symprec / 10}}})
elif self.error_count["posmap"] == 1:
# next, increase by 100x (10x the original)
orig_symprec = vi["INCAR"].get("SYMPREC", 1e-6)
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC": orig_symprec * 100}}})
self.error_count["posmap"] += 1
if "point_group" in self.errors:
actions.append({"dict": "INCAR", "action": {"_set": {"ISYM": 0}}})
if "symprec_noise" in self.errors:
if (vi["INCAR"].get("ISYM", 2) > 0) and (vi["INCAR"].get("SYMPREC", 1e-5) > 1e-6):
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC": 1e-6}}})
else:
actions.append({"dict": "INCAR", "action": {"_set": {"ISYM": 0}}})
if "dfpt_ncore" in self.errors:
# note that when using "_unset" action, the value is ignored
if "NCORE" in vi["INCAR"]:
actions.append({"dict": "INCAR", "action": {"_unset": {"NCORE": 0}}})
if "NPAR" in vi["INCAR"]:
actions.append({"dict": "INCAR", "action": {"_unset": {"NPAR": 0}}})
if "bravais" in self.errors:
            # VASP recommends refining the lattice parameters or changing SYMPREC.
            # This appears to occur when SYMPREC is very low, so we change it to
            # the default if it is not already. If it is already at the default, we multiply it by 10.
symprec = vi["INCAR"].get("SYMPREC", 1e-5)
if symprec < 1e-5:
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC": 1e-5}}})
else:
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC": symprec * 10}}})
if "nbands_not_sufficient" in self.errors:
# There is something very wrong about the value of NBANDS. We don't make
# any updates to NBANDS though because it's likely the user screwed something
# up pretty badly during setup. For instance, this has happened to me if
# MAGMOM = 2*nan or something similar.
# Unfixable error. Just return None for actions.
return {"errors": ["nbands_not_sufficient"], "actions": None}
if "hnform" in self.errors:
# The only solution is to change your k-point grid or disable symmetry
# For internal calculation compatibility's sake, we do the latter
if vi["INCAR"].get("ISYM", 2) > 0:
actions.append({"dict": "INCAR", "action": {"_set": {"ISYM": 0}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": list(self.errors), "actions": actions}
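# Note: the action dicts built up in the handlers above use the Modder syntax
# consumed by VaspModder.apply_actions. A minimal sketch of the three forms used
# in this module (the specific keys and values below are illustrative only):
#
#     {"dict": "INCAR", "action": {"_set": {"ALGO": "Normal"}}}           # set/overwrite a key
#     {"dict": "INCAR", "action": {"_unset": {"NPAR": 0}}}                # remove a key (value ignored)
#     {"file": "CHGCAR", "action": {"_file_delete": {"mode": "actual"}}}  # file operation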
class LrfCommutatorHandler(ErrorHandler):
"""
Corrects LRF_COMMUTATOR errors by setting LPEAD=True if not already set.
Note that switching LPEAD=T can slightly change results versus the
default due to numerical evaluation of derivatives.
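
    Example (a minimal usage sketch; assumes the VASP executable is launched
    as shown, which will vary by machine):

        from custodian import Custodian
        from custodian.vasp.jobs import VaspJob

        handlers = [LrfCommutatorHandler(output_filename="std_err.txt")]
        jobs = [VaspJob(vasp_cmd=["mpirun", "vasp_std"])]
        Custodian(handlers, jobs).run()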
"""
is_monitor = True
error_msgs = {"lrf_comm": ["LRF_COMMUTATOR internal error"]}
def __init__(self, output_filename="std_err.txt"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stderr for vasp
is being redirected. The error messages that are checked are
present in the stderr. Defaults to "std_err.txt", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
"""
self.output_filename = output_filename
self.errors = set()
self.error_count = Counter()
def check(self):
"""
Check for error.
"""
self.errors = set()
with open(self.output_filename) as f:
for line in f:
l = line.strip()
for err, msgs in LrfCommutatorHandler.error_msgs.items():
for msg in msgs:
if l.find(msg) != -1:
self.errors.add(err)
return len(self.errors) > 0
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
actions = []
vi = VaspInput.from_directory(".")
if "lrf_comm" in self.errors:
if Outcar(zpath(os.path.join(os.getcwd(), "OUTCAR"))).is_stopped is False:
if not vi["INCAR"].get("LPEAD"):
actions.append({"dict": "INCAR", "action": {"_set": {"LPEAD": True}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": list(self.errors), "actions": actions}
class StdErrHandler(ErrorHandler):
"""
Master StdErr class that handles a number of common errors
that occur during VASP runs with error messages only in
the standard error.
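
    Example (a sketch of manual use; handlers are normally driven by
    Custodian rather than called directly):

        handler = StdErrHandler(output_filename="std_err.txt")
        if handler.check():
            result = handler.correct()  # e.g. halves KPAR for out_of_memory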
"""
is_monitor = True
error_msgs = {
"kpoints_trans": ["internal error in GENERATE_KPOINTS_TRANS: number of G-vector changed in star"],
"out_of_memory": ["Allocation would exceed memory limit"],
}
def __init__(self, output_filename="std_err.txt"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stderr for vasp
is being redirected. The error messages that are checked are
present in the stderr. Defaults to "std_err.txt", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
"""
self.output_filename = output_filename
self.errors = set()
self.error_count = Counter()
def check(self):
"""
Check for error.
"""
self.errors = set()
with open(self.output_filename) as f:
for line in f:
l = line.strip()
for err, msgs in StdErrHandler.error_msgs.items():
for msg in msgs:
if l.find(msg) != -1:
self.errors.add(err)
return len(self.errors) > 0
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
actions = []
vi = VaspInput.from_directory(".")
if "kpoints_trans" in self.errors:
if self.error_count["kpoints_trans"] == 0:
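                # Rebuild the grid as a uniform m x m x m mesh, with m taken
                # as the cube root of the original k-point count (rounded),
                # bumped to an even number for Monkhorst-Pack grids.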
m = reduce(operator.mul, vi["KPOINTS"].kpts[0])
m = max(int(round(m ** (1 / 3))), 1)
if vi["KPOINTS"].style.name.lower().startswith("m"):
m += m % 2
actions.append({"dict": "KPOINTS", "action": {"_set": {"kpoints": [[m] * 3]}}})
self.error_count["kpoints_trans"] += 1
if "out_of_memory" in self.errors:
if vi["INCAR"].get("KPAR", 1) > 1:
reduced_kpar = max(vi["INCAR"].get("KPAR", 1) // 2, 1)
actions.append({"dict": "INCAR", "action": {"_set": {"KPAR": reduced_kpar}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": list(self.errors), "actions": actions}
class AliasingErrorHandler(ErrorHandler):
"""
    Master AliasingErrorHandler class that handles aliasing (FFT grid)
    errors that occur during VASP runs.
"""
is_monitor = True
error_msgs = {
"aliasing": ["WARNING: small aliasing (wrap around) errors must be expected"],
"aliasing_incar": ["Your FFT grids (NGX,NGY,NGZ) are not sufficient for an accurate"],
}
def __init__(self, output_filename="vasp.out"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stdout for vasp
is being redirected. The error messages that are checked are
present in the stdout. Defaults to "vasp.out", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
"""
self.output_filename = output_filename
self.errors = set()
def check(self):
"""
Check for error.
"""
        self.errors = set()
        with open(self.output_filename) as f:
            for line in f:
                l = line.strip()
                for err, msgs in AliasingErrorHandler.error_msgs.items():
                    for msg in msgs:
                        if l.find(msg) != -1:
                            self.errors.add(err)
return len(self.errors) > 0
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
actions = []
vi = VaspInput.from_directory(".")
if "aliasing" in self.errors:
with open("OUTCAR") as f:
grid_adjusted = False
changes_dict = {}
r = re.compile(r".+aliasing errors.*(NG.)\s*to\s*(\d+)")
for line in f:
m = r.match(line)
if m:
changes_dict[m.group(1)] = int(m.group(2))
grid_adjusted = True
# Ensure that all NGX, NGY, NGZ have been checked
if grid_adjusted and "NGZ" in line:
actions.append({"dict": "INCAR", "action": {"_set": changes_dict}})
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.extend(
[
{
"file": "CHGCAR",
"action": {"_file_delete": {"mode": "actual"}},
},
{
"file": "WAVECAR",
"action": {"_file_delete": {"mode": "actual"}},
},
]
)
break
if "aliasing_incar" in self.errors:
# vasp seems to give different warnings depending on whether the
# aliasing error was caused by user supplied inputs
d = {k: 1 for k in ["NGX", "NGY", "NGZ"] if k in vi["INCAR"].keys()}
actions.append({"dict": "INCAR", "action": {"_unset": d}})
if vi["INCAR"].get("ICHARG", 0) < 10:
actions.extend(
[
{
"file": "CHGCAR",
"action": {"_file_delete": {"mode": "actual"}},
},
{
"file": "WAVECAR",
"action": {"_file_delete": {"mode": "actual"}},
},
]
)
VaspModder(vi=vi).apply_actions(actions)
return {"errors": list(self.errors), "actions": actions}
class DriftErrorHandler(ErrorHandler):
"""
Corrects for total drift exceeding the force convergence criteria.
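
    Example (a sketch; the values shown are illustrative):

        handler = DriftErrorHandler(max_drift=0.01, to_average=3)

    With max_drift left at None, the threshold is taken from |EDIFFG|.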
"""
def __init__(self, max_drift=None, to_average=3, enaug_multiply=2):
"""
Initializes the handler with max drift
Args:
            max_drift (float): This defines the max drift. Leaving this at the default of None gets the
                max_drift from EDIFFG.
            to_average (int): Number of most recent ionic steps over which the drift is averaged.
            enaug_multiply (float): Factor by which ENAUG is multiplied when further corrections are
                needed.
"""
self.max_drift = max_drift
self.to_average = int(to_average)
self.enaug_multiply = enaug_multiply
def check(self):
"""
Check for error.
"""
incar = Incar.from_file("INCAR")
if incar.get("EDIFFG", 0.1) >= 0 or incar.get("NSW", 0) <= 1:
# Only activate when force relaxing and ionic steps
# NSW check prevents accidental effects when running DFPT
return False
if not self.max_drift:
self.max_drift = incar["EDIFFG"] * -1
try:
outcar = Outcar("OUTCAR")
except Exception:
# Can't perform check if Outcar not valid
return False
if len(outcar.data.get("drift", [])) < self.to_average:
# Ensure enough steps to get average drift
return False
curr_drift = outcar.data.get("drift", [])[::-1][: self.to_average]
curr_drift = np.average([np.linalg.norm(d) for d in curr_drift])
return curr_drift > self.max_drift
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES)
actions = []
vi = VaspInput.from_directory(".")
incar = vi["INCAR"]
outcar = Outcar("OUTCAR")
# Move CONTCAR to POSCAR
actions.append({"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}})
# First try adding ADDGRID
if not incar.get("ADDGRID", False):
actions.append({"dict": "INCAR", "action": {"_set": {"ADDGRID": True}}})
# Otherwise set PREC to High so ENAUG can be used to control Augmentation Grid Size
elif incar.get("PREC", "Accurate").lower() != "high":
actions.append({"dict": "INCAR", "action": {"_set": {"PREC": "High"}}})
actions.append(
{
"dict": "INCAR",
"action": {"_set": {"ENAUG": incar.get("ENCUT", 520) * 2}},
}
)
# PREC is already high and ENAUG set so just increase it
else:
actions.append(
{
"dict": "INCAR",
"action": {"_set": {"ENAUG": int(incar.get("ENAUG", 1040) * self.enaug_multiply)}},
}
)
curr_drift = outcar.data.get("drift", [])[::-1][: self.to_average]
curr_drift = np.average([np.linalg.norm(d) for d in curr_drift])
VaspModder(vi=vi).apply_actions(actions)
return {
"errors": f"Excessive drift {curr_drift} > {self.max_drift}",
"actions": actions,
}
class MeshSymmetryErrorHandler(ErrorHandler):
"""
Corrects the mesh symmetry error in VASP. This error is sometimes
non-fatal. So this error handler only checks at the end of the run,
and if the run has converged, no error is recorded.
"""
is_monitor = False
def __init__(self, output_filename="vasp.out", output_vasprun="vasprun.xml"):
"""
Initializes the handler with the output files to check.
Args:
output_filename (str): This is the file where the stdout for vasp
is being redirected. The error messages that are checked are
present in the stdout. Defaults to "vasp.out", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
output_vasprun (str): Filename for the vasprun.xml file. Change
this only if it is different from the default (unlikely).
"""
self.output_filename = output_filename
self.output_vasprun = output_vasprun
def check(self):
"""
Check for error.
"""
msg = "Reciprocal lattice and k-lattice belong to different class of lattices."
vi = VaspInput.from_directory(".")
# disregard this error if KSPACING is set and no KPOINTS file is generated
if vi["INCAR"].get("KSPACING", False):
return False
# According to VASP admins, you can disregard this error
# if symmetry is off
# Also disregard if automatic KPOINT generation is used
if (not vi["INCAR"].get("ISYM", True)) or vi["KPOINTS"].style == Kpoints.supported_modes.Automatic:
return False
try:
v = Vasprun(self.output_vasprun)
if v.converged:
return False
except Exception:
pass
with open(self.output_filename) as f:
for line in f:
l = line.strip()
if l.find(msg) != -1:
return True
return False
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
vi = VaspInput.from_directory(".")
m = reduce(operator.mul, vi["KPOINTS"].kpts[0])
m = max(int(round(m ** (1 / 3))), 1)
if vi["KPOINTS"].style.name.lower().startswith("m"):
m += m % 2
actions = [{"dict": "KPOINTS", "action": {"_set": {"kpoints": [[m] * 3]}}}]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["mesh_symmetry"], "actions": actions}
class UnconvergedErrorHandler(ErrorHandler):
"""
Check if a run is converged.
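
    Example (a sketch; assumes a completed run in the working directory):

        handler = UnconvergedErrorHandler()
        if handler.check():
            handler.correct()  # walks ALGO up the ladder or continues the relaxation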
"""
is_monitor = False
def __init__(self, output_filename="vasprun.xml"):
"""
Initializes the handler with the output file to check.
Args:
            output_filename (str): Filename for the vasprun.xml file. Change
                this only if it is different from the default (unlikely).
"""
self.output_filename = output_filename
def check(self):
"""
Check for error.
"""
try:
v = Vasprun(self.output_filename)
if not v.converged:
return True
except Exception:
pass
return False
def correct(self):
"""
Perform corrections.
"""
v = Vasprun(self.output_filename)
algo = v.incar.get("ALGO", "Normal").lower()
actions = []
if not v.converged_electronic:
# Ladder from VeryFast to Fast to Normal to All
# (except for meta-GGAs and hybrids).
# These progressively switch to more stable but more
# expensive algorithms.
if v.incar.get("METAGGA", "--") != "--":
# If meta-GGA, go straight to Algo = All. Algo = All is recommended in the VASP
# manual and some meta-GGAs explicitly say to set Algo = All for proper convergence.
# I am using "--" as the check for METAGGA here because this is the default in the
# vasprun.xml file
if algo != "all":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "All"}}})
elif v.incar.get("LHFCALC", False):
# If a hybrid is used, do not set Algo = Fast or VeryFast. Hybrid calculations do not
# support these algorithms, but no warning is printed.
if algo != "all":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "All"}}})
# See the VASP manual section on LHFCALC for more information.
elif algo != "damped":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Damped", "TIME": 0.5}}})
else:
if algo == "veryfast":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Fast"}}})
elif algo == "fast":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Normal"}}})
elif algo == "normal":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "All"}}})
else:
# Try mixing as last resort
new_settings = {
"ISTART": 1,
"ALGO": "Normal",
"NELMDL": -6,
"BMIX": 0.001,
"AMIX_MAG": 0.8,
"BMIX_MAG": 0.001,
}
if not all(v.incar.get(k, "") == val for k, val in new_settings.items()):
actions.append({"dict": "INCAR", "action": {"_set": new_settings}})
elif not v.converged_ionic:
            # Just continue optimizing and let other handlers fix ionic
# optimizer parameters
actions.append({"dict": "INCAR", "action": {"_set": {"IBRION": 1}}})
actions.append({"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}})
if actions:
vi = VaspInput.from_directory(".")
backup(VASP_BACKUP_FILES)
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Unconverged"], "actions": actions}
# Unfixable error. Just return None for actions.
return {"errors": ["Unconverged"], "actions": None}
class IncorrectSmearingHandler(ErrorHandler):
"""
    Check if a calculation is a metal (zero bandgap) that has been run with
    tetrahedron smearing (ISMEAR=-5) in a non-static calculation. That setup
    is only appropriate for semiconductors. If this occurs, this handler will
    rerun the calculation using the smearing settings appropriate for metals
    (ISMEAR=2, SIGMA=0.2).
"""
is_monitor = False
def __init__(self, output_filename="vasprun.xml"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): Filename for the vasprun.xml file. Change
this only if it is different from the default (unlikely).
"""
self.output_filename = output_filename
def check(self):
"""
Check for error.
"""
try:
v = Vasprun(self.output_filename)
# check whether bandgap is zero, tetrahedron smearing was used
# and relaxation is performed.
if v.eigenvalue_band_properties[0] == 0 and v.incar.get("ISMEAR", 1) < -3 and v.incar.get("NSW", 0) > 1:
return True
except Exception:
pass
return False
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
vi = VaspInput.from_directory(".")
actions = []
actions.append({"dict": "INCAR", "action": {"_set": {"ISMEAR": 2}}})
actions.append({"dict": "INCAR", "action": {"_set": {"SIGMA": 0.2}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["IncorrectSmearing"], "actions": actions}
class ScanMetalHandler(ErrorHandler):
"""
Check if a SCAN calculation is a metal (zero bandgap) but has been run with
a KSPACING value appropriate for semiconductors. If this occurs, this handler
will rerun the calculation using the KSPACING setting appropriate for metals
(KSPACING=0.22). Note that this handler depends on values set in MPScanRelaxSet.
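
    Example (a sketch): a vasprun.xml reporting a zero bandgap together with
    KSPACING > 0.22 triggers this handler, which resets KSPACING to the
    metallic value obtained from MPScanRelaxSet with bandgap=0.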
"""
is_monitor = False
def __init__(self, output_filename="vasprun.xml"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): Filename for the vasprun.xml file. Change
this only if it is different from the default (unlikely).
"""
self.output_filename = output_filename
def check(self):
"""
Check for error.
"""
try:
v = Vasprun(self.output_filename)
            # check whether bandgap is zero and KSPACING is too coarse for a metal
if v.eigenvalue_band_properties[0] == 0 and v.incar.get("KSPACING", 1) > 0.22:
return True
except Exception:
pass
return False
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
vi = VaspInput.from_directory(".")
_dummy_structure = Structure(
[1, 0, 0, 0, 1, 0, 0, 0, 1],
["I"],
[[0, 0, 0]],
)
new_vis = MPScanRelaxSet(_dummy_structure, bandgap=0)
actions = []
actions.append({"dict": "INCAR", "action": {"_set": {"KSPACING": new_vis.incar["KSPACING"]}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["ScanMetal"], "actions": actions}
class LargeSigmaHandler(ErrorHandler):
"""
When ISMEAR > 0 (Methfessel-Paxton), monitor the magnitude of the entropy
term T*S in the OUTCAR file. If the entropy term is larger than 1 meV/atom, reduce the
value of SIGMA. See VASP documentation for ISMEAR.
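
    Each correction lowers SIGMA by 0.06, taking the default of 0.2 down to
    the practical minimum of 0.02 over three corrections.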
"""
is_monitor = True
def __init__(self):
"""
        Initializes the handler. No parameters are required.
"""
def check(self):
"""
Check for error.
"""
incar = Incar.from_file("INCAR")
try:
outcar = Outcar("OUTCAR")
except Exception:
# Can't perform check if Outcar not valid
return False
if incar.get("ISMEAR", 0) > 0:
# Read the latest entropy term.
outcar.read_pattern(
{"entropy": r"entropy T\*S.*= *(\D\d*\.\d*)"}, postprocess=float, reverse=True, terminate_on_match=True
)
n_atoms = Structure.from_file("POSCAR").num_sites
if outcar.data.get("entropy", []):
entropy_per_atom = abs(np.max(outcar.data.get("entropy"))) / n_atoms
# if more than 1 meV/atom, reduce sigma
if entropy_per_atom > 0.001:
return True
return False
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES)
actions = []
vi = VaspInput.from_directory(".")
sigma = vi["INCAR"].get("SIGMA", 0.2)
        # Reduce SIGMA by 0.06 if it is larger than 0.08. This takes SIGMA
        # from the default of 0.2 down to the practical minimum value of
        # 0.02 in three steps.
if sigma > 0.08:
actions.append(
{
"dict": "INCAR",
"action": {"_set": {"SIGMA": sigma - 0.06}},
}
)
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["LargeSigma"], "actions": actions}
@deprecated(
message="This handler is no longer supported and its use is no "
"longer recommended. It will be removed in v2020.x."
)
class MaxForceErrorHandler(ErrorHandler):
"""
Checks that the desired force convergence has been achieved. Otherwise
restarts the run with smaller EDIFFG. (This is necessary since energy
and force convergence criteria cannot be set simultaneously)
"""
is_monitor = False
def __init__(self, output_filename="vasprun.xml", max_force_threshold=0.25):
"""
Args:
output_filename (str): name to look for the vasprun
max_force_threshold (float): Threshold for max force for
restarting the run. (typically should be set to the value
that the creator looks for)
"""
self.output_filename = output_filename
self.max_force_threshold = max_force_threshold
def check(self):
"""
Check for error.
"""
try:
v = Vasprun(self.output_filename)
forces = np.array(v.ionic_steps[-1]["forces"])
sdyn = v.final_structure.site_properties.get("selective_dynamics")
if sdyn:
forces[np.logical_not(sdyn)] = 0
max_force = max(np.linalg.norm(forces, axis=1))
if max_force > self.max_force_threshold and v.converged is True:
return True
except Exception:
pass
return False
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
vi = VaspInput.from_directory(".")
ediff = vi["INCAR"].get("EDIFF", 1e-4)
ediffg = vi["INCAR"].get("EDIFFG", ediff * 10)
actions = [
{"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}},
{"dict": "INCAR", "action": {"_set": {"EDIFFG": ediffg * 0.5}}},
]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["MaxForce"], "actions": actions}
class PotimErrorHandler(ErrorHandler):
"""
Check if a run has excessively large positive energy changes.
This is typically caused by too large a POTIM. Runs typically
end up crashing with some other error (e.g. BRMIX) as the geometry
gets progressively worse.
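
    Example (a sketch): with the default dE_threshold of 1 eV/atom, a 10-atom
    cell whose OSZICAR reports an ionic step with dE = +15 eV (1.5 eV/atom)
    triggers this handler.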
"""
is_monitor = True
def __init__(self, input_filename="POSCAR", output_filename="OSZICAR", dE_threshold=1):
"""
Initializes the handler with the input and output files to check.
Args:
input_filename (str): This is the POSCAR file that the run
started from. Defaults to "POSCAR". Change
this only if it is different from the default (unlikely).
output_filename (str): This is the OSZICAR file. Change
this only if it is different from the default (unlikely).
            dE_threshold (float): The threshold energy change per atom. Defaults to 1 eV.
"""
self.input_filename = input_filename
self.output_filename = output_filename
self.dE_threshold = dE_threshold
def check(self):
"""
Check for error.
"""
try:
oszicar = Oszicar(self.output_filename)
n = len(Poscar.from_file(self.input_filename).structure)
max_dE = max(s["dE"] for s in oszicar.ionic_steps[1:]) / n
if max_dE > self.dE_threshold:
return True
except Exception:
return False
        return False
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES)
vi = VaspInput.from_directory(".")
potim = vi["INCAR"].get("POTIM", 0.5)
ibrion = vi["INCAR"].get("IBRION", 0)
if potim < 0.2 and ibrion != 3:
actions = [{"dict": "INCAR", "action": {"_set": {"IBRION": 3, "SMASS": 0.75}}}]
elif potim < 0.1:
actions = [{"dict": "INCAR", "action": {"_set": {"SYMPREC": 1e-8}}}]
else:
actions = [{"dict": "INCAR", "action": {"_set": {"POTIM": potim * 0.5}}}]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["POTIM"], "actions": actions}
class FrozenJobErrorHandler(ErrorHandler):
"""
Detects an error when the output file has not been updated
in timeout seconds. Changes ALGO to Normal from Fast
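
    Example (a sketch): FrozenJobErrorHandler(timeout=3600) treats one hour
    without output-file activity as frozen; the correction switches ALGO from
    Fast to Normal, or tightens SYMPREC to 1e-8 otherwise.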
"""
is_monitor = True
def __init__(self, output_filename="vasp.out", timeout=21600):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the file where the stdout for vasp
is being redirected. The error messages that are checked are
present in the stdout. Defaults to "vasp.out", which is the
default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
            timeout (int): The time in seconds between checks where if there
                is no activity on the output file, the run is considered
                frozen. Defaults to 21600 seconds, i.e., 6 hours.
"""
self.output_filename = output_filename
self.timeout = timeout
def check(self):
"""
Check for error.
"""
st = os.stat(self.output_filename)
if time.time() - st.st_mtime > self.timeout:
return True
        return False
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
vi = VaspInput.from_directory(".")
actions = []
if vi["INCAR"].get("ALGO", "Normal").lower() == "fast":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Normal"}}})
else:
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC": 1e-8}}})
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Frozen job"], "actions": actions}
class NonConvergingErrorHandler(ErrorHandler):
"""
Check if a run is hitting the maximum number of electronic steps at the
last nionic_steps ionic steps (default=10). If so, change ALGO using a
multi-step ladder scheme or kill the job.
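
    Example (a sketch): with NELM = 60 and nionic_steps = 10, the handler
    triggers once each of the last 10 ionic steps has used all 60 electronic
    steps, and then walks ALGO up the VeryFast -> Fast -> Normal -> All
    ladder (or, failing that, adjusts the mixing parameters).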
"""
is_monitor = True
def __init__(self, output_filename="OSZICAR", nionic_steps=10):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the OSZICAR file. Change
this only if it is different from the default (unlikely).
nionic_steps (int): The threshold number of ionic steps that
needs to hit the maximum number of electronic steps for the
run to be considered non-converging.
"""
self.output_filename = output_filename
self.nionic_steps = nionic_steps
def check(self):
"""
Check for error.
"""
vi = VaspInput.from_directory(".")
nelm = vi["INCAR"].get("NELM", 60)
try:
oszicar = Oszicar(self.output_filename)
esteps = oszicar.electronic_steps
if len(esteps) > self.nionic_steps:
return all(len(e) == nelm for e in esteps[-(self.nionic_steps + 1) : -1])
except Exception:
pass
return False
def correct(self):
"""
Perform corrections.
"""
vi = VaspInput.from_directory(".")
algo = vi["INCAR"].get("ALGO", "Normal").lower()
amix = vi["INCAR"].get("AMIX", 0.4)
bmix = vi["INCAR"].get("BMIX", 1.0)
amin = vi["INCAR"].get("AMIN", 0.1)
actions = []
# Ladder from VeryFast to Fast to Normal to All
# (except for meta-GGAs and hybrids).
# These progressively switch to more stable but more
# expensive algorithms.
if vi["INCAR"].get("METAGGA", "none").lower() != "none":
# If meta-GGA, go straight to Algo = All. Algo = All is recommended in the VASP
# manual and some meta-GGAs explicitly say to set Algo = All for proper convergence.
# I am using "none" here because METAGGA is a string variable and this is the default
if algo != "all":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "All"}}})
elif vi["INCAR"].get("LHFCALC", False):
# If a hybrid is used, do not set Algo = Fast or VeryFast. Hybrid calculations do not
# support these algorithms, but no warning is printed.
if algo != "all":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "All"}}})
# uncomment the line below for a backup option
# elif algo != "damped":
            # actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Damped", "TIME": 0.5}}})
else:
if algo == "veryfast":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Fast"}}})
elif algo == "fast":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "Normal"}}})
elif algo == "normal":
actions.append({"dict": "INCAR", "action": {"_set": {"ALGO": "All"}}})
elif amix > 0.1 and bmix > 0.01:
# Try linear mixing
actions.append(
{
"dict": "INCAR",
"action": {"_set": {"ALGO": "Normal", "AMIX": 0.1, "BMIX": 0.01, "ICHARG": 2}},
}
)
elif bmix < 3.0 and amin > 0.01:
# Try increasing bmix
actions.append(
{
"dict": "INCAR",
"action": {"_set": {"Algo": "Normal", "AMIN": 0.01, "BMIX": 3.0, "ICHARG": 2}},
}
)
if actions:
backup(VASP_BACKUP_FILES)
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
# Unfixable error. Just return None for actions.
return {"errors": ["Non-converging job"], "actions": None}
@classmethod
def from_dict(cls, d):
"""
Custom from_dict method to preserve backwards compatibility with
older versions of Custodian.
"""
if "change_algo" in d:
del d["change_algo"]
return cls(
output_filename=d.get("output_filename", "OSZICAR"),
nionic_steps=d.get("nionic_steps", 10),
)
class WalltimeHandler(ErrorHandler):
"""
Check if a run is nearing the walltime. If so, write a STOPCAR with
    LSTOP or LABORT = .True.. You can specify the walltime in the init,
    which is unfortunately necessary for SGE and for SLURM systems that do
    not export SBATCH_TIMELIMIT. If the PBS_WALLTIME or SBATCH_TIMELIMIT
    variable is in the run environment, the wall time will be automatically
    determined if not set.
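
    Example (a sketch for a 24-hour SLURM allocation):

        handler = WalltimeHandler(wall_time=86400, buffer_time=600)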
"""
is_monitor = True
# The WalltimeHandler should not terminate as we want VASP to terminate
# itself naturally with the STOPCAR.
is_terminating = False
# This handler will be unrecoverable, but custodian shouldn't raise an
# error
raises_runtime_error = False
def __init__(self, wall_time=None, buffer_time=300, electronic_step_stop=False):
"""
Initializes the handler with a buffer time.
Args:
wall_time (int): Total walltime in seconds. If this is None and
the job is running on a PBS system, the handler will attempt to
determine the walltime from the PBS_WALLTIME environment
variable. If the wall time cannot be determined or is not
set, this handler will have no effect.
buffer_time (int): The min amount of buffer time in secs at the
end that the STOPCAR will be written. The STOPCAR is written
when the time remaining is < the higher of 3 x the average
time for each ionic step and the buffer time. Defaults to
300 secs, which is the default polling time of Custodian.
This is typically sufficient for the current ionic step to
complete. But if other operations are being performed after
the run has stopped, the buffer time may need to be increased
accordingly.
electronic_step_stop (bool): Whether to check for electronic steps
instead of ionic steps (e.g. for static runs on large systems or
static HSE runs, ...). Be careful that results such as density
or wavefunctions might not be converged at the electronic level.
Should be used with LWAVE = .True. to be useful. If this is
True, the STOPCAR is written with LABORT = .TRUE. instead of
LSTOP = .TRUE.
"""
if wall_time is not None:
self.wall_time = wall_time
elif "PBS_WALLTIME" in os.environ:
self.wall_time = int(os.environ["PBS_WALLTIME"])
elif "SBATCH_TIMELIMIT" in os.environ:
self.wall_time = int(os.environ["SBATCH_TIMELIMIT"])
else:
self.wall_time = None
self.buffer_time = buffer_time
# Sets CUSTODIAN_WALLTIME_START as the start time to use for
# future jobs in the same batch environment. Can also be
        # set manually by the user in the batch environment.
if "CUSTODIAN_WALLTIME_START" in os.environ:
self.start_time = datetime.datetime.strptime(
os.environ["CUSTODIAN_WALLTIME_START"], "%a %b %d %H:%M:%S %Z %Y"
)
else:
self.start_time = datetime.datetime.now()
os.environ["CUSTODIAN_WALLTIME_START"] = datetime.datetime.strftime(
self.start_time, "%a %b %d %H:%M:%S UTC %Y"
)
self.electronic_step_stop = electronic_step_stop
self.electronic_steps_timings = [0]
self.prev_check_time = self.start_time
def check(self):
"""
Check for error.
"""
if self.wall_time:
run_time = datetime.datetime.now() - self.start_time
total_secs = run_time.total_seconds()
outcar = Outcar("OUTCAR")
if not self.electronic_step_stop:
# Determine max time per ionic step.
outcar.read_pattern({"timings": r"LOOP\+.+real time(.+)"}, postprocess=float)
time_per_step = np.max(outcar.data.get("timings")) if outcar.data.get("timings", []) else 0
else:
# Determine max time per electronic step.
outcar.read_pattern({"timings": "LOOP:.+real time(.+)"}, postprocess=float)
time_per_step = np.max(outcar.data.get("timings")) if outcar.data.get("timings", []) else 0
# If the remaining time is less than average time for 3
# steps or buffer_time.
time_left = self.wall_time - total_secs
if time_left < max(time_per_step * 3, self.buffer_time):
return True
return False
def correct(self):
"""
Perform corrections.
"""
content = "LSTOP = .TRUE." if not self.electronic_step_stop else "LABORT = .TRUE."
# Write STOPCAR
actions = [{"file": "STOPCAR", "action": {"_file_create": {"content": content}}}]
m = Modder(actions=[FileActions])
for a in actions:
m.modify(a["action"], a["file"])
return {"errors": ["Walltime reached"], "actions": None}
class CheckpointHandler(ErrorHandler):
"""
    This is not an error handler per se, but rather a checkpointer. Every X
    seconds, a STOPCAR and a chkpt.yaml file are written. This forces VASP
    to stop at the end of the next ionic step, after which the run is
    archived (as vasp.chk.#.tar.gz by the StoppedRunHandler) and the job is
    restarted. To use this properly, max_errors in Custodian must be set to
    a very high value, and you probably wouldn't want to use any standard
    VASP error handlers. This should be used in combination with the
    StoppedRunHandler.
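
    Example (a sketch; `jobs` stands for a hypothetical list of VaspJob
    instances):

        handlers = [CheckpointHandler(interval=1800), StoppedRunHandler()]
        c = Custodian(handlers, jobs, max_errors=1000)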
"""
is_monitor = True
# The CheckpointHandler should not terminate as we want VASP to terminate
# itself naturally with the STOPCAR.
is_terminating = False
def __init__(self, interval=3600):
"""
Initializes the handler with an interval.
Args:
interval (int): Interval at which to checkpoint in seconds.
Defaults to 3600 (1 hr).
"""
self.interval = interval
self.start_time = datetime.datetime.now()
self.chk_counter = 0
def check(self):
"""
Check for error.
"""
run_time = datetime.datetime.now() - self.start_time
total_secs = run_time.seconds + run_time.days * 3600 * 24
if total_secs > self.interval:
return True
return False
def correct(self):
"""
Perform corrections.
"""
content = "LSTOP = .TRUE."
chkpt_content = f'Index: {self.chk_counter}\nTime: "{datetime.datetime.now()}"'
self.chk_counter += 1
# Write STOPCAR
actions = [
{"file": "STOPCAR", "action": {"_file_create": {"content": content}}},
{
"file": "chkpt.yaml",
"action": {"_file_create": {"content": chkpt_content}},
},
]
m = Modder(actions=[FileActions])
for a in actions:
m.modify(a["action"], a["file"])
# Reset the clock.
self.start_time = datetime.datetime.now()
return {"errors": ["Checkpoint reached"], "actions": actions}
def __str__(self):
return f"CheckpointHandler with interval {self.interval}"
class StoppedRunHandler(ErrorHandler):
"""
    This is not an error handler per se, but rather the restart half of the
    checkpointing scheme. It detects the chkpt.yaml file written by the
    CheckpointHandler, archives the checkpointed run as vasp.chk.#.tar.gz,
    and copies CONTCAR to POSCAR so that the job can be restarted. To use
    this properly, max_errors in Custodian must be set to a very high value,
    and you probably wouldn't want to use any standard VASP error handlers.
    This should be used in combination with the CheckpointHandler.
"""
is_monitor = False
    # This handler does not need to terminate the run: by the time it fires,
    # VASP has already stopped naturally via the STOPCAR written by the
    # CheckpointHandler.
is_terminating = False
def __init__(self):
"""
Dummy init.
"""
pass
def check(self):
"""
Check for error.
"""
return os.path.exists("chkpt.yaml")
def correct(self):
"""
Perform corrections.
"""
d = loadfn("chkpt.yaml")
i = d["Index"]
name = shutil.make_archive(os.path.join(os.getcwd(), f"vasp.chk.{i}"), "gztar")
actions = [{"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}]
m = Modder(actions=[FileActions])
for a in actions:
m.modify(a["action"], a["file"])
actions.append({"Checkpoint": name})
return {"errors": ["Stopped run."], "actions": actions}
class PositiveEnergyErrorHandler(ErrorHandler):
"""
Check if a run has positive absolute energy.
If so, change ALGO from Fast to Normal or kill the job.
"""
is_monitor = True
def __init__(self, output_filename="OSZICAR"):
"""
Initializes the handler with the output file to check.
Args:
output_filename (str): This is the OSZICAR file. Change
this only if it is different from the default (unlikely).
"""
self.output_filename = output_filename
def check(self):
"""
Check for error.
"""
try:
oszicar = Oszicar(self.output_filename)
if oszicar.final_energy > 0:
return True
except Exception:
pass
return False
def correct(self):
"""
Perform corrections.
"""
        # Change ALGO to Normal if it is not already Normal.
vi = VaspInput.from_directory(".")
algo = vi["INCAR"].get("ALGO", "Normal").lower()
if algo not in ["normal", "n"]:
backup(VASP_BACKUP_FILES)
actions = [{"dict": "INCAR", "action": {"_set": {"ALGO": "Normal"}}}]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Positive energy"], "actions": actions}
if algo == "normal":
potim = round(vi["INCAR"].get("POTIM", 0.5) / 2.0, 2)
actions = [{"dict": "INCAR", "action": {"_set": {"POTIM": potim}}}]
VaspModder(vi=vi).apply_actions(actions)
return {"errors": ["Positive energy"], "actions": actions}
# Unfixable error. Just return None for actions.
return {"errors": ["Positive energy"], "actions": None}
|
materialsproject/custodian
|
custodian/vasp/handlers.py
|
Python
|
mit
| 72,810
|
[
"VASP",
"pymatgen"
] |
635ef4caf03452bb3e6affb37778a8dedcbbd98462f0d244e149968fa5f6927f
|
from django.db import connection
from django.core.urlresolvers import reverse
from django.conf import settings
from askbot.tests.utils import AskbotTestCase
class CacheTests(AskbotTestCase):
def setUp(self):
user = self.create_user('other_user')
self.question = self.post_question(user=user)
self.post_answer(user=user, question=self.question)
        settings.DEBUG = True # because it's forced to False
def tearDown(self):
settings.DEBUG = False
def visit_question(self):
self.client.get(self.question.get_absolute_url(), follow=True)
def test_anonymous_question_cache(self):
self.visit_question()
before_count = len(connection.queries)
self.visit_question()
#second hit to the same question should give fewer queries
after_count = len(connection.queries)
self.assertTrue(before_count > after_count,
('Expected fewer queries after calling visit_question. ' +
'Before visit: %d. After visit: %d.') % (before_count, after_count))
|
divio/askbot-devel
|
askbot/tests/cache_tests.py
|
Python
|
gpl-3.0
| 1,074
|
[
"VisIt"
] |
5e29958d934ff467be977358655da0d5c90af0e24b09d6ef42d9a9e2e2d4fbaf
|
# Copyright 2008-2009 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SeqIO support for the "ace" file format.
You are expected to use this module via the Bio.SeqIO functions.
See also the Bio.Sequencing.Ace module which offers more than just accessing
the contig consensus sequences in an ACE file as SeqRecord objects.
"""
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_nucleotide, generic_dna, generic_rna, Gapped
from Bio.Sequencing import Ace
#This is a generator function!
def AceIterator(handle):
"""Returns SeqRecord objects from an ACE file.
This uses the Bio.Sequencing.Ace module to do the hard work. Note that
by iterating over the file in a single pass, we are forced to ignore any
WA, CT, RT or WR footer tags.
Ace files include the base quality for each position, which are taken
to be PHRED style scores. Just as if you had read in a FASTQ or QUAL file
    using Bio.SeqIO with PHRED scores, these are stored in the SeqRecord's
letter_annotations dictionary under the "phred_quality" key.
>>> from Bio import SeqIO
>>> handle = open("Ace/consed_sample.ace", "rU")
>>> for record in SeqIO.parse(handle, "ace"):
... print record.id, record.seq[:10]+"...", len(record)
... print max(record.letter_annotations["phred_quality"])
Contig1 agccccgggc... 1475
90
However, ACE files do not include a base quality for any gaps in the
consensus sequence, and these are represented in Biopython with a quality
of None. Using zero would be misleading as there may be very strong
evidence to support the gap in the consensus.
>>> from Bio import SeqIO
>>> handle = open("Ace/contig1.ace", "rU")
>>> for record in SeqIO.parse(handle, "ace"):
... print record.id, "..." + record.seq[85:95]+"..."
... print record.letter_annotations["phred_quality"][85:95]
... print max(record.letter_annotations["phred_quality"])
Contig1 ...AGAGG-ATGC...
[57, 57, 54, 57, 57, None, 57, 72, 72, 72]
90
Contig2 ...GAATTACTAT...
[68, 68, 68, 68, 68, 68, 68, 68, 68, 68]
90
"""
for ace_contig in Ace.parse(handle):
#Convert the ACE contig record into a SeqRecord...
consensus_seq_str = ace_contig.sequence
        #Assume it's DNA unless there is a U in it.
if "U" in consensus_seq_str:
if "T" in consensus_seq_str:
#Very odd! Error?
                alpha = generic_nucleotide
else:
alpha = generic_rna
else:
alpha = generic_dna
if "*" in consensus_seq_str:
#For consistency with most other file formats, map
#any * gaps into - gaps.
assert "-" not in consensus_seq_str
consensus_seq = Seq(consensus_seq_str.replace("*","-"),
Gapped(alpha, gap_char="-"))
else:
consensus_seq = Seq(consensus_seq_str, alpha)
#TODO? - Base segments (BS lines) which indicates which read
#phrap has chosen to be the consensus at a particular position.
#Perhaps as SeqFeature objects?
#TODO - Supporting reads (RD lines, plus perhaps QA and DS lines)
#Perhaps as SeqFeature objects?
seq_record = SeqRecord(consensus_seq,
id = ace_contig.name,
name = ace_contig.name)
#Consensus base quality (BQ lines). Note that any gaps (originally
#as * characters) in the consensus do not get a quality entry, so
        #we assign a quality of None (zero would be misleading as there may
        #be excellent support for having a gap here).
quals = []
i=0
for base in consensus_seq:
if base == "-":
quals.append(None)
else:
quals.append(ace_contig.quality[i])
i+=1
assert i == len(ace_contig.quality)
seq_record.letter_annotations["phred_quality"] = quals
yield seq_record
#All done
def _test():
"""Run the Bio.SeqIO module's doctests.
This will try and locate the unit tests directory, and run the doctests
from there in order that the relative paths used in the examples work.
"""
import doctest
import os
if os.path.isdir(os.path.join("..","..","Tests")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("..","..","Tests"))
assert os.path.isfile("Ace/consed_sample.ace")
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
if __name__ == "__main__":
_test()
|
NirBenTalLab/proorigami-cde-package
|
cde-root/usr/lib64/python2.4/site-packages/Bio/SeqIO/AceIO.py
|
Python
|
mit
| 4,926
|
[
"Biopython"
] |
06f8c8f1e96093f1e2216825a01bf76b34908be026dab3bcc041b9a35e27deed
|
import numpy as np
import math
import sys
import os
sys.path.insert(0,os.environ['learningml']+'/GoF/')
import classifier_eval
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from keras.wrappers.scikit_learn import KerasClassifier
for dim in range(2,11):
comp_file_list=[]
####################################################################
# Gaussian samples operation
####################################################################
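    # Each tuple pairs two Gaussian sample files for the same seed i and
    # dimension dim, differing only in the final filename parameter
    # (0.1 vs 0.075).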
for i in range(100):
comp_file_list.append((os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gaussian_same_projection_on_each_axis_redefined_{1}D_1000_0.6_0.2_0.1_{0}.txt".format(i,dim),os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gaussian_same_projection_on_each_axis_redefined_{1}D_1000_0.6_0.2_0.075_{0}.txt".format(i,dim)))
#originally for nn we had 100 and 4
clf = KerasClassifier(classifier_eval.make_keras_model,n_hidden_layers=6,dimof_middle=100,dimof_input=dim)
####################################################################
classifier_eval.classifier_eval(name=str(dim)+"Dgaussian_same_projection_redefined__0_1__0_1_optimised_keras_mode_2_binary",comp_file_list=comp_file_list,clf=clf)
|
weissercn/learningml
|
learningml/GoF/optimisation_and_evaluation/not_automated/nn_gaussian_same_projection/nn_Gaussian_same_projection_evaluation_of_optimised_classifiers.py
|
Python
|
mit
| 1,283
|
[
"Gaussian"
] |
96fa546da1eb4820ad8d39fc03f59ac55f21bb2a949d4b664bb331b284604af3
|
# -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
from splinter import Browser
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests
def firefox_installed():
try:
Browser("firefox")
except OSError:
return False
return True
class FirefoxBrowserTest(WebDriverTests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("firefox", headless=True)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def setUp(self):
self.browser.visit(EXAMPLE_APP)
def test_attach_file(self):
"should provide a way to change file field value"
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "mockfile.txt"
)
self.browser.attach_file("file", file_path)
self.browser.find_by_name("upload").click()
html = self.browser.html
self.assertIn("text/plain", html)
self.assertIn(open(file_path, "rb").read().decode("utf-8"), html)
def test_should_support_with_statement(self):
with Browser("firefox"):
pass
class FirefoxWithExtensionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
extension_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "firebug.xpi"
)
cls.browser = Browser("firefox", extensions=[extension_path], headless=True)
def test_create_a_firefox_instance_with_extension(self):
"should be able to load an extension"
self.assertIn(
"firebug@software.joehewitt.com",
os.listdir(self.browser.driver.profile.extensionsDir),
)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
class FirefoxBrowserProfilePreferencesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
preferences = {
"dom.max_script_run_time": 360,
"devtools.inspector.enabled": True,
}
cls.browser = Browser("firefox", profile_preferences=preferences, headless=True)
def test_preference_set(self):
preferences = self.browser.driver.profile.default_preferences
self.assertIn("dom.max_script_run_time", preferences)
value = preferences.get("dom.max_script_run_time")
self.assertEqual(int(value), 360)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
class FirefoxBrowserCapabilitiesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
capabilities = {"acceptSslCerts": False, "javascriptEnabled": True}
cls.browser = Browser("firefox", capabilities=capabilities, headless=True)
def test_capabilities_set(self):
capabilities = self.browser.driver.capabilities
self.assertIn("acceptSslCerts", capabilities)
self.assertEqual(False, capabilities.get("acceptSslCerts"))
self.assertIn("javascriptEnabled", capabilities)
self.assertEqual(True, capabilities.get("javascriptEnabled"))
@classmethod
def tearDownClass(cls):
cls.browser.quit()
class FirefoxBrowserFullScreenTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("firefox", fullscreen=True, headless=True)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
|
bmcculley/splinter
|
tests/test_webdriver_firefox.py
|
Python
|
bsd-3-clause
| 3,498
|
[
"VisIt"
] |
1cd46e5c7ef77b86c8af1898ef931132035320cb23f5d5822d8cea6dab7abaa0
|
#!/usr/bin/env python2.7
#
# Copyright 2016 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""A build script which (thus far) works on Ubuntu 14."""
# TODO(powdercloud): Make a gulp file or similar for this. For now
# it's simply split off from the main build.py in the parent
# directory, but this is not an idiomatic way to build a JavaScript or
# Polymer project, and unlike for the parent directory there's no
# particular benefit to using Python.
import glob
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
def Die(msg):
"""Prints error and exits with status 1.
Args:
msg: The error message to emit
"""
print >> sys.stderr, msg
sys.exit(1)
def GetNodeJsCmd():
"""Ensure Node.js is installed and return the proper command to run."""
logging.info('entering ...')
for cmd in ['node', 'nodejs']:
try:
output = subprocess.check_output([cmd, '--eval', 'console.log("42")'])
if output.strip() == '42':
logging.info('... done')
return cmd
except (subprocess.CalledProcessError, OSError):
continue
Die('Node.js not found. Try "apt-get install nodejs".')
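# Debian/Ubuntu historically shipped the Node.js binary as 'nodejs' to avoid a
# package-name clash, hence probing both names above.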
def CheckPrereqs():
"""Checks that various prerequisites for this script are satisfied."""
logging.info('entering ...')
if platform.system() != 'Linux' and platform.system() != 'Darwin':
Die('Sorry, this script assumes Linux or Mac OS X thus far. '
'Please feel free to edit the source and fix it to your needs.')
# Ensure source files are available.
for f in ['webui.js', 'index.html',
'logo-blue.svg', 'package.json']:
if not os.path.exists(f):
Die('%s not found. Must run in amp_validator source directory.' % f)
# Ensure that npm is installed.
try:
npm_version = subprocess.check_output(['npm', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('npm package manager not found. Try "apt-get install npm".')
# Ensure npm version '1.3.10' or newer.
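  # npm prints 'MAJOR.MINOR.PATCH'; comparing integer tuples works because
  # Python orders tuples lexicographically.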
  m = re.search('^(\\d+)\\.(\\d+)\\.(\\d+)$', npm_version)
  if not m:
    Die('Could not parse npm version: %s' % npm_version)
  if (int(m.group(1)), int(m.group(2)), int(m.group(3))) < (1, 3, 10):
Die('Expected npm version 1.3.10 or newer, saw: %s' % npm_version)
logging.info('... done')
def SetupOutDir(out_dir):
"""Sets up a clean output directory.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
if os.path.exists(out_dir):
subprocess.check_call(['rm', '-rf', out_dir])
os.mkdir(out_dir)
logging.info('... done')
def InstallNodeDependencies():
"""Installs the dependencies using npm."""
logging.info('entering ...')
# Install the project dependencies specified in package.json into
# node_modules.
logging.info('installing AMP Validator webui dependencies ...')
subprocess.check_call(
['npm', 'install'],
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('... done')
def CreateWebuiAppengineDist(out_dir):
"""Creates the webui vulcanized directory to deploy to Appengine.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
try:
tempdir = tempfile.mkdtemp()
# Merge the contents of webui with the installed node_modules into a
# common root (a temp directory). This lets us use the vulcanize tool.
for entry in os.listdir('.'):
if entry != 'node_modules':
if os.path.isfile(entry):
shutil.copyfile(entry, os.path.join(tempdir, entry))
else:
shutil.copytree(entry, os.path.join(tempdir, entry))
for entry in os.listdir('node_modules'):
if entry == 'web-animations-js':
shutil.copytree(os.path.join('node_modules', entry),
os.path.join(tempdir, '@polymer', entry))
elif entry != '@polymer':
shutil.copytree(os.path.join('node_modules', entry),
os.path.join(tempdir, entry))
for entry in os.listdir('node_modules/@polymer'):
shutil.copytree(os.path.join('node_modules/@polymer', entry),
os.path.join(tempdir, '@polymer', entry))
vulcanized_index_html = subprocess.check_output([
'node_modules/vulcanize/bin/vulcanize',
'--inline-scripts', '--inline-css',
'-p', tempdir, 'index.html'])
finally:
shutil.rmtree(tempdir)
webui_out = os.path.join(out_dir, 'webui_appengine')
shutil.copytree('.', webui_out, ignore=shutil.ignore_patterns('dist'))
  with open(os.path.join(webui_out, 'index.html'), 'w') as f:
    f.write(vulcanized_index_html)
logging.info('... success')
def Main():
"""The main method, which executes all build steps and runs the tests."""
logging.basicConfig(
format='[[%(filename)s %(funcName)s]] - %(message)s',
level=(logging.ERROR if os.environ.get('TRAVIS') else logging.INFO))
nodejs_cmd = GetNodeJsCmd()
CheckPrereqs()
InstallNodeDependencies()
SetupOutDir(out_dir='dist')
CreateWebuiAppengineDist(out_dir='dist')
if __name__ == '__main__':
Main()
|
ADITION/amphtml
|
validator/webui/build.py
|
Python
|
apache-2.0
| 5,761
|
[
"GULP"
] |
e9f17c2cc1e3b3d172aa5955825d8404188eb7f73629b7f3ff8a285d8a37d7e0
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDelayedmatrixstats(RPackage):
"""Functions that Apply to Rows and Columns of 'DelayedMatrix' Objects
A port of the 'matrixStats' API for use with DelayedMatrix objects from
the 'DelayedArray' package. High-performing functions operating on rows
and columns of DelayedMatrix objects, e.g. col / rowMedians(), col /
rowRanks(), and col / rowSds(). Functions optimized per data type and
for subsetted calculations such that both memory usage and processing
time is minimized."""
homepage = "https://github.com/PeteHaitch/DelayedMatrixStats"
git = "https://git.bioconductor.org/packages/DelayedMatrixStats.git"
version('1.12.3', commit='2b3091dfa9b3bab914e3a4157182063714ba86ae')
version('1.6.1', commit='4378d1898a403305a94b122c4f36d1215fa7708d')
version('1.4.0', commit='eb5b390ef99651fe87a346848f807de95afe8971')
version('1.2.0', commit='de868e730be6280dfad41a280ab09f4d3083c9ac')
version('1.0.3', commit='e29a3444980ff727c5b12286884b06dfaebf5b5b')
depends_on('r-matrixgenerics', when='@1.12.2:', type=('build', 'run'))
depends_on('r-delayedarray', type=('build', 'run'))
depends_on('r-delayedarray@0.5.27:', when='@1.2.0:', type=('build', 'run'))
depends_on('r-delayedarray@0.7.37:', when='@1.4.0:', type=('build', 'run'))
depends_on('r-delayedarray@0.9.8:', when='@1.6.1:', type=('build', 'run'))
depends_on('r-delayedarray@0.15.3:', when='@1.12.2:', type=('build', 'run'))
depends_on('r-matrixstats@0.53.1:', type=('build', 'run'))
depends_on('r-matrixstats@0.55.0:', when='@1.6.1:', type=('build', 'run'))
depends_on('r-matrixstats@0.56.0:', when='@1.12.2:', type=('build', 'run'))
depends_on('r-sparsematrixstats', when='@1.12.2:', type=('build', 'run'))
depends_on('r-matrix', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-s4vectors@0.17.5:', when='@1.2.0:', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-hdf5array@1.7.10:', when='@1.4.0:', type=('build', 'run'))
depends_on('r-hdf5array@1.17.2:', when='@1.12.2:', type=('build', 'run'))
depends_on('r-biocparallel', when='@1.4.0:', type=('build', 'run'))
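    # Hedged usage example (not part of the package definition):
    #   spack install r-delayedmatrixstats@1.12.3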
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-delayedmatrixstats/package.py
|
Python
|
lgpl-2.1
| 2,467
|
[
"Bioconductor"
] |
55b1f5677bca101a5fa315754e36c440a720b45ebb3109af8990a730a7ebd362
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ldap
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equal, assert_false
import desktop.conf
import desktop.views
from desktop.lib.test_utils import grant_access
from desktop.lib.django_test_util import make_logged_in_client
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from useradmin.models import LdapGroup, UserProfile
from useradmin.models import get_profile
from hadoop import pseudo_hdfs4
from hadoop.pseudo_hdfs4 import is_live_cluster
from views import sync_ldap_users, sync_ldap_groups, import_ldap_users, import_ldap_groups, \
add_ldap_users, add_ldap_groups, sync_ldap_users_groups
import ldap_access
from tests import BaseUserAdminTests, LdapTestConnection, reset_all_groups, reset_all_users
def get_nonsense_config():
return {'nonsense': {
'users': {},
'groups': {}
}}
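# Tests below pass server='nonsense' so lookups resolve to this entry; the
# empty 'users'/'groups' dicts only mimic the shape of a real LDAP_SERVERS
# entry (shape inferred from this file, not a complete configuration example).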
class TestUserAdminLdap(BaseUserAdminTests):
def test_useradmin_ldap_user_group_membership_sync(self):
settings.MIDDLEWARE_CLASSES.append('useradmin.middleware.LdapSynchronizationMiddleware')
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import curly who is part of TestUsers and Test Administrators
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=False, import_by_dn=False)
# Set a password so that we can login
user = User.objects.get(username='curly')
user.set_password('test')
user.save()
# Should have 0 groups
assert_equal(0, user.groups.all().count())
      # Make an authenticated request as curly so that the LDAP sync middleware runs.
c = make_logged_in_client('curly', 'test', is_superuser=False)
grant_access("curly", "test", "useradmin")
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
# Now remove a group and try again.
old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop()
      # Make an authenticated request as curly so that the LDAP sync middleware runs.
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
      # Should have 2 groups now. 1 from LDAP and 1 from 'grant_access' call.
      assert_equal(2, user.groups.all().count(), user.groups.all())
finally:
settings.MIDDLEWARE_CLASSES.remove('useradmin.middleware.LdapSynchronizationMiddleware')
for finish in reset:
finish()
def test_useradmin_ldap_suboordinate_group_integration(self):
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all().order_by('username')[1].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 4)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_group_integration(self):
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
    # Test nested subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all().order_by('username')[1].username, larry.username)
# Only sync already imported
assert_equal(test_users.user_set.all().count(), 3)
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and not members of suboordinate groups (even though specified)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Nested group import
# First without recursive import, then with.
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 0, nested_group.user_set.all())
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 1, nested_group.user_set.all())
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_suboordinate_posix_group_integration(self):
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_posix_group_integration(self):
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test nested groups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups (there should be no subgroups)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Import all members of NestedPosixGroups and members of subgroups
reset_all_users()
reset_all_groups()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedPosixGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='NestedPosixGroups')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_user_integration(self):
if is_live_cluster():
raise SkipTest('HUE-2897: Skipping because the DB may not be case sensitive')
done = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
done.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Try importing a user
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'lårry', sync_groups=False, import_by_dn=False)
larry = User.objects.get(username='lårry')
assert_true(larry.first_name == 'Larry')
assert_true(larry.last_name == 'Stooge')
assert_true(larry.email == 'larry@stooges.com')
assert_true(get_profile(larry).creation_method == str(UserProfile.CreationMethod.EXTERNAL))
# Should be a noop
sync_ldap_users(ldap_access.CACHED_LDAP_CONN)
sync_ldap_groups(ldap_access.CACHED_LDAP_CONN)
assert_equal(User.objects.all().count(), 1)
assert_equal(Group.objects.all().count(), 0)
# Make sure that if a Hue user already exists with a naming collision, we
# won't overwrite any of that user's information.
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'otherguy', sync_groups=False, import_by_dn=False)
hue_user = User.objects.get(username='otherguy')
assert_equal(get_profile(hue_user).creation_method, str(UserProfile.CreationMethod.HUE))
assert_equal(hue_user.first_name, 'Different')
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
# Try importing a user and sync groups
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=True, import_by_dn=False)
curly = User.objects.get(username='curly')
assert_equal(curly.first_name, 'Curly')
assert_equal(curly.last_name, 'Stooge')
assert_equal(curly.email, 'curly@stooges.com')
assert_equal(get_profile(curly).creation_method, str(UserProfile.CreationMethod.EXTERNAL))
assert_equal(2, curly.groups.all().count(), curly.groups.all())
reset_all_users()
reset_all_groups()
# Test import case sensitivity
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Lårry', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Lårry').exists())
assert_true(User.objects.filter(username='lårry').exists())
# Test lower case
User.objects.filter(username__iexact='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
User.objects.filter(username='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
finally:
for finish in done:
finish()
def test_add_ldap_users(self):
if is_live_cluster():
raise SkipTest('HUE-2897: Skipping because the DB may not be case sensitive')
done = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
done.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
URL = reverse(add_ldap_users)
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(server='nonsense', username_pattern='moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
response = c.post(URL, dict(server='nonsense', username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(server='nonsense', username_pattern='*rr*', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'], response)
# Test ignore case
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
User.objects.filter(username='moe').delete()
assert_false(User.objects.filter(username='Moe').exists())
assert_false(User.objects.filter(username='moe').exists())
response = c.post(URL, dict(server='nonsense', username_pattern='Moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Moe').exists())
assert_true(User.objects.filter(username='moe').exists())
# Test lower case
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
User.objects.filter(username__iexact='Rock').delete()
assert_false(User.objects.filter(username='Rock').exists())
assert_false(User.objects.filter(username='rock').exists())
response = c.post(URL, dict(server='nonsense', username_pattern='rock', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
# Test regular with spaces (should fail)
response = c.post(URL, dict(server='nonsense', username_pattern='user with space', password1='test', password2='test'))
assert_true("Username must not contain whitespaces and ':'" in response.context['form'].errors['username_pattern'][0], response)
# Test dn with spaces in username and dn (should fail)
response = c.post(URL, dict(server='nonsense', username_pattern='uid=user with space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true("Could not get LDAP details for users in pattern" in response.content, response.content)
response = c.get(reverse(desktop.views.log_view))
assert_true("{username}: Username must not contain whitespaces".format(username='user with space') in response.content, response.content)
# Test dn with spaces in dn, but not username (should succeed)
response = c.post(URL, dict(server='nonsense', username_pattern='uid=user without space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true(User.objects.filter(username='spaceless').exists())
finally:
for finish in done:
finish()
def test_add_ldap_groups(self):
URL = reverse(add_ldap_groups)
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client(username='test', is_superuser=True)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
assert_true(c.get(URL))
response = c.post(URL, dict(server='nonsense', groupname_pattern='TestUsers'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'])
# Test warning notification for failed users on group import
# Import test_longfirstname user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=test_longfirstname,ou=People,dc=example,dc=com', 'TestUsers')
response = c.post(URL, dict(server='nonsense', groupname_pattern='TestUsers', import_members=True), follow=True)
assert_true('Failed to import following users: test_toolongusernametoolongusername, test_longfirstname' in response.content, response.content)
# Test with space
response = c.post(URL, dict(server='nonsense', groupname_pattern='Test Administrators'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'], response)
response = c.post(URL, dict(server='nonsense', groupname_pattern='toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongname'))
assert_true('Ensure this value has at most 256 characters' in response.context['form'].errors['groupname_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(server='nonsense', groupname_pattern='*r*'))
assert_true('/useradmin/groups' in response['Location'], response)
finally:
for finish in reset:
finish()
def test_sync_ldap_users_groups(self):
URL = reverse(sync_ldap_users_groups)
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
assert_true(c.get(URL))
assert_true(c.post(URL))
finally:
for finish in reset:
finish()
def test_ldap_exception_handling(self):
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
class LdapTestConnectionError(LdapTestConnection):
def find_users(self, user, find_by_dn=False):
raise ldap.LDAPError('No such object')
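    # Overriding find_users to raise exercises the view's LDAP error handling,
    # which is asserted via the response content below.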
ldap_access.CACHED_LDAP_CONN = LdapTestConnectionError()
c = make_logged_in_client('test', is_superuser=True)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
response = c.post(reverse(add_ldap_users), dict(server='nonsense', username_pattern='moe', password1='test', password2='test'), follow=True)
assert_true('There was an error when communicating with LDAP' in response.content, response)
finally:
for finish in reset:
finish()
class TestUserAdminLdapWithHadoop(BaseUserAdminTests):
requires_hadoop = True
def test_ensure_home_directory_add_ldap_users(self):
URL = reverse(add_ldap_users)
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
assert_true(c.get(URL))
response = c.post(URL, dict(server='nonsense', username_pattern='moe', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'])
assert_false(cluster.fs.exists('/user/moe'))
# Try same thing with home directory creation.
response = c.post(URL, dict(server='nonsense', username_pattern='curly', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
response = c.post(URL, dict(server='nonsense', username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0])
assert_false(cluster.fs.exists('/user/bad_name'))
# See if moe, who did not ask for his home directory, has a home directory.
assert_false(cluster.fs.exists('/user/moe'))
# Try wild card now
response = c.post(URL, dict(server='nonsense', username_pattern='*rr*', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
assert_true(cluster.fs.exists(u'/user/lårry'))
assert_false(cluster.fs.exists('/user/otherguy'))
finally:
# Clean up
for finish in reset:
finish()
if cluster.fs.exists('/user/curly'):
cluster.fs.rmtree('/user/curly')
if cluster.fs.exists(u'/user/lårry'):
cluster.fs.rmtree(u'/user/lårry')
if cluster.fs.exists('/user/otherguy'):
cluster.fs.rmtree('/user/otherguy')
def test_ensure_home_directory_sync_ldap_users_groups(self):
URL = reverse(sync_ldap_users_groups)
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
reset = []
# Set to nonsensical value just to force new config usage.
# Should continue to use cached connection.
reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))
try:
c.post(reverse(add_ldap_users), dict(server='nonsense', username_pattern='curly', password1='test', password2='test'))
assert_false(cluster.fs.exists('/user/curly'))
assert_true(c.post(URL, dict(server='nonsense', ensure_home_directory=True)))
assert_true(cluster.fs.exists('/user/curly'))
finally:
for finish in reset:
finish()
if cluster.fs.exists('/user/curly'):
cluster.fs.rmtree('/user/curly')
|
vmax-feihu/hue
|
apps/useradmin/src/useradmin/test_ldap.py
|
Python
|
apache-2.0
| 37,269
|
[
"MOE"
] |
8c74c8908bb07b298113d7425846640fef04e3888ecd61b43cb2faac2c4e70af
|
### clustering.py
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#import matplotlib
#matplotlib.use('GTKAgg')
import sys, os, string
command_args = string.join(sys.argv,' ')
if len(sys.argv[1:])>0 and '--' in command_args: commandLine=True
else: commandLine=False
import traceback
try:
import math
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import matplotlib
if commandLine==False:
try: matplotlib.use('TkAgg')
except Exception: pass
try: matplotlib.rcParams['backend'] = 'TkAgg'
except Exception: pass
else:
### TkAgg doesn't work when AltAnalyze is run on the command-line
try: matplotlib.use('Agg')
except Exception: pass
try: matplotlib.rcParams['backend'] = 'Agg'
except Exception: pass
try:
import matplotlib.pyplot as pylab
import matplotlib.colors as mc
import matplotlib.mlab as mlab
import matplotlib.ticker as tic
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d import Axes3D
matplotlib.rcParams['axes.linewidth'] = 0.5
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Arial'
except Exception:
print traceback.format_exc()
print 'Matplotlib support not enabled'
import scipy
try: from scipy.sparse.csgraph import _validation
except Exception: pass
from scipy.linalg import svd
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
try: import numpy; np = numpy
except Exception:
print 'Numpy import error...'
print traceback.format_exc()
try:
import igraph.vendor.texttable
except ImportError: pass
try:
from sklearn.decomposition import PCA, FastICA
except Exception: pass
#pylab.ion() # closes Tk window after show - could be nice to include
except Exception:
print traceback.format_exc()
pass
import time
import unique
import statistics
import export
import webbrowser
import warnings
import UI
try:
warnings.simplefilter("ignore", numpy.ComplexWarning)
warnings.simplefilter("ignore", DeprecationWarning) ### Annoying depreciation warnings (occurs in sch somewhere)
#This shouldn't be needed in python 2.7 which suppresses DeprecationWarning - Larsson
except Exception: None
import WikiPathways_webservice
try:
import fastcluster as fc
#print 'Using fastcluster instead of scipy hierarchical cluster'
#fc = sch
except Exception:
    #print 'Using scipy instead of fastcluster (not installed)'
try: fc = sch ### fastcluster uses the same convention names for linkage as sch
except Exception: print 'Scipy support not present...'
def getColorRange(x):
""" Determines the range of colors, centered at zero, for normalizing cmap """
vmax=x.max()
vmin=x.min()
if vmax<0 and vmin<0: direction = 'negative'
elif vmax>0 and vmin>0: direction = 'positive'
else: direction = 'both'
if direction == 'both':
vmax = max([vmax,abs(vmin)])
vmin = -1*vmax
return vmax,vmin
else:
return vmax,vmin
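# A minimal usage sketch (assumed, with hypothetical data; not part of the
# pipeline below): center a colormap at zero for a mixed-sign matrix.
#   demo = numpy.array([[-2.0, 0.5], [1.0, 3.0]])
#   vmax, vmin = getColorRange(demo)   # -> (3.0, -3.0)
#   norm = matplotlib.colors.Normalize(vmin, vmax)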
def heatmap(x, row_header, column_header, row_method, column_method, row_metric, column_metric, color_gradient,
dataset_name, display=False, contrast=None, allowAxisCompression=True,Normalize=True,PriorColumnClusters=None, PriorRowClusters=None):
print "Performing hieararchical clustering using %s for columns and %s for rows" % (column_metric,row_metric)
show_color_bars = True ### Currently, the color bars don't exactly reflect the dendrogram colors
try: ExportCorreleationMatrix = exportCorreleationMatrix
except Exception: ExportCorreleationMatrix = False
try: os.mkdir(root_dir) ### May need to create this directory
except Exception: None
if display == False:
pylab.figure() ### Add this to avoid a Tkinter bug after running MarkerFinder (not sure why it is needed) - creates a second empty window when display == True
if row_method == 'hopach' or column_method == 'hopach':
### Test R and hopach
"""
try:
import R_test
except Exception,e:
#print traceback.format_exc()
print 'Failed to install hopach or R not installed (install R before using hopach)'
row_method = 'average'; column_method = 'average'
if len(column_header)==2: column_method = 'average'
if len(row_header)==2: row_method = 'average'
"""
pass
"""
Prototype methods:
http://old.nabble.com/How-to-plot-heatmap-with-matplotlib--td32534593.html
http://stackoverflow.com/questions/7664826/how-to-get-flat-clustering-corresponding-to-color-clusters-in-the-dendrogram-cre
Scaling the color gradient so that zero is white:
http://stackoverflow.com/questions/2369492/generate-a-heatmap-in-matplotlib-using-a-scatter-data-set
Other cluster methods:
http://stackoverflow.com/questions/9362304/how-to-get-centroids-from-scipys-hierarchical-agglomerative-clustering
x is a m by n ndarray, m observations, n genes
"""
### Perform the associated clustering by HOPACH via PYPE or Rpy to R
if row_method == 'hopach' or column_method == 'hopach':
try:
""" HOPACH is a clustering method implemented in R that builds a hierarchical tree of clusters by recursively
partitioning a data set, while ordering and possibly collapsing clusters at each level:
http://www.bioconductor.org/packages/release/bioc/html/hopach.html
"""
import R_interface
#reload(R_interface)
if row_method == 'hopach' and column_method == 'hopach': cluster_method = 'both'
elif row_method == 'hopach': cluster_method = 'gene'
else: cluster_method = 'array'
if row_metric == 'cosine': metric_gene = "euclid"
elif row_metric == 'euclidean': metric_gene = "cosangle"
elif row_metric == 'correlation': metric_gene = "cor"
else: metric_gene = "cosangle"
if column_metric == 'cosine': metric_array = "euclid"
elif column_metric == 'euclidean': metric_array = "cosangle"
elif column_metric == 'correlation': metric_array = "cor"
else: metric_array = "euclid"
### Returned are the row_order and column_order in the Scipy clustering output format
newFilename, Z1, Z2 = R_interface.remoteHopach(inputFilename,cluster_method,metric_gene,metric_array)
if newFilename != inputFilename:
### If there were duplicates, re-import the matrix data for the cleaned up filename
try:
matrix, column_header, row_header, dataset_name, group_db = importData(newFilename,Normalize=normalize,reverseOrder=False)
except Exception:
matrix, column_header, row_header, dataset_name, group_db = importData(newFilename)
x = numpy.array(matrix)
except Exception:
row_method = 'average'; column_method = 'average'
print traceback.format_exc()
print 'hopach failed... continue with an alternative method'
skipClustering = False
try:
        if len(PriorColumnClusters)>0 and len(PriorRowClusters)>0 and row_method==None and column_method == None:
            print 'Prior generated clusters are being used rather than re-clustering'
"""
try:
if len(targetGeneIDs)>0:
                PriorColumnClusters=[] ### If ordered genes are input, retain this order rather than changing it
except Exception: pass
"""
if len(PriorColumnClusters)>0: ### this corresponds to the above line
Z1={}; Z2={}
Z1['level'] = PriorRowClusters; Z1['level'].reverse()
Z2['level'] = PriorColumnClusters; #Z2['level'].reverse()
Z1['leaves'] = range(0,len(row_header)); #Z1['leaves'].reverse()
Z2['leaves'] = range(0,len(column_header)); #Z2['leaves'].reverse()
row_method = 'hopach'
column_method = 'hopach'
skipClustering = True
except Exception,e:
#print traceback.format_exc()
pass
n = len(x[0]); m = len(x)
if color_gradient == 'red_white_blue':
cmap=pylab.cm.bwr
if color_gradient == 'red_black_sky':
cmap=RedBlackSkyBlue()
if color_gradient == 'red_black_blue':
cmap=RedBlackBlue()
if color_gradient == 'red_black_green':
cmap=RedBlackGreen()
if color_gradient == 'yellow_black_blue':
cmap=YellowBlackBlue()
if color_gradient == 'black_yellow_blue':
cmap=BlackYellowBlue()
if color_gradient == 'seismic':
cmap=pylab.cm.seismic
if color_gradient == 'green_white_purple':
cmap=pylab.cm.PiYG_r
if color_gradient == 'coolwarm':
cmap=pylab.cm.coolwarm
vmin=x.min()
vmax=x.max()
vmax = max([vmax,abs(vmin)])
if Normalize != False:
vmin = vmax*-1
elif 'Clustering-Zscores-' in dataset_name:
vmin = vmax*-1
default_window_hight = 8.5
default_window_width = 12
if len(column_header)>80:
default_window_width = 14
if len(column_header)>100:
default_window_width = 16
if contrast == None:
scaling_factor = 2.5 #2.5
else:
try: scaling_factor = float(contrast)
except Exception: scaling_factor = 2.5
#print vmin/scaling_factor
norm = matplotlib.colors.Normalize(vmin/scaling_factor, vmax/scaling_factor) ### adjust the max and min to scale these colors by 2.5 (1 scales to the highest change)
fig = pylab.figure(figsize=(default_window_width,default_window_hight)) ### could use m,n to scale here
pylab.rcParams['font.size'] = 7.5
if show_color_bars == False:
color_bar_w = 0.000001 ### Invisible but not gone (otherwise an error persists)
else:
color_bar_w = 0.0125 ### Sufficient size to show
bigSampleDendrogram = True
if bigSampleDendrogram == True and row_method==None and column_method != None and allowAxisCompression == True:
dg2 = 0.30
dg1 = 0.43
else: dg2 = 0.1; dg1 = 0.63
try:
if EliteGeneSets != [''] and EliteGeneSets !=[]:
matrix_horiz_pos = 0.27
elif skipClustering:
if len(row_header)<100:
matrix_horiz_pos = 0.20
else:
matrix_horiz_pos = 0.27
else:
matrix_horiz_pos = 0.14
except Exception:
matrix_horiz_pos = 0.14
## calculate positions for all elements
# ax1, placement of dendrogram 1, on the left of the heatmap
    [ax1_x, ax1_y, ax1_w, ax1_h] = [0.05,0.235,matrix_horiz_pos,dg1] ### The last value controls the matrix height; the second controls the position of the matrix relative to the bottom of the view [0.05,0.22,0.2,0.6]
width_between_ax1_axr = 0.004
height_between_ax1_axc = 0.004 ### distance between the top color bar axis and the matrix
# axr, placement of row side colorbar
[axr_x, axr_y, axr_w, axr_h] = [0.31,0.1,color_bar_w-0.002,0.6] ### second to last controls the width of the side color bar - 0.015 when showing [0.31,0.1,color_bar_w,0.6]
axr_x = ax1_x + ax1_w + width_between_ax1_axr
axr_y = ax1_y; axr_h = ax1_h
width_between_axr_axm = 0.004
# axc, placement of column side colorbar (3rd value controls the width of the matrix!)
    [axc_x, axc_y, axc_w, axc_h] = [0.4,0.63,0.6,color_bar_w] ### last one controls the height of the top color bar - 0.015 when showing [0.4,0.63,0.5,color_bar_w]
axc_x = axr_x + axr_w + width_between_axr_axm
axc_y = ax1_y + ax1_h + height_between_ax1_axc
height_between_axc_ax2 = 0.004
# axm, placement of heatmap for the data matrix
[axm_x, axm_y, axm_w, axm_h] = [0.4,0.9,2.5,0.5] #[0.4,0.9,2.5,0.5]
axm_x = axr_x + axr_w + width_between_axr_axm
axm_y = ax1_y; axm_h = ax1_h
axm_w = axc_w
# ax2, placement of dendrogram 2, on the top of the heatmap
    [ax2_x, ax2_y, ax2_w, ax2_h] = [0.3,0.72,0.6,dg2] ### last one controls the height of the dendrogram [0.3,0.72,0.6,0.135]
ax2_x = axr_x + axr_w + width_between_axr_axm
ax2_y = ax1_y + ax1_h + height_between_ax1_axc + axc_h + height_between_axc_ax2
ax2_w = axc_w
# axcb - placement of the color legend
    [axcb_x, axcb_y, axcb_w, axcb_h] = [0.02,0.938,0.17,0.025] ### Last one controls the height [0.07,0.88,0.18,0.076]
    # axcc - placement of the column colormap legend (distinct map)
    [axcc_x, axcc_y, axcc_w, axcc_h] = [0.02,0.12,0.17,0.025] ### Last one controls the height [0.07,0.88,0.18,0.076]
# Compute and plot top dendrogram
if column_method == 'hopach':
ind2 = numpy.array(Z2['level']) ### from R_interface - hopach root cluster level
elif column_method != None:
start_time = time.time()
#print x;sys.exit()
d2 = dist.pdist(x.T)
#print d2
#import mdistance2
#d2 = mdistance2.mpdist(x.T)
#print d2;sys.exit()
D2 = dist.squareform(d2)
ax2 = fig.add_axes([ax2_x, ax2_y, ax2_w, ax2_h], frame_on=False)
if ExportCorreleationMatrix:
new_matrix=[]
for i in D2:
#string.join(map(inverseDist,i),'\t')
log2_data = map(inverseDist,i)
avg = statistics.avg(log2_data)
log2_norm = map(lambda x: x-avg,log2_data)
new_matrix.append(log2_norm)
x = numpy.array(new_matrix)
row_header = column_header
#sys.exit()
Y2 = fc.linkage(D2, method=column_method, metric=column_metric) ### array-clustering metric - 'average', 'single', 'centroid', 'complete'
#Y2 = sch.fcluster(Y2, 10, criterion = "maxclust")
try: Z2 = sch.dendrogram(Y2)
except Exception:
if column_method == 'average':
column_metric = 'euclidean'
else: column_method = 'average'
Y2 = fc.linkage(D2, method=column_method, metric=column_metric)
Z2 = sch.dendrogram(Y2)
#ind2 = sch.fcluster(Y2,0.6*D2.max(), 'distance') ### get the correlations
#ind2 = sch.fcluster(Y2,0.2*D2.max(), 'maxclust') ### alternative method biased based on number of clusters to obtain (like K-means)
ind2 = sch.fcluster(Y2,0.7*max(Y2[:,2]),'distance') ### This is the default behavior of dendrogram
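        ### 0.7*max(Y2[:,2]) mirrors scipy's default dendrogram color_threshold,
        ### so these flat clusters line up with the dendrogram's colored subtrees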
ax2.set_xticks([]) ### Hides ticks
ax2.set_yticks([])
time_diff = str(round(time.time()-start_time,1))
print 'Column clustering completed in %s seconds' % time_diff
else:
ind2 = ['NA']*len(column_header) ### Used for exporting the flat cluster data
# Compute and plot left dendrogram
if row_method == 'hopach':
ind1 = numpy.array(Z1['level']) ### from R_interface - hopach root cluster level
elif row_method != None:
start_time = time.time()
d1 = dist.pdist(x)
D1 = dist.squareform(d1) # full matrix
# postion = [left(x), bottom(y), width, height]
#print D1;sys.exit()
Y1 = fc.linkage(D1, method=row_method, metric=row_metric) ### gene-clustering metric - 'average', 'single', 'centroid', 'complete'
no_plot=False ### Indicates that we want to show the dendrogram
try:
if runGOElite: no_plot = True
elif len(PriorColumnClusters)>0 and len(PriorRowClusters)>0 and row_method==None and column_method == None:
            no_plot = True ### If instantly viewing prior results, no dendrogram is displayed, but prior GO-Elite results can still be shown
else:
ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=False) # frame_on may be False - this window conflicts with GO-Elite labels
except Exception:
ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=False) # frame_on may be False
try: Z1 = sch.dendrogram(Y1, orientation='right',no_plot=no_plot) ### This is where plotting occurs
except Exception:
row_method = 'average'
try:
Y1 = fc.linkage(D1, method=row_method, metric=row_metric)
Z1 = sch.dendrogram(Y1, orientation='right',no_plot=no_plot)
except Exception:
row_method = 'ward'
Y1 = fc.linkage(D1, method=row_method, metric=row_metric)
Z1 = sch.dendrogram(Y1, orientation='right',no_plot=no_plot)
#ind1 = sch.fcluster(Y1,0.6*D1.max(),'distance') ### get the correlations
#ind1 = sch.fcluster(Y1,0.2*D1.max(),'maxclust')
ind1 = sch.fcluster(Y1,0.7*max(Y1[:,2]),'distance') ### This is the default behavior of dendrogram
if ExportCorreleationMatrix:
Z1 = sch.dendrogram(Y2, orientation='right')
Y1 = Y2
d1 = d2
D1 = D2
ind1 = ind2
try: ax1.set_xticks([]); ax1.set_yticks([]) ### Hides ticks
except Exception: pass
time_diff = str(round(time.time()-start_time,1))
print 'Row clustering completed in %s seconds' % time_diff
else:
ind1 = ['NA']*len(row_header) ### Used for exporting the flat cluster data
# Plot distance matrix.
axm = fig.add_axes([axm_x, axm_y, axm_w, axm_h]) # axes for the data matrix
xt = x
if column_method != None:
idx2 = Z2['leaves'] ### apply the clustering for the array-dendrograms to the actual matrix data
xt = xt[:,idx2]
#ind2 = ind2[:,idx2] ### reorder the flat cluster to match the order of the leaves of the dendrogram
""" Error can occur here if hopach was selected in a prior run but now running NONE """
ind2 = [ind2[i] for i in idx2] ### replaces the above due to a numpy-specific Windows version issue
if row_method != None:
idx1 = Z1['leaves'] ### apply the clustering for the gene-dendrograms to the actual matrix data
prior_xt = xt
xt = xt[idx1,:] # xt is transformed x
#ind1 = ind1[idx1,:] ### reorder the flat cluster to match the order of the leaves of the dendrogram
try: ind1 = [ind1[i] for i in idx1] ### replaces the above due to a numpy-specific Windows version issue
except Exception:
if 'MarkerGenes' in dataset_name:
ind1 = ['NA']*len(row_header) ### Used for exporting the flat cluster data
row_method = None
### taken from http://stackoverflow.com/questions/2982929/plotting-results-of-hierarchical-clustering-ontop-of-a-matrix-of-data-in-python/3011894#3011894
im = axm.matshow(xt, aspect='auto', origin='lower', cmap=cmap, norm=norm) ### norm=norm added to scale coloring of expression with zero = white or black
axm.set_xticks([]) ### Hides x-ticks
axm.set_yticks([])
#axm.set_axis_off() ### Hide border
#fix_verts(ax1,1)
#fix_verts(ax2,0)
### Adjust the size of the fonts for genes and arrays based on size and character length
row_fontsize = 5
column_fontsize = 5
column_text_max_len = max(map(lambda x: len(x), column_header)) ### Get the maximum length of a column annotation
if len(row_header)<75:
row_fontsize = 6.5
if len(row_header)<50:
row_fontsize = 8
if len(row_header)<25:
row_fontsize = 11
if len(column_header)<75:
column_fontsize = 6.5
if len(column_header)<50:
column_fontsize = 8
if len(column_header)<25:
column_fontsize = 11
if column_text_max_len < 15:
column_fontsize = 15
elif column_text_max_len > 30:
column_fontsize = 6.5
else:
column_fontsize = 10
try:
if len(justShowTheseIDs)>50:
column_fontsize = 7
elif len(justShowTheseIDs)>0:
column_fontsize = 10
if len(justShowTheseIDs)>0:
additional_symbols=[]
import gene_associations, OBO_import
try:
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
#symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception: gene_to_symbol={}; symbol_to_gene={}
except Exception: pass
# Add text
new_row_header=[]
new_column_header=[]
for i in range(x.shape[0]):
if row_method != None:
new_row_header.append(row_header[idx1[i]])
else:
new_row_header.append(row_header[i])
for i in range(x.shape[1]):
if column_method != None:
new_column_header.append(column_header[idx2[i]])
else: ### When not clustering columns
new_column_header.append(column_header[i])
dataset_name = string.replace(dataset_name,'Clustering-','') ### clean up the name if it is already a clustered file
if '-hierarchical' in dataset_name:
dataset_name = string.split(dataset_name,'-hierarchical')[0]
filename = 'Clustering-%s-hierarchical_%s_%s.pdf' % (dataset_name,column_metric,row_metric)
elite_dir, cdt_file, SystemCode = exportFlatClusterData(root_dir + filename, root_dir, dataset_name, new_row_header,new_column_header,xt,ind1,ind2,vmax,display)
def ViewPNG(png_file_dir):
if os.name == 'nt':
try: os.startfile('"'+png_file_dir+'"')
except Exception: os.system('open "'+png_file_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+png_file_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+png_file_dir+'"')
try:
try:
temp1=len(justShowTheseIDs)
if 'monocle' in justShowTheseIDs and ('driver' not in justShowTheseIDs and 'guide' not in justShowTheseIDs):
import R_interface
print 'Running Monocle through R (be patient, this can take 20 minutes+)'
R_interface.performMonocleAnalysisFromHeatmap(species,cdt_file[:-3]+'txt',cdt_file[:-3]+'txt')
png_file_dir = root_dir+'/Monocle/monoclePseudotime.png'
#print png_file_dir
ViewPNG(png_file_dir)
except Exception: pass # no justShowTheseIDs
except Exception:
print '...Monocle error:'
print traceback.format_exc()
pass
cluster_elite_terms={}; ge_fontsize=11.5; top_genes=[]; proceed=True
try:
try:
if 'driver' in justShowTheseIDs or 'guide' in justShowTheseIDs: proceed = False
except Exception: pass
if proceed:
try:
cluster_elite_terms,top_genes = remoteGOElite(elite_dir,SystemCode=SystemCode)
if cluster_elite_terms['label-size']>40: ge_fontsize = 9.5
except Exception:
pass
except Exception: pass #print traceback.format_exc()
if len(cluster_elite_terms)<1:
try:
elite_dirs = string.split(elite_dir,'GO-Elite')
old_elite_dir = elite_dirs[0]+'GO-Elite'+elite_dirs[-1] ### nested GO-Elite/GO-Elite directories exist for already-clustered inputs
old_elite_dir = string.replace(old_elite_dir,'ICGS/','')
if len(PriorColumnClusters)>0 and len(PriorRowClusters)>0 and skipClustering:
cluster_elite_terms,top_genes = importGOEliteResults(old_elite_dir)
except Exception,e:
#print traceback.format_exc()
pass
try:
if len(justShowTheseIDs)<1 and len(top_genes) > 0 and column_fontsize < 9:
column_fontsize = 10
if len(justShowTheseIDs)<1:
additional_symbols=[]
import gene_associations, OBO_import
try:
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
#symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception: gene_to_symbol={}; symbol_to_gene={}
except Exception: pass
def formatpval(p):
if '-' in p: p1=p[:1]+p[-4:]
else:
p1 = '{number:.{digits}f}'.format(number=float(p), digits=3)
p1=str(p1)
#print traceback.format_exc();sys.exit()
return p1
# Add text
new_row_header=[]
new_column_header=[]
ci=0 ### index of entries in the cluster
last_cluster=1
interval = int(float(string.split(str(len(row_header)/35.0),'.')[0]))+1 ### for enrichment term labels with over 100 genes
increment=interval-2
if len(row_header)<100: increment = interval-1
label_pos=-0.03*len(column_header)-.5
#print label_pos
try:
if 'top' in justShowTheseIDs: justShowTheseIDs.remove('top')
if 'positive' in justShowTheseIDs: justShowTheseIDs.remove('positive')
if 'amplify' in justShowTheseIDs: justShowTheseIDs.remove('amplify')
if 'IntraCorrelatedOnly' in justShowTheseIDs: justShowTheseIDs.remove('IntraCorrelatedOnly')
if 'GuideOnlyCorrelation' in justShowTheseIDs: justShowTheseIDs.remove('GuideOnlyCorrelation')
except Exception:
pass
for i in range(x.shape[0]):
if len(row_header)<40:
radj = len(row_header)*0.009 ### row offset value to center the vertical position of the row label
elif len(row_header)<70:
radj = len(row_header)*0.007 ### row offset value to center the vertical position of the row label
else:
radj = len(row_header)*0.005
cluster = str(ind1[i])
if cluster == 'NA':
new_index = i
try: cluster = 'cluster-'+string.split(row_header[new_index],':')[0]
except Exception: pass
if cluster != last_cluster:
ci=0
increment=0
#print cluster,i,row_header[idx1[i]]
color = 'black'
if row_method != None:
try:
if row_header[idx1[i]] in justShowTheseIDs:
if len(row_header)>len(justShowTheseIDs):
color = 'red'
else: color = 'black'
except Exception: pass
if len(row_header)<106: ### Don't visualize gene associations when more than 100 rows
axm.text(x.shape[1]-0.5, i-radj, ' '+row_header[idx1[i]],fontsize=row_fontsize, color=color, picker=True)
new_row_header.append(row_header[idx1[i]])
new_index = idx1[i]
else:
try:
if row_header[i] in justShowTheseIDs: color = 'red'
else: color = 'black'
except Exception: pass
if len(row_header)<106: ### Don't visualize gene associations when more than 100 rows
axm.text(x.shape[1]-0.5, i-radj, ' '+row_header[i],fontsize=row_fontsize, color=color, picker=True) ### When not clustering rows
new_row_header.append(row_header[i])
new_index = i ### This is different when clustering rows versus not
if len(row_header)<106:
"""
if cluster in cluster_elite_terms:
try:
term = cluster_elite_terms[cluster][ci][1]
axm.text(-1.5, i-radj, term,horizontalalignment='right',fontsize=row_fontsize)
except Exception: pass
ci+=1
"""
pass
else:
feature_id = row_header[new_index]
if ':' in feature_id:
if 'ENS' != feature_id[:3] or 'G0000' in feature_id:
feature_id = string.split(feature_id,':')[1]
else:
feature_id = string.split(feature_id,':')[0]
try: feature_id = gene_to_symbol[feature_id][0]
except Exception: pass
if (' ' in feature_id and ('ENS' in feature_id or 'G0000' in feature_id)):
feature_id = string.split(feature_id,' ')[1]
try:
if feature_id in justShowTheseIDs: color = 'red'
else: color = 'black'
except Exception: pass
try:
if feature_id in justShowTheseIDs or (len(justShowTheseIDs)<1 and feature_id in top_genes):
axm.text(x.shape[1]-0.5, i-radj, ' '+feature_id,fontsize=column_fontsize, color=color,picker=True) ### When not clustering rows
#axm.text(x.shape[1]-0.5, i-radj, ' '+"-",fontsize=column_fontsize, color=color,picker=True) ### When not clustering rows
elif ' ' in row_header[new_index]:
symbol = string.split(row_header[new_index], ' ')[-1]
if symbol in justShowTheseIDs:
axm.text(x.shape[1]-0.5, i-radj, ' '+row_header[new_index],fontsize=column_fontsize, color=color,picker=True)
#axm.text(x.shape[1]-0.5, i-radj, ' '+"-",fontsize=column_fontsize, color=color,picker=True)
except Exception: pass
if cluster in cluster_elite_terms:
if cluster != last_cluster:
cluster_initialized = False
try:
increment+=1
#print [increment,interval,cluster],cluster_elite_terms[cluster][ci][1];sys.exit()
#if increment == interval or (
#print increment,interval,len(row_header),cluster_initialized
if (increment == interval) or (len(row_header)>200 and increment == (interval-9) and cluster_initialized==False): ### second condition brings the label down earlier within large clusters
cluster_initialized=True
atypical_cluster = False
if ind1[i+9] == 'NA': ### This occurs for custom clusters, such as MarkerFinder (names rather than cluster numbers)
atypical_cluster = True
cluster9 = 'cluster-'+string.split(row_header[new_index+9],':')[0]
if (len(row_header)>200 and str(cluster9)!=cluster): continue
elif (len(row_header)>200 and str(ind1[i+9])!=cluster): continue ### prevents the last label in a cluster from overlapping with the first in the next cluster
pvalue,original_term = cluster_elite_terms[cluster][ci]
term = original_term
if 'GO:' in term:
term = string.split(term, '(')[0]
if ':WP' in term:
term = string.split(term, ':WP')[0]
pvalue = formatpval(str(pvalue))
term += ' p='+pvalue
if atypical_cluster == False:
term += ' (c'+str(cluster)+')'
try: cluster_elite_terms[term] = cluster_elite_terms[cluster,original_term] ### store the new term name with the associated genes
except Exception: pass
axm.text(label_pos, i-radj, term,horizontalalignment='right',fontsize=ge_fontsize, picker=True, color = 'blue')
increment=0
ci+=1
except Exception,e:
#print traceback.format_exc();sys.exit()
increment=0
last_cluster = cluster
def onpick1(event):
text = event.artist
print('onpick1 text:', text.get_text())
if 'TreeView' in text.get_text():
try: openTreeView(cdt_file)
except Exception: print 'Failed to open TreeView'
elif 'p=' not in text.get_text():
webbrowser.open('http://www.genecards.org/cgi-bin/carddisp.pl?gene='+string.replace(text.get_text(),' ',''))
else:
#"""
import TableViewer
header = ['Associated Genes']
tuple_list = []
for gene in cluster_elite_terms[text.get_text()]:
tuple_list.append([(gene)])
TableViewer.viewTable(text.get_text(),header,tuple_list) #"""
cluster_prefix = 'c'+string.split(text.get_text(),'(c')[1][:-1]+'-'
for geneSet in EliteGeneSets:
if geneSet == 'GeneOntology':
png_file_dir = elite_dir+'/GO-Elite_results/networks/'+cluster_prefix+'GO'+'.png'
elif geneSet == 'WikiPathways':
png_file_dir = elite_dir+'/GO-Elite_results/networks/'+cluster_prefix+'local'+'.png'
elif len(geneSet)>1:
png_file_dir = elite_dir+'/GO-Elite_results/networks/'+cluster_prefix+geneSet+'.png'
#try: UI.GUI(root_dir,'ViewPNG',[],png_file_dir)
#except Exception: print traceback.format_exc()
try:
alt_png_file_dir = elite_dir+'/GO-Elite_results/networks/'+cluster_prefix+eliteGeneSet+'.png'
png_file_dirs = string.split(alt_png_file_dir,'GO-Elite/')
alt_png_file_dir = png_file_dirs[0]+'GO-Elite/'+png_file_dirs[-1]
except Exception: pass
if os.name == 'nt':
try: os.startfile('"'+png_file_dir+'"')
except Exception:
try: os.system('open "'+png_file_dir+'"')
except Exception: os.startfile('"'+alt_png_file_dir+'"')
elif 'darwin' in sys.platform:
try: os.system('open "'+png_file_dir+'"')
except Exception: os.system('open "'+alt_png_file_dir+'"')
elif 'linux' in sys.platform:
try: os.system('xdg-open "'+png_file_dir+'"')
except Exception: os.system('xdg-open "'+alt_png_file_dir+'"')
#print cluster_elite_terms[text.get_text()]
fig.canvas.mpl_connect('pick_event', onpick1)
for i in range(x.shape[1]):
adji = i
### Controls the vertical position of the column (array) labels
if len(row_header)<3:
cadj = len(row_header)*-0.26 ### column offset value
elif len(row_header)<4:
cadj = len(row_header)*-0.23 ### column offset value
elif len(row_header)<6:
cadj = len(row_header)*-0.18 ### column offset value
elif len(row_header)<10:
cadj = len(row_header)*-0.08 ### column offset value
elif len(row_header)<15:
cadj = len(row_header)*-0.04 ### column offset value
elif len(row_header)<20:
cadj = len(row_header)*-0.05 ### column offset value
elif len(row_header)<22:
cadj = len(row_header)*-0.06 ### column offset value
elif len(row_header)<23:
cadj = len(row_header)*-0.08 ### column offset value
elif len(row_header)>200:
cadj = -2
else:
cadj = -0.9
#cadj = -1
if len(column_header)>15:
adji = i-0.1 ### adjust the relative position of the column label horizontally
if len(column_header)>20:
adji = i-0.2 ### adjust the relative position of the column label horizontally
if len(column_header)>25:
adji = i-0.2 ### adjust the relative position of the column label horizontally
if len(column_header)>30:
adji = i-0.25 ### adjust the relative position of the column label horizontally
if len(column_header)>35:
adji = i-0.3 ### adjust the relative position of the column label horizontally
if column_method != None:
axm.text(adji, cadj, ''+column_header[idx2[i]], rotation=270, verticalalignment="top",fontsize=column_fontsize) # rotation could also be degrees
new_column_header.append(column_header[idx2[i]])
else: ### When not clustering columns
axm.text(adji, cadj, ''+column_header[i], rotation=270, verticalalignment="top",fontsize=column_fontsize)
new_column_header.append(column_header[i])
# Plot colside colors
# axc --> axes for column side colorbar
group_name_list=[]
ind1_clust,ind2_clust = ind1,ind2
ind1,ind2,group_name_list,cb_status = updateColorBarData(ind1,ind2,new_column_header,new_row_header,row_method)
if (column_method != None or 'column' in cb_status) and show_color_bars == True:
axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h]) # axes for column side colorbar
cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF', '#CCCCE0','#000066','#FFFF00', '#FF1493'])
#cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF','#FFFF00', '#FF1493'])
if len(unique.unique(ind2))==2: ### cmap_c is too few colors
#cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
cmap_c = matplotlib.colors.ListedColormap(['w', 'k'])
elif len(unique.unique(ind2))==3: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif len(unique.unique(ind2))==4: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C','#FEBC18'])
elif len(unique.unique(ind2))==5: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2))==6: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
#cmap_c = matplotlib.colors.ListedColormap(['w', '#0B9B48', 'w', '#5D82C1','#4CB1E4','#71C065'])
#cmap_c = matplotlib.colors.ListedColormap(['w', 'w', 'k', 'w','w','w'])
elif len(unique.unique(ind2))==7: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
#cmap_c = matplotlib.colors.ListedColormap(['w', 'w', 'w', 'k', 'w','w','w'])
#cmap_c = matplotlib.colors.ListedColormap(['w','w', '#0B9B48', 'w', '#5D82C1','#4CB1E4','#71C065'])
#elif len(unique.unique(ind2))==10: cmap_c = matplotlib.colors.ListedColormap(['w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'k'])
elif len(unique.unique(ind2))==11:
cmap_c = matplotlib.colors.ListedColormap(['#DC2342', 'k', '#0B9B48', '#FDDF5E', '#E0B724', 'w', '#5D82C1', '#F79020', '#4CB1E4', '#983894', '#71C065'])
elif len(unique.unique(ind2))>0: ### cmap_c is too few colors
cmap_c = pylab.cm.gist_rainbow
dc = numpy.array(ind2, dtype=int)
dc.shape = (1,len(ind2))
im_c = axc.matshow(dc, aspect='auto', origin='lower', cmap=cmap_c)
axc.set_xticks([]) ### Hides ticks
if 'hopach' == column_method and len(group_name_list)>0:
axc.set_yticklabels(['','Groups'],fontsize=10)
else:
axc.set_yticks([])
#axc.set_frame_on(False) ### Hide border
if len(group_name_list)>0: ### Add a group color legend key
if 'hopach' == column_method: ### allows us to add the second color bar
axcd = fig.add_axes([ax2_x, ax2_y, ax2_w, color_bar_w]) # dendrogram coordinates with color_bar_w substituted - can use because dendrogram is not used
cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF', '#CCCCE0','#000066','#FFFF00', '#FF1493'])
#cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF','#FFFF00', '#FF1493'])
if len(unique.unique(ind2_clust))==2: ### cmap_c is too few colors
#cmap_c = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
cmap_c = matplotlib.colors.ListedColormap(['w', 'k'])
elif len(unique.unique(ind2_clust))==3: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif len(unique.unique(ind2_clust))==4: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif len(unique.unique(ind2_clust))==5: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2_clust))==6: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2_clust))==7: ### cmap_c is too few colors
cmap_c = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2_clust))>0: ### cmap_c is too few colors
cmap_c = pylab.cm.gist_rainbow
dc = numpy.array(ind2_clust, dtype=int)
dc.shape = (1,len(ind2_clust))
im_cd = axcd.matshow(dc, aspect='auto', origin='lower', cmap=cmap_c)
#axcd.text(-1,-1,'clusters')
axcd.set_yticklabels(['','Clusters'],fontsize=10)
#pylab.yticks(range(1),['HOPACH clusters'])
axcd.set_xticks([]) ### Hides ticks
#axcd.set_yticks([])
axd = fig.add_axes([axcc_x, axcc_y, axcc_w, axcc_h])
group_name_list.sort()
group_colors = map(lambda x: x[0],group_name_list)
group_names = map(lambda x: x[1],group_name_list)
cmap_d = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF', '#CCCCE0','#000066','#FFFF00', '#FF1493'])
#cmap_d = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF','#FFFF00', '#FF1493'])
if len(unique.unique(ind2))==2: ### cmap_c is too few colors
#cmap_d = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
cmap_d = matplotlib.colors.ListedColormap(['w', 'k'])
elif len(unique.unique(ind2))==3: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif len(unique.unique(ind2))==4: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif len(unique.unique(ind2))==5: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif len(unique.unique(ind2))==6: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
#cmap_d = matplotlib.colors.ListedColormap(['w', '#0B9B48', 'w', '#5D82C1','#4CB1E4','#71C065'])
#cmap_d = matplotlib.colors.ListedColormap(['w', 'w', 'k', 'w', 'w','w','w'])
elif len(unique.unique(ind2))==7: ### cmap_c is too few colors
cmap_d = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
#cmap_d = matplotlib.colors.ListedColormap(['w', 'w', 'w', 'k', 'w','w','w'])
#cmap_d = matplotlib.colors.ListedColormap(['w','w', '#0B9B48', 'w', '#5D82C1','#4CB1E4','#71C065'])
#elif len(unique.unique(ind2))==10: cmap_d = matplotlib.colors.ListedColormap(['w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'w', 'k'])
elif len(unique.unique(ind2))==11:
#Eryth Gfi1 Gran HSCP-1 HSCP-2 IG2 MDP Meg Mono Multi-Lin Myelo
cmap_d = matplotlib.colors.ListedColormap(['#DC2342', 'k', '#0B9B48', '#FDDF5E', '#E0B724', 'w', '#5D82C1', '#F79020', '#4CB1E4', '#983894', '#71C065'])
elif len(unique.unique(ind2))>0: ### cmap_c is too few colors
cmap_d = pylab.cm.gist_rainbow
dc = numpy.array(group_colors, dtype=int)
dc.shape = (1,len(group_colors))
im_c = axd.matshow(dc, aspect='auto', origin='lower', cmap=cmap_d)
axd.set_yticks([])
#axd.set_xticklabels(group_names, rotation=45, ha='left')
pylab.xticks(range(len(group_names)),group_names,rotation=45,ha='left')
#cmap_c = matplotlib.colors.ListedColormap(map(lambda x: GroupDB[x][-1], new_column_header))
if show_color_bars == False:
axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h]) # axes for column side colorbar
axc.set_frame_on(False)
# Plot rowside colors
# axr --> axes for row side colorbar
if (row_method != None or 'row' in cb_status) and show_color_bars == True:
axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h]) # axes for column side colorbar
try:
dr = numpy.array(ind1, dtype=int)
dr.shape = (len(ind1),1)
#print ind1, len(ind1)
cmap_r = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF', '#FFFF00', '#FF1493'])
if len(unique.unique(ind1))>4: ### cmap_r is too few colors
cmap_r = pylab.cm.gist_rainbow
im_r = axr.matshow(dr, aspect='auto', origin='lower', cmap=cmap_r)
axr.set_xticks([]) ### Hides ticks
axr.set_yticks([])
#axr.set_frame_on(False) ### Hide border
except Exception:
row_method = None
pass ### occurs when row_method should have been None (no row color bar to draw)
if show_color_bars == False:
axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h]) # axes for column side colorbar
axr.set_frame_on(False)
# Plot color legend
axcb = fig.add_axes([axcb_x, axcb_y, axcb_w, axcb_h], frame_on=False) # axes for colorbar
cb = matplotlib.colorbar.ColorbarBase(axcb, cmap=cmap, norm=norm, orientation='horizontal')
#axcb.set_title("colorkey",fontsize=14)
if 'LineageCorrelations' in dataset_name:
cb.set_label("Lineage Correlation Z Scores",fontsize=11)
elif 'Heatmap' in root_dir:
cb.set_label("GO-Elite Z Scores",fontsize=11)
else:
cb.set_label("Differential Expression (log2)",fontsize=10)
### Add filename label to the heatmap
if len(dataset_name)>30:fontsize = 10
else: fontsize = 12.5
fig.text(0.015, 0.970, dataset_name, fontsize = fontsize)
### Render and save the graphic
pylab.savefig(root_dir + filename)
#print 'Exporting:',filename
filename = filename[:-3]+'png'
pylab.savefig(root_dir + filename, dpi=100) #,dpi=200
includeBackground=False
try:
if 'TkAgg' != matplotlib.rcParams['backend']:
includeBackground = False
except Exception: pass
if includeBackground:
fig.text(0.020, 0.070, 'Open heatmap in TreeView (click here)', fontsize = 11.5, picker=True,color = 'red', backgroundcolor='white')
else:
fig.text(0.020, 0.070, 'Open heatmap in TreeView (click here)', fontsize = 11.5, picker=True,color = 'red')
if 'Outlier' in dataset_name and 'Removed' not in dataset_name:
graphic_link.append(['Hierarchical Clustering - Outlier Genes',root_dir+filename])
elif 'Relative' in dataset_name:
graphic_link.append(['Hierarchical Clustering - Significant Genes (Relative comparisons)',root_dir+filename])
elif 'LineageCorrelations' in filename:
graphic_link.append(['Hierarchical Clustering - Lineage Correlations',root_dir+filename])
elif 'MarkerGenes' in filename:
graphic_link.append(['Hierarchical Clustering - MarkerFinder',root_dir+filename])
elif 'AltExonConfirmed' in filename:
graphic_link.append(['Hierarchical Clustering - AltExonConfirmed',root_dir+filename])
elif 'AltExon' in filename:
graphic_link.append(['Hierarchical Clustering - AltExon',root_dir+filename])
else:
graphic_link.append(['Hierarchical Clustering - Significant Genes',root_dir+filename])
if display:
proceed=True
try:
if 'driver' in justShowTheseIDs or 'guide' in justShowTheseIDs:
proceed = False
except Exception: pass
if proceed:
print 'Exporting:',filename
try: pylab.show()
except Exception: None ### when run in headless mode
fig.clf()
#fig.close() causes segfault
#pylab.close() causes segfault
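### --- Illustrative sketch (not part of the original pipeline) ---
### A minimal, self-contained example of the linkage -> dendrogram -> fcluster
### sequence used above. The 0.7*max(Y[:,2]) threshold mirrors scipy's default
### dendrogram color threshold; fastcluster (fc) is swapped for scipy here so
### the sketch only assumes numpy and scipy.
def _demoFlatClusterExtraction():
    import numpy
    import scipy.cluster.hierarchy as sch
    import scipy.spatial.distance as dist
    x = numpy.random.rand(10, 4)                      ### toy data: 10 rows, 4 columns
    Y = sch.linkage(dist.pdist(x), method='average')  ### condensed distances -> linkage
    Z = sch.dendrogram(Y, no_plot=True)               ### leaf order without drawing
    ind = sch.fcluster(Y, 0.7 * max(Y[:, 2]), 'distance')
    return Z['leaves'], ind                           ### leaf order and flat cluster ids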
def openTreeView(filename):
import subprocess
fn = filepath("AltDatabase/TreeView/TreeView.jar")
retcode = subprocess.Popen(['java', "-Xmx500m", '-jar', fn, "-r", filename])
def remoteGOElite(elite_dir,SystemCode = None):
mod = 'Ensembl'
if SystemCode == 'Ae':
mod = 'AltExon'
pathway_permutations = 'FisherExactTest'
filter_method = 'z-score'
z_threshold = 1.96
p_val_threshold = 0.05
change_threshold = 0
if runGOElite:
resources_to_analyze = EliteGeneSets
if 'all' in resources_to_analyze:
resources_to_analyze = 'all'
returnPathways = 'no'
root = None
import GO_Elite
reload(GO_Elite)
input_files = dir_list = unique.read_directory(elite_dir) ### Are there any files to analyze?
if len(input_files)>0 and resources_to_analyze !=['']:
print '\nBeginning to run GO-Elite analysis on all results'
file_dirs = elite_dir,None,elite_dir
enrichmentAnalysisType = 'ORA'
#enrichmentAnalysisType = 'URA'
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,enrichmentAnalysisType,root
try: GO_Elite.remoteAnalysis(variables,'non-UI Heatmap')
except Exception: print 'GO-Elite failed for:',elite_dir
try: UI.openDirectory(elite_dir+'/GO-Elite_results')
except Exception: None
cluster_elite_terms,top_genes = importGOEliteResults(elite_dir)
return cluster_elite_terms,top_genes
else:
return {},[]
else:
return {},[]
def importGOEliteResults(elite_dir):
global eliteGeneSet
pruned_results = elite_dir+'/GO-Elite_results/CompleteResults/ORA_pruned/pruned-results_z-score_elite.txt' ### This is the exception (not moved)
if os.path.isfile(pruned_results) == False:
pruned_results = elite_dir+'/GO-Elite_results/pruned-results_z-score_elite.txt'
firstLine=True
cluster_elite_terms={}
all_term_length=[0]
for line in open(pruned_results,'rU').xreadlines():
data = line.rstrip()
values = string.split(data,'\t')
if firstLine:
firstLine=False
try: symbol_index = values.index('gene symbols')
except Exception: symbol_index = None
else:
try: symbol_index = values.index('gene symbols')
except Exception: pass
try:
eliteGeneSet = string.split(values[0][1:],'-')[1][:-4]
try: cluster = str(int(float(string.split(values[0][1:],'-')[0])))
except Exception:
cluster = string.join(string.split(values[0],'-')[:-1],'-')
term = values[2]
all_term_length.append(len(term))
pval = float(values[9])
try: cluster_elite_terms[cluster].append([pval,term])
except Exception: cluster_elite_terms[cluster] = [[pval,term]]
if symbol_index!=None:
symbols = string.split(values[symbol_index],'|')
cluster_elite_terms[cluster,term] = symbols
except Exception,e: pass
for cluster in cluster_elite_terms:
cluster_elite_terms[cluster].sort()
cluster_elite_terms['label-size'] = max(all_term_length)
top_genes = []; count=0
ranked_genes = elite_dir+'/GO-Elite_results/CompleteResults/ORA_pruned/gene_associations/pruned-gene-ranking.txt'
for line in open(ranked_genes,'rU').xreadlines():
data = line.rstrip()
values = string.split(data,'\t')
count+=1
if len(values)>2:
if values[2]!='Symbol':
try: top_genes.append((int(values[4]),values[2]))
except Exception: pass
top_genes.sort(); top_genes.reverse()
top_genes = map(lambda x: x[1],top_genes[:21])
return cluster_elite_terms,top_genes
def mergeRotateAroundPointPage(page, page2, rotation, tx, ty):
from pyPdf import PdfFileWriter, PdfFileReader
translation = [[1, 0, 0],
[0, 1, 0],
[-tx,-ty,1]]
rotation = math.radians(rotation)
rotating = [[math.cos(rotation), math.sin(rotation),0],
[-math.sin(rotation),math.cos(rotation), 0],
[0, 0, 1]]
rtranslation = [[1, 0, 0],
[0, 1, 0],
[tx,ty,1]]
ctm = numpy.dot(translation, rotating)
ctm = numpy.dot(ctm, rtranslation)
return page.mergeTransformedPage(page2, [ctm[0][0], ctm[0][1],
ctm[1][0], ctm[1][1],
ctm[2][0], ctm[2][1]])
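### --- Illustrative sketch (not part of the original pipeline) ---
### The merge above composes translate(-tx,-ty) -> rotate -> translate(tx,ty),
### i.e. a rotation about the point (tx,ty) in row-vector convention. A quick
### numpy check that the pivot point maps to itself (assumes only numpy/math):
def _demoRotationAboutPoint(rotation=90, tx=10, ty=10):
    import math, numpy
    theta = math.radians(rotation)
    translation = numpy.array([[1, 0, 0], [0, 1, 0], [-tx, -ty, 1]], dtype=float)
    rotating = numpy.array([[math.cos(theta), math.sin(theta), 0],
                            [-math.sin(theta), math.cos(theta), 0],
                            [0, 0, 1]], dtype=float)
    rtranslation = numpy.array([[1, 0, 0], [0, 1, 0], [tx, ty, 1]], dtype=float)
    ctm = numpy.dot(numpy.dot(translation, rotating), rtranslation)
    pivot = numpy.dot(numpy.array([tx, ty, 1.0]), ctm)  ### should remain ~(tx, ty, 1)
    return ctm, pivot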
def mergePDFs2(pdf1,pdf2,outPdf):
from pyPdf import PdfFileWriter, PdfFileReader
input1 = PdfFileReader(file(pdf1, "rb"))
page1 = input1.getPage(0)
input2 = PdfFileReader(file(pdf2, "rb"))
page2 = input2.getPage(0)
page3 = mergeRotateAroundPointPage(page1, page2,
page1.get('/Rotate') or 0,
page2.mediaBox.getWidth()/2, page2.mediaBox.getHeight()/2) ### rotate about the page center (width/2, height/2)
output = PdfFileWriter()
output.addPage(page3)
outputStream = file(outPdf, "wb")
output.write(outputStream)
outputStream.close()
def mergePDFs(pdf1,pdf2,outPdf):
# http://stackoverflow.com/questions/6041244/how-to-merge-two-landscape-pdf-pages-using-pypdf
from pyPdf import PdfFileWriter, PdfFileReader
input1 = PdfFileReader(file(pdf1, "rb"))
page1 = input1.getPage(0)
page1.mediaBox.upperRight = (page1.mediaBox.getUpperRight_x(), page1.mediaBox.getUpperRight_y())
input2 = PdfFileReader(file(pdf2, "rb"))
page2 = input2.getPage(0)
page2.mediaBox.lowerLeft = (page2.mediaBox.getLowerLeft_x(), page2.mediaBox.getLowerLeft_y())
# Merge page1 onto page2
page2.mergePage(page1)
# Output the merged page (page2 now carries the content of both)
output = PdfFileWriter()
output.addPage(page2)
outputStream = file(outPdf, "wb")
output.write(outputStream)
outputStream.close()
"""
def merge_horizontal(out_filename, left_filename, right_filename):
#Merge the first page of two PDFs side-to-side
import pyPdf
# open the PDF files to be merged
with open(left_filename) as left_file, open(right_filename) as right_file, open(out_filename, 'w') as output_file:
left_pdf = pyPdf.PdfFileReader(left_file)
right_pdf = pyPdf.PdfFileReader(right_file)
output = pyPdf.PdfFileWriter()
# get the first page from each pdf
left_page = left_pdf.pages[0]
right_page = right_pdf.pages[0]
# start a new blank page with a size that can fit the merged pages side by side
page = output.addBlankPage(
width=left_page.mediaBox.getWidth() + right_page.mediaBox.getWidth(),
height=max(left_page.mediaBox.getHeight(), right_page.mediaBox.getHeight()),
)
# draw the pages on that new page
page.mergeTranslatedPage(left_page, 0, 0)
page.mergeTranslatedPage(right_page, left_page.mediaBox.getWidth(), 0)
# write to file
output.write(output_file)
"""
def inverseDist(value):
if value == 0: value = 1
return math.log(value,2)
def getGOEliteExportDir(root_dir,dataset_name):
if 'AltResults' in root_dir:
root_dir = string.split(root_dir,'AltResults')[0]
if 'ExpressionInput' in root_dir:
root_dir = string.split(root_dir,'ExpressionInput')[0]
if 'ExpressionOutput' in root_dir:
root_dir = string.split(root_dir,'ExpressionOutput')[0]
if 'DataPlots' in root_dir:
root_dir = string.replace(root_dir,'DataPlots','GO-Elite')
elite_dir = root_dir
else:
elite_dir = root_dir+'/GO-Elite'
try: os.mkdir(elite_dir)
except Exception: pass
return elite_dir+'/clustering/'+dataset_name
def systemCodeCheck(IDs):
import gene_associations
id_type_db={}
for id in IDs:
id_type = gene_associations.predictIDSourceSimple(id)
try: id_type_db[id_type]+=1
except Exception: id_type_db[id_type]=1
id_type_count=[]
for i in id_type_db:
id_type_count.append((id_type_db[i],i))
id_type_count.sort()
id_type = id_type_count[-1][-1]
return id_type
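### --- Illustrative sketch (not part of the original pipeline) ---
### systemCodeCheck above is a majority vote over per-ID type predictions. The
### same tally written with collections.Counter; the predict argument is a
### hypothetical stub standing in for gene_associations.predictIDSourceSimple.
def _demoMajorityIDType(IDs, predict=lambda i: 'En' if i.startswith('ENS') else 'Sy'):
    from collections import Counter
    counts = Counter(predict(i) for i in IDs)  ### tally the predicted ID types
    return counts.most_common(1)[0][0]         ### most frequent type wins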
def exportFlatClusterData(filename, root_dir, dataset_name, new_row_header,new_column_header,xt,ind1,ind2,vmax,display):
""" Export the clustered results as a text file, only indicating the flat-clusters rather than the tree """
filename = string.replace(filename,'.pdf','.txt')
export_text = export.ExportFile(filename)
column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\t')+'\n' ### format column-names for export
export_text.write(column_header)
column_clusters = string.join(['column_clusters-flat','']+ map(str, ind2),'\t')+'\n' ### format column-flat-clusters for export
export_text.write(column_clusters)
### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match
#new_row_header = new_row_header[::-1]
#xt = xt[::-1]
try: elite_dir = getGOEliteExportDir(root_dir,dataset_name)
except Exception: elite_dir = None
elite_columns = string.join(['InputID','SystemCode'])
try: sy = systemCodeCheck(new_row_header)
except Exception: sy = None
### Export each row in the clustered data matrix xt
i=0
cluster_db={}
export_lines = []
for row in xt:
try:
id = new_row_header[i]
original_id = str(id)
if sy == '$En:Sy':
cluster = 'cluster-'+string.split(id,':')[0]
elif sy == 'S' and ':' in id:
cluster = 'cluster-'+string.split(id,':')[0]
elif sy == 'Sy' and ':' in id:
cluster = 'cluster-'+string.split(id,':')[0]
else:
cluster = 'c'+str(ind1[i])
except Exception:
pass
try:
if 'MarkerGenes' in originalFilename:
cluster = 'cluster-'+string.split(id,':')[0]
id = string.split(id,':')[1]
if ' ' in id:
id = string.split(id,' ')[0]
if 'G000' in id: sy = 'En'
else: sy = 'Sy'
except Exception: pass
try: cluster_db[cluster].append(id)
except Exception: cluster_db[cluster] = [id]
export_lines.append(string.join([original_id,str(ind1[i])]+map(str, row),'\t')+'\n')
i+=1
### Reverse the order of the file
export_lines.reverse()
for line in export_lines:
export_text.write(line)
export_text.close()
### Export GO-Elite input files
allGenes={}
for cluster in cluster_db:
export_elite = export.ExportFile(elite_dir+'/'+cluster+'.txt')
if sy==None:
export_elite.write('ID\n')
else:
export_elite.write('ID\tSystemCode\n')
for id in cluster_db[cluster]:
try:
i1,i2 = string.split(id,' ')
if i1==i2: id = i1
except Exception: pass
if sy == '$En:Sy':
id = string.split(id,':')[1]
ids = string.split(id,' ')
if 'ENS' in ids[0] or 'G0000' in ids[0]: id = ids[0]
else: id = ids[-1]
sc = 'En'
elif sy == 'Sy' and ':' in id:
id = string.split(id,':')[1]
ids = string.split(id,' ')
sc = 'Sy'
elif sy == 'En:Sy':
id = string.split(id,' ')[0]
sc = 'En'
elif sy == 'Ae':
l = string.split(id,':')
if len(l)==2:
id = string.split(id,':')[0] ### Use the Ensembl
if len(l) == 3:
id = string.split(id,':')[1] ### Use the Ensembl
sc = 'En'
if ' ' in id:
ids = string.split(id,' ')
if 'ENS' in ids[-1] or 'G0000' in ids[-1]: id = ids[-1]
else: id = ids[0]
elif sy == 'En' and '&' in id:
for i in string.split(id,'&'):
if 'G0000' in i: id = i; sc = 'En'; break
elif sy == 'Sy' and 'EFN' in id:
sc = 'En'
else:
sc = sy
if sy == 'S':
if ':' in id:
id = string.split(id,':')[-1]
sc = 'Ae'
if '&' in id:
sc = 'Ae'
try: export_elite.write(id+'\t'+sc+'\n')
except Exception: export_elite.write(id+'\n') ### if no System Code known
allGenes[id]=[]
export_elite.close()
try:
if storeGeneSetName != None:
if len(storeGeneSetName)>0 and ('driver' not in justShowTheseIDs and 'guide' not in justShowTheseIDs):
exportCustomGeneSet(storeGeneSetName,species,allGenes)
print 'Exported geneset to "StoredGeneSets"'
except Exception: pass
### Export as CDT file
filename = string.replace(filename,'.txt','.cdt')
if display:
try: exportJTV(filename, new_column_header, new_row_header,vmax=vmax)
except Exception: pass
export_cdt = export.ExportFile(filename)
column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\t')+'\n' ### format column-names for export
export_cdt.write(column_header)
eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\t')+'\n' ### format column-flat-clusters for export
export_cdt.write(eweight)
### Export each row in the clustered data matrix xt
i=0; cdt_lines=[]
for row in xt:
cdt_lines.append(string.join([new_row_header[i]]*2+['1']+map(str, row),'\t')+'\n')
i+=1
### Reverse the order of the file
cdt_lines.reverse()
for line in cdt_lines:
export_cdt.write(line)
export_cdt.close()
return elite_dir, filename, sc
def exportJTV(cdt_dir, column_header, row_header,vmax=None):
### This is a config file for TreeView
filename = string.replace(cdt_dir,'.cdt','.jtv')
export_jtv = export.ExportFile(filename)
cscale = '3'
if len(column_header)>100:
cscale = '1.5'
if len(column_header)>200:
cscale = '1.1'
if len(column_header)>300:
cscale = '0.6'
if len(column_header)>400:
cscale = '0.3'
hscale = '5'
if len(row_header)< 50:
hscale = '10'
if len(row_header)>100:
hscale = '3'
if len(row_header)>500:
hscale = '1'
if len(row_header)>1000:
hscale = '0.5'
contrast = str(float(vmax)/4)[:4] ### base the contrast on the heatmap vmax variable
"""
config = '<DocumentConfig><UrlExtractor/><ArrayUrlExtractor/><MainView><ColorExtractor>'
config+= '<ColorSet down="#00FFFF"/></ColorExtractor><ArrayDrawer/><GlobalXMap>'
config+= '<FixedMap type="Fixed" scale="'+cscale+'"/><FillMap type="Fill"/><NullMap type="Null"/>'
config+= '</GlobalXMap><GlobalYMap><FixedMap type="Fixed" scale="'+hscale+'"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></GlobalYMap><ZoomXMap><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></ZoomXMap><ZoomYMap><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></ZoomYMap><TextView><TextView><GeneSummary/></TextView><TextView>'
config+= '<GeneSummary/></TextView><TextView><GeneSummary/></TextView></TextView><ArrayNameView>'
config+= '<ArraySummary included="0"/></ArrayNameView><AtrSummary/><GtrSummary/></MainView></DocumentConfig>'
export_jtv.write(config)
"""
config = '<DocumentConfig><UrlExtractor/><ArrayUrlExtractor/><MainView><ColorExtractor>'
config+= '<ColorSet down="#00FFFF"/></ColorExtractor><ArrayDrawer/><GlobalXMap>'
config+= '<FixedMap type="Fixed" scale="'+cscale+'"/><FillMap type="Fill"/><NullMap type="Null"/>'
config+= '</GlobalXMap><GlobalYMap><FixedMap type="Fixed" scale="'+hscale+'"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></GlobalYMap><ZoomXMap><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></ZoomXMap><ZoomYMap><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></ZoomYMap><TextView><TextView><GeneSummary/></TextView><TextView>'
config+= '<GeneSummary/></TextView><TextView><GeneSummary/></TextView></TextView><ArrayNameView>'
config+= '<ArraySummary included="0"/></ArrayNameView><AtrSummary/><GtrSummary/></MainView><Views>'
config+= '<View type="Dendrogram" dock="1"><ColorExtractor contrast="'+contrast+'"><ColorSet up="#FFFF00" down="#00CCFF"/>'
config+= '</ColorExtractor><ArrayDrawer/><GlobalXMap current="Fill"><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></GlobalXMap><GlobalYMap current="Fill"><FixedMap type="Fixed"/><FillMap type="Fill"/>'
config+= '<NullMap type="Null"/></GlobalYMap><ZoomXMap><FixedMap type="Fixed"/><FillMap type="Fill"/><NullMap type="Null"/>'
config+= '</ZoomXMap><ZoomYMap current="Fixed"><FixedMap type="Fixed"/><FillMap type="Fill"/><NullMap type="Null"/></ZoomYMap>'
config+= '<TextView><TextView><GeneSummary/></TextView><TextView><GeneSummary/></TextView><TextView><GeneSummary/></TextView>'
config+= '</TextView><ArrayNameView><ArraySummary included="0"/></ArrayNameView><AtrSummary/><GtrSummary/></View></Views></DocumentConfig>'
export_jtv.write(config)
### How to create custom colors - http://matplotlib.sourceforge.net/examples/pylab_examples/custom_cmap.html
def updateColorBarData(ind1,ind2,column_header,row_header,row_method):
""" Replace the top-level cluster information with group assignments for color bar coloring (if group data present)"""
cb_status = 'original'
group_number_list=[]
group_name_list=[]
try: ### Error if GroupDB not recognized as global
if column_header[0] in GroupDB: ### Thus group assignments exist for column headers
cb_status = 'column'
for header in column_header:
group,color,color_num = GroupDB[header]
group_number_list.append(color_num) ### will replace ind2
if (color_num,group) not in group_name_list:
group_name_list.append((color_num,group))
ind2 = group_number_list
if row_header[0] in GroupDB and row_method == None: ### Thus group assignments exist for row headers
group_number_list=[]
if cb_status == 'column': cb_status = 'column-row'
else: cb_status = 'row'
for header in row_header:
group,color,color_num = GroupDB[header]
group_number_list.append(color_num) ### will replace ind2
#group_number_list.reverse()
ind1 = group_number_list
except Exception: None
return ind1,ind2,group_name_list,cb_status
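### --- Illustrative sketch (not part of the original pipeline) ---
### The substitution performed above, with a hypothetical group database passed
### in place of the module-level GroupDB: each header's flat-cluster id is
### replaced by its group's color number, and unique (color_num, group) pairs
### accumulate for the legend.
def _demoColorBarSubstitution(column_header, groupDB):
    group_numbers, legend = [], []
    for header in column_header:
        group, color, color_num = groupDB[header]  ### (name, color, color index)
        group_numbers.append(color_num)
        if (color_num, group) not in legend:
            legend.append((color_num, group))
    return group_numbers, legend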
def ConvertFromHex(color1,color2,color3):
c1tuple = tuple(ord(c) for c in color1.lstrip('0x').decode('hex'))
c2tuple = tuple(ord(c) for c in color2.lstrip('0x').decode('hex'))
c3tuple = tuple(ord(c) for c in color3.lstrip('0x').decode('hex'))
return c1tuple, c2tuple, c3tuple ### return the decoded (R,G,B) tuples
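### --- Illustrative sketch (not part of the original pipeline) ---
### A version-agnostic way to turn a hex color such as 'FF8800', '#FF8800' or
### '0xFF8800' into an (R,G,B) tuple, equivalent in intent to ConvertFromHex:
def _demoHexToRGB(color):
    h = color
    if h.startswith('0x'): h = h[2:]   ### drop a '0x' prefix if present
    if h.startswith('#'): h = h[1:]    ### drop a '#' prefix if present
    return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))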
def RedBlackSkyBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.9),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
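### --- Illustrative sketch (not part of the original pipeline) ---
### How the cdict above is consumed: LinearSegmentedColormap interpolates each
### channel between its anchor points, and calling the resulting colormap with
### a value in [0,1] returns an RGBA tuple (here the two ends and the middle).
### Assumes the module-level matplotlib.colors import (mc) used above.
def _demoSampleColormap():
    cmap = RedBlackSkyBlue()
    return cmap(0.0), cmap(0.5), cmap(1.0)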
def RedBlackBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def RedBlackGreen():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def YellowBlackBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.8),
(0.5, 0.1, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
### yellow is created by adding y = 1 to RedBlackSkyBlue green last tuple
### modulate between blue and cyan using the last y var in the first green tuple
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def BlackYellowBlue():
cdict = {'red': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 0.8),
(0.5, 0.1, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0))
}
### yellow is created by adding y = 1 to RedBlackSkyBlue green last tuple
### modulate between blue and cyan using the last y var in the first green tuple
my_cmap = mc.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def filepath(filename):
fn = unique.filepath(filename)
return fn
def importData(filename,Normalize=False,reverseOrder=True,geneFilter=None,zscore=False):
global priorColumnClusters
global priorRowClusters
getRowClusters=False
start_time = time.time()
fn = filepath(filename)
matrix=[]
original_matrix=[]
row_header=[]
x=0; inputMax=0; inputMin=100
filename = string.replace(filename,'\\','/')
dataset_name = string.split(filename,'/')[-1][:-4]
if '.cdt' in filename: start = 3
else: start = 1
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if '.cdt' in filename: t = [t[0]]+t[3:]
if t[1] == 'row_clusters-flat':
t = [t[0]]+t[2:]
### color samples by annotated groups if an expression file
if ('exp.' in filename or 'filteredExp.' in filename) and ':' not in data:
filename = string.replace(filename,'-steady-state.txt','.txt')
try:
import ExpressionBuilder
sample_group_db = ExpressionBuilder.simplerGroupImport(filename)
new_headers = []
for v in t:
if v in sample_group_db:
v = sample_group_db[v]+':'+v
new_headers.append(v)
t = new_headers
except Exception:
#print traceback.format_exc()
pass
group_db, column_header = assignGroupColors(t[1:])
x=1
elif 'column_clusters-flat' in t:
try:
prior = map(lambda x: int(float(x)),t[2:])
#priorColumnClusters = dict(zip(column_header, prior))
priorColumnClusters = prior
except Exception:
pass
start = 2
getRowClusters = True
priorRowClusters=[]
elif 'EWEIGHT' in t: pass
else:
gene = t[0]
if geneFilter==None:
proceed = True
elif gene in geneFilter:
proceed = True
else:
proceed = False
if proceed:
nullsPresent = False
#if ' ' not in t and '' not in t: ### Occurs for rows with missing data
try: s = map(float,t[start:])
except Exception:
nullsPresent=True
s=[]
for value in t[start:]:
try: s.append(float(value))
except Exception: s.append(0.000101)
#s = numpy.ma.masked_values(s, 0.000101)
original_matrix.append(s)
if max(s)>inputMax: inputMax = max(s)
if min(s)<inputMin: inputMin = min(s)
#if (abs(max(s)-min(s)))>2:
if Normalize!=False:
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
if Normalize=='row mean':
#avg = min(s)
avg = numpy.mean(s)
else: avg = numpy.median(s)
if nullsPresent:
s=[] ### Needs to be done to zero out the values
for value in t[start:]:
try: s.append(float(value)-avg)
except Exception: s.append(0.000101)
#s = numpy.ma.masked_values(s, 0.000101)
else:
s = map(lambda x: x-avg,s) ### normalize to the mean
if ' ' in gene:
try:
g1,g2 = string.split(gene,' ')
if g1 == g2: gene = g1
except Exception: pass
if getRowClusters:
try:
#priorRowClusters[gene]=int(float(t[1]))
priorRowClusters.append(int(float(t[1])))
except Exception: pass
if zscore:
### convert to z-scores for normalization prior to PCA
avg = numpy.mean(s)
std = numpy.std(s)
if std ==0:
std = 0.1
try: s = map(lambda x: (x-avg)/std,s)
except Exception: pass
if geneFilter==None:
matrix.append(s)
row_header.append(gene)
else:
if gene in geneFilter:
matrix.append(s)
row_header.append(gene)
x+=1
if inputMax>100: ### Thus, not log values
print 'Converting values to log2...'
matrix=[]
k=0
if inputMin==0: increment = 1#0.01
else: increment = 1
for s in original_matrix:
if 'counts.' in filename:
s = map(lambda x: math.log(x+1,2),s)
else:
try: s = map(lambda x: math.log(x+increment,2),s)
except Exception:
print filename
print Normalize
print row_header[k], min(s),max(s); kill
if Normalize!=False:
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
if Normalize=='row mean':
avg = numpy.average(s)
else: avg = numpy.median(s)
s = map(lambda x: x-avg,s) ### normalize to the mean
matrix.append(s)
k+=1
del original_matrix
if zscore: print 'Converting values to normalized z-scores...'
#reverseOrder = True ### Cluster order is background (this is a temporary workaround)
if reverseOrder == True:
matrix.reverse(); row_header.reverse()
time_diff = str(round(time.time()-start_time,1))
try:
print '%d rows and %d columns imported for %s in %s seconds...' % (len(matrix),len(column_header),dataset_name,time_diff)
except Exception:
print 'No data in input file.'; force_error ### undefined name intentionally raises an error to halt
### Add groups for column pre-clustered samples if there
group_db2, row_header2 = assignGroupColors(list(row_header)) ### row_header gets sorted in assignGroupColors and would be permanently altered if not copied first
#if '.cdt' in filename: matrix.reverse(); row_header.reverse()
for i in group_db2:
if i not in group_db: group_db[i] = group_db2[i]
return matrix, column_header, row_header, dataset_name, group_db
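### --- Illustrative sketch (not part of the original pipeline) ---
### The core transform in importData in isolation: log2 with a pseudo-count for
### non-log inputs, then optional centering on the row mean or median. Assumes
### non-negative input values and that numpy is available.
def _demoLog2RowCenter(values, Normalize='row mean', increment=1):
    import math, numpy
    s = [math.log(v + increment, 2) for v in values]  ### log2(x + pseudo-count)
    if Normalize == 'row mean':
        avg = numpy.mean(s)
    else:
        avg = numpy.median(s)
    return [v - avg for v in s]                       ### center the row on zero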
def importSIF(filename):
fn = filepath(filename)
edges=[]
x=0
if '/' in filename:
dataset_name = string.split(filename,'/')[-1][:-4]
else:
dataset_name = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
parent,type,child = string.split(data,'\t')
if 'AltAnalyze' in dataset_name:
### This is the order for proper directed interactions in the AltAnalyze-interaction viewer
edges.append([parent,child,type])
else:
if '(' in parent: ### for TF-target annotations
parent = string.split(parent,'(')[0]
if ':' in child:
child = string.split(child,':')[1]
if 'TF' in dataset_name or 'UserSuppliedAssociations' in dataset_name or 'WGRV' in dataset_name:
edges.append([parent,child,type]) ### Do this to indicate that the TF is regulating the target
else:
edges.append([child,parent,type])
edges = unique.unique(edges)
return edges
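### --- Illustrative sketch (not part of the original pipeline) ---
### SIF parsing in miniature: each line is parent<TAB>interaction<TAB>child,
### and for directed (e.g. TF-target style) files the edge is stored
### parent -> child, matching the directed branches above.
def _demoParseSIFLines(lines):
    edges = []
    for line in lines:
        parent, interaction, child = line.rstrip().split('\t')
        edges.append([parent, child, interaction])
    return edges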
def assignGroupColors(t):
""" Assign a unique color to each group. Optionally used for cluster display. """
column_header=[]; group_number_db={}
groupNamesPresent=False # Some samples may have missing group names which will result in a clustering error
for i in t:
if ':' in i: groupNamesPresent = True
for i in t:
repls = {'.2txt' : '', '.2bed' : '', '.2tab' : ''}
i=reduce(lambda a, kv: a.replace(*kv), repls.iteritems(), i)
if ':' in i:
group,j = string.split(i,':')[:2]
group_number_db[group]=[]
elif groupNamesPresent:
group_number_db['UNK']=[]
i = 'UNK:'+i
column_header.append(i)
#import random
k = 0
group_db={}; color_db={}
color_list = ['r', 'b', 'y', 'g', 'w', 'k', 'm']
if len(group_number_db)>3:
color_list = []
cm = pylab.cm.get_cmap('gist_rainbow') #gist_ncar # binary
for i in range(len(group_number_db)):
color_list.append(cm(1.*i/len(group_number_db))) # color will now be an RGBA tuple
#color_list=[]
#color_template = [1,1,1,0,0,0,0.5,0.5,0.5,0.25,0.25,0.25,0.75,0.75,0.75]
t.sort() ### Ensure that all clusters have the same order of groups
for i in column_header:
repls = {'.2txt' : '', '.2bed' : '', '.2tab' : ''}
i=reduce(lambda a, kv: a.replace(*kv), repls.iteritems(), i)
if ':' in i:
group,j = string.split(i,':')[:2]
try: color,ko = color_db[group]
except Exception:
try: color_db[group] = color_list[k],k
except Exception:
### If not listed in the standard color set add a new random color
rgb = tuple(scipy.rand(3)) ### random color
#rgb = tuple(random.sample(color_template,3)) ### custom alternative method
color_list.append(rgb)
color_db[group] = color_list[k], k
color,ko = color_db[group]
k+=1
group_db[i] = group, color, ko
#column_header.append(i)
return group_db, column_header
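### --- Illustrative sketch (not part of the original pipeline) ---
### The group-coloring scheme above in isolation: for more than a handful of
### groups, gist_rainbow is sampled at evenly spaced points so each group gets
### a distinct RGBA color. Assumes pylab is imported at module level, as it is
### elsewhere in this file.
def _demoGroupColors(n_groups):
    cm = pylab.cm.get_cmap('gist_rainbow')
    return [cm(1. * i / n_groups) for i in range(n_groups)]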
def verifyFile(filename):
status = 'not found'
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
def AppendOrWrite(export_path):
export_path = filepath(export_path)
status = verifyFile(export_path)
if status == 'not found':
export_data = export.ExportFile(export_path) ### Write this new file
else:
export_data = open(export_path,'a') ### Appends to existing file
return export_path, export_data, status
def exportCustomGeneSet(geneSetName,species,allGenes):
for gene in allGenes: break ### grab an arbitrary gene ID to determine the ID system
if 'ENS' not in gene:
try:
import gene_associations; import OBO_import
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception: symbol_to_gene={}
if species != None:
export_path, export_data, status = AppendOrWrite('AltDatabase/goelite/'+species+'/gene-mapp/Ensembl-StoredGeneSets.txt')
stored_lines=[]
for line in open(export_path,'rU').xreadlines(): stored_lines.append(line)
if status == 'not found':
export_data.write('GeneID\tEmpty\tGeneSetName\n')
for gene in allGenes:
if ' ' in gene:
a,b=string.split(gene,' ')
if 'ENS' in a: gene = a
else: gene = b
if 'ENS' not in gene and gene in symbol_to_gene:
gene = symbol_to_gene[gene][0]
line = gene+'\t\t'+geneSetName+'\n'
if line not in stored_lines:
export_data.write(line)
export_data.close()
else:
print 'Could not store since no species name provided.'
def writetSNEScores(scores,outputdir):
export_obj = export.ExportFile(outputdir)
for matrix_row in scores:
matrix_row = map(str,matrix_row)
export_obj.write(string.join(matrix_row,'\t')+'\n')
export_obj.close()
def importtSNEScores(inputdir):
scores=[]
### Imports tSNE scores to allow for different visualizations of the same scatter plot
for line in open(inputdir,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
t=map(float,t)
scores.append(t)
return scores
def tSNE(matrix, column_header,dataset_name,group_db,display=True,showLabels=False,
row_header=None,colorByGene=None,species=None,reimportModelScores=True):
try: prior_clusters = priorColumnClusters
except Exception: prior_clusters=[]
try:
if len(prior_clusters)>0 and len(group_db)==0:
newColumnHeader=[]
i=0
for sample_name in column_header:
newColumnHeader.append(str(prior_clusters[i])+':'+sample_name)
i+=1
group_db, column_header = assignGroupColors(newColumnHeader)
except Exception,e:
#print e
group_db={}
if reimportModelScores:
print 'Re-importing t-SNE model scores rather than calculating from scratch',
try: scores = importtSNEScores(root_dir+dataset_name+'-tSNE_scores.txt'); print '...import finished'
except Exception:
reimportModelScores=False; print '...import failed'
if reimportModelScores==False:
from sklearn.manifold import TSNE
X=matrix.T
#model = TSNE(n_components=2, random_state=0,init='pca',early_exaggeration=4.0,perplexity=20)
model = TSNE(n_components=2)
#model = TSNE(n_components=2, random_state=0, n_iter=10000, early_exaggeration=10)
scores=model.fit_transform(X)
### Export the results for optional re-import later
writetSNEScores(scores,root_dir+dataset_name+'-tSNE_scores.txt')
#pylab.scatter(scores[:,0], scores[:,1], 20, labels);
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.xlabel('TSNE-X')
pylab.ylabel('TSNE-Y')
axes = getAxesTransposed(scores) ### adds buffer space to the end of each axis and creates room for a legend
pylab.axis(axes)
marker_size = 15
if len(column_header)>20:
marker_size = 12
if len(column_header)>40:
marker_size = 10
if len(column_header)>150:
marker_size = 7
if len(column_header)>500:
marker_size = 5
if len(column_header)>1000:
marker_size = 4
if len(column_header)>2000:
marker_size = 3
### Color By Gene
if colorByGene != None and len(matrix)==0:
print 'Gene %s not found in the imported dataset... Coloring by groups.' % colorByGene
if colorByGene != None and len(matrix)>0:
gene_translation_db={}
matrix = numpy.array(matrix)
min_val = matrix.min() ### min val
if ' ' in colorByGene:
genes = string.split(colorByGene,' ')
else:
genes = [colorByGene]
genePresent=False
numberGenesPresent=[]
for gene in genes:
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
### Translate symbol to Ensembl
if len(numberGenesPresent)==0:
try:
import gene_associations; import OBO_import
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
for symbol in genes:
if symbol in symbol_to_gene:
gene = symbol_to_gene[symbol][0]
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
gene_translation_db[symbol]=gene
except Exception: pass
numberGenesPresent = len(numberGenesPresent)
if numberGenesPresent==1:
cm = pylab.cm.get_cmap('Reds')
else:
if numberGenesPresent==2:
cm = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
elif numberGenesPresent==3:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif numberGenesPresent==4:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif numberGenesPresent==5:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif numberGenesPresent==6:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif numberGenesPresent==7:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
else:
cm = pylab.cm.get_cmap('gist_rainbow')
if genePresent:
dataset_name+='-'+colorByGene
group_db={}
bestGeneAssociated={}
k=0
for gene in genes:
try:
try: i = row_header.index(gene)
except Exception: i = row_header.index(gene_translation_db[gene])
values = map(float,matrix[i])
min_val = min(values)
bin_size = (max(values)-min_val)/8
max_val = max(values)
ranges = []
iz=min_val
while iz < (max(values)-bin_size/100):
r = iz,iz+bin_size
if len(ranges)==7:
r = iz,max_val
ranges.append(r)
iz+=bin_size
color_db = {}
for i in range(len(ranges)):
if i==0:
color = '#C0C0C0'
else:
if numberGenesPresent==1:
### use a single color gradient
color = cm(1.*i/len(ranges))
#color = cm(1.*(i+1)/len(ranges))
else:
if i>2:
color = cm(k)
else:
color = '#C0C0C0'
color_db[ranges[i]] = color
i=0
for val in values:
sample = column_header[i]
for (l,u) in color_db:
range_index = ranges.index((l,u)) ### what is the ranking of this range
if val>=l and val<=u:
color = color_db[(l,u)]
color_label = [gene+'-range: '+str(l)[:4]+'-'+str(u)[:4],color,'']
group_db[sample] = color_label
try: bestGeneAssociated[sample].append([range_index,val,color_label])
except Exception: bestGeneAssociated[sample] = [[range_index,val,color_label]]
i+=1
#print min(values),min_val,bin_size,max_val
if len(genes)>1:
### Collapse and rank multiple gene results
for sample in bestGeneAssociated:
bestGeneAssociated[sample].sort()
color_label = bestGeneAssociated[sample][-1][-1]
if numberGenesPresent>1:
index = bestGeneAssociated[sample][-1][0]
if index > 2:
gene = string.split(color_label[0],'-')[0]
else:
gene = 'Null'
color_label[0] = gene
group_db[sample] = color_label
except Exception:
print [gene], 'not found in rows...'
#print traceback.format_exc()
k+=1
else:
print [colorByGene], 'not found in rows...'
pylab.title('t-SNE - '+dataset_name)
group_names={}
i=0
for sample_name in column_header: #scores[0]
### Add the text labels for each
try:
### Get group name and color information
group_name,color,k = group_db[sample_name]
if group_name not in group_names:
label = group_name ### Only add once for each group
else: label = None
group_names[group_name] = color
except Exception:
color = 'r'; label=None
ax.plot(scores[i][0],scores[i][1],color=color,marker='o',markersize=marker_size,label=label,markeredgewidth=0,picker=True)
#except Exception: print i, len(scores[pcB]);kill
if showLabels:
try: sample_name = ' '+string.split(sample_name,':')[1]
except Exception: pass
ax.text(scores[i][0],scores[i][1],sample_name,fontsize=11)
i+=1
group_count = []
for i in group_db:
if group_db[i][0] not in group_count:
group_count.append(group_db[i][0])
#print len(group_count)
Lfontsize = 8
if len(group_count)>20:
Lfontsize = 10
if len(group_count)>30:
Lfontsize = 8
if len(group_count)>40:
Lfontsize = 6
if len(group_count)>50:
Lfontsize = 5
i=0
box = ax.get_position()
if len(group_count) > 0: ### Make number larger to get the legend in the plot -- BUT, the axis buffer above has been disabled
        # Shrink current axis by 20%
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
try: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize = Lfontsize) ### move the legend over to the right of the plot
except Exception: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
else:
ax.set_position([box.x0, box.y0, box.width, box.height])
pylab.legend(loc="upper left", prop={'size': 10})
filename = 'Clustering-%s-t-SNE.pdf' % dataset_name
try: pylab.savefig(root_dir + filename)
except Exception: None ### Rare error
#print 'Exporting:',filename
filename = filename[:-3]+'png'
try: pylab.savefig(root_dir + filename) #dpi=200
except Exception: None ### Rare error
    graphic_link.append(['t-SNE',root_dir+filename])
if display:
print 'Exporting:',filename
try:
pylab.show()
except Exception:
pass### when run in headless mode
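### --- Editor's sketch (not part of the original pipeline): a minimal,
### self-contained t-SNE call mirroring the usage above. Assumes scikit-learn
### is installed; `toy_matrix` is a hypothetical stand-in for the imported
### (samples x features) expression matrix.
def _tsne_sketch():
    import numpy
    from sklearn.manifold import TSNE
    toy_matrix = numpy.random.rand(50, 20)   ### 50 samples, 20 features
    model = TSNE(n_components=2)             ### same constructor call as above
    scores = model.fit_transform(toy_matrix) ### 50 x 2 embedding, one row per sample
    print scores.shape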
def excludeHighlyCorrelatedHits(x,row_header):
### For methylation data or other data with redundant signatures, remove these and only report the first one
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides import warnings
D1 = numpy.corrcoef(x)
i=0
exclude={}
gene_correlations={}
include = []
for score_ls in D1:
k=0
for v in score_ls:
if str(v)!='nan':
                if v>=1.00 and k!=i: ### identical signatures have a rho of exactly 1.0
#print row_header[i], row_header[k], v
if row_header[i] not in exclude:
exclude[row_header[k]]=[]
#if k not in exclude: include.append(row_header[k])
k+=1
#if i not in exclude: include.append(row_header[i])
i+=1
#print len(exclude),len(row_header);sys.exit()
return exclude
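### --- Editor's sketch: why scanning numpy.corrcoef output finds redundant
### signatures, as excludeHighlyCorrelatedHits above intends. Identical rows
### produce an off-diagonal rho of exactly 1.0; anti-correlated rows do not.
def _redundancy_sketch():
    import numpy
    x = numpy.array([[1.,2.,3.,4.],
                     [1.,2.,3.,4.],    ### duplicate of row 0
                     [4.,3.,2.,1.]])   ### anti-correlated with row 0
    D1 = numpy.corrcoef(x)
    print D1[0][1]   ### 1.0 - duplicate signature detected
    print D1[0][2]   ### -1.0 - anti-correlated, not redundant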
def PrincipalComponentAnalysis(matrix, column_header, row_header, dataset_name,
group_db, display=False, showLabels=True, algorithm='SVD', geneSetName=None,
species=None, pcA=1,pcB=2, colorByGene=None):
print "Performing Principal Component Analysis..."
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
try: prior_clusters = priorColumnClusters
except Exception: prior_clusters=[]
try:
if len(prior_clusters)>0 and len(group_db)==0:
newColumnHeader=[]
i=0
for sample_name in column_header:
newColumnHeader.append(str(prior_clusters[i])+':'+sample_name)
i+=1
group_db, column_header = assignGroupColors(newColumnHeader)
except Exception,e:
#print e
group_db={}
pcA-=1
pcB-=1
""" Based in part on code from:
http://glowingpython.blogspot.com/2011/07/principal-component-analysis-with-numpy.html
    Performs principal components analysis
(PCA) on the n-by-p data matrix A
Rows of A correspond to observations, columns to variables.
Returns :
coeff :
is a p-by-p matrix, each column containing coefficients
for one principal component.
score :
the principal component scores; that is, the representation
of A in the principal component space. Rows of SCORE
correspond to observations, columns to components.
latent :
a vector containing the eigenvalues
of the covariance matrix of A.
"""
# computing eigenvalues and eigenvectors of covariance matrix
if algorithm == 'SVD': use_svd = True
else: use_svd = False
#M = (matrix-mean(matrix.T,axis=1)).T # subtract the mean (along columns)
Mdif = matrix/matrix.std()
Mdif = Mdif.T
u, s, vt = svd(Mdif, 0)
fracs = s**2/np.sum(s**2)
entropy = -sum(fracs*np.log(fracs))/np.log(np.min(vt.shape))
label1 = 'PC%i (%2.1f%%)' %(pcA+1, fracs[0]*100)
label2 = 'PC%i (%2.1f%%)' %(pcB+1, fracs[1]*100)
#http://docs.scipy.org/doc/scipy/reference/sparse.html
#scipy.sparse.linalg.svds - sparse svd
#idx = numpy.argsort(vt[0,:])
#print idx;sys.exit() # Use this as your cell order or use a density analysis to get groups
#### FROM LARSSON ########
#100 most correlated Genes with PC1
#print vt
PCsToInclude = 4
correlated_db={}
allGenes={}
new_matrix = []
new_headers = []
added_indexes=[]
x = 0
#100 most correlated Genes with PC1
print 'exporting PCA loading genes to:',root_dir+'/PCA/correlated.txt'
exportData = export.ExportFile(root_dir+'/PCA/correlated.txt')
matrix = zip(*matrix) ### transpose this back to normal
try:
while x<PCsToInclude:
idx = numpy.argsort(u[:,x])
correlated = map(lambda i: row_header[i],idx[:300])
anticorrelated = map(lambda i: row_header[i],idx[-300:])
correlated_db[x] = correlated,anticorrelated
### Create a new filtered matrix of loading gene indexes
fidx = list(idx[:300])+list(idx[-300:])
for i in fidx:
if i not in added_indexes:
added_indexes.append(i)
new_headers.append(row_header[i])
new_matrix.append(matrix[i])
x+=1
#redundant_genes = excludeHighlyCorrelatedHits(numpy.array(new_matrix),new_headers)
redundant_genes = []
for x in correlated_db:
correlated,anticorrelated = correlated_db[x]
count=0
for gene in correlated:
if gene not in redundant_genes and count<100:
exportData.write(gene+'\tcorrelated-PC'+str(x+1)+'\n'); allGenes[gene]=[]
count+=1
count=0
for gene in anticorrelated:
if gene not in redundant_genes and count<100:
exportData.write(gene+'\tanticorrelated-PC'+str(x+1)+'\n'); allGenes[gene]=[]
count+=1
exportData.close()
if geneSetName != None:
if len(geneSetName)>0:
exportCustomGeneSet(geneSetName,species,allGenes)
print 'Exported geneset to "StoredGeneSets"'
except Exception:
pass
###########################
#if len(row_header)>20000:
#print '....Using eigenvectors of the real symmetric square matrix for efficiency...'
#[latent,coeff] = scipy.sparse.linalg.eigsh(cov(M))
#scores=mlab.PCA(scores)
    if use_svd == False:
        M = (numpy.array(matrix)-mean(numpy.array(matrix).T,axis=1)).T # subtract the mean (along columns); needed for the eigendecomposition path
        [latent,coeff] = linalg.eig(cov(M))
        scores = dot(coeff.T,M) # projection of the data in the new space
else:
### transform u into the same structure as the original scores from linalg.eig coeff
scores = vt
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.xlabel(label1)
pylab.ylabel(label2)
axes = getAxes(scores) ### adds buffer space to the end of each axis and creates room for a legend
pylab.axis(axes)
marker_size = 15
if len(column_header)>20:
marker_size = 12
if len(column_header)>40:
marker_size = 10
if len(column_header)>150:
marker_size = 7
if len(column_header)>500:
marker_size = 5
if len(column_header)>1000:
marker_size = 4
if len(column_header)>2000:
marker_size = 3
#marker_size = 9
#samples = list(column_header)
### Color By Gene
if colorByGene != None:
gene_translation_db={}
matrix = numpy.array(matrix)
min_val = matrix.min() ### min val
if ' ' in colorByGene:
genes = string.split(colorByGene,' ')
else:
genes = [colorByGene]
genePresent=False
numberGenesPresent=[]
for gene in genes:
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
### Translate symbol to Ensembl
if len(numberGenesPresent)==0:
try:
import gene_associations; import OBO_import
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
for symbol in genes:
if symbol in symbol_to_gene:
gene = symbol_to_gene[symbol][0]
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
gene_translation_db[symbol]=gene
except Exception: pass
numberGenesPresent = len(numberGenesPresent)
if numberGenesPresent==1:
cm = pylab.cm.get_cmap('Reds')
else:
if numberGenesPresent==2:
cm = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
elif numberGenesPresent==3:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif numberGenesPresent==4:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif numberGenesPresent==5:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif numberGenesPresent==6:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif numberGenesPresent==7:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
else:
cm = pylab.cm.get_cmap('gist_rainbow')
if genePresent:
dataset_name+='-'+colorByGene
group_db={}
bestGeneAssociated={}
k=0
for gene in genes:
try:
try: i = row_header.index(gene)
except Exception: i = row_header.index(gene_translation_db[gene])
values = map(float,matrix[i])
min_val = min(values)
bin_size = (max(values)-min_val)/8
max_val = max(values)
ranges = []
iz=min_val
while iz < (max(values)-bin_size/100):
r = iz,iz+bin_size
if len(ranges)==7:
r = iz,max_val
ranges.append(r)
iz+=bin_size
color_db = {}
for i in range(len(ranges)):
if i==0:
color = '#C0C0C0'
else:
if numberGenesPresent==1:
### use a single color gradient
color = cm(1.*i/len(ranges))
#color = cm(1.*(i+1)/len(ranges))
else:
if i>2:
color = cm(k)
else:
color = '#C0C0C0'
color_db[ranges[i]] = color
i=0
for val in values:
sample = column_header[i]
for (l,u) in color_db:
range_index = ranges.index((l,u)) ### what is the ranking of this range
if val>=l and val<=u:
color = color_db[(l,u)]
color_label = [gene+'-range: '+str(l)[:4]+'-'+str(u)[:4],color,'']
group_db[sample] = color_label
try: bestGeneAssociated[sample].append([range_index,val,color_label])
except Exception: bestGeneAssociated[sample] = [[range_index,val,color_label]]
i+=1
#print min(values),min_val,bin_size,max_val
if len(genes)>1:
### Collapse and rank multiple gene results
for sample in bestGeneAssociated:
bestGeneAssociated[sample].sort()
color_label = bestGeneAssociated[sample][-1][-1]
if numberGenesPresent>1:
index = bestGeneAssociated[sample][-1][0]
if index > 2:
gene = string.split(color_label[0],'-')[0]
else:
gene = 'Null'
color_label[0] = gene
group_db[sample] = color_label
except Exception:
print [gene], 'not found in rows...'
#print traceback.format_exc()
k+=1
else:
print [colorByGene], 'not found in rows...'
pylab.title('Principal Component Analysis - '+dataset_name)
group_names={}
i=0
for sample_name in column_header: #scores[0]
### Add the text labels for each
try:
### Get group name and color information
group_name,color,k = group_db[sample_name]
if group_name not in group_names:
label = group_name ### Only add once for each group
else: label = None
group_names[group_name] = color
except Exception:
color = 'r'; label=None
        try: ax.plot(scores[pcA][i],scores[pcB][i],color=color,marker='o',markersize=marker_size,label=label,markeredgewidth=0,picker=True)
        except Exception, e: print e; print i, len(scores[pcB]);kill ### 'kill' is undefined - the NameError intentionally halts execution
if showLabels:
try: sample_name = ' '+string.split(sample_name,':')[1]
except Exception: pass
ax.text(scores[pcA][i],scores[pcB][i],sample_name,fontsize=11)
i+=1
group_count = []
for i in group_db:
if group_db[i][0] not in group_count:
group_count.append(group_db[i][0])
#print len(group_count)
Lfontsize = 8
if len(group_count)>20:
Lfontsize = 10
if len(group_count)>30:
Lfontsize = 8
if len(group_count)>40:
Lfontsize = 6
if len(group_count)>50:
Lfontsize = 5
i=0
#group_count = group_count*10 ### force the legend box out of the PCA core plot
box = ax.get_position()
if len(group_count) > 0:
        # Shrink current axis by 20%
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
try: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize = Lfontsize) ### move the legend over to the right of the plot
except Exception: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
else:
ax.set_position([box.x0, box.y0, box.width, box.height])
pylab.legend(loc="upper left", prop={'size': 10})
filename = 'Clustering-%s-PCA.pdf' % dataset_name
try: pylab.savefig(root_dir + filename)
except Exception: None ### Rare error
#print 'Exporting:',filename
filename = filename[:-3]+'png'
try: pylab.savefig(root_dir + filename) #dpi=200
except Exception: None ### Rare error
graphic_link.append(['Principal Component Analysis',root_dir+filename])
if display:
print 'Exporting:',filename
try:
pylab.show()
except Exception:
pass### when run in headless mode
fig.clf()
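### --- Editor's sketch: the SVD-based PCA math used in
### PrincipalComponentAnalysis above. For a scaled matrix, the squared
### singular values give the per-component variance fractions that label the
### axes ('PC1 (xx%)'), and the rows of vt serve as the plotted component
### scores. The toy matrix is hypothetical.
def _svd_pca_sketch():
    import numpy
    from numpy.linalg import svd
    M = numpy.random.rand(10, 6)        ### 10 samples x 6 genes (toy data)
    M = M / M.std()                     ### same scaling applied above
    u, s, vt = svd(M, 0)                ### economy-size SVD
    fracs = s**2 / numpy.sum(s**2)      ### variance explained per component
    print 'PC1 (%2.1f%%)' % (fracs[0]*100)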
def ica(filename):
showLabels=True
X, column_header, row_header, dataset_name, group_db = importData(filename)
    X = map(numpy.array, zip(*X)) ### transposes and converts the rows to numpy arrays
column_header, row_header = row_header, column_header
ica = FastICA()
scores = ica.fit(X).transform(X) # Estimate the sources
scores /= scores.std(axis=0)
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.xlabel('ICA-X')
pylab.ylabel('ICA-Y')
pylab.title('ICA - '+dataset_name)
axes = getAxes(scores) ### adds buffer space to the end of each axis and creates room for a legend
pylab.axis(axes)
marker_size = 15
if len(column_header)>20:
marker_size = 12
if len(column_header)>40:
marker_size = 10
if len(column_header)>150:
marker_size = 7
if len(column_header)>500:
marker_size = 5
if len(column_header)>1000:
marker_size = 4
if len(column_header)>2000:
marker_size = 3
group_names={}
i=0
for sample_name in row_header: #scores[0]
### Add the text labels for each
try:
### Get group name and color information
group_name,color,k = group_db[sample_name]
if group_name not in group_names:
label = group_name ### Only add once for each group
else: label = None
group_names[group_name] = color
except Exception:
color = 'r'; label=None
ax.plot(scores[0][i],scores[1][i],color=color,marker='o',markersize=marker_size,label=label)
if showLabels:
ax.text(scores[0][i],scores[1][i],sample_name,fontsize=8)
i+=1
pylab.title('ICA recovered signals')
pylab.show()
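### --- Editor's sketch: a self-contained FastICA example in the spirit of
### ica() above, using scikit-learn on toy mixed signals rather than an
### imported expression file. The mixing matrix A is arbitrary.
def _ica_sketch():
    import numpy
    from sklearn.decomposition import FastICA
    t = numpy.linspace(0, 8, 200)
    S = numpy.c_[numpy.sin(2*t), numpy.sign(numpy.sin(3*t))] ### two source signals
    A = numpy.array([[1., 1.], [0.5, 2.]])                   ### mixing matrix
    X = numpy.dot(S, A.T)                                    ### observed mixtures
    scores = FastICA(n_components=2).fit_transform(X)        ### estimated sources
    scores /= scores.std(axis=0)                             ### same normalization as ica()
    print scores.shape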
def plot_samples(S, axis_list=None):
pylab.scatter(S[:, 0], S[:, 1], s=20, marker='o', linewidths=0, zorder=10,
color='red', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
pylab.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
pylab.quiver(0, 0, x_axis, y_axis, zorder=11, width=2, scale=6,
color=color)
pylab.xlabel('x')
pylab.ylabel('y')
def PCA3D(matrix, column_header, row_header, dataset_name, group_db,
display=False, showLabels=True, algorithm='SVD',geneSetName=None,
species=None,colorByGene=None):
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
fig = pylab.figure()
ax = fig.add_subplot(111, projection='3d')
start = time.time()
#M = (matrix-mean(matrix.T,axis=1)).T # subtract the mean (along columns)
try: prior_clusters = priorColumnClusters
except Exception: prior_clusters=[]
try:
if len(prior_clusters)>0 and len(group_db)==0:
newColumnHeader=[]
i=0
for sample_name in column_header:
newColumnHeader.append(str(prior_clusters[i])+':'+sample_name)
i+=1
group_db, column_header = assignGroupColors(newColumnHeader)
except Exception,e:
#print e
group_db={}
if algorithm == 'SVD': use_svd = True
else: use_svd = False
Mdif = matrix/matrix.std()
Mdif = Mdif.T
u, s, vt = svd(Mdif, 0)
fracs = s**2/np.sum(s**2)
entropy = -sum(fracs*np.log(fracs))/np.log(np.min(vt.shape))
label1 = 'PC%i (%2.1f%%)' %(0+1, fracs[0]*100)
label2 = 'PC%i (%2.1f%%)' %(1+1, fracs[1]*100)
label3 = 'PC%i (%2.1f%%)' %(2+1, fracs[2]*100)
PCsToInclude = 4
correlated_db={}
allGenes={}
new_matrix = []
new_headers = []
added_indexes=[]
x = 0
#100 most correlated Genes with PC1
print 'exporting PCA loading genes to:',root_dir+'/PCA/correlated.txt'
exportData = export.ExportFile(root_dir+'/PCA/correlated.txt')
matrix = zip(*matrix) ### transpose this back to normal
try:
while x<PCsToInclude:
idx = numpy.argsort(u[:,x])
correlated = map(lambda i: row_header[i],idx[:300])
anticorrelated = map(lambda i: row_header[i],idx[-300:])
correlated_db[x] = correlated,anticorrelated
### Create a new filtered matrix of loading gene indexes
fidx = list(idx[:300])+list(idx[-300:])
for i in fidx:
if i not in added_indexes:
added_indexes.append(i)
new_headers.append(row_header[i])
new_matrix.append(matrix[i])
x+=1
#redundant_genes = excludeHighlyCorrelatedHits(numpy.array(new_matrix),new_headers)
redundant_genes = []
for x in correlated_db:
correlated,anticorrelated = correlated_db[x]
count=0
for gene in correlated:
if gene not in redundant_genes and count<100:
exportData.write(gene+'\tcorrelated-PC'+str(x+1)+'\n'); allGenes[gene]=[]
count+=1
count=0
for gene in anticorrelated:
if gene not in redundant_genes and count<100:
exportData.write(gene+'\tanticorrelated-PC'+str(x+1)+'\n'); allGenes[gene]=[]
count+=1
exportData.close()
if geneSetName != None:
if len(geneSetName)>0:
exportCustomGeneSet(geneSetName,species,allGenes)
print 'Exported geneset to "StoredGeneSets"'
except ZeroDivisionError:
pass
#numpy.Mdiff.toFile(root_dir+'/PCA/correlated.txt','\t')
    if use_svd == False:
        M = (numpy.array(matrix)-mean(numpy.array(matrix).T,axis=1)).T # subtract the mean (along columns); needed for the eigendecomposition path
        [latent,coeff] = linalg.eig(cov(M))
        scores = dot(coeff.T,M) # projection of the data in the new space
else:
### transform u into the same structure as the original scores from linalg.eig coeff
scores = vt
end = time.time()
print 'PCA completed in', end-start, 'seconds.'
### Hide the axis number labels
#ax.w_xaxis.set_ticklabels([])
#ax.w_yaxis.set_ticklabels([])
#ax.w_zaxis.set_ticklabels([])
#"""
#ax.set_xticks([]) ### Hides ticks
#ax.set_yticks([])
#ax.set_zticks([])
ax.set_xlabel(label1)
ax.set_ylabel(label2)
ax.set_zlabel(label3)
#"""
#pylab.title('Principal Component Analysis\n'+dataset_name)
"""
pylab.figure()
pylab.xlabel('Principal Component 1')
pylab.ylabel('Principal Component 2')
"""
axes = getAxes(scores,PlotType='3D') ### adds buffer space to the end of each axis and creates room for a legend
pylab.axis(axes)
Lfontsize = 8
group_count = []
for i in group_db:
if group_db[i][0] not in group_count:
group_count.append(group_db[i][0])
### Color By Gene
if colorByGene != None:
gene_translation_db={}
matrix = numpy.array(matrix)
min_val = matrix.min() ### min val
if ' ' in colorByGene:
genes = string.split(colorByGene,' ')
else:
genes = [colorByGene]
genePresent=False
numberGenesPresent=[]
for gene in genes:
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
### Translate symbol to Ensembl
if len(numberGenesPresent)==0:
try:
import gene_associations; import OBO_import
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
for symbol in genes:
if symbol in symbol_to_gene:
gene = symbol_to_gene[symbol][0]
if gene in row_header:
numberGenesPresent.append(gene)
genePresent = True
gene_translation_db[symbol]=gene
except Exception: pass
numberGenesPresent = len(numberGenesPresent)
if numberGenesPresent==1:
cm = pylab.cm.get_cmap('Reds')
else:
if numberGenesPresent==2:
cm = matplotlib.colors.ListedColormap(['#00FF00', '#1E90FF'])
elif numberGenesPresent==3:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C'])
elif numberGenesPresent==4:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#3D3181', '#EE2C3C', '#FEBC18'])
elif numberGenesPresent==5:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#3D3181', '#FEBC18', '#EE2C3C'])
elif numberGenesPresent==6:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
elif numberGenesPresent==7:
cm = matplotlib.colors.ListedColormap(['#88BF47', '#63C6BB', '#29C3EC', '#3D3181', '#7B4976','#FEBC18', '#EE2C3C'])
else:
cm = pylab.cm.get_cmap('gist_rainbow')
if genePresent:
dataset_name+='-'+colorByGene
group_db={}
bestGeneAssociated={}
k=0
for gene in genes:
try:
try: i = row_header.index(gene)
except Exception: i = row_header.index(gene_translation_db[gene])
values = map(float,matrix[i])
min_val = min(values)
bin_size = (max(values)-min_val)/8
max_val = max(values)
ranges = []
iz=min_val
while iz < (max(values)-bin_size/100):
r = iz,iz+bin_size
if len(ranges)==7:
r = iz,max_val
ranges.append(r)
iz+=bin_size
color_db = {}
for i in range(len(ranges)):
if i==0:
color = '#C0C0C0'
else:
if numberGenesPresent==1:
### use a single color gradient
color = cm(1.*i/len(ranges))
#color = cm(1.*(i+1)/len(ranges))
else:
if i>2:
color = cm(k)
else:
color = '#C0C0C0'
color_db[ranges[i]] = color
i=0
for val in values:
sample = column_header[i]
for (l,u) in color_db:
range_index = ranges.index((l,u)) ### what is the ranking of this range
if val>=l and val<=u:
color = color_db[(l,u)]
color_label = [gene+'-range: '+str(l)[:4]+'-'+str(u)[:4],color,'']
group_db[sample] = color_label
try: bestGeneAssociated[sample].append([range_index,val,color_label])
except Exception: bestGeneAssociated[sample] = [[range_index,val,color_label]]
i+=1
#print min(values),min_val,bin_size,max_val
if len(genes)>1:
### Collapse and rank multiple gene results
for sample in bestGeneAssociated:
bestGeneAssociated[sample].sort()
color_label = bestGeneAssociated[sample][-1][-1]
if numberGenesPresent>1:
index = bestGeneAssociated[sample][-1][0]
if index > 2:
gene = string.split(color_label[0],'-')[0]
else:
gene = 'Null'
color_label[0] = gene
group_db[sample] = color_label
except Exception:
print [gene], 'not found in rows...'
#print traceback.format_exc()
k+=1
else:
print [colorByGene], 'not found in rows...'
#print len(group_count)
if len(group_count)>20:
Lfontsize = 10
if len(group_count)>30:
Lfontsize = 8
if len(group_count)>40:
Lfontsize = 6
if len(group_count)>50:
Lfontsize = 5
if len(scores[0])>150:
markersize = 7
else:
markersize = 10
i=0
group_names={}
for x in scores[0]:
### Add the text labels for each
sample_name = column_header[i]
try:
### Get group name and color information
group_name,color, k = group_db[sample_name]
if group_name not in group_names:
label = group_name ### Only add once for each group
else: label = None
group_names[group_name] = color, k
except Exception:
color = 'r'; label=None
ax.plot([scores[0][i]],[scores[1][i]],[scores[2][i]],color=color,marker='o',markersize=markersize,label=label,markeredgewidth=0,picker=True) #markeredgecolor=color
if showLabels:
#try: sample_name = ' '+string.split(sample_name,':')[1]
#except Exception: pass
ax.text(scores[0][i],scores[1][i],scores[2][i], ' '+sample_name,fontsize=9)
i+=1
    # Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
#pylab.legend(loc="upper left", prop={'size': 10})
try: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize = Lfontsize) ### move the legend over to the right of the plot
except Exception: ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
filename = 'Clustering-%s-3D-PCA.pdf' % dataset_name
pylab.savefig(root_dir + filename)
#print 'Exporting:',filename
filename = filename[:-3]+'png'
pylab.savefig(root_dir + filename) #dpi=200
graphic_link.append(['Principal Component Analysis',root_dir+filename])
if display:
print 'Exporting:',filename
try: pylab.show()
except Exception: None ### when run in headless mode
fig.clf()
def getAxes1(scores,PlotType=None):
""" Adjust these axes to account for (A) legend size (left hand upper corner)
and (B) long sample name extending to the right
"""
try:
x_range = max(scores[0])-min(scores[0])
y_range = max(scores[1])-min(scores[1])
if PlotType == '3D':
x_axis_min = min(scores[0])-(x_range/10)
x_axis_max = max(scores[0])+(x_range/10)
y_axis_min = min(scores[1])-(y_range/10)
y_axis_max = max(scores[1])+(y_range/10)
else:
x_axis_min = min(scores[0])-(x_range/10)
x_axis_max = max(scores[0])+(x_range/10)
y_axis_min = min(scores[1])-(y_range/10)
y_axis_max = max(scores[1])+(y_range/10)
except KeyError:
None
return [x_axis_min, x_axis_max, y_axis_min, y_axis_max]
def getAxes(scores,PlotType=None):
""" Adjust these axes to account for (A) legend size (left hand upper corner)
and (B) long sample name extending to the right
"""
try:
x_range = max(scores[0])-min(scores[0])
y_range = max(scores[1])-min(scores[1])
if PlotType == '3D':
x_axis_min = min(scores[0])-(x_range/1.5)
x_axis_max = max(scores[0])+(x_range/1.5)
y_axis_min = min(scores[1])-(y_range/5)
y_axis_max = max(scores[1])+(y_range/5)
else:
x_axis_min = min(scores[0])-(x_range/10)
x_axis_max = max(scores[0])+(x_range/10)
y_axis_min = min(scores[1])-(y_range/10)
y_axis_max = max(scores[1])+(y_range/10)
except KeyError:
None
return [x_axis_min, x_axis_max, y_axis_min, y_axis_max]
def getAxesTransposed(scores):
""" Adjust these axes to account for (A) legend size (left hand upper corner)
and (B) long sample name extending to the right
"""
scores = map(numpy.array, zip(*scores))
try:
x_range = max(scores[0])-min(scores[0])
y_range = max(scores[1])-min(scores[1])
x_axis_min = min(scores[0])-int((float(x_range)/7))
x_axis_max = max(scores[0])+int((float(x_range)/7))
        y_axis_min = min(scores[1])-int(float(y_range)/7)
        y_axis_max = max(scores[1])+int(float(y_range)/7)
except KeyError:
None
return [x_axis_min, x_axis_max, y_axis_min, y_axis_max]
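### --- Editor's worked example for getAxesTransposed: for two samples at
### (0,0) and (7,14), the x-range is 7 and the y-range is 14, so the returned
### axes are [0-1, 7+1, 0-2, 14+2] = [-1, 8, -2, 16], i.e. a one-seventh
### buffer added to each side of the data.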
def Kmeans(features, column_header, row_header):
#http://www.janeriksolem.net/2009/04/clustering-using-scipys-k-means.html
    ### Demo data: two normally distributed 2D classes (note this overwrites the passed-in features)
    class1 = numpy.array(numpy.random.standard_normal((100,2))) + numpy.array([5,5])
    class2 = 1.5 * numpy.array(numpy.random.standard_normal((100,2)))
    features = numpy.vstack((class1,class2))
centroids,variance = scipy.cluster.vq.kmeans(features,2)
code,distance = scipy.cluster.vq.vq(features,centroids)
"""
This generates two normally distributed classes in two dimensions. To try and cluster the points, run k-means with k=2 like this.
The variance is returned but we don't really need it since the SciPy implementation computes several runs (default is 20) and selects the one with smallest variance for us. Now you can check where each data point is assigned using the vector quantization function in the SciPy package.
By checking the value of code we can see if there are any incorrect assignments. To visualize, we can plot the points and the final centroids.
"""
pylab.plot([p[0] for p in class1],[p[1] for p in class1],'*')
pylab.plot([p[0] for p in class2],[p[1] for p in class2],'r*')
pylab.plot([p[0] for p in centroids],[p[1] for p in centroids],'go')
pylab.show()
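### --- Editor's sketch: a self-contained version of the SciPy k-means demo in
### Kmeans() above, runnable without the unused function arguments.
def _kmeans_sketch():
    import numpy, scipy.cluster.vq
    class1 = numpy.random.standard_normal((100,2)) + numpy.array([5,5])
    class2 = 1.5 * numpy.random.standard_normal((100,2))
    features = numpy.vstack((class1,class2))
    centroids,variance = scipy.cluster.vq.kmeans(features,2)  ### best of several runs
    code,distance = scipy.cluster.vq.vq(features,centroids)   ### assign each point
    print len(centroids), 'centroids;', len(code), 'assignments'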
"""
def displaySimpleNetworkX():
import networkx as nx
print 'Graphing output with NetworkX'
    gr = nx.Graph(rotate=90,bgcolor='white') ### commands for networkx and pygraphviz are the same or similar
edges = importSIF('Config/TissueFateMap.sif')
### Add nodes and edges
for (node1,node2,type) in edges:
gr.add_edge(node1,node2)
draw_networkx_edges
#gr['Myometrium']['color']='red'
# Draw as PNG
nx.draw_shell(gr) #wopi, gvcolor, wc, ccomps, tred, sccmap, fdp, circo, neato, acyclic, nop, gvpr, dot, sfdp. - fdp
pylab.savefig('LineageNetwork.png')
def displaySimpleNetwork(sif_filename,fold_db,pathway_name):
import pygraphviz as pgv
#print 'Graphing output with PygraphViz'
gr = pgv.AGraph(bgcolor='white',directed=True) ### Graph creation and setting of attributes - directed indicates arrows should be added
#gr = pgv.AGraph(rotate='90',bgcolor='lightgray')
### Set graph attributes
gr.node_attr['style']='filled'
gr.graph_attr['label']='%s Network' % pathway_name
edges = importSIF(sif_filename)
if len(edges) > 700:
print sif_filename, 'too large to visualize...'
else:
### Add nodes and edges
for (node1,node2,type) in edges:
nodes = (node1,node2)
gr.add_edge(nodes)
child, parent = nodes
edge = gr.get_edge(nodes[0],nodes[1])
if 'TF' in pathway_name or 'WGRV' in pathway_name:
node = child ### This is the regulating TF
else:
node = parent ### This is the pathway
n=gr.get_node(node)
### http://www.graphviz.org/doc/info/attrs.html
n.attr['penwidth'] = 4
n.attr['fillcolor']= '#FFFF00' ### yellow
n.attr['shape']='rectangle'
#n.attr['weight']='yellow'
#edge.attr['arrowhead'] = 'diamond' ### set the arrow type
id_color_db = WikiPathways_webservice.getHexadecimalColorRanges(fold_db,'Genes')
for gene_symbol in id_color_db:
color_code = id_color_db[gene_symbol]
try:
n=gr.get_node(gene_symbol)
n.attr['fillcolor']= '#'+string.upper(color_code) #'#FF0000'
#n.attr['rotate']=90
except Exception: None
# Draw as PNG
#gr.layout(prog='dot') #fdp (spring embedded), sfdp (OK layout), neato (compressed), circo (lots of empty space), dot (hierarchical - linear)
gr.layout(prog='neato')
output_filename = '%s.png' % sif_filename[:-4]
#print output_filename
gr.draw(output_filename)
"""
def findParentDir(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'\\','/')
x = string.find(filename[::-1],'/')*-1 ### get just the parent directory
return filename[:x]
def findFilename(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'\\','/')
    x = string.find(filename[::-1],'/')*-1 ### position of the last '/' - everything after it is the filename
return filename[x:]
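### --- Editor's worked examples for the two path helpers above:
### findParentDir('/data/exp/input.txt') -> '/data/exp/'
### findFilename('/data/exp/input.txt')  -> 'input.txt'
### Doubled and backslashed separators are normalized first, so Windows-style
### paths behave the same way.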
def runHierarchicalClustering(matrix, row_header, column_header, dataset_name,
row_method, row_metric, column_method, column_metric,
color_gradient, display=False, contrast=None,
allowAxisCompression=True,Normalize=True):
""" Running with cosine or other distance metrics can often produce negative Z scores
during clustering, so adjustments to the clustering may be required.
=== Options Include ===
row_method = 'average'
column_method = 'single'
row_metric = 'cosine'
column_metric = 'euclidean'
color_gradient = 'red_white_blue'
color_gradient = 'red_black_sky'
color_gradient = 'red_black_blue'
color_gradient = 'red_black_green'
color_gradient = 'yellow_black_blue'
    color_gradient = 'coolwarm'
color_gradient = 'seismic'
color_gradient = 'green_white_purple'
"""
try:
if allowLargeClusters: maxSize = 20000
else: maxSize = 7000
except Exception: maxSize = 7000
try:
PriorColumnClusters=priorColumnClusters
PriorRowClusters=priorRowClusters
except Exception:
PriorColumnClusters=None
PriorRowClusters=None
run = False
print 'max allowed cluster size:',maxSize
if len(matrix)>0 and (len(matrix)<maxSize or row_method == None):
#if len(matrix)>5000: row_metric = 'euclidean'
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
try:
### Default for display is False, when set to True, Pylab will render the image
heatmap(numpy.array(matrix), row_header, column_header, row_method, column_method,
row_metric, column_metric, color_gradient, dataset_name, display=display,
contrast=contrast,allowAxisCompression=allowAxisCompression,Normalize=Normalize,
PriorColumnClusters=PriorColumnClusters,PriorRowClusters=PriorRowClusters)
run = True
except Exception:
print traceback.format_exc()
try:
pylab.clf()
pylab.close() ### May result in TK associated errors later on
import gc
gc.collect()
except Exception: None
if len(matrix)<10000:
                    print 'Error using %s ... trying cosine/average instead' % row_metric
                    row_metric = 'cosine'; row_method = 'average' ### cityblock
else:
print 'Error with hierarchical clustering... only clustering arrays'
row_method = None ### Skip gene clustering
try:
heatmap(numpy.array(matrix), row_header, column_header, row_method, column_method,
row_metric, column_metric, color_gradient, dataset_name, display=display,
contrast=contrast,allowAxisCompression=allowAxisCompression,Normalize=Normalize,
PriorColumnClusters=PriorColumnClusters,PriorRowClusters=PriorRowClusters)
run = True
except Exception:
print traceback.format_exc()
                    print 'Unable to generate cluster due to dataset incompatibility.'
elif len(matrix)==0:
print_out = 'SKIPPING HIERARCHICAL CLUSTERING!!! - Your dataset file has no associated rows.'
print print_out
else:
print_out = 'SKIPPING HIERARCHICAL CLUSTERING!!! - Your dataset file is over the recommended size limit for clustering ('+str(maxSize)+' rows). Please cluster later using "Additional Analyses"'
print print_out
try:
pylab.clf()
pylab.close() ### May result in TK associated errors later on
import gc
gc.collect()
except Exception: None
return run
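### --- Editor's sketch of a runHierarchicalClustering call on toy data. This
### assumes the module globals it depends on (root_dir, graphic_link, etc.)
### have already been set by runHCexplicit or runHCOnly; the matrix and
### headers below are hypothetical.
def _clustering_sketch():
    import numpy
    matrix = numpy.random.rand(20, 6).tolist()        ### 20 genes x 6 samples
    row_header = ['gene%d' % i for i in range(20)]
    column_header = ['s%d' % i for i in range(6)]
    runHierarchicalClustering(matrix, row_header, column_header, 'toy',
                              'average', 'cosine', 'weighted', 'cosine',
                              'red_black_sky', display=False)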
def debugTKBug():
return None
def runHCexplicit(filename, graphics, row_method, row_metric, column_method, column_metric, color_gradient,
extra_params, display=True, contrast=None, Normalize=False, JustShowTheseIDs=[],compressAxis=True):
""" Explicit method for hieararchical clustering with defaults defined by the user (see below function) """
#print [filename, graphics, row_method, row_metric, column_method, column_metric, color_gradient, contrast, Normalize]
global root_dir
global inputFilename
global originalFilename
global graphic_link
global allowLargeClusters
global GroupDB
global justShowTheseIDs
global targetGeneIDs
global normalize
global rho_cutoff
global species
global runGOElite
global EliteGeneSets
global storeGeneSetName
EliteGeneSets=[]
targetGene=[]
filterByPathways=False
runGOElite = False
justShowTheseIDs = JustShowTheseIDs
allowLargeClusters = True
if compressAxis:
allowAxisCompression = True
else:
allowAxisCompression = False
graphic_link=graphics ### Store all locations of pngs
inputFilename = filename ### Used when calling R
filterIDs = False
normalize = Normalize
try:
### Specific additional optional parameters for filtering
transpose = extra_params.Transpose()
try:
rho_cutoff = extra_params.RhoCutoff()
print 'Setting correlation cutoff to a rho of',rho_cutoff
except Exception:
rho_cutoff = 0.5 ### Always done if no rho, but only used if getGeneCorrelations == True
#print 'Setting correlation cutoff to a rho of',rho_cutoff
PathwayFilter = extra_params.PathwaySelect()
GeneSet = extra_params.GeneSet()
OntologyID = extra_params.OntologyID()
Normalize = extra_params.Normalize()
normalize = Normalize
filterIDs = True
species = extra_params.Species()
platform = extra_params.Platform()
vendor = extra_params.Vendor()
newInput = findParentDir(inputFilename)+'/GeneSetClustering/'+findFilename(inputFilename)
targetGene = extra_params.GeneSelection() ### Select a gene or ID to get the top correlating genes
        getGeneCorrelations = extra_params.GetGeneCorrelations() ### Whether to find the genes most correlated with the selected gene(s)
filterByPathways = extra_params.FilterByPathways()
PathwayFilter, filterByPathways = verifyPathwayName(PathwayFilter,GeneSet,OntologyID,filterByPathways)
justShowTheseIDs_var = extra_params.JustShowTheseIDs()
if len(justShowTheseIDs_var)>0:
justShowTheseIDs = justShowTheseIDs_var
elif len(targetGene)>0:
targetGene = string.replace(targetGene,'\n',' ')
targetGene = string.replace(targetGene,'\r',' ')
justShowTheseIDs = string.split(targetGene,' ')
try:
EliteGeneSets = extra_params.ClusterGOElite()
if EliteGeneSets != ['']: runGOElite = True
except Exception:
#print traceback.format_exc()
pass
try:
storeGeneSetName = extra_params.StoreGeneSetName()
except Exception:
storeGeneSetName = ''
except Exception,e:
#print traceback.format_exc();sys.exit()
transpose = extra_params
root_dir = findParentDir(filename)
if 'ExpressionOutput/Clustering' in root_dir:
root_dir = string.replace(root_dir,'ExpressionOutput/Clustering','DataPlots')
elif 'ExpressionOutput' in root_dir:
root_dir = string.replace(root_dir,'ExpressionOutput','DataPlots') ### Applies to clustering of LineageProfiler results
root_dir = string.replace(root_dir,'/Clustering','') ### Applies to clustering of MarkerFinder results
else:
root_dir += '/DataPlots/'
try: os.mkdir(root_dir) ### May need to create this directory
except Exception: None
if row_method == 'hopach': reverseOrder = False
else: reverseOrder = True
#"""
matrix, column_header, row_header, dataset_name, group_db = importData(filename,Normalize=Normalize,reverseOrder=reverseOrder)
GroupDB = group_db
inputFilename = string.replace(inputFilename,'.cdt','.txt')
originalFilename = inputFilename
try:
        if len(priorColumnClusters)>0 and len(priorRowClusters)>0 and row_method==None and column_method == None:
try: justShowTheseIDs = importPriorDrivers(inputFilename)
except Exception: pass #justShowTheseIDs=[]
except Exception:
#print traceback.format_exc()
pass
#print len(matrix),;print len(column_header),;print len(row_header)
if filterIDs:
transpose_update = True ### Since you can filterByPathways and getGeneCorrelations, only transpose once
if filterByPathways: ### Restrict analyses to only a single pathway/gene-set/ontology term
if isinstance(PathwayFilter, tuple) or isinstance(PathwayFilter, list):
FileName = string.join(list(PathwayFilter),' ')
FileName = string.replace(FileName,':','-')
else: FileName = PathwayFilter
if len(FileName)>40:
FileName = FileName[:40]
try: inputFilename = string.replace(newInput,'.txt','_'+FileName+'.txt') ### update the pathway reference for HOPACH
except Exception: inputFilename = string.replace(newInput,'.txt','_GeneSets.txt')
vars = filterByPathway(matrix,row_header,column_header,species,platform,vendor,GeneSet,PathwayFilter,OntologyID,transpose)
try: dataset_name += '-'+FileName
except Exception: dataset_name += '-GeneSets'
transpose_update = False
if 'amplify' in targetGene:
targetGene = string.join(vars[1],' ')+' amplify '+targetGene ### amplify the gene sets, but need the original matrix and headers (not the filtered)
else: matrix,row_header,column_header = vars
try:
        alt_targetGene = string.replace(targetGene,'amplify','')
alt_targetGene = string.replace(alt_targetGene,'driver','')
alt_targetGene = string.replace(alt_targetGene,'guide','')
alt_targetGene = string.replace(alt_targetGene,'top','')
alt_targetGene = string.replace(alt_targetGene,'positive','')
alt_targetGene = string.replace(alt_targetGene,'excludeCellCycle','')
alt_targetGene = string.replace(alt_targetGene,'monocle','')
alt_targetGene = string.replace(alt_targetGene,'GuideOnlyCorrelation','')
alt_targetGene = string.replace(alt_targetGene,' ','')
except Exception:
alt_targetGene = ''
if getGeneCorrelations and targetGene != 'driver' and targetGene != 'GuideOnlyCorrelation' and \
targetGene != 'guide' and targetGene !='excludeCellCycle' and \
            targetGene !='top' and targetGene != 'monocle' and \
targetGene !='positive' and len(alt_targetGene)>0: ###Restrict analyses to only genes that correlate with the target gene of interest
allowAxisCompression = False
if transpose and transpose_update == False: transpose_update = False ### If filterByPathways selected
elif transpose and transpose_update: transpose_update = True ### If filterByPathways not selected
else: transpose_update = False ### If transpose == False
if '\r' in targetGene or '\n' in targetGene:
targetGene = string.replace(targetGene, '\r',' ')
targetGene = string.replace(targetGene, '\n',' ')
if len(targetGene)>15:
inputFilename = string.replace(newInput,'.txt','-'+targetGene[:50]+'.txt') ### update the pathway reference for HOPACH
dataset_name += '-'+targetGene[:50]
else:
inputFilename = string.replace(newInput,'.txt','-'+targetGene+'.txt') ### update the pathway reference for HOPACH
dataset_name += '-'+targetGene
inputFilename = root_dir+'/'+string.replace(findFilename(inputFilename),'|',' ')
inputFilename = root_dir+'/'+string.replace(findFilename(inputFilename),':',' ') ### need to be careful of C://
dataset_name = string.replace(dataset_name,'|',' ')
dataset_name = string.replace(dataset_name,':',' ')
try:
matrix,row_header,column_header,row_method = getAllCorrelatedGenes(matrix,row_header,column_header,species,platform,vendor,targetGene,row_method,transpose_update)
except Exception:
print traceback.format_exc()
print targetGene, 'not found in input expression file. Exiting. \n\n'
            badExit ### undefined name - raises a NameError on purpose to halt execution
targetGeneIDs = targetGene
exportTargetGeneList(targetGene,inputFilename)
else:
if transpose: ### Transpose the data matrix
print 'Transposing the data matrix'
            matrix = map(numpy.array, zip(*matrix)) ### transposes and converts the rows to numpy arrays
column_header, row_header = row_header, column_header
#print len(matrix),;print len(column_header),;print len(row_header)
if len(column_header)>1000 or len(row_header)>1000:
print 'Performing hierarchical clustering (please be patient)...'
runHierarchicalClustering(matrix, row_header, column_header, dataset_name, row_method, row_metric,
column_method, column_metric, color_gradient, display=display,contrast=contrast,
allowAxisCompression=allowAxisCompression, Normalize=Normalize)
#"""
#graphic_link = [root_dir+'Clustering-exp.myeloid-steady-state-amplify positive Mki67 Clec4a2 Gria3 Ifitm6 Gfi1b -hierarchical_cosine_cosine.txt']
if 'driver' in targetGene or 'guide' in targetGene:
import RNASeq
input_file = graphic_link[-1][-1][:-4]+'.txt'
if 'excludeCellCycle' in targetGene: excludeCellCycle = True
else: excludeCellCycle = False
print 'excludeCellCycle',excludeCellCycle
targetGene = RNASeq.remoteGetDriverGenes(species,platform,input_file,excludeCellCycle=excludeCellCycle,ColumnMethod=column_method)
extra_params.setGeneSelection(targetGene) ### force correlation to these
extra_params.setGeneSet('None Selected') ### silence this
graphic_link= runHCexplicit(filename, graphic_link, row_method, row_metric, column_method, column_metric, color_gradient,
extra_params, display=display, contrast=contrast, Normalize=Normalize, JustShowTheseIDs=JustShowTheseIDs,compressAxis=compressAxis)
return graphic_link
def importPriorDrivers(inputFilename):
filename = string.replace(inputFilename,'Clustering-','')
filename = string.split(filename,'-hierarchical')[0]+'-targetGenes.txt'
genes = open(filename, "rU")
genes = map(lambda x: cleanUpLine(x),genes)
return genes
def exportTargetGeneList(targetGene,inputFilename):
exclude=['positive','top','driver', 'guide', 'amplify','GuideOnlyCorrelation']
exportFile = inputFilename[:-4]+'-targetGenes.txt'
eo = export.ExportFile(root_dir+findFilename(exportFile))
targetGenes = string.split(targetGene,' ')
for gene in targetGenes:
if gene not in exclude:
try: eo.write(gene+'\n')
except Exception: print 'Error export out gene (bad ascii):', [gene]
eo.close()
def debugPylab():
pylab.figure()
pylab.close()
pylab.figure()
def verifyPathwayName(PathwayFilter,GeneSet,OntologyID,filterByPathways):
import gene_associations
### If the user supplied an Ontology ID rather than a Ontology term name, lookup the term name and return this as the PathwayFilter
if len(OntologyID)>0:
PathwayFilter = gene_associations.lookupOntologyID(GeneSet,OntologyID,type='ID')
filterByPathways = True
return PathwayFilter, filterByPathways
def filterByPathway(matrix,row_header,column_header,species,platform,vendor,GeneSet,PathwayFilter,OntologyID,transpose):
### Filter all the matrix and header entries for IDs in the selected pathway
import gene_associations
import OBO_import
exportData = export.ExportFile(inputFilename)
matrix2=[]; row_header2=[]
if 'Ontology' in GeneSet: directory = 'nested'
else: directory = 'gene-mapp'
print "GeneSet(s) to analyze:",PathwayFilter
if isinstance(PathwayFilter, tuple) or isinstance(PathwayFilter, list): ### see if it is one or more pathways
associated_IDs={}
for p in PathwayFilter:
associated = gene_associations.simpleGenePathwayImport(species,GeneSet,p,OntologyID,directory)
for i in associated:associated_IDs[i]=[]
else:
associated_IDs = gene_associations.simpleGenePathwayImport(species,GeneSet,PathwayFilter,OntologyID,directory)
gene_annotations = gene_associations.importGeneData(species,'Ensembl')
vendor = string.replace(vendor,'other:','') ### For other IDs
try: array_to_ens = gene_associations.filterGeneToUID(species,'Ensembl',vendor,associated_IDs)
except Exception: array_to_ens={}
if platform == "3'array":
### IDs thus won't be Ensembl - need to translate
try:
#ens_to_array = gene_associations.getGeneToUidNoExon(species,'Ensembl-'+vendor); print vendor, 'IDs imported...'
array_to_ens = gene_associations.filterGeneToUID(species,'Ensembl',vendor,associated_IDs)
except Exception:
pass
#print platform, vendor, 'not found!!! Exiting method'; badExit
#array_to_ens = gene_associations.swapKeyValues(ens_to_array)
try:
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception:
pass
i=0
original_rows={} ### Don't add the same original ID twice if it associates with different Ensembl IDs
for row_id in row_header:
original_id = row_id; symbol = row_id
        if 'SampleLogFolds' in inputFilename or 'RelativeLogFolds' in inputFilename or 'AltConfirmed' in inputFilename or 'MarkerGenes' in inputFilename or 'blah' not in inputFilename: ### the last clause is always True
try: row_id,symbol = string.split(row_id,' ')[:2] ### standard ID convention is ID space symbol
except Exception:
try: symbol = gene_to_symbol[row_id][0]
except Exception: None
if len(symbol)==0: symbol = row_id
if ':' in row_id:
try:
cluster,row_id = string.split(row_id,':')
updated_row_id = cluster+':'+symbol
except Exception:
pass
else:
updated_row_id = symbol
try: original_id = updated_row_id
except Exception: pass
if platform == "3'array":
try:
try: row_ids = array_to_ens[row_id]
except Exception: row_ids = symbol_to_gene[symbol]
except Exception:
row_ids = [row_id]
else:
try:
try: row_ids = array_to_ens[row_id]
except Exception: row_ids = symbol_to_gene[symbol]
except Exception:
row_ids = [row_id]
for row_id in row_ids:
if row_id in associated_IDs:
if 'SampleLogFolds' in inputFilename or 'RelativeLogFolds' in inputFilename:
if original_id != symbol:
row_id = original_id+' '+symbol
else: row_id = symbol
else:
try: row_id = gene_annotations[row_id].Symbol()
except Exception: None ### If non-Ensembl data
if original_id not in original_rows: ### Don't add the same ID twice if associated with mult. Ensembls
matrix2.append(matrix[i])
#row_header2.append(row_id)
row_header2.append(original_id)
original_rows[original_id]=None
i+=1
if transpose:
        matrix2 = map(numpy.array, zip(*matrix2)) ### transposes and converts the rows to numpy arrays
column_header, row_header2 = row_header2, column_header
exportData.write(string.join(['UID']+column_header,'\t')+'\n') ### title row export
i=0
for row_id in row_header2:
exportData.write(string.join([row_id]+map(str,matrix2[i]),'\t')+'\n') ### export values
i+=1
print len(row_header2), 'filtered IDs'
exportData.close()
return matrix2,row_header2,column_header
def getAllCorrelatedGenes(matrix,row_header,column_header,species,platform,vendor,targetGene,row_method,transpose):
### Filter all the matrix and header entries for IDs in the selected targetGene
resort_by_ID_name=False
if resort_by_ID_name:
index=0; new_row_header=[]; new_matrix=[]; temp_row_header = []
for name in row_header: temp_row_header.append((name,index)); index+=1
temp_row_header.sort()
for (name,index) in temp_row_header:
new_row_header.append(name)
new_matrix.append(matrix[index])
matrix = new_matrix
row_header = new_row_header
exportData = export.ExportFile(inputFilename)
try:
import gene_associations
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
#import OBO_import; symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
except Exception:
print 'No Ensembl-Symbol database available for',species
if platform == "3'array":
### IDs thus won't be Ensembl - need to translate
try:
if ':' in vendor:
vendor = string.split(vendor,':')[1]
#ens_to_array = gene_associations.getGeneToUidNoExon(species,'Ensembl-'+vendor); print vendor, 'IDs imported...'
array_to_ens = gene_associations.filterGeneToUID(species,'Ensembl',vendor,{})
except Exception,e:
array_to_ens={}
for uid in array_to_ens:
for gid in array_to_ens[uid]:
if gid in gene_to_symbol:
symbol = gene_to_symbol[gid][0]
try: gene_to_symbol[uid].append(symbol)
except Exception: gene_to_symbol[uid] = [symbol]
matrix2=[]
row_header2=[]
matrix_db={} ### Used to optionally sort according to the original order
multipleGenes = False
i=0
### If multiple genes entered, just display these
targetGenes=[targetGene]
if ' ' in targetGene or ',' in targetGene or '|' in targetGene or '\n' in targetGene or '\r' in targetGene:
multipleGenes = True
if ' ' in targetGene: delim = ' '
if ',' in targetGene: delim = ','
if '|' in targetGene and 'alt_junction' not in originalFilename: delim = '|'
if '\n' in targetGene: delim = '\n'
if '\r' in targetGene: delim = '\r'
targetGenes = string.split(targetGene,delim)
if row_method != None: targetGenes.sort()
for row_id in row_header:
original_rowid = row_id
symbol=row_id
if ':' in row_id:
a,b = string.split(row_id,':')[:2]
if 'ENS' in a or len(a)==17:
try:
row_id = a
symbol = gene_to_symbol[row_id][0]
except Exception: symbol =''
elif 'ENS' not in b and len(a)!=17:
row_id = b
elif 'ENS' in b:
symbol = original_rowid
row_id = a
try: row_id,symbol = string.split(row_id,' ')[:2] ### standard ID convention is ID space symbol
except Exception:
try: symbol = gene_to_symbol[row_id][0]
except Exception:
if 'ENS' not in original_rowid:
row_id, symbol = row_id, row_id
if 'ENS' not in original_rowid and len(original_rowid)!=17:
if original_rowid != symbol:
symbol = original_rowid+' '+symbol
for gene in targetGenes:
if string.lower(gene) == string.lower(row_id) or string.lower(gene) == string.lower(symbol) or string.lower(original_rowid)==string.lower(gene):
matrix2.append(matrix[i]) ### Values for the row
row_header2.append(symbol)
matrix_db[symbol]=matrix[i]
i+=1
i=0
#for gene in targetGenes:
# if gene not in matrix_db: print gene
else:
i=0
original_rows={} ### Don't add the same original ID twice if it associates with different Ensembl IDs
for row_id in row_header:
original_id = row_id
symbol = 'NA'
            if 'SampleLogFolds' in inputFilename or 'RelativeLogFolds' in inputFilename or 'blah' not in inputFilename: ### the last clause is always True
try: row_id,symbol = string.split(row_id,' ')[:2] ### standard ID convention is ID space symbol
except Exception:
try: symbol = gene_to_symbol[row_id][0]
except Exception:
row_id, symbol = row_id, row_id
original_id = row_id
if row_id == targetGene or symbol == targetGene:
targetGeneValues = matrix[i] ### Values for the row
break
i+=1
i=0
if multipleGenes==False: limit = 50
else: limit = 140 # lower limit is 132
print 'limit:',limit
if multipleGenes==False or 'amplify' in targetGene or 'correlated' in targetGene:
row_header3=[] ### Convert to symbol if possible
if multipleGenes==False:
targetGeneValue_array = [targetGeneValues]
else:
targetGeneValue_array = matrix2
if len(row_header2)>4 and len(row_header)<20000:
            print 'Performing all pairwise correlations...',
corr_matrix = numpyCorrelationMatrixGene(matrix,row_header,row_header2,gene_to_symbol)
print 'complete'
matrix2=[]; original_headers=row_header2; row_header2 = []
matrix2_alt=[]; row_header2_alt=[]
### If one gene entered, display the most positive and negative correlated
import markerFinder; k=0
for targetGeneValues in targetGeneValue_array:
correlated=[]
anticorrelated=[]
try: targetGeneID = original_headers[k]
except Exception: targetGeneID=''
try:
rho_results = list(corr_matrix[targetGeneID])
except Exception:
#print traceback.format_exc()
rho_results = markerFinder.simpleScipyPearson(matrix,targetGeneValues)
correlated_symbols={}
#print targetGeneID, rho_results[:130][-1];sys.exit()
            for (rho,ind) in rho_results[:limit]: ### Get the top correlated genes (up to the limit) plus the gene of interest
proceed = True
if 'top' in targetGene:
                    if rho_results[4][0]<rho_cutoff: proceed = False ### skip if the 5th-best correlation falls below the cutoff
if rho>rho_cutoff and proceed: #and rho_results[3][0]>rho_cutoff:# ensures only clustered genes considered
rh = row_header[ind]
#if gene_to_symbol[rh][0] in targetGenes:correlated.append(gene_to_symbol[rh][0])
#correlated.append(gene_to_symbol[rh][0])
if len(row_header2)<100 or multipleGenes:
rh = row_header[ind]
#print rh, rho # Ly6c1, S100a8
if matrix[ind] not in matrix2:
if 'correlated' in targetGene:
if rho!=1:
matrix2.append(matrix[ind])
row_header2.append(rh)
if targetGeneValues not in matrix2: ### gene ID systems can be different between source and query
matrix2.append(targetGeneValues)
row_header2.append(targetGeneID)
try:correlated_symbols[gene_to_symbol[rh][0]]=ind
except Exception: correlated_symbols[rh]=ind
#print targetGeneValues, targetGene;sys.exit()
else:
matrix2.append(matrix[ind])
row_header2.append(rh)
try: correlated_symbols[gene_to_symbol[rh][0]]=ind
except Exception: correlated_symbols[rh]=ind
#if rho!=1: print gene_to_symbol[rh][0],'pos',targetGeneID
#sys.exit()
rho_results.reverse()
            for (rho,ind) in rho_results[:limit]: ### Get the top anticorrelated genes (up to the limit) plus the gene of interest
if rho<-1*rho_cutoff and 'positive' not in targetGene:
rh = row_header[ind]
#if gene_to_symbol[rh][0] in targetGenes:anticorrelated.append(gene_to_symbol[rh][0])
#anticorrelated.append(gene_to_symbol[rh][0])
if len(row_header2)<100 or multipleGenes:
rh = row_header[ind]
if matrix[ind] not in matrix2:
if 'correlated' in targetGene:
if rho!=1:
matrix2.append(matrix[ind])
row_header2.append(rh)
if targetGeneValues not in matrix2:
matrix2.append(targetGeneValues)
row_header2.append(targetGeneID)
try: correlated_symbols[gene_to_symbol[rh][0]]=ind
except Exception: correlated_symbols[rh]=ind
#print targetGeneValues, targetGene;sys.exit()
else:
matrix2.append(matrix[ind])
row_header2.append(rh)
try: correlated_symbols[gene_to_symbol[rh][0]]=ind
except Exception: correlated_symbols[rh]=ind
#if rho!=1: print gene_to_symbol[rh][0],'neg',targetGeneID
try:
### print overlapping input genes that are correlated
if len(correlated_symbols)>0:
potentially_redundant=[]
for i in targetGenes:
if i in correlated_symbols:
if i != targetGeneID: potentially_redundant.append((i,correlated_symbols[i]))
if len(potentially_redundant)>0:
### These are intra-correlated genes based on the original filtered query
#print targetGeneID, potentially_redundant
for (rh,ind) in potentially_redundant:
matrix2_alt.append(matrix[ind])
row_header2_alt.append(rh)
rho_results.reverse()
#print targetGeneID, correlated_symbols, rho_results[:5]
except Exception:
pass
k+=1
#print targetGeneID+'\t'+str(len(correlated))+'\t'+str(len(anticorrelated))
#sys.exit()
if 'IntraCorrelatedOnly' in targetGene:
matrix2 = matrix2_alt
row_header2 = row_header2_alt
for r in row_header2:
try:
row_header3.append(gene_to_symbol[r][0])
except Exception: row_header3.append(r)
row_header2 = row_header3
#print len(row_header2),len(row_header3),len(matrix2);sys.exit()
matrix2.reverse() ### Display from top-to-bottom rather than bottom-to-top (this is how the clusters are currently ordered in the heatmap)
row_header2.reverse()
if 'amplify' not in targetGene:
row_method = None ### don't cluster the rows (row_method)
if 'amplify' not in targetGene and 'correlated' not in targetGene:
            ### reorder according to the original target gene order
matrix_temp=[]
header_temp=[]
#print targetGenes
for symbol in targetGenes:
if symbol in matrix_db:
matrix_temp.append(matrix_db[symbol]); header_temp.append(symbol)
#print len(header_temp), len(matrix_db)
if len(header_temp) >= len(matrix_db): ### Hence it worked and all IDs are the same type
matrix2 = matrix_temp
row_header2 = header_temp
if transpose:
        matrix2 = map(numpy.array, zip(*matrix2)) ### transposes and converts the rows to numpy arrays
column_header, row_header2 = row_header2, column_header
exclude=[]
#exclude = excludeHighlyCorrelatedHits(numpy.array(matrix2),row_header2)
exportData.write(string.join(['UID']+column_header,'\t')+'\n') ### title row export
i=0
for row_id in row_header2:
if ':' in row_id:
a,b = string.split(row_id,':')[:2]
if 'ENS' in a:
try: row_id=string.replace(row_id,a,gene_to_symbol[a][0])
except Exception,e: pass
row_header2[i] = row_id
elif 'ENS' in row_id and ' ' in row_id:
row_id = string.split(row_id, ' ')[1]
row_header2[i] = row_id
elif ' ' in row_id:
try: a,b = string.split(row_id, ' ')
            except Exception: a = 1; b=2 ### ensure a != b when the split fails
if a==b:
row_id = a
if row_id not in exclude:
exportData.write(string.join([row_id]+map(str,matrix2[i]),'\t')+'\n') ### export values
i+=1
print len(row_header2), 'top-correlated IDs'
exportData.close()
return matrix2,row_header2,column_header,row_method
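### --- Editor's sketch: markerFinder.simpleScipyPearson is external to this
### file, so this hypothetical stand-in shows the rho-ranking idea used above.
### It correlates every row of a matrix against a target row with
### scipy.stats.pearsonr and returns (rho, row_index) pairs, best first
### (rows containing NaNs are not handled here).
def _rank_by_pearson(matrix, target_values):
    from scipy import stats
    rho_results = []
    for ind in range(len(matrix)):
        rho = stats.pearsonr(matrix[ind], target_values)[0]
        rho_results.append((rho, ind))
    rho_results.sort(); rho_results.reverse()   ### most correlated first
    return rho_results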
def numpyCorrelationMatrixGeneStore(x,rows,genes,gene_to_symbol):
### Decided not to use since it would require writing out the whole correlation matrix which is huge (1+GB) and time-intensive to import
start = time.time()
output_file = string.replace(originalFilename,'.txt','.corrmatrix')
status = verifyFile(output_file)
gene_correlations={}
if status == 'found':
def splitInt(x):
rho,ind = string.split(x,'|')
return (float(rho),int(float(ind)))
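### e.g. a stored value '0.987|42' (hypothetical) yields splitInt('0.987|42') -> (0.987, 42),
### i.e. the truncated rho and the row index it was computed against (see the export format below)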
for line in open(output_file,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
scores = map(lambda x: splitInt(x), t[1:])
gene_correlations[t[0]] = scores
else:
eo=export.ExportFile(output_file)
#D1 = numpy.ma.corrcoef(x)
D1 = numpy.corrcoef(x)
i=0
for score_ls in D1:
scores = []
try: symbol = gene_to_symbol[rows[i]][0]
except Exception: symbol = '$'
if rows[i] in genes or symbol in genes:
k=0
for v in score_ls:
if str(v)!='nan':
scores.append((v,k))
k+=1
scores.sort()
scores.reverse()
if len(symbol)==1: symbol = rows[i]
gene_correlations[symbol] = scores
export_values = [symbol]
for (v,k) in scores: ### re-import next time to save time
export_values.append(str(v)[:5]+'|'+str(k))
eo.write(string.join(export_values,'\t')+'\n')
i+=1
eo.close()
print len(gene_correlations)
print time.time() - start, 'seconds';sys.exit()
return gene_correlations
def numpyCorrelationMatrixGene(x,rows,genes,gene_to_symbol):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides import warnings
#D1 = numpy.ma.corrcoef(x)
D1 = numpy.corrcoef(x)
i=0
gene_correlations={}
for score_ls in D1:
scores = []
try: symbol = gene_to_symbol[rows[i]][0]
except Exception: symbol = '$'
if rows[i] in genes or symbol in genes:
k=0
for v in score_ls:
if str(v)!='nan':
scores.append((v,k))
k+=1
scores.sort()
scores.reverse()
if len(symbol)==1: symbol = rows[i]
gene_correlations[symbol] = scores
i+=1
return gene_correlations
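### numpy.corrcoef(x) returns an n_rows x n_rows matrix of Pearson correlations
### between the rows of x; a toy example with hypothetical data:
#x = numpy.array([[1.0,2.0,3.0],[2.0,4.0,6.0],[3.0,2.0,1.0]])
#D1 = numpy.corrcoef(x) ### D1[0][1] == 1.0, D1[0][2] == -1.0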
def runHCOnly(filename,graphics,Normalize=False):
""" Simple method for hieararchical clustering with defaults defined by the function rather than the user (see above function) """
global root_dir
global graphic_link
global inputFilename
global GroupDB
global allowLargeClusters
global runGOElite
global EliteGeneSets
runGOElite = False
EliteGeneSets=[]
allowLargeClusters = False
###############
global inputFilename
global originalFilename
global GroupDB
global justShowTheseIDs
global targetGeneIDs
global normalize
global species
global storeGeneSetName
targetGene=[]
filterByPathways=False
###############
graphic_link=graphics ### Store all locations of pngs
inputFilename = filename ### Used when calling R
root_dir = findParentDir(filename)
if 'ExpressionOutput/Clustering' in root_dir:
root_dir = string.replace(root_dir,'ExpressionOutput/Clustering','DataPlots')
elif 'ExpressionOutput' in root_dir:
root_dir = string.replace(root_dir,'ExpressionOutput','DataPlots') ### Applies to clustering of LineageProfiler results
else:
root_dir += '/DataPlots/'
try: os.mkdir(root_dir) ### May need to create this directory
except Exception: None
row_method = 'average'
column_method = 'weighted'
row_metric = 'cosine'
column_metric = 'cosine'
if 'Lineage' in filename or 'Elite' in filename:
color_gradient = 'red_white_blue'
else:
color_gradient = 'yellow_black_blue'
color_gradient = 'red_black_sky'
matrix, column_header, row_header, dataset_name, group_db = importData(filename,Normalize=Normalize)
GroupDB = group_db
runHierarchicalClustering(matrix, row_header, column_header, dataset_name,
row_method, row_metric, column_method, column_metric, color_gradient, display=False, Normalize=Normalize)
return graphic_link
def runPCAonly(filename,graphics,transpose,showLabels=True,plotType='3D',display=True,
algorithm='SVD',geneSetName=None, species=None, zscore=True, colorByGene=None,
reimportModelScores=True):
global root_dir
global graphic_link
graphic_link=graphics ### Store all locations of pngs
root_dir = findParentDir(filename)
root_dir = string.replace(root_dir,'ExpressionOutput/Clustering','DataPlots')
root_dir = string.replace(root_dir,'ExpressionInput','DataPlots')
if 'DataPlots' not in root_dir:
root_dir += '/DataPlots/'
try: os.mkdir(root_dir) ### May need to create this directory
except Exception: None
### Transpose matrix and build PCA
geneFilter=None
if algorithm == 't-SNE' and reimportModelScores:
dataset_name = string.split(filename,'/')[-1][:-4]
try:
### if the scores are present, we only need to import the genes of interest (save time importing large matrices)
importtSNEScores(root_dir+dataset_name+'-tSNE_scores.txt')
if colorByGene == None:
geneFilter = [''] ### It won't import the matrix, basically
elif ' ' in colorByGene:
geneFilter = string.split(colorByGene,' ')
else:
geneFilter = [colorByGene]
except Exception:
geneFilter = [''] ### It won't import the matrix, basically
matrix, column_header, row_header, dataset_name, group_db = importData(filename,zscore=zscore,geneFilter=geneFilter)
if transpose == False: ### We normally transpose the data, so if True, we don't transpose (I know, it's confusing)
matrix = map(numpy.array, zip(*matrix)) ### transpose the matrix (zip yields tuples, converted to numpy arrays)
column_header, row_header = row_header, column_header
if (len(column_header)>1000 or len(row_header)>1000) and algorithm != 't-SNE':
print 'Performing Principal Component Analysis (please be patient)...'
#PrincipalComponentAnalysis(numpy.array(matrix), row_header, column_header, dataset_name, group_db, display=True)
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
if algorithm == 't-SNE':
matrix = map(numpy.array, zip(*matrix)) ### transpose the matrix (zip yields tuples, converted to numpy arrays)
column_header, row_header = row_header, column_header
tSNE(numpy.array(matrix),column_header,dataset_name,group_db,display=display,
showLabels=showLabels,row_header=row_header,colorByGene=colorByGene,species=species,
reimportModelScores=reimportModelScores)
elif plotType == '3D':
try: PCA3D(numpy.array(matrix), row_header, column_header, dataset_name, group_db,
display=display, showLabels=showLabels, algorithm=algorithm, geneSetName=geneSetName,
species=species, colorByGene=colorByGene)
except Exception:
print traceback.format_exc()
PrincipalComponentAnalysis(numpy.array(matrix), row_header, column_header,
dataset_name, group_db, display=display, showLabels=showLabels, algorithm=algorithm,
geneSetName=geneSetName, species=species, colorByGene=colorByGene)
else:
PrincipalComponentAnalysis(numpy.array(matrix), row_header, column_header, dataset_name,
group_db, display=display, showLabels=showLabels, algorithm=algorithm,
geneSetName=geneSetName, species=species, colorByGene=colorByGene)
return graphic_link
def outputClusters(filenames,graphics,Normalize=False,Species=None,platform=None,vendor=None):
""" Peforms PCA and Hiearchical clustering on exported log-folds from AltAnalyze """
global root_dir
global graphic_link
global inputFilename
global GroupDB
global allowLargeClusters
global EliteGeneSets
EliteGeneSets=[]
global runGOElite
runGOElite = False
allowLargeClusters=False
graphic_link=graphics ### Store all locations of pngs
filename = filenames[0] ### This is the file to cluster with "significant" gene changes
inputFilename = filename ### Used when calling R
root_dir = findParentDir(filename)
root_dir = string.replace(root_dir,'ExpressionOutput/Clustering','DataPlots')
### Transpose matrix and build PCA
original = importData(filename,Normalize=Normalize)
matrix, column_header, row_header, dataset_name, group_db = original
matrix = map(numpy.array, zip(*matrix)) ### transpose the matrix (zip yields tuples, converted to numpy arrays)
column_header, row_header = row_header, column_header
if len(row_header)<700000 and len(column_header)<700000 and len(column_header)>2:
PrincipalComponentAnalysis(numpy.array(matrix), row_header, column_header, dataset_name, group_db)
else:
print 'SKIPPING PCA!!! - Your dataset file is over or under the recommended size limit for clustering (>7000 rows). Please cluster later using "Additional Analyses".'
row_method = 'average'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'red_white_blue'
color_gradient = 'red_black_sky'
global species
species = Species
if 'LineageCorrelations' not in filename and 'Zscores' not in filename:
EliteGeneSets=['GeneOntology']
runGOElite = True
### Generate Significant Gene HeatMap
matrix, column_header, row_header, dataset_name, group_db = original
GroupDB = group_db
runHierarchicalClustering(matrix, row_header, column_header, dataset_name, row_method, row_metric, column_method, column_metric, color_gradient, Normalize=Normalize)
### Generate Outlier and other Significant Gene HeatMap
for filename in filenames[1:]:
inputFilename = filename
matrix, column_header, row_header, dataset_name, group_db = importData(filename,Normalize=Normalize)
GroupDB = group_db
try:
runHierarchicalClustering(matrix, row_header, column_header, dataset_name, row_method, row_metric, column_method, column_metric, color_gradient, Normalize=Normalize)
except Exception: print 'Could not cluster',inputFilename,', file not found'
return graphic_link
def importEliteGeneAssociations(gene_filename):
fn = filepath(gene_filename)
x=0; fold_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if data[0]=='#': x=0
elif x==0: x=1
else:
geneid=t[0];symbol=t[1]
fold = 0
try:
if '|' in t[6]:
fold = float(string.split(t[6],'|')[0]) ### Sometimes there are multiple '|'-delimited folds for a gene (multiple probesets)
except Exception:
None
try: fold=float(t[6])
except Exception: None
fold_db[symbol] = fold
return fold_db
def importPathwayLevelFolds(filename):
fn = filepath(filename)
x=0
zscore_db={} ### the original defined an unused 'folds_db', leaving zscore_db below undefined
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if len(data)==0: x=0
elif x==0:
z_score_indexes = []; i=0
z_headers = []
for header in t:
if 'z_score.' in header:
z_score_indexes.append(i)
header = string.split(header,'z_score.')[1] ### Get rid of z_score.
if 'AS.' in header:
header = string.split(header,'.p')[0] ### Remove statistics details
header = 'AS.'+string.join(string.split(header,'_')[2:],'_') ### species and array type notation
else:
header = string.join(string.split(header,'-')[:-2],'-')
if '-fold' in header:
header = string.join(string.split(header,'-')[:-1],'-')
z_headers.append(header)
i+=1
headers = string.join(['Gene-Set Name']+z_headers,'\t')+'\n'
x=1
else:
term_name=t[1];geneset_type=t[2]
zscores = map(lambda x: t[x], z_score_indexes)
max_z = max(map(float, zscores)) ### Used to rank terms so only the top 60 are exported below
line = string.join([term_name]+zscores,'\t')+'\n'
try: zscore_db[geneset_type].append((max_z,line))
except Exception: zscore_db[geneset_type] = [(max_z,line)]
exported_files = []
for geneset_type in zscore_db:
### Create an input file for hierarchical clustering in a child directory (Heatmaps)
clusterinput_filename = findParentDir(filename)+'/Heatmaps/Clustering-Zscores-'+geneset_type+'.txt'
exported_files.append(clusterinput_filename)
export_text = export.ExportFile(clusterinput_filename)
export_text.write(headers) ### Header is the same for each file
zscore_db[geneset_type].sort()
zscore_db[geneset_type].reverse()
i=0 ### count the entries written
for (max_z,line) in zscore_db[geneset_type]:
if i<60:
export_text.write(line) ### Write z-score values and row names
i+=1
export_text.close()
return exported_files
def importOverlappingEliteScores(filename):
fn = filepath(filename)
x=0
zscore_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if len(data)==0: x=0
elif x==0:
z_score_indexes = []; i=0
z_headers = []
for header in t:
if 'z_score.' in header:
z_score_indexes.append(i)
header = string.split(header,'z_score.')[1] ### Get rid of z_score.
if 'AS.' in header:
header = string.split(header,'.p')[0] ### Remove statistics details
header = 'AS.'+string.join(string.split(header,'_')[2:],'_') ### species and array type notation
else:
header = string.join(string.split(header,'-')[:-2],'-')
if '-fold' in header:
header = string.join(string.split(header,'-')[:-1],'-')
z_headers.append(header)
i+=1
headers = string.join(['Gene-Set Name']+z_headers,'\t')+'\n'
x=1
else:
term_name=t[1];geneset_type=t[2]
zscores = map(lambda x: t[x], z_score_indexes)
max_z = max(map(float, zscores)) ### Used to rank terms so only the top 60 are exported below
line = string.join([term_name]+zscores,'\t')+'\n'
try: zscore_db[geneset_type].append((max_z,line))
except Exception: zscore_db[geneset_type] = [(max_z,line)]
exported_files = []
for geneset_type in zscore_db:
### Create an input file for hierarchical clustering in a child directory (Heatmaps)
clusterinput_filename = findParentDir(filename)+'/Heatmaps/Clustering-Zscores-'+geneset_type+'.txt'
exported_files.append(clusterinput_filename)
export_text = export.ExportFile(clusterinput_filename)
export_text.write(headers) ### Header is the same for each file
zscore_db[geneset_type].sort()
zscore_db[geneset_type].reverse()
i=0 ### count the entries written
for (max_z,line) in zscore_db[geneset_type]:
if i<60:
export_text.write(line) ### Write z-score values and row names
i+=1
export_text.close()
return exported_files
def buildGraphFromSIF(mod,species,sif_filename,ora_input_dir):
""" Imports a SIF and corresponding gene-association file to get fold changes for standardized gene-symbols """
global SpeciesCode; SpeciesCode = species
mod = 'Ensembl'
if sif_filename == None:
### Used for testing only
sif_filename = '/Users/nsalomonis/Desktop/dataAnalysis/collaborations/WholeGenomeRVista/Alex-Figure/GO-Elite_results/CompleteResults/ORA_pruned/up-2f_p05-WGRV.sif'
ora_input_dir = '/Users/nsalomonis/Desktop/dataAnalysis/collaborations/WholeGenomeRVista/Alex-Figure/up-stringent/up-2f_p05.txt'
#sif_filename = 'C:/Users/Nathan Salomonis/Desktop/Endothelial_Kidney/GO-Elite/GO-Elite_results/CompleteResults/ORA_pruned/GE.b_vs_a-fold2.0_rawp0.05-local.sif'
#ora_input_dir = 'C:/Users/Nathan Salomonis/Desktop/Endothelial_Kidney/GO-Elite/input/GE.b_vs_a-fold2.0_rawp0.05.txt'
gene_filename = string.replace(sif_filename,'.sif','_%s-gene-associations.txt') % mod
gene_filename = string.replace(gene_filename,'ORA_pruned','ORA_pruned/gene_associations')
pathway_name = string.split(sif_filename,'/')[-1][:-4]
output_filename = None
try: fold_db = importEliteGeneAssociations(gene_filename)
except Exception: fold_db={}
if ora_input_dir != None:
### This is an optional accessory function that adds fold changes from genes that are NOT in the GO-Elite pruned results (TFs regulating these genes)
try: fold_db = importDataSimple(ora_input_dir,species,fold_db,mod)
except Exception: None
try:
### Alternative approaches depending on the availability of GraphViz
#displaySimpleNetXGraph(sif_filename,fold_db,pathway_name)
output_filename = iGraphSimple(sif_filename,fold_db,pathway_name)
except Exception:
print traceback.format_exc()
try: displaySimpleNetwork(sif_filename,fold_db,pathway_name)
except Exception: None ### GraphViz problem
return output_filename
def iGraphSimple(sif_filename,fold_db,pathway_name):
""" Build a network export using iGraph and Cairo """
edges = importSIF(sif_filename)
id_color_db = WikiPathways_webservice.getHexadecimalColorRanges(fold_db,'Genes')
output_filename = iGraphDraw(edges,pathway_name,filePath=sif_filename,display=True,graph_layout='spring',colorDB=id_color_db)
return output_filename
def iGraphDraw(edges, pathway_name, labels=None, graph_layout='shell', display=False,
node_size=700, node_color='yellow', node_alpha=0.5, node_text_size=7,
edge_color='black', edge_alpha=0.5, edge_thickness=2, edges_pos=.3,
text_font='sans-serif',filePath='test',colorDB=None):
### Here node = vertex
output_filename=None
if len(edges) > 700 and 'AltAnalyze' not in pathway_name:
print findFilename(filePath), 'too large to visualize...'
elif len(edges) > 3000:
print findFilename(filePath), 'too large to visualize...'
else:
arrow_scaler = 1 ### To scale the arrow
if len(edges)>40: arrow_scaler = .9
vars = formatiGraphEdges(edges,pathway_name,colorDB,arrow_scaler)
vertices,iGraph_edges,vertice_db,label_list,shape_list,vertex_size, color_list, vertex_label_colors, arrow_width, edge_colors = vars
if vertices>0:
import igraph
gr = igraph.Graph(vertices, directed=True)
canvas_scaler = 0.8 ### To scale the canvas size (bounding box)
if vertices<15: canvas_scaler = 0.5
elif vertices<25: canvas_scaler = .70
elif vertices>35:
canvas_scaler += len(iGraph_edges)/400.00
filePath,canvas_scaler = correctedFilePath(filePath,canvas_scaler) ### adjust for GO-Elite
#print vertices, len(iGraph_edges), pathway_name, canvas_scaler
canvas_size = (600*canvas_scaler,600*canvas_scaler)
gr.add_edges(iGraph_edges)
gr.vs["label"] = label_list
gr.vs["shape"] = shape_list
gr.vs["size"] = vertex_size
gr.vs["label_dist"] = [1.3]*vertices
gr.vs["label_size"] = [12]*vertices
gr.vs["color"]=color_list
gr.vs["label_color"]=vertex_label_colors
gr.es["color"] = edge_colors
gr.es["arrow_size"]=arrow_width
output_filename = '%s.pdf' % filePath[:-4]
output_filename = output_filename.encode('ascii','ignore') ### removes the unicode u preceding the filename
layout = "kk"
visual_style = {}
#visual_style["layout"] = layout #The default is auto, which selects a layout algorithm automatically based on the size and connectedness of the graph
visual_style["margin"] = 50 ### white-space around the network (see vertex size)
visual_style["bbox"] = canvas_size
igraph.plot(gr,output_filename, **visual_style)
output_filename = '%s.png' % filePath[:-4]
output_filename = output_filename.encode('ascii','ignore') ### removes the unicode u preceding the filename
if vertices <15: gr,visual_style = increasePlotSize(gr,visual_style)
igraph.plot(gr,output_filename, **visual_style)
#surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
return output_filename
def correctedFilePath(filePath,canvas_scaler):
""" Move this file to it's own network directory for GO-Elite """
if 'ORA_pruned' in filePath:
filePath = string.replace(filePath,'CompleteResults/ORA_pruned','networks')
try: os.mkdir(findParentDir(filePath))
except Exception: pass
canvas_scaler = canvas_scaler*1.3 ### These graphs tend to be more dense and difficult to read
return filePath,canvas_scaler
def increasePlotSize(gr,visual_style):
### To display the plot better, need to manually increase the size of everything
factor = 2
object_list = ["size","label_size"]
for i in object_list:
new=[]
for k in gr.vs[i]:
new.append(k*factor)
gr.vs[i] = new
new=[]
for i in gr.es["arrow_size"]:
new.append(i*factor)
gr.es["arrow_size"] = new ### assign the scaled arrow sizes back (the original computed but never stored them)
new=[]
for i in visual_style["bbox"]:
new.append(i*factor)
visual_style["bbox"] = new
visual_style["margin"]=visual_style["margin"]*factor
return gr,visual_style
def getHMDBDataSimple():
### Determine which IDs are metabolites
program_type,database_dir = unique.whatProgramIsThis()
filename = database_dir+'/'+SpeciesCode+'/gene/HMDB.txt'
symbol_hmdb_db={}
x=0
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0: x=1
else:
t = string.split(data,'\t')
hmdb_id = t[0]; symbol = t[1]; ProteinNames = t[-1]
symbol_hmdb_db[symbol]=hmdb_id
return symbol_hmdb_db
def formatiGraphEdges(edges,pathway_name,colorDB,arrow_scaler):
### iGraph requires a defined vertex count, with edges expressed as pairs of numeric vertex indices
edge_db={}
edges2=[]
vertice_db={}
shape_list=[] ### node shape in order
label_list=[] ### Names of each vertex aka node
vertex_size=[]
color_list=[]
vertex_label_colors=[]
arrow_width=[] ### Indicates the presence or absence of an arrow
edge_colors=[]
k=0
try: symbol_hmdb_db = getHMDBDataSimple()
except Exception: symbol_hmdb_db={}
for (node1,node2,type) in edges:
edge_color = 'grey'
### Assign nodes to a numeric vertex ID
if 'TF' in pathway_name or 'WGRV' in pathway_name:
pathway = node1 ### This is the regulating TF
else:
pathway = node2 ### This is the pathway
if 'drugInteraction' == type: edge_color = "purple"
elif 'TBar' == type: edge_color = 'blue'
elif 'microRNAInteraction' == type: edge_color = '#53A26D'
elif 'transcription' in type: edge_color = '#FF7D7D'
if 'AltAnalyze' in pathway_name: default_node_color = 'grey'
else: default_node_color = "yellow"
if node1 in vertice_db: v1=vertice_db[node1]
else: #### Left hand node
### Only time the vertex is added to the below attribute lists
v1=k; label_list.append(node1)
rs = 1 ### relative size
if ('TF' in pathway_name or 'WGRV' in pathway_name) and 'AltAnalyze' not in pathway_name:
shape_list.append('rectangle')
vertex_size.append(15)
vertex_label_colors.append('blue')
else:
if 'drugInteraction' == type:
rs = 0.75
shape_list.append('rectangle')
vertex_label_colors.append('purple')
default_node_color = "purple"
elif 'Metabolic' == type and node1 in symbol_hmdb_db:
shape_list.append('triangle-up')
vertex_label_colors.append('blue') #dark green
default_node_color = 'grey' #'#008000'
elif 'microRNAInteraction' == type:
rs = 0.75
shape_list.append('triangle-up')
vertex_label_colors.append('#008000') #dark green
default_node_color = 'grey' #'#008000'
else:
shape_list.append('circle')
vertex_label_colors.append('black')
vertex_size.append(10*rs)
vertice_db[node1]=v1; k+=1
try:
color = '#'+string.upper(colorDB[node1])
color_list.append(color) ### Hex color
except Exception:
color_list.append(default_node_color)
if node2 in vertice_db: v2=vertice_db[node2]
else: #### Right hand node
### Only time the vertex is added to the below attribute lists
v2=k; label_list.append(node2)
if 'TF' in pathway_name or 'WGRV' in pathway_name:
shape_list.append('circle')
vertex_size.append(10)
vertex_label_colors.append('black')
default_node_color = "grey"
elif 'AltAnalyze' not in pathway_name:
shape_list.append('rectangle')
vertex_size.append(15)
vertex_label_colors.append('blue')
default_node_color = "grey"
elif 'Metabolic' == type and node2 in symbol_hmdb_db:
shape_list.append('triangle-up')
vertex_label_colors.append('blue') #dark green
default_node_color = 'grey' #'#008000'
else:
shape_list.append('circle')
vertex_size.append(10)
vertex_label_colors.append('black')
default_node_color = "grey"
vertice_db[node2]=v2; k+=1
try:
color = '#'+string.upper(colorDB[node2])
color_list.append(color) ### Hex color
except Exception: color_list.append(default_node_color)
edges2.append((v1,v2))
if type == 'physical': arrow_width.append(0)
else: arrow_width.append(arrow_scaler)
try: edge_db[v1].append(v2)
except Exception: edge_db[v1]=[v2]
try: edge_db[v2].append(v1)
except Exception: edge_db[v2]=[v1]
edge_colors.append(edge_color)
vertices = len(edge_db) ### This is the number of nodes
edge_db = eliminate_redundant_dict_values(edge_db)
vertice_db2={} ### Invert
for node in vertice_db:
vertice_db2[vertice_db[node]] = node
#print len(edges2), len(edge_colors)
print vertices, 'and', len(edges2),'edges in the iGraph network.'
return vertices,edges2,vertice_db2, label_list, shape_list, vertex_size, color_list, vertex_label_colors, arrow_width, edge_colors
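### igraph identifies vertices by consecutive integers, hence the mapping above;
### the idea as a minimal sketch with hypothetical node names:
#vertice_db={}
#for node in ('TP53','MDM2','TP53'):
#    if node not in vertice_db: vertice_db[node] = len(vertice_db)
### an edge ('TP53','MDM2') then becomes (0,1) for igraph.Graph.add_edges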
def eliminate_redundant_dict_values(database):
db1={}
for key in database: vals = unique.unique(database[key]); vals.sort(); db1[key] = vals ### avoids shadowing the builtin 'list'
return db1
def importDataSimple(filename,species,fold_db,mod):
""" Imports an input ID file and converts those IDs to gene symbols for analysis with folds """
import GO_Elite
import OBO_import
import gene_associations
fn = filepath(filename)
x=0
metabolite_codes = ['Ck','Ca','Ce','Ch','Cp']
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if data[0]=='#': x=0
elif x==0: x=1
else:
if x == 1:
system_code = t[1]
if system_code in metabolite_codes:
mod = 'HMDB'
system_codes,source_types,mod_types = GO_Elite.getSourceData()
try: source_data = system_codes[system_code]
except Exception:
source_data = None
if 'ENS' in t[0]: source_data = system_codes['En']
else: ### Assume the file is composed of gene symbols
source_data = system_codes['Sy']
if source_data == mod:
source_is_mod = True
elif source_data==None:
None ### Skip this
else:
source_is_mod = False
mod_source = mod+'-'+source_data+'.txt'
gene_to_source_id = gene_associations.getGeneToUid(species,('hide',mod_source))
source_to_gene = OBO_import.swapKeyValues(gene_to_source_id)
try: gene_to_symbol = gene_associations.getGeneToUid(species,('hide',mod+'-Symbol'))
except Exception: gene_to_symbol={}
try: met_to_symbol = gene_associations.importGeneData(species,'HMDB',simpleImport=True)
except Exception: met_to_symbol={}
for i in met_to_symbol: gene_to_symbol[i] = met_to_symbol[i] ### Add metabolite names
x+=1
if source_is_mod == True:
if t[0] in gene_to_symbol:
symbol = gene_to_symbol[t[0]][0]
try: fold_db[symbol] = float(t[2])
except Exception: fold_db[symbol] = 0
else:
fold_db[t[0]] = 0 ### If not found (wrong ID with the wrong system) still try to color the ID in the network as yellow
elif t[0] in source_to_gene:
mod_ids = source_to_gene[t[0]]
try: mod_ids+=source_to_gene[t[2]] ###If the file is a SIF
except Exception:
try: mod_ids+=source_to_gene[t[1]] ###If the file is a SIF
except Exception: None
for mod_id in mod_ids:
if mod_id in gene_to_symbol:
symbol = gene_to_symbol[mod_id][0]
try: fold_db[symbol] = float(t[2]) ### If multiple Ensembl IDs in dataset, only record the last associated fold change
except Exception: fold_db[symbol] = 0
else: fold_db[t[0]] = 0
return fold_db
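### OBO_import.swapKeyValues is assumed to invert a dict-of-lists, e.g. a
### hypothetical {'ENSG00000141510':['TP53']} becomes {'TP53':['ENSG00000141510']},
### which is how source IDs are mapped back to MOD gene IDs above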
def clusterPathwayZscores(filename):
""" Imports a overlapping-results file and exports an input file for hierarchical clustering and clusters """
### This method is not fully written or in use yet - not sure if needed
if filename == None:
### Only used for testing
filename = '/Users/nsalomonis/Desktop/dataAnalysis/r4_Bruneau_TopHat/GO-Elite/TF-enrichment2/GO-Elite_results/overlapping-results_z-score_elite.txt'
exported_files = importOverlappingEliteScores(filename)
graphic_links=[]
for file in exported_files:
try: graphic_links = runHCOnly(file,graphic_links)
except Exception,e:
#print e
print 'Unable to generate cluster due to dataset incompatibility.'
print 'Clustering of overlapping-results_z-score complete (see "GO-Elite_results/Heatmaps" directory)'
def clusterPathwayMeanFolds():
""" Imports the pruned-results file and exports an input file for hierarchical clustering and clusters """
filename = '/Users/nsalomonis/Desktop/User Diagnostics/Mm_spinal_cord_injury/GO-Elite/GO-Elite_results/pruned-results_z-score_elite.txt'
exported_files = importPathwayLevelFolds(filename)
def VennDiagram():
f = pylab.figure()
ax = f.gca()
rad = 1.4
c1 = Circle((-1,0),rad, alpha=.2, fc ='red',label='red')
c2 = Circle((1,0),rad, alpha=.2, fc ='blue',label='blue')
c3 = Circle((0,1),rad, alpha=.2, fc ='green',label='g')
#pylab.plot(c1,color='green',marker='o',markersize=7,label='blue')
#ax.add_patch(c1)
ax.add_patch(c2)
ax.add_patch(c3)
ax.set_xlim(-3,3)
ax.set_ylim(-3,3)
pylab.show()
def plotHistogram(filename):
matrix, column_header, row_header, dataset_name, group_db = importData(filename)
transpose=True
if transpose: ### Transpose the data matrix
print 'Transposing the data matrix'
matrix = map(numpy.array, zip(*matrix)) ### transpose the matrix (zip yields tuples, converted to numpy arrays)
column_header, row_header = row_header, column_header
pylab.figure()
for i in matrix:
pylab.hist(i, 200, normed=0, histtype='step', cumulative=-1)
#pylab.hist(matrix, 50, cumulative=-1)
pylab.show()
def multipleSubPlots(filename,uids,SubPlotType='column'):
#uids = [uids[-1]]+uids[:-1]
str_uids = string.join(uids,'_')
matrix, column_header, row_header, dataset_name, group_db = importData(filename,geneFilter=uids)
fig = pylab.figure()
def ReplaceZeros(val,min_val):
if val == 0:
return min_val
else: return val
### Order the graphs based on the original gene order
new_row_header=[]
matrix2 = []
for uid in uids:
if uid in row_header:
ind = row_header.index(uid)
new_row_header.append(uid)
try: update_exp_vals = map(lambda x: ReplaceZeros(x,0.0001),matrix[ind])
except Exception: print uid, len(matrix[ind]);sys.exit()
matrix2.append(update_exp_vals)
matrix = numpy.array(matrix2)
row_header = new_row_header
#print row_header
color_list = ['r', 'b', 'y', 'g', 'w', 'k', 'm']
groups=[]
for sample in column_header:
group = group_db[sample][0]
if group not in groups:
groups.append(group)
fontsize=10
if len(groups)>0:
color_list = []
if len(groups)==9:
cm = matplotlib.colors.ListedColormap(['#80C241', '#118943', '#6FC8BB', '#ED1D30', '#F26E21','#8051A0', '#4684C5', '#FBD019','#3A52A4'])
elif len(groups)==3:
cm = matplotlib.colors.ListedColormap(['#4684C4','#FAD01C','#7D7D7F'])
elif len(groups)==5:
cm = matplotlib.colors.ListedColormap(['#41449B','#6182C1','#9DDAEA','#42AED0','#7F7F7F'])
else:
cm = pylab.cm.get_cmap('gist_rainbow') #gist_ncar
for i in range(len(groups)):
color_list.append(cm(1.*i/len(groups))) # color will now be an RGBA tuple
for i in range(len(matrix)):
ax = pylab.subplot(5,1,1+i)
OY = matrix[i]
pylab.xlim(0,len(OY))
pylab.subplots_adjust(right=0.85)
ind = np.arange(len(OY))
if SubPlotType=='column':
index=-1
for v in OY:
index+=1
group = group_db[column_header[index]][0]
pylab.bar(index, v,edgecolor='black',linewidth=0,color=color_list[groups.index(group)])
width = .35
#print i ,row_header[i]
if SubPlotType=='plot':
pylab.plot(ind,OY) ### line plot of the same values (the original referenced undefined x,y)
ax.text(matrix.shape[1]-0.5, i, ' '+row_header[i],fontsize=16)
fig.autofmt_xdate()
pylab.subplots_adjust(hspace = .001)
temp = tic.MaxNLocator(3)
ax.yaxis.set_major_locator(temp)
ax.set_xticks([])
#ax.title.set_visible(False)
#pylab.xticks(ind + width / 2, column_header)
#ax.set_xticklabels(column_header)
#ax.xaxis.set_ticks([-1]+range(len(OY)+1))
#xtickNames = pylab.setp(pylab.gca(), xticklabels=['']+column_header)
#pylab.setp(xtickNames, rotation=90, fontsize=10)
#pylab.show()
pylab.savefig(filename[:-4]+'-'+str_uids+'.pdf')
def simpleTranspose(filename):
fn = filepath(filename)
matrix = []
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,' ')
matrix.append(t)
matrix = map(numpy.array, zip(*matrix)) ### transpose the matrix (zip yields tuples, converted to numpy arrays)
filename = filename[:-4]+'-transposed.txt'
ea = export.ExportFile(filename)
for i in matrix:
ea.write(string.join(i,'\t')+'\n')
ea.close()
def CorrdinateToBed(filename):
fn = filepath(filename)
matrix = []
translation={}
multiExon={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
data = string.replace(data,' ','')
t = string.split(data,'\t')
if '.gtf' in filename:
if 'chr' not in t[0]: chr = 'chr'+t[0]
else: chr = t[0]
start = t[3]; end = t[4]; strand = t[6]; annotation = t[8]
annotation = string.replace(annotation,'gene_id','')
annotation = string.replace(annotation,'transcript_id','')
annotation = string.replace(annotation,'gene_name','')
geneIDs = string.split(annotation,';')
geneID = geneIDs[0]; symbol = geneIDs[3]
else:
chr = t[4]; strand = t[5]; start = t[6]; end = t[7]
#if 'ENS' not in annotation:
t = [chr,start,end,geneID,'0',strand]
#matrix.append(t)
translation[geneID] = symbol
try: multiExon[geneID]+=1
except Exception: multiExon[geneID]=1
filename = filename[:-4]+'-new.bed'
ea = export.ExportFile(filename)
for i in translation:
#ea.write(string.join(i,'\t')+'\n')
ea.write(i+'\t'+translation[i]+'\t'+str(multiExon[i])+'\n')
ea.close()
def SimpleCorrdinateToBed(filename):
fn = filepath(filename)
matrix = []
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
data = string.replace(data,' ','')
t = string.split(data,'\t')
if '.bed' in filename:
print t;sys.exit()
chr = t[4]; strand = t[5]; start = t[6]; end = t[7]
if 'ENS' in t[0]:
t = [chr,start,end,t[0],'0',strand]
matrix.append(t)
filename = filename[:-4]+'-new.bed'
ea = export.ExportFile(filename)
for i in matrix:
ea.write(string.join(i,'\t')+'\n')
ea.close()
def simpleIntegrityCheck(filename):
fn = filepath(filename)
matrix = []
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
data = string.replace(data,' ','')
t = string.split(data,'\t')
matrix.append(t)
filename = filename[:-4]+'-new.bed'
ea = export.ExportFile(filename)
for i in matrix:
ea.write(string.join(i,'\t')+'\n')
ea.close()
def BedFileCheck(filename):
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'-new.bed'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow:
firstRow = False
else:
#if len(t) != 12: print len(t);sys.exit()
ea.write(string.join(t,'\t')+'\n')
ea.close()
def simpleFilter(filename):
fn = filepath(filename)
filename = filename[:-4]+'-new.txt'
ea = export.ExportFile(filename)
matrix = []
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,',')
uid = t[0]
#if '=chr' in t[0]:
if 1==2:
a,b = string.split(t[0],'=')
b = string.replace(b,'_',':')
uid = a+ '='+b
matrix.append(t)
ea.write(string.join([uid]+t[1:],'\t')+'\n')
ea.close()
def test(filename):
symbols2={}
firstLine=True
fn = filepath(filename)
ea = export.ExportFile(filename[:-4]+'-symbols.txt') ### output file handle; 'ea' was never defined in the original (filename assumed)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine=False
header = t
i=0; start=None; alt_start=None
for h in header:
    if h == 'WikiPathways': start=i
    if h == 'Select Protein Classes': alt_start=i
    i+=1
if start == None: start = alt_start ### fall back to the protein-class column
else:
uniprot = t[0]
symbols = string.replace(t[-1],';;',';')
symbols = string.split(symbols,';')
for s in symbols:
if len(s)>0:
symbols2[string.upper(s),uniprot]=[]
for (s,u) in symbols2:
ea.write(string.join([s,u],'\t')+'\n')
ea.close()
def coincentIncedenceTest(exp_file,TFs):
fn = filepath(TFs)
tfs={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
tfs[data]=[]
comparisons={}
for tf1 in tfs:
for tf2 in tfs:
if tf1!=tf2:
temp = [tf1,tf2]
temp.sort()
comparisons[tuple(temp)]=[]
gene_data={}
firstLine=True
fn = filepath(exp_file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if firstLine:
firstLine=False
header = string.split(data,'\t')[1:]
else:
t = string.split(data,'\t')
gene = t[0]
values = map(float,t[1:])
gene_data[gene] = values
filename = TFs[:-4]+'-all-coincident-4z.txt'
ea = export.ExportFile(filename)
comparison_db={}
for comparison in comparisons:
vals1 = gene_data[comparison[0]]
vals2 = gene_data[comparison[1]]
i=0
coincident=[]
for v1 in vals1:
v2 = vals2[i]
#print v1,v2
if v1>1 and v2>1:
coincident.append(i)
i+=1
i=0
population_db={}; coincident_db={}
for h in header:
population=string.split(h,':')[0]
if i in coincident:
try: coincident_db[population]+=1
except Exception: coincident_db[population]=1
try: population_db[population]+=1
except Exception: population_db[population]=1
i+=1
import mappfinder
final_population_percent=[]
for population in population_db:
d = population_db[population]
try: c = coincident_db[population]
except Exception: c = 0
N = float(len(header)) ### num all samples examined
R = float(len(coincident)) ### num all coincident samples for the TFs
n = float(d) ### num all samples in cluster
r = float(c) ### num all coincident samples in cluster
try: z = mappfinder.Zscore(r,n,N,R)
except Exception: z=0
#if 'Gfi1b' in comparison and 'Gata1' in comparison: print N, R, n, r, z
final_population_percent.append([population,str(c),str(d),str(float(c)/float(d)),str(z)])
comparison_db[comparison]=final_population_percent
filtered_comparison_db={}
top_scoring_population={}
for comparison in comparison_db:
max_group=[]
for population_stat in comparison_db[comparison]:
z = float(population_stat[-1])
c = float(population_stat[1])
population = population_stat[0]
max_group.append([z,population])
max_group.sort()
z = max_group[-1][0]
pop = max_group[-1][1]
if z>(1.96)*2 and c>3:
filtered_comparison_db[comparison]=comparison_db[comparison]
top_scoring_population[comparison] = pop,z
firstLine = True
for comparison in filtered_comparison_db:
comparison_alt = string.join(list(comparison),'|')
all_percents=[]
for line in filtered_comparison_db[comparison]:
all_percents.append(line[3])
if firstLine:
all_headers=[]
for line in filtered_comparison_db[comparison]:
all_headers.append(line[0])
ea.write(string.join(['gene-pair']+all_headers+['Top Population','Top Z'],'\t')+'\n')
firstLine=False
pop,z = top_scoring_population[comparison]
ea.write(string.join([comparison_alt]+all_percents+[pop,str(z)],'\t')+'\n')
ea.close()
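### A minimal sketch of the over-representation z-score that mappfinder.Zscore
### is assumed to compute above (the standard MAPPFinder hypergeometric
### approximation); the real mappfinder implementation may differ in detail:
def _zscoreSketch(r,n,N,R):
    ### r = coincident samples in cluster, n = samples in cluster,
    ### N = all samples examined, R = all coincident samples
    import math
    r=float(r); n=float(n); N=float(N); R=float(R)
    if N <= 1: return 0
    expected = n*R/N ### expected coincident count in a cluster of size n
    variance = n*(R/N)*(1-R/N)*(1-(n-1)/(N-1))
    if variance <= 0: return 0
    return (r-expected)/math.sqrt(variance)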
def getlastexon(filename):
filename2 = filename[:-4]+'-last-exon.txt'
ea = export.ExportFile(filename2)
firstLine=True
fn = filepath(filename)
last_gene = 'null'; last_exon=''
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
firstLine=False
else:
gene = t[2]
if gene != last_gene:
if ':E' in last_exon:
gene,exon = string.split(last_exon,':E') ### split the uid into gene and exon
block,region = string.split(exon,'.')
try: ea.write(last_exon+'\n')
except: pass
last_gene = gene
last_exon = t[0]
ea.close()
def replaceWithBinary(filename):
filename2 = filename[:-4]+'-binary.txt'
ea = export.ExportFile(filename2)
firstLine=True
fn = filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstLine:
ea.write(line)
firstLine=False
else:
try: values = map(float,t[1:])
except Exception: print t[1:];sys.exit()
values2=[]
for v in values:
if v == 0: values2.append('0')
else: values2.append('1')
ea.write(string.join([t[0]]+values2,'\t')+'\n')
ea.close()
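### e.g. an input row 'Gene1\t0\t2.5\t0.0' is re-written as 'Gene1\t0\t1\t0'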
def geneMethylationOutput(filename):
filename2 = filename[:-4]+'-binary.txt'
ea = export.ExportFile(filename2)
firstLine=True
fn = filepath(filename)
db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
values = (t[20],t[3]+'-methylation')
db[values]=[]
for value in db:
ea.write(string.join(list(value),'\t')+'\n')
ea.close()
def coincidentIncedence(filename,genes):
exportPairs=False
gene_data=[]
firstLine=True
fn = filepath(filename)
if exportPairs:
filename = filename[:-4]+'_'+genes[0]+'-'+genes[1]+'.txt'
ea = export.ExportFile(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if firstLine:
firstLine=False
header = string.split(data,'\t')[1:]
else:
t = string.split(data,'\t')
gene = t[0]
if gene in genes:
values = map(float,t[1:])
gene_data.append(values)
vals1 = gene_data[0]
vals2 = gene_data[1]
i=0
coincident=[]
for v1 in vals1:
v2 = vals2[i]
#print v1,v2
if v1>1 and v2>1:
coincident.append(i)
i+=1
i=0
population_db={}; coincident_db={}
for h in header:
population=string.split(h,':')[0]
if i in coincident:
try: coincident_db[population]+=1
except Exception: coincident_db[population]=1
try: population_db[population]+=1
except Exception: population_db[population]=1
i+=1
import mappfinder
final_population_percent=[]
for population in population_db:
d = population_db[population]
try: c = coincident_db[population]
except Exception: c = 0
N = float(len(header)) ### num all samples examined
R = float(len(coincident)) ### num all coincident samples for the TFs
n = float(d) ### num all samples in cluster
r = float(c) ### num all coincident samples in cluster
try: z = mappfinder.Zscore(r,n,N,R) ### same capitalization as in coincentIncedenceTest above
except Exception: z = 0
final_population_percent.append([population,str(c),str(d),str(float(c)/float(d)),str(z)])
if exportPairs:
for line in final_population_percent:
ea.write(string.join(line,'\t')+'\n')
ea.close()
else:
return final_population_percent
def extractFeatures(countinp,IGH_gene_file):
import export
ExonsPresent=False
igh_genes=[]
firstLine = True
for line in open(IGH_gene_file,'rU').xreadlines():
if firstLine: firstLine=False
else:
data = cleanUpLine(line)
gene = string.split(data,'\t')[0]
igh_genes.append(gene)
if 'counts.' in countinp:
feature_file = string.replace(countinp,'counts.','IGH.')
fe = export.ExportFile(feature_file)
firstLine = True
for line in open(countinp,'rU').xreadlines():
if firstLine:
fe.write(line)
firstLine=False
else:
feature_info = string.split(line,'\t')[0]
gene = string.split(feature_info,':')[0]
if gene in igh_genes:
fe.write(line)
fe.close()
def filterForJunctions(countinp):
import export
ExonsPresent=False
igh_genes=[]
firstLine = True
count = 0
if 'counts.' in countinp:
feature_file = countinp[:-4]+'-output.txt'
fe = export.ExportFile(feature_file)
firstLine = True
for line in open(countinp,'rU').xreadlines():
if firstLine:
fe.write(line)
firstLine=False
else:
feature_info = string.split(line,'\t')[0]
junction = string.split(feature_info,'=')[0]
if '-' in junction:
fe.write(line)
count+=1
fe.close()
print count
def countIntronsExons(filename):
import export
exon_db={}
intron_db={}
firstLine = True
last_transcript=None
for line in open(filename,'rU').xreadlines():
if firstLine:
firstLine=False
else:
line = line.rstrip()
t = string.split(line,'\t')
transcript = t[-1]
chr = t[1]
strand = t[2]
start = t[3]
end = t[4]
exon_db[chr,start,end]=[]
if transcript==last_transcript:
if strand == '1':
intron_db[chr,last_end,start]=[]
else:
intron_db[chr,last_start,end]=[]
last_end = end
last_start = start
last_transcript = transcript
print len(exon_db)+1, len(intron_db)+1
def importGeneList(gene_list_file):
genesets=[]
genes=[]
for line in open(gene_list_file,'rU').xreadlines():
gene = line.rstrip()
genes.append(gene)
if len(genes)==5:
genesets.append(genes)
genes=[]
if len(genes)>0 and len(genes)<6:
genes+=(5-len(genes))*[gene]
genesets.append(genes)
return genesets
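### e.g. a 7-gene file yields [[g1,g2,g3,g4,g5],[g6,g7,g7,g7,g7]]: the final
### set is padded with its last gene so every comb-plot page gets exactly 5 rows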
def customClean(filename):
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'-new.txt'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow:
firstRow = False
#print len(t)
ea.write(string.join(['UID']+t,'\t')+'\n')
else:
if ';' in t[0]:
uid = string.split(t[0],';')[0]
else:
uid = t[0]
values = map(lambda x: float(x),t[1:])
values.sort()
if values[3]>=1:
ea.write(string.join([uid]+t[1:],'\t')+'\n')
ea.close()
def MakeJunctionFasta(filename):
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'.fasta'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
probeset, seq = string.split(data,'\t')[:2]
ea.write(">"+probeset+'\n')
ea.write(string.upper(seq)+'\n')
ea.close()
def ToppGeneFilter(filename):
import gene_associations, OBO_import
gene_to_symbol = gene_associations.getGeneToUid('Mm',('hide','Ensembl-Symbol'))
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol)
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'-new.txt'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if firstRow:
firstRow = False
#print len(t)
ea.write(string.join(['Ensembl','Symbol','Category'],'\t')+'\n') ### column names match the rows written below
else:
symbol = t[1]; category = t[3]
symbol = symbol[0]+string.lower(symbol[1:]) ### Mouse
category = category[:100]
if symbol in symbol_to_gene:
ensembl = symbol_to_gene[symbol][0]
ea.write(string.join([ensembl,symbol,category],'\t')+'\n')
ea.close()
def CountKallistoAlignedJunctions(filename):
fn = filepath(filename)
firstRow=True
#filename = filename[:-4]+'.fasta'
found = False
counts=0
unique={}
ea = export.ExportFile(filename[:-4]+'-Mpo.txt')
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if 'ENS' in line and 'JUNC1201' in line:
ea.write(line)
unique[t[0]]=[]
counts+=1
print counts, len(unique)
ea.close()
def filterRandomFile(filename,col1,col2):
fn = filepath(filename)
firstRow=True
counts=0
ea = export.ExportFile(filename[:-4]+'-columns.txt')
for line in open(fn,'rU').xreadlines():
if line[0]!='#':
data = line.rstrip()
t = string.split(data,',')
#print t[col1-1]+'\t'+t[col2-1];sys.exit()
if ' ' in t[col2-1]:
t[col2-1] = string.split(t[col2-1],' ')[2]
ea.write(t[col1-1]+'\t'+t[col2-1]+'\n')
counts+=1
#print counts, len(unique)
ea.close()
def getBlockExonPositions():
fn = '/Users/saljh8/Desktop/Code/AltAnalyze/AltDatabase/EnsMart65/ensembl/Mm/Mm_Ensembl_exon.txt'
firstRow=True
filename = fn[:-4]+'.block.txt'
ea = export.ExportFile(filename)
found = False
lines=0
exon_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
gene,exonid,chromosome,strand,start,stop, a, b, c, d = string.split(data,'\t')
exonid = string.split(exonid,'.')[0]
uid = gene+':'+exonid
if lines>0:
try:
exon_db[uid,strand].append(int(start))
exon_db[uid,strand].append(int(stop))
except Exception:
exon_db[uid,strand] = [int(start)]
exon_db[uid,strand].append(int(stop))
lines+=1
print len(exon_db)
for (uid,strand) in exon_db:
exon_db[uid,strand].sort()
if strand == '-':
exon_db[uid,strand].reverse()
start = str(exon_db[uid,strand][0])
stop = str(exon_db[uid,strand][1])
coord = [start,stop]; coord.sort()
ea.write(uid+'\t'+strand+'\t'+coord[0]+'\t'+coord[1]+'\n')
ea.close()
def combineVariants(fn):
firstRow=True
filename = fn[:-4]+'.gene-level.txt'
ea = export.ExportFile(filename)
found = False
lines=0
gene_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
gene = t[9]
if lines == 0:
header = ['UID']+t[16:]
header = string.join(header,'\t')+'\n'
ea.write(header)
lines+=1
else:
var_calls = map(float,t[16:])
if gene in gene_db:
count_sum_array = gene_db[gene]
count_sum_array = [sum(value) for value in zip(*[count_sum_array,var_calls])]
gene_db[gene] = count_sum_array
else:
gene_db[gene] = var_calls
for gene in gene_db:
var_calls = gene_db[gene]
var_calls2=[]
for i in var_calls:
if i==0: var_calls2.append('0')
else: var_calls2.append('1')
ea.write(gene+'\t'+string.join(var_calls2,'\t')+'\n')
ea.close()
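### zip(*[a,b]) pairs positions across the two count lists, so the list
### comprehension above is an element-wise sum, e.g. [1.0,0.0,2.0] and
### [0.0,1.0,1.0] combine to [1.0,1.0,3.0]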
def compareFusions(fn):
firstRow=True
filename = fn[:-4]+'.matrix.txt'
ea = export.ExportFile(filename)
found = False
lines=0
fusion_db={}
sample_list=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
sample, fusion = string.split(data,'\t')
try: fusion_db[fusion].append(sample)
except Exception: fusion_db[fusion] = [sample]
if sample not in sample_list: sample_list.append(sample)
fusion_db2=[]
for fusion in fusion_db:
samples = fusion_db[fusion]
samples2=[]
for s in sample_list:
if s in samples: samples2.append('1')
else: samples2.append('0')
fusion_db[fusion] = samples2
ea.write(string.join(['Fusion']+sample_list,'\t')+'\n')
for fusion in fusion_db:
print [fusion]
ea.write(fusion+'\t'+string.join(fusion_db[fusion],'\t')+'\n')
ea.close()
def customCleanSupplemental(filename):
fn = filepath(filename)
firstRow=True
filename = filename[:-4]+'-new.txt'
ea = export.ExportFile(filename)
found = False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
line = string.split(data,', ')
gene_data=[]
for gene in line:
gene = string.replace(gene,' ','')
if '/' in gene:
genes = string.split(gene,'/')
gene_data.append(genes[0])
for i in genes[1:]:
gene_data.append(genes[0][:len(genes[1])*-1]+i)
elif '(' in gene:
genes = string.split(gene[:-1],'(')
gene_data+=genes
else:
gene_data.append(gene)
ea.write(string.join(gene_data,' ')+'\n')
ea.close()
if __name__ == '__main__':
#compareFusions('/Users/saljh8/Documents/1-collaborations/CPMC/GMP-MM_r2/MM_fusion_result.txt');sys.exit()
#combineVariants('/Users/saljh8/Documents/1-collaborations/CPMC/GMP-MM_r2/MM_known_variants.txt');sys.exit()
#customCleanSupplemental('/Users/saljh8/Desktop/dataAnalysis/CPMC/TCGA_MM/MM_genes_published.txt');sys.exit()
#customClean('/Users/saljh8/Desktop/dataAnalysis/Driscoll/R3/2000_run1708A_normalized.txt');sys.exit()
#simpleFilter('/Volumes/SEQ-DATA 1/all_10.5_mapped_norm_GC.csv');sys.exit()
#filterRandomFile('/Users/saljh8/Downloads/HuGene-1_1-st-v1.na36.hg19.transcript2.csv',1,8);sys.exit()
filename = '/Users/saljh8/Desktop/Grimes/GEC14078/MergedFiles.txt'
#CountKallistoAlignedJunctions(filename);sys.exit()
filename = '/Users/saljh8/Desktop/Code/AltAnalyze/AltDatabase/EnsMart72/Mm/junction1/junction_critical-junction-seq.txt'
#MakeJunctionFasta(filename);sys.exit()
filename = '/Users/saljh8/Downloads/CoexpressionAtlas.txt'
#ToppGeneFilter(filename); sys.exit()
#countIntronsExons(filename);sys.exit()
#filterForJunctions(filename);sys.exit()
#filename = '/Users/saljh8/Desktop/Grimes/GEC14074/ExpressionOutput/LineageCorrelations-test-protein_coding-zscores.txt'
#runHCOnly(filename,[]); sys.exit()
folder = '/Users/saljh8/Desktop/Code/AltAnalyze/AltDatabase/EnsMart72/ensembl/Hs'
import UI ### needed for read_directory on the next line (the original imported UI only further below)
files = UI.read_directory(folder)
for file in files: #:70895507-70895600
if '.bed' in file:
#BedFileCheck(folder+'/'+file)
pass
#sys.exit()
#runPCAonly(filename,[],False,showLabels=False,plotType='2D');sys.exit()
countinp = '/Volumes/salomonis2/SinghLab/20150715_single_GCBCell/bams/ExpressionInput/counts.Bcells.txt'
IGH_gene_file = '/Volumes/salomonis2/SinghLab/20150715_single_GCBCell/bams/ExpressionInput/IGH_genes.txt'
#extractFeatures(countinp,IGH_gene_file);sys.exit()
#geneMethylationOutput(filename);sys.exit()
#ica(filename);sys.exit()
#replaceWithBinary('/Users/saljh8/Downloads/Neg_Bi_wholegenome.txt');sys.exit()
#simpleFilter('/Volumes/SEQ-DATA/AML-TCGA/ExpressionInput/counts.LAML1.txt');sys.exit()
filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/genes.tpm_tracking-ordered.txt'
#filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/6-5-2015/ExpressionInput/amplify/exp.All-wt-output.txt'
#getlastexon(filename);sys.exit()
TFs = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/TF-by-gene_matrix/all-TFs.txt'
folder = '/Users/saljh8/Downloads/BLASTX2_Gecko.tab'
genes = ['Cebpe','Gfi1']
#genes = ['Gata1','Gfi1b']
#coincentIncedenceTest(filename,TFs);sys.exit()
#coincidentIncedence(filename,genes);sys.exit()
#test(folder);sys.exit()
#files = UI.read_directory(folder)
#for file in files: SimpleCorrdinateToBed(folder+'/'+file)
#filename = '/Users/saljh8/Desktop/bed/RREs0.5_exons_unique.txt'
#simpleIntegrityCheck(filename);sys.exit()
gene_list = ['S100a8','Chd7','Ets1','Chd7','S100a8']
gene_list_file = '/Users/saljh8/Desktop/demo/Amit/ExpressionInput/genes.txt'
gene_list_file = '/Users/saljh8/Desktop/Grimes/Comb-plots/AML_genes-interest.txt'
gene_list_file = '/Users/saljh8/Desktop/dataAnalysis/Grimes/Mm_Sara-single-cell-AML/alt/AdditionalHOPACH/ExpressionInput/AML_combplots.txt'
gene_list_file = '/Users/saljh8/Desktop/Grimes/KashishNormalization/12-16-15/AllelicSeries/ExpressionInput/KO_genes.txt'
gene_list_file = '/Users/saljh8/Desktop/dataAnalysis/Grimes/All-Fluidigm/ExpressionInput/comb_plot2.txt'
genesets = importGeneList(gene_list_file)
filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/comb-plots/exp.IG2_GG1-extended-output.txt'
filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/comb-plots/genes.tpm_tracking-ordered.txt'
filename = '/Users/saljh8/Desktop/demo/Amit/ExpressedCells/GO-Elite_results/3k_selected_LineageGenes-CombPlotInput2.txt'
filename = '/Users/saljh8/Desktop/Grimes/Comb-plots/exp.AML_single-cell-output.txt'
filename = '/Users/saljh8/Desktop/dataAnalysis/Grimes/Mm_Sara-single-cell-AML/alt/AdditionalHOPACH/ExpressionInput/exp.AML.txt'
filename = '/Users/saljh8/Desktop/Grimes/KashishNormalization/12-16-15/AllelicSeries/ExpressionInput/exp.KO-output.txt'
filename = '/Users/saljh8/Desktop/dataAnalysis/Grimes/All-Fluidigm/ExpressionInput/exp.Lsk_panorama.txt'
print genesets
for gene_list in genesets:
multipleSubPlots(filename,gene_list,SubPlotType='column')
sys.exit()
plotHistogram(filename);sys.exit()
filename = '/Users/saljh8/Desktop/Grimes/Expression_final_files/ExpressionInput/amplify-wt/DataPlots/Clustering-exp.myeloid-steady-state-PCA-all_wt_myeloid_SingleCell-Klhl7 Dusp7 Slc25a33 H6pd Bcorl1 Sdpr Ypel3 251000-hierarchical_cosine_cosine.cdt'
openTreeView(filename);sys.exit()
pdf1 = "/Users/saljh8/Desktop/Grimes/1.pdf"
pdf2 = "/Users/saljh8/Desktop/Grimes/2.pdf"
outPdf = "/Users/saljh8/Desktop/Grimes/3.pdf"
merge_horizontal(outPdf, pdf1, pdf2);sys.exit()
mergePDFs(pdf1,pdf2,outPdf);sys.exit()
filename = '/Volumes/SEQ-DATA/CardiacRNASeq/BedFiles/ExpressionOutput/Clustering/SampleLogFolds-CardiacRNASeq.txt'
ica(filename);sys.exit()
features = 5
matrix, column_header, row_header, dataset_name, group_db = importData(filename)
Kmeans(features, column_header, row_header); sys.exit()
#graphViz();sys.exit()
filename = '/Users/saljh8/Desktop/delete.txt'
filenames = [filename]
outputClusters(filenames,[]); sys.exit()
#runPCAonly(filename,[],False);sys.exit()
#VennDiagram(); sys.exit()
#buildGraphFromSIF('Ensembl','Mm',None,None); sys.exit()
#clusterPathwayZscores(None); sys.exit()
pruned_folder = '/Users/nsalomonis/Desktop/CBD/LogTransformed/GO-Elite/GO-Elite_results/CompleteResults/ORA_pruned/'
input_ora_folder = '/Users/nsalomonis/Desktop/CBD/LogTransformed/GO-Elite/input/'
files = UI.read_directory(pruned_folder)
for file in files:
if '.sif' in file:
input_file = string.join(string.split(file,'-')[:-1],'-')+'.txt'
sif_file = pruned_folder+file
input_file = input_ora_folder+input_file
buildGraphFromSIF('Ensembl','Hs',sif_file,input_file)
sys.exit()
filenames = [filename]
outputClusters(filenames,[])
| wuxue/altanalyze | clustering.py | Python | apache-2.0 | 233,205 | ["Bioconductor"] | 7122ea948651a1a7d6a79b289103cf703bf340c369e03c7e4e84a48a002557e6 |
from setuptools import setup
setup(
name="parapred",
packages=["parapred"],
entry_points={
"console_scripts": ['parapred = parapred.parapred:main']
},
install_requires=[
"Keras==2.0.6",
"pandas>=0.19.2,<0.20",
"tensorflow==1.2.1",
"numpy>=1.13",
"matplotlib>=2.0.0",
"scikit-learn>=0.18,<0.19",
"scipy>=0.19",
"biopython==1.69",
"docopt>=0.6.2",
"h5py>=2.6.0",
"lxml>=4.1.1",
"requests>=2.18.4"
],
version="1.0.1",
description="Deep-learning-powered antibody binding site prediction.",
author="E Liberis",
author_email="el398@cam.ac.uk",
url="https://github.com/eliberis/parapred",
package_data={"parapred": ["data/*.csv", "precomputed/*"]}
)
| eliberis/parapred | setup.py | Python | mit | 796 | ["Biopython"] | 4eeac8299a01c071769ce8c83103e133c7ec7dc4cbe7f46a202c95bb26a047e5 |
#!/usr/bin/env python
import os.path
import subprocess
import os
import sys
def is_newer(filename1, filename2):
    # True if filename1 was modified more recently than filename2
    return os.path.getmtime(filename1) > os.path.getmtime(filename2)
def makeblastdb(fasta_filename, dbname, dbtype='nucl'):
if dbtype not in ['nucl', 'prot']:
raise ValueError('Invalid dbtype: {}'.format(dbtype))
dustbin_file = open(os.devnull, 'w') # make a file that writes to /dev/null
cmd = ['makeblastdb', '-in', fasta_filename, '-out', dbname, '-dbtype', dbtype]
return_code = subprocess.call(cmd, stdout=dustbin_file)
return return_code == 0
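# Example usage (assumes the NCBI BLAST+ 'makeblastdb' binary is on the PATH):
# makeblastdb('genome.fasta', 'genomedb') # a 'nucl' db writes genomedb.nhr/.nin/.nsq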
if len(sys.argv) != 3 and len(sys.argv) != 4:
sys.exit("Usage: {} <FASTA filename> <BLAST db name> [<DB type>]".format(os.path.basename(sys.argv[0])))
seq_filename = sys.argv[1]
dbname = sys.argv[2]
if len(sys.argv) == 4:
dbtype = sys.argv[3]
else:
dbtype = 'nucl'
blastdb_filename = dbname + '.nhr'
if not os.path.exists(blastdb_filename) or is_newer(seq_filename, blastdb_filename):
print "Making BLAST database"
makeblastdb(seq_filename, dbname, dbtype)
else:
print "BLAST database already up to date"
| pvanheus/python2015 | bin/makeblastdb.py | Python | gpl-3.0 | 1,263 | ["BLAST"] | 9382c1fb733282345d906fca6d8e05d84e023d3f006490d973ec0040d6367fec |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument("--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
args = parser.parse_args()
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except:
return False
data = f.read()
f.close()
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
return False
# Replace the years (2014-2017) matched by the "date" regex with "YEAR" on the first line that contains one
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh', "vendor", "test/e2e/generated/bindata.go"]
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014 through 2017; company holder names can be anything
regexs["date"] = re.compile( '(2014|2015|2016|2017)' )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
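# Illustrative sketch (hypothetical header line) of how these regexes
# normalize a file before it is compared against the reference boilerplate:
#
#   regexs = get_regexs()
#   line = "# Copyright 2016 The Kubernetes Authors."
#   line, n = regexs["date"].subn("YEAR", line)
#   # line is now "# Copyright YEAR The Kubernetes Authors." and n == 1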
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
|
aclisp/kubernetes
|
hack/boilerplate/boilerplate.py
|
Python
|
apache-2.0
| 5,223
|
[
"VisIt"
] |
94ddee7bdadf4f3ad5aae8f0361e3ce7f9148d2712e1fc25b1d5e6c2adcd7063
|
#-----------------------------------------------------------------
# pycparser: func_defs.py
#
# Using pycparser for printing out all the functions defined in a
# C file.
#
# This is a simple example of traversing the AST generated by
# pycparser.
#
# Copyright (C) 2008-2011, Eli Bendersky
# License: BSD
#-----------------------------------------------------------------
from __future__ import print_function
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.extend(['.', '..'])
from pycparser import c_parser, c_ast, parse_file
# A simple visitor for FuncDef nodes that prints the names and
# locations of function definitions.
#
class FuncDefVisitor(c_ast.NodeVisitor):
def visit_FuncDef(self, node):
print('%s at %s' % (node.decl.name, node.decl.coord))
def show_func_defs(filename):
# Note that cpp is used. Provide a path to your own cpp or
# make sure one exists in PATH.
#
ast = parse_file(filename, use_cpp=True)
v = FuncDefVisitor()
v.visit(ast)
if __name__ == "__main__":
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = 'c_files/memmgr.c'
show_func_defs(filename)
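# Expected output is one "name at coord" line per function definition,
# where coord is roughly filename:line(:column), e.g. (illustrative):
#   main at c_files/memmgr.c:100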
|
sideeffects/pycparser
|
examples/func_defs.py
|
Python
|
bsd-3-clause
| 1,231
|
[
"VisIt"
] |
4e0a66746350d78963333d39c55f266e1dff075e6c6f2a48ca2ca34184ff2e77
|
"""
Wrapper functions for nglviewer and jupyter helper functions
Reference links
- https://birdlet.github.io/2019/10/02/py3dmol_example/
- http://nglviewer.org/nglview/latest/api.html#nglview.RdkitStructure
- http://nglviewer.org/nglview/latest/api.html#nglview.show_rdkit
Notes on nglviewer usage
>>> import nglview as nv
>>> from rdkit import Chem
... from rdkit.Chem import AllChem
... m = Chem.AddHs(Chem.MolFromSmiles('COc1ccc2[C@H](O)[C@@H](COc2c1)N3CCC(O)(CC3)c4ccc(F)cc4'))
... _ = AllChem.EmbedMultipleConfs(m, useExpTorsionAnglePrefs=True, useBasicKnowledge=True)
... view = nv.show_rdkit(m)
... view
>>> # add component m2
>>> # create file-like object
>>> from nglview.show import StringIO
>>> m2 = Chem.AddHs(Chem.MolFromSmiles('N[C@H](C)C(=O)O'))
... fh = StringIO(Chem.MolToPDBBlock(m2))
... view.add_component(fh, ext='pdb')
>>> # load as trajectory, need to have ParmEd
>>> view = nv.show_rdkit(m, parmed=True)
"""
import IPython
import ipywidgets
import nglview
import pandas as pd
from ipywidgets import Layout, interact
from rdkit.Chem import rdMolAlign
from ppqm import chembridge
def show_molobj(molobj, align_conformers=True, show_properties=False):
"""
Show molobj in jupyter with a slider for each conformer
"""
if align_conformers:
rdMolAlign.AlignMolConformers(molobj)
n_conformers = molobj.GetNumConformers()
assert n_conformers > 0
view = nglview.show_rdkit(molobj)
def _view_conformer(idx):
coord = chembridge.get_coordinates(molobj, confid=idx)
view.set_coordinates({0: coord})
print(f"Conformer {idx} / {n_conformers - 1}")
if n_conformers > 1:
interact(
_view_conformer,
idx=ipywidgets.IntSlider(min=0, max=n_conformers - 1, step=1),
layout=Layout(width="100%", height="80px"),
)
_view_conformer(0)
IPython.core.display.display(view)
if show_properties:
properties = molobj.GetPropsAsDict()
pdf = pd.DataFrame([properties]).transpose()
IPython.core.display.display(pdf)
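# A minimal notebook usage sketch (hypothetical SMILES; assumes conformers
# have been embedded with RDKit first):
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   mol = Chem.AddHs(Chem.MolFromSmiles("CCO"))
#   AllChem.EmbedMultipleConfs(mol, numConfs=5)
#   show_molobj(mol, show_properties=True)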
def show_molobjs(molobjs, align_conformers=True, show_properties=False):
""" """
n_molobjs = len(molobjs)
def _view_molobj(idx):
show_molobj(
molobjs[idx], align_conformers=align_conformers, show_properties=show_properties
)
interact(
_view_molobj,
idx=ipywidgets.IntSlider(min=0, max=n_molobjs - 1, step=1),
layout=Layout(width="100%", height="80px"),
)
|
ppqm/ppqm
|
ppqm/jupyter.py
|
Python
|
mit
| 2,510
|
[
"RDKit"
] |
e8a1bf2c75de885b6cea3d46ca44a4f32222672b7cb9e5b16ba69c94c62b3402
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import ast
import hashlib
import spack.repo
import spack.package
import spack.directives
import spack.error
import spack.spec
import spack.util.naming
class RemoveDocstrings(ast.NodeTransformer):
"""Transformer that removes docstrings from a Python AST."""
def remove_docstring(self, node):
if node.body:
if isinstance(node.body[0], ast.Expr) and \
isinstance(node.body[0].value, ast.Str):
node.body.pop(0)
self.generic_visit(node)
return node
def visit_FunctionDef(self, node): # noqa
return self.remove_docstring(node)
def visit_ClassDef(self, node): # noqa
return self.remove_docstring(node)
def visit_Module(self, node): # noqa
return self.remove_docstring(node)
class RemoveDirectives(ast.NodeTransformer):
"""Remove Spack directives from a package AST."""
def __init__(self, spec):
self.spec = spec
def is_directive(self, node):
"""Check to determine if the node is a valid directive
Directives are assumed to be represented in the AST as a named function
call expression. This means that they will NOT be represented by a
named function call within a function call expression (e.g., as
callbacks are sometimes represented).
Args:
node (AST): the AST node being checked
Returns:
(bool): ``True`` if the node represents a known directive,
``False`` otherwise
"""
return (isinstance(node, ast.Expr) and
node.value and isinstance(node.value, ast.Call) and
isinstance(node.value.func, ast.Name) and
node.value.func.id in spack.directives.__all__)
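# Illustrative example: in a package.py containing the top-level call
#
#     depends_on('mpi')
#
# the statement parses to Expr(value=Call(func=Name(id='depends_on'))),
# so is_directive() returns True; a Call nested inside another Call
# (e.g. passed as a callback argument) is not a top-level Expr node
# and is therefore not matched.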
def is_spack_attr(self, node):
return (isinstance(node, ast.Assign) and
node.targets and isinstance(node.targets[0], ast.Name) and
node.targets[0].id in spack.package.Package.metadata_attrs)
def visit_ClassDef(self, node): # noqa
if node.name == spack.util.naming.mod_to_class(self.spec.name):
node.body = [
c for c in node.body
if (not self.is_directive(c) and not self.is_spack_attr(c))]
return node
class TagMultiMethods(ast.NodeVisitor):
"""Tag @when-decorated methods in a spec."""
def __init__(self, spec):
self.spec = spec
self.methods = {}
def visit_FunctionDef(self, node): # noqa
nodes = self.methods.setdefault(node.name, [])
if node.decorator_list:
dec = node.decorator_list[0]
if isinstance(dec, ast.Call) and dec.func.id == 'when':
try:
cond = dec.args[0].s
nodes.append(
(node, self.spec.satisfies(cond, strict=True)))
except AttributeError:
# In this case the condition for the 'when' decorator is
# not a string literal (for example it may be a Python
# variable name). Therefore the function is added
# unconditionally since we don't know whether the
# constraint applies or not.
nodes.append((node, None))
else:
nodes.append((node, None))
class ResolveMultiMethods(ast.NodeTransformer):
"""Remove methods which do not exist if their @when is not satisfied."""
def __init__(self, methods):
self.methods = methods
def resolve(self, node):
if node.name not in self.methods:
raise PackageHashError(
"Future traversal visited new node: %s" % node.name)
result = None
for n, cond in self.methods[node.name]:
if cond:
return n
if cond is None:
result = n
return result
def visit_FunctionDef(self, node): # noqa
if self.resolve(node) is node:
node.decorator_list = []
return node
return None
def package_content(spec):
return ast.dump(package_ast(spec))
def package_hash(spec, content=None):
if content is None:
content = package_content(spec)
return hashlib.sha256(content.encode('utf-8')).digest().lower()
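# Usage sketch (hypothetical spec string; assumes a configured Spack repo):
#
#   digest = package_hash('zlib')  # digest of the normalized package.py AST,
#                                  # with docstrings and directives stripped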
def package_ast(spec):
spec = spack.spec.Spec(spec)
filename = spack.repo.path.filename_for_package_name(spec.name)
with open(filename) as f:
text = f.read()
root = ast.parse(text)
root = RemoveDocstrings().visit(root)
RemoveDirectives(spec).visit(root)
fmm = TagMultiMethods(spec)
fmm.visit(root)
root = ResolveMultiMethods(fmm.methods).visit(root)
return root
class PackageHashError(spack.error.SpackError):
"""Raised for all errors encountered during package hashing."""
|
iulian787/spack
|
lib/spack/spack/util/package_hash.py
|
Python
|
lgpl-2.1
| 5,042
|
[
"VisIt"
] |
d28f196c18913691d70712d2fd71098ae45ca9047112103a8831a8d0aa9eade3
|
from pywps import Process, LiteralInput, ComplexInput, ComplexOutput
from pywps import Format
from plotter import simple_plot
import logging
LOGGER = logging.getLogger('PYWPS')
AIR_DS = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.derived/surface/air.mon.ltm.nc'
class SimplePlot(Process):
def __init__(self):
inputs = [
ComplexInput('dataset', 'Dataset', supported_formats=[Format('application/x-netcdf')],
default=AIR_DS,
abstract='Example: {0}'.format(AIR_DS)),
LiteralInput('variable', 'Variable', data_type='string',
default='air',
abstract='Enter the variable name.'),
]
outputs = [
ComplexOutput('output', 'Simple Plot', supported_formats=[Format('image/png')],
as_reference=True),
]
super(SimplePlot, self).__init__(
self._handler,
identifier='simple_plot',
title='Simple Plot',
abstract='Returns a nice and simple plot.',
version='1.0',
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
variable = request.inputs['variable'][0].data
# Call simple_plot function
output = simple_plot(
resource=request.inputs['dataset'][0].file,
variable=variable)
LOGGER.info("produced output: %s", output)
response.outputs['output'].file = output
response.update_status("simple_plot done", 100)
return response
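# A minimal sketch of serving this process with PyWPS (hypothetical config
# file name; pywps.Service is the standard WSGI entry point):
#
#   from pywps import Service
#   application = Service(processes=[SimplePlot()], cfgfiles=['pywps.cfg'])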
|
bird-house/birdhouse-workshop
|
tutorials/10_pywps_process/processes/wps_simple_plot.py
|
Python
|
apache-2.0
| 1,714
|
[
"NetCDF"
] |
bf0db36f10cb1da4a5ed6929667f99870e9572a4a9f1743d536187297d5aac18
|
from __future__ import print_function
__author__ = """Alex "O." Holcombe, Wei-Ying Chen""" ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
from psychopy import *
import psychopy.info
from psychopy import sound, monitors, logging
import numpy as np
import itertools #to calculate all subsets
from copy import deepcopy
from math import atan, pi, cos, sin, sqrt, ceil
import time, sys, platform, os, StringIO, gc
from EyelinkEyetrackerForPsychopySUPA3 import EyeLinkCoreGraphicsPsychopy, Tracker_EyeLink #Chris Fajou integration
from helpersAOH import accelerateComputer, openMyStimWindow, calcCondsPerNumTargets, LCM, gcd
eyetracking = False; eyetrackFileGetFromEyelinkMachine = False #very timeconsuming to get the file from the Windows machine over the ethernet cable,
#usually better to get the EDF file from the Eyelink machine by hand by rebooting into Windows and going to
quitFinder = True
if quitFinder:
applescript="\'tell application \"Finder\" to quit\'" #quit Finder.
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
process_priority = 'realtime' # 'normal' 'high' or 'realtime'
disable_gc = True
subject='test'#'test'
autoLogging = False
demo = False
autopilot=False
if autopilot: subject='auto'
feedback=True
exportImages= False #quits after one trial / output image
screenshot= False; screenshotDone = False;allowGUI = False;waitBlank = False
trackAllIdenticalColors = True#with tracking, can either use same colors as other task (e.g. 6 blobs but only 3 colors so have to track one of 2) or set all blobs identical color
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
respTypes=['order']; respType=respTypes[0]
bindRadiallyRingToIdentify=1 #0 is inner, 1 is outer
numRings=2
radii=[2.5,9.5] #Need to encode as array for those experiments wherein more than one ring presented
offsets = np.array([[0,0],[-5,0],[-10,0]])
respRadius=radii[0] #deg
refreshRate= 160 *1.0; #160 #set to the framerate of the monitor
useClock = True #as opposed to using frame count, which assumes no frames are ever missed
fullscr=1; scrn=0
# create a dialog from dictionary
infoFirst = { 'Autopilot':autopilot, 'Check refresh etc':True, 'Screen to use':scrn, 'Fullscreen (timing errors if not)': fullscr, 'Screen refresh rate': refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='MOT',
order=['Autopilot','Check refresh etc', 'Screen to use', 'Screen refresh rate', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating',
'Screen to use': '0 means primary screen, 1 means second screen'},
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
autopilot = infoFirst['Autopilot']
checkRefreshEtc = infoFirst['Check refresh etc']
scrn = infoFirst['Screen to use']
print('scrn = ',scrn, ' from dialog box')
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
#trialDur does not include trackingExtraTime, during which the cue is on. Not really part of the trial.
trialDur = 3.3
if demo:trialDur = 5;refreshRate = 60.;
tokenChosenEachRing= [-999]*numRings
rampUpDur=0; rampDownDur=0
trackingExtraTime=1.0; #giving the person time to attend to the cue (secs). This gets added to trialDur
trackVariableIntervMax = 0.8
def maxTrialDur():
return( trialDur+trackingExtraTime+trackVariableIntervMax)
badTimingCushion = 0.1 #Creating 100ms more of reversals than should need. Because if miss frames and using clock time instead of frames, might go longer
def maxPossibleReversals(): #need answer to know how many blank fields to print to file
return int( ceil( (maxTrialDur() - trackingExtraTime) / timeTillReversalMin ) )
def getReversalTimes():
reversalTimesEachRing = [ [] for i in range(numRings) ]
for r in range(numRings): # set random reversal times
thisReversalDur = trackingExtraTime
while thisReversalDur< trialDurTotal+badTimingCushion:
thisReversalDur += np.random.uniform(timeTillReversalMin,timeTillReversalMax) #10000; print('WARNING thisReversalDur off')
reversalTimesEachRing[r].append(thisReversalDur)
return reversalTimesEachRing
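#Illustrative (hypothetical draws): with timeTillReversalMin=0.5 and timeTillReversalMax=1.5,
#successive reversal times for a ring are spaced by a Uniform(0.5,1.5) s gap, starting from
#trackingExtraTime and running just past trialDurTotal+badTimingCushion, e.g. [1.9, 2.6, 4.0, ...]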
toTrackCueDur = rampUpDur+rampDownDur+trackingExtraTime #giving the person time to attend to the cue (secs)
trialDurFrames=int(trialDur*refreshRate)+int( trackingExtraTime*refreshRate )
rampUpFrames = refreshRate*rampUpDur; rampDownFrames = refreshRate*rampDownDur;
ShowTrackCueFrames = int( refreshRate*toTrackCueDur )
rampDownStart = trialDurFrames-rampDownFrames
ballStdDev = 1.8
mouseChoiceArea = ballStdDev*0.8 # origin =1.3
units='deg' #'cm'
timeTillReversalMin = 0.5 #0.5;
timeTillReversalMax = 1.5# 1.3 #2.9
colors_all = np.array([[1,-1,-1],[1,-1,-1]])
cueColor = np.array([1,1,1])
#monitor parameters
widthPix = 800 #1440 #monitor width in pixels
heightPix =600 #900 #monitor height in pixels
monitorwidth = 38.5 #28.5 #monitor width in centimeters
viewdist = 57.; #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
bgColor = [-1,-1,-1] #black background
monitorname = 'testMonitor' # 'mitsubishi' #in psychopy Monitors Center
if exportImages:
fullscr=0; scrn=0
widthPix = 600; heightPix = 450
monitorwidth = 25.0
if demo:
scrn=0; fullscr=0
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
monitorwidth = 23#18.0
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
mon.setSizePix( (widthPix,heightPix) )
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscr,scrn,waitBlank)
myMouse = event.Mouse(visible = 'true',win=myWin)
myWin.setRecordFrameIntervals(False)
trialsPerCondition = 2 #default value
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
print('Finished runInfo- which assesses the refresh and processes of this computer')
refreshMsg1 = 'Median frames per second ='+ str( np.round(1000./runInfo["windowRefreshTimeMedian_ms"],1) )
refreshRateTolerancePct = 3
pctOff = abs( (1000./runInfo["windowRefreshTimeMedian_ms"]-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than ' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
dlgLabelsOrdered = list() #new dialog box
myDlg = gui.Dlg(title="object tracking experiment", pos=(200,400))
if not autopilot:
myDlg.addField('Subject name :', subject, tip='or subject code')
dlgLabelsOrdered.append('subject')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
msgWrongResolution = ''
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Instead of desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels, screen apparently '+ str(myWinRes[0])+ 'x'+ str(myWinRes[1])
myDlg.addText(msgWrongResolution, color='Red')
print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if not autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition ='+str(trialsPerCondition))
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if os.path.isdir('.'+os.sep+'dataRaw'):
dataDir='dataRaw'
else:
print('"dataRaw" directory does not exist, so saving data in present working directory')
dataDir='.'
expname = ''
fileName = dataDir+'/'+subject+ '_' + expname+timeAndDateStr
if not demo and not exportImages:
dataFile = open(fileName+'.txt', 'w') # sys.stdout #StringIO.StringIO()
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logF = logging.LogFile(fileName+'.log',
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
dataFile = sys.stdout
logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.WARNING) #DEBUG means set the console to receive nearly all messges, INFO is for everything else, INFO, EXP, DATA, WARNING and ERROR
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
longerThanRefreshTolerance = 0.27
longFrameLimit = round(1000./refreshRate*(1.0+longerThanRefreshTolerance),3) # round(1000/refreshRate*1.5,2)
print('longFrameLimit=',longFrameLimit,' Recording trials where one or more interframe interval exceeded this figure ', file=logF)
print('longFrameLimit=',longFrameLimit,' Recording trials where one or more interframe interval exceeded this figure ')
if msgWrongResolution != '':
logging.error(msgWrongResolution)
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscr,scrn,waitBlank)
myMouse = event.Mouse(visible = 'true',win=myWin)
runInfo = psychopy.info.RunTimeInfo(
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
print('second window opening runInfo mean ms=',runInfo["windowRefreshTimeAvg_ms"],file=logF)
print('second window opening runInfo mean ms=',runInfo["windowRefreshTimeAvg_ms"])
logging.info(runInfo)
logging.info('gammaGrid='+str(mon.getGammaGrid()))
logging.info('linearizeMethod='+str(mon.getLinearizeMethod()))
gaussian = visual.PatchStim(myWin, tex='none',mask='gauss',colorSpace='rgb',size=ballStdDev,autoLog=autoLogging)
gaussian2 = visual.PatchStim(myWin, tex='none',mask='gauss',colorSpace='rgb',size=ballStdDev,autoLog=autoLogging)
optionChosenCircle = visual.Circle(myWin, radius=mouseChoiceArea, edges=32, fillColorSpace='rgb',fillColor = (1,0,1),autoLog=autoLogging) #to outline chosen options
clickableRegion = visual.Circle(myWin, radius=0.5, edges=32, fillColorSpace='rgb',fillColor = (-1,1,-1),autoLog=autoLogging) #to show clickable zones
circlePostCue = visual.Circle(myWin, radius=2*radii[0], edges=32, fillColorSpace='rgb',fillColor = (-.85,-.85,-.85),lineColor=(-1,-1,-1),autoLog=autoLogging) #visual postcue
#referenceCircle allows visualisation of trajectory, mostly for debugging
referenceCircle = visual.Circle(myWin, radius=radii[0], edges=32, fillColorSpace='rgb',lineColor=(-1,-1,1),autoLog=autoLogging) #visual postcue
blindspotFill = 0 #a way for people to know if they move their eyes
if blindspotFill:
blindspotStim = visual.PatchStim(myWin, tex='none',mask='circle',size=4.8,colorSpace='rgb',color = (-1,1,-1),autoLog=autoLogging) #to outline chosen options
blindspotStim.setPos([13.1,-2.7]) #AOH, size=4.8; pos=[13.1,-2.7] #DL: [13.3,-0.8]
fixatnNoise = True
fixSizePix = 20 #make fixation big so flicker more conspicuous
if fixatnNoise:
checkSizeOfFixatnTexture = fixSizePix/4
nearestPowerOfTwo = int(round( sqrt(checkSizeOfFixatnTexture) )**2) #Because textures (created on next line) must have power-of-two dimensions, and np.random.rand needs ints
fixatnNoiseTexture = np.round( np.random.rand(nearestPowerOfTwo,nearestPowerOfTwo) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=autoLogging)
fixationBlank= visual.PatchStim(myWin, tex=-1*fixatnNoiseTexture, colorSpace='rgb',mask='circle',size=fixSizePix,units='pix',autoLog=autoLogging)
else:
fixation = visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(.9,.9,.9),mask='circle',units='pix',size=fixSizePix,autoLog=autoLogging)
fixationBlank= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(-1,-1,-1),mask='circle',units='pix',size=fixSizePix,autoLog=autoLogging)
fixationPoint = visual.PatchStim(myWin,colorSpace='rgb',color=(1,1,1),mask='circle',units='pix',size=2,autoLog=autoLogging) #put a point in the center
respText = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
NextText = visual.TextStim(myWin,pos=(0, 0),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
NextRemindPctDoneText = visual.TextStim(myWin,pos=(-.1, -.4),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(.1, -.5),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
stimList = []
# temporalfrequency limit test
numObjsInRing = [2]
speedsEachNumObjs = [ [1.1,1.2,1.4,1.7] ] #dont want to go faster than 2 because of blur problem
numTargets = np.array([2]) # np.array([1,2,3])
leastCommonMultipleSubsets = calcCondsPerNumTargets(numRings,numTargets)
leastCommonMultipleTargetNums = LCM( numTargets ) #have to use this to choose whichToQuery. For explanation see newTrajectoryEventuallyForIdentityTracking.oo3
#print('leastCommonMultipleSubsets=',leastCommonMultipleSubsets)
for numObjs in numObjsInRing: #set up experiment design
idx = numObjsInRing.index(numObjs)
speeds= speedsEachNumObjs[ idx ]
for speed in speeds:
ringNums = np.arange(numRings)
for nt in numTargets: # If 3 concentric rings involved, have to consider 3 choose 2, 3 choose 1, have to have as many conditions as the maximum
subsetsThis = list(itertools.combinations(ringNums,nt)) #all subsets of length nt from the universe of ringNums
numSubsetsThis = len( subsetsThis ); #print('numSubsetsThis=',numSubsetsThis)
repsNeeded = leastCommonMultipleSubsets / numSubsetsThis #that's the number of repetitions needed to make up for number of subsets of rings
for r in xrange(repsNeeded): #for nt with largest number of subsets, need no repetitions
for s in subsetsThis:
whichIsTarget = np.ones(numRings)*-999 #-999 is value meaning no target in that ring. 1 will mean target in ring
for ring in s:
whichIsTarget[ring] = np.random.random_integers(0, numObjs-1, size=1) #1
#print('numTargets=',nt,' whichIsTarget=',whichIsTarget,' and that is one of ',numSubsetsThis,' possibilities and we are doing ',repsNeeded,'repetitions')
for whichToQuery in xrange( leastCommonMultipleTargetNums ): #for each subset, have to query one. This is dealt out to the current subset by using modulus. It's assumed that this will result in an equal total number of queried rings
whichSubsetEntry = whichToQuery % nt #e.g. if nt=2 and whichToQuery can be 0,1,or2 then modulus result is 0,1,0. This implies that whichToQuery won't be totally counterbalanced with which subset, which is bad because
#might give more resources to one that's queried more often. Therefore for whichToQuery need to use least common multiple.
ringToQuery = s[whichSubsetEntry]; #print('ringToQuery=',ringToQuery,'subset=',s)
for basicShape in ['diamond','circle']:
for initialDirRing0 in [-1,1]:
stimList.append( {'basicShape':basicShape, 'numObjectsInRing':numObjs,'speed':speed,'initialDirRing0':initialDirRing0,
'numTargets':nt,'whichIsTarget':whichIsTarget,'ringToQuery':ringToQuery} )
#set up record of proportion correct in various conditions
trialSpeeds = list() #purely to allow report at end of how many trials got right at each speed
for s in stimList: trialSpeeds.append( s['speed'] )
uniqSpeeds = set(trialSpeeds) #reduce speedsUsed list to unique members, unordered set
uniqSpeeds = sorted( list(uniqSpeeds) )
uniqSpeeds = np.array( uniqSpeeds )
numRightWrongEachSpeedOrder = np.zeros([ len(uniqSpeeds), 2 ]); #summary results to print out at end
numRightWrongEachSpeedIdent = deepcopy(numRightWrongEachSpeedOrder)
#end setup of record of proportion correct in various conditions
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
logging.info( str('starting exp with name: "'+'TemporalFrequencyLimit'+'" at '+timeAndDateStr) )
logF = StringIO.StringIO() #kludge so I dont have to change all the print >>logF statements
logging.info( 'numtrials='+ str(trials.nTotal)+' and each trialDur='+str(trialDur)+' refreshRate='+str(refreshRate) )
print(' numtrials=', trials.nTotal)
print('rampUpDur=',rampUpDur, ' rampDownDur=', rampDownDur, ' secs', file=logF); logging.info( logF.getvalue() ); logF = StringIO.StringIO()
logging.info('task='+'track'+' respType='+respType)
logging.info( 'radii=' + str(radii) )
logging.flush()
RFcontourAmp= 0.0
RFcontourFreq = 2.0
RFcontourPhase = 0
def RFcontourCalcModulation(angle,freq,phase):
modulation = sin(angle*freq + phase) #radial frequency contour equation, e.g. http://www.journalofvision.org/content/14/11/12.full from Wilkinson et al. 1998
return modulation
def diamondShape(constSpeedOrConstRps,angle):
def triangleWave(period, phase):
#triangle wave is in sine phase (starts at 0)
y = -abs(phase % (2*period) - period) # http://stackoverflow.com/questions/1073606/is-there-a-one-line-function-that-generates-a-triangle-wave
#y goes from -period to 0. Need to rescale to -1 to 1 to match sine wave etc.
y = y/period*2 + 1
#Now goes from -1 to 1
return y
if constSpeedOrConstRps: #maintain constant rps. So, draw the blob at the prescribed theta. But change the radius to correspond to a square.
#As a consequence, will travel faster the more the direction differs from the circle, like around the corners
#Theta varies from 0 to 2pi. Taking its cosine, gives x coordinate on circle.
#Instead of taking cosine, I should just make it a linear ramp of x back and forth. That is, turn it into a triangle wave
#Want 0 to pi to be -1 to 1
x = triangleWave(pi,angle)
y = triangleWave(pi, (angle-pi/2)%(2*pi ))
#This will always describe a diamond. To change the shape would have to use vector rotation formula
else: #constant speed, so
#take theta not as the angle wanted, but what proportion (after being divided by 2pi) along the trajectory you want to go
angle = angle % (2*pi) #modulus
proportnTraj = angle/(2*pi)
if (proportnTraj < 0) or (proportnTraj>1):
print("Unexpected angle below 0!"); logging.error("Unexpected angle below 0!")
#how do I go from proportnTraj either to x,y or to theta?
#Analytic method is that as increase theta deviates from 4 points that touches circle, theta change is smaller for equal change in proportnTraj
#Brute force method is to divide into 4 segments, below.
zeroToFour = proportnTraj*4
if zeroToFour < 1: #headed NW up the first quadrant
x = 1 - (zeroToFour-0)
y = (zeroToFour-0)
elif zeroToFour < 2: #SW
x = - (zeroToFour - 1)
y = 1- (zeroToFour - 1)
elif zeroToFour < 3: #SE
x = -1+(zeroToFour - 2)
y = - (zeroToFour - 2)
elif zeroToFour < 4: #NE
x = (zeroToFour-3)
y = -1+(zeroToFour-3)
else: logging.error("Unexpected zeroToFour="+ str(zeroToFour))
#Max x is 1, meaning that it will be the diamond that circumscribes the unit circle.
#Otherwise need to adjust by calculating the average eccentricity of such a diamond and compensating, which I never did.
return x,y
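#Worked example for the constant-speed branch: angle=pi/2 gives proportnTraj=0.25
#and zeroToFour=1.0, so the SW branch yields (x,y)=(0,1), the top corner of the
#diamond, one quarter of the way along the trajectory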
ampTemporalRadiusModulation = 0.0 # 1.0/3.0
ampModulatnEachRingTemporalPhase = np.random.rand(numRings) * 2*np.pi
def xyThisFrameThisAngle(basicShape, radiiThisTrial, numRing, angle, thisFrameN, speed):
#period of oscillation should be in sec
r = radiiThisTrial[numRing]
timeSeconds = thisFrameN / refreshRate
def waveForm(type,speed,timeSeconds,numRing):
if speed==0 and ampTemporalRadiusModulation==0:
return 0 #this way don't get division by zero error when speed=0
else:
periodOfRadiusModulation = 1.0/speed#so if speed=2 rps, radius modulation period = 0.5 s
modulatnPhaseRadians = timeSeconds/periodOfRadiusModulation * 2*pi + ampModulatnEachRingTemporalPhase[numRing]
if type=='sin':
return sin(modulatnPhaseRadians)
elif type == 'sqrWave':
ans = np.sign( sin(modulatnPhaseRadians) ) #-1 or 1. That's great because that's also sin min and max
if ans==0: ans = -1+ 2*round( np.random.rand(1)[0] ) #exception case is when 0, gives 0, so randomly change that to -1 or 1
return ans
else: print('Error! unexpected type in waveForm of xyThisFrameThisAngle')
#if numRing==0:
# basicShape='diamond'
#else: basicShape ='circle' #DEBUGOFF to compare speeds of circle and diamond
if basicShape == 'circle':
rThis = r + waveForm('sin',speed,timeSeconds,numRing) * r * ampTemporalRadiusModulation
rThis += r * RFcontourAmp * RFcontourCalcModulation(angle,RFcontourFreq,RFcontourPhase)
x = rThis*cos(angle)
y = rThis*sin(angle)
elif basicShape == 'diamond': #actual square-shaped trajectory. Could also add all the modulations to this, later
x,y = diamondShape(constSpeedOrConstRps = False, angle=angle)
x*=r
y*=r
else:
print('Unexpected basicShape: ',basicShape)
return x,y
def angleChangeThisFrame(speed,initialDirectionEachRing, numRing, thisFrameN, lastFrameN):
angleMove = initialDirectionEachRing[numRing] * speed*2*pi*(thisFrameN-lastFrameN) / refreshRate
return angleMove
def oneFrameOfStim(thisTrial,currFrame,clock,useClock,offsetXYeachRing,initialDirectionEachRing,currAngle,blobToCueEachRing,isReversed,reversalNumEachRing,ShowTrackCueFrames):
#defining a function to draw each frame of stim. So can call second time for tracking task response phase
global cueRing,ringRadial,ringRadialR, currentlyCuedBlob #makes python treat these as globals rather than creating new local variables
global angleIniEachRing, correctAnswers
if useClock: #Don't count on not missing frames. Use actual time.
t = clock.getTime()
n = round(t*refreshRate)
else:
n = currFrame
if n<rampUpFrames:
contrast = cos( -pi+ pi* n/rampUpFrames ) /2. +.5 #starting from -pi trough of cos, and scale into 0->1 range
elif rampDownFrames>0 and n > rampDownStart:
contrast = cos(pi* (n-rampDownStart)/rampDownFrames ) /2.+.5 #starting from peak of cos, and scale into 0->1 range
else: contrast = 1
if n%2:
fixation.draw()#flicker fixation on and off at framerate to see when skip frame
else:
fixationBlank.draw()
fixationPoint.draw()
for numRing in range(numRings):
speed = thisTrial['speed']
if basicShape == 'diamond': #scale up speed so that it achieves that speed in rps even though it has farther to travel
perimeter = radii[numRing]*4.0
circum = 2*pi*radii[numRing]
speed = thisTrial['speed'] * perimeter/circum #Have to go this much faster to get all the way around in same amount of time as for circle
angleMove = angleChangeThisFrame(speed,initialDirectionEachRing, numRing, n, n-1)
currAngle[numRing] = currAngle[numRing]+angleMove*(isReversed[numRing])
angleObject0 = angleIniEachRing[numRing] + currAngle[numRing]
for nobject in range(numObjects):
if nobject==0:
if reversalNumEachRing[numRing] <= len(reversalTimesEachRing[numRing]): #haven't exceeded reversals assigned
reversalNum = int(reversalNumEachRing[numRing])
if len( reversalTimesEachRing[numRing] ) <= reversalNum:
msg = 'You failed to allocate enough reversal times, reached ' +str(reversalNum)+ ' reversals at '+ str(reversalTimesEachRing[numRing][reversalNum-1]) + \
'and still going, current time ='+str(n/refreshRate)+' asking for time of next one, will assume no more reversals'
logging.error(msg)
print(msg)
nextReversalTime = 9999 #didn't allocate enough, will just not reverse any more
else: #allocated enough reversals
nextReversalTime = reversalTimesEachRing[numRing][ reversalNum ]
if n > refreshRate * nextReversalTime: #have now exceeded time for this next reversal
isReversed[numRing] = -1*isReversed[numRing]
reversalNumEachRing[numRing] +=1
angleThisObject = angleObject0 + (2*pi)/numObjects*nobject
x,y = xyThisFrameThisAngle(thisTrial['basicShape'],radii, numRing,angleThisObject,n,speed)
x += offsetXYeachRing[numRing][0]
y += offsetXYeachRing[numRing][1]
if n< ShowTrackCueFrames and nobject==blobToCueEachRing[numRing]: #cue in white
weightToTrueColor = n*1.0/ShowTrackCueFrames #compute weighted average to ramp from white to correct color
blobColor = (1.0-weightToTrueColor)*cueColor + weightToTrueColor*colors_all[nobject]
blobColor *= contrast #also might want to change contrast, if everybody's contrast changing in contrast ramp
#print('weightToTrueColor=',weightToTrueColor,' n=',n, ' blobColor=',blobColor)
else: blobColor = colors_all[0]*contrast
#referenceCircle.setPos(offsetXYeachRing[numRing]); referenceCircle.draw() #debug
gaussian.setColor( blobColor, log=autoLogging )
gaussian.setPos([x,y])
gaussian.draw()
if blindspotFill:
blindspotStim.draw()
return angleIniEachRing,currAngle,isReversed,reversalNumEachRing
# #######End of function definition that displays the stimuli!!!! #####################################
showClickableRegions = True
def collectResponses(thisTrial,n,responses,responsesAutopilot,offsetXYeachRing,respRadius,currAngle,expStop ):
optionSets=numRings
#Draw response cues
numTimesRespSoundPlayed=0
if numTimesRespSoundPlayed<1: #2
respSound.setVolume(1)
if numRings > 1:
respSound.play()
numTimesRespSoundPlayed +=1
#respText.draw()
respondedEachToken = np.zeros([numRings,numObjects]) #potentially two sets of responses, one for each ring
optionIdexs=list();baseSeq=list();numOptionsEachSet=list();numRespsNeeded=list()
numRespsNeeded = np.zeros(numRings)
for ring in xrange(numRings):
optionIdexs.append([])
noArray=list()
for k in range(numObjects):noArray.append(colors_all[0])
baseSeq.append(np.array(noArray))
for i in range(numObjects):
optionIdexs[ring].append(baseSeq[ring][i % len(baseSeq[ring])] )
if ring == thisTrial['ringToQuery']:
numRespsNeeded[ ring ] = 1
else: numRespsNeeded[ ring ] = 0
numOptionsEachSet.append(len(optionIdexs[ring]))
respcount = 0; tClicked = 0; lastClickState=0; mouse1=0
for ring in range(optionSets):
responses.append( list() )
responsesAutopilot.append( [0]*int(numRespsNeeded[ring]) ) #autopilot response is 0; cast because numRespsNeeded is a float array
passThisTrial = False;
numTimesRespSoundPlayed=0
while respcount < sum(numRespsNeeded): #collecting response
#Draw visual response cue
if visuallyPostCue:
circlePostCue.setPos( offsetXYeachRing[ thisTrial['ringToQuery'] ] )
circlePostCue.setRadius( radii[ thisTrial['ringToQuery'] ] )
circlePostCue.draw()
for optionSet in range(optionSets): #draw this group (ring) of options
for ncheck in range( numOptionsEachSet[optionSet] ): #draw each available to click on in this ring
angle = (angleIniEachRing[optionSet]+currAngle[optionSet]) + ncheck*1.0/numOptionsEachSet[optionSet] *2.*pi
stretchOutwardRingsFactor = 1
x,y = xyThisFrameThisAngle(thisTrial['basicShape'],radii,optionSet,angle,n,thisTrial['speed'])
x = x+ offsetXYeachRing[optionSet][0]
y = y+ offsetXYeachRing[optionSet][1]
#draw colors, and circles around selected items. Colors are drawn in order they're in in optionsIdxs
opts=optionIdexs;
if respondedEachToken[optionSet][ncheck]: #draw circle around this one to indicate this option has been chosen
optionChosenCircle.setColor(np.array([1,-1,1]), log=autoLogging)
optionChosenCircle.setPos([x,y])
optionChosenCircle.draw()
gaussian.setColor( colors_all[0], log=autoLogging ) #draw blob
gaussian.setPos([x,y]);
gaussian.draw()
mouse1, mouse2, mouse3 = myMouse.getPressed()
if mouse1 and lastClickState==0: #only count this event if it is a new click. Problem is that mouse clicks continue to register as pressed for a long time
mouseX,mouseY = myMouse.getPos()
#print 'assumes window spans entire screen of ',monitorwidth,' cm; mouse position apparently in cm when units is set to deg = (',mouseX,',',mouseY,')'
#because mouse apparently giving coordinates in cm, I need to convert it to degrees of visual angle because that's what drawing is done in terms of
cmperpixel = monitorwidth*1.0/widthPix
degpercm = 1.0/cmperpixel/pixelperdegree;
mouseX = mouseX # * degpercm #mouse x location relative to center, converted to degrees
mouseY = mouseY #* degpercm #mouse x location relative to center, converted to degrees
for optionSet in range(optionSets):
for ncheck in range( numOptionsEachSet[optionSet] ):
angle = (angleIniEachRing[optionSet]+currAngle[optionSet]) + ncheck*1.0/numOptionsEachSet[optionSet] *2.*pi #radians
x,y = xyThisFrameThisAngle(thisTrial['basicShape'],radii,optionSet,angle,n,thisTrial['speed'])
x = x+ offsetXYeachRing[optionSet][0]
y = y+ offsetXYeachRing[optionSet][1]
#check whether mouse click was close to any of the colors
#Colors were drawn in order they're in in optionsIdxs
distance = sqrt(pow((x-mouseX),2)+pow((y-mouseY),2))
mouseToler = mouseChoiceArea + optionSet*mouseChoiceArea/6.#deg visual angle? origin=2
if showClickableRegions: #revealed in green every time you click
clickableRegion.setPos([x,y])
clickableRegion.setRadius(mouseToler)
clickableRegion.draw()
#print('mouseXY=',round(mouseX,2),',',round(mouseY,2),'xy=',x,',',y, ' distance=',distance, ' mouseToler=',mouseToler)
if distance<mouseToler:
c = opts[optionSet][ncheck] #idx of color that this option num corresponds to
if respondedEachToken[optionSet][ncheck]: #clicked one that already clicked on
if lastClickState ==0: #only count this event if is a distinct click from the one that selected the blob!
respondedEachToken[optionSet][ncheck] =0
responses[optionSet].remove(c) #this redundant list also of course encodes the order
respcount -= 1
#print('removed number ',ncheck, ' from clicked list')
else: #clicked on new one, need to add to response
numRespsAlready = len( np.where(respondedEachToken[optionSet])[0] )
#print('numRespsAlready=',numRespsAlready,' numRespsNeeded= ',numRespsNeeded,' responses=',responses) #debugOFF
if numRespsAlready >= numRespsNeeded[optionSet]:
pass #not allowed to select this one until de-select other
else:
respondedEachToken[optionSet][ncheck] = 1 #register this one has been clicked
responses[optionSet].append(c) #this redundant list also of course encodes the order
respcount += 1
#print('added ',ncheck,'th response to clicked list')
#print 'response=', response, ' respcount=',respcount, ' lastClickState=',lastClickState, ' after affected by click'
#end if mouse clicked
for key in event.getKeys(): #check if pressed abort-type key
if key in ['escape','q']:
expStop = True
respcount = 1
lastClickState = mouse1
if autopilot:
respcount = 1
for i in xrange(numRings):
for j in xrange(numObjects):
respondedEachToken[i][j] = 1 #must set to True for tracking task with click responses, because it is used to determine which one was clicked on
if blindspotFill:
blindspotStim.draw()
myWin.flip() #(clearBuffer=True)
if screenshot and not screenshotDone: #'not', because bitwise ~ on a Python bool is always truthy
myWin.getMovieFrame()
screenshotDone = True
myWin.saveMovieFrames('respScreen.jpg')
#end response collection loop for non-'track' task
#if [] in responses: responses.remove([]) #this is for case of tracking with click response, when only want one response but draw both rings. One of responses to optionset will then be blank. Need to get rid of it
return responses,responsesAutopilot,respondedEachToken, expStop
####### #End of function definition that collects responses!!!! #################################################
print('Starting experiment of',trials.nTotal,'trials. Current trial is trial 0.')
#print header for data file
print('trialnum\tsubject\tbasicShape\tnumObjects\tspeed\tinitialDirRing0', end='\t', file=dataFile)
print('orderCorrect\ttrialDurTotal\tnumTargets', end= '\t', file=dataFile)
for i in range(numRings):
print('whichIsTarget',i, sep='', end='\t', file=dataFile)
print('ringToQuery',end='\t',file=dataFile)
for i in range(numRings):dataFile.write('Direction'+str(i)+'\t')
for i in range(numRings):dataFile.write('respAdj'+str(i)+'\t')
for r in range(numRings):
for j in range(maxPossibleReversals()):
dataFile.write('rev'+str(r)+'_'+str(j)+'\t') #reversal times for each ring
print('timingBlips', file=dataFile)
#end of header
trialClock = core.Clock()
stimClock = core.Clock()
trialNum=0; numTrialsOrderCorrect=0; numAllCorrectlyIdentified=0; blueMistakes=0; expStop=False; framesSaved=0;
thisTrial = trials.next()
trialDurTotal=0;
ts = list();
if eyetracking:
eyeMoveFile=('EyeTrack_'+subject+'_'+timeAndDateStr+'.EDF')
tracker=Tracker_EyeLink(myWin,trialClock,subject,1, 'HV5',(255,255,255),(0,0,0),False,(widthPix,heightPix))
randomStartAngleEachRing = True
randomInitialDirExceptRing0 = True
oppositeInitialDirFirstTwoRings = True
while trialNum < trials.nTotal and expStop==False:
accelerateComputer(1,process_priority, disable_gc) #speed up
colorRings=list();preDrawStimToGreasePipeline = list()
isReversed= list([1]) * numRings #always takes values of -1 or 1
reversalNumEachRing = list([0]) * numRings
if randomStartAngleEachRing:
angleIniEachRing = list( np.random.uniform(0,2*pi,size=[numRings]) )
else:
angleIniEachRing = [0]*numRings
currAngle = list([0]) * numRings
if randomInitialDirExceptRing0:
initialDirectionEachRing = list( np.random.random_integers(0,1,size=[numRings]) *2 -1 ) #randomise initial direction of each ring
initialDirectionEachRing[0] = thisTrial['initialDirRing0']
if oppositeInitialDirFirstTwoRings and numRings>1:
initialDirectionEachRing[1] = -1*initialDirectionEachRing[0]
else:
initialDirectionEachRing = [ thisTrial['initialDirRing0'] ] * numRings
trackVariableIntervDur=np.random.uniform(0,trackVariableIntervMax) #random interval tacked onto tracking to make total duration variable so cant predict final position
trialDurTotal = maxTrialDur() - trackVariableIntervDur
trialDurFrames= int( trialDurTotal*refreshRate )
xyTargets = np.zeros( [thisTrial['numTargets'], 2] ) #need this for eventual case where targets can change what ring they are in
numDistracters = numRings*thisTrial['numObjectsInRing'] - thisTrial['numTargets']
xyDistracters = np.zeros( [numDistracters, 2] )
reversalTimesEachRing = getReversalTimes()
#print('reversalTimesEachRing=',np.around(np.array(reversalTimesEachRing),2),' maxPossibleReversals=',maxPossibleReversals()) #debugOFF
numObjects = thisTrial['numObjectsInRing']
centerInMiddleOfSegment =360./numObjects/2.0
blobsToPreCue=thisTrial['whichIsTarget']
core.wait(.1)
myMouse.setVisible(False)
if eyetracking:
tracker.startEyeTracking(trialNum,calibTrial=True,widthPix=widthPix,heightPix=heightPix) # tell eyetracker to start recording. Does this also somehow allow it to draw on the screen for the calibration?
fixatnPeriodFrames = int( (np.random.rand(1)/2.+0.8) *refreshRate) #random fixation interval between 0.8 and 1.3 s
for i in range(fixatnPeriodFrames):
if i%2:
fixation.draw()
else: fixationBlank.draw()
myWin.flip() #clearBuffer=True)
trialClock.reset()
t0=trialClock.getTime(); t=trialClock.getTime()-t0
del ts[:] # clear the list of frame times in place
stimClock.reset()
for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
offsetXYeachRing=[[0,0],[0,0]]
(angleIni,currAngle,isReversed,reversalNumEachRing) = \
oneFrameOfStim(thisTrial,n,stimClock,useClock,offsetXYeachRing,initialDirectionEachRing,currAngle,blobsToPreCue,isReversed,reversalNumEachRing,ShowTrackCueFrames) #da big function
if exportImages:
myWin.getMovieFrame(buffer='back') #for later saving
framesSaved +=1
myWin.flip(clearBuffer=True)
t=trialClock.getTime()-t0; ts.append(t);
if n==trialDurFrames-1: event.clearEvents(eventType='mouse');
if eyetracking:
tracker.stopEyeTracking() #This seems to work immediately and cause the Eyelink machine to save the EDF file to its own drive
#end of big stimulus loop
accelerateComputer(0,process_priority, disable_gc) #turn off stuff that sped everything up
#check for timing problems
interframeIntervs = np.diff(ts)*1000 #difference in time between successive frames, in ms
#print >>logF, 'trialnum=',trialNum, ' interframe intervs were ',around(interframeIntervs,1)
idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded longerThanRefreshTolerance of expected duration
numCasesInterframeLong = len( idxsInterframeLong )
if numCasesInterframeLong >0:
longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
if demo:
longFramesStr += 'not printing them all because in demo mode'
else:
longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
if longFramesStr != None:
print('trialnum=',trialNum,' ',longFramesStr)
print('trialnum=',trialNum,' ',longFramesStr, file=logF)
if not demo:
flankingAlso=list()
for idx in idxsInterframeLong: #also print timing of one before and one after long frame
if idx-1>=0: flankingAlso.append(idx-1)
else: flankingAlso.append(np.NaN)
flankingAlso.append(idx)
if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
else: flankingAlso.append(np.NaN)
#print >>logF, 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) )
#end timing check
myMouse.setVisible(True)
#ansIter=(answer).reshape(1,-1)[0]; ln=len(ansIter) #in case it's two dimensions like in bindRadially
#print 'answer=',answer,' or ', [colorNames[ int(ansIter[i]) ] for i in range( ln )], ' it is type ',type(answer), ' and shape ', np.shape(answer)
#shuffledAns = deepcopy(answer); #just to use for options, to ensure they are in a different order
#if numObjects == 2:
# shuffledAns = shuffledAns[0:2] #kludge. Really this should be controlled by nb_colors but that would require fancy array indexing where I currently have 0,2,1 etc above
# np.random.shuffle(shuffledAns)
#if len(np.shape(answer)) >1: #more than one dimension, because bindRadiallyTask
# np.random.shuffle(shuffledAns[:,0]) #unfortunately for bindRadially task, previous shuffling shuffled pairs, not individuals
#print 'answer after shuffling=',shuffledAns
passThisTrial=False
#Create postcues
visuallyPostCue = True
ringQuerySoundFileNames = [ 'innerring.wav', 'middlering.wav', 'outerring.wav' ]
soundDir = 'sounds'
if numRings==3:
soundFileNum = thisTrial['ringToQuery']
else: #eg if numRings==2:
soundFileNum = thisTrial['ringToQuery']*2 #outer, not middle for ring==1
soundPathAndFile= os.path.join(soundDir, ringQuerySoundFileNames[ soundFileNum ])
respSound = sound.Sound(soundPathAndFile, secs=.2)
postCueNumBlobsAway=-999 #doesn't apply to click tracking and non-tracking task
# ####### response set up answer
responses = list(); responsesAutopilot = list()
responses,responsesAutopilot,respondedEachToken,expStop = \
collectResponses(thisTrial,n,responses,responsesAutopilot,offsetXYeachRing,respRadius,currAngle,expStop) #collect responses!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#####
#print("responses=",responses,";respondedEachToken=",respondedEachToken,"expStop=",expStop) #debugOFF
core.wait(.1)
if exportImages: #maybe catch one frame of response
myWin.saveMovieFrames('exported/frame.png')
expStop=True
#Handle response, calculate whether correct, ########################################
if autopilot:responses = responsesAutopilot
if True: #not expStop: #if short on responses, too hard to write code to handle it so don't even try
orderCorrect=0; numColorsCorrectlyIdentified=0; blueMistake=0;respAdj=list();sCorrect=list();targetCorrect=0;
for l in range(numRings):
if responses[l] !=[]:
tokenChosenEachRing[l]=np.where(respondedEachToken[l]) [0][0]
respAdjs= initialDirectionEachRing[l]*isReversed[l]*(tokenChosenEachRing[l]-thisTrial['whichIsTarget'][l])
if respAdjs> numObjects/2. : respAdjs-= numObjects #code in terms of closest way around. So if 9 objects and 8 ahead, code as -1
if respAdjs < -numObjects/2. : respAdjs += numObjects
respAdj.append(respAdjs)
if tokenChosenEachRing[l]==thisTrial['whichIsTarget'][l]:
sCorrects=1
sCorrect.append(sCorrects);
targetCorrect+=sCorrects
else:
respAdj.append(-999)
sCorrect.append(0)
if targetCorrect==1: orderCorrect = 3
else: orderCorrect = 0
if respType=='order': #: this used to work without last conditional
numColorsCorrectlyIdentified=-1
else:
numColorsCorrectlyIdentified = len( np.intersect1d(response,answer) ) #legacy branch from the color-report task; 'response' and 'answer' are not set in this version
if numColorsCorrectlyIdentified < 3:
if 4 in answer and not (3 in answer): #dark blue present
if 3 in response: #light blue in answer
blueMistake =1
elif 3 in answer and not (4 in answer): #light blue present
if 4 in response: #dark blue in answer
blueMistake =1
#end if statement for if not expStop
if passThisTrial:orderCorrect = -1 #indicate for data analysis that observer opted out of this trial, because think they moved their eyes
#header print('trialnum\tsubject\tbasicShape\tnumObjects\tspeed\tinitialDirRing0\tangleIni
print(trialNum,subject,thisTrial['basicShape'],thisTrial['numObjectsInRing'],thisTrial['speed'],thisTrial['initialDirRing0'],sep='\t', end='\t', file=dataFile)
print(orderCorrect,'\t',trialDurTotal,'\t',thisTrial['numTargets'],'\t', end=' ', file=dataFile) #override newline end
for i in range(numRings): print( thisTrial['whichIsTarget'][i], end='\t', file=dataFile )
print( thisTrial['ringToQuery'],end='\t',file=dataFile )
for i in range(numRings):dataFile.write(str(round(initialDirectionEachRing[i],4))+'\t')
for i in range(numRings):dataFile.write(str(round(respAdj[i],4))+'\t')
for k in range(numRings):
for i in range(len(reversalTimesEachRing[k])):
print(round(reversalTimesEachRing[k][i],4),'\t', end='', file=dataFile)
for j in range(i+1,maxPossibleReversals()):
print('-999\t', end='', file=dataFile)
print(numCasesInterframeLong, file=dataFile)
numTrialsOrderCorrect += (orderCorrect >0) #so count -1 as 0
numAllCorrectlyIdentified += (numColorsCorrectlyIdentified==3)
speedIdx = np.where(uniqSpeeds==thisTrial['speed'])[0][0] #extract index, where returns a list with first element array of the indexes
numRightWrongEachSpeedOrder[ speedIdx, (orderCorrect >0) ] +=1 #if right, add to 1th column, otherwise add to 0th column count
numRightWrongEachSpeedIdent[ speedIdx, (numColorsCorrectlyIdentified==3) ] +=1
blueMistakes+=blueMistake
dataFile.flush(); logF.flush();
if feedback and not expStop:
if orderCorrect==3 :correct=1
else:correct=0
if correct:
highA = sound.Sound('G',octave=5, sampleRate=6000, secs=.8, bits=8)
highA.setVolume(0.3) #low volume because piercing, loud relative to inner, outer files
highA.play()
else: #incorrect
lowD = sound.Sound('E',octave=3, sampleRate=6000, secs=.8, bits=8)
lowD.setVolume(0.9)
lowD.play()
trialNum+=1
waitForKeyPressBetweenTrials = False
if trialNum< trials.nTotal:
if trialNum%( max(trials.nTotal/4,1) ) ==0: #have to enforce at least 1, otherwise will modulus by 0 when #trials is less than 4
pctDone = round( (1.0*trialNum) / (1.0*trials.nTotal)*100, 0 )
NextRemindPctDoneText.setText( str(pctDone) + '% complete' )
NextRemindCountText.setText( str(trialNum) + ' of ' + str(trials.nTotal) )
for i in range(5):
myWin.flip(clearBuffer=True)
NextRemindPctDoneText.draw()
NextRemindCountText.draw()
waitingForKeypress = False
if waitForKeyPressBetweenTrials:
waitingForKeypress=True
NextText.setText('Press "SPACE" to continue')
NextText.draw()
NextRemindCountText.draw()
NextRemindPctDoneText.draw() #NextRemindText was never defined; the percent-done reminder appears to be what was intended
myWin.flip(clearBuffer=True)
else: core.wait(0.15)
while waitingForKeypress:
if autopilot:
waitingForKeypress=False
elif expStop == True:
waitingForKeypress=False
for key in event.getKeys(): #check if pressed abort-type key
if key in ['space']:
waitingForKeypress=False
if key in ['escape','q']:
expStop = True
waitingForKeypress=False
myWin.clearBuffer()
thisTrial = trials.next()
core.wait(.1); time.sleep(.1)
#end trials loop ###########################################################
if expStop == True:
print('user aborted experiment on keypress with trials trialNum=', trialNum, file=logF)
print('user aborted experiment on keypress with trials trialNum=', trialNum)
print('finishing at ',timeAndDateStr, file=logF)
print('%corr order report= ', round( numTrialsOrderCorrect*1.0/trialNum*100., 2) , '% of ',trialNum,' trials', end=' ')
print('%corr each speed: ', end=' ')
print(np.around( numRightWrongEachSpeedOrder[:,1] / ( numRightWrongEachSpeedOrder[:,0] + numRightWrongEachSpeedOrder[:,1]), 2))
print('\t\t\t\tnum trials each speed =', numRightWrongEachSpeedOrder[:,0] + numRightWrongEachSpeedOrder[:,1])
logging.flush(); dataFile.close(); logF.close()
if eyetracking:
if eyetrackFileGetFromEyelinkMachine:
eyetrackerFileWaitingText = visual.TextStim(myWin,pos=(-.1,0),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center', units='norm',autoLog=autoLogging)
eyetrackerFileWaitingText.setText('Waiting for eyetracking file from Eyelink computer. Do not abort the eyetracking machine or the file will not be saved.')
eyetrackerFileWaitingText.draw()
myWin.flip()
msg = tracker.closeConnectionToEyeTracker(eyeMoveFile) #this requests the data back and thus can be very time-consuming, like 20 min or more
print(msg); print(msg,file=logF) #"Eyelink connection closed successfully" or "Eyelink not available, not closed properly"
else:
print('You will have to get the Eyelink EDF file off the eyetracking machine by hand')
if quitFinder: #turn Finder back on
applescript="\'tell application \"Finder\" to launch\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
core.quit()
| alexholcombe/MOTcircular | MOTcircular.py | Python | mit | 54119 | ["Gaussian"] | 86f30c7f91f64f6a1c2d2bbd3296f781e0d644bb474a0b4a3b499713d0a1ad5b |
from __future__ import print_function, division
from sympy import (Poly, igcd, divisors, sign, symbols, S, Integer, Wild, Symbol, factorint,
Add, Mul, solve, ceiling, floor, sqrt, sympify, Subs, ilcm, Matrix, factor_list, perfect_power,
isprime, nextprime, integer_nthroot)
from sympy.core.function import _mexpand
from sympy.simplify.simplify import rad_rationalize
from sympy.utilities import default_sort_key, numbered_symbols
from sympy.core.numbers import igcdex
from sympy.ntheory.residue_ntheory import sqrt_mod
from sympy.core.compatibility import range
from sympy.core.relational import Eq
from sympy.solvers.solvers import check_assumptions
__all__ = ['diophantine', 'diop_solve', 'classify_diop', 'diop_linear', 'base_solution_linear',
'diop_quadratic', 'diop_DN', 'cornacchia', 'diop_bf_DN', 'transformation_to_DN', 'find_DN',
'diop_ternary_quadratic', 'square_factor', 'descent', 'diop_general_pythagorean',
'diop_general_sum_of_squares', 'partition', 'sum_of_three_squares', 'sum_of_four_squares']
def diophantine(eq, param=symbols("t", integer=True)):
"""
Simplify the solution procedure of diophantine equation ``eq`` by
converting it into a product of terms which should equal zero.
For example, when solving `x^2 - y^2 = 0`, this is treated as
`(x + y)(x - y) = 0`, and `x + y = 0` and `x - y = 0` are solved independently
and combined. Each term is solved by calling ``diop_solve()``.
Output of ``diophantine()`` is a set of tuples. Each tuple represents a
solution of the input equation. In a tuple, the solution for each variable is
listed according to the alphabetic order of the input variables, i.e. if we
have an equation with two variables `a` and `b`, the first element of the
tuple will give the solution for `a` and the second element will give the
solution for `b`.
Usage
=====
``diophantine(eq, t)``: Solve the diophantine equation ``eq``.
``t`` is the parameter to be used by ``diop_solve()``.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import diophantine
>>> from sympy.abc import x, y, z
>>> diophantine(x**2 - y**2)
set([(-t, -t), (t, -t)])
#>>> diophantine(x*(2*x + 3*y - z))
#set([(0, n1, n2), (3*t - z, -2*t + z, z)])
#>>> diophantine(x**2 + 3*x*y + 4*x)
#set([(0, n1), (3*t - 4, -t)])
See Also
========
diop_solve()
"""
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs
eq = Poly(eq).as_expr()
if not eq.is_polynomial() or eq.is_number:
raise TypeError("Equation input format not supported")
var = list(eq.expand(force=True).free_symbols)
var.sort(key=default_sort_key)
terms = factor_list(eq)[1]
sols = set([])
for term in terms:
base = term[0]
var_t, jnk, eq_type = classify_diop(base)
solution = diop_solve(base, param)
if eq_type in ["linear", "homogeneous_ternary_quadratic", "general_pythagorean"]:
if merge_solution(var, var_t, solution) != ():
sols.add(merge_solution(var, var_t, solution))
elif eq_type in ["binary_quadratic", "general_sum_of_squares", "univariate"]:
for sol in solution:
if merge_solution(var, var_t, sol) != ():
sols.add(merge_solution(var, var_t, sol))
return sols
def merge_solution(var, var_t, solution):
"""
This is used to construct the full solution from the solutions of sub
equations.
For example when solving the equation `(x - y)(x^2 + y^2 - z^2) = 0`,
solutions for each of the equations `x-y = 0` and `x^2 + y^2 - z^2` are
found independently. Solutions for `x - y = 0` are `(x, y) = (t, t)`. But
we should introduce a value for z when we output the solution for the
original equation. This function converts `(t, t)` into `(t, t, n_{1})`
where `n_{1}` is an integer parameter.
"""
l = []
if None in solution:
return ()
solution = iter(solution)
params = numbered_symbols("n", integer=True, start=1)
for v in var:
if v in var_t:
l.append(next(solution))
else:
l.append(next(params))
for val, symb in zip(l, var):
if check_assumptions(val, **symb.assumptions0) is False:
return tuple()
return tuple(l)
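# Illustrative note (a sketch, assuming symbols x, y, z and a parameter t as in
# the docstring): merge_solution([x, y, z], [x, y], (t, t)) walks the full
# variable list, takes (t, t) for x and y, and draws a fresh integer parameter
# n_1 for z, yielding (t, t, n_1); if any entry of the sub-solution is None,
# () is returned.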
def diop_solve(eq, param=symbols("t", integer=True)):
"""
Solves the diophantine equation ``eq``.
Similar to ``diophantine()`` but doesn't try to factor ``eq`` as the latter
does. Uses ``classify_diop()`` to determine the type of the equation and
calls the appropriate solver function.
Usage
=====
``diop_solve(eq, t)``: Solve diophantine equation, ``eq`` using ``t``
as a parameter if needed.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is a parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import diop_solve
>>> from sympy.abc import x, y, z, w
>>> diop_solve(2*x + 3*y - 5)
(3*t - 5, -2*t + 5)
>>> diop_solve(4*x + 3*y -4*z + 5)
(3*t + 4*z - 5, -4*t - 4*z + 5, z)
>>> diop_solve(x + 3*y - 4*z + w -6)
(t, -t - 3*y + 4*z + 6, y, z)
>>> diop_solve(x**2 + y**2 - 5)
set([(-2, -1), (-2, 1), (2, -1), (2, 1)])
See Also
========
diophantine()
"""
var, coeff, eq_type = classify_diop(eq)
if eq_type == "linear":
return _diop_linear(var, coeff, param)
elif eq_type == "binary_quadratic":
return _diop_quadratic(var, coeff, param)
elif eq_type == "homogeneous_ternary_quadratic":
x_0, y_0, z_0 = _diop_ternary_quadratic(var, coeff)
return _parametrize_ternary_quadratic((x_0, y_0, z_0), var, coeff)
elif eq_type == "general_pythagorean":
return _diop_general_pythagorean(var, coeff, param)
elif eq_type == "univariate":
l = solve(eq)
s = set([])
for soln in l:
if isinstance(soln, Integer):
s.add((soln,))
return s
elif eq_type == "general_sum_of_squares":
return _diop_general_sum_of_squares(var, coeff)
def classify_diop(eq):
"""
Helper routine used by diop_solve() to find the type of ``eq``.
Returns a tuple containing the variables (free symbols) and their
coefficients, along with the type of the diophantine equation. Variables are
returned as a list and coefficients are returned as a dict with the key being
the respective term and the constant term keyed to Integer(1). Type is an
element in the set {"linear", "binary_quadratic", "general_pythagorean",
"homogeneous_ternary_quadratic", "univariate", "general_sum_of_squares"}
Usage
=====
``classify_diop(eq)``: Return variables, coefficients and type of the
``eq``.
Details
=======
``eq`` should be an expression which is assumed to be zero.
Examples
========
>>> from sympy.solvers.diophantine import classify_diop
>>> from sympy.abc import x, y, z, w, t
>>> classify_diop(4*x + 6*y - 4)
([x, y], {1: -4, x: 4, y: 6}, 'linear')
>>> classify_diop(x + 3*y -4*z + 5)
([x, y, z], {1: 5, x: 1, y: 3, z: -4}, 'linear')
>>> classify_diop(x**2 + y**2 - x*y + x + 5)
([x, y], {1: 5, x: 1, x**2: 1, y: 0, y**2: 1, x*y: -1}, 'binary_quadratic')
"""
eq = eq.expand(force=True)
var = list(eq.free_symbols)
var.sort(key=default_sort_key)
coeff = {}
diop_type = None
coeff = dict([reversed(t.as_independent(*var)) for t in eq.args])
for v in coeff:
if not isinstance(coeff[v], Integer):
raise TypeError("Coefficients should be Integers")
if len(var) == 1:
diop_type = "univariate"
elif Poly(eq).total_degree() == 1:
diop_type = "linear"
elif Poly(eq).total_degree() == 2 and len(var) == 2:
diop_type = "binary_quadratic"
x, y = var[:2]
if isinstance(eq, Mul):
coeff = {x**2: 0, x*y: eq.args[0], y**2: 0, x: 0, y: 0, Integer(1): 0}
else:
for term in [x**2, y**2, x*y, x, y, Integer(1)]:
if term not in coeff.keys():
coeff[term] = Integer(0)
elif Poly(eq).total_degree() == 2 and len(var) == 3 and Integer(1) not in coeff.keys():
for v in var:
if v in coeff.keys():
diop_type = "inhomogeneous_ternary_quadratic"
break
else:
diop_type = "homogeneous_ternary_quadratic"
x, y, z = var[:3]
for term in [x**2, y**2, z**2, x*y, y*z, x*z]:
if term not in coeff.keys():
coeff[term] = Integer(0)
elif Poly(eq).degree() == 2 and len(var) >= 3:
for v in var:
if v in coeff.keys():
diop_type = "inhomogeneous_general_quadratic"
break
else:
if Integer(1) in coeff.keys():
constant_term = True
else:
constant_term = False
non_square_degree_2_terms = False
for v in var:
for u in var:
if u != v and u*v in coeff.keys():
non_square_degree_2_terms = True
break
if non_square_degree_2_terms:
break
if constant_term and non_square_degree_2_terms:
diop_type = "inhomogeneous_general_quadratic"
elif constant_term and not non_square_degree_2_terms:
for v in var:
if coeff[v**2] != 1:
break
else:
diop_type = "general_sum_of_squares"
elif not constant_term and non_square_degree_2_terms:
diop_type = "homogeneous_general_quadratic"
else:
coeff_sign_sum = 0
for v in var:
if not isinstance(sqrt(abs(Integer(coeff[v**2]))), Integer):
break
coeff_sign_sum = coeff_sign_sum + sign(coeff[v**2])
else:
if abs(coeff_sign_sum) == len(var) - 2 and not constant_term:
diop_type = "general_pythagorean"
elif Poly(eq).total_degree() == 3 and len(var) == 2:
x, y = var[:2]
diop_type = "cubic_thue"
for term in [x**3, x**2*y, x*y**2, y**3, Integer(1)]:
if term not in coeff.keys():
coeff[term] = Integer(0)
if diop_type is not None:
return var, coeff, diop_type
else:
raise NotImplementedError("Still not implemented")
def diop_linear(eq, param=symbols("t", integer=True)):
"""
Solves linear diophantine equations.
A linear diophantine equation is an equation of the form `a_{1}x_{1} +
a_{2}x_{2} + .. + a_{n}x_{n} = 0` where `a_{1}, a_{2}, ..a_{n}` are
integer constants and `x_{1}, x_{2}, ..x_{n}` are integer variables.
Usage
=====
``diop_linear(eq)``: Returns a tuple containing solutions to the
diophantine equation ``eq``. Values in the tuple are arranged in the same
order as the sorted variables.
Details
=======
``eq`` is a linear diophantine equation which is assumed to be zero.
``param`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import diop_linear
>>> from sympy.abc import x, y, z, t
>>> from sympy import Integer
>>> diop_linear(2*x - 3*y - 5) #solves equation 2*x - 3*y -5 = 0
(-3*t - 5, -2*t - 5)
Here x = -3*t - 5 and y = -2*t - 5
>>> diop_linear(2*x - 3*y - 4*z -3)
(-3*t - 4*z - 3, -2*t - 4*z - 3, z)
See Also
========
diop_quadratic(), diop_ternary_quadratic(), diop_general_pythagorean(),
diop_general_sum_of_squares()
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "linear":
return _diop_linear(var, coeff, param)
def _diop_linear(var, coeff, param):
x, y = var[:2]
a = coeff[x]
b = coeff[y]
if len(var) == len(coeff):
c = 0
else:
c = -coeff[Integer(1)]
if len(var) == 2:
sol_x, sol_y = base_solution_linear(c, a, b, param)
return (sol_x, sol_y)
elif len(var) > 2:
X = []
Y = []
for v in var[2:]:
sol_x, sol_y = base_solution_linear(-coeff[v], a, b)
X.append(sol_x*v)
Y.append(sol_y*v)
sol_x, sol_y = base_solution_linear(c, a, b, param)
X.append(sol_x)
Y.append(sol_y)
l = []
if None not in X and None not in Y:
l.append(Add(*X))
l.append(Add(*Y))
for v in var[2:]:
l.append(v)
else:
for v in var:
l.append(None)
return tuple(l)
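# Sketch of the reduction used above for more than two variables: the equation
# is solved as a two-variable problem in x and y, with every extra variable v
# contributing a particular solution of a*x + b*y = -coeff[v], scaled by v.
# For the docstring example 4*x + 3*y - 4*z + 5 = 0 this yields
# (3*t + 4*z - 5, -4*t - 4*z + 5, z); substituting back,
# 4*(3*t + 4*z - 5) + 3*(-4*t - 4*z + 5) - 4*z + 5 = 0 as required.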
def base_solution_linear(c, a, b, t=None):
"""
Return the base solution for a linear diophantine equation with two
variables.
Used by ``diop_linear()`` to find the base solution of a linear
Diophantine equation. If ``t`` is given then the parametrized solution is
returned.
Usage
=====
``base_solution_linear(c, a, b, t)``: ``a``, ``b``, ``c`` are coefficients
in `ax + by = c` and ``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import base_solution_linear
>>> from sympy.abc import t
>>> base_solution_linear(5, 2, 3) # equation 2*x + 3*y = 5
(-5, 5)
>>> base_solution_linear(0, 5, 7) # equation 5*x + 7*y = 0
(0, 0)
>>> base_solution_linear(5, 2, 3, t) # equation 2*x + 3*y = 5
(3*t - 5, -2*t + 5)
>>> base_solution_linear(0, 5, 7, t) # equation 5*x + 7*y = 0
(7*t, -5*t)
"""
d = igcd(a, igcd(b, c))
a = a // d
b = b // d
c = c // d
if c == 0:
if t is not None:
return (b*t, -a*t)
else:
return (S.Zero, S.Zero)
else:
x0, y0, d = extended_euclid(int(abs(a)), int(abs(b)))
x0 = x0 * sign(a)
y0 = y0 * sign(b)
if divisible(c, d):
if t is not None:
return (c*x0 + b*t, c*y0 - a*t)
else:
return (Integer(c*x0), Integer(c*y0))
else:
return (None, None)
def extended_euclid(a, b):
"""
For given ``a``, ``b`` returns a tuple containing integers `x`, `y` and `d`
such that `ax + by = d`. Here `d = gcd(a, b)`.
Usage
=====
``extended_euclid(a, b)``: returns `x`, `y` and `\gcd(a, b)`.
Details
=======
``a`` Any instance of Integer.
``b`` Any instance of Integer.
Examples
========
>>> from sympy.solvers.diophantine import extended_euclid
>>> extended_euclid(4, 6)
(-1, 1, 2)
>>> extended_euclid(3, 5)
(2, -1, 1)
"""
if b == 0:
return (1, 0, a)
x0, y0, d = extended_euclid(b, a%b)
x, y = y0, x0 - (a//b) * y0
return x, y, d
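# Quick check of the recursion above on the docstring examples: for
# extended_euclid(4, 6), the returned (x, y, d) = (-1, 1, 2) satisfies
# 4*(-1) + 6*1 = 2 = gcd(4, 6); likewise 3*2 + 5*(-1) = 1 = gcd(3, 5).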
def divisible(a, b):
"""
Returns `True` if ``a`` is divisible by ``b`` and `False` otherwise.
"""
return igcd(int(a), int(b)) == abs(int(b))
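# Note on the gcd trick above: igcd(a, b) == |b| exactly when b divides a, e.g.
# divisible(12, 4) is True since igcd(12, 4) == 4, while divisible(12, 5) is
# False since igcd(12, 5) == 1 != 5.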
def diop_quadratic(eq, param=symbols("t", integer=True)):
"""
Solves quadratic diophantine equations.
i.e. equations of the form `Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0`. Returns a
set of tuples `(x, y)` containing the solutions. If there
are no solutions then `(None, None)` is returned.
Usage
=====
``diop_quadratic(eq, param)``: ``eq`` is a quadratic binary diophantine
equation. ``param`` is used to indicate the parameter to be used in the
solution.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``param`` is a parameter to be used in the solution.
Examples
========
>>> from sympy.abc import x, y, t
>>> from sympy.solvers.diophantine import diop_quadratic
>>> diop_quadratic(x**2 + y**2 + 2*x + 2*y + 2, t)
set([(-1, -1)])
References
==========
.. [1] Methods to solve Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0,[online],
Available: http://www.alpertron.com.ar/METHODS.HTM
.. [2] Solving the equation ax^2+ bxy + cy^2 + dx + ey + f= 0, [online],
Available: http://www.jpr2718.org/ax2p.pdf
See Also
========
diop_linear(), diop_ternary_quadratic(), diop_general_sum_of_squares(),
diop_general_pythagorean()
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "binary_quadratic":
return _diop_quadratic(var, coeff, param)
def _diop_quadratic(var, coeff, t):
x, y = var[:2]
for term in [x**2, y**2, x*y, x, y, Integer(1)]:
if term not in coeff.keys():
coeff[term] = Integer(0)
A = coeff[x**2]
B = coeff[x*y]
C = coeff[y**2]
D = coeff[x]
E = coeff[y]
F = coeff[Integer(1)]
d = igcd(A, igcd(B, igcd(C, igcd(D, igcd(E, F)))))
A = A // d
B = B // d
C = C // d
D = D // d
E = E // d
F = F // d
# (1) Linear case: A = B = C = 0 ==> considered under linear diophantine equations
# (2) Simple-Hyperbolic case: A = C = 0, B != 0
# In this case equation can be converted to (Bx + E)(By + D) = DE - BF
# We consider two cases; DE - BF = 0 and DE - BF != 0
# More details, http://www.alpertron.com.ar/METHODS.HTM#SHyperb
l = set([])
if A == 0 and C == 0 and B != 0:
if D*E - B*F == 0:
if divisible(int(E), int(B)):
l.add((-E/B, t))
if divisible(int(D), int(B)):
l.add((t, -D/B))
else:
div = divisors(D*E - B*F)
div = div + [-term for term in div]
for d in div:
if divisible(int(d - E), int(B)):
x0 = (d - E) // B
if divisible(int(D*E - B*F), int(d)):
if divisible(int((D*E - B*F)// d - D), int(B)):
y0 = ((D*E - B*F) // d - D) // B
l.add((x0, y0))
# (3) Parabolic case: B**2 - 4*A*C = 0
# There are two subcases to be considered in this case.
# sqrt(c)D - sqrt(a)E = 0 and sqrt(c)D - sqrt(a)E != 0
# More Details, http://www.alpertron.com.ar/METHODS.HTM#Parabol
elif B**2 - 4*A*C == 0:
if A == 0:
s = _diop_quadratic([y, x], coeff, t)
for soln in s:
l.add((soln[1], soln[0]))
else:
g = igcd(A, C)
g = abs(g) * sign(A)
a = A // g
b = B // g
c = C // g
e = sign(B/A)
if e*sqrt(c)*D - sqrt(a)*E == 0:
z = symbols("z", real=True)
roots = solve(sqrt(a)*g*z**2 + D*z + sqrt(a)*F)
for root in roots:
if isinstance(root, Integer):
l.add((diop_solve(sqrt(a)*x + e*sqrt(c)*y - root)[0], diop_solve(sqrt(a)*x + e*sqrt(c)*y - root)[1]))
elif isinstance(e*sqrt(c)*D - sqrt(a)*E, Integer):
solve_x = lambda u: e*sqrt(c)*g*(sqrt(a)*E - e*sqrt(c)*D)*t**2 - (E + 2*e*sqrt(c)*g*u)*t\
- (e*sqrt(c)*g*u**2 + E*u + e*sqrt(c)*F) // (e*sqrt(c)*D - sqrt(a)*E)
solve_y = lambda u: sqrt(a)*g*(e*sqrt(c)*D - sqrt(a)*E)*t**2 + (D + 2*sqrt(a)*g*u)*t \
+ (sqrt(a)*g*u**2 + D*u + sqrt(a)*F) // (e*sqrt(c)*D - sqrt(a)*E)
for z0 in range(0, abs(e*sqrt(c)*D - sqrt(a)*E)):
if divisible(sqrt(a)*g*z0**2 + D*z0 + sqrt(a)*F, e*sqrt(c)*D - sqrt(a)*E):
l.add((solve_x(z0), solve_y(z0)))
# (4) Method used when B**2 - 4*A*C is a square, described on p. 6 of the paper
# below by John P. Robertson.
# http://www.jpr2718.org/ax2p.pdf
elif isinstance(sqrt(B**2 - 4*A*C), Integer):
if A != 0:
r = sqrt(B**2 - 4*A*C)
u, v = symbols("u, v", integer=True)
eq = _mexpand(4*A*r*u*v + 4*A*D*(B*v + r*u + r*v - B*u) + 2*A*4*A*E*(u - v) + 4*A*r*4*A*F)
sol = diop_solve(eq, t)
sol = list(sol)
for solution in sol:
s0 = solution[0]
t0 = solution[1]
x_0 = S(B*t0 + r*s0 + r*t0 - B*s0)/(4*A*r)
y_0 = S(s0 - t0)/(2*r)
if isinstance(s0, Symbol) or isinstance(t0, Symbol):
if check_param(x_0, y_0, 4*A*r, t) != (None, None):
l.add((check_param(x_0, y_0, 4*A*r, t)[0], check_param(x_0, y_0, 4*A*r, t)[1]))
elif divisible(B*t0 + r*s0 + r*t0 - B*s0, 4*A*r):
if divisible(s0 - t0, 2*r):
if is_solution_quad(var, coeff, x_0, y_0):
l.add((x_0, y_0))
else:
_var = var
_var[0], _var[1] = _var[1], _var[0] # Interchange x and y
s = _diop_quadratic(_var, coeff, t)
while len(s) > 0:
sol = s.pop()
l.add((sol[1], sol[0]))
# (5) B**2 - 4*A*C > 0 and B**2 - 4*A*C not a square or B**2 - 4*A*C < 0
else:
P, Q = _transformation_to_DN(var, coeff)
D, N = _find_DN(var, coeff)
solns_pell = diop_DN(D, N)
if D < 0:
for solution in solns_pell:
for X_i in [-solution[0], solution[0]]:
for Y_i in [-solution[1], solution[1]]:
x_i, y_i = (P*Matrix([X_i, Y_i]) + Q)[0], (P*Matrix([X_i, Y_i]) + Q)[1]
if isinstance(x_i, Integer) and isinstance(y_i, Integer):
l.add((x_i, y_i))
else:
# In this case equation can be transformed into a Pell equation
#n = symbols("n", integer=True)
fund_solns = solns_pell
solns_pell = set(fund_solns)
for X, Y in fund_solns:
solns_pell.add((-X, -Y))
a = diop_DN(D, 1)
T = a[0][0]
U = a[0][1]
if (isinstance(P[0], Integer) and isinstance(P[1], Integer) and isinstance(P[2], Integer)
and isinstance(P[3], Integer) and isinstance(Q[0], Integer) and isinstance(Q[1], Integer)):
for sol in solns_pell:
r = sol[0]
s = sol[1]
x_n = S((r + s*sqrt(D))*(T + U*sqrt(D))**t + (r - s*sqrt(D))*(T - U*sqrt(D))**t)/2
y_n = S((r + s*sqrt(D))*(T + U*sqrt(D))**t - (r - s*sqrt(D))*(T - U*sqrt(D))**t)/(2*sqrt(D))
x_n = _mexpand(x_n)
y_n = _mexpand(y_n)
x_n, y_n = (P*Matrix([x_n, y_n]) + Q)[0], (P*Matrix([x_n, y_n]) + Q)[1]
l.add((x_n, y_n))
else:
L = ilcm(S(P[0]).q, ilcm(S(P[1]).q, ilcm(S(P[2]).q,
ilcm(S(P[3]).q, ilcm(S(Q[0]).q, S(Q[1]).q)))))
k = 1
T_k = T
U_k = U
while (T_k - 1) % L != 0 or U_k % L != 0:
T_k, U_k = T_k*T + D*U_k*U, T_k*U + U_k*T
k += 1
for X, Y in solns_pell:
for i in range(k):
Z = P*Matrix([X, Y]) + Q
x, y = Z[0], Z[1]
if isinstance(x, Integer) and isinstance(y, Integer):
Xt = S((X + sqrt(D)*Y)*(T_k + sqrt(D)*U_k)**t +
(X - sqrt(D)*Y)*(T_k - sqrt(D)*U_k)**t)/ 2
Yt = S((X + sqrt(D)*Y)*(T_k + sqrt(D)*U_k)**t -
(X - sqrt(D)*Y)*(T_k - sqrt(D)*U_k)**t)/ (2*sqrt(D))
Zt = P*Matrix([Xt, Yt]) + Q
l.add((Zt[0], Zt[1]))
X, Y = X*T + D*U*Y, X*U + Y*T
return l
def is_solution_quad(var, coeff, u, v):
"""
Check whether `(u, v)` is a solution to the quadratic binary diophantine
equation with the variable list ``var`` and coefficient dictionary
``coeff``.
Not intended for use by normal users.
"""
x, y = var[:2]
eq = x**2*coeff[x**2] + x*y*coeff[x*y] + y**2*coeff[y**2] + x*coeff[x] + y*coeff[y] + coeff[Integer(1)]
return _mexpand(Subs(eq, (x, y), (u, v)).doit()) == 0
def diop_DN(D, N, t=symbols("t", integer=True)):
"""
Solves the equation `x^2 - Dy^2 = N`.
Mainly concerned with the case `D > 0, D` is not a perfect square, which is
the same as the generalized Pell equation. To solve the generalized Pell
equation this function uses the LMM algorithm. Refer to [1]_ for more details on
the algorithm.
Returns one solution for each class of the solutions. Other solutions of
the class can be constructed according to the values of ``D`` and ``N``.
Returns a list containing the solution tuples `(x, y)`.
Usage
=====
``diop_DN(D, N, t)``: D and N are integers as in `x^2 - Dy^2 = N` and
``t`` is the parameter to be used in the solutions.
Details
=======
``D`` and ``N`` correspond to D and N in the equation.
``t`` is the parameter to be used in the solutions.
Examples
========
>>> from sympy.solvers.diophantine import diop_DN
>>> diop_DN(13, -4) # Solves equation x**2 - 13*y**2 = -4
[(3, 1), (393, 109), (36, 10)]
The output can be interpreted as follows: There are three fundamental
solutions to the equation `x^2 - 13y^2 = -4` given by (3, 1), (393, 109)
and (36, 10). Each tuple is in the form (x, y), i.e. the solution (3, 1) means
that `x = 3` and `y = 1`.
>>> diop_DN(986, 1) # Solves equation x**2 - 986*y**2 = 1
[(49299, 1570)]
See Also
========
find_DN(), diop_bf_DN()
References
==========
.. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
Robertson, July 31, 2004, Pages 16 - 17. [online], Available:
http://www.jpr2718.org/pell.pdf
"""
if D < 0:
if N == 0:
return [(S.Zero, S.Zero)]
elif N < 0:
return []
elif N > 0:
d = divisors(square_factor(N))
sol = []
for divisor in d:
sols = cornacchia(1, -D, N // divisor**2)
if sols:
for x, y in sols:
sol.append((divisor*x, divisor*y))
return sol
elif D == 0:
if N < 0 or not isinstance(sqrt(N), Integer):
return []
if N == 0:
return [(S.Zero, t)]
if isinstance(sqrt(N), Integer):
return [(sqrt(N), t)]
else: # D > 0
if isinstance(sqrt(D), Integer):
r = sqrt(D)
if N == 0:
return [(r*t, t)]
else:
sol = []
for y in range(floor(sign(N)*(N - 1)/(2*r)) + 1):
if isinstance(sqrt(D*y**2 + N), Integer):
sol.append((sqrt(D*y**2 + N), y))
return sol
else:
if N == 0:
return [(S.Zero, S.Zero)]
elif abs(N) == 1:
pqa = PQa(0, 1, D)
a_0 = floor(sqrt(D))
l = 0
G = []
B = []
for i in pqa:
a = i[2]
G.append(i[5])
B.append(i[4])
if l != 0 and a == 2*a_0:
break
l = l + 1
if l % 2 == 1:
if N == -1:
x = G[l-1]
y = B[l-1]
else:
count = l
while count < 2*l - 1:
i = next(pqa)
G.append(i[5])
B.append(i[4])
count = count + 1
x = G[count]
y = B[count]
else:
if N == 1:
x = G[l-1]
y = B[l-1]
else:
return []
return [(x, y)]
else:
fs = []
sol = []
div = divisors(N)
for d in div:
if divisible(N, d**2):
fs.append(d)
for f in fs:
m = N // f**2
zs = sqrt_mod(D, abs(m), True)
zs = [i for i in zs if i <= abs(m) // 2 ]
if abs(m) != 2:
zs = zs + [-i for i in zs]
if S.Zero in zs:
zs.remove(S.Zero) # Remove duplicate zero
for z in zs:
pqa = PQa(z, abs(m), D)
l = 0
G = []
B = []
for i in pqa:
a = i[2]
G.append(i[5])
B.append(i[4])
if l != 0 and abs(i[1]) == 1:
r = G[l-1]
s = B[l-1]
if r**2 - D*s**2 == m:
sol.append((f*r, f*s))
elif diop_DN(D, -1) != []:
a = diop_DN(D, -1)
sol.append((f*(r*a[0][0] + a[0][1]*s*D), f*(r*a[0][1] + s*a[0][0])))
break
l = l + 1
if l == length(z, abs(m), D):
break
return sol
def cornacchia(a, b, m):
"""
Solves `ax^2 + by^2 = m` where `\gcd(a, b) = 1 = \gcd(a, m)` and `a, b > 0`.
Uses the algorithm due to Cornacchia. The method only finds primitive
solutions, i.e. ones with `\gcd(x, y) = 1`. So this method can't be used to
find the solutions of `x^2 + y^2 = 20` since the only solution to the former
is `(x, y) = (4, 2)` and it is not primitive. When `a = b = 1`, only the
solutions with `x \geq y` are found. For more details, see the References.
Examples
========
>>> from sympy.solvers.diophantine import cornacchia
>>> cornacchia(2, 3, 35) # equation 2x**2 + 3y**2 = 35
set([(2, 3), (4, 1)])
>>> cornacchia(1, 1, 25) # equation x**2 + y**2 = 25
set([(4, 3)])
References
===========
.. [1] A. Nitaj, "L'algorithme de Cornacchia"
.. [2] Solving the diophantine equation ax**2 + by**2 = m by Cornacchia's
method, [online], Available:
http://www.numbertheory.org/php/cornacchia.html
"""
sols = set([])
a1 = igcdex(a, m)[0]
v = sqrt_mod(-b*a1, m, True)
if v is None:
return None
if not isinstance(v, list):
v = [v]
for t in v:
if t < m // 2:
continue
u, r = t, m
while True:
u, r = r, u % r
if a*r**2 < m:
break
m1 = m - a*r**2
if m1 % b == 0:
m1 = m1 // b
if isinstance(sqrt(m1), Integer):
s = sqrt(m1)
sols.add((int(r), int(s)))
return sols
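# Sanity check of the docstring examples above: 2*2**2 + 3*3**2 = 8 + 27 = 35
# and 2*4**2 + 3*1**2 = 32 + 3 = 35, and both (2, 3) and (4, 1) are primitive
# since gcd(x, y) = 1 in each case. The loop runs the Euclidean remainder
# sequence of t and m until a*r**2 < m, then tests whether (m - a*r**2)/b is
# a perfect square.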
def PQa(P_0, Q_0, D):
"""
Returns useful information needed to solve the Pell equation.
There are six sequences of integers defined related to the continued
fraction representation of `\\frac{P + \sqrt{D}}{Q}`, namely {`P_{i}`},
{`Q_{i}`}, {`a_{i}`}, {`A_{i}`}, {`B_{i}`}, {`G_{i}`}. ``PQa()`` returns
these values as a 6-tuple in the same order as mentioned above. Refer to [1]_
for more detailed information.
Usage
=====
``PQa(P_0, Q_0, D)``: ``P_0``, ``Q_0`` and ``D`` are integers corresponding
to `P_{0}`, `Q_{0}` and `D` in the continued fraction
`\\frac{P_{0} + \sqrt{D}}{Q_{0}}`.
Also it is assumed that `P_{0}^2 \equiv D \pmod{|Q_{0}|}` and that `D` is square free.
Examples
========
>>> from sympy.solvers.diophantine import PQa
>>> pqa = PQa(13, 4, 5) # (13 + sqrt(5))/4
>>> next(pqa) # (P_0, Q_0, a_0, A_0, B_0, G_0)
(13, 4, 3, 3, 1, -1)
>>> next(pqa) # (P_1, Q_1, a_1, A_1, B_1, G_1)
(-1, 1, 1, 4, 1, 3)
References
==========
.. [1] Solving the generalized Pell equation x^2 - Dy^2 = N, John P.
Robertson, July 31, 2004, Pages 4 - 8. http://www.jpr2718.org/pell.pdf
"""
A_i_2 = B_i_1 = 0
A_i_1 = B_i_2 = 1
G_i_2 = -P_0
G_i_1 = Q_0
P_i = P_0
Q_i = Q_0
while True:
a_i = floor((P_i + sqrt(D))/Q_i)
A_i = a_i*A_i_1 + A_i_2
B_i = a_i*B_i_1 + B_i_2
G_i = a_i*G_i_1 + G_i_2
yield P_i, Q_i, a_i, A_i, B_i, G_i
A_i_1, A_i_2 = A_i, A_i_1
B_i_1, B_i_2 = B_i, B_i_1
G_i_1, G_i_2 = G_i, G_i_1
P_i = a_i*Q_i - P_i
Q_i = (D - P_i**2)/Q_i
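# The generator above implements the standard continued-fraction recurrences
# (see [1] in the docstring): a_i = floor((P_i + sqrt(D))/Q_i),
# A_i = a_i*A_{i-1} + A_{i-2}, B_i = a_i*B_{i-1} + B_{i-2},
# G_i = a_i*G_{i-1} + G_{i-2}, then P_{i+1} = a_i*Q_i - P_i and
# Q_{i+1} = (D - P_{i+1}**2)/Q_i.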
def diop_bf_DN(D, N, t=symbols("t", integer=True)):
"""
Uses brute force to solve the equation `x^2 - Dy^2 = N`.
Mainly concerned with the generalized Pell equation, which is the case when
`D > 0` and `D` is not a perfect square. For more information on this case refer to
[1]_. Let `(t, u)` be the minimal positive solution of the equation
`x^2 - Dy^2 = 1`. Then this method requires
`\sqrt{\\frac{\mid N \mid (t \pm 1)}{2D}}` to be small.
Usage
=====
``diop_bf_DN(D, N, t)``: ``D`` and ``N`` are coefficients in
`x^2 - Dy^2 = N` and ``t`` is the parameter to be used in the solutions.
Details
=======
``D`` and ``N`` correspond to D and N in the equation.
``t`` is the parameter to be used in the solutions.
Examples
========
>>> from sympy.solvers.diophantine import diop_bf_DN
>>> diop_bf_DN(13, -4)
[(3, 1), (-3, 1), (36, 10)]
>>> diop_bf_DN(986, 1)
[(49299, 1570)]
See Also
========
diop_DN()
References
==========
.. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
Robertson, July 31, 2004, Page 15. http://www.jpr2718.org/pell.pdf
"""
sol = []
a = diop_DN(D, 1)
u = a[0][0]
v = a[0][1]
if abs(N) == 1:
return diop_DN(D, N)
elif N > 1:
L1 = 0
L2 = floor(sqrt(S(N*(u - 1))/(2*D))) + 1
elif N < -1:
L1 = ceiling(sqrt(S(-N)/D))
L2 = floor(sqrt(S(-N*(u + 1))/(2*D))) + 1
else:
if D < 0:
return [(S.Zero, S.Zero)]
elif D == 0:
return [(S.Zero, t)]
else:
if isinstance(sqrt(D), Integer):
return [(sqrt(D)*t, t), (-sqrt(D)*t, t)]
else:
return [(S.Zero, S.Zero)]
for y in range(L1, L2):
if isinstance(sqrt(N + D*y**2), Integer):
x = sqrt(N + D*y**2)
sol.append((x, y))
if not equivalent(x, y, -x, y, D, N):
sol.append((-x, y))
return sol
def equivalent(u, v, r, s, D, N):
"""
Returns True if two solutions `(u, v)` and `(r, s)` of `x^2 - Dy^2 = N`
belongs to the same equivalence class and False otherwise.
Two solutions `(u, v)` and `(r, s)` to the above equation fall to the same
equivalence class iff both `(ur - Dvs)` and `(us - vr)` are divisible by
`N`. See reference [1]_. No test is performed to test whether `(u, v)` and
`(r, s)` are actually solutions to the equation. User should take care of
this.
Usage
=====
``equivalent(u, v, r, s, D, N)``: `(u, v)` and `(r, s)` are two solutions
of the equation `x^2 - Dy^2 = N` and all parameters involved are integers.
Examples
========
>>> from sympy.solvers.diophantine import equivalent
>>> equivalent(18, 5, -18, -5, 13, -1)
True
>>> equivalent(3, 1, -18, 393, 109, -4)
False
References
==========
.. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
Robertson, July 31, 2004, Page 12. http://www.jpr2718.org/pell.pdf
"""
return divisible(u*r - D*v*s, N) and divisible(u*s - v*r, N)
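# Working through the first docstring example above: for (u, v) = (18, 5) and
# (r, s) = (-18, -5) with D = 13, N = -1, u*r - D*v*s = -324 + 325 = 1 and
# u*s - v*r = -90 + 90 = 0, both divisible by N = -1, so the two solutions
# are equivalent.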
def length(P, Q, D):
"""
Returns the (length of aperiodic part + length of periodic part) of
continued fraction representation of `\\frac{P + \sqrt{D}}{Q}`.
It is important to remember that this does NOT return the length of the
periodic part alone, but the sum of the lengths of the two parts mentioned
above.
Usage
=====
``length(P, Q, D)``: ``P``, ``Q`` and ``D`` are integers corresponding to
the continued fraction `\\frac{P + \sqrt{D}}{Q}`.
Details
=======
``P``, ``D`` and ``Q`` corresponds to P, D and Q in the continued fraction,
`\\frac{P + \sqrt{D}}{Q}`.
Examples
========
>>> from sympy.solvers.diophantine import length
>>> length(-2 , 4, 5) # (-2 + sqrt(5))/4
3
>>> length(-5, 4, 17) # (-5 + sqrt(17))/4
4
"""
x = P + sqrt(D)
y = Q
x = sympify(x)
v, res = [], []
q = x/y
if q < 0:
v.append(q)
res.append(floor(q))
q = q - floor(q)
num, den = rad_rationalize(1, q)
q = num / den
while True:
v.append(q)
a = int(q)
res.append(a)
if q == a:
return len(res)
num, den = rad_rationalize(1,(q - a))
q = num / den
if q in v:
return len(res)
def transformation_to_DN(eq):
"""
This function transforms the general quadratic,
`ax^2 + bxy + cy^2 + dx + ey + f = 0`,
into the easier-to-handle form `X^2 - DY^2 = N`.
This is used to solve the general quadratic equation by transforming it to
the latter form. Refer [1]_ for more detailed information on the
transformation. This function returns a tuple (A, B) where A is a 2 x 2
matrix and B is a 2 x 1 matrix such that,
Transpose([x y]) = A * Transpose([X Y]) + B
Usage
=====
``transformation_to_DN(eq)``: where ``eq`` is the quadratic to be
transformed.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.solvers.diophantine import transformation_to_DN
>>> from sympy.solvers.diophantine import classify_diop
>>> A, B = transformation_to_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
>>> A
Matrix([
[1/26, 3/26],
[ 0, 1/13]])
>>> B
Matrix([
[-6/13],
[-4/13]])
A, B returned are such that Transpose((x y)) = A * Transpose((X Y)) + B.
Substituting these values for `x` and `y` and a bit of simplifying work
will give an equation of the form `x^2 - Dy^2 = N`.
>>> from sympy.abc import X, Y
>>> from sympy import Matrix, simplify, Subs
>>> u = (A*Matrix([X, Y]) + B)[0] # Transformation for x
>>> u
X/26 + 3*Y/26 - 6/13
>>> v = (A*Matrix([X, Y]) + B)[1] # Transformation for y
>>> v
Y/13 - 4/13
Next we will substitute these formulas for `x` and `y` and do
``simplify()``.
>>> eq = simplify(Subs(x**2 - 3*x*y - y**2 - 2*y + 1, (x, y), (u, v)).doit())
>>> eq
X**2/676 - Y**2/52 + 17/13
By multiplying the denominator appropriately, we can get a Pell equation
in the standard form.
>>> eq * 676
X**2 - 13*Y**2 + 884
If only the final equation is needed, ``find_DN()`` can be used.
See Also
========
find_DN()
References
==========
.. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
John P.Robertson, May 8, 2003, Page 7 - 11.
http://www.jpr2718.org/ax2p.pdf
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "binary_quadratic":
return _transformation_to_DN(var, coeff)
def _transformation_to_DN(var, coeff):
x, y = var[:2]
a = coeff[x**2]
b = coeff[x*y]
c = coeff[y**2]
d = coeff[x]
e = coeff[y]
f = coeff[Integer(1)]
g = igcd(a, igcd(b, igcd(c, igcd(d, igcd(e, f)))))
a = a // g
b = b // g
c = c // g
d = d // g
e = e // g
f = f // g
X, Y = symbols("X, Y", integer=True)
if b != Integer(0):
B = (S(2*a)/b).p
C = (S(2*a)/b).q
A = (S(a)/B**2).p
T = (S(a)/B**2).q
# eq_1 = A*B*X**2 + B*(c*T - A*C**2)*Y**2 + d*T*X + (B*e*T - d*T*C)*Y + f*T*B
coeff = {X**2: A*B, X*Y: 0, Y**2: B*(c*T - A*C**2), X: d*T, Y: B*e*T - d*T*C, Integer(1): f*T*B}
A_0, B_0 = _transformation_to_DN([X, Y], coeff)
return Matrix(2, 2, [S(1)/B, -S(C)/B, 0, 1])*A_0, Matrix(2, 2, [S(1)/B, -S(C)/B, 0, 1])*B_0
else:
if d != Integer(0):
B = (S(2*a)/d).p
C = (S(2*a)/d).q
A = (S(a)/B**2).p
T = (S(a)/B**2).q
# eq_2 = A*X**2 + c*T*Y**2 + e*T*Y + f*T - A*C**2
coeff = {X**2: A, X*Y: 0, Y**2: c*T, X: 0, Y: e*T, Integer(1): f*T - A*C**2}
A_0, B_0 = _transformation_to_DN([X, Y], coeff)
return Matrix(2, 2, [S(1)/B, 0, 0, 1])*A_0, Matrix(2, 2, [S(1)/B, 0, 0, 1])*B_0 + Matrix([-S(C)/B, 0])
else:
if e != Integer(0):
B = (S(2*c)/e).p
C = (S(2*c)/e).q
A = (S(c)/B**2).p
T = (S(c)/B**2).q
# eq_3 = a*T*X**2 + A*Y**2 + f*T - A*C**2
coeff = {X**2: a*T, X*Y: 0, Y**2: A, X: 0, Y: 0, Integer(1): f*T - A*C**2}
A_0, B_0 = _transformation_to_DN([X, Y], coeff)
return Matrix(2, 2, [1, 0, 0, S(1)/B])*A_0, Matrix(2, 2, [1, 0, 0, S(1)/B])*B_0 + Matrix([0, -S(C)/B])
else:
# TODO: pre-simplification: Not necessary but may simplify
# the equation.
return Matrix(2, 2, [S(1)/a, 0, 0, 1]), Matrix([0, 0])
def find_DN(eq):
"""
This function returns a tuple `(D, N)` of the simplified form,
`x^2 - Dy^2 = N`, corresponding to the general quadratic,
`ax^2 + bxy + cy^2 + dx + ey + f = 0`.
Solving the general quadratic is then equivalent to solving the equation
`X^2 - DY^2 = N` and transforming the solutions by using the transformation
matrices returned by ``transformation_to_DN()``.
Usage
=====
``find_DN(eq)``: where ``eq`` is the quadratic to be transformed.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.solvers.diophantine import find_DN
>>> find_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
(13, -884)
Interpretation of the output is that we get `X^2 -13Y^2 = -884` after
transforming `x^2 - 3xy - y^2 - 2y + 1` using the transformation returned
by ``transformation_to_DN()``.
See Also
========
transformation_to_DN()
References
==========
.. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
John P.Robertson, May 8, 2003, Page 7 - 11.
http://www.jpr2718.org/ax2p.pdf
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "binary_quadratic":
return _find_DN(var, coeff)
def _find_DN(var, coeff):
x, y = var[:2]
X, Y = symbols("X, Y", integer=True)
A , B = _transformation_to_DN(var, coeff)
u = (A*Matrix([X, Y]) + B)[0]
v = (A*Matrix([X, Y]) + B)[1]
eq = x**2*coeff[x**2] + x*y*coeff[x*y] + y**2*coeff[y**2] + x*coeff[x] + y*coeff[y] + coeff[Integer(1)]
simplified = _mexpand(Subs(eq, (x, y), (u, v)).doit())
coeff = dict([reversed(t.as_independent(*[X, Y])) for t in simplified.args])
for term in [X**2, Y**2, Integer(1)]:
if term not in coeff.keys():
coeff[term] = Integer(0)
return -coeff[Y**2]/coeff[X**2], -coeff[Integer(1)]/coeff[X**2]
def check_param(x, y, a, t):
"""
Check if there is a number modulo ``a`` such that ``x`` and ``y`` are both
integers. If such a number exists, then find a parametric representation
for ``x`` and ``y``.
Here ``x`` and ``y`` are functions of ``t``.
"""
k, m, n = symbols("k, m, n", integer=True)
p = Wild("p", exclude=[k])
q = Wild("q", exclude=[k])
ok = False
for i in range(a):
z_x = _mexpand(Subs(x, t, a*k + i).doit()).match(p*k + q)
z_y = _mexpand(Subs(y, t, a*k + i).doit()).match(p*k + q)
if (isinstance(z_x[p], Integer) and isinstance(z_x[q], Integer) and
isinstance(z_y[p], Integer) and isinstance(z_y[q], Integer)):
ok = True
break
if ok:
x_param = x.match(p*t + q)
y_param = y.match(p*t + q)
if x_param[p] == 0 or y_param[p] == 0:
if x_param[p] == 0:
l1, junk = Poly(y).clear_denoms()
else:
l1 = 1
if y_param[p] == 0:
l2, junk = Poly(x).clear_denoms()
else:
l2 = 1
return x*ilcm(l1, l2), y*ilcm(l1, l2)
eq = S(m - x_param[q])/x_param[p] - S(n - y_param[q])/y_param[p]
lcm_denom, junk = Poly(eq).clear_denoms()
eq = eq * lcm_denom
return diop_solve(eq, t)[0], diop_solve(eq, t)[1]
else:
return (None, None)
def diop_ternary_quadratic(eq):
"""
Solves the general quadratic ternary form,
`ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.
Returns a tuple `(x, y, z)` which is a base solution for the above
equation. If there are no solutions, `(None, None, None)` is returned.
Usage
=====
``diop_ternary_quadratic(eq)``: Return a tuple containing a basic solution
to ``eq``.
Details
=======
``eq`` should be a homogeneous expression of degree two in three variables
and it is assumed to be zero.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.diophantine import diop_ternary_quadratic
>>> diop_ternary_quadratic(x**2 + 3*y**2 - z**2)
(1, 0, 1)
>>> diop_ternary_quadratic(4*x**2 + 5*y**2 - z**2)
(1, 0, 2)
>>> diop_ternary_quadratic(45*x**2 - 7*y**2 - 8*x*y - z**2)
(28, 45, 105)
>>> diop_ternary_quadratic(x**2 - 49*y**2 - z**2 + 13*z*y -8*x*y)
(9, 1, 5)
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "homogeneous_ternary_quadratic":
return _diop_ternary_quadratic(var, coeff)
def _diop_ternary_quadratic(_var, coeff):
x, y, z = _var[:3]
var = [x]*3
var[0], var[1], var[2] = _var[0], _var[1], _var[2]
# Equations of the form B*x*y + C*z*x + E*y*z = 0, where at least two of the
# coefficients B, C, E are non-zero.
# There are infinitely many trivial solutions for the equation,
# e.g. (0, 0, t), (0, t, 0), (t, 0, 0).
# The equation can be re-written as y*(B*x + E*z) = -C*x*z, so we can find rather
# unobvious solutions: set y = -C, so that B*x + E*z = x*z. The latter can be
# solved using methods for binary quadratic diophantine equations. We select the
# solution which minimizes |x| + |z|.
if coeff[x**2] == 0 and coeff[y**2] == 0 and coeff[z**2] == 0:
if coeff[x*z] != 0:
sols = diophantine(coeff[x*y]*x + coeff[y*z]*z - x*z)
s = sols.pop()
min_sum = abs(s[0]) + abs(s[1])
for r in sols:
if abs(r[0]) + abs(r[1]) < min_sum:
s = r
min_sum = abs(s[0]) + abs(s[1])
x_0, y_0, z_0 = s[0], -coeff[x*z], s[1]
else:
var[0], var[1] = _var[1], _var[0]
y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
return simplified(x_0, y_0, z_0)
if coeff[x**2] == 0:
# If the coefficient of x is zero change the variables
if coeff[y**2] == 0:
var[0], var[2] = _var[2], _var[0]
z_0, y_0, x_0 = _diop_ternary_quadratic(var, coeff)
else:
var[0], var[1] = _var[1], _var[0]
y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
else:
if coeff[x*y] != 0 or coeff[x*z] != 0:
# Apply the transformation x --> X - (B*y + C*z)/(2*A)
A = coeff[x**2]
B = coeff[x*y]
C = coeff[x*z]
D = coeff[y**2]
E = coeff[y*z]
F = coeff[z**2]
_coeff = dict()
_coeff[x**2] = 4*A**2
_coeff[y**2] = 4*A*D - B**2
_coeff[z**2] = 4*A*F - C**2
_coeff[y*z] = 4*A*E - 2*B*C
_coeff[x*y] = 0
_coeff[x*z] = 0
X_0, y_0, z_0 = _diop_ternary_quadratic(var, _coeff)
if X_0 is None:
return (None, None, None)
l = (S(B*y_0 + C*z_0)/(2*A)).q
x_0, y_0, z_0 = X_0*l - (S(B*y_0 + C*z_0)/(2*A)).p, y_0*l, z_0*l
elif coeff[z*y] != 0:
if coeff[y**2] == 0:
if coeff[z**2] == 0:
# Equations of the form A*x**2 + E*yz = 0.
A = coeff[x**2]
E = coeff[y*z]
b = (S(-E)/A).p
a = (S(-E)/A).q
x_0, y_0, z_0 = b, a, b
else:
# Ax**2 + E*y*z + F*z**2 = 0
var[0], var[2] = _var[2], _var[0]
z_0, y_0, x_0 = _diop_ternary_quadratic(var, coeff)
else:
# A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, C may be zero
var[0], var[1] = _var[1], _var[0]
y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
else:
# Ax**2 + D*y**2 + F*z**2 = 0, C may be zero
x_0, y_0, z_0 = _diop_ternary_quadratic_normal(var, coeff)
return simplified(x_0, y_0, z_0)
def transformation_to_normal(eq):
"""
Returns the transformation Matrix from general ternary quadratic equation
`eq` to normal form.
The general form of the ternary quadratic equation is `ax^2 + by^2 + cz^2 +
dxy + eyz + fxz = 0`. This function returns a 3 x 3 transformation Matrix which
transforms the former equation to the form `ax^2 + by^2 + cz^2 = 0`. This
is not used in solving ternary quadratics. Only implemented for the sake
of completeness.
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "homogeneous_ternary_quadratic":
return _transformation_to_normal(var, coeff)
def _transformation_to_normal(var, coeff):
_var = [var[0]]*3
_var[1], _var[2] = var[1], var[2]
x, y, z = var[:3]
if coeff[x**2] == 0:
# If the coefficient of x**2 is zero, change the variables
if coeff[y**2] == 0:
_var[0], _var[2] = var[2], var[0]
T = _transformation_to_normal(_var, coeff)
T.row_swap(0, 2)
T.col_swap(0, 2)
return T
else:
_var[0], _var[1] = var[1], var[0]
T = _transformation_to_normal(_var, coeff)
T.row_swap(0, 1)
T.col_swap(0, 1)
return T
else:
# Apply the transformation x --> X - (B*Y + C*Z)/(2*A)
if coeff[x*y] != 0 or coeff[x*z] != 0:
A = coeff[x**2]
B = coeff[x*y]
C = coeff[x*z]
D = coeff[y**2]
E = coeff[y*z]
F = coeff[z**2]
_coeff = dict()
_coeff[x**2] = 4*A**2
_coeff[y**2] = 4*A*D - B**2
_coeff[z**2] = 4*A*F - C**2
_coeff[y*z] = 4*A*E - 2*B*C
_coeff[x*y] = 0
_coeff[x*z] = 0
T_0 = _transformation_to_normal(_var, _coeff)
return Matrix(3, 3, [1, S(-B)/(2*A), S(-C)/(2*A), 0, 1, 0, 0, 0, 1]) * T_0
elif coeff[y*z] != 0:
if coeff[y**2] == 0:
if coeff[z**2] == 0:
# Equations of the form A*x**2 + E*yz = 0.
# Apply the transformation y -> Y + Z and z -> Y - Z
return Matrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, -1])
else:
# Ax**2 + E*y*z + F*z**2 = 0
_var[0], _var[2] = var[2], var[0]
T = _transformation_to_normal(_var, coeff)
T.row_swap(0, 2)
T.col_swap(0, 2)
return T
else:
# A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, F may be zero
_var[0], _var[1] = var[1], var[0]
T = _transformation_to_normal(_var, coeff)
T.row_swap(0, 1)
T.col_swap(0, 1)
return T
else:
return Matrix(3, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1])
def simplified(x, y, z):
"""
Simplify the solution `(x, y, z)`.
"""
if x is None or y is None or z is None:
return (x, y, z)
g = igcd(x, igcd(y, z))
return x // g, y // g, z // g
def parametrize_ternary_quadratic(eq):
"""
Returns the parametrized general solution for the ternary quadratic
equation ``eq`` which has the form
`ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.diophantine import parametrize_ternary_quadratic
>>> parametrize_ternary_quadratic(x**2 + y**2 - z**2)
(2*p*q, p**2 - q**2, p**2 + q**2)
Here `p` and `q` are two co-prime integers.
>>> parametrize_ternary_quadratic(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
(2*p**2 - 2*p*q - q**2, 2*p**2 + 2*p*q - q**2, 2*p**2 - 2*p*q + 3*q**2)
>>> parametrize_ternary_quadratic(124*x**2 - 30*y**2 - 7729*z**2)
(-1410*p**2 - 363263*q**2, 2700*p**2 + 30916*p*q - 695610*q**2, -60*p**2 + 5400*p*q + 15458*q**2)
References
==========
.. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
London Mathematical Society Student Texts 41, Cambridge University
Press, Cambridge, 1998.
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "homogeneous_ternary_quadratic":
x_0, y_0, z_0 = _diop_ternary_quadratic(var, coeff)
return _parametrize_ternary_quadratic((x_0, y_0, z_0), var, coeff)
def _parametrize_ternary_quadratic(solution, _var, coeff):
x, y, z = _var[:3]
x_0, y_0, z_0 = solution[:3]
v = [x]*3
v[0], v[1], v[2] = _var[0], _var[1], _var[2]
if x_0 is None:
return (None, None, None)
if x_0 == 0:
if y_0 == 0:
v[0], v[2] = v[2], v[0]
z_p, y_p, x_p = _parametrize_ternary_quadratic((z_0, y_0, x_0), v, coeff)
return x_p, y_p, z_p
else:
v[0], v[1] = v[1], v[0]
y_p, x_p, z_p = _parametrize_ternary_quadratic((y_0, x_0, z_0), v, coeff)
return x_p, y_p, z_p
x, y, z = v[:3]
r, p, q = symbols("r, p, q", integer=True)
eq = x**2*coeff[x**2] + y**2*coeff[y**2] + z**2*coeff[z**2] + x*y*coeff[x*y] + y*z*coeff[y*z] + z*x*coeff[z*x]
eq_1 = Subs(eq, (x, y, z), (r*x_0, r*y_0 + p, r*z_0 + q)).doit()
eq_1 = _mexpand(eq_1)
A, B = eq_1.as_independent(r, as_Add=True)
x = A*x_0
y = (A*y_0 - _mexpand(B/r*p))
z = (A*z_0 - _mexpand(B/r*q))
return x, y, z
def diop_ternary_quadratic_normal(eq):
"""
Solves the quadratic ternary diophantine equation,
`ax^2 + by^2 + cz^2 = 0`.
Here the coefficients `a`, `b`, and `c` should be non-zero. Otherwise the
equation will be a quadratic binary or univariate equation. If solvable,
returns a tuple `(x, y, z)` that satisfies the given equation. If the
equation does not have integer solutions, `(None, None, None)` is returned.
Usage
=====
``diop_ternary_quadratic_normal(eq)``: where ``eq`` is an equation of the form
`ax^2 + by^2 + cz^2 = 0`.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.diophantine import diop_ternary_quadratic_normal
>>> diop_ternary_quadratic_normal(x**2 + 3*y**2 - z**2)
(1, 0, 1)
>>> diop_ternary_quadratic_normal(4*x**2 + 5*y**2 - z**2)
(1, 0, 2)
>>> diop_ternary_quadratic_normal(34*x**2 - 3*y**2 - 301*z**2)
(4, 9, 1)
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "homogeneous_ternary_quadratic":
return _diop_ternary_quadratic_normal(var, coeff)
def _diop_ternary_quadratic_normal(var, coeff):
x, y, z = var[:3]
a = coeff[x**2]
b = coeff[y**2]
c = coeff[z**2]
if a*b*c == 0:
raise ValueError("Try factoring out your equation or using diophantine()")
g = igcd(a, igcd(b, c))
a = a // g
b = b // g
c = c // g
a_0 = square_factor(a)
b_0 = square_factor(b)
c_0 = square_factor(c)
a_1 = a // a_0**2
b_1 = b // b_0**2
c_1 = c // c_0**2
a_2, b_2, c_2 = pairwise_prime(a_1, b_1, c_1)
A = -a_2*c_2
B = -b_2*c_2
# If the following two conditions are satisfied then there are no solutions
if A < 0 and B < 0:
return (None, None, None)
if (sqrt_mod(-b_2*c_2, a_2) is None or sqrt_mod(-c_2*a_2, b_2) is None or
sqrt_mod(-a_2*b_2, c_2) is None):
return (None, None, None)
z_0, x_0, y_0 = descent(A, B)
if divisible(z_0, c_2):
z_0 = z_0 // abs(c_2)
else:
x_0 = x_0*(S(z_0)/c_2).q
y_0 = y_0*(S(z_0)/c_2).q
z_0 = (S(z_0)/c_2).p
x_0, y_0, z_0 = simplified(x_0, y_0, z_0)
# Holzer reduction
if sign(a) == sign(b):
x_0, y_0, z_0 = holzer(x_0, y_0, z_0, abs(a_2), abs(b_2), abs(c_2))
elif sign(a) == sign(c):
x_0, z_0, y_0 = holzer(x_0, z_0, y_0, abs(a_2), abs(c_2), abs(b_2))
else:
y_0, z_0, x_0 = holzer(y_0, z_0, x_0, abs(b_2), abs(c_2), abs(a_2))
x_0 = reconstruct(b_1, c_1, x_0)
y_0 = reconstruct(a_1, c_1, y_0)
z_0 = reconstruct(a_1, b_1, z_0)
l = ilcm(a_0, ilcm(b_0, c_0))
x_0 = abs(x_0*l//a_0)
y_0 = abs(y_0*l//b_0)
z_0 = abs(z_0*l//c_0)
return simplified(x_0, y_0, z_0)
def square_factor(a):
"""
Returns an integer `c` s.t. `a = c^2k, \ c,k \in Z`. Here `k` is square
free.
Examples
========
>>> from sympy.solvers.diophantine import square_factor
>>> square_factor(24)
2
>>> square_factor(36)
6
>>> square_factor(1)
1
"""
f = factorint(abs(a))
c = 1
for p, e in f.items():
c = c * p**(e//2)
return c
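# The factorint-based loop above extracts the largest square divisor: 24 =
# 2**3 * 3 gives c = 2**(3//2) = 2, and 36 = 2**2 * 3**2 gives c = 2*3 = 6,
# matching the docstring examples.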
def pairwise_prime(a, b, c):
"""
Transform `ax^2 + by^2 + cz^2 = 0` into an equivalent equation
`a'x^2 + b'y^2 + c'z^2 = 0` where `a', b', c'` are pairwise relatively
prime.
Returns a tuple containing `a', b', c'`. `\gcd(a, b, c)` should equal `1`
for this to work. The solutions for `ax^2 + by^2 + cz^2 = 0` can be
recovered from the solutions of `a'x^2 + b'y^2 + c'z^2 = 0`.
Examples
========
>>> from sympy.solvers.diophantine import pairwise_prime
>>> pairwise_prime(6, 15, 10)
(5, 2, 3)
See Also
========
make_prime(), reconstruct()
"""
a, b, c = make_prime(a, b, c)
b, c, a = make_prime(b, c, a)
c, a, b = make_prime(c, a, b)
return a, b, c
def make_prime(a, b, c):
"""
Transform the equation `ax^2 + by^2 + cz^2 = 0` to an equivalent equation
`a'x^2 + b'y^2 + c'z^2 = 0` with `\gcd(a', b') = 1`.
Returns a tuple `(a', b', c')` which satisfies above conditions. Note that
in the returned tuple `\gcd(a', c')` and `\gcd(b', c')` can take any value.
Examples
========
>>> from sympy.solvers.diophantine import make_prime
>>> make_prime(4, 2, 7)
(2, 1, 14)
See Also
========
pairwise_prime(), reconstruct()
"""
g = igcd(a, b)
if g != 1:
f = factorint(g)
for p, e in f.items():
a = a // p**e
b = b // p**e
if e % 2 == 1:
c = p*c
return a, b, c
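# Tracing the docstring example above: make_prime(4, 2, 7) has g = gcd(4, 2) = 2
# with factorization {2: 1}, so a -> 4//2 = 2 and b -> 2//2 = 1, and since the
# exponent 1 is odd, c -> 2*7 = 14, giving (2, 1, 14).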
def reconstruct(a, b, z):
"""
Reconstruct the `z` value of an equivalent solution of `ax^2 + by^2 + cz^2`
from the `z` value of a solution of a transformed version of the above
equation.
"""
g = igcd(a, b)
if g != 1:
f = factorint(g)
for p, e in f.items():
if e %2 == 0:
z = z*p**(e//2)
else:
z = z*p**((e//2)+1)
return z
def ldescent(A, B):
"""
Uses Lagrange's method to find a non-trivial solution to
`w^2 = Ax^2 + By^2`.
Here, `A \\neq 0` and `B \\neq 0` and `A` and `B` are square free. Output a
tuple `(w_0, x_0, y_0)` which is a solution to the above equation.
Examples
========
>>> from sympy.solvers.diophantine import ldescent
>>> ldescent(1, 1) # w^2 = x^2 + y^2
(1, 1, 0)
>>> ldescent(4, -7) # w^2 = 4x^2 - 7y^2
(2, -1, 0)
This means that `x = -1, y = 0` and `w = 2` is a solution to the equation
`w^2 = 4x^2 - 7y^2`
>>> ldescent(5, -1) # w^2 = 5x^2 - y^2
(2, 1, -1)
References
==========
.. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
London Mathematical Society Student Texts 41, Cambridge University
Press, Cambridge, 1998.
.. [2] Efficient Solution of Rational Conics, J. E. Cremona and D. Rusin,
Mathematics of Computation, Volume 00, Number 0.
"""
if abs(A) > abs(B):
w, y, x = ldescent(B, A)
return w, x, y
if A == 1:
return (S.One, S.One, 0)
if B == 1:
return (S.One, 0, S.One)
r = sqrt_mod(A, B)
Q = (r**2 - A) // B
if Q == 0:
B_0 = 1
d = 0
else:
div = divisors(Q)
B_0 = None
for i in div:
if isinstance(sqrt(abs(Q) // i), Integer):
B_0, d = sign(Q)*i, sqrt(abs(Q) // i)
break
if B_0 is not None:
W, X, Y = ldescent(A, B_0)
return simplified((-A*X + r*W), (r*X - W), Y*(B_0*d))
# In this module descent() will always be called with inputs which have solutions.
def descent(A, B):
"""
Lagrange's `descent()` with lattice-reduction to find solutions to
`x^2 = Ay^2 + Bz^2`.
Here `A` and `B` should be square free and pairwise prime. It should always be
called with suitable ``A`` and ``B`` so that the above equation has
solutions.
This is faster than the normal Lagrange descent algorithm because
Gaussian reduction is used.
Examples
========
>>> from sympy.solvers.diophantine import descent
>>> descent(3, 1) # x**2 = 3*y**2 + z**2
(1, 0, 1)
`(x, y, z) = (1, 0, 1)` is a solution to the above equation.
>>> descent(41, -113)
(-16, -3, 1)
References
==========
.. [1] Efficient Solution of Rational Conics, J. E. Cremona and D. Rusin,
Mathematics of Computation, Volume 00, Number 0.
"""
if abs(A) > abs(B):
x, y, z = descent(B, A)
return x, z, y
if B == 1:
return (1, 0, 1)
if A == 1:
return (1, 1, 0)
if B == -1:
return (None, None, None)
if B == -A:
return (0, 1, 1)
if B == A:
x, z, y = descent(-1, A)
return (A*y, z, x)
w = sqrt_mod(A, B)
x_0, z_0 = gaussian_reduce(w, A, B)
t = (x_0**2 - A*z_0**2) // B
t_2 = square_factor(t)
t_1 = t // t_2**2
x_1, z_1, y_1 = descent(A, t_1)
return simplified(x_0*x_1 + A*z_0*z_1, z_0*x_1 + x_0*z_1, t_1*t_2*y_1)
def gaussian_reduce(w, a, b):
"""
Returns a reduced solution `(x, z)` to the congruence
`X^2 - aZ^2 \equiv 0 \ (mod \ b)` so that `x^2 + |a|z^2` is minimal.
Details
=======
Here ``w`` is a solution of the congruence `x^2 \equiv a \ (mod \ b)`
References
==========
.. [1] Gaussian lattice Reduction [online]. Available:
http://home.ie.cuhk.edu.hk/~wkshum/wordpress/?p=404
.. [2] Efficient Solution of Rational Conics, J. E. Cremona and D. Rusin,
Mathematics of Computation, Volume 00, Number 0.
"""
u = (0, 1)
v = (1, 0)
if dot(u, v, w, a, b) < 0:
v = (-v[0], -v[1])
if norm(u, w, a, b) < norm(v, w, a, b):
u, v = v, u
while norm(u, w, a, b) > norm(v, w, a, b):
k = dot(u, v, w, a, b) // dot(v, v, w, a, b)
u, v = v, (u[0]- k*v[0], u[1]- k*v[1])
u, v = v, u
if dot(u, v, w, a, b) < dot(v, v, w, a, b)/2 or norm((u[0]-v[0], u[1]-v[1]), w, a, b) > norm(v, w, a, b):
c = v
else:
c = (u[0] - v[0], u[1] - v[1])
return c[0]*w + b*c[1], c[0]
def dot(u, v, w, a, b):
"""
Returns a special dot product of the vectors `u = (u_{1}, u_{2})` and
`v = (v_{1}, v_{2})` which is defined in order to reduce solution of
the congruence equation `X^2 - aZ^2 \equiv 0 \ (mod \ b)`.
"""
u_1, u_2 = u[:2]
v_1, v_2 = v[:2]
return (w*u_1 + b*u_2)*(w*v_1 + b*v_2) + abs(a)*u_1*v_1
def norm(u, w, a, b):
"""
Returns the norm of the vector `u = (u_{1}, u_{2})` under the dot product
defined by `u \cdot v = (w u_{1} + b u_{2})(w v_{1} + b v_{2}) + |a| u_{1} v_{1}`
where `u = (u_{1}, u_{2})` and `v = (v_{1}, v_{2})`.
"""
u_1, u_2 = u[:2]
return sqrt(dot((u_1, u_2), (u_1, u_2), w, a, b))
def holzer(x_0, y_0, z_0, a, b, c):
"""
Simplify the solution `(x_{0}, y_{0}, z_{0})` of the equation
`ax^2 + by^2 = cz^2` with `a, b, c > 0` and `z_{0}^2 \geq \mid ab \mid` to
a new reduced solution `(x, y, z)` such that `z^2 \leq \mid ab \mid`.
"""
while z_0 > sqrt(a*b):
if c % 2 == 0:
k = c // 2
u_0, v_0 = base_solution_linear(k, y_0, -x_0)
else:
k = 2*c
u_0, v_0 = base_solution_linear(c, y_0, -x_0)
w = -(a*u_0*x_0 + b*v_0*y_0) // (c*z_0)
if c % 2 == 1:
if w % 2 != (a*u_0 + b*v_0) % 2:
w = w + 1
x = (x_0*(a*u_0**2 + b*v_0**2 + c*w**2) - 2*u_0*(a*u_0*x_0 + b*v_0*y_0 + c*w*z_0)) // k
y = (y_0*(a*u_0**2 + b*v_0**2 + c*w**2) - 2*v_0*(a*u_0*x_0 + b*v_0*y_0 + c*w*z_0)) // k
z = (z_0*(a*u_0**2 + b*v_0**2 + c*w**2) - 2*w*(a*u_0*x_0 + b*v_0*y_0 + c*w*z_0)) // k
x_0, y_0, z_0 = x, y, z
return x_0, y_0, z_0
def diop_general_pythagorean(eq, param=symbols("m", integer=True)):
"""
Solves the general pythagorean equation,
`a_{1}^2x_{1}^2 + a_{2}^2x_{2}^2 + . . . + a_{n}^2x_{n}^2 - a_{n + 1}^2x_{n + 1}^2 = 0`.
Returns a tuple which contains a parametrized solution to the equation,
sorted in the same order as the input variables.
Usage
=====
``diop_general_pythagorean(eq, param)``: where ``eq`` is a general
pythagorean equation which is assumed to be zero and ``param`` is the base
parameter used to construct other parameters by subscripting.
Examples
========
>>> from sympy.solvers.diophantine import diop_general_pythagorean
>>> from sympy.abc import a, b, c, d, e
>>> diop_general_pythagorean(a**2 + b**2 + c**2 - d**2)
(m1**2 + m2**2 - m3**2, 2*m1*m3, 2*m2*m3, m1**2 + m2**2 + m3**2)
>>> diop_general_pythagorean(9*a**2 - 4*b**2 + 16*c**2 + 25*d**2 + e**2)
(10*m1**2 + 10*m2**2 + 10*m3**2 - 10*m4**2, 15*m1**2 + 15*m2**2 + 15*m3**2 + 15*m4**2, 15*m1*m4, 12*m2*m4, 60*m3*m4)
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "general_pythagorean":
return _diop_general_pythagorean(var, coeff, param)
def _diop_general_pythagorean(var, coeff, t):
if sign(coeff[var[0]**2]) + sign(coeff[var[1]**2]) + sign(coeff[var[2]**2]) < 0:
for key in coeff.keys():
coeff[key] = coeff[key] * -1
n = len(var)
index = 0
for i, v in enumerate(var):
if sign(coeff[v**2]) == -1:
index = i
m = symbols(str(t) + "1:" + str(n), integer=True)
l = []
ith = 0
for m_i in m:
ith = ith + m_i**2
l.append(ith - 2*m[n - 2]**2)
for i in range(n - 2):
l.append(2*m[i]*m[n-2])
sol = l[:index] + [ith] + l[index:]
lcm = 1
for i, v in enumerate(var):
if i == index or (index > 0 and i == 0) or (index == 0 and i == 1):
lcm = ilcm(lcm, sqrt(abs(coeff[v**2])))
else:
lcm = ilcm(lcm, sqrt(coeff[v**2]) if sqrt(coeff[v**2]) % 2 else sqrt(coeff[v**2]) // 2)
for i, v in enumerate(var):
sol[i] = (lcm*sol[i]) / sqrt(abs(coeff[v**2]))
return tuple(sol)
def diop_general_sum_of_squares(eq, limit=1):
"""
Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.
Returns at most ``limit`` solutions. Currently there is no way to
set ``limit`` using higher-level APIs like ``diophantine()`` or
``diop_solve()``, but that will be fixed soon.
Usage
=====
``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which
is assumed to be zero. Also, ``eq`` should be in the form,
`x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`. At most ``limit``
solutions are returned.
Details
=======
    When `n = 3`, if `k = 4^a(8m + 7)` for some `a, m \in Z`, then there will
    be no solutions. Refer to [1]_ for more details.
Examples
========
>>> from sympy.solvers.diophantine import diop_general_sum_of_squares
>>> from sympy.abc import a, b, c, d, e, f
>>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345)
set([(0, 48, 5, 4, 0)])
    References
    ==========
.. [1] Representing an Integer as a sum of three squares, [online],
Available:
http://www.proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "general_sum_of_squares":
return _diop_general_sum_of_squares(var, coeff, limit)
def _diop_general_sum_of_squares(var, coeff, limit=1):
n = len(var)
k = -int(coeff[Integer(1)])
s = set([])
if k < 0:
return set([])
if n == 3:
s.add(sum_of_three_squares(k))
elif n == 4:
s.add(sum_of_four_squares(k))
else:
m = n // 4
f = partition(k, m, True)
for j in range(limit):
soln = []
try:
l = next(f)
except StopIteration:
break
for n_i in l:
a, b, c, d = sum_of_four_squares(n_i)
soln = soln + [a, b, c, d]
soln = soln + [0] * (n % 4)
s.add(tuple(soln))
return s
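# Editor's sketch (hypothetical `_demo_general_sum_of_squares`, not part of
# SymPy): every tuple returned through the public API should square-sum to k.
def _demo_general_sum_of_squares(k=2345):
    from sympy.abc import a, b, c, d, e
    sols = diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - k)
    assert all(sum(x**2 for x in s) == k for s in sols)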
## Functions below this comment can be more suitably grouped under an Additive number theory module
## rather than the Diophantine equation module.
def partition(n, k=None, zeros=False):
"""
Returns a generator that can be used to generate partitions of an integer
`n`.
    A partition of `n` is a set of positive integers which add up to `n`. For
    example, the partitions of 3 are 3, 1 + 2, and 1 + 1 + 1. A partition is
    returned as a tuple. If ``k`` equals None, then all possible partitions are
    returned irrespective of their size, otherwise only the partitions of size
    ``k`` are returned. If there are no partitions of `n` with size `k` then an
    empty tuple is returned. If the ``zeros`` parameter is set to True then a
    suitable number of zeros are added at the end of every partition of size
    less than ``k``.
    The ``zeros`` parameter is considered only if ``k`` is not None. When the
    partitions are exhausted, the next ``next()`` call raises ``StopIteration``,
    so this function should always be used inside a try-except block.
Details
=======
    ``partition(n, k)``: Here ``n`` is a positive integer and ``k`` is the size
    of the partition, which is also a positive integer.
Examples
========
>>> from sympy.solvers.diophantine import partition
>>> f = partition(5)
>>> next(f)
(1, 1, 1, 1, 1)
>>> next(f)
(1, 1, 1, 2)
>>> g = partition(5, 3)
>>> next(g)
(3, 1, 1)
>>> next(g)
(2, 2, 1)
    References
    ==========
.. [1] Generating Integer Partitions, [online],
Available: http://jeromekelleher.net/partitions.php
"""
if n < 1:
yield tuple()
if k is not None:
if k < 1:
yield tuple()
elif k > n:
if zeros:
for i in range(1, n):
for t in partition(n, i):
yield (t,) + (0,) * (k - i)
else:
yield tuple()
else:
a = [1 for i in range(k)]
a[0] = n - k + 1
yield tuple(a)
i = 1
while a[0] >= n // k + 1:
j = 0
while j < i and j + 1 < k:
a[j] = a[j] - 1
a[j + 1] = a[j + 1] + 1
yield tuple(a)
j = j + 1
i = i + 1
if zeros:
for m in range(1, k):
for a in partition(n, m):
yield tuple(a) + (0,) * (k - m)
else:
a = [0 for i in range(n + 1)]
l = 1
y = n - 1
while l != 0:
x = a[l - 1] + 1
l -= 1
while 2*x <= y:
a[l] = x
y -= x
l += 1
m = l + 1
while x <= y:
a[l] = x
a[m] = y
yield tuple(a[:l + 2])
x += 1
y -= 1
a[l] = x + y
y = x + y - 1
yield tuple(a[:l + 1])
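# Editor's sketch (hypothetical `_demo_partition_zero_padding`, not part of
# SymPy): with a fixed size ``k`` and ``zeros=True``, shorter partitions come
# back padded with trailing zeros, so every tuple has length k and sums to n.
def _demo_partition_zero_padding(n=4, k=3):
    padded = list(partition(n, k, zeros=True))
    assert all(len(t) == k and sum(t) == n for t in padded)
    return padded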
def prime_as_sum_of_two_squares(p):
"""
Represent a prime `p` which is congruent to 1 mod 4, as a sum of two
squares.
Examples
========
>>> from sympy.solvers.diophantine import prime_as_sum_of_two_squares
>>> prime_as_sum_of_two_squares(5)
(2, 1)
    References
    ==========
.. [1] Representing a number as a sum of four squares, [online],
Available: http://www.schorn.ch/howto.html
"""
if p % 8 == 5:
b = 2
else:
b = 3
while pow(b, (p - 1) // 2, p) == 1:
b = nextprime(b)
b = pow(b, (p - 1) // 4, p)
a = p
while b**2 > p:
a, b = b, a % b
return (b, a % b)
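# Editor's sketch (hypothetical `_demo_prime_two_squares`, not part of SymPy):
# for a prime p with p % 4 == 1 the returned pair squares-sums back to p.
def _demo_prime_two_squares(p=13):
    a, b = prime_as_sum_of_two_squares(p)
    assert a**2 + b**2 == p
    return a, b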
def sum_of_three_squares(n):
"""
Returns a 3-tuple `(a, b, c)` such that `a^2 + b^2 + c^2 = n` and
`a, b, c \geq 0`.
Returns (None, None, None) if `n = 4^a(8m + 7)` for some `a, m \in Z`. See
[1]_ for more details.
Usage
=====
``sum_of_three_squares(n)``: Here ``n`` is a non-negative integer.
Examples
========
>>> from sympy.solvers.diophantine import sum_of_three_squares
>>> sum_of_three_squares(44542)
(207, 37, 18)
References
==========
.. [1] Representing a number as a sum of three squares, [online],
Available: http://www.schorn.ch/howto.html
"""
special = {1:(1, 0, 0), 2:(1, 1, 0), 3:(1, 1, 1), 10: (1, 3, 0), 34: (3, 3, 4), 58:(3, 7, 0),
85:(6, 7, 0), 130:(3, 11, 0), 214:(3, 6, 13), 226:(8, 9, 9), 370:(8, 9, 15),
526:(6, 7, 21), 706:(15, 15, 16), 730:(1, 27, 0), 1414:(6, 17, 33), 1906:(13, 21, 36),
2986: (21, 32, 39), 9634: (56, 57, 57)}
v = 0
if n == 0:
return (0, 0, 0)
while n % 4 == 0:
v = v + 1
n = n // 4
if n % 8 == 7:
return (None, None, None)
if n in special.keys():
x, y, z = special[n]
return (2**v*x, 2**v*y, 2**v*z)
l = int(sqrt(n))
if n == l**2:
return (2**v*l, 0, 0)
x = None
if n % 8 == 3:
l = l if l % 2 else l - 1
for i in range(l, -1, -2):
if isprime((n - i**2) // 2):
x = i
break
y, z = prime_as_sum_of_two_squares((n - x**2) // 2)
return (2**v*x, 2**v*(y + z), 2**v*abs(y - z))
if n % 8 == 2 or n % 8 == 6:
l = l if l % 2 else l - 1
else:
l = l - 1 if l % 2 else l
for i in range(l, -1, -2):
if isprime(n - i**2):
x = i
break
y, z = prime_as_sum_of_two_squares(n - x**2)
return (2**v*x, 2**v*y, 2**v*z)
def sum_of_four_squares(n):
"""
Returns a 4-tuple `(a, b, c, d)` such that `a^2 + b^2 + c^2 + d^2 = n`.
Here `a, b, c, d \geq 0`.
Usage
=====
``sum_of_four_squares(n)``: Here ``n`` is a non-negative integer.
Examples
========
>>> from sympy.solvers.diophantine import sum_of_four_squares
>>> sum_of_four_squares(3456)
(8, 48, 32, 8)
>>> sum_of_four_squares(1294585930293)
(0, 1137796, 2161, 1234)
References
==========
.. [1] Representing a number as a sum of four squares, [online],
Available: http://www.schorn.ch/howto.html
"""
if n == 0:
return (0, 0, 0, 0)
v = 0
while n % 4 == 0:
v = v + 1
n = n // 4
if n % 8 == 7:
d = 2
n = n - 4
elif n % 8 == 6 or n % 8 == 2:
d = 1
n = n - 1
else:
d = 0
x, y, z = sum_of_three_squares(n)
return (2**v*d, 2**v*x, 2**v*y, 2**v*z)
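# Editor's sketch (hypothetical `_demo_four_squares`, not part of SymPy): by
# Lagrange's four-square theorem a decomposition exists for every
# non-negative integer.
def _demo_four_squares(n=3456):
    a, b, c, d = sum_of_four_squares(n)
    assert a**2 + b**2 + c**2 + d**2 == n
    return a, b, c, d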
def power_representation(n, p, k, zeros=False):
"""
    Returns a generator for finding k-tuples `(n_{1}, n_{2}, \ldots, n_{k})` such
    that `n = n_{1}^p + n_{2}^p + \ldots + n_{k}^p`.
    Here `n` is a positive integer. A StopIteration exception is raised after
    all the solutions are generated, so the generator should always be used
    within a try-except block.
Usage
=====
``power_representation(n, p, k, zeros)``: Represent number ``n`` as a sum
of ``k``, ``p``th powers. If ``zeros`` is true, then the solutions will
contain zeros.
Examples
========
>>> from sympy.solvers.diophantine import power_representation
>>> f = power_representation(1729, 3, 2) # Represent 1729 as a sum of two cubes
>>> next(f)
(12, 1)
>>> next(f)
(10, 9)
"""
if p < 1 or k < 1 or n < 1:
raise ValueError("Expected: n > 0 and k >= 1 and p >= 1")
    if k == 1:
        # call perfect_power() only once and reuse the result
        pp = perfect_power(n)
        if pp:
            yield (pp[0],)
        else:
            yield tuple()
elif p == 1:
for t in partition(n, k, zeros):
yield t
else:
l = []
a = integer_nthroot(n, p)[0]
for t in pow_rep_recursive(a, k, n, [], p):
yield t
if zeros:
for i in range(2, k):
for t in pow_rep_recursive(a, i, n, [], p):
yield t + (0,) * (k - i)
def pow_rep_recursive(n_i, k, n_remaining, terms, p):
if k == 0 and n_remaining == 0:
yield tuple(terms)
else:
if n_i >= 1 and k > 0 and n_remaining >= 0:
if n_i**p <= n_remaining:
for t in pow_rep_recursive(n_i, k - 1, n_remaining - n_i**p, terms + [n_i], p):
yield t
for t in pow_rep_recursive(n_i - 1, k, n_remaining, terms, p):
yield t
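# Editor's sketch (hypothetical `_demo_power_representation`, not part of
# SymPy): every tuple generated for the taxicab number 1729 is a sum of two
# cubes equal to 1729.
def _demo_power_representation(n=1729, p=3, k=2):
    for t in power_representation(n, p, k):
        assert sum(x**p for x in t) == n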
|
sahilshekhawat/sympy
|
sympy/solvers/diophantine.py
|
Python
|
bsd-3-clause
| 79,097
|
[
"Gaussian"
] |
c656be55f995d68bb7f88a3137dfe47763ccb2cd70cb7fecb5e3fa170fd8368c
|
# -*- coding: utf-8 -*-
"""
This file contains methods for decay-like fitting; these methods
are imported by the FitLogic class.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import numpy as np
from lmfit.models import Model
from scipy.ndimage import filters
############################################################################
# #
# Defining Exponential Models #
# #
############################################################################
####################################################
# General case: bare stretched exponential decay #
####################################################
def make_barestretchedexponentialdecay_model(self, prefix=None):
""" Create a general bare exponential decay model.
@param str prefix: optional string, which serves as a prefix for all
parameters used in this model. That will prevent
name collisions if this model is used in a composite
way.
@return tuple: (object model, object params)
Explanation of the objects:
        object lmfit.model.Model model:
            A model the lmfit module will use for that fit. Here a
            stretched exponential decay model. Returns an object of the
            class lmfit.model.Model.
object lmfit.parameter.Parameters params:
It is basically an OrderedDict, so a dictionary, with keys
denoting the parameters as string names and values which are
lmfit.parameter.Parameter (without s) objects, keeping the
information about the current value.
"""
def barestretchedexponentialdecay_function(x, beta, lifetime):
""" Function of a bare exponential decay.
@param numpy.array x: 1D array as the independent variable - e.g. time
@param float lifetime: constant lifetime
@return: bare exponential decay function: in order to use it as a model
"""
return np.exp(-np.power(x/lifetime, beta))
    if not isinstance(prefix, str) and prefix is not None:
        self.log.error('The passed prefix <{0}> of type {1} is not a string '
                       'and cannot be used as a prefix; it will be ignored. '
                       'Correct that!'.format(prefix, type(prefix)))
model = Model(barestretchedexponentialdecay_function,
independent_vars='x')
else:
model = Model(barestretchedexponentialdecay_function,
independent_vars='x', prefix=prefix)
params = model.make_params()
return model, params
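# Editor's sketch (hypothetical `_demo_bare_stretched_model`, not part of
# qudi): `self` is only used in the error branch above, so with a valid
# string prefix the factory can be exercised standalone; lmfit's Model.eval
# accepts parameter values as keyword arguments.
def _demo_bare_stretched_model():
    model, _ = make_barestretchedexponentialdecay_model(None, prefix='')
    x = np.linspace(0, 5, 50)
    return model.eval(x=x, beta=1.0, lifetime=2.0)  # beta=1: plain exponential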
##############################
# Single exponential decay #
##############################
def make_bareexponentialdecay_model(self, prefix=None):
""" Create a bare single exponential decay model.
@param str prefix: optional string, which serves as a prefix for all
parameters used in this model. That will prevent
name collisions if this model is used in a composite
way.
@return tuple: (object model, object params), for more description see in
the method make_barestretchedexponentialdecay_model.
"""
bare_exp_decay, params = self.make_barestretchedexponentialdecay_model(prefix=prefix)
bare_exp_decay.set_param_hint(name='beta', value=1, vary=False)
params = bare_exp_decay.make_params()
return bare_exp_decay, params
def make_decayexponential_model(self, prefix=None):
""" Create a exponential decay model with an amplitude and offset.
@param str prefix: optional string, which serves as a prefix for all
parameters used in this model. That will prevent
name collisions if this model is used in a composite
way.
@return tuple: (object model, object params), for more description see in
the method make_barestretchedexponentialdecay_model.
"""
bare_exp_model, params = self.make_bareexponentialdecay_model(prefix=prefix)
amplitude_model, params = self.make_amplitude_model(prefix=prefix)
constant_model, params = self.make_constant_model(prefix=prefix)
exponentialdecay_model = amplitude_model * bare_exp_model + constant_model
params = exponentialdecay_model.make_params()
return exponentialdecay_model, params
#################################
# Stretched exponential decay #
#################################
def make_decayexponentialstretched_model(self, prefix=None):
""" Create a stretched exponential decay model with offset.
@param str prefix: optional string, which serves as a prefix for all
parameters used in this model. That will prevent
name collisions if this model is used in a composite
way.
@return tuple: (object model, object params), for more description see in
the method make_barestretchedexponentialdecay_model.
"""
bare_stre_exp_decay, params = self.make_barestretchedexponentialdecay_model(prefix=prefix)
    amplitude_model, params = self.make_amplitude_model(prefix=prefix)  # keep prefixing consistent with the other submodels
constant_model, params = self.make_constant_model(prefix=prefix)
stre_exp_decay_offset = amplitude_model * bare_stre_exp_decay + constant_model
params = stre_exp_decay_offset.make_params()
return stre_exp_decay_offset, params
############################################################################
# #
# Fit methods and their estimators #
# #
############################################################################
##########################################
# single exponential decay with offset #
##########################################
def make_decayexponential_fit(self, x_axis, data, estimator, units=None, add_params=None):
""" Performes a exponential decay with offset fit on the provided data.
@param numpy.array x_axis: 1D axis values
@param numpy.array data: 1D data, should have the same dimension as x_axis.
@param Parameters or dict add_params: optional, additional parameters of
type lmfit.parameter.Parameters, OrderedDict or dict for the fit
which will be used instead of the values from the estimator.
@return object result: lmfit.model.ModelFit object, all parameters
provided about the fitting, like: success,
initial fitting values, best fitting values, data
with best fit with given axis,...
"""
exponentialdecay, params = self.make_decayexponential_model()
error, params = estimator(x_axis, data, params)
params = self._substitute_params(initial_params=params,
update_params=add_params)
try:
result = exponentialdecay.fit(data, x=x_axis, params=params)
    except Exception:
        # Second attempt; if this fails as well, the exception propagates.
        result = exponentialdecay.fit(data, x=x_axis, params=params)
        self.log.warning('The exponential decay with offset fit did not work. '
                         'Message: {}'.format(str(result.message)))
if units is None:
units = ['arb. unit', 'arb. unit']
result_str_dict = dict() #create result string for gui
result_str_dict['Amplitude'] = {'value': result.params['amplitude'].value,
'error': result.params['amplitude'].stderr,
'unit': units[1]} #amplitude
result_str_dict['Lifetime'] = {'value': result.params['lifetime'].value,
'error': result.params['lifetime'].stderr,
'unit': units[0]} #lifetime
result_str_dict['Offset'] = {'value': result.params['offset'].value,
'error': result.params['offset'].stderr,
'unit': units[1]} #offset
result.result_str_dict = result_str_dict
return result
def estimate_decayexponential(self, x_axis, data, params):
""" Estimation of the initial values for an exponential decay function.
@param numpy.array x_axis: 1D axis values
@param numpy.array data: 1D data, should have the same dimension as x_axis.
@param lmfit.Parameters params: object includes parameter dictionary which
can be set
@return tuple (error, params):
Explanation of the return parameter:
int error: error code (0:OK, -1:error)
Parameters object params: set parameters of initial values
"""
error = self._check_1D_input(x_axis=x_axis, data=data, params=params)
    # calculation of the offset: take the last 10% of the data
    # and average over those points.
offset = data[-max(1, int(len(x_axis)/10)):].mean()
    # subtract the offset and check whether the data decays upwards or downwards
if data[0] < data[-1]:
data_level = offset - data
else:
data_level = data - offset
# check if the data level contain still negative values and correct
# the data level therefore. Otherwise problems in the logarithm appear.
if data_level.min() <= 0:
data_level = data_level - data_level.min()
    # Remove all data points smaller than or equal to the standard deviation:
    # values below the noise level are beyond resolution and do not help
    # the fit.
for i in range(0, len(x_axis)):
if data_level[i] <= data_level.std():
break
    # values and bounds of the parameters.
ampl = data[-max(1, int(len(x_axis) / 10)):].std()
min_lifetime = 2 * (x_axis[1] - x_axis[0])
try:
data_level_log = np.log(data_level[0:i])
# linear fit, see linearmethods.py
linear_result = self.make_linear_fit(x_axis=x_axis[0:i], data=data_level_log, estimator=self.estimate_linear)
params['lifetime'].set(value=-1/linear_result.params['slope'].value, min=min_lifetime)
        # amplitude can be positive or negative
if data[0] < data[-1]:
params['amplitude'].set(value=-np.exp(linear_result.params['offset'].value), max=-ampl)
else:
params['amplitude'].set(value=np.exp(linear_result.params['offset'].value), min=ampl)
    except Exception:
        self.log.exception('Lifetime too small in estimate_decayexponential, beyond resolution!')
params['lifetime'].set(value=x_axis[i]-x_axis[0], min=min_lifetime)
params['amplitude'].set(value=data_level[0])
params['offset'].set(value=offset)
return error, params
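# Editor's sketch (hypothetical `_demo_log_linear_lifetime`, standalone, not
# part of qudi): the core idea of the estimator above -- a straight line
# fitted to log(data) has slope -1/lifetime -- shown with numpy.polyfit in
# place of make_linear_fit.
def _demo_log_linear_lifetime(lifetime=3.0):
    x = np.linspace(0, 10, 200)
    data = np.exp(-x / lifetime)
    slope = np.polyfit(x, np.log(data), 1)[0]
    return -1.0 / slope  # recovers ~lifetime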
#############################################
# stretched exponential decay with offset #
#############################################
def make_decayexponentialstretched_fit(self, x_axis, data, estimator, units=None, add_params=None):
""" Performes a stretched exponential decay with offset fit on the provided data.
@param numpy.array x_axis: 1D axis values
@param numpy.array data: 1D data, should have the same dimension as x_axis.
@param object estimator: Pointer to the estimator method
@param list units: List containing the ['horizontal', 'vertical'] units as strings
@param Parameters or dict add_params: optional, additional parameters of
type lmfit.parameter.Parameters, OrderedDict or dict for the fit
which will be used instead of the values from the estimator.
@return object result: lmfit.model.ModelFit object, all parameters
provided about the fitting, like: success,
initial fitting values, best fitting values, data
with best fit with given axis,...
"""
stret_exp_decay_offset, params = self.make_decayexponentialstretched_model()
error, params = estimator(x_axis, data, params)
params = self._substitute_params(initial_params=params,
update_params=add_params)
try:
result = stret_exp_decay_offset.fit(data, x=x_axis, params=params)
    except Exception:
        # Second attempt; if this fails as well, the exception propagates.
        result = stret_exp_decay_offset.fit(data, x=x_axis, params=params)
        self.log.warning('The stretched exponential decay with offset fit did not work. '
                         'Message: {}'.format(str(result.message)))
if units is None:
units = ['arb. unit', 'arb. unit']
result_str_dict = dict() #create result string for gui
result_str_dict['Amplitude'] = {'value': result.params['amplitude'].value,
'error': result.params['amplitude'].stderr,
'unit': units[1]} #amplitude
result_str_dict['Lifetime'] = {'value': result.params['lifetime'].value,
'error': result.params['lifetime'].stderr,
'unit': units[0]} #lifetime
result_str_dict['Offset'] = {'value': result.params['offset'].value,
'error': result.params['offset'].stderr,
'unit': units[1]} #offset
result_str_dict['Beta'] = {'value': result.params['beta'].value,
'error': result.params['beta'].stderr,
'unit': ''} #Beta (exponent of exponential exponent)
result.result_str_dict = result_str_dict
return result
def estimate_decayexponentialstretched(self, x_axis, data, params):
""" Provide an estimation for initial values for a stretched exponential decay with offset.
@param numpy.array x_axis: 1D axis values
@param numpy.array data: 1D data, should have the same dimension as x_axis.
@param lmfit.Parameters params: object includes parameter dictionary which
can be set
@return tuple (error, params):
Explanation of the return parameter:
int error: error code (0:OK, -1:error)
Parameters object params: set parameters of initial values
"""
error = self._check_1D_input(x_axis=x_axis, data=data, params=params)
    # Smooth the provided data heavily, so that noise fluctuations do not
    # disturb the parameter estimation.
std_dev = 10
data_smoothed = filters.gaussian_filter1d(data, std_dev)
    # calculation of the offset: take the last 10% of the data
    # and average over those points.
offset = data_smoothed[-max(1, int(len(x_axis)/10)):].mean()
    # subtraction of the offset and correction of the decay behaviour
    # (decay to a bigger value or decay to a smaller value)
if data_smoothed[0] < data_smoothed[-1]:
data_smoothed = offset - data_smoothed
ampl_sign=-1
else:
data_smoothed = data_smoothed - offset
ampl_sign=1
if data_smoothed.min() <= 0:
data_smoothed = data_smoothed - data_smoothed.min()
    # Take all values up to the standard deviation; the remaining values
    # disturb the estimation more than they help:
for stop_index in range(0, len(x_axis)):
if data_smoothed[stop_index] <= data_smoothed.std():
break
data_level_log = np.log(data_smoothed[0:stop_index])
    # fit a second-order polynomial to the remaining data:
    poly_coef = np.polyfit(x_axis[0:stop_index], data_level_log, deg=2)
    # obtain the values from the polynomial fit
lifetime = 1/np.sqrt(abs(poly_coef[0]))
amplitude = np.exp(poly_coef[2])
# Include all the estimated fit parameter:
params['amplitude'].set(value=amplitude*ampl_sign)
params['offset'].set(value=offset)
min_lifetime = 2 * (x_axis[1]-x_axis[0])
params['lifetime'].set(value=lifetime, min=min_lifetime)
# as an arbitrary starting point:
params['beta'].set(value=2, min=0)
return error, params
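# Editor's sketch (hypothetical `_demo_stretched_lifetime`, standalone, not
# part of qudi): for beta == 2 the second-order polynomial fit of log(data)
# used above recovers the lifetime as 1/sqrt(|c2|), c2 being the quadratic
# coefficient.
def _demo_stretched_lifetime(lifetime=2.0):
    x = np.linspace(0, 4, 100)
    data = np.exp(-(x / lifetime)**2)
    c2 = np.polyfit(x, np.log(data), 2)[0]
    return 1.0 / np.sqrt(abs(c2))  # recovers ~lifetime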
|
tobiasgehring/qudi
|
logic/fitmethods/decaylikemethods.py
|
Python
|
gpl-3.0
| 17,012
|
[
"Gaussian"
] |
0ca1d4a992b02de787b8906391929c1cd2aba4c1298f9a47bc8b2e2d05cd6d4e
|
#!/usr/bin/python
import sys
import numpy as np
import warnings
import collections as co
import itertools
import Bio.PDB as bp
import forgi.threedee.model.coarse_grain as ttmc
import forgi.threedee.utilities.graph_pdb as cgg
import forgi.utilities.debug as cud
import forgi.threedee.visual.pymol as cvp
from optparse import OptionParser
import matplotlib.pyplot as plt
import logging
logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)
'''
def add_pymol_coords(pp, coords):
for i in range(2):
for a in coords[i].keys():
if a == 'P':
pp.add_sphere(coords[i][a], 'orange', 0.1)
elif a == 'C1':
pp.add_sphere(coords[i][a], 'green', 0.1)
elif a == 'O3*':
pp.add_sphere(coords[i][a], 'red', 0.1)
def print_atom_positions(coords):
for i in range(2):
for a in coords[i].keys():
print "%d %s %s" % (i, a,
" ".join(map(str, coords[i][a])))
'''
def print_average_atom_positions(coords, res='A', pp=None):
averages = [co.defaultdict(lambda: co.defaultdict(list)),
co.defaultdict(lambda: co.defaultdict(list))]
if pp is None:
print "import collections as co"
print "avg_stem_vres_atom_coords = [co.defaultdict(dict), co.defaultdict(dict)]\n"
for i in range(2):
for k in coords[i].keys():
for c in coords[i][k]:
for a in c.keys():
averages[i][k][a] += [c[a]]
import matplotlib.colors
fig, ax = plt.subplots(3)
colors = list(matplotlib.colors.cnames.keys())
markers = itertools.cycle(["+", "o", ".", "<", ">", "^", "v", "s", "x", "d", "D", "*"])
for i in range(2):
for r in averages[i].keys():
if r not in res:
continue
for j, a in enumerate(averages[i][r].keys()):
if pp is None:
                    print('avg_stem_vres_atom_coords[%d]["%s"]["%s"] = [%s]' % (i, r, a,
                          ",".join(map(str, np.mean(averages[i][r][a], axis=0)))))
#print i,r, a, np.mean(averages[i][r][a], axis=0)
else:
if i == 0:
pp.add_sphere(np.mean(averages[i][r][a], axis=0), 'green', 0.1)
else:
pp.add_sphere(np.mean(averages[i][r][a], axis=0), 'red', 0.1)
pp.add_text(np.mean(averages[i][r][a], axis=0), a, color="yellow" )
if i==0:
ax[0].scatter(np.array(averages[i][r][a])[:,0], np.array(averages[i][r][a])[:,1], label="{}:{}".format(i, a), color=colors[j], marker = next(markers))
ax[1].scatter(np.array(averages[i][r][a])[:,1], np.array(averages[i][r][a])[:,2], label="{}:{}".format(i, a), color=colors[j], marker = next(markers))
ax[2].scatter(np.array(averages[i][r][a])[:,0], np.array(averages[i][r][a])[:,2], label="{}:{}".format(i, a), color=colors[j], marker = next(markers))
ax[0].set_xlabel("x")
ax[0].set_ylabel("y")
ax[1].set_xlabel("y")
ax[1].set_ylabel("z")
ax[2].set_xlabel("x")
ax[2].set_ylabel("z")
ax[2].legend()
plt.show()
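# Editor's sketch (hypothetical `_demo_average_coords`, not part of ernwin):
# the accumulation pattern used above -- nested defaultdicts of coordinate
# lists, averaged with np.mean(..., axis=0) once all structures are collected.
def _demo_average_coords():
    acc = co.defaultdict(lambda: co.defaultdict(list))
    acc['A']['P'].append(np.array([1.0, 2.0, 3.0]))
    acc['A']['P'].append(np.array([3.0, 2.0, 1.0]))
    return np.mean(acc['A']['P'], axis=0)  # -> array([2., 2., 2.])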
def main():
    usage = './stem_vres_atom_positions.py temp.pdb [temp2.pdb ...]\n'
    usage += "Print out the positions of the atoms in coordinates "
    usage += "relative to the virtual residue coordinate system."
    parser = OptionParser(usage=usage)
#parser.add_option('-o', '--options', dest='some_option', default='yo', help="Place holder for a real option", type='str')
parser.add_option('-e', '--edge', dest='edge', default=False, action='store_true', help='Include the edge nucleotides in the statistics.')
parser.add_option('-p', '--pymol', dest='pymol', default=False, action='store_true', help='Output in pymol cgo format.')
parser.add_option('-a', '--averages', dest='averages', default=False, action='store_true', help='Output the average coordinates')
parser.add_option('-r', '--residue', dest='residue', default='AUGC', help="The type of residue to calculate the averages for.", type='str')
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(1)
all_coords = [co.defaultdict(list),
co.defaultdict(list)]
pp = cvp.PymolPrinter()
pp.draw_axes = True
for pdbfile in args:
try:
bg = ttmc.from_pdb(pdbfile)
except Exception as e:
log.exception(e)
continue
chain = bg.chain
for s in bg.stem_iterator():
for i in range(bg.stem_length(s)):
if i == 0 or i == bg.stem_length(s) - 1:
if not options.edge:
continue
(origin, basis, coords) = cgg.stem_vres_reference_atoms(bg, chain, s, i)
# subtract one because the sequence is 0-based
(p1, p2) = (bg.defines[s][0] + i - 1, bg.defines[s][3] - i - 1)
(r1, r2) = (bg.seq[p1], bg.seq[p2])
all_coords[0][r1].append(coords[0])
all_coords[1][r2].append(coords[1])
'''
if options.pymol:
if not options.averages:
add_pymol_coords(coords)
else:
print_atom_positions(coords)
'''
if options.averages:
if options.pymol:
print_average_atom_positions(all_coords, list(options.residue), pp)
pp.output_pymol_file()
else:
print_average_atom_positions(all_coords, list(options.residue), None)
else:
for i, c in enumerate(all_coords):
for k in c.keys():
for coords in c[k]:
for atom in coords.keys():
                    print(i, k, atom, " ".join(map(str, coords[atom])))
if __name__ == '__main__':
main()
|
pkerpedjiev/ernwin
|
fess/scripts/stem_vres_atom_positions.py
|
Python
|
agpl-3.0
| 5,999
|
[
"PyMOL"
] |
0eee44f7fea29642280138ad214158a234107cf56802eef60fa834945d1f785e
|
# -*- coding: utf-8 -*-
from collections import defaultdict
import dateutil.parser
import re
from typing import Any, DefaultDict, Dict, FrozenSet, List, Optional, Set, Tuple, Union
from django.http import HttpRequest, HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from django.db import connection
from rest_framework.decorators import api_view
from catmaid.models import UserRole, Project, Class, ClassInstance, \
ClassInstanceClassInstance, Relation, ReviewerWhitelist
from catmaid.control.authentication import requires_user_role, can_edit_or_fail
from catmaid.control.common import (get_relation_to_id_map,
get_class_to_id_map, get_request_bool, get_request_list)
def get_annotation_to_id_map(project_id:Union[int,str], annotations:List, relations=None,
classes=None) -> Dict:
"""Get a dictionary mapping annotation names to annotation IDs in a
particular project."""
if not relations:
relations = get_relation_to_id_map(project_id)
if not classes:
classes = get_class_to_id_map(project_id)
cursor = connection.cursor()
cursor.execute("""
SELECT ci.name, ci.id
FROM class_instance ci
JOIN UNNEST(%(annotations)s::text[]) query_annotation(name)
ON ci.name = query_annotation.name
WHERE project_id = %(project_id)s
AND ci.class_id = %(class_id)s
""", {
'project_id': project_id,
'class_id': classes['annotation'],
'annotations': annotations,
})
mapping = dict(cursor.fetchall())
return mapping
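# Editor's sketch (hypothetical `_demo_annotation_map_filter`, no database
# required, not part of CATMAID): the pure-Python equivalent of the UNNEST
# join above -- keep only those requested names that exist in a name -> id map.
def _demo_annotation_map_filter():
    known = {'soma': 1, 'axon': 2, 'dendrite': 3}
    requested = ['axon', 'soma', 'missing']
    return {name: known[name] for name in requested if name in known}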
def get_annotated_entities(project_id:Union[int,str], params, relations=None, classes=None,
allowed_classes=['neuron', 'annotation'], sort_by=None, sort_dir=None,
range_start=None, range_length=None, with_annotations:bool=True,
with_skeletons:bool=True, with_timestamps:bool=False,
import_only:Union[None, str]=None) -> Tuple[List, int]:
"""Get a list of annotated entities based on the passed in search criteria.
"""
if not relations:
relations = get_relation_to_id_map(project_id)
if not classes:
classes = get_class_to_id_map(project_id)
# Get IDs of constraining classes.
allowed_class_idx = {classes[c]:c for c in allowed_classes}
allowed_class_ids = list(allowed_class_idx.keys())
# One list of annotation sets for requested annotations and one for those
# of which subannotations should be included
annotation_sets:Set[FrozenSet] = set()
not_annotation_sets:Set[FrozenSet] = set()
annotation_sets_to_expand:Set[FrozenSet] = set()
# Get name, annotator and time constraints, if available
name = params.get('name', "").strip()
name_not = get_request_bool(params, 'name_not', False)
name_exact = get_request_bool(params, 'name_exact', False)
name_case_sensitive = get_request_bool(params, 'name_case_sensitive', False)
try:
annotator_ids = set(map(int, params.getlist('annotated_by')))
except AttributeError as e:
        # If no getlist() method is found on <params>, the passed-in object is
        # not a QueryDict but likely a regular dict. Accept this as okay.
annotator_ids = set()
start_date = params.get('annotation_date_start', "").strip()
end_date = params.get('annotation_date_end', "").strip()
# Allow parameterization of annotations using annotation names instead of IDs.
annotation_reference = params.get('annotation_reference', 'id')
if annotation_reference not in ('id', 'name'):
raise ValueError("Only 'id' and 'name' are accepted for the annotation_reference parameter")
# If annotation_names have been passed in, find matching IDs
if annotation_reference == 'name':
# Find annotation references
annotation_names:Set = set()
for key in params:
if key.startswith('annotated_with') or \
key.startswith('not_annotated_with') or \
key.startswith('sub_annotated_with'):
if len(params[key]) > 0:
annotation_names |= set(params[key].split(','))
annotation_id_map = get_annotation_to_id_map(project_id, list(annotation_names))
        def to_id(inval) -> int:  # mypy wants both conditional variants to share the same signature, incl. variable names
id = annotation_id_map.get(inval)
if not id:
raise ValueError("Unknown annotation: " + inval)
return id
else:
def to_id(inval) -> int:
return int(inval)
    # Collect annotations and sub-annotation information. Each entry can be a
    # list of IDs, which will be treated as an OR combination.
for key in params:
if key.startswith('annotated_with'):
if len(params[key]) > 0:
annotation_set = frozenset(to_id(a) for a in params[key].split(','))
annotation_sets.add(annotation_set)
elif key.startswith('not_annotated_with'):
if len(params[key]) > 0:
not_annotation_set = frozenset(to_id(a) for a in params[key].split(','))
not_annotation_sets.add(not_annotation_set)
elif key.startswith('sub_annotated_with'):
if len(params[key]) > 0:
annotation_set = frozenset(to_id(a) for a in params[key].split(','))
annotation_sets_to_expand.add(annotation_set)
filters = [
'ci.project_id = %(project_id)s',
'ci.class_id = ANY (%(class_ids)s)'
]
params = {
"project_id": project_id,
"class_ids": allowed_class_ids,
"annotated_with": relations['annotated_with'],
"model_of": relations['model_of']
}
if len(annotator_ids) > 0:
params['annotator_ids'] = list(annotator_ids)
if start_date:
params['start_date'] = start_date
if end_date:
params['end_date'] = end_date
    # If a name is given, add this to the query. If its first character is a
    # slash, treat it as a regex. There is a trigram index and an upper()
    # expression index on class_instance.name, and we add checks to utilize
    # both if possible. This is mainly useful for exact name queries, both
    # case-sensitive and case-insensitive, as well as regex and normal.
if name:
is_regex = name.startswith('/')
if is_regex:
op = '~' if name_case_sensitive else '~*'
upper_name_op = '~'
params["name"] = name[1:]
else:
op = '~~' if name_case_sensitive else '~~*'
upper_name_op = '~~'
            # LIKE (~~) and ILIKE (~~*) treat _ and % as wildcards, therefore
            # they need to be escaped in the input.
name = name.replace('_', '\\_').replace('%', '\\%')
params["name"] = name if name_exact else ('%' + name + '%')
if name_not:
filters.append(f"ci.name !{op} %(name)s")
filters.append(f"upper(ci.name) !{upper_name_op} upper(%(name)s)")
else:
filters.append(f"ci.name {op} %(name)s")
filters.append(f"upper(ci.name) {upper_name_op} upper(%(name)s)")
# Map annotation sets to their expanded sub-annotations
sub_annotation_ids = get_sub_annotation_ids(project_id, annotation_sets_to_expand,
relations, classes)
# Collect all annotations and their sub-annotation IDs (if requested) in a
# set each. For the actual query each set is connected with AND while
# for everything within one set OR is used.
annotation_id_sets = []
for annotation_set in annotation_sets:
current_annotation_ids = set(annotation_set)
# Add sub annotations, if requested
sa_ids = sub_annotation_ids.get(annotation_set)
if sa_ids and len(sa_ids):
current_annotation_ids.update(sa_ids)
annotation_id_sets.append(current_annotation_ids)
not_annotation_id_sets = []
for not_annotation_set in not_annotation_sets:
current_not_annotation_ids = set(not_annotation_set)
# Add sub annotations, if requested
sa_ids = sub_annotation_ids.get(not_annotation_set)
if sa_ids and len(sa_ids):
current_not_annotation_ids.update(sa_ids)
not_annotation_id_sets.append(current_not_annotation_ids)
# Build needed joins for annotated_with search criteria
joins = []
fields= ['ci.id, ci.user_id', 'ci.creation_time', 'ci.edition_time',
'ci.project_id', 'ci.class_id', 'ci.name', 'skel_link.skeletons']
creation_timestamp_fields = []
edition_timestamp_fields = []
for n, annotation_id_set in enumerate(annotation_id_sets):
joins.append(f"""
INNER JOIN class_instance_class_instance cici{n}
ON ci.id = cici{n}.class_instance_a
""")
filters.append(f"""
cici{n}.relation_id = %(annotated_with)s AND
cici{n}.class_instance_b = ANY (%(cici{n}_ann)s)
""")
if with_timestamps:
c_field = f'cici{n}.creation_time'
e_field = f'cici{n}.edition_time'
fields.append(c_field)
fields.append(e_field)
creation_timestamp_fields.append(c_field)
edition_timestamp_fields.append(e_field)
params[f'cici{n}_ann'] = list(annotation_id_set)
# Add annotator and time constraints, if available
if annotator_ids:
filters.append(f"""
cici{n}.user_id = ANY (%(annotator_ids)s)
""")
if start_date:
filters.append(f"""
cici{n}.creation_time >= %(start_date)s
""")
if end_date:
filters.append(f"""
cici{n}.creation_time <= %(end_date)s
""")
    # To exclude class instances that are linked to a particular annotation,
    # all annotations of each candidate are collected, and candidates whose
    # annotation list contains an exclusion annotation are filtered out.
if not_annotation_sets:
joins.append("""
LEFT JOIN LATERAL (
SELECT cici_a.class_instance_a AS id,
array_agg(cici_a.class_instance_b) AS annotations
FROM class_instance_class_instance cici_a
WHERE cici_a.class_instance_a = ci.id
AND cici_a.relation_id = %(annotated_with)s
GROUP BY 1
) ann_link ON ci.id = ann_link.id
""")
for n, anno_id_set in enumerate(not_annotation_sets):
filters.append(f"""
NOT (ann_link.annotations && %(cici_ex{n}_ann)s::bigint[])
""")
params[f'cici_ex{n}_ann'] = list(anno_id_set)
# The basic query
query = """
SELECT {fields}
FROM class_instance ci
{joins}
WHERE {where}
{sort}
{offset}
"""
cursor = connection.cursor()
# If there are range limits and given that it is likely that there are many
# entities returned, it is more efficient to get the total result number
# with two queries: 1. Get total number of neurons 2. Get limited set. The
# (too expensive) alternative would be to get all neurons for counting and
# limiting on the Python side.
num_total_records = None
offset = ""
if range_start is not None and range_length is not None:
# Get total number of results with separate query. No sorting or offset
# is needed for this.
query_fmt_params = {
'fields': 'COUNT(*)',
'joins': '\n'.join(joins),
'where': ' AND '.join(filters),
'sort': '',
'offset': ''
}
cursor.execute(query.format(**query_fmt_params), params)
num_total_records = cursor.fetchone()[0]
offset = "OFFSET %(range_start)s LIMIT %(range_length)s"
params['range_start'] = int(range_start)
params['range_length'] = int(range_length)
# Add skeleton ID info (if available)
joins.append("""
LEFT JOIN LATERAL (
SELECT cici_n.class_instance_b AS id,
array_agg(cici_n.class_instance_a) AS skeletons
FROM class_instance_class_instance cici_n
WHERE cici_n.class_instance_b = ci.id
AND cici_n.relation_id = %(model_of)s
GROUP BY 1
) skel_link ON ci.id = skel_link.id
""")
# Check if some nodes originate from an import transaction, if only a
# partial match is needed. This is done separately to use a more optimized
# query.
if import_only == 'partial' or import_only == 'full':
joins.append("""
JOIN catmaid_skeleton_summary css
ON css.skeleton_id = ANY(skel_link.skeletons)
""")
if import_only == 'partial':
filters.append('css.num_imported_nodes > 0')
else:
filters.append('css.num_imported_nodes > 0')
filters.append('css.num_imported_nodes = css.num_nodes')
elif import_only is None:
pass
else:
raise ValueError(f'Unknown import constraint mode: {import_only}')
query_fmt_params = {
"joins": "\n".join(joins),
"where": " AND ".join(filters),
"sort": "",
"offset": offset,
"fields": ', '.join(fields),
}
# Sort if requested
if sort_dir and sort_by:
regular_sort_orders = ('id', 'name', 'first_name', 'last_name')
timebased_sort_order = ('annotated_on', 'last_annotation_link_edit')
        if sort_by not in regular_sort_orders and sort_by not in timebased_sort_order:
            raise ValueError(f'Unknown sort order: {sort_by}')
if sort_by in timebased_sort_order and not with_timestamps:
raise ValueError('Set <with_timestamps> parameter to true')
        if sort_by in regular_sort_orders:
            # regular sort orders are used directly as column names in ORDER BY
            pass
elif sort_by == 'annotated_on':
sort_by = ', '.join(creation_timestamp_fields)
elif sort_by == 'last_annotation_link_edit':
sort_by = ', '.join(edition_timestamp_fields)
query_fmt_params['sort'] = f"ORDER BY {sort_by} {sort_dir.upper()}"
# Execute query and build result data structure
cursor.execute(query.format(**query_fmt_params), params)
entities = []
seen_ids:Set = set()
for ent in cursor.fetchall():
# Don't export objects with same ID multiple times
if ent[0] in seen_ids:
continue
class_name = allowed_class_idx[ent[5]]
entity_info = {
'id': ent[0],
'name': ent[6],
'type': class_name,
}
# Depending on the type of entity, some extra information is added.
if class_name == 'neuron':
entity_info['skeleton_ids'] = ent[7]
entities.append(entity_info)
seen_ids.add(ent[0])
if num_total_records is None:
num_total_records = len(entities)
if with_annotations:
entity_ids = [e['id'] for e in entities]
# Make second query to retrieve annotations and skeletons
annotation_fields = ['class_instance_a', 'class_instance_b',
'class_instance_b__name', 'user_id']
if with_timestamps:
annotation_fields.append('creation_time')
annotation_fields.append('edition_time')
annotations = ClassInstanceClassInstance.objects.filter(
relation_id = relations['annotated_with'],
class_instance_a__id__in = entity_ids).order_by('id').values_list(
*annotation_fields)
annotation_dict:DefaultDict[Any, List] = defaultdict(list)
for a in annotations:
ann_data = {'id': a[1], 'name': a[2], 'uid': a[3]}
if with_timestamps:
ann_data['creation_time'] = a[4]
ann_data['edition_time'] = a[5]
annotation_dict[a[0]].append(ann_data)
for ent in entities:
ent['annotations'] = annotation_dict.get(ent['id'], [])
return entities, num_total_records
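# Editor's sketch (hypothetical `_demo_and_or_annotation_filter`, standalone,
# not part of CATMAID): the AND/OR semantics implemented by the joins above --
# annotation sets are AND-combined, members of a single set are OR-combined.
def _demo_and_or_annotation_filter():
    entity_annotations = {10: {1, 2}, 11: {2, 3}, 12: {1, 3}}
    required_sets = [{1, 4}, {2}]  # means: (1 OR 4) AND 2
    return [e for e, anns in entity_annotations.items()
            if all(anns & s for s in required_sets)]  # -> [10]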
def get_sub_annotation_ids(project_id:Union[int,str], annotation_sets, relations, classes) -> Dict:
""" Sub-annotations are annotations that are annotated with an annotation
from the annotation_set passed. Additionally, transivitely annotated
annotations are returned as well. Note that all entries annotation_sets
must be frozenset instances, they need to be hashable.
"""
if not annotation_sets:
return {}
aaa_tuples = ClassInstanceClassInstance.objects.filter(
project_id=project_id,
class_instance_a__class_column=classes['annotation'],
class_instance_b__class_column=classes['annotation'],
relation_id = relations['annotated_with']).values_list(
'class_instance_b', 'class_instance_a')
# A set wrapper to keep a set in a dictionary
class set_wrapper:
def __init__(self):
self.data:Set = set()
# Create a dictionary of all annotations annotating a set of annotations
aaa:Dict = {}
for aa in aaa_tuples:
sa_set = aaa.get(aa[0])
if sa_set is None:
sa_set = set_wrapper()
aaa[aa[0]] = sa_set
sa_set.data.add(aa[1])
# Collect all sub-annotations by following the annotation hierarchy for
# every annotation in the annotation set passed.
sa_ids:Dict = {}
for annotation_set in annotation_sets:
# Start with an empty result set for each requested annotation set
ls:Set = set()
for a in annotation_set:
working_set = set([a])
while working_set:
parent_id = working_set.pop()
# Try to get the sub-annotations for this parent
child_ids = aaa.get(parent_id) or set_wrapper()
for child_id in child_ids.data:
if child_id not in sa_ids:
if child_id not in ls:
# Add all children as sub annotations
ls.add(child_id)
working_set.add(child_id)
# Store the result list for this ID
sa_ids[annotation_set] = list(ls)
return sa_ids
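# Editor's sketch (hypothetical `_demo_sub_annotation_walk`, standalone, not
# part of CATMAID): the transitive hierarchy walk performed above, on a plain
# dict that maps an annotation to its direct sub-annotations.
def _demo_sub_annotation_walk():
    children = {1: {2, 3}, 2: {4}}
    result, working = set(), {1}
    while working:
        parent = working.pop()
        for child in children.get(parent, set()):
            if child not in result:
                result.add(child)
                working.add(child)
    return result  # -> {2, 3, 4}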
@api_view(['POST'])
@requires_user_role([UserRole.Browse])
def query_annotated_classinstances(request:HttpRequest, project_id:Optional[Union[int,str]] = None) -> JsonResponse:
"""Query entities based on various constraints
Entities are objects that can be referenced within CATMAID's semantic
    space, e.g. neurons, annotations or stack groups. This API allows querying
    them, mainly by annotations that have been used with them. Multiple
annotation parameters can be used to combine different annotation sets with
AND. Elements of one annotation parameter are combined with OR.
---
parameters:
- name: name
description: The name (or a part of it) of result elements.
type: string
paramType: form
- name: name_exact
description: |
Whether the name has to match exactly or can be a part of the result
name. This is typically faster than using a regular expression.
False by default.
type: bool
paramType: form
required: false
defaultValue: false
- name: name_case_sensitive
description: |
Whether the name has to match the exact letter case provided. False
by default.
type: bool
paramType: form
required: false
defaultValue: false
- name: annotated_by
description: A result element was annotated by a user with this ID.
type: integer
paramType: form
allowMultiple: true
- name: annotation_date_start
description: The earliest YYYY-MM-DD date result elements have been annotated at.
format: date
type: string
paramType: query
- name: annotation_date_end
description: The latest YYYY-MM-DD date result elements have been annotated at.
format: date
type: string
paramType: query
- name: annotated_with
description: |
A comma separated list of annotation IDs which all annotate the
result elements.
type: integer
paramType: form
allowMultiple: true
- name: not_annotated_with
description: |
A comma separated list of annotation IDs which don't annotate the
result elements.
type: integer
paramType: form
allowMultiple: true
- name: sub_annotated_with
description: |
A comma separated list of annotation IDs that are contained
in either 'annotated_with' or 'not_annotated_with' that get expanded to
also include their sub-annotations in the query (of which then at
least one has to match inclusion or exclusion respectively).
type: integer
paramType: form
allowMultiple: true
- name: with_annotations
description: Indicate if annotations of result elements should be returned.
type: boolean
paramType: form
- name: types
description: |
        Allowed result types. Multiple types can be passed with multiple
parameters. Defaults to 'neuron' and 'annotation'.
type: string
paramType: form
allowMultiple: true
- name: sort_by
description: Indicates how results are sorted.
type: string
defaultValue: id
enum: [id, name, first_name, last_name, 'annotated_on', 'last_annotation_link_edit']
paramType: form
- name: sort_dir
description: Indicates sorting direction.
type: string
defaultValue: asc
enum: [asc, desc]
paramType: form
- name: range_start
description: The first result element index.
type: integer
paramType: form
- name: range_length
description: The number of results
type: integer
paramType: form
- name: annotation_reference
      description: Whether annotation references are IDs or names; can be 'id' or 'name'.
type: string
enum: [id, name]
defaultValue: id
required: false
paramType: form
- name: with_timestamps
description: Whether to return also the annotation time for each entity.
type: boolean
required: false
defaultValue: false
paramType: form
- name: import_only
description: |
Whether and how only skeletons that contain imported fragments
should be returned. If set to 'partial', only skeletons that have at
least one imported node in them are returned. If set to 'full', only
skeletons that are fully imported are returned. Not set by default.
type: string
required: false
paramType: form
models:
annotated_entity:
id: annotated_entity
description: A result entity.
properties:
name:
type: string
description: The name of the entity
required: true
id:
type: integer
description: The id of the entity
required: true
skeleton_ids:
type: array
description: A list of ids of skeletons modeling this entity
required: true
items:
type: integer
type:
type: string
description: Type of the entity
required: true
type:
entities:
type: array
items:
$ref: annotated_entity
required: true
totalRecords:
type: integer
required: true
"""
p = get_object_or_404(Project, pk = project_id)
classes = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
# Type constraints
allowed_classes = get_request_list(request.POST, 'types', ['neuron', 'annotation'])
sort_by = request.POST.get('sort_by', 'id')
    if sort_by not in ('id', 'name', 'first_name', 'last_name', 'annotated_on',
            'last_annotation_link_edit'):
        raise ValueError("Only 'id', 'name', 'first_name', 'last_name', "
                "'annotated_on' and 'last_annotation_link_edit' are allowed "
                "for the 'sort_by' parameter")
sort_dir = request.POST.get('sort_dir', 'asc')
if sort_dir not in ('asc', 'desc'):
raise ValueError("Only 'asc' and 'desc' are allowed for the 'sort-dir' parameter")
range_start = request.POST.get('range_start', None)
range_length = request.POST.get('range_length', None)
with_annotations = get_request_bool(request.POST, 'with_annotations', False)
with_timestamps = get_request_bool(request.POST, 'with_timestamps', False)
import_only = request.POST.get('import_only', None)
entities, num_total_records = get_annotated_entities(p.id, request.POST,
relations, classes, allowed_classes, sort_by, sort_dir, range_start,
range_length, with_annotations, with_timestamps=with_timestamps,
import_only=import_only)
return JsonResponse({
'entities': entities,
'totalRecords': num_total_records,
})
def _update_neuron_annotations(project_id:Union[int,str], neuron_id,
annotation_map:Dict[str,Any], losing_neuron_id=None) -> None:
""" Ensure that the neuron is annotated_with only the annotations given.
These annotations are expected to come as dictionary of annotation name
versus annotator ID.
If losing_neuron_id is provided, annotations missing on the neuron that
    exist for the losing neuron will be updated to refer to neuron_id, rather
than created from scratch. This preserves provenance such as creation times.
"""
annotated_with = Relation.objects.get(project_id=project_id,
relation_name='annotated_with')
qs = ClassInstanceClassInstance.objects.filter(
class_instance_a__id=neuron_id, relation=annotated_with)
qs = qs.select_related('class_instance_b').values_list(
'class_instance_b__name', 'class_instance_b__id', 'id')
existing_annotations = {e[0]: {
'annotation_id': e[1],
'cici_id': e[2]
} for e in qs}
update = set(annotation_map.keys())
existing = set(existing_annotations.keys())
missing = update - existing
if losing_neuron_id:
qs = ClassInstanceClassInstance.objects.filter(
class_instance_a__id=losing_neuron_id, relation=annotated_with)
qs = qs.select_related('class_instance_b').values_list(
'class_instance_b__name', 'id')
losing_existing_annotations = dict(qs)
losing_missing = frozenset(losing_existing_annotations.keys()) & missing
if losing_missing:
cici_ids = [losing_existing_annotations[k] for k in losing_missing]
u_ids = [annotation_map[k]['user_id'] for k in losing_missing]
cursor = connection.cursor()
cursor.execute('''
UPDATE class_instance_class_instance
SET class_instance_a = %s, user_id = missing.u_id
FROM UNNEST(%s::bigint[], %s::integer[]) AS missing(cici_id, u_id)
WHERE id = missing.cici_id;
''', (neuron_id, cici_ids, u_ids))
missing = missing - losing_missing
missing_map = {k:v for k,v in annotation_map.items() if k in missing}
_annotate_entities(project_id, [neuron_id], missing_map)
to_delete = existing - update
to_delete_ids = tuple(link['annotation_id'] for name, link in existing_annotations.items() \
if name in to_delete)
ClassInstanceClassInstance.objects.filter(project=project_id,
class_instance_a_id=neuron_id, relation=annotated_with,
class_instance_b_id__in=to_delete_ids).delete()
for aid in to_delete_ids:
delete_annotation_if_unused(project_id, aid, annotated_with)
to_update = update.intersection(existing)
to_update_ids = list(map(lambda x: existing_annotations[x]['cici_id'], to_update))
to_update_et = list(map(lambda x: annotation_map[x]['edition_time'], to_update))
to_update_ct = list(map(lambda x: annotation_map[x]['creation_time'], to_update))
cursor = connection.cursor()
cursor.execute("""
UPDATE class_instance_class_instance
SET creation_time = to_update.creation_time
FROM UNNEST(%s::bigint[], %s::timestamptz[])
AS to_update(cici_id, creation_time)
WHERE id = to_update.cici_id;
UPDATE class_instance_class_instance
        SET edition_time = to_update.edition_time
FROM UNNEST(%s::bigint[], %s::timestamptz[])
AS to_update(cici_id, edition_time)
WHERE id = to_update.cici_id;
""", (to_update_ids,
to_update_ct,
to_update_ids,
to_update_et))
def delete_annotation_if_unused(project, annotation, relation) -> Tuple[bool, int]:
""" Delete the given annotation instance if it is not used anymore.
    Returns a tuple where the first element states whether the annotation was
    deleted, and the second element is the number of remaining annotation links.
"""
num_annotation_links = ClassInstanceClassInstance.objects.filter(
project=project, class_instance_b=annotation, relation=relation).count()
if num_annotation_links:
return False, num_annotation_links
else:
# See if the annotation is annotated itself
meta_annotation_links = ClassInstanceClassInstance.objects.filter(
project=project, class_instance_a=annotation, relation=relation)
meta_annotation_ids = [cici.class_instance_b_id for cici in meta_annotation_links]
# Delete annotation
ClassInstance.objects.filter(project=project, id=annotation).delete()
# Delete also meta annotation instances, if they exist
for ma in meta_annotation_ids:
delete_annotation_if_unused(project, ma, relation)
return True, 0
def _annotate_entities(project_id:Union[int,str], entity_ids, annotation_map:Dict[str,Any],
update_existing=False) -> Tuple[Dict,Set,Set]:
""" Annotate the entities with the given <entity_ids> with the given
annotations. These annotations are expected to come as dictionary of
annotation name versus an object with at least the field 'user_id'
annotator ID. If the 'creation_time' and/or 'edition_time' fields are
available, they will be used for the respective columns. A listof all
annotation class instances that have been used is returned. Annotation
names can contain the counting pattern {nX} with X being a number. This
will add an incrementing number starting from X for each entity.
"""
new_annotations = set()
existing_annotations = set()
r = Relation.objects.get(project_id = project_id,
relation_name = 'annotated_with')
annotation_class = Class.objects.get(project_id = project_id,
class_name = 'annotation')
annotation_objects = {}
# Create a regular expression to find allowed patterns. The first group is
# the whole {nX} part, while the second group is X only.
counting_pattern = re.compile(r"(\{n(\d+)\})")
for annotation, meta in annotation_map.items():
# Look for patterns, replace all {n} with {n1} to normalize
annotation = annotation.replace("{n}", "{n1}")
# Find all {nX} in the annotation name
expanded_annotations = {}
if counting_pattern.search(annotation):
            # Create annotation names based on the counting patterns found, for
            # each entity.
for i, eid in enumerate(entity_ids):
a = annotation
while True:
# Find next match and cancel if there isn't any
m = counting_pattern.search(a)
if not m:
break
# Replace match
count = int(m.groups()[1]) + i
a = m.string[:m.start()] + str(count) + m.string[m.end():]
# Remember this annotation for the current entity
expanded_annotations[a] = [eid]
else:
# No matches, so use same annotation for all entities
expanded_annotations = {annotation: entity_ids}
# Make sure the annotation's class instance exists.
for a, a_entity_ids in expanded_annotations.items():
ci, created = ClassInstance.objects.get_or_create(
project_id=project_id, name=a,
class_column=annotation_class,
defaults={'user_id': meta['user_id']})
if created:
new_annotations.add(ci.id)
newly_annotated = set()
# Annotate each of the entities. Don't allow duplicates.
for entity_id in a_entity_ids:
new_cici_defaults = {
'class_instance_a_id': entity_id,
'user_id': meta['user_id']
}
for field in ('creation_time', 'edition_time'):
value = meta.get(field)
if value:
new_cici_defaults[field] = value
cici, created = ClassInstanceClassInstance.objects.get_or_create(
project_id=project_id, relation=r,
class_instance_a__id=entity_id, class_instance_b=ci,
defaults=new_cici_defaults)
if created:
newly_annotated.add(entity_id)
else:
existing_annotations.add(ci.id)
if update_existing:
# Update creation time and edition_time, if requested
cici.update(**new_cici_defaults)
# Remember which entities got newly annotated
annotation_objects[ci] = newly_annotated
return annotation_objects, new_annotations, existing_annotations
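# Editor's sketch (hypothetical `_demo_counting_pattern`, standalone, not part
# of CATMAID): the {nX} counting-pattern expansion performed above, producing
# one numbered annotation name per entity.
def _demo_counting_pattern(annotation='batch {n5}', num_entities=3):
    pattern = re.compile(r"(\{n(\d+)\})")
    names = []
    for i in range(num_entities):
        a = annotation.replace("{n}", "{n1}")
        while True:
            m = pattern.search(a)
            if not m:
                break
            a = a[:m.start()] + str(int(m.group(2)) + i) + a[m.end():]
        names.append(a)
    return names  # -> ['batch 5', 'batch 6', 'batch 7']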
def _annotate_entities_with_name(project_id:Union[int,str], user_id, entity_ids) -> Tuple[List[List[Any]], List[List[Any]]]:
cursor = connection.cursor()
annotated_with = Relation.objects.get(project_id=project_id,
relation_name='annotated_with')
annotation_class = Class.objects.get(project_id=project_id,
class_name='annotation')
name_annotation, _ = ClassInstance.objects.get_or_create(project_id=project_id,
class_column=annotation_class, name='Name', defaults={
'user_id': user_id,
})
entity_name_map = dict(ClassInstance.objects.filter(
pk__in=entity_ids).values_list('id', 'name'))
entity_names = set(entity_name_map.values())
existing_name_annotations = dict(ClassInstance.objects.filter(
project_id=project_id, class_column=annotation_class,
name__in=entity_names).values_list('name', 'id'))
missing_name_annotations = entity_names - set(existing_name_annotations.keys())
if missing_name_annotations:
        # Escape single quotes by doubling them
escaped_name_annotations = (n.replace("'", "''") for n in missing_name_annotations)
values = (f"({user_id}, {project_id}, {annotation_class.id}, '{x}')" for x in escaped_name_annotations)
values_str = ','.join(values) or '()'
cursor.execute(f"""
INSERT INTO class_instance (user_id, project_id, class_id, name)
VALUES {values_str}
RETURNING name, id;
""")
added_annotations = dict(cursor.fetchall())
existing_name_annotations.update(added_annotations)
# Now with all name annotations available we need to make sure all of them
# have the meta annotation 'Name'.
cursor.execute("""
INSERT INTO class_instance_class_instance (project_id, user_id,
class_instance_a, class_instance_b, relation_id)
SELECT %(project_id)s, %(user_id)s, ci.id, %(name_ann_id)s, %(rel_id)s
FROM class_instance ci
JOIN UNNEST(%(name_ann_names)s::text[]) q(name)
ON q.name = ci.name
LEFT JOIN class_instance_class_instance cici
ON cici.class_instance_a = ci.id
AND cici.class_instance_b = %(name_ann_id)s
AND cici.relation_id = %(rel_id)s
WHERE cici.id IS NULL
AND ci.project_id = %(project_id)s
AND ci.class_id = %(annotation_class_id)s
RETURNING id
""", {
'project_id': project_id,
'user_id': user_id,
'name_ann_id': name_annotation.id,
'rel_id': annotated_with.id,
'annotation_class_id': annotation_class.id,
'name_ann_names': list(existing_name_annotations.keys()),
})
created_name_links = cursor.fetchall()
# Now we have valid name annotations for each target entity. The final step
# is to link those name annotations to the entities.
cursor.execute("""
INSERT INTO class_instance_class_instance (project_id, user_id,
class_instance_a, class_instance_b, relation_id)
SELECT %(project_id)s, %(user_id)s, ci.id, ci_name.id, %(rel_id)s
FROM class_instance ci
JOIN UNNEST(%(entity_ids)s::bigint[]) q(id)
ON q.id = ci.id
JOIN class_instance ci_name
ON ci_name.name = ci.name
LEFT JOIN class_instance_class_instance cici
ON cici.class_instance_a = ci.id
AND cici.class_instance_b = ci_name.id
AND cici.relation_id = %(rel_id)s
WHERE cici.id IS NULL
AND ci.project_id = %(project_id)s
AND ci_name.project_id = %(project_id)s
AND ci_name.class_id = %(annotation_class_id)s
RETURNING class_instance_a
""", {
'project_id': project_id,
'user_id': user_id,
'name_ann_id': name_annotation.id,
'rel_id': annotated_with.id,
'entity_ids': entity_ids,
'annotation_class_id': annotation_class.id,
})
updated_cis = cursor.fetchall()
return updated_cis, created_name_links
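# A minimal usage sketch for the helper above (hypothetical IDs, assuming the
# project has the standard 'annotation' class and 'annotated_with' relation):
#
#   updated_cis, created_name_links = _annotate_entities_with_name(
#           project_id=1, user_id=2, entity_ids=[42, 43])
#   # updated_cis: entities that received a name annotation link,
#   # created_name_links: newly created "Name" meta annotation links.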
@requires_user_role(UserRole.Annotate)
def annotate_entities(request:HttpRequest, project_id = None) -> JsonResponse:
p = get_object_or_404(Project, pk = project_id)
# Read keys in a sorted manner
sorted_keys = sorted(request.POST.keys())
annotations = get_request_list(request.POST, 'annotations', [])
meta_annotations = get_request_list(request.POST, 'meta_annotations', [])
entity_ids = get_request_list(request.POST, 'entity_ids', [], map_fn=int)
skeleton_ids = get_request_list(request.POST, 'skeleton_ids', [], map_fn=int)
if any(skeleton_ids):
skid_to_eid = dict(ClassInstance.objects.filter(project = p,
class_column__class_name = 'neuron',
cici_via_b__relation__relation_name = 'model_of',
cici_via_b__class_instance_a__in = skeleton_ids).values_list(
'cici_via_b__class_instance_a', 'id'))
entity_ids += [skid_to_eid[skid] for skid in skeleton_ids]
    # Annotate entities
annotation_map = {a: { 'user_id': request.user.id } for a in annotations}
annotation_objs, new_annotations, existing_annotations = _annotate_entities(
project_id, entity_ids, annotation_map)
# Annotate annotations
if meta_annotations:
annotation_ids = [a.id for a in annotation_objs.keys()]
meta_annotation_map = {ma: { 'user_id': request.user.id } for ma in meta_annotations}
meta_annotation_objs, new_meta_annotations, existing_meta_annotations = \
_annotate_entities(project_id, annotation_ids, meta_annotation_map)
# Keep track of new annotations
new_annotations.update(new_meta_annotations)
# Update used annotation objects set
for ma, me in meta_annotation_objs.items():
entities = annotation_objs.get(ma)
if entities:
entities.update(me)
else:
annotation_objs[ma] = me
result = {
'message': 'success',
'annotations': [{
'name': a.name,
'id': a.id,
'entities': list(e)
} for a,e in annotation_objs.items()],
'new_annotations': list(new_annotations),
'existing_annotations': list(existing_annotations),
}
return JsonResponse(result)
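# Example POST payload for annotate_entities() above (hypothetical names and
# IDs), matching the request parameters it reads:
#
#   annotations[0]=my annotation
#   meta_annotations[0]=my meta annotation
#   entity_ids[0]=42
#   skeleton_ids[0]=4200
#
# The JSON response lists each used annotation with its newly annotated
# entities, plus the sets of new and existing annotation IDs.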
@api_view(['POST'])
@requires_user_role(UserRole.Annotate)
def add_neuron_name_annotations(request:HttpRequest, project_id = None) -> JsonResponse:
"""Add missing neuron name annotations.
    For each passed in neuron, identified through a list of neuron IDs and/or
    skeleton IDs, the neuron's base name is added as an annotation. Each
    neuron name annotation is meta-annotated with a "Name" annotation.
---
parameters:
skeleton_ids:
type: array
description: A list of skeleton IDs to update
required: false
items:
type: integer
entity_ids:
type: array
description: A list of target entity IDs to update
required: false
items:
type: integer
"""
p = get_object_or_404(Project, pk = project_id)
entity_ids = get_request_list(request.POST, 'entity_ids', [], map_fn=int)
skeleton_ids = get_request_list(request.POST, 'skeleton_ids', [], map_fn=int)
    if not entity_ids and not skeleton_ids:
        raise ValueError("Need either 'skeleton_ids' or 'entity_ids'")
if any(skeleton_ids):
skid_to_eid = dict(ClassInstance.objects.filter(project = p,
class_column__class_name = 'neuron',
cici_via_b__relation__relation_name = 'model_of',
cici_via_b__class_instance_a__in = skeleton_ids).values_list(
'cici_via_b__class_instance_a', 'id'))
entity_ids += [skid_to_eid[skid] for skid in skeleton_ids]
updated_cis, created_name_links = _annotate_entities_with_name(
project_id, request.user.id, entity_ids)
result = {
'message': 'success',
'updated_cis': updated_cis,
'created_meta_links': len(created_name_links),
}
return JsonResponse(result)
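# Example POST payload for the endpoint above (hypothetical skeleton ID):
#
#   skeleton_ids[0]=4200
#
# The response reports the updated class instances ('updated_cis') and the
# number of created "Name" meta annotation links ('created_meta_links').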
@requires_user_role(UserRole.Annotate)
def remove_annotations(request:HttpRequest, project_id=None) -> JsonResponse:
""" Removes an annotation from one or more entities.
"""
annotation_ids = get_request_list(request.POST, 'annotation_ids', [], map_fn=int)
entity_ids = get_request_list(request.POST, 'entity_ids', [], map_fn=int)
if not annotation_ids:
raise ValueError("No annotation IDs provided")
if not entity_ids:
raise ValueError("No entity IDs provided")
# Remove individual annotations
deleted_annotations = {}
deleted_links = []
num_left_annotations = {}
for annotation_id in annotation_ids:
cicis_to_delete, missed_cicis, deleted, num_left = _remove_annotation(
request.user, project_id, entity_ids, annotation_id)
# Keep track of results
num_left_annotations[str(annotation_id)] = num_left
targetIds = []
for cici in cicis_to_delete:
deleted_links.append(cici.id)
# The target is class_instance_a, because we deal with the
# "annotated_with" relation.
targetIds.append(cici.class_instance_a_id)
if targetIds:
deleted_annotations[annotation_id] = {
'targetIds': targetIds
}
return JsonResponse({
'deleted_annotations': deleted_annotations,
'deleted_links': deleted_links,
'left_uses': num_left_annotations
})
@requires_user_role(UserRole.Annotate)
def remove_annotation(request:HttpRequest, project_id=None, annotation_id=None) -> JsonResponse:
""" Removes an annotation from one or more entities.
"""
entity_ids = get_request_list(request.POST, 'entity_ids', [], map_fn=int)
cicis_to_delete, missed_cicis, deleted, num_left = _remove_annotation(
request.user, project_id, entity_ids, annotation_id)
if len(cicis_to_delete) > 1:
message = "Removed annotation from %s entities." % len(cicis_to_delete)
elif len(cicis_to_delete) == 1:
message = "Removed annotation from one entity."
else:
message = "No annotation removed."
if missed_cicis:
message += " Couldn't de-annotate %s entities, due to the lack of " \
"permissions." % len(missed_cicis)
if deleted:
message += " Also removed annotation instance, because it isn't used " \
"anywhere else."
else:
message += " There are %s links left to this annotation." % num_left
return JsonResponse({
'message': message,
'deleted_annotation': deleted,
'left_uses': num_left
})
def _remove_annotation(user, project_id:Union[int,str], entity_ids, annotation_id) -> Tuple[List, List, int, int]:
"""Remove an annotation made by a certain user in a given project on a set
of entities (usually neurons and annotations). Returned is a 4-tuple which
holds the deleted annotation links, the list of links that couldn't be
deleted due to lack of permission, if the annotation itself was removed
(because it wasn't used anymore) and how many uses of this annotation are
left.
"""
p = get_object_or_404(Project, pk=project_id)
relations = dict(Relation.objects.filter(
project_id=project_id).values_list('relation_name', 'id'))
# Get CICI instance representing the link
cici_n_a = ClassInstanceClassInstance.objects.filter(project=p,
relation_id=relations['annotated_with'],
class_instance_a__id__in=entity_ids,
class_instance_b__id=annotation_id)
# Make sure the current user has permissions to remove the annotation.
missed_cicis = []
cicis_to_delete = []
for cici in cici_n_a:
try:
can_edit_or_fail(user, cici.id, 'class_instance_class_instance')
cicis_to_delete.append(cici)
except Exception:
# Remember links for which permissions are missing
missed_cicis.append(cici)
    # Remove the link between entity and annotation for all links on which
    # the user has the necessary permissions.
if cicis_to_delete:
ClassInstanceClassInstance.objects \
.filter(id__in=[cici.id for cici in cicis_to_delete]) \
.delete()
# Remove the annotation class instance, regardless of the owner, if there
# are no more links to it
annotated_with = Relation.objects.get(project_id=project_id,
relation_name='annotated_with')
deleted, num_left = delete_annotation_if_unused(project_id, annotation_id,
annotated_with)
return cicis_to_delete, missed_cicis, deleted, num_left
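# Sketch of how the helper above is consumed (hypothetical IDs):
#
#   cicis_to_delete, missed_cicis, deleted, num_left = _remove_annotation(
#           request.user, 1, [42, 43], annotation_id=7)
#   # cicis_to_delete: removed links, missed_cicis: links lacking permission,
#   # deleted: whether the annotation instance itself was removed,
#   # num_left: remaining links to this annotation.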
def create_annotation_query(project_id, param_dict):
classes = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relations = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
annotation_query = ClassInstance.objects.filter(project_id=project_id,
class_column__id=classes['annotation'])
# Meta annotations are annotations that are used to annotate other
# annotations.
meta_annotations = [v for k,v in param_dict.items()
if k.startswith('annotations[')]
for meta_annotation in meta_annotations:
annotation_query = annotation_query.filter(
cici_via_b__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a = meta_annotation)
    # If information about annotated annotations is found, the current query
    # will include only annotations that are meta annotations for them.
annotated_annotations = [v for k,v in param_dict.items()
if k.startswith('annotates[')]
for sub_annotation in annotated_annotations:
annotation_query = annotation_query.filter(
cici_via_a__relation_id = relations['annotated_with'],
cici_via_a__class_instance_b = sub_annotation)
    # If parallel_annotations is given, only annotations that are used
    # alongside these are returned.
parallel_annotations = [v for k,v in param_dict.items()
if k.startswith('parallel_annotations[')]
for p_annotation in parallel_annotations:
annotation_query = annotation_query.filter(
cici_via_b__class_instance_a__cici_via_a__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a__cici_via_a__class_instance_b = p_annotation)
# Passing in a user ID causes the result set to only contain annotations
# that are used by the respective user. The query filter could lead to
# duplicate entries, therefore distinct() is added here.
user_id = param_dict.get('user_id', None)
if user_id:
user_id = int(user_id)
annotation_query = annotation_query.filter(
cici_via_b__user__id=user_id).distinct()
# With the help of the neuron_id field, it is possible to restrict the
# result set to only show annotations that are used for a particular neuron.
neuron_id = param_dict.get('neuron_id', None)
if neuron_id:
annotation_query = annotation_query.filter(
cici_via_b__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a__id=neuron_id)
    # Instead of a neuron, a user can also provide a skeleton ID to constrain
    # the returned annotation set. This is implicitly a neuron ID restriction.
skeleton_id = param_dict.get('skeleton_id', None)
if skeleton_id:
annotation_query = annotation_query.filter(
cici_via_b__relation_id = relations['annotated_with'],
cici_via_b__class_instance_a__cici_via_b__relation_id = relations['model_of'],
cici_via_b__class_instance_a__cici_via_b__class_instance_a__id = skeleton_id)
# If annotations to ignore are passed in, they won't appear in the
# result set.
ignored_annotations = [v for k,v in param_dict.items()
if k.startswith('ignored_annotations[')]
if ignored_annotations:
annotation_query = annotation_query.exclude(
name__in=ignored_annotations)
return annotation_query
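# A hypothetical param_dict illustrating the indexed keys this function
# understands (all IDs invented):
#
#   param_dict = {
#       'annotations[0]': 10,           # meta annotation ID
#       'annotates[0]': 42,             # annotated entity ID
#       'parallel_annotations[0]': 11,  # co-used annotation ID
#       'user_id': 2,
#       'neuron_id': 42,
#       'ignored_annotations[0]': 'uncertain',
#   }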
def generate_co_annotation_query(project_id:Union[int,str], co_annotation_ids, classIDs, relationIDs) -> Tuple[str,str]:
if not co_annotation_ids:
raise ValueError("Need co-annotations")
tables = []
where = []
annotation_class = classIDs['annotation']
annotated_with = relationIDs['annotated_with']
for i, annotation_id in enumerate(co_annotation_ids):
tables.append("""
class_instance a%s,
class_instance_class_instance cc%s""" % (i, i))
where.append("""
AND a%s.project_id = %s
AND a%s.class_id = %s
AND cc%s.class_instance_a = neuron.id
AND cc%s.relation_id = %s
AND cc%s.class_instance_b = a%s.id
AND a%s.id = '%s'
""" % (i, project_id,
i, annotation_class,
i,
i, annotated_with,
i, i,
i, annotation_id))
select = """
SELECT DISTINCT
a.id,
a.name,
(SELECT username FROM auth_user, class_instance_class_instance cici
WHERE cici.class_instance_b = cc.id
AND cici.user_id = auth_user.id
ORDER BY cici.edition_time DESC LIMIT 1) AS "last_user",
(SELECT MAX(edition_time) FROM class_instance_class_instance cici WHERE cici.class_instance_b = a.id) AS "last_used",
(SELECT count(*) FROM class_instance_class_instance cici WHERE cici.class_instance_b = a.id) AS "num_usage"
"""
rest = """
FROM
class_instance a,
class_instance_class_instance cc,
class_instance neuron,
%s
WHERE
neuron.class_id = %s
AND a.class_id = %s
AND a.project_id = %s
AND cc.class_instance_a = neuron.id
AND cc.relation_id = %s
AND cc.class_instance_b = a.id
%s
""" % (',\n'.join(tables),
classIDs['neuron'],
annotation_class,
project_id,
annotated_with,
''.join(where))
return select, rest
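# The two returned SQL fragments are combined by the caller, as done in
# _fast_co_annotations() below:
#
#   select, rest = generate_co_annotation_query(project_id, co_ids,
#           classIDs, relationIDs)
#   cursor.execute("SELECT count(DISTINCT a.id) " + rest)  # count query
#   cursor.execute(select + rest)                          # full result set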
@api_view(['GET', 'POST'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def list_annotations(request:HttpRequest, project_id=None) -> JsonResponse:
"""List annotations matching filtering criteria that are currently in use.
The result set is the intersection of annotations matching criteria (the
criteria are conjunctive) unless stated otherwise.
---
parameters:
- name: annotations
      description: A list of (meta) annotation IDs with which the resulting annotations should be annotated.
paramType: form
type: array
items:
type: integer
description: An annotation ID
- name: annotates
description: A list of entity IDs (like annotations and neurons) that should be annotated by the result set.
paramType: form
type: array
items:
type: integer
description: An entity ID
- name: parallel_annotations
      description: A list of annotation IDs that have to be used alongside the result set.
paramType: form
type: array
items:
type: integer
description: An annotation ID
- name: user_id
description: Result annotations have to be used by this user.
paramType: form
type: integer
- name: neuron_id
description: Result annotations will annotate this neuron.
paramType: form
type: integer
- name: skeleton_id
description: Result annotations will annotate the neuron modeled by this skeleton.
paramType: form
type: integer
- name: ignored_annotations
description: A list of annotation names that will be excluded from the result set.
paramType: form
type: array
items:
type: string
- name: if_modified_since
description: |
Works only if <simple> is True. Return 304 response if there is no
newer content with respect to the passed in UTC date in ISO format.
paramType: form
type: string
models:
annotation_user_list_element:
id: annotation_user_list_element
properties:
id:
type: integer
name: id
description: The user id
required: true
name:
type: string
name: name
description: The user name
required: true
annotation_list_element:
id: annotation_list_element
description: Represents one annotation along with its users.
properties:
name:
type: string
description: The name of the annotation
required: true
id:
type: integer
description: The id of the annotation
required: true
users:
type: array
description: A list of users
required: true
items:
$ref: annotation_user_list_element
type:
- type: array
items:
$ref: annotation_list_element
required: true
"""
cursor = connection.cursor()
classes = get_class_to_id_map(project_id, ('annotation',), cursor)
# If there is no 'annotation' class, there can't be annotations
if 'annotation' not in classes:
return JsonResponse({'annotations': []})
if request.method == 'GET':
simple = get_request_bool(request.GET, 'simple', False)
relations = get_relation_to_id_map(project_id, ('annotated_with',), cursor)
if_modified_since = request.GET.get('if_modified_since')
# In case a simple representation should be returned, return a simple
# list of name - ID mappings.
if simple:
# If there is no newer annotation data since the passed-in date, return
# a 304 response.
if if_modified_since:
if_modified_since = dateutil.parser.parse(if_modified_since)
cursor.execute("""
SELECT EXISTS(
SELECT 1 FROM class_instance
WHERE edition_time > %(date)s
AND class_id = %(annotation_class_id)s
)
""", {
'date': if_modified_since,
'annotation_class_id': classes['annotation'],
})
new_data_exists = cursor.fetchone()[0]
if not new_data_exists:
return HttpResponse(status=304)
cursor.execute("""
SELECT row_to_json(wrapped)::text
FROM (
SELECT COALESCE(array_to_json(array_agg(row_to_json(annotation))), '[]'::json) AS annotations
FROM (
SELECT ci.id, ci.name
FROM class_instance ci
WHERE project_id = %(project_id)s
AND class_id = %(annotation_class_id)s
) annotation
) wrapped
""", {
'project_id': project_id,
'annotation_class_id': classes['annotation'],
})
annotation_json_text = cursor.fetchone()[0]
return HttpResponse(annotation_json_text, content_type='application/json')
cursor.execute('''
SELECT DISTINCT ON (ci.id, u.id) ci.name, ci.id, u.id, u.username
FROM class_instance ci
LEFT OUTER JOIN class_instance_class_instance cici
ON (ci.id = cici.class_instance_b)
LEFT OUTER JOIN auth_user u
ON (cici.user_id = u.id)
WHERE (ci.class_id = %s AND (cici.relation_id = %s OR cici.id IS NULL));
''',
(classes['annotation'], relations['annotated_with']))
annotation_tuples = cursor.fetchall()
elif request.method == 'POST':
annotation_query = create_annotation_query(project_id, request.POST)
annotation_tuples = annotation_query.distinct().values_list('name', 'id',
'cici_via_b__user__id', 'cici_via_b__user__username')
else:
raise ValueError("Unsupported HTTP method")
# Create a set mapping annotation names to its users
ids = {}
annotation_dict:Dict[Any, List[Dict]] = {}
for annotation, aid, uid, username in annotation_tuples:
ids[aid] = annotation
        # The two conditionals below make sure an empty entry exists even if
        # uid is None.
        if aid not in annotation_dict:
            annotation_dict[aid] = []
if uid is not None:
annotation_dict[aid].append({'id': uid, 'name': username})
# Flatten dictionary to list
annotations = tuple({'name': ids[aid], 'id': aid, 'users': users} for aid, users in annotation_dict.items())
return JsonResponse({'annotations': annotations})
def _fast_co_annotations(request:HttpRequest, project_id:Union[int,str], display_start, display_length) -> JsonResponse:
classIDs = dict(Class.objects.filter(project_id=project_id).values_list('class_name', 'id'))
relationIDs = dict(Relation.objects.filter(project_id=project_id).values_list('relation_name', 'id'))
co_annotation_ids = set(get_request_list(request.POST, 'parallel_annotations', [], map_fn=int))
select, rest = generate_co_annotation_query(int(project_id), co_annotation_ids, classIDs, relationIDs)
entries = []
search_term = request.POST.get('sSearch', '').strip()
if search_term:
rest += "\nAND a.name ~ %s" # django will escape and quote the string
entries.append(search_term)
# Sorting?
sorting = request.POST.get('iSortCol_0', False)
sorter = ''
if sorting:
column_count = int(request.POST.get('iSortingCols', 0))
sorting_directions = (request.POST.get('sSortDir_%d' % d, 'DESC') for d in range(column_count))
fields = ('a.name', 'last_used', 'num_usage', 'last_user')
sorting_index = (int(request.POST.get('iSortCol_%d' % d)) for d in range(column_count))
sorting_cols = (fields[i] for i in sorting_index)
sorter = '\nORDER BY ' + ','.join('%s %s' % u for u in zip(sorting_cols, sorting_directions))
cursor = connection.cursor()
cursor.execute("SELECT count(DISTINCT a.id) " + rest, entries)
num_records = cursor.fetchone()[0]
response = {
'iTotalRecords': num_records,
'iTotalDisplayRecords': num_records,
}
rest += sorter
rest += '\nLIMIT %s OFFSET %s'
entries.append(display_length) # total to return
entries.append(display_start) # offset
cursor.execute(select + rest, entries)
# 0: a.id
# 1: a.name
# 2: last_user
# 3: last_used
# 4: num_usage
aaData = []
for row in cursor.fetchall():
last_used = row[3]
if last_used:
last_used = last_used.strftime("%Y-%m-%d %H:%M:%S")
else:
last_used = 'never'
        aaData.append([row[1],     # Annotation name
                       last_used,  # Last used date
                       row[4],     # Usage count
                       row[2],     # Last annotator
                       row[0]])    # Annotation ID
response['aaData'] = aaData
return JsonResponse(response)
@requires_user_role([UserRole.Browse])
def list_annotations_datatable(request:HttpRequest, project_id=None) -> JsonResponse:
display_start = int(request.POST.get('iDisplayStart', 0))
display_length = int(request.POST.get('iDisplayLength', -1))
if display_length < 0:
display_length = 2000 # Default number of result rows
# Speed hack
if 'parallel_annotations[0]' in request.POST:
return _fast_co_annotations(request, project_id, display_start, display_length)
annotation_query = create_annotation_query(project_id, request.POST)
should_sort = request.POST.get('iSortCol_0', False)
search_term = request.POST.get('sSearch', '')
# Additional information should also be constrained by neurons and user
# names. E.g., when viewing the annotation list for a user, the usage count
# should only display the number of times the user has used an annotation.
conditions = ""
if request.POST.get('neuron_id'):
conditions += "AND cici.class_instance_a = %s " % \
request.POST.get('neuron_id')
if request.POST.get('user_id'):
conditions += "AND cici.user_id = %s " % \
request.POST.get('user_id')
# Add (last) annotated on time
annotation_query = annotation_query.extra(
select={'annotated_on': 'SELECT MAX(cici.creation_time) FROM ' \
'class_instance_class_instance cici WHERE ' \
'cici.class_instance_b = class_instance.id %s' % conditions})
# Add user ID of last user
annotation_query = annotation_query.extra(
select={'last_user': 'SELECT auth_user.id FROM auth_user, ' \
'class_instance_class_instance cici ' \
'WHERE cici.class_instance_b = class_instance.id ' \
'AND cici.user_id = auth_user.id %s' \
'ORDER BY cici.edition_time DESC LIMIT 1' % conditions})
# Add usage count
annotation_query = annotation_query.extra(
select={'num_usage': 'SELECT COUNT(*) FROM ' \
'class_instance_class_instance cici WHERE ' \
'cici.class_instance_b = class_instance.id %s' % conditions})
if len(search_term) > 0:
annotation_query = annotation_query.filter(name__iregex=search_term)
if should_sort:
column_count = int(request.POST.get('iSortingCols', 0))
sorting_directions = [request.POST.get('sSortDir_%d' % d, 'DESC')
for d in range(column_count)]
sorting_directions = list(map(lambda d: '-' if d.upper() == 'DESC' else '',
sorting_directions))
fields = ['name', 'id', 'annotated_on', 'num_usage', 'last_user']
sorting_index = [int(request.POST.get('iSortCol_%d' % d))
for d in range(column_count)]
sorting_cols = list(map(lambda i: fields[i], sorting_index))
annotation_query = annotation_query.extra(order_by=[di + col for (di, col) in zip(
sorting_directions, sorting_cols)])
# We only require ID, name, last used and usage number
annotation_query = annotation_query.values_list(
'id', 'name', 'annotated_on', 'num_usage', 'last_user')
# Make sure we get a distinct result (which otherwise might not be the case
# due to the JOINS that are made).
annotation_query = annotation_query.distinct()
# num_records = annotation_query.count() # len(annotation_query)
num_records = len(annotation_query)
response:Dict[str, Any] = {
'iTotalRecords': num_records,
'iTotalDisplayRecords': num_records,
'aaData': []
}
for annotation in annotation_query[display_start:display_start + display_length]:
# Format last used time
if annotation[2]:
annotated_on = annotation[2].isoformat()
else:
annotated_on = 'never'
# Build datatable data structure
response['aaData'].append([
annotation[1], # Name
annotated_on, # Annotated on
annotation[3], # Usage
annotation[4], # Annotator ID
annotation[0]]) # ID
return JsonResponse(response)
@api_view(['POST'])
@requires_user_role([UserRole.Browse])
def annotations_for_skeletons(request:HttpRequest, project_id=None) -> JsonResponse:
"""Get annotations and who used them for a set of skeletons.
This method focuses only on annotations linked to skeletons and is likely to
be faster than the general query. Returns an object with two fields:
"annotations", which is itself an object with annotation IDs as fields,
giving access to the corresponding annotation names. And the field
"skeletons" is also an object, mapping skeleton IDs to lists of
    annotation-annotator ID pairs. The response is serialized with compact
    JSON separators (no whitespace after commas or colons).
---
parameters:
- name: skeleton_ids
description: A list of skeleton IDs which are annotated by the resulting annotations.
paramType: form
type: array
items:
type: integer
description: A skeleton ID
"""
skids = tuple(get_request_list(request.POST, 'skeleton_ids', [], map_fn=int))
cursor = connection.cursor()
cursor.execute("SELECT id FROM relation WHERE project_id=%s AND relation_name='annotated_with'" % int(project_id))
annotated_with_id = cursor.fetchone()[0]
# Select pairs of skeleton_id vs annotation name
cursor.execute('''
SELECT skeleton_neuron.class_instance_a,
annotation.id, annotation.name, neuron_annotation.user_id
FROM class_instance_class_instance skeleton_neuron,
class_instance_class_instance neuron_annotation,
class_instance annotation
WHERE skeleton_neuron.class_instance_a IN (%s)
AND skeleton_neuron.class_instance_b = neuron_annotation.class_instance_a
AND neuron_annotation.relation_id = %s
AND neuron_annotation.class_instance_b = annotation.id
''' % (",".join(map(str, skids)), annotated_with_id))
# Group by skeleton ID
m:DefaultDict[Any, List] = defaultdict(list)
a = dict()
for skid, aid, name, uid in cursor.fetchall():
m[skid].append({'id': aid, 'uid': uid})
a[aid] = name
return JsonResponse({
'skeletons': m,
'annotations': a
}, json_dumps_params={'separators': (',', ':')})
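# A hypothetical response illustrating the documented shape (IDs invented):
#
#   {"skeletons":{"4200":[{"id":7,"uid":2}]},"annotations":{"7":"my annotation"}}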
@api_view(['POST'])
@requires_user_role([UserRole.Browse])
def annotations_for_entities(request:HttpRequest, project_id=None) -> JsonResponse:
"""Query annotations linked to a list of objects.
These objects can for instance be neurons, annotations or stack groups. From
a database perspective, these objects are class instances.
Returned is an object with the fields "entities" and "annotations". The
former is an object mapping an entity ID to a list of annotations. Each
annotation is represented by an object containing its "id" and "uid", the
user who annotated it. The latter maps annotation IDs to annotation names.
For instance::
{ "entities": { "42": [{id: 1, uid: 12}, {id: 3, uid: 14}] }, "annotations": { 12: "example1", 14: "example2" } }
---
parameters:
- name: object_ids
description: A list of object IDs for which annotations should be returned.
paramType: form
type: array
allowMultiple: true
items:
type: integer
        description: An object ID
"""
# Get 'annotated_with' relation ID
object_ids = tuple(get_request_list(request.POST, 'object_ids', [], map_fn=int))
cursor = connection.cursor()
cursor.execute("""
SELECT id FROM relation
WHERE project_id=%s AND
relation_name='annotated_with'""" % int(project_id))
annotated_with_id = cursor.fetchone()[0]
    # Select pairs of entity_id vs annotation name
cursor.execute('''
SELECT entity_annotation.class_instance_a,
annotation.id, annotation.name, entity_annotation.user_id
FROM class_instance_class_instance entity_annotation,
class_instance annotation
WHERE entity_annotation.class_instance_a IN (%s)
AND entity_annotation.relation_id = %s
AND entity_annotation.class_instance_b = annotation.id
''' % (",".join(map(str, object_ids)), annotated_with_id))
# Group by entity ID
m:DefaultDict[Any, List] = defaultdict(list)
a = dict()
for eid, aid, name, uid in cursor.fetchall():
m[eid].append({'id': aid, 'uid': uid})
a[aid] = name
return JsonResponse({
'entities': m,
'annotations': a
}, json_dumps_params={'separators': (',', ':')})
def annotations_for_skeleton(project_id:Union[int,str], skeleton_id, relations=None, classes=None) -> Dict:
"""Get a a dictionary mapping annotations on the neuron modeled by the
passed in skeleton to the respective annotators.
"""
if not relations:
relations = get_relation_to_id_map(project_id)
if not classes:
classes = get_class_to_id_map(project_id)
cursor = connection.cursor()
cursor.execute("""
SELECT a.name, cici.user_id
FROM class_instance a
JOIN class_instance_class_instance cici
ON a.id = cici.class_instance_b
JOIN class_instance neuron
ON neuron.id = cici.class_instance_a
JOIN class_instance_class_instance skeleton_neuron
ON cici.class_instance_a = skeleton_neuron.class_instance_b
JOIN class_instance skeleton
ON skeleton.id = skeleton_neuron.class_instance_a
WHERE cici.project_id = %(project_id)s
AND a.class_id = %(annotation_class)s
AND cici.relation_id = %(annotated_with_rel)s
AND neuron.class_id = %(neuron_class)s
AND skeleton_neuron.relation_id = %(model_of_rel)s
AND skeleton_neuron.class_instance_a = %(skeleton_id)s
""", {
'project_id': project_id,
'annotation_class': classes['annotation'],
'annotated_with_rel': relations['annotated_with'],
'neuron_class': classes['neuron'],
'model_of_rel': relations['model_of'],
'skeleton_id': skeleton_id,
})
return dict(cursor.fetchall())
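# Usage sketch for the helper above (hypothetical IDs); relation and class
# maps are looked up automatically when not passed in:
#
#   annotator_map = annotations_for_skeleton(project_id=1, skeleton_id=4200)
#   # e.g. {'my annotation': 2} -- annotation name mapped to annotator ID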
|
tomka/CATMAID
|
django/applications/catmaid/control/annotation.py
|
Python
|
gpl-3.0
| 72,417
|
[
"NEURON"
] |
7da5bc0f7303ff5748a0cec537445a5cf7e5cf730ee95f226a8469f1bc754a31
|
# -*- coding: utf-8 -*-
"""
equip.visitors.classes
~~~~~~~~~~~~~~~~~~~~~~
Callback the visit method for each encountered class in the program.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
class ClassVisitor(object):
"""
A class visitor that is triggered for all encountered ``TypeDeclaration``.
Example, listing all types declared in the bytecode::
class TypeDeclVisitor(ClassVisitor):
def __init__(self):
ClassVisitor.__init__(self)
def visit(self, typeDecl):
print "New type: %s (parentDecl=%s)" \\
% (typeDecl.type_name, typeDecl.parent)
"""
def __init__(self):
pass
def visit(self, typeDecl):
pass
|
neuroo/equip
|
equip/visitors/classes.py
|
Python
|
apache-2.0
| 762
|
[
"VisIt"
] |
c2b98a66014e0dae23732b749996a1df1f1172a4b22b0f2ff3011bd8c3b6ef4a
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from pymatgen.core.spectrum import Spectrum
import numpy as np
"""
This module defines classes to represent XAS spectra.
"""
__author__ = "Chen Zheng"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Chen Zheng"
__email__ = "chz022@ucsd.edu"
__date__ = "Aug 9, 2017"
class XANES(Spectrum):
"""
Basic XANES object.
Args:
x: A sequence of x-ray energies in eV
y: A sequence of mu(E)
structure (Structure): Structure associated with the XANES.
absorption_specie (Specie): Specie associated with the XANES.
        edge (str): Absorption edge of the spectrum, e.g. "K".
.. attribute: x
The sequence of energies
.. attribute: y
The sequence of mu(E)
.. attribute: absorption_specie
The absorption_species of the spectrum
.. attribute: edge
The edge of XANES spectrum
"""
XLABEL = 'Energy'
YLABEL = 'Intensity'
def __init__(self, x, y, structure, absorption_specie, edge):
super(XANES, self).__init__(x, y, structure, absorption_specie, edge)
self.structure = structure
self.absorption_specie = absorption_specie
self.edge = edge
self.e0 = self.x[np.argmax(np.gradient(self.y) / np.gradient(self.x))]
def __str__(self):
return "%s %s Edge for %s: %s" % (
self.absorption_specie, self.edge,
self.structure.composition.reduced_formula,
super(XANES, self).__str__()
)
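# A minimal construction sketch (hypothetical values; assumes a pymatgen
# Structure object named `structure` and a Specie for the absorber):
#
#   energies = np.linspace(7100, 7200, 100)
#   mu = np.random.rand(100)  # placeholder absorption values
#   xanes = XANES(energies, mu, structure, absorption_specie, edge='K')
#   print(xanes.e0)  # energy at the maximum derivative of mu(E)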
|
matk86/pymatgen
|
pymatgen/analysis/xas/spectrum.py
|
Python
|
mit
| 1,581
|
[
"pymatgen"
] |
f91ecbc1a63f25804fd266db53bc47c0adaadcfef64e4604d30b5cb9775f33c3
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import BaseHTTPServer
import ConfigParser
import cookielib
import errno
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import webbrowser
from multiprocessing.pool import ThreadPool
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"
VCS = [
{
'name': VCS_MERCURIAL,
'aliases': ['hg', 'mercurial'],
}, {
'name': VCS_SUBVERSION,
'aliases': ['svn', 'subversion'],
}, {
'name': VCS_PERFORCE,
'aliases': ['p4', 'perforce'],
}, {
'name': VCS_GIT,
'aliases': ['git'],
}, {
'name': VCS_CVS,
'aliases': ['cvs'],
}]
VCS_SHORT_NAMES = [] # hg, svn, ...
VCS_ABBREVIATIONS = {} # alias: name, ...
for vcs in VCS:
VCS_SHORT_NAMES.append(min(vcs['aliases'], key=len))
VCS_ABBREVIATIONS.update((alias, vcs['name']) for alias in vcs['aliases'])
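# For the table above this yields, e.g.:
#   VCS_SHORT_NAMES == ['hg', 'svn', 'p4', 'git', 'cvs']
#   VCS_ABBREVIATIONS['svn'] == 'Subversion'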
# OAuth 2.0-Related Constants
LOCALHOST_IP = '127.0.0.1'
DEFAULT_OAUTH2_PORT = 8001
ACCESS_TOKEN_PARAM = 'access_token'
ERROR_PARAM = 'error'
OAUTH_DEFAULT_ERROR_MESSAGE = 'OAuth 2.0 error occurred.'
OAUTH_PATH = '/get-access-token'
OAUTH_PATH_PORT_TEMPLATE = OAUTH_PATH + '?port=%(port)d'
AUTH_HANDLER_RESPONSE = """\
<html>
<head>
<title>Authentication Status</title>
<script>
window.onload = function() {
window.close();
}
</script>
</head>
<body>
<p>The authentication flow has completed.</p>
</body>
</html>
"""
# Borrowed from google-api-python-client
OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Your browser has been opened to visit:
%s
If your browser is on a different machine then exit and re-run
upload.py with the command-line parameter
--no_oauth2_webbrowser
"""
NO_OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Go to the following link in your browser:
%s
and copy the access token.
"""
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self._reason = args["Error"]
self.info = args.get("Info", None)
@property
def reason(self):
# reason is a property on python 2.7 but a member variable on <=2.6.
# self.args is modified so it cannot be used as-is so save the value in
# self._reason.
return self._reason
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None,
extra_headers=None, save_cookies=False,
account_type=AUTH_ACCOUNT_TYPE):
"""Creates a new AbstractRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
account_type: Account type used for authentication. Defaults to
AUTH_ACCOUNT_TYPE.
"""
self.host = host
if (not self.host.startswith("http://") and
not self.host.startswith("https://")):
self.host = "http://" + self.host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers or {}
self.save_cookies = save_cookies
self.account_type = account_type
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data, headers={"Accept": "text/plain"})
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
print >>sys.stderr, ''
if e.reason == "BadAuthentication":
if e.info == "InvalidSecondFactor":
print >>sys.stderr, (
"Use an application-specific password instead "
"of your regular account password.\n"
"See http://www.google.com/"
"support/accounts/bin/answer.py?answer=185833")
else:
print >>sys.stderr, "Invalid username or password."
elif e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.\n"
"If you are using a Google Apps account the URL is:\n"
"https://www.google.com/a/yourdomain.com/UnlockCaptcha")
elif e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
elif e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
elif e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
elif e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
elif e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
elif e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
else:
# Unknown error.
raise
print >>sys.stderr, ''
continue
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
extra_headers: Dict containing additional HTTP headers that should be
included in the request (string header names mapped to their values),
or None to not include any additional headers.
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated and self.auth_function:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
if extra_headers:
for header, value in extra_headers.items():
req.add_header(header, value)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
if not self.auth_function:
raise
self._Authenticate()
elif e.code == 301:
# Handle permanent redirect manually.
url = e.info()["location"]
url_loc = urlparse.urlparse(url)
self.host = '%s://%s' % (url_loc[0], url_loc[1])
elif e.code >= 500:
# TODO: We should error out on a 500, but the server is too flaky
# for that at the moment.
StatusUpdate('Upload got a 500 response: %d' % e.code)
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
if isinstance(self.auth_function, OAuth2Creds):
access_token = self.auth_function()
if access_token is not None:
self.extra_headers['Authorization'] = 'OAuth %s' % (access_token,)
self.authenticated = True
else:
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
      # Don't save cookies across runs of upload.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
class CondensedHelpFormatter(optparse.IndentedHelpFormatter):
"""Frees more horizontal space by removing indentation from group
options and collapsing arguments between short and long, e.g.
'-o ARG, --opt=ARG' to -o --opt ARG"""
def format_heading(self, heading):
return "%s:\n" % heading
def format_option(self, option):
self.dedent()
res = optparse.HelpFormatter.format_option(self, option)
self.indent()
return res
def format_option_strings(self, option):
self.set_long_opt_delimiter(" ")
optstr = optparse.HelpFormatter.format_option_strings(self, option)
optlist = optstr.split(", ")
if len(optlist) > 1:
if option.takes_value():
# strip METAVAR from all but the last option
optlist = [x.split()[0] for x in optlist[:-1]] + optlist[-1:]
optstr = " ".join(optlist)
return optstr
parser = optparse.OptionParser(
usage=("%prog [options] [-- diff_options] [path...]\n"
"See also: http://code.google.com/p/rietveld/wiki/UploadPyUsage"),
add_help_option=False,
formatter=CondensedHelpFormatter()
)
parser.add_option("-h", "--help", action="store_true",
help="Show this help message and exit.")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
help="Print full diffs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default=DEFAULT_REVIEW_SERVER,
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
group.add_option("--oauth2", action="store_true",
dest="use_oauth2", default=False,
help="Use OAuth 2.0 instead of a password.")
group.add_option("--oauth2_port", action="store", type="int",
dest="oauth2_port", default=DEFAULT_OAUTH2_PORT,
help=("Port to use to handle OAuth 2.0 redirect. Must be an "
"integer in the range 1024-49151, defaults to "
"'%default'."))
group.add_option("--no_oauth2_webbrowser", action="store_false",
dest="open_oauth2_local_webbrowser", default=True,
help="Don't open a browser window to get an access token.")
group.add_option("--account_type", action="store", dest="account_type",
metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
choices=["GOOGLE", "HOSTED"],
help=("Override the default account type "
"(defaults to '%default', "
"valid choices are 'GOOGLE' and 'HOSTED')."))
group.add_option("-j", "--number-parallel-uploads",
dest="num_upload_threads", default=8,
help="Number of uploads to do in parallel.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-t", "--title", action="store", dest="title",
help="New issue subject or new patch set title")
group.add_option("-m", "--message", action="store", dest="message",
default=None,
help="New issue description or new patch set message")
group.add_option("-F", "--file", action="store", dest="file",
default=None, help="Read the message above from file.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
help="Base URL path for files (listed as \"Base URL\" when "
"viewing issue). If omitted, will be guessed automatically "
"for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Base revision/branch/tree to diff against. Use "
"rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
group.add_option("-p", "--send_patch", action="store_true",
dest="send_patch", default=False,
help="Same as --send_mail, but include diff as an "
"attachment, and prepend email subject with 'PATCH:'.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Explicitly specify version control system (%s)"
% ", ".join(VCS_SHORT_NAMES)))
group.add_option("--emulate_svn_auto_props", action="store_true",
dest="emulate_svn_auto_props", default=False,
help=("Emulate Subversion's auto properties feature."))
# Git-specific
group = parser.add_option_group("Git-specific options")
group.add_option("--git_similarity", action="store", dest="git_similarity",
metavar="SIM", type="int", default=50,
help=("Set the minimum similarity index for detecting renames "
"and copies. See `git diff -C`. (default 50)."))
group.add_option("--git_no_find_copies", action="store_false", default=True,
dest="git_find_copies",
help=("Prevents git from looking for copies (default off)."))
# Perforce-specific
group = parser.add_option_group("Perforce-specific options "
"(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
metavar="P4_PORT", default=None,
help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
metavar="P4_CHANGELIST", default=None,
help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
metavar="P4_CLIENT", default=None,
help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
metavar="P4_USER", default=None,
help=("Perforce user"))
# OAuth 2.0 Methods and Helpers
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters for an access token
or an error and then stops serving.
"""
access_token = None
error = None
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters into the server's
access_token or error and then stops serving.
"""
def SetResponseValue(self):
"""Stores the access token or error from the request on the server.
Will only do this if exactly one query parameter was passed in to the
request and that query parameter used 'access_token' or 'error' as the key.
"""
query_string = urlparse.urlparse(self.path).query
query_params = urlparse.parse_qs(query_string)
if len(query_params) == 1:
if query_params.has_key(ACCESS_TOKEN_PARAM):
access_token_list = query_params[ACCESS_TOKEN_PARAM]
if len(access_token_list) == 1:
self.server.access_token = access_token_list[0]
else:
error_list = query_params.get(ERROR_PARAM, [])
if len(error_list) == 1:
self.server.error = error_list[0]
def do_GET(self):
"""Handle a GET request.
Parses and saves the query parameters and prints a message that the server
has completed its lone task (handling a redirect).
Note that we can't detect if an error occurred.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.SetResponseValue()
self.wfile.write(AUTH_HANDLER_RESPONSE)
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
pass
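# For example, a hypothetical redirect to
#   http://127.0.0.1:8001/?access_token=XYZ
# is handled by do_GET() above, which stores 'XYZ' on the server instance
# as server.access_token.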
def OpenOAuth2ConsentPage(server=DEFAULT_REVIEW_SERVER,
port=DEFAULT_OAUTH2_PORT):
"""Opens the OAuth 2.0 consent page or prints instructions how to.
Uses the webbrowser module to open the OAuth server side page in a browser.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
A boolean indicating whether the page opened successfully.
"""
path = OAUTH_PATH_PORT_TEMPLATE % {'port': port}
parsed_url = urlparse.urlparse(server)
scheme = parsed_url[0] or 'https'
if scheme != 'https':
ErrorExit('Using OAuth requires a review server with SSL enabled.')
# If no scheme was given on command line the server address ends up in
# parsed_url.path otherwise in netloc.
host = parsed_url[1] or parsed_url[2]
page = '%s://%s%s' % (scheme, host, path)
page_opened = webbrowser.open(page, new=1, autoraise=True)
if page_opened:
print OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
return page_opened
def WaitForAccessToken(port=DEFAULT_OAUTH2_PORT):
"""Spins up a simple HTTP Server to handle a single request.
Intended to handle a single redirect from the production server after the
user authenticated via OAuth 2.0 with the server.
Args:
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
The access token passed to the localhost server, or None if no access token
was passed.
"""
httpd = ClientRedirectServer((LOCALHOST_IP, port), ClientRedirectHandler)
# Wait to serve just one request before deferring control back
# to the caller of WaitForAccessToken.
httpd.handle_request()
if httpd.access_token is None:
ErrorExit(httpd.error or OAUTH_DEFAULT_ERROR_MESSAGE)
return httpd.access_token
def GetAccessToken(server=DEFAULT_REVIEW_SERVER, port=DEFAULT_OAUTH2_PORT,
open_local_webbrowser=True):
"""Gets an Access Token for the current user.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_local_webbrowser: Boolean, defaults to True. If set, opens a page in
the user's browser.
Returns:
A string access token that was sent to the local server. If the serving page
via WaitForAccessToken does not receive an access token, this method
returns None.
"""
access_token = None
if open_local_webbrowser:
page_opened = OpenOAuth2ConsentPage(server=server, port=port)
if page_opened:
try:
access_token = WaitForAccessToken(port=port)
except socket.error, e:
print 'Can\'t start local webserver. Socket Error: %s\n' % (e.strerror,)
if access_token is None:
# TODO(dhermes): Offer to add to clipboard using xsel, xclip, pbcopy, etc.
page = 'https://%s%s' % (server, OAUTH_PATH)
print NO_OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
access_token = raw_input('Enter access token: ').strip()
return access_token
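# Sketch of typical use (hypothetical host name):
#   token = GetAccessToken(server='codereview.example.com')
# This opens the consent page, blocks in WaitForAccessToken() on the local
# redirect, and falls back to a manual raw_input() prompt if no token arrives.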
class KeyringCreds(object):
def __init__(self, server, host, email):
self.server = server
# Explicitly cast host to str to work around a bug in old versions of
# Keyring (versions before 0.10). Even though newer versions of Keyring fix
# this, some modern Linux distributions (such as Ubuntu 12.04) still bundle
# a version with the bug.
self.host = str(host)
self.email = email
self.accounts_seen = set()
def GetUserCredentials(self):
"""Prompts the user for a username and password.
Only use keyring on the initial call. If the keyring contains the wrong
password, we want to give the user a chance to enter another one.
"""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
global keyring
email = self.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % self.server)
password = None
if keyring and email not in self.accounts_seen:
try:
password = keyring.get_password(self.host, email)
except:
# Sadly, we have to trap all errors here as
# gnomekeyring.IOError inherits from object. :/
print "Failed to get password from keyring"
keyring = None
if password is not None:
print "Using password from system keyring."
self.accounts_seen.add(email)
else:
password = getpass.getpass("Password for %s: " % email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(self.host, email, password)
self.accounts_seen.add(email)
return (email, password)
class OAuth2Creds(object):
"""Simple object to hold server and port to be passed to GetAccessToken."""
def __init__(self, server, port, open_local_webbrowser=True):
self.server = server
self.port = port
self.open_local_webbrowser = open_local_webbrowser
def __call__(self):
"""Uses stored server and port to retrieve OAuth 2.0 access token."""
return GetAccessToken(server=self.server, port=self.port,
open_local_webbrowser=self.open_local_webbrowser)
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE, use_oauth2=False,
oauth2_port=DEFAULT_OAUTH2_PORT,
open_oauth2_local_webbrowser=True):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
use_oauth2: Boolean indicating whether OAuth 2.0 should be used for
authentication.
oauth2_port: Integer, the port where the localhost server receiving the
redirect is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_oauth2_local_webbrowser: Boolean, defaults to True. If True and using
OAuth, this opens a page in the user's browser to obtain a token.
Returns:
A new HttpRpcServer, on which RPC calls can be made.
"""
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if re.match(r'(http://)?localhost([:/]|$)', host):
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = HttpRpcServer(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
positional_args = [server]
if use_oauth2:
positional_args.append(
OAuth2Creds(server, oauth2_port, open_oauth2_local_webbrowser))
else:
positional_args.append(KeyringCreds(server, host, email).GetUserCredentials)
return HttpRpcServer(*positional_args,
host_override=host_override,
save_cookies=save_cookies,
account_type=account_type)
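# Sketch (hypothetical host): rpc = GetRpcServer("codereview.example.com")
# yields an HttpRpcServer whose Send(request_path, payload, ...) method is
# what the upload code below uses for all RPCs.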
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
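# Doctest-style illustration:
#   >>> GetContentType("photo.png")
#   'image/png'
#   >>> GetContentType("no-extension")
#   'application/octet-stream'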
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCodeAndStderr(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout, stderr and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (stdout, stderr, return code)
"""
logging.info("Running %s", command)
env = env.copy()
env['LC_MESSAGES'] = 'C'
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, errout, p.returncode
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout and the return code."""
out, err, retcode = RunShellWithReturnCodeAndStderr(command, print_output,
universal_newlines, env)
return out, retcode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
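# Example (illustration only): RunShell(["svn", "info"]) returns the command's
# stdout as a string; a nonzero exit status or (unless silent_ok=True) empty
# output aborts via ErrorExit().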
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GetGUID(self):
"""Return string to distinguish the repository from others, for example to
query all opened review issues for it"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def PostProcessDiff(self, diff):
"""Return the diff with any special post processing this VCS needs, e.g.
to include an svn-style "Index:"."""
return diff
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
result = ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
elif options.verbose:
result = "Uploading %s file for %s" % (type, filename)
checksum = md5(content).hexdigest()
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
try:
response_body = rpc_server.Send(url, body, content_type=ctype)
except urllib2.HTTPError, e:
response_body = ("Failed to upload file for %s. Got %d status code." %
(filename, e.code))
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
return result
# Build a filename -> patch key map; the first occurrence of a file wins.
patches = dict()
for patch_key, filename in patch_list:
  patches.setdefault(filename, patch_key)
threads = []
thread_pool = ThreadPool(options.num_upload_threads)
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content is not None:
t = thread_pool.apply_async(UploadFile, args=(filename,
file_id, base_content, is_binary, status, True))
threads.append(t)
if new_content is not None:
t = thread_pool.apply_async(UploadFile, args=(filename,
file_id, new_content, is_binary, status, False))
threads.append(t)
for t in threads:
print t.get(timeout=60)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/") and not mimetype.startswith("image/svg")
def IsBinaryData(self, data):
"""Returns true if data contains a null byte."""
# Derived from Mercurial's heuristic; see
# http://selenic.com/hg/file/848a6658069e/mercurial/util.py#l229
return bool(data and "\0" in data)
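# Illustration: IsBinaryData("foo\0bar") is True, IsBinaryData("plain text")
# is False, and IsBinaryData("") is False (empty data is treated as text).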
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# Base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GetGUID(self):
return self._GetInfo("Repository UUID")
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns base URL for current diff.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
url = self._GetInfo("URL")
if url:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
guess = ""
# TODO(anatoli) - repository specific hacks should be handled by server
if netloc == "svn.python.org" and scheme == "svn+ssh":
path = "projects" + path
scheme = "http"
guess = "Python "
elif netloc.endswith(".googlecode.com"):
scheme = "http"
guess = "Google Code "
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed %sbase = %s", guess, base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def _GetInfo(self, key):
"""Parses 'svn info' for current dir. Returns value for key or None"""
for line in RunShell(["svn", "info"]).splitlines():
if line.startswith(key + ": "):
return line.split(":", 1)[1].strip()
def _EscapeFilename(self, filename):
"""Escapes filename for SVN commands."""
if "@" in filename and not filename.endswith("@"):
filename = "%s@" % filename
return filename
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
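# Illustration (hypothetical expanded keyword): with keyword_str "Id Date",
# a base-file line such as
#   $Id: upload.py 123 2013-01-01 someuser $
# collapses to "$Id$", matching the untranslated form that "svn diff" emits.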
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals",
self._EscapeFilename(filename)])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start,
self._EscapeFilename(dirname) or "."]
out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
if returncode:
# Directory might not yet exist at start revision
# svn: Unable to find repository location for 'abc' in revision nnn
if re.match('^svn: Unable to find repository location for .+ in revision \d+', err):
old_files = ()
else:
ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
else:
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [self._EscapeFilename(dirname) or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type",
self._EscapeFilename(filename)], silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary:
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
# filename must not be escaped; we already append an '@' (peg revision) here.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
else:
mimetype = mimetype.strip()
get_base = False
# this test for binary is exactly the test prescribed by the
# official SVN docs at
# http://subversion.apache.org/faq.html#binary-files
is_binary = (bool(mimetype) and
not mimetype.startswith("text/") and
mimetype not in ("image/x-xbitmap", "image/x-xpixmap"))
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content, ret_code = RunShellWithReturnCode(
["svn", "cat", self._EscapeFilename(filename)],
universal_newlines=universal_newlines)
if ret_code and status[0] == "R":
# It's a replaced file without local history (see issue208).
# The base file needs to be fetched from the server.
url = "%s/%s" % (self.svn_base, filename)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
elif ret_code:
ErrorExit("Got error status from 'svn cat %s'" % filename)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def GetGUID(self):
revlist = RunShell("git rev-list --parents HEAD".split()).splitlines()
# M-A: Return the 1st root hash, there could be multiple when a
# subtree is merged. In that case, more analysis would need to
# be done to figure out which HEAD is the 'most representative'.
for r in revlist:
if ' ' not in r:
return r
def PostProcessDiff(self, gitdiff):
"""Converts the diff output to include an svn-style "Index:" line as well
as record the hashes of the files, so we can upload them along with our
diff."""
# Special hash used by git to indicate "no such content".
NULL_HASH = "0"*40
def IsFileNew(filename):
return filename in self.hashes and self.hashes[filename][0] is None
def AddSubversionPropertyChange(filename):
"""Add svn's property change information into the patch if given file is
new file.
We use Subversion's auto-props setting to retrieve its property.
See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
Subversion's [auto-props] setting.
"""
if self.options.emulate_svn_auto_props and IsFileNew(filename):
svnprops = GetSubversionPropertyChanges(filename)
if svnprops:
svndiff.append("\n" + svnprops + "\n")
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
# Add auto property here for previously seen file.
if filename is not None:
AddSubversionPropertyChange(filename)
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
before = None
if after == NULL_HASH:
after = None
self.hashes[filename] = (before, after)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
# Add auto property for the last seen file.
assert filename is not None
AddSubversionPropertyChange(filename)
return "".join(svndiff)
def GenerateDiff(self, extra_args):
extra_args = extra_args[:]
if self.options.revision:
if ":" in self.options.revision:
extra_args = self.options.revision.split(":", 1) + extra_args
else:
extra_args = [self.options.revision] + extra_args
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if "GIT_EXTERNAL_DIFF" in env:
del env["GIT_EXTERNAL_DIFF"]
# -M/-C will not print the diff for the deleted file when a file is renamed.
# This is confusing because the original file will not be shown on the
# review when a file is renamed. So, get a diff with ONLY deletes, then
# append a diff (with rename detection), without deletes.
cmd = [
"git", "diff", "--no-color", "--no-ext-diff", "--full-index",
"--ignore-submodules",
]
diff = RunShell(
cmd + ["--no-renames", "--diff-filter=D"] + extra_args,
env=env, silent_ok=True)
if self.options.git_find_copies:
similarity_options = ["--find-copies-harder", "-l100000",
"-C%s" % self.options.git_similarity ]
else:
similarity_options = ["-M%s" % self.options.git_similarity ]
diff += RunShell(
cmd + ["--diff-filter=AMCRT"] + similarity_options + extra_args,
env=env, silent_ok=True)
# The CL may contain only deleted files, or none at all, so accept a silent
# (empty) diff from both commands and check for an overall empty diff here.
if not diff:
ErrorExit("No output from %s" % (cmd + extra_args))
return diff
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=False)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(
["git", "show", "HEAD:" + filename], silent_ok=True)
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
# Grab the before/after content if we need it.
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before)
is_binary = self.IsImage(filename)
if base_content:
is_binary = is_binary or self.IsBinaryData(base_content)
# Only include the "after" file if it's binary; otherwise it
# is reconstructed from the diff.
if hash_after:
new_content = self.GetFileContent(hash_after)
is_binary = is_binary or self.IsBinaryData(new_content)
if not is_binary:
new_content = None
return (base_content, new_content, is_binary, status)
class CVSVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for CVS."""
def __init__(self, options):
super(CVSVCS, self).__init__(options)
def GetGUID(self):
"""For now we don't know how to get repository ID for CVS"""
return
def GetOriginalContent_(self, filename):
RunShell(["cvs", "up", filename], silent_ok=True)
# TODO: need to detect the file content encoding
content = open(filename).read()
return content.replace("\r\n", "\n")
def GetBaseFile(self, filename):
base_content = None
new_content = None
status = "A"
output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
if retcode:
ErrorExit("Got error status from 'cvs status %s'" % filename)
if output.find("Status: Locally Modified") != -1:
status = "M"
temp_filename = "%s.tmp123" % filename
os.rename(filename, temp_filename)
base_content = self.GetOriginalContent_(filename)
os.rename(temp_filename, filename)
elif output.find("Status: Locally Added"):
status = "A"
base_content = ""
elif output.find("Status: Needs Checkout"):
status = "D"
base_content = self.GetOriginalContent_(filename)
return (base_content, new_content, self.IsBinaryData(base_content), status)
def GenerateDiff(self, extra_args):
cmd = ["cvs", "diff", "-u", "-N"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(extra_args)
data, retcode = RunShellWithReturnCode(cmd)
count = 0
if retcode in [0, 1]:
for line in data.splitlines():
if line.startswith("Index:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from cvs diff")
return data
def GetUnknownFiles(self):
data, retcode = RunShellWithReturnCode(["cvs", "diff"])
if retcode not in [0, 1]:
ErrorExit("Got error status from 'cvs diff':\n%s" % (data,))
unknown_files = []
for line in data.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def GetGUID(self):
# See chapter "Uniquely identifying a repository"
# http://hgbook.red-bean.com/read/customizing-the-output-of-mercurial.html
info = RunShell("hg log -r0 --template {node}".split())
return info.strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
absname = os.path.join(self.repo_dir, filename)
return os.path.relpath(absname)
def GenerateDiff(self, extra_args):
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify the line to make it look as if it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir,
# but "hg diff" has given us the path relative to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
status, _ = out[0].split(' ', 1)
if len(out) > 1 and status == "A":
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True)
is_binary = self.IsBinaryData(base_content)
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or self.IsBinaryData(new_content)
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary:
new_content = None
return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Perforce."""
def __init__(self, options):
def ConfirmLogin():
# Make sure we have a valid perforce session
while True:
data, retcode = self.RunPerforceCommandWithReturnCode(
["login", "-s"], marshal_output=True)
if not data:
ErrorExit("Error checking perforce login")
if not retcode and (not "code" in data or data["code"] != "error"):
break
print "Enter perforce password: "
self.RunPerforceCommandWithReturnCode(["login"])
super(PerforceVCS, self).__init__(options)
self.p4_changelist = options.p4_changelist
if not self.p4_changelist:
ErrorExit("A changelist id is required")
if options.revision:
ErrorExit("--rev is not supported for perforce")
self.p4_port = options.p4_port
self.p4_client = options.p4_client
self.p4_user = options.p4_user
ConfirmLogin()
if not options.title:
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
if description and "desc" in description:
# Rietveld doesn't support multi-line descriptions
raw_title = description["desc"].strip()
lines = raw_title.splitlines()
if len(lines):
options.title = lines[0]
def GetGUID(self):
"""For now we don't know how to get repository ID for Perforce"""
return
def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
universal_newlines=True):
args = ["p4"]
if marshal_output:
# -G makes perforce format its output as marshalled python objects
args.extend(["-G"])
if self.p4_port:
args.extend(["-p", self.p4_port])
if self.p4_client:
args.extend(["-c", self.p4_client])
if self.p4_user:
args.extend(["-u", self.p4_user])
args.extend(extra_args)
data, retcode = RunShellWithReturnCode(
args, print_output=False, universal_newlines=universal_newlines)
if marshal_output and data:
data = marshal.loads(data)
return data, retcode
def RunPerforceCommand(self, extra_args, marshal_output=False,
universal_newlines=True):
# This might be a good place to cache call results, since things like
# describe or fstat might get called repeatedly.
data, retcode = self.RunPerforceCommandWithReturnCode(
extra_args, marshal_output, universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
return data
def GetFileProperties(self, property_key_prefix = "", command = "describe"):
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
changed_files = {}
file_index = 0
# Try depotFile0, depotFile1, ... until we don't find a match
while True:
file_key = "depotFile%d" % file_index
if file_key in description:
filename = description[file_key]
change_type = description[property_key_prefix + str(file_index)]
changed_files[filename] = change_type
file_index += 1
else:
break
return changed_files
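# Illustration: "p4 -G describe" marshals per-file keys as depotFile0/action0,
# depotFile1/action1, ..., so GetFileProperties("action") returns a dict like
# {"//depot/path/foo.c": "edit", ...} (hypothetical depot path).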
def GetChangedFiles(self):
return self.GetFileProperties("action")
def GetUnknownFiles(self):
# Perforce doesn't detect new files, they have to be explicitly added
return []
def IsBaseBinary(self, filename):
base_filename = self.GetBaseFilename(filename)
return self.IsBinaryHelper(base_filename, "files")
def IsPendingBinary(self, filename):
return self.IsBinaryHelper(filename, "describe")
def IsBinaryHelper(self, filename, command):
file_types = self.GetFileProperties("type", command)
if filename not in file_types:
ErrorExit("Trying to check binary status of unknown file %s." % filename)
# This treats symlinks, macintosh resource files, temporary objects, and
# unicode as binary. See the Perforce docs for more details:
# http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
return not file_types[filename].endswith("text")
def GetFileContent(self, filename, revision, is_binary):
file_arg = filename
if revision:
file_arg += "#" + revision
# -q suppresses the initial line that displays the filename and revision
return self.RunPerforceCommand(["print", "-q", file_arg],
universal_newlines=not is_binary)
def GetBaseFilename(self, filename):
actionsWithDifferentBases = [
"move/add", # p4 move
"branch", # p4 integrate (to a new file), similar to hg "add"
"add", # p4 integrate (to a new file), after modifying the new file
]
# We only see a different base for "add" if this is a downgraded branch
# after a file was branched (integrated), then edited.
if self.GetAction(filename) in actionsWithDifferentBases:
# -Or shows information about pending integrations/moves
fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
marshal_output=True)
baseFileKey = "resolveFromFile0" # I think it's safe to use only file0
if baseFileKey in fstat_result:
return fstat_result[baseFileKey]
return filename
def GetBaseRevision(self, filename):
base_filename = self.GetBaseFilename(filename)
have_result = self.RunPerforceCommand(["have", base_filename],
marshal_output=True)
if "haveRev" in have_result:
return have_result["haveRev"]
def GetLocalFilename(self, filename):
where = self.RunPerforceCommand(["where", filename], marshal_output=True)
if "path" in where:
return where["path"]
def GenerateDiff(self, args):
class DiffData:
def __init__(self, perforceVCS, filename, action):
self.perforceVCS = perforceVCS
self.filename = filename
self.action = action
self.base_filename = perforceVCS.GetBaseFilename(filename)
self.file_body = None
self.base_rev = None
self.prefix = None
self.working_copy = True
self.change_summary = None
def GenerateDiffHeader(diffData):
header = []
header.append("Index: %s" % diffData.filename)
header.append("=" * 67)
if diffData.base_filename != diffData.filename:
if diffData.action.startswith("move"):
verb = "rename"
else:
verb = "copy"
header.append("%s from %s" % (verb, diffData.base_filename))
header.append("%s to %s" % (verb, diffData.filename))
suffix = "\t(revision %s)" % diffData.base_rev
header.append("--- " + diffData.base_filename + suffix)
if diffData.working_copy:
suffix = "\t(working copy)"
header.append("+++ " + diffData.filename + suffix)
if diffData.change_summary:
header.append(diffData.change_summary)
return header
def GenerateMergeDiff(diffData, args):
# -du generates a unified diff, which is nearly svn format
diffData.file_body = self.RunPerforceCommand(
["diff", "-du", diffData.filename] + args)
diffData.base_rev = self.GetBaseRevision(diffData.filename)
diffData.prefix = ""
# We have to replace p4's file status output (the lines starting
# with +++ or ---) to match svn's diff format
lines = diffData.file_body.splitlines()
first_good_line = 0
while (first_good_line < len(lines) and
not lines[first_good_line].startswith("@@")):
first_good_line += 1
diffData.file_body = "\n".join(lines[first_good_line:])
return diffData
def GenerateAddDiff(diffData):
fstat = self.RunPerforceCommand(["fstat", diffData.filename],
marshal_output=True)
if "headRev" in fstat:
diffData.base_rev = fstat["headRev"] # Re-adding a deleted file
else:
diffData.base_rev = "0" # Brand new file
diffData.working_copy = False
rel_path = self.GetLocalFilename(diffData.filename)
diffData.file_body = open(rel_path, 'r').read()
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -0,0 +1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " @@"
diffData.prefix = "+"
return diffData
def GenerateDeleteDiff(diffData):
diffData.base_rev = self.GetBaseRevision(diffData.filename)
is_base_binary = self.IsBaseBinary(diffData.filename)
# For deletes, base_filename == filename
diffData.file_body = self.GetFileContent(diffData.base_filename,
None,
is_base_binary)
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " +0,0 @@"
diffData.prefix = "-"
return diffData
changed_files = self.GetChangedFiles()
svndiff = []
filecount = 0
for (filename, action) in changed_files.items():
svn_status = self.PerforceActionToSvnStatus(action)
if svn_status == "SKIP":
continue
diffData = DiffData(self, filename, action)
# Is it possible to diff a branched file? Stackoverflow says no:
# http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
if svn_status == "M":
diffData = GenerateMergeDiff(diffData, args)
elif svn_status == "A":
diffData = GenerateAddDiff(diffData)
elif svn_status == "D":
diffData = GenerateDeleteDiff(diffData)
else:
ErrorExit("Unknown file action %s (svn action %s)." % \
(action, svn_status))
svndiff += GenerateDiffHeader(diffData)
for line in diffData.file_body.splitlines():
svndiff.append(diffData.prefix + line)
filecount += 1
if not filecount:
ErrorExit("No valid patches found in output from p4 diff")
return "\n".join(svndiff) + "\n"
def PerforceActionToSvnStatus(self, status):
# Mirroring the list at http://permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
# Is there something more official?
return {
"add" : "A",
"branch" : "A",
"delete" : "D",
"edit" : "M", # Also includes changing file types.
"integrate" : "M",
"move/add" : "M",
"move/delete": "SKIP",
"purge" : "D", # How does a file's status become "purge"?
}[status]
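# Illustration: PerforceActionToSvnStatus("edit") == "M", while "move/delete"
# maps to "SKIP" so GenerateDiff() drops the delete half of a move and reports
# only the "move/add" side (as a modification).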
def GetAction(self, filename):
changed_files = self.GetChangedFiles()
if filename not in changed_files:
ErrorExit("Trying to get base version of unknown file %s." % filename)
return changed_files[filename]
def GetBaseFile(self, filename):
base_filename = self.GetBaseFilename(filename)
base_content = ""
new_content = None
status = self.PerforceActionToSvnStatus(self.GetAction(filename))
if status != "A":
revision = self.GetBaseRevision(base_filename)
if not revision:
ErrorExit("Couldn't find base revision for file %s" % filename)
is_base_binary = self.IsBaseBinary(base_filename)
base_content = self.GetFileContent(base_filename,
revision,
is_base_binary)
is_binary = self.IsPendingBinary(filename)
if status != "D" and status != "SKIP":
relpath = self.GetLocalFilename(filename)
if is_binary:
new_content = open(relpath, "rb").read()
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
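# Doctest-style sketch (hypothetical two-file diff):
#   >>> d = "Index: a.py\n--- a.py\n+++ a.py\nIndex: b.py\n--- b.py\n"
#   >>> [f for f, _ in SplitPatch(d)]
#   ['a.py', 'b.py']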
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
def UploadFile(filename, data):
form_fields = [("filename", filename)]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
try:
response_body = rpc_server.Send(url, body, content_type=ctype)
except urllib2.HTTPError, e:
response_body = ("Failed to upload patch for %s. Got %d status code." %
(filename, e.code))
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
return ("Uploaded patch for " + filename, [lines[1], filename])
threads = []
thread_pool = ThreadPool(options.num_upload_threads)
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
filename = patch[0]
data = patch[1]
t = thread_pool.apply_async(UploadFile, args=(filename, data))
threads.append(t)
for t in threads:
result = t.get(timeout=60)
print result[0]
rv.append(result[1])
return rv
def GuessVCSName(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns a string indicating which VCS is detected.
Returns:
A pair (vcs, output). vcs is a string indicating which VCS was detected
and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
VCS_CVS, or VCS_UNKNOWN.
Since local perforce repositories can't be easily detected, this method
will only guess VCS_PERFORCE if any perforce options have been specified.
output is a string containing any interesting output from the vcs
detection routine, or None if there is nothing interesting.
"""
for attribute, value in options.__dict__.iteritems():
if attribute.startswith("p4") and value is not None:
return (VCS_PERFORCE, None)
def RunDetectCommand(vcs_type, command):
"""Helper to detect VCS by executing command.
Returns:
A pair (vcs, output) or None. Throws exception on error.
"""
try:
out, returncode = RunShellWithReturnCode(command)
if returncode == 0:
return (vcs_type, out.strip())
except OSError, (errcode, message):
if errcode != errno.ENOENT: # command not found code
raise
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
if res != None:
return res
# Subversion from 1.7 has a single centralized .svn folder
# ( see http://subversion.apache.org/docs/release-notes/1.7.html#wc-ng )
# That's why we use 'svn info' instead of checking for .svn dir
res = RunDetectCommand(VCS_SUBVERSION, ["svn", "info"])
if res != None:
return res
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
"--is-inside-work-tree"])
if res != None:
return res
# Detect CVS repositories: `cvs status` exits with status 0 inside a checkout.
res = RunDetectCommand(VCS_CVS, ["cvs", "status"])
if res != None:
return res
return (VCS_UNKNOWN, None)
def GuessVCS(options):
"""Helper to guess the version control system.
This verifies any user-specified VersionControlSystem (by command line
or environment variable). If the user didn't specify one, this examines
the current directory, guesses which VersionControlSystem we're using,
and returns an instance of the appropriate class. Exit with an error
if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
vcs = options.vcs
if not vcs:
vcs = os.environ.get("CODEREVIEW_VCS")
if vcs:
v = VCS_ABBREVIATIONS.get(vcs.lower())
if v is None:
ErrorExit("Unknown version control system %r specified." % vcs)
(vcs, extra_output) = (v, None)
else:
(vcs, extra_output) = GuessVCSName(options)
if vcs == VCS_MERCURIAL:
if extra_output is None:
extra_output = RunShell(["hg", "root"]).strip()
return MercurialVCS(options, extra_output)
elif vcs == VCS_SUBVERSION:
return SubversionVCS(options)
elif vcs == VCS_PERFORCE:
return PerforceVCS(options)
elif vcs == VCS_GIT:
return GitVCS(options)
elif vcs == VCS_CVS:
return CVSVCS(options)
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def CheckReviewer(reviewer):
"""Validate a reviewer -- either a nickname or an email addres.
Args:
reviewer: A nickname or an email address.
Calls ErrorExit() if it is an invalid email address.
"""
if "@" not in reviewer:
return # Assume nickname
parts = reviewer.split("@")
if len(parts) > 2:
ErrorExit("Invalid email address: %r" % reviewer)
assert len(parts) == 2
if "." not in parts[1]:
ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
"""Returns the content of [auto-props] section of Subversion's config file as
a dictionary.
Returns:
A dictionary whose key-value pair corresponds the [auto-props] section's
key-value pair.
An empty dictionary is returned when:
- the config file doesn't exist, or
- 'enable-auto-props' is not set to a true-like value in [miscellany].
"""
if os.name == 'nt':
subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
else:
subversion_config = os.path.expanduser("~/.subversion/config")
if not os.path.exists(subversion_config):
return {}
config = ConfigParser.ConfigParser()
config.read(subversion_config)
if (config.has_section("miscellany") and
config.has_option("miscellany", "enable-auto-props") and
config.getboolean("miscellany", "enable-auto-props") and
config.has_section("auto-props")):
props = {}
for file_pattern in config.options("auto-props"):
props[file_pattern] = ParseSubversionPropertyValues(
config.get("auto-props", file_pattern))
return props
else:
return {}
def ParseSubversionPropertyValues(props):
"""Parse the given property value which comes from [auto-props] section and
returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
See the following doctest for example.
>>> ParseSubversionPropertyValues('svn:eol-style=LF')
[('svn:eol-style', 'LF')]
>>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
[('svn:mime-type', 'image/jpeg')]
>>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
[('svn:eol-style', 'LF'), ('svn:executable', '*')]
"""
key_value_pairs = []
for prop in props.split(";"):
key_value = prop.split("=")
assert len(key_value) <= 2
if len(key_value) == 1:
# If no value is given, use '*', following Subversion's convention.
key_value_pairs.append((key_value[0], "*"))
else:
key_value_pairs.append((key_value[0], key_value[1]))
return key_value_pairs
def GetSubversionPropertyChanges(filename):
"""Return a Subversion's 'Property changes on ...' string, which is used in
the patch file.
Args:
filename: filename whose property might be set by [auto-props] config.
Returns:
A string like 'Property changes on |filename| ...' if given |filename|
matches any entries in [auto-props] section. None, otherwise.
"""
global svn_auto_props_map
if svn_auto_props_map is None:
svn_auto_props_map = LoadSubversionAutoProperties()
all_props = []
for file_pattern, props in svn_auto_props_map.items():
if fnmatch.fnmatch(filename, file_pattern):
all_props.extend(props)
if all_props:
return FormatSubversionPropertyChanges(filename, all_props)
return None
def FormatSubversionPropertyChanges(filename, props):
"""Returns Subversion's 'Property changes on ...' strings using given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
options, args = parser.parse_args(argv[1:])
if options.help:
if options.verbose < 2:
# hide Perforce options
parser.epilog = (
"Use '--help -v' to show additional Perforce options. "
"For more help, see "
"http://code.google.com/p/rietveld/wiki/CodeReviewHelp"
)
parser.option_groups.remove(parser.get_option_group('--p4_port'))
parser.print_help()
sys.exit(0)
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
base = options.base_url
if isinstance(vcs, SubversionVCS):
# Guessing the base field is only supported for Subversion.
# Note: Fetching base files may become deprecated in future releases.
guessed_base = vcs.GuessBase(options.download_base)
if base:
if guessed_base and base != guessed_base:
print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
(base, guessed_base)
else:
base = guessed_base
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
data = vcs.PostProcessDiff(data)
if options.print_diffs:
print "Rietveld diff start:*****"
print data
print "Rietveld diff end:*****"
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.use_oauth2:
options.save_cookies = False
rpc_server = GetRpcServer(options.server,
options.email,
options.host,
options.save_cookies,
options.account_type,
options.use_oauth2,
options.oauth2_port,
options.open_oauth2_local_webbrowser)
form_fields = []
repo_guid = vcs.GetGUID()
if repo_guid:
form_fields.append(("repo_guid", repo_guid))
if base:
b = urlparse.urlparse(base)
username, netloc = urllib.splituser(b.netloc)
if username:
logging.info("Removed username from base URL")
base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
b.query, b.fragment))
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
CheckReviewer(reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
CheckReviewer(cc)
form_fields.append(("cc", options.cc))
# Process --message, --title and --file.
message = options.message or ""
title = options.title or ""
if options.file:
if options.message:
ErrorExit("Can't specify both message and message file options")
file = open(options.file, 'r')
message = file.read()
file.close()
if options.issue:
prompt = "Title describing this patch set: "
else:
prompt = "New issue subject: "
title = (
title or message.split('\n', 1)[0].strip() or raw_input(prompt).strip())
if not title and not options.issue:
ErrorExit("A non-empty title is required for a new issue")
# For existing issues, it's fine to give a patchset an empty name. Rietveld
# doesn't accept that so use a whitespace.
title = title or " "
if len(title) > 100:
title = title[:99] + '…'
if title and not options.issue:
message = message or title
form_fields.append(("subject", title))
# If it's a new issue send message as description. Otherwise a new
# message is created below on upload_complete.
if message and not options.issue:
form_fields.append(("description", message))
# Send a hash of all the base file so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
if options.private:
if options.issue:
print "Warning: Private flag ignored when updating an existing issue."
else:
form_fields.append(("private", "1"))
if options.send_patch:
options.send_mail = True
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
payload = {} # payload for final request
if options.send_mail:
payload["send_mail"] = "yes"
if options.send_patch:
payload["attach_patch"] = "yes"
if options.issue and message:
payload["message"] = message
payload = urllib.urlencode(payload)
rpc_server.Send("/" + issue + "/upload_complete/" + (patchset or ""),
payload=payload)
return issue, patchset
def main():
try:
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
| sloanyang/depends | third_party/upload.py | Python | gpl-2.0 | 99,227 | ["VisIt"] | b627d7015e86b412f09b8001e083ab09318ed99a43a987db3e8da0f856a4049f |
#!/usr/bin/env python3
"""
Convert flat DICOM file set into an NDAR-compliant fileset
Usage
----
dcm2ndar.py -i <DICOM Directory> -o <NDAR Directory>
Example
----
% dcm2ndar.py -i sub-001 -o sub-001.ndar
Authors
----
Mike Tyszka, Caltech Brain Imaging Center
Dates
----
2016-08-09 JMT Adapt from dcm2bids.py
MIT License
Copyright (c) 2016 Mike Tyszka
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = '0.1.0'
import os
import sys
import argparse
import subprocess
import pydicom
import json
import glob
import shutil
import nibabel as nib
from datetime import datetime
from dateutil import relativedelta
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description='Convert DICOM files to NDAR-compliant fileset')
parser.add_argument('-i', '--indir', required=True, help='Source directory containing subject DICOM directories')
parser.add_argument('-o', '--outdir', required=False, help='Output directory for subject NDAR directories')
# Parse command line arguments
args = parser.parse_args()
dcm_root_dir = args.indir
if args.outdir:
ndar_root_dir = args.outdir
else:
ndar_root_dir = args.indir + '.ndar'
# Load protocol translation and exclusion info from DICOM directory
# If no translator is present, prot_dict is an empty dictionary
# and a template will be created in the DICOM directory. This template should be
# completed by the user and the conversion rerun.
prot_dict_json = os.path.join(dcm_root_dir, 'Protocol_Translator.json')
prot_dict = ndar_load_prot_dict(prot_dict_json)
# Set flag to write template protocol translator to DICOM directory
create_prot_dict = True
if prot_dict:
create_prot_dict = False
# Safe create output NDAR root directory
if os.path.isdir(ndar_root_dir):
shutil.rmtree(ndar_root_dir)
os.makedirs(ndar_root_dir)
# Loop over each subject's DICOM directory within the root source directory
for SID in os.listdir(dcm_root_dir):
dcm_sub_dir = os.path.join(dcm_root_dir, SID)
# Only process subdirectories
if os.path.isdir(dcm_sub_dir):
print('Processing subject ' + SID)
# Create subject directory
print(' Creating NDAR subject directory')
ndar_sub_dir = os.path.join(ndar_root_dir, SID)
subprocess.call(['mkdir', '-p', ndar_sub_dir])
# Create NDAR summary CSV for this subject
ndar_csv_fname = os.path.join(ndar_sub_dir, SID + '_NDAR.csv')
ndar_csv_fd = ndar_init_summary(ndar_csv_fname)
# Read additional subject-level DICOM header fields from first DICOM image
dcm_info = ndar_dcm_info(dcm_sub_dir)
# Run dcm2niix conversion from DICOM to Nifti with BIDS sidecars for metadata
# This relies on the current CBIC branch of dcm2niix which extracts additional DICOM fields
# required by NDAR
subprocess.call(['dcm2niix', '-b', 'y', '-f', 'sub-%n_%p', '-o', ndar_sub_dir, dcm_sub_dir])
# Loop over all Nifti files (*.nii, *.nii.gz) for this SID
# glob returns the full relative path from the NDAR root dir
for nii_fname_full in glob.glob(os.path.join(ndar_sub_dir, '*.nii*')):
# Read Nifti header for image FOV, extent (ie matrix) and voxel dimensions
print(' Reading Nifti header')
nii_info = ndar_nifti_info(nii_fname_full)
# Isolate base filename
nii_fname = os.path.basename(nii_fname_full)
# Parse file basename
SID, prot, fstub = ndar_parse_filename(nii_fname)
# Full path for file stub
fstub_full = os.path.join(ndar_sub_dir, fstub)
# Check if we're creating new protocol dictionary
if create_prot_dict:
print(' Adding protocol %s to dictionary' % prot)
# Add current protocol to protocol dictionary
# The value defaults to "EXCLUDE" which should be replaced with the correct NDAR
# ImageDescription for this protocol (eg "T1w Structural", "BOLD MB EPI Resting State")
prot_dict[prot] = "EXCLUDE"
else:
# JSON sidecar for this image
json_fname = fstub_full + '.json'
if not os.path.isfile(json_fname):
print('* JSON sidecar not found')
break
# Skip excluded protocols
if prot_dict[prot] == 'EXCLUDE':
print('* Excluding protocol ' + prot)
# Remove all files related to this protocol
for f in glob.glob(fstub_full + '.*'):
os.remove(f)
else:
print(' Converting protocol ' + prot)
# Read JSON sidecar contents
json_fd = open(json_fname, 'r')
info = json.load(json_fd)
json_fd.close()
# Combine JSON, Nifti and DICOM info dictionaries
info.update(nii_info)
info.update(dcm_info)
# Add remaining fields not in JSON or DICOM metadata
info['SID'] = SID
info['ImageFile'] = os.path.basename(nii_fname)
info['ImageDescription'] = prot_dict[prot]
info['ScanType'] = ndar_scantype(prot_dict[prot])
info['Orientation'] = ndar_orientation(info)
# Add row to NDAR summary CSV file
ndar_add_row(ndar_csv_fd, info)
# Delete JSON file
os.remove(json_fname)
# Close NDAR summary file for this subject
ndar_close_summary(ndar_csv_fd)
# Create combined protocol translator in DICOM root directory if necessary
if create_prot_dict:
ndar_create_prot_dict(prot_dict_json, prot_dict)
# Clean exit
sys.exit(0)
def ndar_load_prot_dict(prot_dict_json):
'''
Read protocol translations from JSON file
:param prot_dict_json:
:return:
'''
if os.path.isfile(prot_dict_json):
# Read JSON protocol translator
json_fd = open(prot_dict_json, 'r')
prot_trans = json.load(json_fd)
json_fd.close()
else:
print('* Protocol translator missing')
print('* Creating template translator in %s' % prot_dict_json)
# Initialize empty dictionary to be filled during subsequent file loop
prot_trans = dict()
return prot_trans
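# A minimal Protocol_Translator.json might look like the following (protocol
# names here are hypothetical; values start as "EXCLUDE" and are edited by the
# user to an NDAR image description before rerunning the conversion):
#
#   {
#       "T1w_MPRAGE": "T1w Structural",
#       "localizer": "EXCLUDE"
#   }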
def ndar_create_prot_dict(prot_dict_json, prot_dict):
'''
Write protocol translation dictionary template to JSON file
:param prot_dict_json:
:param prot_dict:
:return:
'''
json_fd = open(prot_dict_json, 'w')
json.dump(prot_dict, json_fd, indent=4, separators=(',', ':'))
json_fd.close()
print('')
print('---')
print('New protocol dictionary created : %s' % prot_dict_json)
print('Remember to replace "EXCLUDE" values in dictionary with an appropriate image description')
    print('For example "MP-RAGE T1w 3D structural" or "MB-EPI BOLD resting-state"')
print('---')
print('')
return
def ndar_parse_filename(fname):
"""
Extract SID and protocol string from filename in the form sub-<SID>_<Protocol String>.[nii or nii.gz]
:param fname:
:return: SID, prot, fstub
"""
# Init return values
SID, prot, fstub = 'None', 'None', 'None'
# Strip .nii or .nii.gz from fname
fstub = fname.replace('.nii.gz','').replace('.nii','')
# Split stub at first underscore
for chunk in fstub.split('_', 1):
if chunk.startswith('sub-'):
# SID is everything after "sub-" in this chunk
_, SID = chunk.split('sub-', 1)
else:
prot = chunk
return SID, prot, fstub
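# Example (hypothetical filename): ndar_parse_filename('sub-001_T1w.nii.gz')
# returns ('001', 'T1w', 'sub-001_T1w')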
def ndar_scantype(desc):
"""
Best effort guess at scan type from description
NDAR allowed MRI scan_type values
----
fMRI
MR structural (T1)
MR structural (T2)
MR structural (PD)
MR structural (FSPGR);
MR structural (MPRAGE)
MR structural (PD, T2)
MR structural (B0 map)
MR structural (B1 map);
Field Map
MR diffusion
single-shell DTI
multi-shell DTI
ASL
:param desc:
:return scan_type:
"""
# Convert description to upper case
desc = desc.upper()
# Search for common contrasts
if 'T1' in desc:
scan_type = 'MR structural (T1)'
elif 'T2' in desc:
scan_type = 'MR structural (T2)'
elif 'FIELDMAP' in desc or 'FMAP' in desc or 'FIELD MAP' in desc or 'B0' in desc:
scan_type = 'MR structural (B0 map)'
elif 'BOLD' in desc:
scan_type = 'fMRI'
else:
scan_type = 'MR structural (T1)' # T1 structural fallback value
return scan_type
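# Example mappings (descriptions of the kind suggested for the protocol
# dictionary): ndar_scantype('T1w Structural') -> 'MR structural (T1)' and
# ndar_scantype('BOLD MB EPI Resting State') -> 'fMRI'. Note that the 'T1'
# test runs first, so a description containing both 'T1' and 'BOLD' maps to
# 'MR structural (T1)'.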
def ndar_orientation(info):
orientation = 'Axial'
if 'spc3d' in info['PulseSequenceDetails']:
orientation = 'Sagittal'
if 'tfl3d' in info['PulseSequenceDetails']:
orientation = 'Sagittal'
return orientation
def ndar_nifti_info(nii_fname):
'''
Extract Nifti header fields not handled by dcm2niix
:param nii_fname: Nifti image filename
:return: nii_info: Nifti information dictionary
'''
# Init a new dictionary
nii_info = dict()
# Load Nifti header
nii = nib.load(nii_fname)
hdr = nii.header
dim = hdr['dim']
res = hdr['pixdim']
# Fill dictionary
nii_info['AcquisitionMatrix'] = '%dx%d' % (dim[1], dim[2])
nii_info['NDims'] = dim[0]
nii_info['ImageExtent1'] = dim[1]
nii_info['ImageExtent2'] = dim[2]
nii_info['ImageExtent3'] = dim[3]
nii_info['ImageExtent4'] = dim[4]
nii_info['ImageExtent5'] = dim[5]
nii_info['ImageResolution1'] = res[1]
nii_info['ImageResolution2'] = res[2]
nii_info['ImageResolution3'] = res[3]
nii_info['ImageResolution4'] = res[4]
nii_info['ImageResolution5'] = res[5]
# Use z dimension voxel spacing as slice thickness
    nii_info['SliceThickness'] = res[3]
if dim[0] > 3:
nii_info['Extent4Type'] = 'Timeseries'
else:
nii_info['Extent4Type'] = 'None'
return nii_info
def ndar_dcm_info(dcm_dir):
"""
Extract additional subject-level DICOM header fields not handled by dcm2niix
from first DICOM image in directory
:param dcm_dir: DICOM directory containing subject files
:return: dcm_info: extra information dictionary
"""
# Loop over files until first valid DICOM is found
ds = []
for dcm in os.listdir(dcm_dir):
try:
ds = pydicom.read_file(os.path.join(dcm_dir, dcm))
        except Exception:
            # Skip files that cannot be parsed as DICOM
            pass
# Break out if valid DICOM read
if ds:
break
# Init a new dictionary
dcm_info = dict()
# Read DoB and scan date
dob = ds.PatientBirthDate
scan_date = ds.AcquisitionDate
# Calculate age in months at time of scan using datetime functions
    d1 = datetime.strptime(dob, '%Y%m%d')
    d2 = datetime.strptime(scan_date, '%Y%m%d')
rd = relativedelta.relativedelta(d2, d1)
# Approximation since residual day to month conversion assumes 1 month = 30 days
age_months = rd.years * 12 + rd.months + round(rd.days / 30.0)
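    # Worked example (hypothetical dates): DoB 20100115 and scan 20160809 give
    # a relativedelta of 6 years, 6 months, 25 days, so
    # age_months = 6 * 12 + 6 + round(25 / 30.0) = 79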
# Fill dictionary
dcm_info['Sex'] = ds.PatientSex
dcm_info['PatientPosition'] = ds.PatientPosition
dcm_info['TransmitCoil'] = ds.TransmitCoilName
dcm_info['SoftwareVersions'] = ds.SoftwareVersions
dcm_info['PhotometricInterpretation'] = ds.PhotometricInterpretation
dcm_info['AgeMonths'] = age_months
    dcm_info['ScanDate'] = datetime.strftime(d2, '%m/%d/%Y')  # NDAR scan date format MM/DD/YYYY
return dcm_info
def ndar_init_summary(fname):
'''
Open a summary CSV file and initialize with NDAR Image03 preamble
:param fname:
:return:
'''
# Write NDAR Image03 preamble and column headers
ndar_fd = open(fname, 'w')
ndar_fd.write('"image","03"\n')
ndar_fd.write('"subjectkey","src_subject_id","interview_date","interview_age","gender","comments_misc",')
ndar_fd.write('"image_file","image_thumbnail_file","image_description","experiment_id","scan_type","scan_object",')
ndar_fd.write('"image_file_format","data_file2","data_file2_type","image_modality","scanner_manufacturer_pd",')
ndar_fd.write('"scanner_type_pd","scanner_software_versions_pd","magnetic_field_strength",')
ndar_fd.write('"mri_repetition_time_pd","mri_echo_time_pd","flip_angle","acquisition_matrix",')
ndar_fd.write('"mri_field_of_view_pd","patient_position","photomet_interpret",')
ndar_fd.write('"receive_coil","transmit_coil","transformation_performed","transformation_type","image_history",')
ndar_fd.write('"image_num_dimensions","image_extent1","image_extent2","image_extent3",')
ndar_fd.write('"image_extent4","extent4_type","image_extent5","extent5_type",')
ndar_fd.write('"image_unit1","image_unit2","image_unit3","image_unit4","image_unit5",')
ndar_fd.write('"image_resolution1","image_resolution2","image_resolution3","image_resolution4",')
ndar_fd.write('"image_resolution5","image_slice_thickness","image_orientation",')
ndar_fd.write('"qc_outcome","qc_description","qc_fail_quest_reason","decay_correction","frame_end_times",')
ndar_fd.write('"frame_end_unit","frame_start_times","frame_start_unit","pet_isotope","pet_tracer",')
ndar_fd.write('"time_diff_inject_to_image","time_diff_units","pulse_seq","slice_acquisition","software_preproc",')
ndar_fd.write('"study","week","experiment_description","visit","slice_timing",')
ndar_fd.write('"bvek_bval_files","bvecfile","bvalfile"')
# Final newline
ndar_fd.write('\n')
return ndar_fd
def ndar_close_summary(fd):
fd.close()
return
def ndar_add_row(fd, info):
"""
Write a single experiment row to the NDAR summary CSV file
:param fd:
:param info:
:return:
"""
# Field descriptions for NDAR Image03 MRI experiments
# ElementName, DataType, Size, Required, ElementDescription, ValueRange, Notes, Aliases
# subjectkey,GUID,,Required,The NDAR Global Unique Identifier (GUID) for research subject,NDAR*,,
fd.write('"TBD",')
# src_subject_id,String,20,Required,Subject ID how it's defined in lab/project,,,
fd.write('"%s",' % info.get('SID','Unknown'))
# interview_date,Date,,Required,Date on which the interview/genetic test/sampling/imaging was completed. MM/DD/YYYY,,Required field,ScanDate
fd.write('"%s",' % info.get('ScanDate','Unknown'))
# interview_age,Integer,,Required,Age in months at the time of the interview/test/sampling/imaging.,0 :: 1260,
# "Age is rounded to chronological month. If the research participant is 15-days-old at time of interview,
# the appropriate value would be 0 months. If the participant is 16-days-old, the value would be 1 month.",
    fd.write('%d,' % info.get('AgeMonths', -1))  # numeric default avoids a TypeError in '%d'
# gender,String,20,Required,Sex of the subject,M;F,M = Male; F = Female,
fd.write('"%s",' % info.get('Sex','Unknown'))
# comments_misc
fd.write('"",')
# image_file,File,,Required,"Data file (image, behavioral, anatomical, etc)",,,file_source
fd.write('"%s",' % info.get('ImageFile','Unknown'))
# image_thumbnail_file
fd.write('"",')
# Image description and scan type overlap strongly (eg fMRI), so we'll use the translated description provided
# by the user in the protocol dictionary for both NDAR fields. The user description should provide information
# about both the sequence type used (eg MB-EPI or MP-RAGE) and the purpose of the scan (BOLD resting-state,
# T1w structural, B0 fieldmap phase).
# Note the 50 character limit for scan type.
# image_description,String,512,Required,"Image description, i.e. DTI, fMRI, Fast SPGR, phantom, EEG, dynamic PET",,,
fd.write('"%s",' % info.get('ImageDescription','Unknown'))
# experiment_id,Integer,,Conditional,ID for the Experiment/settings/run,,,
fd.write('"",')
# scan_type,String,50,Required,Type of Scan,
# "MR diffusion; fMRI; MR structural (MPRAGE); MR structural (T1); MR structural (PD); MR structural (FSPGR);
# MR structural (T2); PET; ASL; microscopy; MR structural (PD, T2); MR structural (B0 map); MR structural (B1 map);
# single-shell DTI; multi-shell DTI; Field Map; X-Ray",,
fd.write('"%s",' % info.get('ScanType'))
# scan_object,String,50,Required,"The Object of the Scan (e.g. Live, Post-mortem, or Phantom",Live; Post-mortem; Phantom,,
fd.write('"Live",')
# image_file_format,String,50,Required,Image file format,
# AFNI; ANALYZE; AVI; BIORAD; BMP; BRIK; BRUKER; CHESHIRE; COR; DICOM; DM3; FITS; GE GENESIS; GE SIGNA4X; GIF;
# HEAD; ICO; ICS; INTERFILE; JPEG; LSM; MAGNETOM VISION; MEDIVISION; MGH; MICRO CAT; MINC; MIPAV XML; MRC; NIFTI;
# NRRD; OSM; PCX; PIC; PICT; PNG; QT; RAW; SPM; STK; TIFF; TGA; TMG; XBM; XPM; PARREC; MINC HDF; LIFF; BFLOAT;
# SIEMENS TEXT; ZVI; JP2; MATLAB; VISTA; ecat6; ecat7;,,
fd.write('"NIFTI",')
# data_file2
fd.write('"",')
# data_file2_type
fd.write('"",')
# image_modality,String,20,Required,Image modality, MRI;
fd.write('"MRI",')
# scanner_manufacturer_pd,String,30,Conditional,Scanner Manufacturer,,,
fd.write('"%s",' % info.get('Manufacturer','Unknown'))
# scanner_type_pd,String,50,Conditional,Scanner Type,,,ScannerID
fd.write('"%s",' % info.get('ManufacturersModelName','Unknown'))
# scanner_software_versions_pd
fd.write('"%s",' % info.get('SoftwareVersions','Unknown'))
# magnetic_field_strength,String,50,Conditional,Magnetic field strength,,,
    fd.write('%f,' % info.get('MagneticFieldStrength', -1.0))  # numeric default avoids a TypeError in '%f'
# mri_repetition_time_pd,Float,,Conditional,Repetition Time (seconds),,,
fd.write('%0.4f,' % info.get('RepetitionTime',-1.0))
# mri_echo_time_pd,Float,,Conditional,Echo Time (seconds),,,
fd.write('%0.4f,' % info.get('EchoTime',-1.0))
# flip_angle,String,30,Conditional,Flip angle,,,
fd.write('%0.1f,' % info.get('FlipAngle',-1.0))
# MRI conditional fields
fd.write('"%s",' % info.get('AcquisitionMatrix')) # acquisition_matrix
fd.write('"%s",' % info.get('FOV')) # mri_field_of_view_pd
fd.write('"%s",' % info.get('PatientPosition')) # patient_position
fd.write('"%s",' % info.get('PhotometricInterpretation')) # photomet_interpret
fd.write('"",') # receive_coil
fd.write('"%s",' % info.get('TransmitCoil')) # transmit_coil
fd.write('"No",') # transformation_performed
fd.write('"",') # transformation_type
fd.write('"",') # image_history
fd.write('%d,' % info.get('NDims')) # image_num_dimensions
fd.write('%d,' % info.get('ImageExtent1')) # image_extent1
fd.write('%d,' % info.get('ImageExtent2')) # image_extent2
fd.write('%d,' % info.get('ImageExtent3')) # image_extent3
fd.write('%d,' % info.get('ImageExtent4')) # image_extent4
fd.write('"%s",' % info.get('Extent4Type')) # extent4_type
fd.write('"",') # image_extent5
fd.write('"",') # extent5_type
fd.write('"Millimeters",') # image_unit1
fd.write('"Millimeters",') # image_unit2
fd.write('"Millimeters",') # image_unit3
fd.write('"Seconds",') # image_unit4
fd.write('"",') # image_unit5
fd.write('%0.3f,' % info.get('ImageResolution1')) # image_resolution1
fd.write('%0.3f,' % info.get('ImageResolution2')) # image_resolution2
fd.write('%0.3f,' % info.get('ImageResolution3')) # image_resolution3
fd.write('%0.3f,' % info.get('ImageResolution4')) # image_resolution4
fd.write('%0.3f,' % info.get('ImageResolution5')) # image_resolution5
fd.write('%0.3f,' % info.get('SliceThickness')) # image_slice_thickness
fd.write('"%s",' % info.get('Orientation')) # image_orientation
fd.write('"",') # qc_outcome
fd.write('"",') # qc_description
fd.write('"",') # qc_fail_quest_reason
fd.write('"",') # decay_correction
fd.write('"",') # frame_end_times
fd.write('"",') # frame_end_unit
fd.write('"",') # frame_start_times
fd.write('"",') # frame_start_unit
fd.write('"",') # pet_isotope
fd.write('"",') # pet_tracer
fd.write('"",') # time_diff_inject_to_image
fd.write('"",') # time_diff_units
fd.write('"",') # pulse_seq
fd.write('"",') # slice_acquisition
fd.write('"None",') # software_preproc
fd.write('"",') # study
fd.write('"",') # week
fd.write('"",') # experiment_description
fd.write('"",') # visit
fd.write('"%s",' % str(info.get('SliceTiming'))) # slice_timing
fd.write('"",') # bvek_bval_files
fd.write('"",') # bvecfile
fd.write('"",') # bvalfile
# Final newline
fd.write('\n')
return
def strip_extensions(fname):
fstub, fext = os.path.splitext(fname)
if fext == '.gz':
fstub, fext = os.path.splitext(fstub)
return fstub
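# Example: strip_extensions('scan.nii.gz') and strip_extensions('scan.nii')
# both return 'scan'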
def ndar_include_prot(prot, prot_excludes):
'''
Returns False if protocol is in exclude list
:param prot:
:param prot_excludes:
:return:
'''
status = True
for pe in prot_excludes:
if pe in prot:
status = False
return status
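# Example (hypothetical arguments): ndar_include_prot('localizer', ['local'])
# returns False; ndar_include_prot('T1w', ['local']) returns True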
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| celstark/bidskit | dcm2ndar.py | Python | mit | 22,745 | ["VisIt"] | 1a1eae6a0979fe26230437a69c650efd3c5096cdc106486409b699a5410e10e1 |
import collections
import re
CSS = {
"White": "#FFFFFF",
"Old Lace": "#FDF5E6",
"Sea Green": "#2E8B57",
"Slate Blue": "#6A5ACD",
"Dark Khaki": "#BDB76B",
"Light Steel Blue": "#B0C4DE",
"Sky Blue": "#87CEEB",
"Medium Orchid": "#BA55D3",
"Aqua": "#00FFFF",
"Bisque": "#FFE4C4",
"Teal": "#008080",
"White Smoke": "#F5F5F5",
"Dark Turquoise": "#00CED1",
"Blanched Almond": "#FFEBCD",
"Light Blue": "#ADD8E6",
"Dark Slate Gray": "#2F4F4F",
"Peru": "#CD853F",
"Blue": "#0000FF",
"Dark Slate Grey": "#2F4F4F",
"Olive": "#808000",
"Dark Salmon": "#E9967A",
"Dark Cyan": "#008B8B",
"Slate Gray": "#708090",
"Snow": "#FFFAFA",
"Navy": "#000080",
"Rosy Brown": "#BC8F8F",
"Indigo": "#4B0082",
"Dark Violet": "#9400D3",
"Moccasin": "#FFE4B5",
"Medium Spring Green": "#00FA9A",
"Orange": "#FFA500",
"Seashell": "#FFF5EE",
"Orchid": "#DA70D6",
"Dark Red": "#8B0000",
"Yellow-Green": "#9ACD32",
"Forest Green": "#228B22",
"Dark Sea Green": "#8FBC8F",
"Cornflower Blue": "#6495ED",
"Light Salmon": "#FFA07A",
"Firebrick": "#B22222",
"Slate Grey": "#708090",
"Light Grey": "#D3D3D3",
"Spring Green": "#00FF7F",
"Wheat": "#F5DEB3",
"Antique White": "#FAEBD7",
"Plum": "#DDA0DD",
"Linen": "#FAF0E6",
"Light Yellow": "#FFFFE0",
"Green-Yellow": "#ADFF2F",
"Honeydew": "#F0FFF0",
"Tomato": "#FF6347",
"Gainsboro": "#DCDCDC",
"Black": "#000000",
"Light Cyan": "#E0FFFF",
"Coral": "#FF7F50",
"Papaya Whip": "#FFEFD5",
"Maroon": "#800000",
"Pale Green": "#98FB98",
"Green": "#008000",
"Silver": "#C0C0C0",
"Dim Grey": "#696969",
"Medium Blue": "#0000CD",
"Red": "#FF0000",
"Indian Red": "#CD5C5C",
"Medium Turquoise": "#48D1CC",
"Grey": "#808080",
"Pale Turquoise": "#AFEEEE",
"Dodger Blue": "#1E90FF",
"Crimson": "#DC143C",
"Medium Slate Blue": "#7B68EE",
"Medium Violet-Red": "#C71585",
"Dark Grey": "#A9A9A9",
"Midnight Blue": "#191970",
"Dark Magenta": "#8B008B",
"Powder Blue": "#B0E0E6",
"Dark Blue": "#00008B",
"Cyan": "#00FFFF",
"Light Slate Gray": "#778899",
"Steel Blue": "#4682B4",
"Aquamarine": "#7FFFD4",
"Beige": "#F5F5DC",
"Ghost White": "#F8F8FF",
"Navajo White": "#FFDEAD",
"Alice Blue": "#F0F8FF",
"Yellow": "#FFFF00",
"Light Gray": "#D3D3D3",
"Lemon Chiffon": "#FFFACD",
"Lime": "#00FF00",
"Light Sea Green": "#20B2AA",
"Floral White": "#FFFAF0",
"Sandy Brown": "#F4A460",
"Deep Sky Blue": "#00BFFF",
"Deep Pink": "#FF1493",
"Cornsilk": "#FFF8DC",
"Lavender Blush": "#FFF0F5",
"Blue-Violet": "#8A2BE2",
"Dim Gray": "#696969",
"Rebecca Purple": "#663399",
"Saddle Brown": "#8B4513",
"Dark Orange": "#FF8C00",
"Brown": "#A52A2A",
"Medium Purple": "#9370DB",
"Olive Drab": "#6B8E23",
"Dark Goldenrod": "#B8860B",
"Purple": "#800080",
"Light Sky Blue": "#87CEFA",
"Ivory": "#FFFFF0",
"Khaki": "#F0E68C",
"Tan": "#D2B48C",
"Magenta": "#FF00FF",
"Chocolate": "#D2691E",
"Azure": "#F0FFFF",
"Royal Blue": "#4169E1",
"Medium Aquamarine": "#66CDAA",
"Misty Rose": "#FFE4E1",
"Pale Goldenrod": "#EEE8AA",
"Dark Orchid": "#9932CC",
"Gold": "#FFD700",
"Burlywood": "#DEB887",
"Mint Cream": "#F5FFFA",
"Thistle": "#D8BFD8",
"Lavender": "#E6E6FA",
"Pink": "#FFC0CB",
"Peach Puff": "#FFDAB9",
"Dark Olive Green": "#556B2F",
"Orange-Red": "#FF4500",
"Chartreuse": "#7FFF00",
"Fuchsia": "#FF00FF",
"Light Pink": "#FFB6C1",
"Turquoise": "#40E0D0",
"Light Coral": "#F08080",
"Light Goldenrod Yellow": "#FAFAD2",
"Cadet Blue": "#5F9EA0",
"Light Slate Grey": "#778899",
"Hot Pink": "#FF69B4",
"Salmon": "#FA8072",
"Lime Green": "#32CD32",
"Goldenrod": "#DAA520",
"Violet": "#EE82EE",
"Dark Slate Blue": "#483D8B",
"Gray": "#808080",
"Medium Sea Green": "#3CB371",
"Pale Violet-Red": "#DB7093",
"Lawn Green": "#7CFC00",
"Sienna": "#A0522D",
"Light Green": "#90EE90",
"Dark Green": "#006400",
"Dark Gray": "#A9A9A9",
}
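# Example lookups (each table maps a display name to a hex RGB string):
#   CSS["Sea Green"] -> "#2E8B57"
#   CSS["Rebecca Purple"] -> "#663399"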
CRAYOLA = {
'Red': '#ED0A3F',
'Maroon': '#C32148',
'Scarlet': '#FD0E35',
'Brick Red': '#C62D42',
'English Vermilion': '#CC474B',
'Madder Lake': '#CC3336',
'Permanent Geranium Lake': '#E12C2C',
'Maximum Red': '#D92121',
'Indian Red': '#B94E48',
'Orange-Red': '#FF5349',
'Sunset Orange': '#FE4C40',
'Bittersweet': '#FE6F5E',
'Dark Venetian Red': '#B33B24',
'Venetian Red': '#CC553D',
'Light Venetian Red': '#E6735C',
'Vivid Tangerine': '#FF9980',
'Middle Red': '#E58E73',
'Burnt Orange': '#FF7F49',
'Red-Orange': '#FF681F',
'Orange': '#FF8833',
'Macaroni and Cheese': '#FFB97B',
'Middle Yellow Red': '#ECB176',
'Mango Tango': '#E77200',
'Yellow-Orange': '#FFAE42',
'Maximum Yellow Red': '#F2BA49',
'Banana Mania': '#FBE7B2',
'Maize': '#F2C649',
'Orange-Yellow': '#F8D568',
'Goldenrod': '#FCD667',
'Dandelion': '#FED85D',
'Yellow': '#FBE870',
'Green-Yellow': '#F1E788',
'Middle Yellow': '#FFEB00',
'Olive Green': '#B5B35C',
'Spring Green': '#ECEBBD',
'Maximum Yellow': '#FAFA37',
'Canary': '#FFFF99',
'Lemon Yellow': '#FFFF9F',
'Maximum Green Yellow': '#D9E650',
'Middle Green Yellow': '#ACBF60',
'Inchworm': '#AFE313',
'Light Chrome Green': '#BEE64B',
'Yellow-Green': '#C5E17A',
'Maximum Green': '#5E8C31',
'Asparagus': '#7BA05B',
'Granny Smith Apple': '#9DE093',
'Fern': '#63B76C',
'Middle Green': '#4D8C57',
'Green': '#3AA655',
'Medium Chrome Green': '#6CA67C',
'Forest Green': '#5FA777',
'Sea Green': '#93DFB8',
'Shamrock': '#33CC99',
'Mountain Meadow': '#1AB385',
'Jungle Green': '#29AB87',
'Caribbean Green': '#00CC99',
'Tropical Rain Forest': '#00755E',
'Middle Blue Green': '#8DD9CC',
'Pine Green': '#01786F',
'Maximum Blue Green': '#30BFBF',
"Robin's Egg Blue": '#00CCCC',
'Teal Blue': '#008080',
'Light Blue': '#8FD8D8',
'Aquamarine': '#95E0E8',
'Turquoise Blue': '#6CDAE7',
'Outer Space': '#2D383A',
'Sky Blue': '#76D7EA',
'Middle Blue': '#7ED4E6',
'Blue-Green': '#0095B7',
'Pacific Blue': '#009DC4',
'Cerulean': '#02A4D3',
'Maximum Blue': '#47ABCC',
'Blue 1': '#4997D0',
'Cerulean Blue': '#339ACC',
'Cornflower': '#93CCEA',
'Green-Blue': '#2887C8',
'Midnight Blue': '#00468C',
'Navy Blue': '#0066CC',
'Denim': '#1560BD',
'Blue 3': '#0066FF',
'Cadet Blue': '#A9B2C3',
'Periwinkle': '#C3CDE6',
'Blue 2': '#4570E6',
'Wild Blue Yonder': '#7A89B8',
'Indigo': '#4F69C6',
'Manatee': '#8D90A1',
'Cobalt Blue': '#8C90C8',
'Celestial Blue': '#7070CC',
'Blue Bell': '#9999CC',
'Maximum Blue Purple': '#ACACE6',
'Violet-Blue': '#766EC8',
'Blue-Violet': '#6456B7',
'Ultramarine Blue': '#3F26BF',
'Middle Blue Purple': '#8B72BE',
'Purple Heart': '#652DC1',
'Royal Purple': '#6B3FA0',
'Violet 2': '#8359A3',
'Medium Violet': '#8F47B3',
'Wisteria': '#C9A0DC',
'Lavender 1': '#BF8FCC',
'Vivid Violet': '#803790',
'Maximum Purple': '#733380',
"Purple Mountains' Majesty": '#D6AEDD',
'Fuchsia': '#C154C1',
'Pink Flamingo': '#FC74FD',
'Violet 1': '#732E6C',
'Brilliant Rose': '#E667CE',
'Orchid': '#E29CD2',
'Plum': '#8E3179',
'Medium Rose': '#D96CBE',
'Thistle': '#EBB0D7',
'Mulberry': '#C8509B',
'Red-Violet': '#BB3385',
'Middle Purple': '#D982B5',
'Maximum Red Purple': '#A63A79',
'Jazzberry Jam': '#A50B5E',
'Eggplant': '#614051',
'Magenta': '#F653A6',
'Cerise': '#DA3287',
'Wild Strawberry': '#FF3399',
'Lavender 2': '#FBAED2',
'Cotton Candy': '#FFB7D5',
'Carnation Pink': '#FFA6C9',
'Violet-Red': '#F7468A',
'Razzmatazz': '#E30B5C',
'Pig Pink': '#FDD7E4',
'Carmine': '#E62E6B',
'Blush': '#DB5079',
'Tickle Me Pink': '#FC80A5',
'Mauvelous': '#F091A9',
'Salmon': '#FF91A4',
'Middle Red Purple': '#A55353',
'Mahogany': '#CA3435',
'Melon': '#FEBAAD',
'Pink Sherbert': '#F7A38E',
'Burnt Sienna': '#E97451',
'Brown': '#AF593E',
'Sepia': '#9E5B40',
'Fuzzy Wuzzy': '#87421F',
'Beaver': '#926F5B',
'Tumbleweed': '#DEA681',
'Raw Sienna': '#D27D46',
'Van Dyke Brown': '#664228',
'Tan': '#D99A6C',
'Desert Sand': '#EDC9AF',
'Peach': '#FFCBA4',
'Burnt Umber': '#805533',
'Apricot': '#FDD5B1',
'Almond': '#EED9C4',
'Raw Umber': '#665233',
'Shadow': '#837050',
'Raw Sienna 1': '#E6BC5C',
'Timberwolf': '#D9D6CF',
'Gold 1': '#92926E',
'Gold 2': '#E6BE8A',
'Silver': '#C9C0BB',
'Copper': '#DA8A67',
'Antique Brass': '#C88A65',
'Black': '#000000',
'Charcoal Gray': '#736A62',
'Gray': '#8B8680',
'Blue-Gray': '#C8C8CD',
'White': '#FFFFFF',
}
WIKIPEDIA = {
"Absolute Zero": "#0048BA",
"Acid Green": "#B0BF1A",
"Aero": "#7CB9E8",
"Aero Blue": "#C9FFE5",
"African Violet": "#B284BE",
"Air Force Blue (RAF)": "#5D8AA8",
"Air Force Blue (USAF)": "#00308F",
"Air Superiority Blue": "#72A0C1",
"Alabama Crimson": "#AF002A",
"Alabaster": "#F2F0E6",
"Atlantic Blue": "#F0F8FF",
"Alien Armpit": "#84DE02",
"Alizarin Crimson": "#E32636",
"Alloy Orange": "#C46210",
"Almond": "#EFDECD",
"Amaranth": "#E52B50",
"Amaranth Deep Purple": "#9F2B68",
"Amaranth Pink": "#F19CBB",
"Amaranth Purple": "#AB274F",
"Amaranth Red": "#D3212D",
"Amazon Store": "#3B7A57",
"Amazonite": "#00C4B0",
"Amber": "#FFBF00",
"Amber (SAE/ECE)": "#FF7E00",
"American Rose": "#FF033E",
"Amethyst": "#9966CC",
"Android Green": "#A4C639",
"Anti-Flash White": "#F2F3F4",
"Antique Brass": "#CD9575",
"Antique Bronze": "#665D1E",
"Antique Fuchsia": "#915C83",
"Antique Ruby": "#841B2D",
"Antique White": "#FAEBD7",
"Ao": "#008000",
"Apple Green": "#8DB600",
"Apricot": "#FBCEB1",
"Aqua": "#00FFFF",
"Aquamarine": "#7FFFD4",
"Arctic Lime": "#D0FF14",
"Army Green": "#4B5320",
"Arsenic": "#3B444B",
"Artichoke": "#8F9779",
"Arylide Yellow": "#E9D66B",
"Ash Gray": "#B2BEB5",
"Asparagus": "#87A96B",
"Atomic Tangerine": "#FF9966",
"Auburn": "#A52A2A",
"Aureolin": "#FDEE00",
"AuroMetalSaurus": "#6E7F80",
"Avocado": "#568203",
"Awesome": "#FF2052",
"Aztec Gold": "#C39953",
"Azure": "#007FFF",
"Azure (Web Color)": "#F0FFFF",
"Azure Mist": "#F0FFFF",
"Azureish White": "#DBE9F4",
"Baby Blue": "#89CFF0",
"Baby Blue Eyes": "#A1CAF1",
"Baby Pink": "#F4C2C2",
"Baby Powder": "#FEFEFA",
"Baker-Miller Pink": "#FF91AF",
"Ball Blue": "#21ABCD",
"Banana Mania": "#FAE7B5",
"Banana Yellow": "#FFE135",
"Bangladesh Green": "#006A4E",
"Barbie Pink": "#E0218A",
"Barn Red": "#7C0A02",
"Battery Charged Blue": "#1DACD6",
"Battleship Grey": "#848482",
"Bazaar": "#98777B",
"Beau Blue": "#BCD4E6",
"Beaver": "#9F8170",
"Begonia": "#FA6E79",
"Beige": "#F5F5DC",
"B'dazzled Blue": "#2E5894",
"Big Dip O’ruby": "#9C2542",
"Big Foot Feet": "#E88E5A",
"Bisque": "#FFE4C4",
"Bistre": "#3D2B1F",
"Bistre Brown": "#967117",
"Bitter Lemon": "#CAE00D",
"Bitter Lime": "#BFFF00",
"Bittersweet": "#FE6F5E",
"Bittersweet Shimmer": "#BF4F51",
"Black": "#000000",
"Black Bean": "#3D0C02",
"Black Coral": "#54626F",
"Black Leather Jacket": "#253529",
"Black Olive": "#3B3C36",
"Black Shadows": "#BFAFB2",
"Blanched Almond": "#FFEBCD",
"Blast-Off Bronze": "#A57164",
"Bleu De France": "#318CE7",
"Blizzard Blue": "#ACE5EE",
"Blond": "#FAF0BE",
"Blue": "#0000FF",
"Blue (Crayola)": "#1F75FE",
"Blue (Munsell)": "#0093AF",
"Blue (NCS)": "#0087BD",
"Blue (Pantone)": "#0018A8",
"Blue (Pigment)": "#333399",
"Blue (RYB)": "#0247FE",
"Blue Bell": "#A2A2D0",
"Blue Bolt": "#00B9FB",
"Blue-Gray": "#6699CC",
"Blue-Green": "#0D98BA",
"Blue Jeans": "#5DADEC",
"Blue Lagoon": "#ACE5EE",
"Blue-Magenta Violet": "#553592",
"Blue Sapphire": "#126180",
"Blue-Violet": "#8A2BE2",
"Blue Yonder": "#5072A7",
"Blueberry": "#4F86F7",
"Bluebonnet": "#1C1CF0",
"Blush": "#DE5D83",
"Bole": "#79443B",
"Bondi Blue": "#0095B6",
"Bone": "#E3DAC9",
"Booger Buster": "#DDE26A",
"Boston University Red": "#CC0000",
"Bottle Green": "#006A4E",
"Boysenberry": "#873260",
"Brandeis Blue": "#0070FF",
"Brass": "#B5A642",
"Brick Red": "#CB4154",
"Bright Cerulean": "#1DACD6",
"Bright Green": "#66FF00",
"Bright Lavender": "#BF94E4",
"Bright Lilac": "#D891EF",
"Bright Maroon": "#C32148",
"Bright Navy Blue": "#1974D2",
"Bright Pink": "#FF007F",
"Bright Turquoise": "#08E8DE",
"Bright Ube": "#D19FE8",
"Bright Yellow (Crayola)": "#FFAA1D",
"Brilliant Azure": "#3399FF",
"Brilliant Lavender": "#F4BBFF",
"Brilliant Rose": "#FF55A3",
"Brink Pink": "#FB607F",
"British Racing Green": "#004225",
"Bronze": "#CD7F32",
"Bronze Yellow": "#737000",
"Brown (Traditional)": "#964B00",
"Brown (Web)": "#A52A2A",
"Brown-Nose": "#6B4423",
"Brown Sugar": "#AF6E4D",
"Brown Yellow": "#CC9966",
"Brunswick Green": "#1B4D3E",
"Bubble Gum": "#FFC1CC",
"Bubbles": "#E7FEFF",
"Bud Green": "#7BB661",
"Buff": "#F0DC82",
"Bulgarian Rose": "#480607",
"Burgundy": "#800020",
"Burlywood": "#DEB887",
"Burnished Brown": "#A17A74",
"Burnt Orange": "#CC5500",
"Burnt Sienna": "#E97451",
"Burnt Umber": "#8A3324",
"Button Blue": "#24A0ED",
"Byzantine": "#BD33A4",
"Byzantium": "#702963",
"Cadet": "#536872",
"Cadet Blue": "#5F9EA0",
"Cadet Grey": "#91A3B0",
"Cadmium Green": "#006B3C",
"Cadmium Orange": "#ED872D",
"Cadmium Red": "#E30022",
"Cadmium Yellow": "#FFF600",
"Café Au Lait": "#A67B5B",
"Café Noir": "#4B3621",
"Cal Poly Pomona Green": "#1E4D2B",
"Cambridge Blue": "#A3C1AD",
"Camel": "#C19A6B",
"Cameo Pink": "#EFBBCC",
"Camouflage Green": "#78866B",
"Canary": "#FFFF99",
"Canary Yellow": "#FFEF00",
"Candy Apple Red": "#FF0800",
"Candy Pink": "#E4717A",
"Capri": "#00BFFF",
"Caput Mortuum": "#592720",
"Cardinal": "#C41E3A",
"Caribbean Green": "#00CC99",
"Carmine": "#960018",
"Carmine (M&P)": "#D70040",
"Carmine Pink": "#EB4C42",
"Carmine Red": "#FF0038",
"Carnation Pink": "#FFA6C9",
"Carnelian": "#B31B1B",
"Carolina Blue": "#56A0D3",
"Carrot Orange": "#ED9121",
"Castleton Green": "#00563F",
"Catalina Blue": "#062A78",
"Catawba": "#703642",
"Cedar Chest": "#C95A49",
"Ceil": "#92A1CF",
"Celadon": "#ACE1AF",
"Celadon Blue": "#007BA7",
"Celadon Green": "#2F847C",
"Celeste": "#B2FFFF",
"Celestial Blue": "#4997D0",
"Cerise": "#DE3163",
"Cerise Pink": "#EC3B83",
"Cerulean": "#007BA7",
"Cerulean Blue": "#2A52BE",
"Cerulean Frost": "#6D9BC3",
"CG Blue": "#007AA5",
"CG Red": "#E03C31",
"Chamoisee": "#A0785A",
"Champagne": "#F7E7CE",
"Champagne Pink": "#F1DDCF",
"Charcoal": "#36454F",
"Charleston Green": "#232B2B",
"Charm Pink": "#E68FAC",
"Chartreuse (Traditional)": "#DFFF00",
"Chartreuse (Web)": "#7FFF00",
"Cherry": "#DE3163",
"Cherry Blossom Pink": "#FFB7C5",
"Chestnut": "#954535",
"China Pink": "#DE6FA1",
"China Rose": "#A8516E",
"Chinese Red": "#AA381E",
"Chinese Violet": "#856088",
"Chlorophyll Green": "#4AFF00",
"Chocolate (Traditional)": "#7B3F00",
"Chocolate (Web)": "#D2691E",
"Chrome Yellow": "#FFA700",
"Cinereous": "#98817B",
"Cinnabar": "#E34234",
"Cinnamon": "#D2691E",
"Cinnamon Satin": "#CD607E",
"Citrine": "#E4D00A",
"Citron": "#9FA91F",
"Claret": "#7F1734",
"Classic Rose": "#FBCCE7",
"Cobalt Blue": "#0047AB",
"Cocoa Brown": "#D2691E",
"Coconut": "#965A3E",
"Coffee": "#6F4E37",
"Columbia Blue": "#C4D8E2",
"Congo Pink": "#F88379",
"Cool Black": "#002E63",
"Cool Grey": "#8C92AC",
"Copper": "#B87333",
"Copper (Crayola)": "#DA8A67",
"Copper Penny": "#AD6F69",
"Copper Red": "#CB6D51",
"Copper Rose": "#996666",
"Coquelicot": "#FF3800",
"Coral": "#FF7F50",
"Coral Pink": "#F88379",
"Coral Red": "#FF4040",
"Coral Reef": "#FD7C6E",
"Cordovan": "#893F45",
"Corn": "#FBEC5D",
"Cornell Red": "#B31B1B",
"Cornflower Blue": "#6495ED",
"Cornsilk": "#FFF8DC",
"Cosmic Cobalt": "#2E2D88",
"Cosmic Latte": "#FFF8E7",
"Coyote Brown": "#81613C",
"Cotton Candy": "#FFBCD9",
"Cream": "#FFFDD0",
"Crimson": "#DC143C",
"Crimson Glory": "#BE0032",
"Crimson Red": "#990000",
"Cultured": "#F5F5F5",
"Cyan": "#00FFFF",
"Cyan Azure": "#4E82B4",
"Cyan-Blue Azure": "#4682BF",
"Cyan Cobalt Blue": "#28589C",
"Cyan Cornflower Blue": "#188BC2",
"Cyan (Process)": "#00B7EB",
"Cyber Grape": "#58427C",
"Cyber Yellow": "#FFD300",
"Cyclamen": "#F56FA1",
"Daffodil": "#FFFF31",
"Dandelion": "#F0E130",
"Dark Blue": "#00008B",
"Dark Blue-Gray": "#666699",
"Dark Brown": "#654321",
"Dark Brown-Tangelo": "#88654E",
"Dark Byzantium": "#5D3954",
"Dark Candy Apple Red": "#A40000",
"Dark Cerulean": "#08457E",
"Dark Chestnut": "#986960",
"Dark Coral": "#CD5B45",
"Dark Cyan": "#008B8B",
"Dark Electric Blue": "#536878",
"Dark Goldenrod": "#B8860B",
"Dark Gray (X11)": "#A9A9A9",
"Dark Green": "#013220",
"Dark Green (X11)": "#006400",
"Dark Gunmetal": "#1F262A",
"Dark Imperial Blue": "#00416A",
"Dark Imperial Blue": "#00147E",
"Dark Jungle Green": "#1A2421",
"Dark Khaki": "#BDB76B",
"Dark Lava": "#483C32",
"Dark Lavender": "#734F96",
"Dark Liver": "#534B4F",
"Dark Liver (Horses)": "#543D37",
"Dark Magenta": "#8B008B",
"Dark Medium Gray": "#A9A9A9",
"Dark Midnight Blue": "#003366",
"Dark Moss Green": "#4A5D23",
"Dark Olive Green": "#556B2F",
"Dark Orange": "#FF8C00",
"Dark Orchid": "#9932CC",
"Dark Pastel Blue": "#779ECB",
"Dark Pastel Green": "#03C03C",
"Dark Pastel Purple": "#966FD6",
"Dark Pastel Red": "#C23B22",
"Dark Pink": "#E75480",
"Dark Powder Blue": "#003399",
"Dark Puce": "#4F3A3C",
"Dark Purple": "#301934",
"Dark Raspberry": "#872657",
"Dark Red": "#8B0000",
"Dark Salmon": "#E9967A",
"Dark Scarlet": "#560319",
"Dark Sea Green": "#8FBC8F",
"Dark Sienna": "#3C1414",
"Dark Sky Blue": "#8CBED6",
"Dark Slate Blue": "#483D8B",
"Dark Slate Gray": "#2F4F4F",
"Dark Spring Green": "#177245",
"Dark Tan": "#918151",
"Dark Tangerine": "#FFA812",
"Dark Taupe": "#483C32",
"Dark Terra Cotta": "#CC4E5C",
"Dark Turquoise": "#00CED1",
"Dark Vanilla": "#D1BEA8",
"Dark Violet": "#9400D3",
"Dark Yellow": "#9B870C",
"Dartmouth Green": "#00703C",
"Davy's Grey": "#555555",
"Debian Red": "#D70A53",
"Deep Aquamarine": "#40826D",
"Deep Carmine": "#A9203E",
"Deep Carmine Pink": "#EF3038",
"Deep Carrot Orange": "#E9692C",
"Deep Cerise": "#DA3287",
"Deep Champagne": "#FAD6A5",
"Deep Chestnut": "#B94E48",
"Deep Coffee": "#704241",
"Deep Fuchsia": "#C154C1",
"Deep Green": "#056608",
"Deep Green-Cyan Turquoise": "#0E7C61",
"Deep Jungle Green": "#004B49",
"Deep Koamaru": "#333366",
"Deep Lemon": "#F5C71A",
"Deep Lilac": "#9955BB",
"Deep Magenta": "#CC00CC",
"Deep Maroon": "#820000",
"Deep Mauve": "#D473D4",
"Deep Moss Green": "#355E3B",
"Deep Peach": "#FFCBA4",
"Deep Pink": "#FF1493",
"Deep Puce": "#A95C68",
"Deep Red": "#850101",
"Deep Ruby": "#843F5B",
"Deep Saffron": "#FF9933",
"Deep Sky Blue": "#00BFFF",
"Deep Space Sparkle": "#4A646C",
"Deep Spring Bud": "#556B2F",
"Deep Taupe": "#7E5E60",
"Deep Tuscan Red": "#66424D",
"Deep Violet": "#330066",
"Deer": "#BA8759",
"Denim": "#1560BD",
"Denim Blue": "#2243B6",
"Desaturated Cyan": "#669999",
"Desert": "#C19A6B",
"Desert Sand": "#EDC9AF",
"Desire": "#EA3C53",
"Diamond": "#B9F2FF",
"Dim Gray": "#696969",
"Dingy Dungeon": "#C53151",
"Dirt": "#9B7653",
"Dodger Blue": "#1E90FF",
"Dodie Yellow": "#FEF65B",
"Dogwood Rose": "#D71868",
"Dollar Bill": "#85BB65",
"Dolphin Gray": "#828E84",
"Donkey Brown": "#664C28",
"Drab": "#967117",
"Duke Blue": "#00009C",
"Dust Storm": "#E5CCC9",
"Dutch White": "#EFDFBB",
"Earth Yellow": "#E1A95F",
"Ebony": "#555D50",
"Ecru": "#C2B280",
"Eerie Black": "#1B1B1B",
"Eggplant": "#614051",
"Eggshell": "#F0EAD6",
"Egyptian Blue": "#1034A6",
"Electric Blue": "#7DF9FF",
"Electrical Computer Crimson": "#FF003F",
"Electric Cyan": "#00FFFF",
"Electric Green": "#00FF00",
"Electric Indigo": "#6F00FF",
"Electric Lavender": "#F4BBFF",
"Electric Lime": "#CCFF00",
"Electric Purple": "#BF00FF",
"Electric Ultramarine": "#3F00FF",
"Electric Violet": "#8F00FF",
"Electric Yellow": "#FFFF33",
"Emerald": "#50C878",
"Eminence": "#6C3082",
"English Green": "#1B4D3E",
"English Lavender": "#B48395",
"English Red": "#AB4B52",
"English Vermillion": "#CC474B",
"English Violet": "#563C5C",
"Eton Blue": "#96C8A2",
"Eucalyptus": "#44D7A8",
"Fallow": "#C19A6B",
"Falu Red": "#801818",
"Fandango": "#B53389",
"Fandango Pink": "#DE5285",
"Fashion Fuchsia": "#F400A1",
"Faberge Fuchsia": "#E5AA70",
"Feldgrau": "#4D5D53",
"Feldspar": "#FDD5B1",
"Fern Green": "#4F7942",
"Ferrari Red": "#FF2800",
"Field Drab": "#6C541E",
"Fiery Rose": "#FF5470",
"Firebrick": "#B22222",
"Fire Engine Red": "#CE2029",
"Flame": "#E25822",
"Flamingo Pink": "#FC8EAC",
"Flattery": "#6B4423",
"Flavescent": "#F7E98E",
"Flax": "#EEDC82",
"Flirt": "#A2006D",
"Floral White": "#FFFAF0",
"Fluorescent Orange": "#FFBF00",
"Fluorescent Pink": "#FF1493",
"Fluorescent Yellow": "#CCFF00",
"Folly": "#FF004F",
"Forest Green (Traditional)": "#014421",
"Forest Green (Web)": "#228B22",
"French Beige": "#A67B5B",
"French Bistre": "#856D4D",
"French Blue": "#0072BB",
"French Fuchsia": "#FD3F92",
"French Lilac": "#86608E",
"French Lime": "#9EFD38",
"French Mauve": "#D473D4",
"French Pink": "#FD6C9E",
"French Plum": "#811453",
"French Puce": "#4E1609",
"French Raspberry": "#C72C48",
"French Rose": "#F64A8A",
"French Sky Blue": "#77B5FE",
"French Violet": "#8806CE",
"French Wine": "#AC1E44",
"Fresh Air": "#A6E7FF",
"Frogert": "#E936A7",
"Fuchsia": "#FF00FF",
"Fuchsia (Crayola)": "#C154C1",
"Fuchsia Pink": "#FF77FF",
"Fuchsia Purple": "#CC397B",
"Fuchsia Rose": "#C74375",
"Fulvous": "#E48400",
"Fuzzy Wuzzy": "#CC6666",
"Gainsboro": "#DCDCDC",
"Gamboge": "#E49B0F",
"Gamboge Orange (Brown)": "#996600",
"Gargoyle Gas": "#FFDF46",
"Generic Viridian": "#007F66",
"Ghost White": "#F8F8FF",
"Giant's Club": "#B05C52",
"Giants Orange": "#FE5A1D",
"Ginger": "#B06500",
"Glaucous": "#6082B6",
"Glitter": "#E6E8FA",
"Glossy Grape": "#AB92B3",
"GO Green": "#00AB66",
"Gold (Metallic)": "#D4AF37",
"Gold (Web) (Golden)": "#FFD700",
"Gold Fusion": "#85754E",
"Golden Brown": "#996515",
"Golden Poppy": "#FCC200",
"Golden Yellow": "#FFDF00",
"Goldenrod": "#DAA520",
"Granite Gray": "#676767",
"Granny Smith Apple": "#A8E4A0",
"Grape": "#6F2DA8",
"Gray": "#808080",
"Gray (HTML/CSS Gray)": "#808080",
"Gray (X11 Gray)": "#BEBEBE",
"Gray-Asparagus": "#465945",
"Gray-Blue": "#8C92AC",
"Green (Color Wheel) (X11 Green)": "#00FF00",
"Green (Crayola)": "#1CAC78",
"Green (HTML/CSS Color)": "#008000",
"Green (Munsell)": "#00A877",
"Green (NCS)": "#009F6B",
"Green (Pantone)": "#00AD43",
"Green (Pigment)": "#00A550",
"Green (RYB)": "#66B032",
"Green-Blue": "#1164B4",
"Green-Cyan": "#009966",
"Green Lizard": "#A7F432",
"Green Sheen": "#6EAEA1",
"Green-Yellow": "#ADFF2F",
"Grizzly": "#885818",
"Grullo": "#A99A86",
"Guppie Green": "#00FF7F",
"Gunmetal": "#2A3439",
"Halayà Úbe": "#663854",
"Han Blue": "#446CCF",
"Han Purple": "#5218FA",
"Hansa Yellow": "#E9D66B",
"Harlequin": "#3FFF00",
"Harlequin Green": "#46CB18",
"Harvard Crimson": "#C90016",
"Harvest Gold": "#DA9100",
"Heart Gold": "#808000",
"Heat Wave": "#FF7A00",
"Heidelberg Red": "#960018",
"Heliotrope": "#DF73FF",
"Heliotrope Gray": "#AA98A9",
"Heliotrope Magenta": "#AA00BB",
"Hollywood Cerise": "#F400A1",
"Honeydew": "#F0FFF0",
"Honolulu Blue": "#006DB0",
"Hooker's Green": "#49796B",
"Hot Magenta": "#FF1DCE",
"Hot Pink": "#FF69B4",
"Hunter Green": "#355E3B",
"Iceberg": "#71A6D2",
"Icterine": "#FCF75E",
"Iguana Green": "#71BC78",
"Illuminating Emerald": "#319177",
"Imperial": "#602F6B",
"Imperial Blue": "#002395",
"Imperial Purple": "#66023C",
"Imperial Red": "#ED2939",
"Inchworm": "#B2EC5D",
"Independence": "#4C516D",
"India Green": "#138808",
"Indian Red": "#CD5C5C",
"Indian Yellow": "#E3A857",
"Indigo": "#4B0082",
"Indigo Dye": "#091F92",
"Indigo (Web)": "#4B0082",
"Infra Red": "#FF496C",
"Interdimensional Blue": "#360CCC",
"International Klein Blue": "#002FA7",
"International Orange (Aerospace)": "#FF4F00",
"International Orange (Engineering)": "#BA160C",
"International Orange (Golden Gate Bridge)": "#C0362C",
"Iris": "#5A4FCF",
"Irresistible": "#B3446C",
"Isabelline": "#F4F0EC",
"Islamic Green": "#009000",
"Italian Sky Blue": "#B2FFFF",
"Ivory": "#FFFFF0",
"Jade": "#00A86B",
"Japanese Carmine": "#9D2933",
"Japanese Indigo": "#264348",
"Japanese Violet": "#5B3256",
"Jasmine": "#F8DE7E",
"Jasper": "#D73B3E",
"Jazzberry Jam": "#A50B5E",
"Jelly Bean": "#DA614E",
"Jet": "#343434",
"Jonquil": "#F4CA16",
"Jordy Blue": "#8AB9F1",
"June Bud": "#BDDA57",
"Jungle Green": "#29AB87",
"Kelly Green": "#4CBB17",
"Kenyan Copper": "#7C1C05",
"Keppel": "#3AB09E",
"Key Lime": "#E8F48C",
"Khaki (HTML/CSS) (Khaki)": "#C3B091",
"Khaki (X11) (Light Khaki)": "#F0E68C",
"Kiwi": "#8EE53F",
"Kobe": "#882D17",
"Kobi": "#E79FC4",
"Kobicha": "#6B4423",
"Kombu Green": "#354230",
"KSU Purple": "#512888",
"KU Crimson": "#E8000D",
"La Salle Green": "#087830",
"Languid Lavender": "#D6CADD",
"Lapis Lazuli": "#26619C",
"Laser Lemon": "#FFFF66",
"Laurel Green": "#A9BA9D",
"Lava": "#CF1020",
"Lavender (Floral)": "#B57EDC",
"Lavender (Web)": "#E6E6FA",
"Lavender Blue": "#CCCCFF",
"Lavender Blush": "#FFF0F5",
"Lavender Gray": "#C4C3D0",
"Lavender Indigo": "#9457EB",
"Lavender Magenta": "#EE82EE",
"Lavender Mist": "#E6E6FA",
"Lavender Pink": "#FBAED2",
"Lavender Purple": "#967BB6",
"Lavender Rose": "#FBA0E3",
"Lawn Green": "#7CFC00",
"Lemon": "#FFF700",
"Lemon Chiffon": "#FFFACD",
"Lemon Curry": "#CCA01D",
"Lemon Glacier": "#FDFF00",
"Lemon Lime": "#E3FF00",
"Lemon Meringue": "#F6EABE",
"Lemon Yellow": "#FFF44F",
"Licorice": "#1A1110",
"Liberty": "#545AA7",
"Light Apricot": "#FDD5B1",
"Light Blue": "#ADD8E6",
"Light Brown": "#B5651D",
"Light Carmine Pink": "#E66771",
"Light Cobalt Blue": "#88ACE0",
"Light Coral": "#F08080",
"Light Cornflower Blue": "#93CCEA",
"Light Crimson": "#F56991",
"Light Cyan": "#E0FFFF",
"Light Deep Pink": "#FF5CCD",
"Light French Beige": "#C8AD7F",
"Light Fuchsia Pink": "#F984EF",
"Light Goldenrod Yellow": "#FAFAD2",
"Light Gray": "#D3D3D3",
"Light Grayish Magenta": "#CC99CC",
"Light Green": "#90EE90",
"Light Hot Pink": "#FFB3DE",
"Light Khaki": "#F0E68C",
"Light Medium Orchid": "#D39BCB",
"Light Moss Green": "#ADDFAD",
"Light Orange": "#FED8B1",
"Light Orchid": "#E6A8D7",
"Light Pastel Purple": "#B19CD9",
"Light Pink": "#FFB6C1",
"Light Red Ochre": "#E97451",
"Light Salmon": "#FFA07A",
"Light Salmon Pink": "#FF9999",
"Light Sea Green": "#20B2AA",
"Light Sky Blue": "#87CEFA",
"Light Slate Gray": "#778899",
"Light Steel Blue": "#B0C4DE",
"Light Taupe": "#B38B6D",
"Light Thulian Pink": "#E68FAC",
"Light Yellow": "#FFFFE0",
"Lilac": "#C8A2C8",
"Lilac Luster": "#AE98AA",
"Lime (Color Wheel)": "#BFFF00",
"Lime (Web) (X11 Green)": "#00FF00",
"Lime Green": "#32CD32",
"Limerick": "#9DC209",
"Lincoln Green": "#195905",
"Linen": "#FAF0E6",
"Loeen(Lopen)Look/Vomit+Indogo+Lopen+Gabriel": "#15F2FD",
"Liseran Purple": "#DE6FA1",
"Little Boy Blue": "#6CA0DC",
"Liver": "#674C47",
"Liver (Dogs)": "#B86D29",
"Liver (Organ)": "#6C2E1F",
"Liver Chestnut": "#987456",
"Livid": "#6699CC",
"Lumber": "#FFE4CD",
"Lust": "#E62020",
"Maastricht Blue": "#001C3D",
"Macaroni And Cheese": "#FFBD88",
"Madder Lake": "#CC3336",
"Magenta": "#FF00FF",
"Magenta (Crayola)": "#FF55A3",
"Magenta (Dye)": "#CA1F7B",
"Magenta (Pantone)": "#D0417E",
"Magenta (Process)": "#FF0090",
"Magenta Haze": "#9F4576",
"Magenta-Pink": "#CC338B",
"Magic Mint": "#AAF0D1",
"Magic Potion": "#FF4466",
"Magnolia": "#F8F4FF",
"Mahogany": "#C04000",
"Maize": "#FBEC5D",
"Majorelle Blue": "#6050DC",
"Malachite": "#0BDA51",
"Manatee": "#979AAA",
"Mandarin": "#F37A48",
"Mango Tango": "#FF8243",
"Mantis": "#74C365",
"Mardi Gras": "#880085",
"Marigold": "#EAA221",
"Maroon (Crayola)": "#C32148",
"Maroon (HTML/CSS)": "#800000",
"Maroon (X11)": "#B03060",
"Mauve": "#E0B0FF",
"Mauve Taupe": "#915F6D",
"Mauvelous": "#EF98AA",
"Maximum Blue": "#47ABCC",
"Maximum Blue Green": "#30BFBF",
"Maximum Blue Purple": "#ACACE6",
"Maximum Green": "#5E8C31",
"Maximum Green Yellow": "#D9E650",
"Maximum Purple": "#733380",
"Maximum Red": "#D92121",
"Maximum Red Purple": "#A63A79",
"Maximum Yellow": "#FAFA37",
"Maximum Yellow Red": "#F2BA49",
"May Green": "#4C9141",
"Maya Blue": "#73C2FB",
"Meat Brown": "#E5B73B",
"Medium Aquamarine": "#66DDAA",
"Medium Blue": "#0000CD",
"Medium Candy Apple Red": "#E2062C",
"Medium Carmine": "#AF4035",
"Medium Champagne": "#F3E5AB",
"Medium Electric Blue": "#035096",
"Medium Jungle Green": "#1C352D",
"Medium Lavender Magenta": "#DDA0DD",
"Medium Orchid": "#BA55D3",
"Medium Persian Blue": "#0067A5",
"Medium Purple": "#9370DB",
"Medium Red-Violet": "#BB3385",
"Medium Ruby": "#AA4069",
"Medium Sea Green": "#3CB371",
"Medium Sky Blue": "#80DAEB",
"Medium Slate Blue": "#7B68EE",
"Medium Spring Bud": "#C9DC87",
"Medium Spring Green": "#00FA9A",
"Medium Taupe": "#674C47",
"Medium Turquoise": "#48D1CC",
"Medium Tuscan Red": "#79443B",
"Medium Vermilion": "#D9603B",
"Medium Violet-Red": "#C71585",
"Mellow Apricot": "#F8B878",
"Mellow Yellow": "#F8DE7E",
"Melon": "#FDBCB4",
"Metallic Seaweed": "#0A7E8C",
"Metallic Sunburst": "#9C7C38",
"Mexican Pink": "#E4007C",
"Middle Blue": "#7ED4E6",
"Middle Blue Green": "#8DD9CC",
"Middle Blue Purple": "#8B72BE",
"Middle Red Purple": "#210837",
"Middle Green": "#4D8C57",
"Middle Green Yellow": "#ACBF60",
"Middle Purple": "#D982B5",
"Middle Red": "#E58E73",
"Middle Red Purple": "#A55353",
"Middle Yellow": "#FFEB00",
"Middle Yellow Red": "#ECB176",
"Midnight": "#702670",
"Midnight Blue": "#191970",
"Midnight Green (Eagle Green)": "#004953",
"Mikado Yellow": "#FFC40C",
"Milk": "#FDFFF5",
"Mimi Pink": "#FFDAE9",
"Mindaro": "#E3F988",
"Ming": "#36747D",
"Minion Yellow": "#F5E050",
"Mint": "#3EB489",
"Mint Cream": "#F5FFFA",
"Mint Green": "#98FF98",
"Misty Moss": "#BBB477",
"Misty Rose": "#FFE4E1",
"Moccasin": "#FAEBD7",
"Mode Beige": "#967117",
"Moonstone Blue": "#73A9C2",
"Mordant Red 19": "#AE0C00",
"Morning Blue": "#8DA399",
"Moss Green": "#8A9A5B",
"Mountain Meadow": "#30BA8F",
"Mountbatten Pink": "#997A8D",
"MSU Green": "#18453B",
"Mughal Green": "#306030",
"Mulberry": "#C54B8C",
"Mummy's Tomb": "#828E84",
"Mustard": "#FFDB58",
"Myrtle Green": "#317873",
"Mystic": "#D65282",
"Mystic Maroon": "#AD4379",
"Nadeshiko Pink": "#F6ADC6",
"Napier Green": "#2A8000",
"Naples Yellow": "#FADA5E",
"Navajo White": "#FFDEAD",
"Navy": "#000080",
"Navy Purple": "#9457EB",
"Neon Carrot": "#FFA343",
"Neon Fuchsia": "#FE4164",
"Neon Green": "#39FF14",
"New Car": "#214FC6",
"New York Pink": "#D7837F",
"Nickel": "#727472",
"Non-Photo Blue": "#A4DDED",
"North Texas Green": "#059033",
"Nyanza": "#E9FFDB",
"Ocean Blue": "#4F42B5",
"Ocean Boat Blue": "#0077BE",
"Ocean Green": "#48BF91",
"Ochre": "#CC7722",
"Office Green": "#008000",
"Ogre Odor": "#FD5240",
"Old Burgundy": "#43302E",
"Old Gold": "#CFB53B",
"Old Heliotrope": "#563C5C",
"Old Lace": "#FDF5E6",
"Old Lavender": "#796878",
"Old Mauve": "#673147",
"Old Moss Green": "#867E36",
"Old Rose": "#C08081",
"Old Silver": "#848482",
"Olive": "#808000",
"Olive Drab (#3)": "#6B8E23",
"Olive Drab #7": "#3C341F",
"Olivine": "#9AB973",
"Onyx": "#353839",
"Opera Mauve": "#B784A7",
"Orange (Color Wheel)": "#FF7F00",
"Orange (Crayola)": "#FF7538",
"Orange (Pantone)": "#FF5800",
"Orange (RYB)": "#FB9902",
"Orange (Web)": "#FFA500",
"Orange Peel": "#FF9F00",
"Orange-Red": "#FF4500",
"Orange Soda": "#FA5B3D",
"Orange-Yellow": "#F8D568",
"Orchid": "#DA70D6",
"Orchid Pink": "#F2BDCD",
"Orioles Orange": "#FB4F14",
"Otter Brown": "#654321",
"Outer Space": "#414A4C",
"Outrageous Orange": "#FF6E4A",
"Oxford Blue": "#002147",
"OU Crimson Red": "#990000",
"Pacific Blue": "#1CA9C9",
"Pakistan Green": "#006600",
"Palatinate Blue": "#273BE2",
"Palatinate Purple": "#682860",
"Pale Aqua": "#BCD4E6",
"Pale Blue": "#AFEEEE",
"Pale Brown": "#987654",
"Pale Carmine": "#AF4035",
"Pale Cerulean": "#9BC4E2",
"Pale Chestnut": "#DDADAF",
"Pale Copper": "#DA8A67",
"Pale Cornflower Blue": "#ABCDEF",
"Pale Cyan": "#87D3F8",
"Pale Gold": "#E6BE8A",
"Pale Goldenrod": "#EEE8AA",
"Pale Green": "#98FB98",
"Pale Lavender": "#DCD0FF",
"Pale Magenta": "#F984E5",
"Pale Magenta-Pink": "#FF99CC",
"Pale Pink": "#FADADD",
"Pale Plum": "#DDA0DD",
"Pale Red-Violet": "#DB7093",
"Pale Robin Egg Blue": "#96DED1",
"Pale Silver": "#C9C0BB",
"Pale Spring Bud": "#ECEBBD",
"Pale Taupe": "#BC987E",
"Pale Turquoise": "#AFEEEE",
"Pale Violet": "#CC99FF",
"Pale Violet-Red": "#DB7093",
"Palm Leaf": "#6F9940",
"Pansy Purple": "#78184A",
"Paolo Veronese Green": "#009B7D",
"Papaya Whip": "#FFEFD5",
"Paradise Pink": "#E63E62",
"Paris Green": "#50C878",
"Parrot Pink": "#D998A0",
"Pastel Blue": "#AEC6CF",
"Pastel Brown": "#836953",
"Pastel Gray": "#CFCFC4",
"Pastel Green": "#77DD77",
"Pastel Magenta": "#F49AC2",
"Pastel Orange": "#FFB347",
"Pastel Pink": "#DEA5A4",
"Pastel Purple": "#B39EB5",
"Pastel Red": "#FF6961",
"Pastel Violet": "#CB99C9",
"Pastel Yellow": "#FDFD96",
"Patriarch": "#800080",
"Payne's Grey": "#536878",
"Peach": "#FFE5B4",
"Peach": "#FFCBA4",
"Peach-Orange": "#FFCC99",
"Peach Puff": "#FFDAB9",
"Peach-Yellow": "#FADFAD",
"Pear": "#D1E231",
"Pearl": "#EAE0C8",
"Pearl Aqua": "#88D8C0",
"Pearly Purple": "#B768A2",
"Peridot": "#E6E200",
"Periwinkle": "#CCCCFF",
"Permanent Geranium Lake": "#E12C2C",
"Persian Blue": "#1C39BB",
"Persian Green": "#00A693",
"Persian Indigo": "#32127A",
"Persian Orange": "#D99058",
"Persian Pink": "#F77FBE",
"Persian Plum": "#701C1C",
"Persian Red": "#CC3333",
"Persian Rose": "#FE28A2",
"Persimmon": "#EC5800",
"Peru": "#CD853F",
"Pewter Blue": "#8BA8B7",
"Phlox": "#DF00FF",
"Phthalo Blue": "#000F89",
"Phthalo Green": "#123524",
"Picton Blue": "#45B1E8",
"Pictorial Carmine": "#C30B4E",
"Piggy Pink": "#FDDDE6",
"Pine Green": "#01796F",
"Pineapple": "#563C0D",
"Pink": "#FFC0CB",
"Pink (Pantone)": "#D74894",
"Pink Flamingo": "#FC74FD",
"Pink Lace": "#FFDDF4",
"Pink Lavender": "#D8B2D1",
"Pink-Orange": "#FF9966",
"Pink Pearl": "#E7ACCF",
"Pink Raspberry": "#980036",
"Pink Sherbet": "#F78FA7",
"Pistachio": "#93C572",
"Pixie Powder": "#391285",
"Platinum": "#E5E4E2",
"Plum": "#8E4585",
"Plum (Web)": "#DDA0DD",
"Plump Purple": "#5946B2",
"Polished Pine": "#5DA493",
"Pomp And Power": "#86608E",
"Popstar": "#BE4F62",
"Portland Orange": "#FF5A36",
"Powder Blue": "#B0E0E6",
"Princess Perfume": "#FF85CF",
"Princeton Orange": "#F58025",
"Prune": "#701C1C",
"Prussian Blue": "#003153",
"Psychedelic Purple": "#DF00FF",
"Puce": "#CC8899",
"Puce Red": "#722F37",
"Pullman Brown (UPS Brown)": "#644117",
"Pullman Green": "#3B331C",
"Pumpkin": "#FF7518",
"Purple (HTML)": "#800080",
"Purple (Munsell)": "#9F00C5",
"Purple (X11)": "#A020F0",
"Purple Heart": "#69359C",
"Purple Mountain Majesty": "#9678B6",
"Purple Navy": "#4E5180",
"Purple Pizzazz": "#FE4EDA",
"Purple Plum": "#9C51B6",
"Purple Taupe": "#50404D",
"Purpureus": "#9A4EAE",
"Quartz": "#51484F",
"Queen Blue": "#436B95",
"Queen Pink": "#E8CCD7",
"Quick Silver": "#A6A6A6",
"Quinacridone Magenta": "#8E3A59",
"Rackley": "#5D8AA8",
"Radical Red": "#FF355E",
"Raisin Black": "#242124",
"Rajah": "#FBAB60",
"Raspberry": "#E30B5D",
"Raspberry Glace": "#915F6D",
"Raspberry Pink": "#E25098",
"Raspberry Rose": "#B3446C",
"Raw Sienna": "#D68A59",
"Raw Umber": "#826644",
"Razzle Dazzle Rose": "#FF33CC",
"Razzmatazz": "#E3256B",
"Razzmic Berry": "#8D4E85",
"Rebecca Purple": "#663399",
"Red": "#FF0000",
"Red (Crayola)": "#EE204D",
"Red (Munsell)": "#F2003C",
"Red (NCS)": "#C40233",
"Red (Pantone)": "#ED2939",
"Red (Pigment)": "#ED1C24",
"Red (RYB)": "#FE2712",
"Red-Brown": "#A52A2A",
"Red Devil": "#860111",
"Red-Orange": "#FF5349",
"Red-Purple": "#E40078",
"Red Salsa": "#FD3A4A",
"Red-Violet": "#C71585",
"Redwood": "#A45A52",
"Regalia": "#522D80",
"Registration Black": "#000000",
"Resolution Blue": "#002387",
"Rhythm": "#777696",
"Rich Black": "#004040",
"Rich Black (FOGRA29)": "#010B13",
"Rich Black (FOGRA39)": "#010203",
"Rich Brilliant Lavender": "#F1A7FE",
"Rich Carmine": "#D70040",
"Rich Electric Blue": "#0892D0",
"Rich Lavender": "#A76BCF",
"Rich Lilac": "#B666D2",
"Rich Maroon": "#B03060",
"Rifle Green": "#444C38",
"Roast Coffee": "#704241",
"Robin Egg Blue": "#00CCCC",
"Rocket Metallic": "#8A7F80",
"Roman Silver": "#838996",
"Rose": "#FF007F",
"Rose Bonbon": "#F9429E",
"Rose Dust": "#9E5E6F",
"Rose Ebony": "#674846",
"Rose Gold": "#B76E79",
"Rose Madder": "#E32636",
"Rose Pink": "#FF66CC",
"Rose Quartz": "#AA98A9",
"Rose Red": "#C21E56",
"Rose Taupe": "#905D5D",
"Rose Vale": "#AB4E52",
"Rosewood": "#65000B",
"Rosso Corsa": "#D40000",
"Rosy Brown": "#BC8F8F",
"Royal Azure": "#0038A8",
"Royal Blue": "#002366",
"Royal Blue": "#4169E1",
"Royal Fuchsia": "#CA2C92",
"Royal Purple": "#7851A9",
"Royal Yellow": "#FADA5E",
"Ruber": "#CE4676",
"Rubine Red": "#D10056",
"Ruby": "#E0115F",
"Ruby Red": "#9B111E",
"Ruddy": "#FF0028",
"Ruddy Brown": "#BB6528",
"Ruddy Pink": "#E18E96",
"Rufous": "#A81C07",
"Russet": "#80461B",
"Russian Green": "#679267",
"Russian Violet": "#32174D",
"Rust": "#B7410E",
"Rusty Red": "#DA2C43",
"Sacramento State Green": "#00563F",
"Saddle Brown": "#8B4513",
"Safety Orange": "#FF7800",
"Safety Orange (Blaze Orange)": "#FF6700",
"Safety Yellow": "#EED202",
"Saffron": "#F4C430",
"Sage": "#BCB88A",
"St. Patrick's Blue": "#23297A",
"Salmon": "#FA8072",
"Salmon Pink": "#FF91A4",
"Sand": "#C2B280",
"Sand Dune": "#967117",
"Sandstorm": "#ECD540",
"Sandy Brown": "#F4A460",
"Sandy Tan": "#FDD9B5",
"Sandy Taupe": "#967117",
"Sangria": "#92000A",
"Sap Green": "#507D2A",
"Sapphire": "#0F52BA",
"Sapphire Blue": "#0067A5",
"Sasquatch Socks": "#FF4681",
"Satin Sheen Gold": "#CBA135",
"Scarlet": "#FF2400",
"Scarlet": "#FD0E35",
"Schauss Pink": "#FF91AF",
"School Bus Yellow": "#FFD800",
"Screamin' Green": "#66FF66",
"Sea Blue": "#006994",
"Sea Foam Green": "#9FE2BF",
"Sea Green": "#2E8B57",
"Sea Serpent": "#4BC7CF",
"Seal Brown": "#59260B",
"Seashell": "#FFF5EE",
"Selective Yellow": "#FFBA00",
"Sepia": "#704214",
"Shadow": "#8A795D",
"Shadow Blue": "#778BA5",
"Shampoo": "#FFCFF1",
"Shamrock Green": "#009E60",
"Sheen Green": "#8FD400",
"Shimmering Blush": "#D98695",
"Shiny Shamrock": "#5FA778",
"Shocking Pink": "#FC0FC0",
"Shocking Pink (Crayola)": "#FF6FFF",
"Sienna": "#882D17",
"Silver": "#C0C0C0",
"Silver Chalice": "#ACACAC",
"Silver Lake Blue": "#5D89BA",
"Silver Pink": "#C4AEAD",
"Silver Sand": "#BFC1C2",
"Sinopia": "#CB410B",
"Sizzling Red": "#FF3855",
"Sizzling Sunrise": "#FFDB00",
"Skobeloff": "#007474",
"Sky Blue": "#87CEEB",
"Sky Magenta": "#CF71AF",
"Slate Blue": "#6A5ACD",
"Slate Gray": "#708090",
"Smalt (Dark Powder Blue)": "#003399",
"Slimy Green": "#299617",
"Smashed Pumpkin": "#FF6D3A",
"Smitten": "#C84186",
"Smoke": "#738276",
"Smokey Topaz": "#832A0D",
"Smoky Black": "#100C08",
"Smoky Topaz": "#933D41",
"Snow": "#FFFAFA",
"Soap": "#CEC8EF",
"Solid Pink": "#893843",
"Sonic Silver": "#757575",
"Spartan Crimson": "#9E1316",
"Space Cadet": "#1D2951",
"Spanish Bistre": "#807532",
"Spanish Blue": "#0070B8",
"Spanish Carmine": "#D10047",
"Spanish Crimson": "#E51A4C",
"Spanish Gray": "#989898",
"Spanish Green": "#009150",
"Spanish Orange": "#E86100",
"Spanish Pink": "#F7BFBE",
"Spanish Red": "#E60026",
"Spanish Sky Blue": "#00FFFF",
"Spanish Violet": "#4C2882",
"Spanish Viridian": "#007F5C",
"Spicy Mix": "#8B5F4D",
"Spiro Disco Ball": "#0FC0FC",
"Spring Bud": "#A7FC00",
"Spring Frost": "#87FF2A",
"Spring Green": "#00FF7F",
"Star Command Blue": "#007BB8",
"Steel Blue": "#4682B4",
"Steel Pink": "#CC33CC",
"Steel Teal": "#5F8A8B",
"Stil De Grain Yellow": "#FADA5E",
"Stizza": "#990000",
"Stormcloud": "#4F666A",
"Straw": "#E4D96F",
"Strawberry": "#FC5A8D",
"Sugar Plum": "#914E75",
"Sunburnt Cyclops": "#FF404C",
"Sunglow": "#FFCC33",
"Sunny": "#F2F27A",
"Sunray": "#E3AB57",
"Sunset": "#FAD6A5",
"Sunset Orange": "#FD5E53",
"Super Pink": "#CF6BA9",
"Sweet Brown": "#A83731",
"Tan": "#D2B48C",
"Tangelo": "#F94D00",
"Tangerine": "#F28500",
"Tangerine Yellow": "#FFCC00",
"Tango Pink": "#E4717A",
"Tart Orange": "#FB4D46",
"Taupe": "#483C32",
"Taupe Gray": "#8B8589",
"Tea Green": "#D0F0C0",
"Tea Rose": "#F88379",
"Tea Rose": "#F4C2C2",
"Teal": "#008080",
"Teal Blue": "#367588",
"Teal Deer": "#99E6B3",
"Teal Green": "#00827F",
"Telemagenta": "#CF3476",
"Tenné (Tawny)": "#CD5700",
"Terra Cotta": "#E2725B",
"Thistle": "#D8BFD8",
"Thulian Pink": "#DE6FA1",
"Tickle Me Pink": "#FC89AC",
"Tiffany Blue": "#0ABAB5",
"Tiger's Eye": "#E08D3C",
"Timberwolf": "#DBD7D2",
"Titanium Yellow": "#EEE600",
"Tomato": "#FF6347",
"Toolbox": "#746CC0",
"Topaz": "#FFC87C",
"Tractor Red": "#FD0E35",
"Trolley Grey": "#808080",
"Tropical Rain Forest": "#00755E",
"Tropical Violet": "#CDA4DE",
"True Blue": "#0073CF",
"Tufts Blue": "#3E8EDE",
"Tulip": "#FF878D",
"Tumbleweed": "#DEAA88",
"Turkish Rose": "#B57281",
"Turquoise": "#40E0D0",
"Turquoise Blue": "#00FFEF",
"Turquoise Green": "#A0D6B4",
"Turquoise Surf": "#00C5CD",
"Turtle Green": "#8A9A5B",
"Tuscan": "#FAD6A5",
"Tuscan Brown": "#6F4E37",
"Tuscan Red": "#7C4848",
"Tuscan Tan": "#A67B5B",
"Tuscany": "#C09999",
"Twilight Lavender": "#8A496B",
"Tyrian Purple": "#66023C",
"UA Blue": "#0033AA",
"UA Red": "#D9004C",
"Ube": "#8878C3",
"UCLA Blue": "#536895",
"UCLA Gold": "#FFB300",
"UFO Green": "#3CD070",
"Ultramarine": "#3F00FF",
"Ultramarine Blue": "#4166F5",
"Ultra Pink": "#FF6FFF",
"Ultra Red": "#FC6C85",
"Umber": "#635147",
"Unbleached Silk": "#FFDDCA",
"United Nations Blue": "#5B92E5",
"University Of California Gold": "#B78727",
"Unmellow Yellow": "#FFFF66",
"UP Forest Green": "#014421",
"UP Maroon": "#7B1113",
"Upsdell Red": "#AE2029",
"Urobilin": "#E1AD21",
"USAFA Blue": "#004F98",
"USC Cardinal": "#990000",
"USC Gold": "#FFCC00",
"University Of Tennessee Orange": "#F77F00",
"Utah Crimson": "#D3003F",
"Van Dyke Brown": "#664228",
"Vanilla": "#F3E5AB",
"Vanilla Ice": "#F38FA9",
"Vegas Gold": "#C5B358",
"Venetian Red": "#C80815",
"Verdigris": "#43B3AE",
"Vermilion": "#E34234",
"Vermilion": "#D9381E",
"Veronica": "#A020F0",
"Very Light Azure": "#74BBFB",
"Very Light Blue": "#6666FF",
"Very Light Malachite Green": "#64E986",
"Very Light Tangelo": "#FFB077",
"Very Pale Orange": "#FFDFBF",
"Very Pale Yellow": "#FFFFBF",
"Violet": "#8F00FF",
"Violet (Color Wheel)": "#7F00FF",
"Violet (RYB)": "#8601AF",
"Violet (Web)": "#EE82EE",
"Violet-Blue": "#324AB2",
"Violet-Red": "#F75394",
"Viridian": "#40826D",
"Viridian Green": "#009698",
"Vista Blue": "#7C9ED9",
"Vivid Amber": "#CC9900",
"Vivid Auburn": "#922724",
"Vivid Burgundy": "#9F1D35",
"Vivid Cerise": "#DA1D81",
"Vivid Cerulean": "#00AAEE",
"Vivid Crimson": "#CC0033",
"Vivid Gamboge": "#FF9900",
"Vivid Lime Green": "#A6D608",
"Vivid Malachite": "#00CC33",
"Vivid Mulberry": "#B80CE3",
"Vivid Orange": "#FF5F00",
"Vivid Orange Peel": "#FFA000",
"Vivid Orchid": "#CC00FF",
"Vivid Raspberry": "#FF006C",
"Vivid Red": "#F70D1A",
"Vivid Red-Tangelo": "#DF6124",
"Vivid Sky Blue": "#00CCFF",
"Vivid Tangelo": "#F07427",
"Vivid Tangerine": "#FFA089",
"Vivid Vermilion": "#E56024",
"Vivid Violet": "#9F00FF",
"Vivid Yellow": "#FFE302",
"Volt": "#CEFF00",
"Wageningen Green": "#34B233",
"Warm Black": "#004242",
"Waterspout": "#A4F4F9",
"Weldon Blue": "#7C98AB",
"Wenge": "#645452",
"Wheat": "#F5DEB3",
"White": "#FFFFFF",
"White Smoke": "#F5F5F5",
"Wild Blue Yonder": "#A2ADD0",
"Wild Orchid": "#D470A2",
"Wild Strawberry": "#FF43A4",
"Wild Watermelon": "#FC6C85",
"Willpower Orange": "#FD5800",
"Windsor Tan": "#A75502",
"Wine": "#722F37",
"Wine Dregs": "#673147",
"Winter Sky": "#FF007C",
"Winter Wizard": "#A0E6FF",
"Wintergreen Dream": "#56887D",
"Wisteria": "#C9A0DC",
"Wood Brown": "#C19A6B",
"Xanadu": "#738678",
"Yale Blue": "#0F4D92",
"Yankees Blue": "#1C2841",
"Yellow": "#FFFF00",
"Yellow (Crayola)": "#FCE883",
"Yellow (Munsell)": "#EFCC00",
"Yellow (NCS)": "#FFD300",
"Yellow (Pantone)": "#FEDF00",
"Yellow (Process)": "#FFEF00",
"Yellow (RYB)": "#FEFE33",
"Yellow-Green": "#9ACD32",
"Yellow-Orange": "#FFAE42",
"Yellow Rose": "#FFF000",
"Yellow Sunshine": "#FFF700",
"Zaffre": "#0014A8",
"Zinnwaldite Brown": "#2C1608",
"Zomp": "#39A78E",
}
ALL = {}
ALL.update(WIKIPEDIA)
ALL.update(CRAYOLA)
ALL.update(CSS)
_words_re = re.compile(r'[- ()\n\t]+')
def words(name):
raw = _words_re.split(name)
return [w.replace("'", '').lower() for w in raw if w]
def canonicalize(name):
return ' '.join(words(name))
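# A couple of illustrative expansions (derived from the rules above):
# canonicalize("Pale Violet-Red") yields 'pale violet red', since
# hyphens split words, while canonicalize("Payne's Grey") yields
# 'paynes grey', since apostrophes are dropped rather than split on.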
LOOKUP = {}
CONTAINS_WORD = collections.defaultdict(set)
for name, code in ALL.items():
canonical = canonicalize(name)
LOOKUP[canonical] = (code, name)
for w in words(name):
CONTAINS_WORD[w].add(name)
def find_exact(name):
"""Retrieves the hex code and canonical name for a color.
The name given may differ in capitalization and punctuation from
the canonical one, but it must have the same words in the same
order.
"""
return LOOKUP.get(canonicalize(name), None)
def disambiguate(name):
"""Gets color names that are most similar to a given name.
Returns a list of color names tied for the most words in common
with the words of the name given.
"""
counts = collections.Counter()
best_set = set()
best_count = 0
for word in words(name):
if word in CONTAINS_WORD:
for candidate in CONTAINS_WORD[word]:
counts[candidate] += 1
count = counts[candidate]
if count == best_count:
best_set.add(candidate)
elif count > best_count:
best_set = {candidate}
best_count = count
return list(best_set)
def find_best(name):
"""Finds the best matching color for a given name.
Returns the color with the given name, or failing that the unique
color that is the closest match; or failing that returns None.
"""
exact = find_exact(name)
if exact:
return exact
closest = disambiguate(name)
if len(closest) == 1:
guess, = closest
return find_exact(guess)
return None
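# A minimal usage sketch (hypothetical interactive session; the hex
# values assume the WIKIPEDIA entries above are not overridden by the
# CRAYOLA or CSS tables, and disambiguate() follows set iteration
# order, so its result ordering is not guaranteed):
#
# >>> find_exact("pale gold")
# ('#E6BE8A', 'Pale Gold')
# >>> find_best("tickle me pink")
# ('#FC89AC', 'Tickle Me Pink')
# >>> find_best("no such color") is None
# True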
|
inexactually/irisbot
|
colornames.py
|
Python
|
mit
| 51,179
|
[
"Amber",
"BLAST"
] |
a0ffc94330268796becc15725dad36976d9cff7c2b920158e734c94755c1251e
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle.proto.ParameterConfig_pb2 import ParameterConfig
from collections import OrderedDict
import paddle.trainer.config_parser as cp
import struct
import tarfile
import cStringIO
from topology import Topology
__all__ = ['Parameters', 'create']
def create(layers):
"""
Create parameter pool by topology.
:param layers:
:return:
"""
topology = Topology(layers)
pool = Parameters()
initializers = cp.g_parameter_initializer_map
for param in topology.proto().parameters:
pool.__append_config__(param)
if param.name in initializers:
pool[param.name] = initializers[param.name](param.name)
return pool
class Parameters(object):
"""
`Parameters` manages all the learnable parameters in a neural network.
It stores parameters' information in an OrderedDict. The key is
the name of a parameter, and value is a parameter's configuration(in
protobuf format), such as initialization mean and std, its size, whether it
is a static parameter, and so on.
:param __param_conf__: store the configurations of learnable parameters in
the network in an OrderedDict. Parameter is added one by one into the
dict by following their created order in the network: parameters of
the previous layers in a network are created first. You can visit the
parameters from bottom to top by iterating over this dict.
:type __param_conf__: OrderedDict
:param __gradient_machines__: all of the parameters in a neural network are
appended to a PaddlePaddle gradient machine, which is used internally to
copy parameter values between C++ and Python end.
:type __gradient_machines__: list
:param __tmp_params__: a dict to store dummy parameters if no
__gradient_machines__ is appended to `Parameters`.
:type __tmp_params__: dict
Basic usage is:
.. code-block:: python
data = paddle.layers.data(...)
...
out = paddle.layers.fc(...)
parameters = paddle.parameters.create(out)
parameter_names = parameters.names()
fc_mat = parameters.get('fc')
print fc_mat
"""
def __init__(self):
self.__param_conf__ = OrderedDict()
self.__gradient_machines__ = []
self.__tmp_params__ = dict()
def __append_config__(self, param_conf):
"""
Append a parameter configuration. It is used to initialize Parameters and
should be invoked only in paddle.parameters.create
:param param_conf: The parameter configuration in protobuf
:type param_conf: ParameterConfig
:return: Nothing
"""
if not isinstance(param_conf, ParameterConfig):
raise ValueError("param_conf must be paddle.proto.ParameterConfig")
if param_conf.name in self.__param_conf__:
raise ValueError("duplicated parameter %s" % param_conf.name)
self.__param_conf__[param_conf.name] = param_conf
def update_param_conf(self, model_config):
for p in model_config.parameters:
self.__param_conf__[p.name] = p
def keys(self):
"""
keys are the names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.__param_conf__.keys()
def names(self):
"""
names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.keys()
def has_key(self, key):
"""
Return True if there is a parameter with the given name (key).
:param key: Parameter name
:type key: basestring
:return: True if contains such key
"""
return key in self.__param_conf__.keys()
def __iter__(self):
"""
Return an iterator of parameter name. It is used by `for loop`
or `in` operator.
.. code-block:: python
parameters = paddle.parameters.create(...)
if "fc_param" in parameters:
print 'OK'
:return: an iterator of parameter name
:rtype: iterator
"""
return iter(self.__param_conf__)
def __getter_inner(self, key, param_type):
import py_paddle.swig_paddle as api
shape = self.get_shape(key)
if len(self.__gradient_machines__) == 0:
# create new parameter in python numpy.
if key in self.__tmp_params__:
return self.__tmp_params__[key]
else:
return np.ndarray(shape=shape, dtype=np.float32)
else:
for each_gradient_machine in self.__gradient_machines__:
param = __get_parameter_in_gradient_machine__(
each_gradient_machine, key)
# to simplify the implementation for now, we always copy from C++
assert isinstance(param, api.Parameter)
val = param.getBuf(param_type)
assert isinstance(val, api.Vector)
val = val.copyToNumpyArray()
return val
# else continue
raise RuntimeError("Unexpected branch")
def __getitem__(self, key):
"""
Get parameter by parameter name. It uses Python dict syntax.
:note: It will always copy the parameter from C++ side.
:param key: Parameter name
:type key: basestring
:return: parameter value
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
return self.__getter_inner(key, api.PARAMETER_VALUE)
def get_shape(self, key):
"""
get shape of the parameter.
:param key: parameter name
:type key: basestring
:return: parameter's shape
:rtype: tuple
"""
if not isinstance(key, basestring):
raise ValueError("parameter name should be string")
if not self.has_key(key):
raise ValueError("No such parameter %s" % key)
conf = self.__param_conf__[key]
dims = conf.dims if conf.dims else (1, conf.size)
return tuple(map(int, dims))
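# For example, a bias-like parameter with no explicit dims and
# size 10 is reported with the row-vector shape (1, 10).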
def __setitem__(self, key, value):
"""
Set parameter by parameter name & value. It uses Python dict syntax.
:note: It will always copy the parameter to C++ side.
:param key: Parameter name
:type key: basestring
:param value: Parameter matrix.
:type value: np.ndarray
:return: Nothing
"""
if not isinstance(value, np.ndarray):
raise ValueError("Must return ndarray")
value = value.astype(dtype=np.float32)
shape = self.get_shape(key)
if value.shape != shape:
raise ValueError("Value shape mismatch, expect %s, should %s" %
(shape, value.shape))
if len(self.__gradient_machines__) == 0:
self.__tmp_params__[key] = value
else:
for each_gradient_machine in self.__gradient_machines__:
__copy_parameter_to_gradient_machine__(each_gradient_machine,
key, value)
def get(self, parameter_name):
"""
Get parameter by parameter name.
:note: It will always copy the parameter from C++ side.
:param parameter_name: parameter name
:type parameter_name: basestring
:return: The parameter matrix.
:rtype: np.ndarray
"""
return self.__getitem__(key=parameter_name)
def get_grad(self, key):
"""
Get gradient by parameter name.
:note: It will always copy the parameter from C++ side.
:param key: parameter name
:type key: basestring
:return: The gradient matrix.
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
if self.__param_conf__[key].is_static:
return np.zeros(self.__param_conf__[key].size, dtype=np.float32)
return self.__getter_inner(key, api.PARAMETER_GRADIENT)
def set(self, parameter_name, value):
"""
Set parameter by parameter name & matrix.
:param parameter_name: parameter name
:type parameter_name: basestring
:param value: parameter matrix
:type value: np.ndarray
:return: Nothing.
"""
self.__setitem__(key=parameter_name, value=value)
def append_gradient_machine(self, gradient_machine):
"""
append gradient machine to parameters. This method is used internally in
Trainer.train.
:param gradient_machine: PaddlePaddle C++ GradientMachine object.
:type gradient_machine: api.GradientMachine
:return:
"""
import py_paddle.swig_paddle as api
if not isinstance(gradient_machine, api.GradientMachine):
raise ValueError("gradient_machine should be api.GradientMachine")
if len(self.__tmp_params__) != 0:
for name, val in self.__tmp_params__.iteritems():
try:
__copy_parameter_to_gradient_machine__(gradient_machine,
name, val)
except ValueError:
# If no such parameter in gradient machine, then don't copy
pass
self.__gradient_machines__.append(gradient_machine)
def serialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
param = self.get(name)
size = reduce(lambda a, b: a * b, param.shape)
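# The 16-byte header below is struct "IIQ": a uint32 format version
# (0), a uint32 per-element width in bytes (4, matching the float32
# cast that follows), and a uint64 element count. deserialize() skips
# these 16 bytes when reading.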
f.write(struct.pack("IIQ", 0, 4, size))
param = param.astype(np.float32)
s = param.tostring()
wrote_size = 0
buf = buffer(s, wrote_size, 65535)
while buf: # f.write crashes with big data blobs.
f.write(buf)
wrote_size += 65535
buf = buffer(s, wrote_size, 65535)
def deserialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
f.read(16) # header
arr = np.frombuffer(f.read(), dtype=np.float32)
self.set(name, arr.reshape(self.get_shape(name)))
def to_tar(self, f):
"""
Save parameters to a tar file.
WARNING: You should use `paddle.v2.trainer.SGD.save_parameter_to_tar(f)`
to save parameters most of the time. Otherwise, some settings such
as model average will not take effect.
:param f:
:type f: file
:return:
"""
tar = tarfile.TarFile(fileobj=f, mode='w')
for nm in self.names():
buf = cStringIO.StringIO()
self.serialize(nm, buf)
tarinfo = tarfile.TarInfo(name=nm)
buf.seek(0)
tarinfo.size = len(buf.getvalue())
tar.addfile(tarinfo, buf)
conf = self.__param_conf__[nm]
confStr = conf.SerializeToString()
tarinfo = tarfile.TarInfo(name="%s.protobuf" % nm)
tarinfo.size = len(confStr)
buf = cStringIO.StringIO(confStr)
buf.seek(0)
tar.addfile(tarinfo, fileobj=buf)
@staticmethod
def from_tar(f):
"""
Create a `Parameters` object from the given file. The resulting
`Parameters` object contains only the parameters stored in that
file. It assumes the parameters in the file match those of the
defined network, so it can be used, for example, at inference time.
:param f: the initialized model file.
:type f: tar file
:return: A Parameters object.
:rtype: Parameters.
"""
params = Parameters()
tar = tarfile.TarFile(fileobj=f, mode='r')
for finfo in tar:
assert isinstance(finfo, tarfile.TarInfo)
if finfo.name.endswith('.protobuf'):
f = tar.extractfile(finfo)
conf = ParameterConfig()
conf.ParseFromString(f.read())
params.__append_config__(conf)
for param_name in params.names():
f = tar.extractfile(param_name)
params.deserialize(param_name, f)
return params
def init_from_tar(self, f):
"""
Different from `from_tar`, this interface can be used to
init partial network parameters from another saved model.
:param f: the initialized model file.
:type f: tar file
:return: Nothing.
"""
tar_param = Parameters.from_tar(f)
for pname in tar_param.names():
if pname in self.names():
self.set(pname, tar_param.get(pname))
def __get_parameter_in_gradient_machine__(gradient_machine, name):
"""
:param gradient_machine:
:type gradient_machine: api.GradientMachine
:param name:
:return:
:rtype: api.Parameter
"""
params = filter(lambda p: p.getName() == name,
gradient_machine.getParameters())
if len(params) == 0:
raise ValueError("No such parameter")
elif len(params) > 1:
raise ValueError("Unexpected branch")
else:
return params[0]
def __copy_parameter_to_gradient_machine__(gradient_machine, name, arr):
"""
Copy a python ndarray into the gradient machine.
:param gradient_machine:
:type gradient_machine: api.GradientMachine
:param name:
:param arr:
:type arr: np.ndarray
:return:
:rtype: api.Parameter
"""
import py_paddle.swig_paddle as api
param = __get_parameter_in_gradient_machine__(gradient_machine, name)
vec = param.getBuf(api.PARAMETER_VALUE)
assert isinstance(vec, api.Vector)
vec.copyFromNumpyArray(arr.flatten())
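# A minimal save/load round-trip sketch (hypothetical file name;
# assumes `out` is the output layer of a network built with the v2
# layer API):
#
# parameters = create(out)
# with open('params.tar', 'wb') as f:
# parameters.to_tar(f)
# with open('params.tar', 'rb') as f:
# restored = Parameters.from_tar(f)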
|
pengli09/Paddle
|
python/paddle/v2/parameters.py
|
Python
|
apache-2.0
| 14,433
|
[
"VisIt"
] |
181c80f4d95c64e27ed5203aee0d52547d39e88aa7134a4b3dc9b7ff7d91c454
|
"""
Custom PDB class implementation.
The code below contains heavily modified parts of Jacob Durrant's
NNScore 2.0.1. The following notice is copied from the original NNScore
file:
# NNScore 2.01 is released under the GNU General Public License (see
# http://www.gnu.org/licenses/gpl.html).
# If you have any questions, comments, or suggestions, please don't
# hesitate to contact me, Jacob Durrant, at jdurrant [at] ucsd [dot]
# edu. If you use NNScore 2.01 in your work, please cite [REFERENCE
# HERE].
"""
import ast
import math
import textwrap
import warnings
import numpy as np
from vs_utils.utils.nnscore_utils import AromaticRing
from vs_utils.utils.nnscore_utils import Atom
from vs_utils.utils.nnscore_utils import average_point
from vs_utils.utils.nnscore_utils import Charged
from vs_utils.utils.nnscore_utils import Point
from vs_utils.utils.nnscore_utils import angle_between_three_points
from vs_utils.utils.nnscore_utils import cross_product
from vs_utils.utils.nnscore_utils import dihedral
from vs_utils.utils.nnscore_utils import dot_product
from vs_utils.utils.nnscore_utils import vector_subtraction
__author__ = "Bharath Ramsundar and Jacob Durrant"
__license__ = "GNU General Public License"
def remove_redundant_rings(rings):
"""Filters out those rings which are supersets of other rings.
Rings can be supersets of other rings, especially in molecules like
polycyclic aromatic hydrocarbons. This function ensures that only
"non-decomposable" rings remain in our list. Rings of length-0 are also
removed.
TODO(rbharath): There should be no rings of length zero if my
understanding is correct. See if we can remove this check.
Parameters
----------
rings: list
List of all rings in molecule.
"""
# Remove rings of length 0
rings = [ring for ring in rings if ring]
# To remove duplicate entries, we convert rings from a list to set, and
# then back to a list again. There's a snafu since each ring in rings is
# itself a list (and lists are unhashable in python). To circumvent this
# issue, we convert each ring into a string (after sorting). For example,
# [2, 1] maps to '[1, 2]'. These strings are hashable. To recover the
# original lists, we use ast.literal_eval.
rings = [ast.literal_eval(ring_str) for ring_str in
list(set([str(sorted(ring)) for ring in rings]))]
# Use dictionary to maintain state about which rings are supersets.
ring_dict = dict(zip(range(len(rings)), rings))
for fst_index, fst_ring in enumerate(rings):
for snd_index, snd_ring in enumerate(rings):
if fst_index == snd_index:
continue
if (set(fst_ring).issubset(set(snd_ring))
and snd_index in ring_dict):
del ring_dict[snd_index]
return ring_dict.values()
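# For example, remove_redundant_rings([[2, 1], [1, 2], [1, 2, 3], []])
# drops the empty ring, collapses the two orderings of [1, 2] into one
# entry, and removes [1, 2, 3] because it is a superset of [1, 2],
# leaving just [[1, 2]].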
def print_warning(atom, residue, need):
"""
Prints warning if residue has improper structure.
Parameters
----------
atom: string
Name of affected atom.
residue: string
Name of affected residue.
need: string
Description of need for this atom in residue.
"""
text = ('WARNING: There is no atom named "%s"' % atom +
'in the protein residue ' + residue + '.' + ' '
'Please use standard naming conventions for all ' +
'protein residues. This atom is needed to determine ' +
'%s. If this residue is far from the ' % need +
'active site, this warning may not affect the NNScore.')
lines = textwrap.wrap(text, 80)
for line in lines:
print line
print
def bond_length(element1, element2):
"""
Returns approximate bond-length between atoms of element1 and element2.
Bond lengths taken from Handbook of Chemistry and Physics. The
information provided there was very specific, so representative
examples were used to specify the bond lengths. Situations could
arise where these lengths are incorrect, most likely small errors
(<0.06 Angstroms) in the hundredths place.
Parameters
----------
element1: string:
Name of first element.
element2: string
Name of second element.
"""
# All distances are in Angstroms. Duplicate pairs not specified. For
# example, to find distance ("H", "C"), the lookup key is ("C", "H")
distances = {
("C", "C"): 1.53,
("N", "N"): 1.425,
("O", "O"): 1.469,
("S", "S"): 2.048,
("SI", "SI"): 2.359,
("C", "H"): 1.059,
("C", "N"): 1.469,
("C", "O"): 1.413,
("C", "S"): 1.819,
("C", "F"): 1.399,
("C", "CL"): 1.790,
("C", "BR"): 1.910,
("C", "I"): 2.162,
("N", "H"): 1.009,
("N", "O"): 1.463,
("N", "BR"): 1.843,
("N", "CL"): 1.743,
("N", "F"): 1.406,
("N", "I"): 2.2,
("O", "S"): 1.577,
("O", "H"): 0.967,
# This one not from the source cited above. Not sure where it's from, but
# it wouldn't ever be used in the current context ("AutoGrow")
("S", "H"): 2.025/1.5,
("S", "N"): 1.633,
("S", "BR"): 2.321,
("S", "CL"): 2.283,
("S", "F"): 1.640,
("S", "I"): 2.687,
("P", "BR"): 2.366,
("P", "CL"): 2.008,
("P", "F"): 1.495,
("P", "I"): 2.490,
# estimate based on eyeballing Handbook of Chemistry and Physics
("P", "O"): 1.6,
("SI", "BR"): 2.284,
("SI", "CL"): 2.072,
("SI", "F"): 1.636,
("SI", "P"): 2.264,
("SI", "S"): 2.145,
("SI", "C"): 1.888,
("SI", "N"): 1.743,
("SI", "O"): 1.631,
("H", "H"): .7414,
}
if (element1, element2) in distances:
return distances[(element1, element2)]
elif (element2, element1) in distances:
return distances[(element2, element1)]
else:
raise ValueError("Distance between %s and %s is unknown" %
(element1, element2))
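# Lookups are symmetric, so both orderings resolve to the same entry
# (all values in Angstroms):
#
# >>> bond_length("C", "H")
# 1.059
# >>> bond_length("H", "C")
# 1.059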
class MultiStructure(object):
"""
Handler for PDB files with multiple models.
The output from autodock vina provides multiple output conformers in the
generated file. This class handles this type of output.
"""
def __init__(self):
self.molecules = {}
def _separate_into_models(self, lines, noisy):
"""Separate lines into a list of models."""
if noisy:
print "len(lines)"
print len(lines)
for line in lines:
print line
models = []
current = None
for line in lines:
if "MODEL" in line and current is None:
current = [line]
elif "ENDMDL" in line and current is not None:
current.append(line)
models.append(current)
current = None
elif current is not None:
current.append(line)
if noisy:
print "len(models)"
print len(models)
return models
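# For example, the lines ["MODEL 1", "ATOM ...", "ENDMDL", "MODEL 2",
# "ATOM ...", "ENDMDL"] yield two models, each retaining its MODEL and
# ENDMDL sentinel lines.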
def load_from_files(self, pdb_filename, pdbqt_filename):
"""Loads a collection of molecular structures from input files."""
with open(pdbqt_filename, "r") as f:
pdbqt_lines = f.readlines()
with open(pdb_filename, "r") as g:
pdb_lines = g.readlines()
pdb_models = self._separate_into_models(pdb_lines, True)
pdbqt_models = self._separate_into_models(pdbqt_lines, False)
assert len(pdb_models) == len(pdbqt_models)
for index, (pdb_lines, pdbqt_lines) in enumerate(
zip(pdb_models, pdbqt_models)):
self.molecules[index] = PDB()
self.molecules[index].load_from_lines(pdb_lines, pdbqt_lines)
class PDB(object):
"""
PDB file handler class.
TODO(rbharath): The class name here is misleading. This class actually
depends on both the pdb and pdbqt files.
Provides functionality for loading PDB files. Performs a number of
clean-up and annotation steps (filling in missing bonds, identifying
aromatic rings, charged groups, and protein secondary structure
assignation).
"""
def __init__(self):
self.all_atoms = {}
self.non_protein_atoms = {}
self.max_x = -9999.99
self.min_x = 9999.99
self.max_y = -9999.99
self.min_y = 9999.99
self.max_z = -9999.99
self.min_z = 9999.99
self.rotatable_bonds_count = 0
self.protein_resnames = [
"ALA", "ARG", "ASN", "ASP", "ASH", "ASX",
"CYS", "CYM", "CYX", "GLN", "GLU", "GLH", "GLX", "GLY", "HIS",
"HID", "HIE", "HIP", "HSE", "HSD", "ILE", "LEU", "LYS", "LYN", "MET", "PHE",
"PRO", "SER", "THR", "TRP", "TYR", "VAL"]
self.aromatic_rings = []
self.charges = [] # a list of objects of type charge (defined below)
def load_from_files(self, pdb_filename, pdbqt_filename):
"""Loads this molecule from files.
This function requires both a pdbqt and a pdb file (which must share
atom names and indices). The reason for this dual requirement is that
the pdbqt contains partial-charge information (which the pdb doesn't),
while the pdb contains bond information (which the pdbqt doesn't).
Parameters
----------
pdb_filename: string
Name of pdb file.
pdbqt_filename: string
Name of pdbqt file.
"""
with open(pdbqt_filename, "r") as f:
pdbqt_lines = f.readlines()
with open(pdb_filename, "r") as f:
pdb_lines = f.readlines()
self.load_from_lines(pdb_lines, pdbqt_lines)
def load_from_lines(self, pdb_lines, pdbqt_lines):
"""Loads the molecule from lines rather than files."""
# Reset internal state
self.__init__()
# Now load the file into a list
self.load_atoms_from_pdbqt_lines(pdbqt_lines)
self.load_bonds_from_pdb_lines(pdb_lines)
self.check_protein_format()
self.assign_ligand_aromatics()
self.assign_protein_aromatics()
self.assign_non_protein_charges()
self.assign_protein_charges()
self.assign_secondary_structure()
def load_atoms_from_pdbqt(self, pdbqt_filename):
"""Loads atoms and charges from provided PDBQT file.
Parameters
----------
pdbqt_filename: string
Name of pdbqt file.
"""
with open(pdbqt_filename, "r") as f:
pdbqt_lines = f.readlines()
self.load_atoms_from_pdbqt_lines(pdbqt_lines)
def load_atoms_from_pdbqt_lines(self, lines):
"""Loads atoms and charges from provided PDBQT lines.
TODO(rbharath): I'm no longer sure that this stateful paradigm is the
right way to do things. Can I make this functional?
Parameters
----------
lines: list
List of lines in pdbqt file
"""
autoindex = 1
atom_already_loaded = []
for line in lines:
if "between atoms" in line and " A " in line:
self.rotatable_bonds_count = self.rotatable_bonds_count + 1
if len(line) >= 7:
# Load atom data (coordinates, etc.)
if line[0:4] == "ATOM" or line[0:6] == "HETATM":
cur_atom = Atom()
cur_atom.read_atom_pdb_line(line)
# this string uniquely identifies each atom
key = (cur_atom.atomname.strip() + "_" +
str(cur_atom.resid) + "_" + cur_atom.residue.strip() +
"_" + cur_atom.chain.strip())
# so each atom can only be loaded once. No rotamers.
atom_already_loaded.append(key)
# So you're actually reindexing everything here.
self.all_atoms[autoindex] = cur_atom
#### TODO(rbharath): Disabling loading of non
if cur_atom.residue[-3:] not in self.protein_resnames:
self.non_protein_atoms[autoindex] = cur_atom
autoindex = autoindex + 1
def load_bonds_from_pdb(self, pdb_filename):
"""Loads bonds from PDB file.
Parameters
----------
pdb_filename: string
Name of pdb file.
"""
with open(pdb_filename, "r") as f:
lines = f.readlines()
self.load_bonds_from_pdb_lines(lines)
def load_bonds_from_pdb_lines(self, pdb_lines):
"""
Loads bonds from PDB file.
Bonds in PDBs are represented by CONECT statements. These lines follow
the following record format:
(see ftp://ftp.wwpdb.org/pub/pdb/doc/format_descriptions/Format_v33_Letter.pdf)
Columns DataType Definition
---------------------------------
1 - 6 String Record name "CONECT".
7 - 11 Int Atom index.
12 - 16 Int Index of bonded atom.
17 - 21 Int Index of bonded atom.
22 - 26 Int Index of bonded atom.
27 - 31 Int Index of bonded atom.
If more than 4 bonded atoms are present, then a second CONECT record
must be specified.
Parameters
----------
pdb_lines: list
List of lines from the pdb file.
Notes
-----
Malformed CONECT records are reported via warnings.warn and skipped
rather than raising an exception.
"""
for line in pdb_lines:
if "CONECT" in line:
if len(line) < 31:
warnings.warn(
"Bad PDB! Improperly formatted CONECT line (too short)")
continue
atom_index = int(line[6:11].strip())
if atom_index not in self.all_atoms:
warnings.warn(
"Bad PDB! Improper CONECT line: (atom index not loaded)")
continue
bonded_atoms = []
ranges = [(11, 16), (16, 21), (21, 26), (26, 31)]
misformatted = False
for (lower, upper) in ranges:
# Check that the range is nonempty.
if line[lower:upper].strip():
index = int(line[lower:upper])
if index not in self.all_atoms:
warnings.warn(
"Bad PDB! Improper CONECT line: (bonded atom not loaded)")
misformatted = True
break
bonded_atoms.append(index)
if misformatted:
continue
atom = self.all_atoms[atom_index]
atom.add_neighbor_atom_indices(bonded_atoms)
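# Note that the length check above requires the full 31-column CONECT
# record width: a short record like "CONECT  413  412  414" must be
# padded with trailing spaces to 31 columns or it is skipped with a
# warning.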
def save_pdb(self, filename):
"""
Writes a PDB file version of self to filename.
Parameters
----------
filename: string
path to desired PDB file output.
"""
f = open(filename, 'w')
towrite = self.save_pdb_string()
# just so no PDB is empty, VMD will load them all
if towrite.strip() == "":
towrite = "ATOM 1 X XXX 0.000 0.000 0.000 X"
f.write(towrite)
f.close()
def save_pdb_string(self):
"""
Generates a PDB string version of self. Used by SavePDB.
"""
to_output = ""
# write coordinates
for atomindex in self.all_atoms:
to_output = (
to_output + self.all_atoms[atomindex].create_pdb_line(atomindex)
+ "\n")
return to_output
def add_new_atom(self, atom):
"""
Adds an extra atom to this PDB.
Parameters
----------
atom: object of atom class
Will be added to self.
"""
self.all_atoms[len(self.all_atoms.keys()) + 1] = atom
def add_new_atoms(self, atoms):
"""
Convenience function to add many atoms.
Parameters
----------
atoms: list
Entries in atoms should be objects of type atom.
"""
for atom_obj in atoms:
self.add_new_atom(atom_obj)
def add_new_non_protein_atom(self, atom):
"""
Adds an extra non-protein atom to this PDB.
Parameters
----------
atom: object of atom class
Will be added to self.
"""
# first get available index
ind = len(self.all_atoms.keys()) + 1
# now add atom
self.all_atoms[ind] = atom
# Add to non-protein list
self.non_protein_atoms[ind] = atom
def connected_atoms(self, index, con_element):
"""
Returns indices of all neighbors of atom at index of given elt.
Parameters
----------
index: integer
Index of base atom.
con_element: string
Name of desired element.
"""
atom = self.all_atoms[index]
connected = []
for con_index in atom.indices_of_atoms_connecting:
con_atom = self.all_atoms[con_index]
if con_atom.element == con_element:
connected.append(con_index)
return connected
def connected_heavy_atoms(self, index):
"""
Returns indices of all connected heavy atoms.
Parameters
----------
index: integer
Index of base atom.
"""
atom = self.all_atoms[index]
connected = []
for con_index in atom.indices_of_atoms_connecting:
con_atom = self.all_atoms[con_index]
if con_atom.element != "H":
connected.append(con_index)
return connected
def check_protein_format(self):
"""Check that loaded protein structure is self-consistent.
Helper function called when loading PDB from file.
"""
for key, residue in self.get_residues().iteritems():
residue_names = [self.all_atoms[ind].atomname.strip() for ind in residue]
self.check_protein_format_process_residue(residue_names, key)
def check_protein_format_process_residue(self, residue_atoms, key):
"""
Check that specified residue in PDB is formatted correctly.
TODO(rbharath): Lots of code repeating in this function. Factor this
out.
Parameters
----------
residue_atoms: list
List of atom names in residue.
key: string
Should be in format RESNAME_RESNUMBER_CHAIN
"""
resname, _, _ = key.strip().split("_")
real_resname = resname[-3:]
if real_resname in self.protein_resnames:
if "N" not in residue_atoms:
print_warning("N", key, "secondary structure")
if "C" not in residue_atoms:
print_warning("C", key, "secondary structure")
if "CA" not in residue_atoms:
print_warning("CA", key, "secondary structure")
if (real_resname == "GLU" or real_resname == "GLH"
or real_resname == "GLX"):
if "OE1" not in residue_atoms:
print_warning("OE1", key, "salt-bridge interactions")
if "OE2" not in residue_atoms:
print_warning("OE2", key, "salt-bridge interactions")
if (real_resname == "ASP" or real_resname == "ASH"
or real_resname == "ASX"):
if "OD1" not in residue_atoms:
print_warning("OD1", key, "salt-bridge interactions")
if "OD2" not in residue_atoms:
print_warning("OD2", key, "salt-bridge interactions")
if real_resname == "LYS" or real_resname == "LYN":
if "NZ" not in residue_atoms:
print_warning(
"NZ", key, "pi-cation and salt-bridge interactions")
if real_resname == "ARG":
if "NH1" not in residue_atoms:
print_warning(
"NH1", key, "pi-cation and salt-bridge interactions")
if "NH2" not in residue_atoms:
print_warning(
"NH2", key, "pi-cation and salt-bridge interactions")
if (real_resname == "HIS" or real_resname == "HID"
or real_resname == "HIE" or real_resname == "HIP"):
if "NE2" not in residue_atoms:
print_warning(
"NE2", key, "pi-cation and salt-bridge interactions")
if "ND1" not in residue_atoms:
print_warning(
"ND1", key, "pi-cation and salt-bridge interactions")
if real_resname == "PHE":
if "CG" not in residue_atoms:
print_warning("CG", key, "pi-pi and pi-cation interactions")
if "CD1" not in residue_atoms:
print_warning("CD1", key, "pi-pi and pi-cation interactions")
if "CD2" not in residue_atoms:
print_warning("CD2", key, "pi-pi and pi-cation interactions")
if "CE1" not in residue_atoms:
print_warning("CE1", key, "pi-pi and pi-cation interactions")
if "CE2" not in residue_atoms:
print_warning("CE2", key, "pi-pi and pi-cation interactions")
if "CZ" not in residue_atoms:
print_warning("CZ", key, "pi-pi and pi-cation interactions")
if real_resname == "TYR":
if "CG" not in residue_atoms:
print_warning("CG", key, "pi-pi and pi-cation interactions")
if "CD1" not in residue_atoms:
print_warning("CD1", key, "pi-pi and pi-cation interactions")
if "CD2" not in residue_atoms:
print_warning("CD2", key, "pi-pi and pi-cation interactions")
if "CE1" not in residue_atoms:
print_warning("CE1", key, "pi-pi and pi-cation interactions")
if "CE2" not in residue_atoms:
print_warning("CE2", key, "pi-pi and pi-cation interactions")
if "CZ" not in residue_atoms:
print_warning("CZ", key, "pi-pi and pi-cation interactions")
if real_resname == "TRP":
if "CG" not in residue_atoms:
print_warning("CG", key, "pi-pi and pi-cation interactions")
if "CD1" not in residue_atoms:
print_warning("CD1", key, "pi-pi and pi-cation interactions")
if "CD2" not in residue_atoms:
print_warning("CD2", key, "pi-pi and pi-cation interactions")
if "NE1" not in residue_atoms:
print_warning("NE1", key, "pi-pi and pi-cation interactions")
if "CE2" not in residue_atoms:
print_warning("CE2", key, "pi-pi and pi-cation interactions")
if "CE3" not in residue_atoms:
print_warning("CE3", key, "pi-pi and pi-cation interactions")
if "CZ2" not in residue_atoms:
print_warning("CZ2", key, "pi-pi and pi-cation interactions")
if "CZ3" not in residue_atoms:
print_warning("CZ3", key, "pi-pi and pi-cation interactions")
if "CH2" not in residue_atoms:
print_warning("CH2", key, "pi-pi and pi-cation interactions")
if (real_resname == "HIS" or real_resname == "HID" or
real_resname == "HIE" or real_resname == "HIP"):
if "CG" not in residue_atoms:
print_warning("CG", key, "pi-pi and pi-cation interactions")
if "ND1" not in residue_atoms:
print_warning("ND1", key, "pi-pi and pi-cation interactions")
if "CD2" not in residue_atoms:
print_warning("CD2", key, "pi-pi and pi-cation interactions")
if "CE1" not in residue_atoms:
print_warning("CE2", key, "pi-pi and pi-cation interactions")
if "NE2" not in residue_atoms:
print_warning("NE2", key, "pi-pi and pi-cation interactions")
# Functions to determine the bond connectivity based on distance
# ==============================================================
# Functions to identify positive charges
# ======================================
def identify_metallic_charges(self):
"""Assign charges to metallic ions.
Returns
-------
charges: list
Contains a Charge object for every metallic cation.
"""
# Metallic atoms are assumed to be cations.
charges = []
for atom_index in self.non_protein_atoms:
atom = self.non_protein_atoms[atom_index]
if (atom.element == "MG" or atom.element == "MN" or
atom.element == "RH" or atom.element == "ZN" or
atom.element == "FE" or atom.element == "BI" or
atom.element == "AS" or atom.element == "AG"):
chrg = Charged(atom.coordinates, [atom_index], True)
charges.append(chrg)
return charges
def identify_nitrogen_charges(self):
"""Assign charges to nitrogen groups where necessary.
Returns
-------
charges: list
Contains a Charge object for every charged nitrogen group.
"""
charges = []
for atom_index in self.non_protein_atoms:
atom = self.non_protein_atoms[atom_index]
# Get all the quaternary amines on non-protein residues (these are the
# only non-protein groups that will be identified as positively
# charged). Note that nitrogen has only 5 valence electrons (out of 8
# for a full shell), so any nitrogen with four bonds must be positively
# charged (think NH4+).
if atom.element == "N":
# a quaternary amine, so it's easy
if atom.number_of_neighbors() == 4:
indexes = [atom_index]
indexes.extend(atom.indices_of_atoms_connecting)
# so the indices stored is just the index of the nitrogen and any
# attached atoms
chrg = Charged(atom.coordinates, indexes, True)
charges.append(chrg)
# maybe you only have two hydrogens added, but they're sp3 hybridized.
# Just count this as a quaternary amine, since I think the positive
# charge would be stabilized. This situation can arise with
# lone-pair electron nitrogen compounds like pyrrolidine
# (http://www.chem.ucla.edu/harding/tutorials/lone_pair.pdf)
elif atom.number_of_neighbors() == 3:
nitrogen = atom
atom1 = self.all_atoms[atom.indices_of_atoms_connecting[0]]
atom2 = self.all_atoms[atom.indices_of_atoms_connecting[1]]
atom3 = self.all_atoms[atom.indices_of_atoms_connecting[2]]
angle1 = (angle_between_three_points(
atom1.coordinates, nitrogen.coordinates, atom2.coordinates)
* 180.0 / math.pi)
angle2 = (angle_between_three_points(
atom1.coordinates, nitrogen.coordinates, atom3.coordinates)
* 180.0 / math.pi)
angle3 = (angle_between_three_points(
atom2.coordinates, nitrogen.coordinates, atom3.coordinates)
* 180.0 / math.pi)
average_angle = (angle1 + angle2 + angle3) / 3
# Test that the angles approximately match the tetrahedral 109
# degrees
if math.fabs(average_angle - 109.0) < 5.0:
indexes = [atom_index]
indexes.extend(atom.indices_of_atoms_connecting)
# so indexes added are the nitrogen and any attached atoms.
chrg = Charged(nitrogen.coordinates, indexes, True)
charges.append(chrg)
return charges
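# For example, NH4+ is caught directly by the four-neighbor branch,
# while a pyrrolidine-like nitrogen (three neighbors with bond angles
# near the tetrahedral 109 degrees) is caught by the sp3 test above.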
def identify_phosphorus_charges(self):
"""Assign charges to phosphorus groups where necessary.
Searches for phosphate-like groups and assigns charges.
Returns
-------
charges: list
Contains a Charge object for every charged phosphorus group.
"""
charges = []
for atom_index in self.non_protein_atoms:
atom = self.non_protein_atoms[atom_index]
# let's check for a phosphate or anything where a phosphorus is bound
# to two oxygens, where both oxygens are bound to only one heavy atom
# (the phosphorus). I think this will get several phosphorus
# substances.
if atom.element == "P":
oxygens = self.connected_atoms(atom_index, "O")
if len(oxygens) >= 2: # the phosphorus is bound to at least two oxygens
# now count the number of oxygens bound only to the phosphorus
count = 0
for oxygen_index in oxygens:
if len(self.connected_heavy_atoms(oxygen_index)) == 1:
count = count + 1
if count >= 2:
indexes = [atom_index]
indexes.extend(oxygens)
chrg = Charged(atom.coordinates, indexes, False)
charges.append(chrg)
return charges
def identify_carbon_charges(self):
"""Assign charges to carbon groups where necessary.
Checks for guanidino-like groups and carboxylates.
TODO(rbharath): This function is monolithic and very special-purpose.
Can some more general design be created here?
Returns
-------
charges: list
Contains a Charge object for every charged carbon group.
"""
charges = []
for atom_index in self.non_protein_atoms:
atom = self.non_protein_atoms[atom_index]
# let's check for guanidino-like groups (H2N-C-NH2 fragments; the
# carbon need not carry a third nitrogen to be caught here.)
if atom.element == "C":
# if the carbon has only three atoms connected to it
if atom.number_of_neighbors() == 3:
nitrogens = self.connected_atoms(atom_index, "N")
# if true, carbon is connected to at least two nitrogens now,
# so we need to count the number of nitrogens that are only
# connected to one heavy atom (the carbon)
if len(nitrogens) >= 2:
nitrogens_to_use = []
all_connected = atom.indices_of_atoms_connecting[:]
# Index of atom that connects this charged group to
# the rest of the molecule, ultimately to make sure
# it's sp3 hybridized. Remains -1 if no such atom exists.
connector_ind = -1
for atmindex in nitrogens:
if len(self.connected_heavy_atoms(atmindex)) == 1:
nitrogens_to_use.append(atmindex)
all_connected.remove(atmindex)
# TODO(rbharath): Is picking the first non-nitrogen atom
# correct here?
if len(all_connected) > 0:
connector_ind = all_connected[0]
# Handle case of guanidinium cation
if len(nitrogens_to_use) == 3 and connector_ind == -1:
charges.append(Charged(atom.coordinates.copy_of(),
[atom_index], True))
elif len(nitrogens_to_use) == 2 and connector_ind != -1:
# so there are exactly two nitrogens that are only
# connected to the carbon (and probably some
# hydrogens)
# now you need to make sure connector_ind atom is sp3 hybridized
connector_atom = self.all_atoms[connector_ind]
if ((connector_atom.element == "C" and
connector_atom.number_of_neighbors() == 4)
or (connector_atom.element == "O"
and connector_atom.number_of_neighbors() == 2)
or connector_atom.element == "N"
or connector_atom.element == "S"
or connector_atom.element == "P"):
# There are only two "guanidino" nitrogens. Assume the
# negative charge is spread equally between the two.
avg_pt = average_point(
[self.all_atoms[nitrogen].coordinates for nitrogen in
nitrogens_to_use])
indexes = [atom_index]
indexes.extend(nitrogens_to_use)
indexes.extend(self.connected_atoms(
nitrogens_to_use[0], "H"))
indexes.extend(self.connected_atoms(
nitrogens_to_use[1], "H"))
charges.append(Charged(avg_pt, indexes, True))
if atom.element == "C": # let's check for a carboxylate
# a carboxylate carbon will have three items connected to it.
if atom.number_of_neighbors() == 3:
oxygens = self.connected_atoms(atom_index, "O")
# a carboxylate will have two oxygens connected to
# it. Now, each of the oxygens should be connected
# to only one heavy atom (so if it's connected to a
# hydrogen, that's okay)
if len(oxygens) == 2:
if (len(self.connected_heavy_atoms(oxygens[0])) == 1
and len(self.connected_heavy_atoms(oxygens[1])) == 1):
# so it's a carboxylate! Add a negative charge.
# Assume negative charge is centered between the two
# oxygens.
avg_pt = average_point(
[self.all_atoms[oxygen].coordinates for oxygen in oxygens])
chrg = Charged(
avg_pt, [oxygens[0], atom_index, oxygens[1]], False)
charges.append(chrg)
return charges
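# As a concrete case, acetate (CH3-COO-) is caught by the carboxylate
# branch: the carboxyl carbon has three neighbors, both oxygens
# connect to exactly one heavy atom, and a negative Charged marker is
# placed midway between the two oxygens.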
def identify_sulfur_charges(self):
"""Assigns charges to sulfur groups.
Searches for Sulfonates.
Returns
-------
charges: list
Contains a Charge object for every charged sulfur group.
"""
charges = []
for atom_index in self.non_protein_atoms:
atom = self.non_protein_atoms[atom_index]
# let's check for a sulfonate or anything where a sulfur is
# bound to at least three oxygens and at least three are
# bound to only the sulfur (or the sulfur and a hydrogen).
if atom.element == "S":
oxygens = self.connected_atoms(atom_index, "O")
# if the sulfur is bound to at least three oxygens, count the
# number of oxygens that are bound only to the sulfur
if len(oxygens) >= 3:
count = 0
for oxygen_index in oxygens:
if len(self.connected_heavy_atoms(oxygen_index)) == 1:
count = count + 1
# so there are at least three oxygens that are only
# bound to the sulfur
if count >= 3:
indexes = [atom_index]
indexes.extend(oxygens)
chrg = Charged(atom.coordinates, indexes, False)
charges.append(chrg)
return charges
def assign_non_protein_charges(self):
"""
Assign positive and negative charges to non-protein atoms.
This function handles the following cases:
1) Metallic ions (assumed to be cations)
2) Quaternary amines (such as NH4+)
3) sp3 hybridized nitrogens (such as pyrrolidine)
4) Carboxylates (RCOO-)
5) Guanidino groups (NHC(=NH)NH2)
6) Phosphates (PO4(3-))
7) Sulfonates (RSO2O-)
"""
self.charges += self.identify_metallic_charges()
self.charges += self.identify_nitrogen_charges()
self.charges += self.identify_carbon_charges()
self.charges += self.identify_phosphorus_charges()
self.charges += self.identify_sulfur_charges()
def get_residues(self):
"""Returns a dictionary containing all residues in this protein.
The generated dictionary uses keys of the following type to uniquely identify
protein residues: RESNAME_RESNUMBER_CHAIN.
Returns
-------
residues: dictionary
Each key is of type defined above and each value is a list of the
atom-indices that make up this residue.
"""
residues = {}
# Group atoms in the same residue together
for atom_index in self.all_atoms:
atom = self.all_atoms[atom_index]
# Assign each atom a residue key.
key = atom.residue + "_" + str(atom.resid) + "_" + atom.chain
if key not in residues:
residues[key] = []
residues[key].append(atom_index)
# Handle edge case of last residue.
return residues
def assign_protein_charges(self):
"""Assigns charges to atoms in charged residues.
"""
residues = self.get_residues()
self.charges += self.get_lysine_charges(residues)
self.charges += self.get_arginine_charges(residues)
self.charges += self.get_histidine_charges(residues)
self.charges += self.get_glutamic_acid_charges(residues)
self.charges += self.get_aspartic_acid_charges(residues)
def get_residue_charges(self, residues, resnames, atomnames,
charged_atomnames, positive=True):
"""Helper function that assigns charges to specified residue.
Regardless of protonation state, we assume below that residues are
charged, since evidence in the literature ("The Cation Pi Interaction,"
TODO(rbharath): Verify citation) suggests that charges will be
stabilized.
Parameters
---------
residues: dictionary
Dict output by get_residue_list
resnames: list
List of acceptable names for residue (e.g. [PHE], [HIS, HIP, HIE,
HID])
atomnames: list
List of names of atoms in charged group.
charged_atomnames: list
List of atoms which will be averaged to yield charge location.
positive: bool
Whether charge is positive or not.
Returns
-------
charges: list
List of Charged objects.
"""
charges = []
for key, res in residues.iteritems():
resname, _, _ = key.strip().split("_")
real_resname = resname[-3:]
if real_resname in resnames:
indices = []
charged_atoms = [] # Atoms averaged to locate the charge.
# Select those atoms which are part of the charged group.
for index in res:
atom = self.all_atoms[index]
atomname = atom.atomname.strip()
if atomname in atomnames:
indices.append(index)
if atomname in charged_atomnames:
charged_atoms.append(atom)
if len(charged_atoms) == len(charged_atomnames):
avg_pt = average_point([n.coordinates for n in charged_atoms])
if avg_pt.magnitude() != 0:
charges.append(Charged(avg_pt, indices, positive))
return charges
def get_lysine_charges(self, residues):
"""Assign charges to lysine residues.
Regardless of protonation state, assume that lysine is charged.
Recall that LYS is positive charged lysine and LYN is neutral. See
http://www.cgl.ucsf.edu/chimera/docs/ContributedSoftware/addh/addh.html
Parameters
----------
residues: dictionary
Dict output by get_residue_list
"""
return self.get_residue_charges(
residues, ["LYS", "LYN"],
["NZ", "HZ1", "HNZ1", "HZ2", "HNZ2", "HZ3", "HNZ3"],
["NZ"])
def get_arginine_charges(self, residues):
"""Assign charges to arginine residues.
Parameters
----------
residues: dictionary
Dict output by get_residue_list
"""
return self.get_residue_charges(
residues, ["ARG"],
["NH1", "NH2", "2HH2", "HN22", "1HH2", "HN12", "CZ", "2HH1", "HN21",
"1HH1", "HN11"], ["NH1", "NH2"])
def get_histidine_charges(self, residues):
"""Assign charges to histidine residues.
The specific histidine name determines the protonation state:
* HID: Protonate delta-Nitrogen.
* HIE: Protonate epsilon-Nitrogen.
* HIP: Protonate both nitrogens.
* HIS: Protonation unspecified.
Regardless of protonation state, assume it's charged. This is based on
"The Cation-Pi Interaction," which suggests the protonated state would
be stabilized. But let's not consider HIS when doing salt bridges.
Parameters
----------
residues: dictionary
Dict output by get_residue_list
"""
return self.get_residue_charges(
residues, ["HIS", "HID", "HIE", "HIP"],
["NE2", "ND1", "HE2", "HD1", "CE1", "CD2", "CG"],
["NE2", "ND1"])
def get_glutamic_acid_charges(self, residues):
"""Assign charges to histidine residues.
The specific glutamic acid name determines the protonation state:
* GLU: Negatively charged (deprotonated).
* GLH: Neutral charge (protonated).
* GLX: Protonation unspecified.
See
http://aria.pasteur.fr/documentation/use-aria/version-2.2/non-standard-atom-or-residue-definitions
or
http://proteopedia.org/wiki/index.php/Standard_Residues
Regardless of protonation state, assume it's charged. This is based on
"The Cation-Pi Interaction," which suggests the protonated state would
be stabilized.
Parameters
----------
residues: dictionary
Dict output by get_residue_list
"""
return self.get_residue_charges(
residues, ["GLU", "GLH", "GLX"],
["OE1", "OE2", "CD"], ["OE1", "OE2"], positive=False)
def get_aspartic_acid_charges(self, residues):
"""Assign charges to aspartic acid residues.
The specific aspartic acid name determines the protonation.
* ASP: Negatively charged (deprotonated).
* ASH: Neutral charge (protonated).
* ASX: Protonation unspecified.
Regardless of protonation state, assume it's charged. This is based on
"The Cation-Pi Interaction," which suggests the protonated state would
be stabilized.
Parameters
----------
residues: dictionary
Dict output by get_residue_list
"""
return self.get_residue_charges(
residues, ["ASP", "ASH", "ASX"],
["OD1", "OD2", "CG"], ["OD1", "OD2"], positive=False)
# Functions to identify aromatic rings
# ====================================
def get_aromatic_marker(self, indices_of_ring):
"""Identify aromatic markers.
The aromatic marker is an object of class AromaticRing that specifies
the aromatic ring's center, radius, indices of ring-atoms, and equation
of aromatic plane (recall that an aromatic ring must be planar).
Parameters
----------
indices_of_ring: list
Contains atom indices for all atoms in the ring.
Raises
------
ValueError:
If len(indices_of_ring) < 3. In this case, it is not possible to
construct an aromatic marker (3 points are required to specify the
aromatic plane). This happens most often when a residue is missing
atoms (when the crystal structure failed to resolve an atom, it is
often omitted from the PDB file).
"""
if len(indices_of_ring) < 3:
raise ValueError("3 points must be specified to compute aromatic plane")
# first identify the center point
points_list = []
pos = np.zeros(3) # float accumulator, so coordinates are not truncated
for index in indices_of_ring:
atom = self.all_atoms[index]
points_list.append(atom.coordinates)
pos += atom.coordinates.as_array()
center = Point(coords=pos/len(indices_of_ring))
radius = 0.0
for index in indices_of_ring:
atom = self.all_atoms[index]
dist = center.dist_to(atom.coordinates)
if dist > radius:
radius = dist
# now get the plane that defines this ring. Recall that there are
# at least 3 points in indices_of_ring (guaranteed by the ValueError above).
ring_coords = lambda i: self.all_atoms[indices_of_ring[i]].coordinates
if len(indices_of_ring) == 3:
indices = [0, 1, 2]
elif len(indices_of_ring) == 4:
indices = [0, 1, 3]
elif len(indices_of_ring) > 4: # best, for 5 and 6 member rings
indices = [0, 2, 4]
a, b, c = [ring_coords(i) for i in indices]
ab = vector_subtraction(b, a)
ac = vector_subtraction(c, a)
abxac = cross_product(ab, ac)
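# The cross product of the in-plane edge vectors ab and ac is normal
# to the ring plane; together with any ring point it fixes the plane,
# and the first ring atom supplies the offset term d computed below.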
## formula for plane will be ax + by + cz = d
offset = dot_product(abxac, self.all_atoms[indices_of_ring[0]].coordinates)
return AromaticRing(center, indices_of_ring,
list(abxac.as_array()) + [offset], radius)
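# Editor's note: the plane fit above takes three ring atoms a, b, c,
# forms the normal n = (b - a) x (c - a), and stores the plane n . x = d
# with d = n . a. A minimal standalone sketch (plain numpy; the three
# points are hypothetical):
#
#     import numpy as np
#     a = np.array([0., 0., 0.])
#     b = np.array([1., 0., 0.])
#     c = np.array([0., 1., 0.])
#     n = np.cross(b - a, c - a)            # plane normal, here (0, 0, 1)
#     d = np.dot(n, a)                      # plane equation: n . x = d
#     assert np.isclose(np.dot(n, c), d)    # all three points lie on it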
def ring_is_flat(self, ring):
"""Checks whether specified ring is flat.
Parameters
----------
ring: list
List of the atom indices for ring.
"""
for ind in range(-3, len(ring)-3):
pt1 = self.non_protein_atoms[ring[ind]].coordinates
pt2 = self.non_protein_atoms[ring[ind+1]].coordinates
pt3 = self.non_protein_atoms[ring[ind+2]].coordinates
pt4 = self.non_protein_atoms[ring[ind+3]].coordinates
# first, let's see if the last atom in this ring is a carbon
# connected to four atoms. That would be a quick way of
# telling this is not an aromatic ring
cur_atom = self.non_protein_atoms[ring[ind+3]]
if cur_atom.element == "C" and cur_atom.number_of_neighbors() == 4:
return False
# now check the dihedral between the ring atoms to see if
# it's flat
angle = dihedral(pt1, pt2, pt3, pt4) * 180 / math.pi
# 15 degrees is the cutoff for the torsion over ring[ind]..ring[ind+3];
# dihedral() returns values in (-pi, pi)
if (angle > -165 and angle < -15) or (angle > 15 and angle < 165):
return False
# now check the dihedral between the ring atoms and an atom
# connected to the current atom to see if that's flat too.
for substituent_atom_index in cur_atom.indices_of_atoms_connecting:
pt_sub = self.non_protein_atoms[substituent_atom_index].coordinates
angle = dihedral(pt2, pt3, pt4, pt_sub) * 180 / math.pi
# 15 degrees is the cutoff here as well; dihedral() returns values
# in (-pi, pi)
if (angle > -165 and angle < -15) or (angle > 15 and angle < 165):
return False
return True
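# Editor's note: a planar ring has every consecutive 4-atom torsion near
# 0 or 180 degrees, which is exactly what the two window checks above
# enforce. Equivalent sketch (assumes this module's dihedral() helper):
#
#     torsion = abs(dihedral(pt1, pt2, pt3, pt4)) * 180 / math.pi
#     is_flat = torsion <= 15 or torsion >= 165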
def assign_ligand_aromatics(self):
"""Identifies aromatic rings in ligands.
"""
# Get all the rings containing each of the atoms in the ligand
rings = []
for atom_index in self.non_protein_atoms:
rings.extend(self.all_rings_containing_atom(atom_index))
rings = remove_redundant_rings(rings)
# Aromatic rings are of length 5 or 6
rings = [ring for ring in rings if len(ring) == 5 or len(ring) == 6]
# Due to data errors in PDB files, there are cases in which
# non-protein atoms are bonded to protein atoms. Manually remove these
# cases, by testing that ring atom indices are a subset of non-protein
# ring indices.
rings = [ring for ring in rings if
set(ring).issubset(self.non_protein_atoms.keys())]
# Aromatic rings are flat
rings = [ring for ring in rings if self.ring_is_flat(ring)]
for ring in rings:
self.aromatic_rings.append(self.get_aromatic_marker(ring))
def all_rings_containing_atom(self, index):
"""Identify all rings that contain atom at index.
Parameters
----------
index: int
Index of provided atom.
"""
all_rings = []
atom = self.all_atoms[index]
for connected_atom in atom.indices_of_atoms_connecting:
self.ring_recursive(connected_atom, [index], index, all_rings)
return all_rings
def ring_recursive(self, index, already_crossed, orig_atom, all_rings):
"""Recursive helper function for ring identification.
Parameters
----------
index: int
Index of specified atom.
already_crossed: list
List of atom-indices of atoms already seen in recursive traversal of
molecular graph.
orig_atom: int
Index of the original atom in ring.
all_rings: list
Used to recursively build up ring structure.
"""
# Aromatic rings are of length <= 6
if len(already_crossed) > 6:
return
atom = self.all_atoms[index]
updated_crossings = already_crossed[:]
updated_crossings.append(index)
for connected_atom in atom.indices_of_atoms_connecting:
if connected_atom not in already_crossed:
self.ring_recursive(
connected_atom, updated_crossings, orig_atom, all_rings)
if connected_atom == orig_atom and orig_atom != already_crossed[-1]:
all_rings.append(updated_crossings)
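# Editor's note: ring_recursive is a depth-limited DFS that records a ring
# whenever the walk returns to the starting atom. A toy illustration on a
# hypothetical 5-cycle, using a plain adjacency dict instead of Atom objects:
#
#     graph = {0: [1, 4], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3, 0]}
#     def rings_from(node, path, origin, found):
#         if len(path) > 6:                        # aromatic rings are <= 6 atoms
#             return
#         for nbr in graph[node]:
#             if nbr == origin and len(path) >= 2: # ignore the trivial back-edge
#                 found.append(path + [node])
#             elif nbr not in path:
#                 rings_from(nbr, path + [node], origin, found)
#     found = []
#     for nbr in graph[0]:
#         rings_from(nbr, [0], 0, found)
#     # found holds [0, 1, 2, 3, 4] twice, once per traversal direction,
#     # which is why the caller de-duplicates with remove_redundant_rings().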
def assign_protein_aromatics(self):
"""Identifies aromatic rings in protein residues.
"""
residues = self.get_residues()
self.aromatic_rings += self.get_phenylalanine_aromatics(residues)
self.aromatic_rings += self.get_tyrosine_aromatics(residues)
self.aromatic_rings += self.get_histidine_aromatics(residues)
self.aromatic_rings += self.get_tryptophan_aromatics(residues)
def get_residue_aromatics(self, residues, resname, ring_atomnames):
"""Helper function that identifies aromatics in given residue.
Parameters
----------
residues: dictionary
Dict output by get_residue_list
resname: list
List of acceptable names for residue (e.g. [PHE], [HIS, HIP, HIE,
HID])
ring_atomnames: list
List of names of atoms in aromatic ring.
Returns
-------
aromatics: list
List of Aromatic objects.
"""
aromatics = []
for key, res in residues.iteritems():
real_resname, _, _ = key.strip().split("_")
indices_of_ring = []
if real_resname in resname:
indices_of_ring = []
for index in res:
if self.all_atoms[index].atomname.strip() in ring_atomnames:
indices_of_ring.append(index)
# At least 3 indices are required to identify the aromatic plane.
if len(indices_of_ring) < 3:
continue
else:
aromatics.append(self.get_aromatic_marker(indices_of_ring))
#if self.get_aromatic_marker(indices_of_ring) is None:
# raise ValueError("None at %s for %s" % (key,
# str(indices_of_ring)))
return aromatics
def get_phenylalanine_aromatics(self, residues):
"""Assign aromatics in phenylalanines.
Parameters
----------
residues: dictionary
Dict output by get_residue_list
Returns
-------
aromatics: list
List of Aromatic objects for aromatics in phenylalanines.
"""
return self.get_residue_aromatics(
residues, ["PHE"],
["CG", "CD1", "CE1", "CZ", "CE2", "CD2"])
def get_tyrosine_aromatics(self, residues):
"""Assign aromatics in tyrosines.
Parameters
----------
residues: dictionary
Dict output by get_residue_list
Returns
-------
aromatics: list
List of Aromatic objects for aromatics in tyrosines.
"""
return self.get_residue_aromatics(
residues, ["TYR"], ["CG", "CD1", "CE1", "CZ", "CE2", "CD2"])
def get_histidine_aromatics(self, residues):
"""Assign aromatics in histidines.
Parameters
----------
residues: dictionary
Dict output by get_residue_list
Returns
-------
aromatics: list
List of Aromatic objects for aromatics in histidines.
"""
return self.get_residue_aromatics(
residues, ["HIS", "HID", "HIE", "HIP"],
["CG", "ND1", "CE1", "NE2", "CD2"])
def get_tryptophan_aromatics(self, residues):
"""Assign aromatics in tryptophans.
Parameters
----------
residues: dictionary
Dict output by get_residue_list
Returns
-------
aromatics: list
List of Aromatic objects for aromatics in tryptophans.
"""
# Tryptophan has two aromatic rings.
small_ring = self.get_residue_aromatics(
residues, ["TRP"],
["CG", "CD1", "NE1", "CE2", "CD2"])
large_ring = self.get_residue_aromatics(
residues, ["TRP"],
["CE2", "CD2", "CE3", "CZ3", "CH2", "CZ2"])
return small_ring + large_ring
# Functions to assign secondary structure to protein residues
# ===========================================================
def get_structure_dict(self):
"""Creates a dictionary of preliminary structure labels.
Uses a simple heuristic of checking dihedral angles to classify as
alpha helix or beta sheet.
TODO(rbharath): This prediction function is overly simplistic and
fails to provide reasonable results. Swap to use JPred results instead.
Returns
-------
structure: dict
Maps keys of format RESNUMBER_CHAIN to one of ALPHA, BETA, or OTHER.
"""
# first, we need to know what residues are available
resids = []
#print self.get_residues()
for key in self.get_residues():
_, resnum, chain = key.split("_")
resids.append(resnum + "_" + chain)
structure = {}
for resid in resids:
structure[resid] = "OTHER"
atoms = []
for atom_index in self.all_atoms:
atom = self.all_atoms[atom_index]
if atom.side_chain_or_backbone() == "BACKBONE":
# Keep a sliding window of 8 backbone atoms -- presumably two
# residues' worth of (N, CA, C, O); see the resid checks below.
if len(atoms) < 8:
atoms.append(atom)
else:
atoms.pop(0)
atoms.append(atom)
# now make sure the first four all have the same resid and
# the last four all have the same resid
# TODO(rbharath): Ugly code right here...
if (atoms[0].resid == atoms[1].resid
and atoms[0].resid == atoms[2].resid
and atoms[0].resid == atoms[3].resid
and atoms[0].resid != atoms[4].resid
and atoms[4].resid == atoms[5].resid
and atoms[4].resid == atoms[6].resid
and atoms[4].resid == atoms[7].resid
and atoms[0].resid + 1 == atoms[7].resid
and atoms[0].chain == atoms[7].chain):
resid1 = atoms[0].resid
resid2 = atoms[7].resid
# Now give easier to use names to the atoms
for atom in atoms:
atomname = atom.atomname.strip()
if atom.resid == resid1 and atomname == "N":
first_n = atom
if atom.resid == resid1 and atomname == "C":
first_c = atom
if atom.resid == resid1 and atomname == "CA":
first_ca = atom
if atom.resid == resid2 and atomname == "N":
second_n = atom
if atom.resid == resid2 and atomname == "C":
second_c = atom
if atom.resid == resid2 and atomname == "CA":
second_ca = atom
# Now compute the phi and psi dihedral angles
phi = (dihedral(first_c.coordinates, second_n.coordinates,
second_ca.coordinates, second_c.coordinates)
* 180.0 / math.pi)
psi = (dihedral(first_n.coordinates, first_ca.coordinates,
first_c.coordinates, second_n.coordinates)
* 180.0 / math.pi)
# Now use those angles to determine if it's alpha or beta
if phi > -145 and phi < -35 and psi > -70 and psi < 50:
key1 = str(first_c.resid) + "_" + first_c.chain
key2 = str(second_c.resid) + "_" + second_c.chain
structure[key1] = "ALPHA"
structure[key2] = "ALPHA"
if ((phi >= -180 and phi < -40 and psi <= 180 and psi > 90)
or (phi >= -180 and phi < -70 and psi <= -165)):
key1 = str(first_c.resid) + "_" + first_c.chain
key2 = str(second_c.resid) + "_" + second_c.chain
structure[key1] = "BETA"
structure[key2] = "BETA"
return structure
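# Editor's note: the classification above is a coarse Ramachandran-window
# test on the backbone (phi, psi) pair. Distilled into a hypothetical
# helper (angles in degrees):
#
#     def classify(phi, psi):
#         if -145 < phi < -35 and -70 < psi < 50:
#             return "ALPHA"
#         if ((-180 <= phi < -40 and 90 < psi <= 180)
#                 or (-180 <= phi < -70 and psi <= -165)):
#             return "BETA"
#         return "OTHER"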
def process_alpha_helices(self, ca_list):
"""Postprocess alpha helices to remove extraneous labels.
TODO(rbharath): The comparison method here is quadratic. Can we do
better with a nice datastructure?
Parameters
----------
ca_list: list
List of all alpha carbons in protein.
"""
change = True
while change:
change = False
# A residue at index i only keeps its alpha-helix label if its CA
# is within 6 A of the CA of residue i + 3 (or i - 3)
for ca_atom_index in ca_list:
ca_atom = self.all_atoms[ca_atom_index]
if ca_atom.structure == "ALPHA":
# so it's in an alpha helix
another_alpha_is_close = False
for other_ca_atom_index in ca_list:
# so now compare that CA to all the other CA's
other_ca_atom = self.all_atoms[other_ca_atom_index]
# so it's also in an alpha helix
if other_ca_atom.structure == "ALPHA":
if (other_ca_atom.resid - 3 == ca_atom.resid
or other_ca_atom.resid + 3 == ca_atom.resid):
# so this CA atom is one of the ones the first atom
# might hydrogen bond with
if other_ca_atom.coordinates.dist_to(ca_atom.coordinates) < 6.0:
# so these two CA atoms are close enough together
# that their residues are probably hydrogen bonded
another_alpha_is_close = True
break
if not another_alpha_is_close:
self.set_structure_of_residue(ca_atom.chain, ca_atom.resid, "OTHER")
change = True
# Alpha helices are only alpha helices if they span at least 4
# residues (to wrap around and hydrogen bond). I'm going to
# require them to span at least 5 residues, based on
# examination of many structures.
for index_in_list in range(len(ca_list)-5):
index_in_pdb1 = ca_list[index_in_list]
index_in_pdb2 = ca_list[index_in_list+1]
index_in_pdb3 = ca_list[index_in_list+2]
index_in_pdb4 = ca_list[index_in_list+3]
index_in_pdb5 = ca_list[index_in_list+4]
index_in_pdb6 = ca_list[index_in_list+5]
atom1 = self.all_atoms[index_in_pdb1]
atom2 = self.all_atoms[index_in_pdb2]
atom3 = self.all_atoms[index_in_pdb3]
atom4 = self.all_atoms[index_in_pdb4]
atom5 = self.all_atoms[index_in_pdb5]
atom6 = self.all_atoms[index_in_pdb6]
if (atom1.resid + 1 == atom2.resid
and atom2.resid + 1 == atom3.resid
and atom3.resid + 1 == atom4.resid
and atom4.resid + 1 == atom5.resid
and atom5.resid + 1 == atom6.resid): # so they are sequential
if (atom1.structure != "ALPHA"
and atom2.structure == "ALPHA"
and atom3.structure != "ALPHA"):
self.set_structure_of_residue(atom2.chain, atom2.resid, "OTHER")
change = True
if (atom2.structure != "ALPHA"
and atom3.structure == "ALPHA"
and atom4.structure != "ALPHA"):
self.set_structure_of_residue(atom3.chain, atom3.resid, "OTHER")
change = True
if (atom3.structure != "ALPHA"
and atom4.structure == "ALPHA"
and atom5.structure != "ALPHA"):
self.set_structure_of_residue(atom4.chain, atom4.resid, "OTHER")
change = True
if (atom4.structure != "ALPHA"
and atom5.structure == "ALPHA"
and atom6.structure != "ALPHA"):
self.set_structure_of_residue(atom5.chain, atom5.resid, "OTHER")
change = True
if (atom1.structure != "ALPHA"
and atom2.structure == "ALPHA"
and atom3.structure == "ALPHA"
and atom4.structure != "ALPHA"):
self.set_structure_of_residue(atom2.chain, atom2.resid, "OTHER")
self.set_structure_of_residue(atom3.chain, atom3.resid, "OTHER")
change = True
if (atom2.structure != "ALPHA"
and atom3.structure == "ALPHA"
and atom4.structure == "ALPHA"
and atom5.structure != "ALPHA"):
self.set_structure_of_residue(atom3.chain, atom3.resid, "OTHER")
self.set_structure_of_residue(atom4.chain, atom4.resid, "OTHER")
change = True
if (atom3.structure != "ALPHA"
and atom4.structure == "ALPHA"
and atom5.structure == "ALPHA"
and atom6.structure != "ALPHA"):
self.set_structure_of_residue(atom4.chain, atom4.resid, "OTHER")
self.set_structure_of_residue(atom5.chain, atom5.resid, "OTHER")
change = True
if (atom1.structure != "ALPHA"
and atom2.structure == "ALPHA"
and atom3.structure == "ALPHA"
and atom4.structure == "ALPHA"
and atom5.structure != "ALPHA"):
self.set_structure_of_residue(atom2.chain, atom2.resid, "OTHER")
self.set_structure_of_residue(atom3.chain, atom3.resid, "OTHER")
self.set_structure_of_residue(atom4.chain, atom4.resid, "OTHER")
change = True
if (atom2.structure != "ALPHA"
and atom3.structure == "ALPHA"
and atom4.structure == "ALPHA"
and atom5.structure == "ALPHA"
and atom6.structure != "ALPHA"):
self.set_structure_of_residue(atom3.chain, atom3.resid, "OTHER")
self.set_structure_of_residue(atom4.chain, atom4.resid, "OTHER")
self.set_structure_of_residue(atom5.chain, atom5.resid, "OTHER")
change = True
if (atom1.structure != "ALPHA"
and atom2.structure == "ALPHA"
and atom3.structure == "ALPHA"
and atom4.structure == "ALPHA"
and atom5.structure == "ALPHA"
and atom6.structure != "ALPHA"):
self.set_structure_of_residue(atom2.chain, atom2.resid, "OTHER")
self.set_structure_of_residue(atom3.chain, atom3.resid, "OTHER")
self.set_structure_of_residue(atom4.chain, atom4.resid, "OTHER")
self.set_structure_of_residue(atom5.chain, atom5.resid, "OTHER")
change = True
def process_beta_sheets(self, ca_list):
"""Postprocess beta sheets to remove extraneous labels.
TODO(rbharath): The comparison method here is quadratic. Can we do
better with a nice datastructure?
Parameters
----------
ca_list: list
List of all alpha carbons in protein.
"""
change = True
while change:
change = False
# now go through each of the BETA CA atoms. A residue only keeps
# its beta-sheet label if its CA is within 6.0 A of another BETA
# CA on the same chain with a residue-index difference > 2.
for ca_atom_index in ca_list:
ca_atom = self.all_atoms[ca_atom_index]
if ca_atom.structure == "BETA":
# so it's in a beta sheet
another_beta_is_close = False
for other_ca_atom_index in ca_list:
if other_ca_atom_index != ca_atom_index:
# so not comparing an atom to itself
other_ca_atom = self.all_atoms[other_ca_atom_index]
if other_ca_atom.structure == "BETA":
# so you're comparing it only to other BETA-sheet atoms
if other_ca_atom.chain == ca_atom.chain:
# so require them to be on the same chain, needed so that
# residue indices can be fairly compared
if math.fabs(other_ca_atom.resid - ca_atom.resid) > 2:
# so the two residues are not simply adjacent to each
# other on the chain
if (ca_atom.coordinates.dist_to(
other_ca_atom.coordinates) < 6.0):
# so these two atoms are close to each other
another_beta_is_close = True
break
if not another_beta_is_close:
self.set_structure_of_residue(ca_atom.chain, ca_atom.resid, "OTHER")
change = True
# Now some more post-processing needs to be done. Do this
# again to clear up mess that may have just been created
# (single residue beta strand, for example)
# Beta sheets are usually at least 3 residues long
for index_in_list in range(len(ca_list)-3):
index_in_pdb1 = ca_list[index_in_list]
index_in_pdb2 = ca_list[index_in_list+1]
index_in_pdb3 = ca_list[index_in_list+2]
index_in_pdb4 = ca_list[index_in_list+3]
atom1 = self.all_atoms[index_in_pdb1]
atom2 = self.all_atoms[index_in_pdb2]
atom3 = self.all_atoms[index_in_pdb3]
atom4 = self.all_atoms[index_in_pdb4]
if (atom1.resid + 1 == atom2.resid and atom2.resid + 1 ==
atom3.resid and atom3.resid + 1 == atom4.resid):
# so they are sequential
if (atom1.structure != "BETA"
and atom2.structure == "BETA"
and atom3.structure != "BETA"):
self.set_structure_of_residue(atom2.chain, atom2.resid, "OTHER")
change = True
if (atom2.structure != "BETA"
and atom3.structure == "BETA"
and atom4.structure != "BETA"):
self.set_structure_of_residue(atom3.chain, atom3.resid, "OTHER")
change = True
if (atom1.structure != "BETA"
and atom2.structure == "BETA"
and atom3.structure == "BETA"
and atom4.structure != "BETA"):
self.set_structure_of_residue(atom2.chain, atom2.resid, "OTHER")
self.set_structure_of_residue(atom3.chain, atom3.resid, "OTHER")
change = True
def assign_secondary_structure(self):
"""Assign secondary structure labels (assuming self is a protein).
Keys in this function have form RESNUMBER_CHAIN where CHAIN is
the chain identifier for this molecule.
"""
structure = self.get_structure_dict()
# Now update each of the atoms with this structural information
for atom_index in self.all_atoms:
atom = self.all_atoms[atom_index]
key = str(atom.resid) + "_" + atom.chain
atom.structure = structure[key]
ca_list = [] # first build a list of the indices of all the alpha carbons
for atom_index in self.all_atoms:
atom = self.all_atoms[atom_index]
if (atom.residue.strip() in self.protein_resnames
and atom.atomname.strip() == "CA"):
ca_list.append(atom_index)
# Use this list to perform sanity checks on alpha-helix and beta-sheet
# labels.
self.process_alpha_helices(ca_list)
self.process_beta_sheets(ca_list)
def set_structure_of_residue(self, chain, resid, structure):
"""Set structure of all atoms with specified chain and residue."""
for atom_index in self.all_atoms:
atom = self.all_atoms[atom_index]
if atom.chain == chain and atom.resid == resid:
atom.structure = structure
|
rbharath/vs-utils
|
vs_utils/utils/nnscore_pdb.py
|
Python
|
gpl-3.0
| 64,048
|
[
"CRYSTAL",
"VMD"
] |
61ac2d7d651c2a5168bc4a89e7df560a3e391d88cbf67fc4b21b4ffb7aec7f64
|
# -*- coding: utf-8 -*-
"""
Support for calculating D spacing for powder diffraction lines as
as function of pressure and temperature, given symmetry, zero-pressure lattice
constants and equation of state parameters.
Author:
Mark Rivers
Created:
Sept. 10, 2002 from older IDL version
Modifications:
Sept. 26, 2002 MLR
- Implemented Birch-Murnaghan solver using CARSnp.newton root finder
May 27, 2014 Clemens Prescher
- changed np function to numpy versions,
- using scipy optimize for solving the inverse Birch-Murnaghan problem
- fixed a bug which was causing a gamma0 to be 0 for cubic unit cell
August 22, 2014 Clemens Prescher
- calculation of d spacings is now done by using arrays
- added several new utility functions -- calculate_d0, add_reflection
- updated the write_file function to be able to use new standard
August 26, 2014 Clemens Prescher
- added sorting functions
- fixed the d spacing calculation for triclinic structure - equation used was wrong...
August 27, 2014 Clemens Prescher
- added modified flag and the surrounding functions. When an attribute is changed, the flag is set to True and the
filename and name will have an asterisk appended to indicate that this is not the original jcpds file loaded
- added a reload function
- renamed read and write to load and save
- the load function will now reset all parameters (previously parameters not set in the newly loaded file, were
taken over from the previous state of the object)
"""
import logging
logger = logging.getLogger(__name__)
import numpy as np
from scipy.optimize import minimize
import os
class jcpds_reflection:
"""
Class that defines a reflection.
Attributes:
d0: Zero-pressure lattice spacing
d: Lattice spacing at P and T
inten: Relative intensity to most intense reflection for this material
h: H index for this reflection
k: K index for this reflection
l: L index for this reflection
"""
def __init__(self, h=0., k=0., l=0., intensity=0., d=0.):
self.d0 = d
self.d = d
self.intensity = intensity
self.h = h
self.k = k
self.l = l
def __str__(self):
return "{:2d},{:2d},{:2d}\t{:.2f}\t{:.3f}".format(self.h, self.k, self.l, self.intensity, self.d0)
class MyDict(dict):
def __init__(self):
super(MyDict, self).__init__()
self.setdefault('modified', False)
def __setitem__(self, key, value):
if key in ['comments', 'a0', 'b0', 'c0', 'alpha0', 'beta0', 'gamma0',
'symmetry', 'k0', 'k0p0', 'dk0dt', 'dk0pdt',
'alpha_t0', 'd_alpha_dt', 'reflections']:
self.__setitem__('modified', True)
super(MyDict, self).__setitem__(key, value)
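# Editor's note (hedged usage sketch): assigning any structural key flips
# the modified flag, while bookkeeping keys leave it untouched:
#
#     params = MyDict()
#     params['pressure'] = 10.   # bookkeeping key: params['modified'] stays False
#     params['k0'] = 160.        # structural key: params['modified'] is now True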
class jcpds(object):
def __init__(self):
self._filename = ''
self._name = ''
self.params = MyDict()
self.params['version'] = 0
self.params['comments'] = []
self.params['symmetry'] = ''
self.params['k0'] = 0.
self.params['k0p0'] = 0. # k0p at 298K
self.params['k0p'] = 0. # k0p at high T
self.params['dk0dt'] = 0.
self.params['dk0pdt'] = 0.
self.params['alpha_t0'] = 0. # alphat at 298K
self.params['alpha_t'] = 0. # alphat at high temp.
self.params['d_alpha_dt'] = 0.
self.params['a0'] = 0.
self.params['b0'] = 0.
self.params['c0'] = 0.
self.params['alpha0'] = 0.
self.params['beta0'] = 0.
self.params['gamma0'] = 0.
self.params['v0'] = 0.
self.params['a'] = 0.
self.params['b'] = 0.
self.params['c'] = 0.
self.params['alpha'] = 0.
self.params['beta'] = 0.
self.params['gamma'] = 0.
self.params['v'] = 0.
self.params['pressure'] = 0.
self.params['temperature'] = 298.
self.reflections = []
self.params['modified'] = False
def load_file(self, filename):
"""
Reads a JCPDS file into the JCPDS object.
Inputs:
filename: The name of the file to read.
Procedure:
This procedure read the JCPDS file. There are several versions of the
formats used for JCPDS files. Versions 1, 2 and 3 used a fixed
format, where a particular entry had to be in a specific location on
a specific line. Versions 2 and 3 were used only by Dan Shim.
This routine can read these old files, but no new files should be
created in this format; they should be converted to Version 4.
Version 4 is a "keyword" driven format. Each line in the file is of
the form:
KEYWORD: value
The order of the lines is not important, except that the first line of
the file must be "VERSION: 4".
The following keywords are currently supported:
COMMENT: Any information describing the material, literature
references, etc. There can be multiple comment lines
per file.
K0: The bulk modulus in GPa.
K0P: The change in K0 with pressure, for Birch-Murnaghan
equation of state. Dimensionless.
DK0DT: The temperature derivative of K0, GPa/K.
DK0PDT: The temperature derivative of K0P, 1/K.
SYMMETRY: One of CUBIC, TETRAGONAL, HEXAGONAL, RHOMBOHEDRAL,
ORTHORHOMBIC, MONOCLINIC or TRICLINIC
A: The unit cell dimension A
B: The unit cell dimension B
C: The unit cell dimension C
ALPHA: The unit cell angle ALPHA
BETA: The unit cell angle BETA
GAMMA: The unit cell angle GAMMA
VOLUME: The unit cell volume
ALPHAT: The thermal expansion coefficient, 1/K
DALPHADT: The temperature derivative of the thermal expansion
coefficient, 1/K^2
DIHKL: For each reflection, the D spacing in Angstrom, the
relative intensity (0-100), and the H, K, L indices.
This procedure calculates the D spacing of each reflection, using the
symmetry and unit cell parameters from the file. It compares the
calculated D spacing with the input D spacing for each line. If they
disagree by more than 0.1% then a warning message is printed.
The following is an example JCPDS file in the Version 4 format:
VERSION: 4
COMMENT: Alumina (JCPDS 0-173, EOS n/a)
K0: 194.000
K0P: 5.000
SYMMETRY: HEXAGONAL
A: 4.758
C: 12.99
VOLUME: 22.0640
ALPHAT: 2.000e-6
DIHKL: 3.4790 75.0 0 1 2
DIHKL: 2.5520 90.0 1 0 4
DIHKL: 2.3790 40.0 1 1 0
DIHKL: 2.0850 100.0 1 1 3
DIHKL: 1.7400 45.0 0 2 4
DIHKL: 1.6010 80.0 1 1 6
DIHKL: 1.4040 30.0 2 1 4
DIHKL: 1.3740 50.0 3 0 0
DIHKL: 1.2390 16.0 1 0 10
Note that B, ALPHA, BETA and GAMMA are not present, since they are
not needed for a hexagonal material, and will be simply ignored if
they are present.
"""
self.__init__()
# Initialize variables
self._filename = filename
# Construct base name = file without path and without extension
name = os.path.basename(filename)
pos = name.find('.')
if (pos >= 0): name = name[0:pos]
self._name = name
self.params['comments'] = []
self.reflections = []
# Determine what version of JCPDS file this is.
# Current files have a first line starting with the string VERSION:
fp = open(filename, 'r')
line = fp.readline()
pos = line.index(' ')
tag = line[0:pos].upper()
value = line[pos:].strip()
if tag == 'VERSION:':
self.version = value
# This is the current, keyword based version of JCPDS file
while (1):
line = fp.readline()
if line == '': break
pos = line.index(' ')
tag = line[0:pos].upper()
value = line[pos:].strip()
if tag == 'COMMENT:':
self.params['comments'].append(value)
elif tag == 'K0:':
self.params['k0'] = float(value)
elif tag == 'K0P:':
self.params['k0p0'] = float(value)
elif tag == 'DK0DT:':
self.params['dk0dt'] = float(value)
elif tag == 'DK0PDT:':
self.params['dk0pdt'] = float(value)
elif tag == 'SYMMETRY:':
self.params['symmetry'] = value.upper()
elif tag == 'A:':
self.params['a0'] = float(value)
elif tag == 'B:':
self.params['b0'] = float(value)
elif tag == 'C:':
self.params['c0'] = float(value)
elif tag == 'ALPHA:':
self.params['alpha0'] = float(value)
elif tag == 'BETA:':
self.params['beta0'] = float(value)
elif tag == 'GAMMA:':
self.params['gamma0'] = float(value)
elif tag == 'VOLUME:':
self.params['v0'] = float(value)
elif tag == 'ALPHAT:':
self.params['alpha_t0'] = float(value)
elif tag == 'DALPHADT:':
self.params['d_alpha_dt'] = float(value)
elif tag == 'DIHKL:':
dtemp = value.split()
dtemp = list(map(float, dtemp))
reflection = jcpds_reflection()
reflection.d0 = dtemp[0]
reflection.intensity = dtemp[1]
reflection.h = int(dtemp[2])
reflection.k = int(dtemp[3])
reflection.l = int(dtemp[4])
self.reflections.append(reflection)
else:
# This is an old format JCPDS file
self.version = 1.
header = ''
self.params['comments'].append(line) # Read above
line = fp.readline()
# Replace any commas with blanks, split at blanks
temp = line.replace(',', ' ').split()
temp = list(map(float, temp[0:5]))
# The symmetry codes are as follows:
# 1 -- cubic
# 2 -- hexagonal
if temp[0] == 1:
self.params['symmetry'] = 'CUBIC'
elif temp[0] == 2:
self.params['symmetry'] = 'HEXAGONAL'
self.params['a0'] = temp[1]
self.params['k0'] = temp[2]
self.params['k0p0'] = temp[3]
c0a0 = temp[4]
self.params['c0'] = self.params['a0'] * c0a0
line = fp.readline() # Ignore, just column labels
while 1:
line = fp.readline()
if line == '': break
dtemp = line.split()
dtemp = list(map(float, dtemp))
reflection = jcpds_reflection()
reflection.d0 = dtemp[0]
reflection.intensity = dtemp[1]
reflection.h = int(dtemp[2])
reflection.k = int(dtemp[3])
reflection.l = int(dtemp[4])
self.reflections.append(reflection)
fp.close()
self.compute_v0()
self.params['a'] = self.params['a0']
self.params['b'] = self.params['b0']
self.params['c'] = self.params['c0']
self.params['alpha'] = self.params['alpha0']
self.params['beta'] = self.params['beta0']
self.params['gamma'] = self.params['gamma0']
self.params['v'] = self.params['v0']
# Compute D spacings, make sure they are consistent with the input values
self.compute_d()
for reflection in self.reflections:
reflection.d0 = reflection.d
self.params['modified'] = False
## we just removed this check because it should be better to care more about the actual a,b,c values than
# individual d spacings
# reflections = self.get_reflections()
# for r in reflections:
# diff = abs(r.d0 - r.d) / r.d0
# if (diff > .001):
# logger.info(('Reflection ', r.h, r.k, r.l, \
# ': calculated D ', r.d, \
# ') differs by more than 0.1% from input D (', r.d0, ')'))
def save_file(self, filename):
"""
Writes a JCPDS object to a file.
Inputs:
filename: The name of the file to be written.
Procedure:
This procedure writes a JCPDS file. It always writes files in the
current, keyword-driven format (Version 4). See the documentation for
load_file() for information on the file format.
Example:
This reads an old format file, then writes a new format file.
j = jcpds.jcpds()
j.load_file('alumina_old.jcpds')
j.save_file('alumina_new.jcpds')
"""
fp = open(filename, 'w')
fp.write('VERSION: 4\n')
for comment in self.params['comments']:
fp.write('COMMENT: ' + comment + '\n')
fp.write('K0: ' + str(self.params['k0']) + '\n')
fp.write('K0P: ' + str(self.params['k0p0']) + '\n')
fp.write('DK0DT: ' + str(self.params['dk0dt']) + '\n')
fp.write('DK0PDT: ' + str(self.params['dk0pdt']) + '\n')
fp.write('SYMMETRY: ' + self.params['symmetry'] + '\n')
fp.write('A: ' + str(self.params['a0']) + '\n')
fp.write('B: ' + str(self.params['b0']) + '\n')
fp.write('C: ' + str(self.params['c0']) + '\n')
fp.write('ALPHA: ' + str(self.params['alpha0']) + '\n')
fp.write('BETA: ' + str(self.params['beta0']) + '\n')
fp.write('GAMMA: ' + str(self.params['gamma0']) + '\n')
fp.write('VOLUME: ' + str(self.params['v0']) + '\n')
fp.write('ALPHAT: ' + str(self.params['alpha_t0']) + '\n')
fp.write('DALPHADT: ' + str(self.params['d_alpha_dt']) + '\n')
reflections = self.get_reflections()
for r in reflections:
fp.write('DIHKL: {0:g}\t{1:g}\t{2:g}\t{3:g}\t{4:g}\n'.format(r.d0, r.intensity, r.h, r.k, r.l))
fp.close()
self._filename = filename
name = os.path.basename(filename)
pos = name.find('.')
if pos >= 0: name = name[0:pos]
self._name = name
self.params['modified'] = False
def reload_file(self):
pressure = self.params['pressure']
temperature = self.params['temperature']
self.load_file(self._filename)
self.params['pressure'] = pressure
self.params['temperature'] = temperature
self.compute_d()
# def __setattr__(self, key, value):
# if key in ['comments', 'a0', 'b0', 'c0', 'alpha0', 'beta0', 'gamma0',
# 'symmetry', 'k0', 'k0p0', 'dk0dt', 'dk0pdt',
# 'alpha_t0', 'd_alpha_dt', 'reflections']:
# self.modified = True
# super(jcpds, self).__setattr__(key, value)
@property
def filename(self):
if self.params['modified']:
return self._filename + '*'
else:
return self._filename
@filename.setter
def filename(self, value):
self._filename = value
@property
def name(self):
if self.params['modified']:
return self._name + '*'
else:
return self._name
@name.setter
def name(self, value):
self._name = value
def compute_v0(self):
"""
Computes the unit cell volume of the material at zero pressure and
temperature from the unit cell parameters.
Procedure:
This procedure computes the unit cell volume from the unit cell
parameters.
Example:
Compute the zero pressure and temperature unit cell volume of alumina
j = jcpds()
j.load_file('alumina.jcpds')
j.compute_v0()
"""
if self.params['symmetry'] == 'CUBIC':
self.params['b0'] = self.params['a0']
self.params['c0'] = self.params['a0']
self.params['alpha0'] = 90.
self.params['beta0'] = 90.
self.params['gamma0'] = 90.
elif self.params['symmetry'] == 'TETRAGONAL':
self.params['b0'] = self.params['a0']
self.params['alpha0'] = 90.
self.params['beta0'] = 90.
self.params['gamma0'] = 90.
elif self.params['symmetry'] == 'ORTHORHOMBIC':
self.params['alpha0'] = 90.
self.params['beta0'] = 90.
self.params['gamma0'] = 90.
elif self.params['symmetry'] == 'HEXAGONAL' or self.params['symmetry'] == "TRIGONAL":
self.params['b0'] = self.params['a0']
self.params['alpha0'] = 90.
self.params['beta0'] = 90.
self.params['gamma0'] = 120.
elif self.params['symmetry'] == 'RHOMBOHEDRAL':
self.params['b0'] = self.params['a0']
self.params['c0'] = self.params['a0']
self.params['beta0'] = self.params['alpha0']
self.params['gamma0'] = self.params['alpha0']
elif self.params['symmetry'] == 'MONOCLINIC':
self.params['alpha0'] = 90.
self.params['gamma0'] = 90.
elif self.params['symmetry'] == 'TRICLINIC':
pass
dtor = np.pi / 180.
self.params['v0'] = (self.params['a0'] * self.params['b0'] * self.params['c0'] *
np.sqrt(1. -
np.cos(self.params['alpha0'] * dtor) ** 2 -
np.cos(self.params['beta0'] * dtor) ** 2 -
np.cos(self.params['gamma0'] * dtor) ** 2 +
2. * (np.cos(self.params['alpha0'] * dtor) *
np.cos(self.params['beta0'] * dtor) *
np.cos(self.params['gamma0'] * dtor))))
def compute_volume(self, pressure=None, temperature=None):
"""
Computes the unit cell volume of the material.
It can compute volumes at different pressures and temperatures.
Keywords:
pressure:
The pressure in GPa. If not present then the pressure is
assumed to be 0.
temperature:
The temperature in K. If not present or zero, then the
temperature is assumed to be 298K, i.e. room temperature.
Procedure:
This procedure computes the unit cell volume. It starts with the
volume read from the JCPDS file or computed from the zero-pressure,
room temperature lattice constants. It does the following:
1) Corrects K0 for temperature if DK0DT is non-zero.
2) Computes volume at zero-pressure and the specified temperature
if ALPHAT0 is non-zero.
3) Computes the volume at the specified pressure if K0 is non-zero.
The routine uses scipy.optimize.minimize to solve the third
order Birch-Murnaghan equation of state.
Example:
Compute the unit cell volume of alumina at 100 GPa and 2500 K.
j = jcpds()
j.load_file('alumina.jcpds')
j.compute_volume(100, 2500)
"""
if pressure is None:
pressure = self.params['pressure']
else:
self.params['pressure'] = pressure
if temperature is None:
temperature = self.params['temperature']
else:
self.params['temperature'] = temperature
# Assume 0 K really means room T
if temperature == 0: temperature = 298.
# Compute values of K0, K0P and alphat at this temperature
self.params['alpha_t'] = self.params['alpha_t0'] + self.params['d_alpha_dt'] * (temperature - 298.)
self.params['k0p'] = self.params['k0p0'] + self.params['dk0pdt'] * (temperature - 298.)
k0 = self.params['k0'] + self.params['dk0dt'] * (temperature - 298.)
k0p = self.params['k0p']
if pressure == 0.:
self.params['v'] = self.params['v0'] * (1 + self.params['alpha_t'] * (temperature - 298.))
elif pressure < 0:
if self.params['k0'] <= 0.:
logger.info('K0 is zero, computing zero pressure volume')
self.params['v'] = self.params['v0']
else:
self.params['v'] = self.params['v0'] * (1 - pressure / self.params['k0'])
else:
if self.params['k0'] <= 0.:
logger.info('K0 is zero, computing zero pressure volume')
self.params['v'] = self.params['v0']
else:
self.mod_pressure = pressure - \
self.params['alpha_t'] * k0 * (temperature - 298.)
res = minimize(self.bm3_inverse, 1.,
args=(k0, k0p, self.mod_pressure),
method='Nelder-Mead')
if not res.success:
raise ArithmeticError("minimize didn't find a minimum!\n" + str(res))
self.params['v'] = self.params['v0'] / float(res.x)
def bm3_inverse(self, v0_v, k0, k0p, pressure):
"""
Returns the squared difference between the third order Birch-Murnaghan
pressure and the target pressure. It is used to solve for V0/V for a
given P, K0 and K0'.
Inputs:
v0_v: The ratio of the zero pressure volume to the high pressure
volume
k0: The bulk modulus in GPa.
k0p: The pressure derivative of the bulk modulus (dimensionless).
pressure: The target pressure in GPa.
Outputs:
This function returns the squared difference between the third order
Birch-Murnaghan pressure and the input pressure.
Procedure:
This procedure computes the pressure from V0/V, K0 and K0', subtracts
the input pressure, and squares the result so that a scalar minimizer
can drive it to zero.
Example:
Compute the squared residual between the calculated pressure and
100 GPa at V0/V = 1.3:
j = jcpds()
diff = j.bm3_inverse(1.3, k0=100., k0p=4., pressure=100.)
"""
return (1.5 * k0 * (v0_v ** (7. / 3.) - v0_v ** (5. / 3.)) *
(1 + 0.75 * (k0p - 4.) * (v0_v ** (2. / 3.) - 1.0)) -
pressure) ** 2
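# Editor's note: the expression above is the third-order Birch-Murnaghan
# pressure
#
#     P(x) = 1.5 * K0 * (x**(7/3) - x**(5/3))
#            * (1 + 0.75 * (K0' - 4) * (x**(2/3) - 1)),   x = V0/V,
#
# returned as a squared residual so a scalar minimizer can replace a root
# finder. A hypothetical standalone check (illustrative numbers only):
#
#     from scipy.optimize import minimize
#     k0, k0p, p = 160., 4., 10.
#     bm3 = lambda x: 1.5 * k0 * (x**(7./3.) - x**(5./3.)) \
#                     * (1 + 0.75 * (k0p - 4.) * (x**(2./3.) - 1.))
#     res = minimize(lambda x: (bm3(float(x)) - p)**2, 1.05,
#                    method='Nelder-Mead')
#     # res.x is ~1.058: roughly 5-6% volume compression at 10 GPa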
def compute_d0(self):
"""
Computes d0 values for all reflections from the current lattice parameters.
"""
a = self.params['a0']
b = self.params['b0']
c = self.params['c0']
degree_to_radians = np.pi / 180.
alpha = self.params['alpha0'] * degree_to_radians
beta = self.params['beta0'] * degree_to_radians
gamma = self.params['gamma0'] * degree_to_radians
h = np.zeros(len(self.reflections))
k = np.zeros(len(self.reflections))
l = np.zeros(len(self.reflections))
for ind, reflection in enumerate(self.reflections):
h[ind] = reflection.h
k[ind] = reflection.k
l[ind] = reflection.l
if self.params['symmetry'] == 'CUBIC':
d2inv = (h ** 2 + k ** 2 + l ** 2) / a ** 2
elif self.params['symmetry'] == 'TETRAGONAL':
d2inv = (h ** 2 + k ** 2) / a ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'ORTHORHOMBIC':
d2inv = h ** 2 / a ** 2 + k ** 2 / b ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'HEXAGONAL' or self.params['symmetry'] == 'TRIGONAL':
d2inv = (h ** 2 + h * k + k ** 2) * 4. / 3. / a ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'RHOMBOHEDRAL':
d2inv = (((1. + np.cos(alpha)) * ((h ** 2 + k ** 2 + l ** 2) -
(1 - np.tan(0.5 * alpha) ** 2) * (h * k + k * l + l * h))) /
(a ** 2 * (1 + np.cos(alpha) - 2 * np.cos(alpha) ** 2)))
elif self.params['symmetry'] == 'MONOCLINIC':
# minus sign on the cross term, consistent with compute_d() below
d2inv = (h ** 2 / np.sin(beta) ** 2 / a ** 2 +
k ** 2 / b ** 2 +
l ** 2 / np.sin(beta) ** 2 / c ** 2 -
2 * h * l * np.cos(beta) / (a * c * np.sin(beta) ** 2))
elif self.params['symmetry'] == 'TRICLINIC':
V = (a * b * c *
np.sqrt(1. - np.cos(alpha) ** 2 - np.cos(beta) ** 2 -
np.cos(gamma) ** 2 +
2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma)))
s11 = b ** 2 * c ** 2 * np.sin(alpha) ** 2
s22 = a ** 2 * c ** 2 * np.sin(beta) ** 2
s33 = a ** 2 * b ** 2 * np.sin(gamma) ** 2
s12 = a * b * c ** 2 * (np.cos(alpha) * np.cos(beta) -
np.cos(gamma))
s23 = a ** 2 * b * c * (np.cos(beta) * np.cos(gamma) -
np.cos(alpha))
s31 = a * b ** 2 * c * (np.cos(gamma) * np.cos(alpha) -
np.cos(beta))
d2inv = (s11 * h ** 2 + s22 * k ** 2 + s33 * l ** 2 +
2. * s12 * h * k + 2. * s23 * k * l + 2. * s31 * l * h) / V ** 2
else:
logger.error(('Unknown crystal symmetry = ' + self.params['symmetry']))
d2inv = 1
d_spacings = np.sqrt(1. / d2inv)
for ind in range(len(self.reflections)):
self.reflections[ind].d0 = d_spacings[ind]
def compute_d(self, pressure=None, temperature=None):
"""
Computes the D spacings of the material.
It can compute D spacings at different pressures and temperatures.
Keywords:
pressure:
The pressure in GPa. If not present then the pressure is
assumed to be 0.
temperature:
The temperature in K. If not present or zero, then the
temperature is assumed to be 298K, i.e. room temperature.
Outputs:
None. The D spacing information in the JCPDS object is calculated.
Procedure:
This procedure first calls jcpds.compute_volume().
It then assumes that each lattice dimension fractionally changes by
the cube root of the fractional change in the volume.
Using the equations for the each symmetry class it then computes the
change in D spacing of each reflection.
Example:
Compute the D spacings of alumina at 100 GPa and 2500 K.
j = jcpds()
j.load_file('alumina.jcpds')
j.compute_d(100, 2500)
refl = j.get_reflections()
for r in refl:
# Print the D spacing at ambient conditions
print(r.d0)
# Print the D spacing at high pressure and temperature
print(r.d)
"""
self.compute_volume(pressure, temperature)
# Assume each cell dimension changes by the same fractional amount = cube
# root of volume change ratio
ratio = float((self.params['v'] / self.params['v0']) ** (1.0 / 3.0))
self.params['a'] = self.params['a0'] * ratio
self.params['b'] = self.params['b0'] * ratio
self.params['c'] = self.params['c0'] * ratio
a = self.params['a']
b = self.params['b']
c = self.params['c']
dtor = np.pi / 180.
alpha = self.params['alpha0'] * dtor
beta = self.params['beta0'] * dtor
gamma = self.params['gamma0'] * dtor
h = np.zeros(len(self.reflections))
k = np.zeros(len(self.reflections))
l = np.zeros(len(self.reflections))
for ind, reflection in enumerate(self.reflections):
h[ind] = reflection.h
k[ind] = reflection.k
l[ind] = reflection.l
if self.params['symmetry'] == 'CUBIC':
d2inv = (h ** 2 + k ** 2 + l ** 2) / a ** 2
elif self.params['symmetry'] == 'TETRAGONAL':
d2inv = (h ** 2 + k ** 2) / a ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'ORTHORHOMBIC':
d2inv = h ** 2 / a ** 2 + k ** 2 / b ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'HEXAGONAL' or self.params['symmetry'] == 'TRIGONAL':
d2inv = (h ** 2 + h * k + k ** 2) * 4. / 3. / a ** 2 + l ** 2 / c ** 2
elif self.params['symmetry'] == 'RHOMBOHEDRAL':
d2inv = (((1. + np.cos(alpha)) * ((h ** 2 + k ** 2 + l ** 2) -
(1 - np.tan(0.5 * alpha) ** 2) * (h * k + k * l + l * h))) /
(a ** 2 * (1 + np.cos(alpha) - 2 * np.cos(alpha) ** 2)))
elif self.params['symmetry'] == 'MONOCLINIC':
d2inv = (h ** 2 / (np.sin(beta) ** 2 * a ** 2) +
k ** 2 / b ** 2 +
l ** 2 / (np.sin(beta) ** 2 * c ** 2) -
2 * h * l * np.cos(beta) / (a * c * np.sin(beta) ** 2))
elif self.params['symmetry'] == 'TRICLINIC':
V = (a * b * c *
np.sqrt(1. - np.cos(alpha) ** 2 - np.cos(beta) ** 2 -
np.cos(gamma) ** 2 +
2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma)))
s11 = b ** 2 * c ** 2 * np.sin(alpha) ** 2
s22 = a ** 2 * c ** 2 * np.sin(beta) ** 2
s33 = a ** 2 * b ** 2 * np.sin(gamma) ** 2
s12 = a * b * c ** 2 * (np.cos(alpha) * np.cos(beta) -
np.cos(gamma))
s23 = a ** 2 * b * c * (np.cos(beta) * np.cos(gamma) -
np.cos(alpha))
s31 = a * b ** 2 * c * (np.cos(gamma) * np.cos(alpha) -
np.cos(beta))
d2inv = (s11 * h ** 2 + s22 * k ** 2 + s33 * l ** 2 +
2. * s12 * h * k + 2. * s23 * k * l + 2. * s31 * l * h) / V ** 2
else:
logger.error(('Unknown crystal symmetry = ' + self.params['symmetry']))
d2inv = 1
d_spacings = np.sqrt(1. / d2inv)
for ind in range(len(self.reflections)):
self.reflections[ind].d = d_spacings[ind]
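# Editor's note: the cube-root scaling above is isotropic by construction --
# axial ratios and cell angles are held at their ambient values, so any
# anisotropic compressibility is deliberately ignored.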
def add_reflection(self, h=0., k=0., l=0., intensity=0., d=0.):
new_reflection = jcpds_reflection(h, k, l, intensity, d)
self.reflections.append(new_reflection)
self.params['modified'] = True
def delete_reflection(self, ind):
del self.reflections[ind]
self.params['modified'] = True
def get_reflections(self):
"""
Returns the information for each reflection for the material.
This information is an array of elements of class jcpds_reflection
"""
return self.reflections
def reorder_reflections_by_index(self, ind_list, reversed_toggle=False):
if reversed_toggle:
ind_list = ind_list[::-1]
new_reflections = []
for ind in ind_list:
new_reflections.append(self.reflections[ind])
modified_flag = self.params['modified']
self.reflections = new_reflections
self.params['modified'] = modified_flag
def sort_reflections_by_h(self, reversed_toggle=False):
h_list = []
for reflection in self.reflections:
h_list.append(reflection.h)
sorted_ind = np.argsort(h_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def sort_reflections_by_k(self, reversed_toggle=False):
k_list = []
for reflection in self.reflections:
k_list.append(reflection.k)
sorted_ind = np.argsort(k_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def sort_reflections_by_l(self, reversed_toggle=False):
l_list = []
for reflection in self.reflections:
l_list.append(reflection.l)
sorted_ind = np.argsort(l_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def sort_reflections_by_intensity(self, reversed_toggle=False):
intensity_list = []
for reflection in self.reflections:
intensity_list.append(reflection.intensity)
sorted_ind = np.argsort(intensity_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def sort_reflections_by_d(self, reversed_toggle=False):
d_list = []
for reflection in self.reflections:
d_list.append(reflection.d0)
sorted_ind = np.argsort(d_list)
self.reorder_reflections_by_index(sorted_ind, reversed_toggle)
def has_thermal_expansion(self):
return (self.params['alpha_t0'] != 0) or (self.params['d_alpha_dt'] != 0)
def lookup_jcpds_line(in_string,
pressure=0.,
temperature=0.,
path=os.getenv('JCPDS_PATH')):
"""
Returns the d-spacing in Angstroms for a particular lattice plane.
Inputs:
Diffraction_plane: A string of the form 'Compound HKL', where Compound
is the name of a material (e.g. 'gold'), and HKL is the diffraction
plane (e.g. 220).
There must be a space between Compound and HKL.
Examples of Diffraction_plane:
'gold 111' - Gold 111 plane
'si 220' - Silicon 220 plane
Keywords:
path:
The path in which to look for the file 'Compound.jcpds'. The
default is to search in the directory pointed to by the
environment variable JCPDS_PATH.
pressure:
The pressure at which to compute the d-spacing. Not yet
implemented, zero pressure d-spacing is always returned.
temperature:
The temperature at which to compute the d-spacing. Not yet
implemented. Room-temperature d-spacing is always returned.
Outputs:
This function returns the d-spacing of the specified lattice plane.
If the input is invalid, e.g. non-existent compound or plane, then the
function returns None.
Restrictions:
This function attempts to locate the file 'Compound.jcpds', where
'Compound' is the name of the material specified in the input parameter
'Diffraction_plane'. For example:
d = lookup_jcpds_line('gold 220')
will look for the file gold.jcpds. It will either look in the directory
specified by the PATH keyword parameter to this function, or in the
directory pointed to by the environment variable JCPDS_PATH
if the PATH keyword is not specified. Note that the filename will be
case sensitive on Unix systems, but not on Windows.
This function is currently only able to handle HKL values from 0-9.
The parser will need to be improved to handle 2-digit values of H,
K or L.
Procedure:
This function calls jcpds.load_file(), searches for the specified HKL
plane, and returns its d-spacing.
Example:
d = lookup_jcpds_line('gold 111') # Look up gold 111 line
d = lookup_jcpds_line('quartz 220') # Look up the quartz 220 line
"""
temp = in_string.split()
if len(temp) < 2:
return None
file = temp[0]
# accept both 'gold 111' and 'gold 1 1 1'
nums = temp[1:]
n = len(nums)
if n == 1:
if len(nums[0]) == 3:
try:
hkl = (int(nums[0][0]), int(nums[0][1]), int(nums[0][2]))
except:
return None
else:
return None
elif n == 3:
hkl = list(map(int, nums))
else:
return None
full_file = os.path.join(path, file + '.jcpds')
try:
j = jcpds()
j.load_file(full_file)
refl = j.get_reflections()
for r in refl:
if r.h == hkl[0] and r.k == hkl[1] and r.l == hkl[2]:
return r.d0
return None
except:
return None
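if __name__ == '__main__':
    # Editor's sketch (hedged): exercise the class without a JCPDS file on
    # disk. The gold lattice constant and EOS numbers below are illustrative
    # assumptions, not values taken from this repository.
    j = jcpds()
    j.params['symmetry'] = 'CUBIC'
    j.params['a0'] = 4.0786   # lattice constant, Angstrom (assumption)
    j.params['k0'] = 167.     # bulk modulus, GPa (assumption)
    j.params['k0p0'] = 5.5
    j.compute_v0()
    j.add_reflection(1, 1, 1)
    j.compute_d0()
    j.compute_d(pressure=10., temperature=298.)
    r = j.get_reflections()[0]
    print('111: d0 = %.4f A, d(10 GPa) = %.4f A' % (r.d0, r.d))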
|
Dioptas/Dioptas
|
dioptas/model/util/jcpds.py
|
Python
|
gpl-3.0
| 36,954
|
[
"CRYSTAL"
] |
7ee0e127091cdcc654bda773dc1eea63ea41146560dd02575238894699f21f47
|
#!/usr/bin/env python
#PBS -N Timing
#PBS -m ae
#PBS -q long
#PBS -l nodes=1:opteron:ppn=4
from asap3 import *
from asap3.md.verlet import VelocityVerlet
from asap3.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.lattice.cubic import FaceCenteredCubic
import numpy as np
import time
import pickle
print_version(1)
T = 300
threads = (1, 2, 4, 8)
s1 = (100, 1000, 10000, 100000, 1000000)
s2 = (1, 2, 5)
sizes = []
for a in s1:
for b in s2:
sizes.append(a*b)
print sizes
targettime = 30.0
laststeps = 5000
lastsize = sizes[0]
lasttime = targettime
results = {}
for nthreads in threads:
try:
AsapThreads(nthreads)
except ValueError:
break
maxthread = nthreads
for natoms in sizes:
print "Timing with %i atoms (%i threads)" % (natoms, nthreads)
blocksize = int(np.ceil((natoms/4)**(1./3.)))
atoms = FaceCenteredCubic(symbol='Cu', size=(blocksize,blocksize,blocksize), pbc=False)
print "Creating block with %i atoms, cutting to %i atoms" % (len(atoms), natoms)
atoms = atoms[:natoms]
assert len(atoms) == natoms
atoms.set_calculator(EMT())
MaxwellBoltzmannDistribution(atoms, 2 * T * units.kB)
dyn = VelocityVerlet(atoms, 5*units.fs)
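# Calibration note: scale the previous run's step count so the pre-timing
# below lasts roughly a tenth of targettime, correcting for both the
# observed runtime and the change in system size.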
ptsteps = int(laststeps * (0.1 * targettime / lasttime) * lastsize / natoms)
if ptsteps < 100:
ptsteps = 100
print "Running pre-timing (%i steps)..." % (ptsteps,)
t1 = time.time()
dyn.run(ptsteps - 50)
MaxwellBoltzmannDistribution(atoms, (2 * T - atoms.get_temperature()) * units.kB)
dyn.run(50)
t1 = time.time() - t1
steps = int(ptsteps * targettime / t1)
if steps < 200:
steps = 200
print "Temperature is %.1f K" % (atoms.get_temperature(),)
print "Running main timing (%i steps)" % (steps,)
MaxwellBoltzmannDistribution(atoms, T * units.kB)
t1 = time.time()
dyn.run(steps)
t1 = time.time() - t1
lasttime = t1
print "... done in %.1f s (T = %.1f K)." % (t1, atoms.get_temperature())
t1 *= 1e6 / (natoms * steps)
print "RESULT: %.3f us/atom/step (%i atoms, %i threads)" % (t1, natoms, nthreads)
results[(natoms, nthreads)] = t1
laststeps = steps
lastsize = natoms
out = open("timing.pickle", "w")
pickle.dump(results, out)
pickle.dump(maxthread, out)
pickle.dump(threads, out)
pickle.dump(sizes, out)
out.close()
|
auag92/n2dm
|
Asap-3.8.4/Test/Timing/OpenMPTiming/TimingMatrix.py
|
Python
|
mit
| 2,515
|
[
"ASE"
] |
f8bc8cb6caf4efd44a83918894c88e19e71f19082a60976e8917a7543a154365
|
#!/usr/bin/env python
"""
This script should be used for creating the scaffolding for a test.
"""
from __future__ import print_function
import os
import sys
from ooni.utils import log
print("!!!! This test writing strategy is now deprecated !!!")
print("visit: https://ooni.readthedocs.org/en/latest/writing_tests.html "
"for new instructions")
sys.exit(1)
test_template = """\"\"\"
This is a self-generated test created by scaffolding.py.
you will need to fill it up with all your necessities.
Safe hacking :).
\"\"\"
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from ooni.plugoo.tests import ITest, OONITest
from ooni.plugoo.assets import Asset
from ooni.utils import log
class %(testShortname)sArgs(usage.Options):
optParameters = [['asset', 'a', None, 'Asset file'],
['resume', 'r', 0, 'Resume at this index']]
class %(testShortname)sTest(OONITest):
implements(IPlugin, ITest)
shortName = "%(testSNlower)s"
description = "%(testName)s"
requirements = None
options = %(testShortname)sArgs
blocking = True
def control(self, experiment_result, args):
# What you return here ends up inside of the report.
log.msg("Running control")
return {}
def experiment(self, args):
# What you return here gets handed as input to control
log.msg("Running experiment")
return {}
def load_assets(self):
if self.local_options:
return {'asset': Asset(self.local_options['asset'])}
else:
return {}
# We need to instantiate it otherwise getPlugins does not detect it
# XXX Find a way to load plugins without instantiating them.
%(testShortname)s = %(testShortname)sTest(None, None, None)
"""
test_vars = {'testName': None, 'testShortname': None}
test_vars['testName'] = raw_input('Test Name: ')
test_vars['testShortname'] = raw_input("Test Short Name: ")
test_vars['testSNlower'] = test_vars['testShortname'].lower()
fname = os.path.join('plugins', test_vars['testSNlower']+'.py')
if os.path.exists(fname):
print('WARNING! File named "%s" already exists.' % fname)
if raw_input("Do you wish to continue (y/N)? ").lower() != 'y':
print("gotcha! Dying..")
sys.exit(0)
fp = open(fname, 'w')
fp.write(test_template % test_vars)
fp.close()
|
hackerberry/ooni-probe
|
ooni/scaffolding.py
|
Python
|
bsd-2-clause
| 2,370
|
[
"VisIt"
] |
a4575b20327e4800230494e85b29a4f19df0b9c3748a902c5e5618e4cfbca6b8
|
#!/usr/bin/python
# -*- coding=UTF-8 -*-
###########################################################################
# Hex IP Toolkit: Converts IP addresses to hexadecimal and vice versa. #
# also links the IP addresses to given files #
# written to assist in kickstart / preseed installations #
# #
# Copyright (C) 2009 Hakan Bayindir #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# @author: Hakan Bayindir #
# @contact: hbayindir@gmail.com #
# @license: GNU/GPLv3 #
# @status: stable #
# @version: 1.0.1 #
# #
###########################################################################
# Return codes used in this program:
#
# 0: Everything is OK.
# 1: Invalid IP address (hex or standard).
# 2: Linking failed, file exists.
# 3: Linking failed, unknown reason.
# 4: Operating system is not POSIX.
# 5: Author displayed.
# 6: Version displayed.
# 7: License displayed.
# 8: No argument supplied.
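# Editor's note: the conversion below is plain per-octet hex formatting.
# A minimal sketch of both directions (hypothetical values):
#
#     ''.join('%02X' % int(p) for p in '192.168.0.1'.split('.'))
#     # -> 'C0A80001'
#     '.'.join(str(int('C0A80001'[i:i+2], 16)) for i in (0, 2, 4, 6))
#     # -> '192.168.0.1'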
def convertIP(ip_address, link_target, force_link):
#is this an integer (standard) IP address? Let me see...
if re.match("[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}", ip_address) <> None and len(ip_address) >= 7 and len(ip_address) < 16 and len(re.split("\.", ip_address)) == 4:
hex_ip_parts = []
ip_parts = re.split("\.", ip_address);
for ip_part in ip_parts:
if int(ip_part) > 255 or int(ip_part) < 0:
printError("Given IP address is not valid")
sys.exit(1)
temporary_hex = re.sub("0x", "", int(ip_part).__hex__())
#pad single digit conversions with a single 0
if len(temporary_hex) == 1:
temporary_hex = "0" + temporary_hex
hex_ip_parts.append(temporary_hex)
if link_target == None:
#now print out the result
for ip_part in hex_ip_parts:
sys.stdout.write(string.upper(ip_part))
#write a nice newline before exit.
sys.stdout.write("\n")
sys.exit(0)
else:
#assemble a string hex IP address
hex_ip = ""
for ip_part in hex_ip_parts:
hex_ip += string.upper(ip_part.__str__());
link(hex_ip, link_target, force_link)
sys.exit(0)
#is this an HEX IP address?
elif re.match("[0-9a-fA-F]{8,8}", ip_address) <> None and len(ip_address) == 8:
divided_hex = []
divided_hex.append("0x" + ip_address[0:2])
divided_hex.append("0x" + ip_address[2:4])
divided_hex.append("0x" + ip_address[4:6])
divided_hex.append("0x" + ip_address[6:8])
ip_address = ""
inserted_dots = 0
for hex_part in divided_hex:
ip_address += int(hex_part, 16).__str__()
if inserted_dots < 3:
ip_address += "."
inserted_dots += 1
if link_target == None:
print ip_address;
sys.exit(0)
else:
link(ip_address, link_target, force_link)
sys.exit(0)
else:
printError("Given IP address is not valid")
sys.exit(1)
#a pretty standard error reporting function that prepends ERROR: to the message and writes to console.
def printError (errorMessage):
print "ERROR: " + errorMessage
#Symbolic linking code. source is the link, destination is the real file, force is whether the link will be overwritten even if the link exists.
def link(source, target, force):
try:
os.symlink(target, source) #try to be nice here.
except OSError, e:
if e.errno == errno.EEXIST:
if force == True: #if usage of deadly force is authorized, use it without thinking twice. (erase file and re-link it)
os.remove(source)
os.symlink(target, source)
else:
printError("Cannot link file, file already exists.") #else back-off with an error message.
sys.exit(2)
else:
printError("A problem occurred during linking.") #if something happens that we don't understand, say it bravely
sys.exit(3)
if __name__ == "__main__":
#import the required things.
import os, errno, sys, re, string
from optparse import OptionParser
#This program is intended to be used on POSIX compliant operating systems.
if os.name <> "posix":
print "This program is designed to run on POSIX compliant operating systems."
sys.exit(4)
#create the option parser that parses the options for us, the lazy programmers
parser = OptionParser()
#teach how our program works to the parser, so she can understand it too.
parser.set_usage("[options] <IP ADDRESS>")
parser.set_description("Converts IP addresses to hexadecimal equivalents or vice versa. Optionally links the resulting address to given target file.")
parser.add_option("-l", "--link" , dest="link_target", help="symbolic link the result to the target file", metavar="TARGETFILE")
parser.add_option("-n", "--noforce", action="store_false", dest="force_linking", default=True, help="doesn't overwrite links if file exists")
parser.add_option("-L", "--license", action="store_true", dest="print_license", default=False, help="print licensing information and exit")
parser.add_option("-a", "--author", action="store_true", dest="print_author", default=False, help="print author & contact information and exit")
parser.add_option("-V", "--version", action="store_true", dest="print_version", default=False, help="print version information and exit")
#Light the path, show the truth! (copy supplied options to options, copy remaining to arguments)
(options, arguments) = parser.parse_args();
#Handle the information request, author, version and license respectively
if options.print_author == True:
print "This program is written by Hakan Bayindir <hbayindir@gmail.com>"
sys.exit(5)
elif options.print_version == True:
print parser.get_prog_name() + " version 1.0.5, build 20100102"
sys.exit(6)
elif options.print_license == True:
print "\nHex IP Toolkit Copyright (C) 2009 Hakan Bayindir\nThis program is licensed under GNU/GPLv3 and comes with ABSOLUTELY NO WARRANTY.\nThis is free software, and you are welcome to redistribute it.under certain conditions.\nFor more information, visit http://www.gnu.org/licenses\n"
sys.exit(7)
    #we need exactly one IP address argument; no less, no more.
    if len(arguments) != 1:
        parser.print_usage()
print "To get complete help, try " + parser.get_prog_name() + " -h"
sys.exit(8)
#everything looks OK. Do your magic.
convertIP(arguments[0], options.link_target , options.force_linking)
|
hbayindir/Hex-IP-Toolkit
|
Hex_IP_Toolkit/src/hexiptools.py
|
Python
|
gpl-3.0
| 8,480
|
[
"VisIt"
] |
9e1fcc871601f37b9b0d88c10f02527c96440548ead03df7937d8241d3a45808
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from external.wip import work_in_progress
from .molecule import Atom, Bond, Molecule, ActionError
from .group import Group
from .element import getElement, elementList
################################################################################
class TestAtom(unittest.TestCase):
"""
Contains unit tests of the Atom class.
"""
def setUp(self):
"""
A method called before each unit test in this class.
"""
self.atom = Atom(element=getElement('C'), radicalElectrons=1, charge=0, label='*1', lonePairs=0)
def testMass(self):
"""
Test the Atom.mass property.
"""
self.assertTrue(self.atom.mass == self.atom.element.mass)
def testNumber(self):
"""
Test the Atom.number property.
"""
self.assertTrue(self.atom.number == self.atom.element.number)
def testSymbol(self):
"""
Test the Atom.symbol property.
"""
self.assertTrue(self.atom.symbol == self.atom.element.symbol)
def testIsHydrogen(self):
"""
Test the Atom.isHydrogen() method.
"""
for element in elementList:
atom = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
if element.symbol == 'H':
self.assertTrue(atom.isHydrogen())
else:
self.assertFalse(atom.isHydrogen())
def testIsNonHydrogen(self):
"""
Test the Atom.isNonHydrogen() method.
"""
for element in elementList:
atom = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
if element.symbol == 'H':
self.assertFalse(atom.isNonHydrogen())
else:
self.assertTrue(atom.isNonHydrogen())
def testIsCarbon(self):
"""
Test the Atom.isCarbon() method.
"""
for element in elementList:
atom = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
if element.symbol == 'C':
self.assertTrue(atom.isCarbon())
else:
self.assertFalse(atom.isCarbon())
def testIsOxygen(self):
"""
Test the Atom.isOxygen() method.
"""
for element in elementList:
atom = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=2)
if element.symbol == 'O':
self.assertTrue(atom.isOxygen())
else:
self.assertFalse(atom.isOxygen())
def testIncrementRadical(self):
"""
Test the Atom.incrementRadical() method.
"""
radicalElectrons = self.atom.radicalElectrons
self.atom.incrementRadical()
self.assertEqual(self.atom.radicalElectrons, radicalElectrons + 1)
def testDecrementRadical(self):
"""
Test the Atom.decrementRadical() method.
"""
radicalElectrons = self.atom.radicalElectrons
self.atom.decrementRadical()
self.assertEqual(self.atom.radicalElectrons, radicalElectrons - 1)
def testApplyActionBreakBond(self):
"""
Test the Atom.applyAction() method for a BREAK_BOND action.
"""
action = ['BREAK_BOND', '*1', 'S', '*2']
for element in elementList:
atom0 = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
atom = atom0.copy()
atom.applyAction(action)
self.assertEqual(atom0.element, atom.element)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
def testApplyActionFormBond(self):
"""
Test the Atom.applyAction() method for a FORM_BOND action.
"""
action = ['FORM_BOND', '*1', 'S', '*2']
for element in elementList:
atom0 = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
atom = atom0.copy()
atom.applyAction(action)
self.assertEqual(atom0.element, atom.element)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
def testApplyActionIncrementBond(self):
"""
Test the Atom.applyAction() method for a CHANGE_BOND action.
"""
action = ['CHANGE_BOND', '*1', 1, '*2']
for element in elementList:
atom0 = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
atom = atom0.copy()
atom.applyAction(action)
self.assertEqual(atom0.element, atom.element)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
def testApplyActionDecrementBond(self):
"""
Test the Atom.applyAction() method for a CHANGE_BOND action.
"""
action = ['CHANGE_BOND', '*1', -1, '*2']
for element in elementList:
atom0 = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
atom = atom0.copy()
atom.applyAction(action)
self.assertEqual(atom0.element, atom.element)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
def testApplyActionGainRadical(self):
"""
Test the Atom.applyAction() method for a GAIN_RADICAL action.
"""
action = ['GAIN_RADICAL', '*1', 1]
for element in elementList:
atom0 = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
atom = atom0.copy()
atom.applyAction(action)
self.assertEqual(atom0.element, atom.element)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons - 1)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
def testApplyActionLoseRadical(self):
"""
Test the Atom.applyAction() method for a LOSE_RADICAL action.
"""
action = ['LOSE_RADICAL', '*1', 1]
for element in elementList:
atom0 = Atom(element=element, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
atom = atom0.copy()
atom.applyAction(action)
self.assertEqual(atom0.element, atom.element)
self.assertEqual(atom0.radicalElectrons, atom.radicalElectrons + 1)
self.assertEqual(atom0.charge, atom.charge)
self.assertEqual(atom0.label, atom.label)
def testEquivalent(self):
"""
Test the Atom.equivalent() method.
"""
for index1, element1 in enumerate(elementList[0:10]):
for index2, element2 in enumerate(elementList[0:10]):
atom1 = Atom(element=element1, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
atom2 = Atom(element=element2, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
if index1 == index2:
self.assertTrue(atom1.equivalent(atom2))
self.assertTrue(atom2.equivalent(atom1))
else:
self.assertFalse(atom1.equivalent(atom2))
self.assertFalse(atom2.equivalent(atom1))
def testIsSpecificCaseOf(self):
"""
Test the Atom.isSpecificCaseOf() method.
"""
for index1, element1 in enumerate(elementList[0:10]):
for index2, element2 in enumerate(elementList[0:10]):
atom1 = Atom(element=element1, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
atom2 = Atom(element=element2, radicalElectrons=1, charge=0, label='*1', lonePairs=0)
if index1 == index2:
self.assertTrue(atom1.isSpecificCaseOf(atom2))
else:
self.assertFalse(atom1.isSpecificCaseOf(atom2))
def testCopy(self):
"""
Test the Atom.copy() method.
"""
atom = self.atom.copy()
self.assertEqual(self.atom.element.symbol, atom.element.symbol)
self.assertEqual(self.atom.atomType, atom.atomType)
self.assertEqual(self.atom.radicalElectrons, atom.radicalElectrons)
self.assertEqual(self.atom.charge, atom.charge)
self.assertEqual(self.atom.label, atom.label)
def testPickle(self):
"""
        Test that an Atom object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
atom = cPickle.loads(cPickle.dumps(self.atom))
self.assertEqual(self.atom.element.symbol, atom.element.symbol)
self.assertEqual(self.atom.atomType, atom.atomType)
self.assertEqual(self.atom.radicalElectrons, atom.radicalElectrons)
self.assertEqual(self.atom.charge, atom.charge)
self.assertEqual(self.atom.label, atom.label)
def testIsotopeEquivalent(self):
"""
Test the Atom.equivalent() method for non-normal isotopes
"""
atom1 = Atom(element=getElement('H'))
atom2 = Atom(element=getElement('H', 2))
atom3 = Atom(element=getElement('H'))
self.assertFalse(atom1.equivalent(atom2))
self.assertTrue(atom1.equivalent(atom3))
################################################################################
class TestBond(unittest.TestCase):
"""
Contains unit tests of the Bond class.
"""
def setUp(self):
"""
A method called before each unit test in this class.
"""
self.bond = Bond(atom1=None, atom2=None, order='D')
self.orderList = ['S','D','T','B']
def testIsSingle(self):
"""
Test the Bond.isSingle() method.
"""
for order in self.orderList:
bond = Bond(None, None, order=order)
if order == 'S':
self.assertTrue(bond.isSingle())
else:
self.assertFalse(bond.isSingle())
def testIsDouble(self):
"""
Test the Bond.isDouble() method.
"""
for order in self.orderList:
bond = Bond(None, None, order=order)
if order == 'D':
self.assertTrue(bond.isDouble())
else:
self.assertFalse(bond.isDouble())
def testIsTriple(self):
"""
Test the Bond.isTriple() method.
"""
for order in self.orderList:
bond = Bond(None, None, order=order)
if order == 'T':
self.assertTrue(bond.isTriple())
else:
self.assertFalse(bond.isTriple())
def testIsBenzene(self):
"""
Test the Bond.isBenzene() method.
"""
for order in self.orderList:
bond = Bond(None, None, order=order)
if order == 'B':
self.assertTrue(bond.isBenzene())
else:
self.assertFalse(bond.isBenzene())
def testIncrementOrder(self):
"""
Test the Bond.incrementOrder() method.
"""
for order in self.orderList:
bond = Bond(None, None, order=order)
try:
bond.incrementOrder()
if order == 'S':
self.assertTrue(bond.isDouble())
elif order == 'D':
self.assertTrue(bond.isTriple())
except ActionError:
self.assertTrue(order in ['T','B'])
def testDecrementOrder(self):
"""
Test the Bond.decrementOrder() method.
"""
for order in self.orderList:
bond = Bond(None, None, order=order)
try:
bond.decrementOrder()
if order == 'D':
self.assertTrue(bond.isSingle())
elif order == 'T':
self.assertTrue(bond.isDouble())
except ActionError:
self.assertTrue(order in ['S','B'])
def testApplyActionBreakBond(self):
"""
Test the Bond.applyAction() method for a BREAK_BOND action.
"""
action = ['BREAK_BOND', '*1', 'S', '*2']
for order0 in self.orderList:
bond0 = Bond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
self.fail('Bond.applyAction() unexpectedly processed a BREAK_BOND action.')
except ActionError:
pass
def testApplyActionFormBond(self):
"""
Test the Bond.applyAction() method for a FORM_BOND action.
"""
action = ['FORM_BOND', '*1', 'S', '*2']
for order0 in self.orderList:
bond0 = Bond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
self.fail('Bond.applyAction() unexpectedly processed a FORM_BOND action.')
except ActionError:
pass
def testApplyActionIncrementBond(self):
"""
Test the Bond.applyAction() method for a CHANGE_BOND action.
"""
action = ['CHANGE_BOND', '*1', 1, '*2']
for order0 in self.orderList:
bond0 = Bond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
except ActionError:
self.assertTrue('T' == order0 or 'B' == order0)
def testApplyActionDecrementBond(self):
"""
Test the Bond.applyAction() method for a CHANGE_BOND action.
"""
action = ['CHANGE_BOND', '*1', -1, '*2']
for order0 in self.orderList:
bond0 = Bond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
except ActionError:
self.assertTrue('S' == order0 or 'B' == order0)
def testApplyActionGainRadical(self):
"""
Test the Bond.applyAction() method for a GAIN_RADICAL action.
"""
action = ['GAIN_RADICAL', '*1', 1]
for order0 in self.orderList:
bond0 = Bond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
self.fail('Bond.applyAction() unexpectedly processed a GAIN_RADICAL action.')
except ActionError:
pass
def testApplyActionLoseRadical(self):
"""
Test the Bond.applyAction() method for a LOSE_RADICAL action.
"""
action = ['LOSE_RADICAL', '*1', 1]
for order0 in self.orderList:
bond0 = Bond(None, None, order=order0)
bond = bond0.copy()
try:
bond.applyAction(action)
self.fail('Bond.applyAction() unexpectedly processed a LOSE_RADICAL action.')
except ActionError:
pass
def testEquivalent(self):
"""
Test the GroupBond.equivalent() method.
"""
for order1 in self.orderList:
for order2 in self.orderList:
bond1 = Bond(None, None, order=order1)
bond2 = Bond(None, None, order=order2)
if order1 == order2:
self.assertTrue(bond1.equivalent(bond2))
self.assertTrue(bond2.equivalent(bond1))
else:
self.assertFalse(bond1.equivalent(bond2))
self.assertFalse(bond2.equivalent(bond1))
def testIsSpecificCaseOf(self):
"""
Test the Bond.isSpecificCaseOf() method.
"""
for order1 in self.orderList:
for order2 in self.orderList:
bond1 = Bond(None, None, order=order1)
bond2 = Bond(None, None, order=order2)
if order1 == order2:
self.assertTrue(bond1.isSpecificCaseOf(bond2))
else:
self.assertFalse(bond1.isSpecificCaseOf(bond2))
def testCopy(self):
"""
Test the Bond.copy() method.
"""
bond = self.bond.copy()
self.assertEqual(self.bond.order, bond.order)
def testPickle(self):
"""
Test that a Bond object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
bond = cPickle.loads(cPickle.dumps(self.bond))
self.assertEqual(self.bond.order, bond.order)
################################################################################
class TestMolecule(unittest.TestCase):
"""
Contains unit tests of the Molecule class.
"""
def setUp(self):
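        # A hedged summary of the RMG-style adjacency lists used throughout these
        # tests, inferred from the structures below: each row reads
        # "<index> [<label>] <element> u<unpaired electrons> p<lone pairs> c<charge>
        # {<neighbor index>,<bond order>}", with bond orders S/D/T/B standing for
        # single/double/triple/benzene.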
self.adjlist_1 = """
1 *1 C u1 p0 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 *2 N u0 p0 c+1 {1,S} {5,S} {6,D}
5 O u0 p3 c-1 {4,S}
6 O u0 p2 c0 {4,D}
"""
self.molecule = [Molecule().fromAdjacencyList(self.adjlist_1)]
self.adjlist_2 = """
1 *1 C u1 p0 {2,S}
2 *2 N u0 p0 c+1 {1,S} {3,S} {4,D}
3 O u0 p3 c-1 {2,S}
4 O u0 p2 {2,D}
"""
self.molecule.append(Molecule().fromAdjacencyList(self.adjlist_2,saturateH=True))
def testClearLabeledAtoms(self):
"""
Test the Molecule.clearLabeledAtoms() method.
"""
self.molecule[0].clearLabeledAtoms()
for atom in self.molecule[0].atoms:
self.assertEqual(atom.label, '')
def testContainsLabeledAtom(self):
"""
Test the Molecule.containsLabeledAtom() method.
"""
for atom in self.molecule[0].atoms:
if atom.label != '':
self.assertTrue(self.molecule[0].containsLabeledAtom(atom.label))
self.assertFalse(self.molecule[0].containsLabeledAtom('*3'))
self.assertFalse(self.molecule[0].containsLabeledAtom('*4'))
self.assertFalse(self.molecule[0].containsLabeledAtom('*5'))
self.assertFalse(self.molecule[0].containsLabeledAtom('*6'))
def testGetLabeledAtom(self):
"""
Test the Molecule.getLabeledAtom() method.
"""
for atom in self.molecule[0].atoms:
if atom.label != '':
self.assertEqual(atom, self.molecule[0].getLabeledAtom(atom.label))
try:
self.molecule[0].getLabeledAtom('*3')
self.fail('Unexpected successful return from Molecule.getLabeledAtom() with invalid atom label.')
except ValueError:
pass
def testGetLabeledAtoms(self):
"""
Test the Molecule.getLabeledAtoms() method.
"""
labeled = self.molecule[0].getLabeledAtoms()
for atom in self.molecule[0].atoms:
if atom.label != '':
self.assertTrue(atom.label in labeled)
self.assertTrue(atom in labeled.values())
else:
self.assertFalse(atom.label in labeled)
self.assertFalse(atom in labeled.values())
multipleLabelMolecule = Molecule().fromAdjacencyList("""
1 * C u0 p0 c0 {2,S} {3,S} {5,S} {6,S}
2 * C u0 p0 c0 {1,S} {4,S} {7,S} {8,S}
3 * C u0 p0 c0 {1,S} {9,S} {10,S} {11,S}
4 * C u0 p0 c0 {2,S} {12,S} {13,S} {14,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 *1 H u0 p0 c0 {2,S}
8 *1 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 *1 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {3,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {4,S}
14 H u0 p0 c0 {4,S}
""")
labeled = multipleLabelMolecule.getLabeledAtoms()
self.assertTrue('*' in labeled)
self.assertTrue('*1' in labeled)
self.assertEqual(len(labeled['*']),4)
self.assertEqual(len(labeled['*1']),3)
def testGetFormula(self):
"""
        Test the Molecule.getFormula() method.
"""
self.assertEqual(self.molecule[0].getFormula(), 'CH2NO2')
self.assertEqual(self.molecule[1].getFormula(), 'CH2NO2')
def testRadicalCount(self):
"""
Test the Molecule.getRadicalCount() method.
"""
self.assertEqual( self.molecule[0].getRadicalCount(), sum([atom.radicalElectrons for atom in self.molecule[0].atoms]) )
self.assertEqual( self.molecule[1].getRadicalCount(), sum([atom.radicalElectrons for atom in self.molecule[1].atoms]) )
def testGetMolecularWeight(self):
"""
Test the Molecule.getMolecularWeight() method.
"""
self.assertAlmostEqual(self.molecule[0].getMolecularWeight() * 1000, 60.03, 2)
self.assertAlmostEqual(self.molecule[1].getMolecularWeight() * 1000, 60.03, 2)
def testFromAdjacencyList(self):
"""
Test the Molecule.fromAdjacencyList() method.
"""
# molecule 1
self.assertTrue(self.molecule[0].multiplicity == 2)
atom1 = self.molecule[0].atoms[0]
atom2 = self.molecule[0].atoms[3]
atom3 = self.molecule[0].atoms[4]
atom4 = self.molecule[0].atoms[5]
self.assertTrue(self.molecule[0].hasBond(atom2,atom1))
self.assertTrue(self.molecule[0].hasBond(atom2,atom3))
self.assertTrue(self.molecule[0].hasBond(atom2,atom4))
self.assertFalse(self.molecule[0].hasBond(atom1,atom3))
self.assertFalse(self.molecule[0].hasBond(atom1,atom4))
bond21 = atom2.bonds[atom1]
bond23 = atom2.bonds[atom3]
bond24 = atom2.bonds[atom4]
self.assertTrue(atom1.label == '*1')
self.assertTrue(atom1.element.symbol == 'C')
self.assertTrue(atom1.radicalElectrons == 1)
self.assertTrue(atom1.charge == 0)
self.assertTrue(atom2.label == '*2')
self.assertTrue(atom2.element.symbol == 'N')
self.assertTrue(atom2.radicalElectrons == 0)
self.assertTrue(atom2.charge == 1)
self.assertTrue(atom3.label == '')
self.assertTrue(atom3.element.symbol == 'O')
self.assertTrue(atom3.radicalElectrons == 0)
self.assertTrue(atom3.charge == -1)
self.assertTrue(atom4.label == '')
self.assertTrue(atom4.element.symbol == 'O')
self.assertTrue(atom4.radicalElectrons == 0)
self.assertTrue(atom4.charge == 0)
self.assertTrue(bond21.isSingle())
self.assertTrue(bond23.isSingle())
self.assertTrue(bond24.isDouble())
# molecule 2
self.assertTrue(self.molecule[1].multiplicity == 2)
atom1 = self.molecule[1].atoms[0]
atom2 = self.molecule[1].atoms[1]
atom3 = self.molecule[1].atoms[2]
atom4 = self.molecule[1].atoms[3]
self.assertTrue(self.molecule[1].hasBond(atom2,atom1))
self.assertTrue(self.molecule[1].hasBond(atom2,atom3))
self.assertTrue(self.molecule[1].hasBond(atom2,atom4))
self.assertFalse(self.molecule[1].hasBond(atom1,atom3))
self.assertFalse(self.molecule[1].hasBond(atom1,atom4))
bond21 = atom2.bonds[atom1]
bond23 = atom2.bonds[atom3]
bond24 = atom2.bonds[atom4]
self.assertTrue(atom1.label == '*1')
self.assertTrue(atom1.element.symbol == 'C')
self.assertTrue(atom1.radicalElectrons == 1)
self.assertTrue(atom1.charge == 0)
self.assertTrue(atom2.label == '*2')
self.assertTrue(atom2.element.symbol == 'N')
self.assertTrue(atom2.radicalElectrons == 0)
self.assertTrue(atom2.charge == 1)
self.assertTrue(atom3.label == '')
self.assertTrue(atom3.element.symbol == 'O')
self.assertTrue(atom3.radicalElectrons == 0)
self.assertTrue(atom3.charge == -1)
self.assertTrue(atom4.label == '')
self.assertTrue(atom4.element.symbol == 'O')
self.assertTrue(atom4.radicalElectrons == 0)
self.assertTrue(atom4.charge == 0)
self.assertTrue(bond21.isSingle())
self.assertTrue(bond23.isSingle())
self.assertTrue(bond24.isDouble())
def testToAdjacencyList(self):
"""
Test the Molecule.toAdjacencyList() method.
"""
adjlist_1 = self.molecule[0].toAdjacencyList(removeH=False)
newMolecule = Molecule().fromAdjacencyList(adjlist_1)
self.assertTrue(self.molecule[0].isIsomorphic(newMolecule))
#self.assertEqual(adjlist_1.strip(), self.adjlist_1.strip())
# def testFromOldAdjacencyList(self):
# """
# Test we can read things with implicit hydrogens.
# """
# adjList = """
# 1 O 0
# """ # should be Water
# molecule = Molecule().fromAdjacencyList(adjList, saturateH=True) # only works with saturateH=True
# self.assertEqual(molecule.getFormula(),'H2O')
def testIsomorphism(self):
"""
Check the graph isomorphism functions.
"""
molecule1 = Molecule().fromSMILES('C=CC=C[CH]C')
molecule2 = Molecule().fromSMILES('C[CH]C=CC=C')
self.assertTrue(molecule1.isIsomorphic(molecule2))
self.assertTrue(molecule2.isIsomorphic(molecule1))
def testSubgraphIsomorphism(self):
"""
Check the graph isomorphism functions.
"""
molecule = Molecule().fromSMILES('C=CC=C[CH]C')
group = Group().fromAdjacencyList("""
1 Cd u0 p0 c0 {2,D}
2 Cd u0 p0 c0 {1,D}
""")
self.assertTrue(molecule.isSubgraphIsomorphic(group))
mapping = molecule.findSubgraphIsomorphisms(group)
self.assertTrue(len(mapping) == 4, "len(mapping) = %d, should be = 4" % (len(mapping)))
for map in mapping:
self.assertTrue(len(map) == min(len(molecule.atoms), len(group.atoms)))
for key, value in map.iteritems():
self.assertTrue(key in molecule.atoms)
self.assertTrue(value in group.atoms)
def testSubgraphIsomorphismAgain(self):
molecule = Molecule()
molecule.fromAdjacencyList("""
1 * C u0 p0 c0 {2,D} {7,S} {8,S}
2 C u0 p0 c0 {1,D} {3,S} {9,S}
3 C u0 p0 c0 {2,S} {4,D} {10,S}
4 C u0 p0 c0 {3,D} {5,S} {11,S}
5 C u0 p0 c0 {4,S} {6,S} {12,S} {13,S}
6 C u0 p0 c0 {5,S} {14,S} {15,S} {16,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {5,S}
13 H u0 p0 c0 {5,S}
14 H u0 p0 c0 {6,S}
15 H u0 p0 c0 {6,S}
16 H u0 p0 c0 {6,S}
""")
group = Group()
group.fromAdjacencyList("""
1 * C u0 p0 c0 {2,D} {3,S} {4,S}
2 C u0 p0 c0 {1,D}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
""")
labeled1 = molecule.getLabeledAtoms().values()[0]
labeled2 = group.getLabeledAtoms().values()[0]
initialMap = {labeled1: labeled2}
self.assertTrue(molecule.isSubgraphIsomorphic(group, initialMap))
initialMap = {labeled1: labeled2}
mapping = molecule.findSubgraphIsomorphisms(group, initialMap)
self.assertTrue(len(mapping) == 2, "len(mapping) = %d, should be = 2" % (len(mapping)))
for map in mapping:
self.assertTrue(len(map) == min(len(molecule.atoms), len(group.atoms)))
for key, value in map.iteritems():
self.assertTrue(key in molecule.atoms)
self.assertTrue(value in group.atoms)
def testSubgraphIsomorphismManyLabels(self):
molecule = Molecule() # specific case (species)
molecule.fromAdjacencyList("""
1 *1 C u1 p0 c0 {2,S} {3,S} {4,S}
2 C u0 p0 c0 {1,S} {3,S} {5,S} {6,S}
3 C u0 p0 c0 {1,S} {2,S} {7,S} {8,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {2,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {3,S}
8 H u0 p0 c0 {3,S}
""")
group = Group() # general case (functional group)
group.fromAdjacencyList("""
        1 *1 C u1 p0 c0 {2,S} {3,S}
2 R!H u0 p0 c0 {1,S}
3 R!H u0 p0 c0 {1,S}
""")
labeled1 = molecule.getLabeledAtoms()
labeled2 = group.getLabeledAtoms()
initialMap = {}
for label,atom1 in labeled1.iteritems():
initialMap[atom1] = labeled2[label]
self.assertTrue(molecule.isSubgraphIsomorphic(group, initialMap))
mapping = molecule.findSubgraphIsomorphisms(group, initialMap)
self.assertEqual(len(mapping), 2)
for map in mapping:
self.assertTrue(len(map) == min(len(molecule.atoms), len(group.atoms)))
for key, value in map.iteritems():
self.assertTrue(key in molecule.atoms)
self.assertTrue(value in group.atoms)
def testAdjacencyList(self):
"""
Check the adjacency list read/write functions for a full molecule.
"""
molecule1 = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,D} {7,S} {8,S}
2 C u0 p0 c0 {1,D} {3,S} {9,S}
3 C u0 p0 c0 {2,S} {4,D} {10,S}
4 C u0 p0 c0 {3,D} {5,S} {11,S}
5 C u1 {4,S} {6,S} {12,S}
6 C u0 p0 c0 {5,S} {13,S} {14,S} {15,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {1,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {3,S}
11 H u0 p0 c0 {4,S}
12 H u0 p0 c0 {5,S}
13 H u0 p0 c0 {6,S}
14 H u0 p0 c0 {6,S}
15 H u0 p0 c0 {6,S}
""")
molecule2 = Molecule().fromSMILES('C=CC=C[CH]C')
self.assertTrue(molecule1.isIsomorphic(molecule2))
self.assertTrue(molecule2.isIsomorphic(molecule1))
def testSSSR(self):
"""
Test the Molecule.getSmallestSetOfSmallestRings() method with a complex
polycyclic molecule.
"""
molecule = Molecule()
molecule.fromSMILES('C(CC1C(C(CCCCCCCC)C1c1ccccc1)c1ccccc1)CCCCCC')
#http://cactus.nci.nih.gov/chemical/structure/C(CC1C(C(CCCCCCCC)C1c1ccccc1)c1ccccc1)CCCCCC/image
sssr = molecule.getSmallestSetOfSmallestRings()
self.assertEqual( len(sssr), 3)
def testIsInCycleEthane(self):
"""
Test the Molecule.isInCycle() method with ethane.
"""
molecule = Molecule().fromSMILES('CC')
for atom in molecule.atoms:
self.assertFalse(molecule.isAtomInCycle(atom))
for atom1 in molecule.atoms:
for atom2, bond in atom1.bonds.items():
self.assertFalse(molecule.isBondInCycle(bond))
def testIsInCycleCyclohexane(self):
"""
        Test the Molecule.isInCycle() method with cyclohexane.
"""
molecule = Molecule().fromInChI('InChI=1/C6H12/c1-2-4-6-5-3-1/h1-6H2')
for atom in molecule.atoms:
if atom.isHydrogen():
self.assertFalse(molecule.isAtomInCycle(atom))
elif atom.isCarbon():
self.assertTrue(molecule.isAtomInCycle(atom))
for atom1 in molecule.atoms:
for atom2, bond in atom1.bonds.items():
if atom1.isCarbon() and atom2.isCarbon():
self.assertTrue(molecule.isBondInCycle(bond))
else:
self.assertFalse(molecule.isBondInCycle(bond))
def testFromSMILESH(self):
"""
Make sure that H radical is produced properly from its SMILES
representation.
"""
molecule = Molecule(SMILES='[H]')
self.assertEqual(len(molecule.atoms), 1)
H = molecule.atoms[0]
self.assertTrue(H.isHydrogen())
self.assertEqual(H.radicalElectrons, 1)
def testFromInChIH(self):
"""
Make sure that H radical is produced properly from its InChI
representation.
"""
molecule = Molecule().fromInChI('InChI=1/H')
self.assertEqual(len(molecule.atoms), 1)
H = molecule.atoms[0]
self.assertTrue(H.isHydrogen())
self.assertEqual(H.radicalElectrons, 1)
def testPickle(self):
"""
Test that a Molecule object can be successfully pickled and
unpickled with no loss of information.
"""
molecule0 = Molecule().fromSMILES('C=CC=C[CH2]C')
molecule0.update()
import cPickle
molecule = cPickle.loads(cPickle.dumps(molecule0))
self.assertEqual(len(molecule0.atoms), len(molecule.atoms))
self.assertEqual(molecule0.getFormula(), molecule.getFormula())
self.assertTrue(molecule0.isIsomorphic(molecule))
self.assertTrue(molecule.isIsomorphic(molecule0))
def testRadicalCH(self):
"""
Test that the species [CH] has three radical electrons and a spin multiplicity of 4.
"""
molecule = Molecule().fromSMILES('[CH]')
self.assertEqual(molecule.atoms[0].radicalElectrons, 3)
self.assertEqual(molecule.multiplicity, 4)
self.assertEqual(molecule.getRadicalCount(), 3)
def testRadicalCH2(self):
"""
Test that the species [CH2] has two radical electrons and a spin multiplicity of 3.
"""
molecule = Molecule().fromSMILES('[CH2]')
self.assertEqual(molecule.atoms[0].radicalElectrons, 2)
self.assertEqual(molecule.multiplicity, 3)
self.assertEqual(molecule.getRadicalCount(), 2)
def testRadicalCH2CH2CH2(self):
"""
Test radical count on [CH2]C[CH2]
"""
molecule = Molecule().fromSMILES('[CH2]C[CH2]')
self.assertEqual(molecule.getRadicalCount(), 2)
def testSMILES(self):
"""
Test that we can generate a few SMILES strings as expected
"""
import rmgpy.molecule
test_strings = ['[C-]#[O+]', '[C]', '[CH]', 'OO', '[H][H]', '[H]',
'[He]', '[O]', 'O', '[CH3]', 'C', '[OH]', 'CCC',
'CC', 'N#N', '[O]O', 'C[CH2]', '[Ar]', 'CCCC',
'O=C=O', 'N#[C]',
]
for s in test_strings:
molecule = Molecule(SMILES=s)
self.assertEqual(s, molecule.toSMILES())
def testKekuleToSMILES(self):
"""
Test that we can print SMILES strings of Kekulized structures
The first two are different Kekule forms of the same thing.
"""
test_cases = {
"CC1C=CC=CC=1O":"""
1 C u0 p0 c0 {2,S} {9,S} {10,S} {11,S}
2 C u0 p0 c0 {1,S} {3,D} {4,S}
3 C u0 p0 c0 {2,D} {5,S} {8,S}
4 C u0 p0 c0 {2,S} {7,D} {12,S}
5 C u0 p0 c0 {3,S} {6,D} {13,S}
6 C u0 p0 c0 {5,D} {7,S} {14,S}
7 C u0 p0 c0 {4,D} {6,S} {15,S}
8 O u0 p2 c0 {3,S} {16,S}
9 H u0 p0 c0 {1,S}
10 H u0 p0 c0 {1,S}
11 H u0 p0 c0 {1,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {5,S}
14 H u0 p0 c0 {6,S}
15 H u0 p0 c0 {7,S}
16 H u0 p0 c0 {8,S}""",
"CC1=CC=CC=C1O":"""
1 C u0 p0 c0 {2,S} {9,S} {10,S} {11,S}
2 C u0 p0 c0 {1,S} {3,S} {4,D}
3 C u0 p0 c0 {2,S} {5,D} {8,S}
4 C u0 p0 c0 {2,D} {7,S} {15,S}
5 C u0 p0 c0 {3,D} {6,S} {12,S}
6 C u0 p0 c0 {5,S} {7,D} {13,S}
7 C u0 p0 c0 {4,S} {6,D} {14,S}
8 O u0 p2 c0 {3,S} {16,S}
9 H u0 p0 c0 {1,S}
10 H u0 p0 c0 {1,S}
11 H u0 p0 c0 {1,S}
12 H u0 p0 c0 {5,S}
13 H u0 p0 c0 {6,S}
14 H u0 p0 c0 {7,S}
15 H u0 p0 c0 {4,S}
16 H u0 p0 c0 {8,S}""",
"CC1C=CC=CC=1":"""
1 C u0 p0 c0 {2,D} {6,S} {7,S}
2 C u0 p0 c0 {1,D} {3,S} {8,S}
3 C u0 p0 c0 {2,S} {4,D} {9,S}
4 C u0 p0 c0 {3,D} {5,S} {10,S}
5 C u0 p0 c0 {4,S} {6,D} {11,S}
6 C u0 p0 c0 {1,S} {5,D} {12,S}
7 C u0 p0 c0 {1,S} {13,S} {14,S} {15,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {3,S}
10 H u0 p0 c0 {4,S}
11 H u0 p0 c0 {5,S}
12 H u0 p0 c0 {6,S}
13 H u0 p0 c0 {7,S}
14 H u0 p0 c0 {7,S}
15 H u0 p0 c0 {7,S}"""
}
for smiles, adjlist in test_cases.iteritems():
m = Molecule().fromAdjacencyList(adjlist)
s = m.toSMILES()
self.assertEqual(s, smiles, "Generated SMILES string {0} instead of {1}".format(s, smiles))
def testKekuleRoundTripSMILES(self):
"""
Test that we can round-trip SMILES strings of Kekulized aromatics
"""
import rmgpy.molecule
test_strings = [
'CC1=CC=CC=C1O', 'CC1C=CC=CC=1O',
# 'Cc1ccccc1O', # this will fail because it is Kekulized during fromSMILES()
]
for s in test_strings:
molecule = Molecule(SMILES=s)
self.assertEqual(s, molecule.toSMILES(), "Started with {0} but ended with {1}".format(s, molecule.toSMILES()))
def testInChIKey(self):
"""
Test that InChI Key generation is working properly.
"""
molecule = Molecule().fromInChI('InChI=1S/C7H12/c1-2-7-4-3-6(1)5-7/h6-7H,1-5H2')
key = molecule.toInChIKey()
self.assertEqual(key, 'UMRZSTCPUPJPOJ-UHFFFAOYSA')
def testAugmentedInChI(self):
"""
Test the Augmented InChI generation
"""
mol = Molecule().fromAdjacencyList("""
1 C u1 p0 c0 {2,S}
2 C u1 p0 c0 {1,S}
""", saturateH=True)
self.assertEqual(mol.toAugmentedInChI(), 'InChI=1S/C2H4/c1-2/h1-2H2/u1,2')
def testAugmentedInChIKey(self):
"""
Test the Augmented InChI Key generation
"""
mol = Molecule().fromAdjacencyList("""
1 C u1 p0 c0 {2,S}
2 C u1 p0 c0 {1,S}
""", saturateH=True)
self.assertEqual(mol.toAugmentedInChIKey(), 'VGGSQFUCUMXWEO-UHFFFAOYSA-u1,2')
def testLinearMethane(self):
"""
Test the Molecule.isLinear() method.
"""
self.assertFalse(Molecule().fromSMILES('C').isLinear())
def testLinearEthane(self):
"""
Test the Molecule.isLinear() method.
"""
self.assertFalse(Molecule().fromSMILES('CC').isLinear())
def testLinearPropane(self):
"""
Test the Molecule.isLinear() method.
"""
self.assertFalse(Molecule().fromSMILES('CCC').isLinear())
def testLinearNeopentane(self):
"""
Test the Molecule.isLinear() method.
"""
self.assertFalse(Molecule().fromSMILES('CC(C)(C)C').isLinear())
def testLinearHydrogen(self):
"""
Test the Molecule.isLinear() method.
"""
self.assertFalse(Molecule().fromSMILES('[H]').isLinear())
def testLinearOxygen(self):
"""
Test the Molecule.isLinear() method.
"""
self.assertTrue(Molecule().fromSMILES('O=O').isLinear())
def testLinearCarbonDioxide(self):
"""
Test the Molecule.isLinear() method.
"""
self.assertTrue(Molecule().fromSMILES('O=C=O').isLinear())
def testLinearAcetylene(self):
"""
Test the Molecule.isLinear() method.
"""
self.assertTrue(Molecule().fromSMILES('C#C').isLinear())
def testLinear135Hexatriyne(self):
"""
Test the Molecule.isLinear() method.
"""
self.assertTrue(Molecule().fromSMILES('C#CC#CC#C').isLinear())
def testAromaticBenzene(self):
"""
Test the Molecule.isAromatic() method for Benzene.
"""
m = Molecule().fromSMILES('C1=CC=CC=C1')
isomers = m.generateResonanceIsomers()
self.assertTrue(any(isomer.isAromatic() for isomer in isomers))
def testAromaticNaphthalene(self):
"""
Test the Molecule.isAromatic() method for Naphthalene.
"""
m = Molecule().fromSMILES('C12C(C=CC=C1)=CC=CC=2')
isomers = m.generateResonanceIsomers()
self.assertTrue(any(isomer.isAromatic() for isomer in isomers))
def testAromaticCyclohexane(self):
"""
Test the Molecule.isAromatic() method for Cyclohexane.
"""
m = Molecule().fromSMILES('C1CCCCC1')
isomers = m.generateResonanceIsomers()
self.assertFalse(any(isomer.isAromatic() for isomer in isomers))
def testCountInternalRotorsEthane(self):
"""
Test the Molecule.countInternalRotors() method.
"""
self.assertEqual(Molecule().fromSMILES('CC').countInternalRotors(), 1)
def testCountInternalRotorsPropane(self):
"""
Test the Molecule.countInternalRotors() method.
"""
self.assertEqual(Molecule().fromSMILES('CCC').countInternalRotors(), 2)
def testCountInternalRotorsNeopentane(self):
"""
Test the Molecule.countInternalRotors() method.
"""
self.assertEqual(Molecule().fromSMILES('CC(C)(C)C').countInternalRotors(), 4)
def testCountInternalRotorsMethylCyclohexane(self):
"""
Test the Molecule.countInternalRotors() method.
"""
        self.assertEqual(Molecule().fromSMILES('C1CCCCC1C').countInternalRotors(), 1)
def testCountInternalRotorsEthylene(self):
"""
Test the Molecule.countInternalRotors() method.
"""
self.assertEqual(Molecule().fromSMILES('C=C').countInternalRotors(), 0)
def testCountInternalRotorsAcetylene(self):
"""
Test the Molecule.countInternalRotors() method.
"""
self.assertEqual(Molecule().fromSMILES('C#C').countInternalRotors(), 0)
def testCarbeneIdentifiers(self):
"""
Test that singlet carbene molecules, bearing an electron pair rather than unpaired electrons
are correctly converted into rdkit molecules and identifiers.
"""
ch2_t = '''
multiplicity 3
1 C u2 p0 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
'''
mol = Molecule().fromAdjacencyList(ch2_t)
self.assertEqual( mol.toAugmentedInChI(), 'InChI=1S/CH2/h1H2/u1,1')
self.assertEqual( mol.toSMILES(), '[CH2]')
ch2_s = '''
multiplicity 1
1 C u0 p1 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
'''
mol = Molecule().fromAdjacencyList(ch2_s)
self.assertEqual( mol.toAugmentedInChI(), 'InChI=1S/CH2/h1H2/lp1')
self.assertEqual( mol.toSMILES(), '[CH2]')
def testGetSymmetryNumber(self):
"""
Test that the symmetry number getter works properly
"""
mol = Molecule().fromSMILES('C')
self.assertEquals(12, mol.getSymmetryNumber())
empty = Molecule()
self.assertEquals(1, empty.getSymmetryNumber())
def testMoleculeProps(self):
"""
Test a key-value pair is added to the props attribute of Molecule.
"""
self.molecule[0].props['foo'] = 'bar'
self.assertIsInstance(self.molecule[0].props, dict)
self.assertEquals(self.molecule[0].props['foo'], 'bar')
def testMoleculeProps_object_attribute(self):
"""
Test that Molecule's props dictionaries are independent of each other.
Create a test in which is checked whether props is an object attribute rather
than a class attribute
"""
spc2 = Molecule()
self.molecule[0].props['foo'] = 'bar'
spc3 = Molecule()
spc3.props['foo'] = 'bla'
self.assertEquals(self.molecule[0].props['foo'], 'bar')
self.assertDictEqual(spc2.props, {})
self.assertDictEqual(spc3.props, {'foo': 'bla'})
@work_in_progress
def testCountInternalRotorsDimethylAcetylene(self):
"""
Test the Molecule.countInternalRotors() method for dimethylacetylene.
This is a "hard" test that currently fails.
"""
self.assertEqual(Molecule().fromSMILES('CC#CC').countInternalRotors(), 1)
def testSaturateAromaticRadical(self):
"""
Test that the Molecule.saturate() method works properly for an indenyl radical
containing Benzene bonds
"""
indenyl = Molecule().fromAdjacencyList("""
multiplicity 2
1 C u0 p0 c0 {2,B} {3,S} {4,B}
2 C u0 p0 c0 {1,B} {5,B} {6,S}
3 C u0 p0 c0 {1,S} {7,D} {11,S}
4 C u0 p0 c0 {1,B} {8,B} {12,S}
5 C u0 p0 c0 {2,B} {9,B} {15,S}
6 C u1 p0 c0 {2,S} {7,S} {16,S}
7 C u0 p0 c0 {3,D} {6,S} {10,S}
8 C u0 p0 c0 {4,B} {9,B} {13,S}
9 C u0 p0 c0 {5,B} {8,B} {14,S}
10 H u0 p0 c0 {7,S}
11 H u0 p0 c0 {3,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {8,S}
14 H u0 p0 c0 {9,S}
15 H u0 p0 c0 {5,S}
16 H u0 p0 c0 {6,S}
""")
indene = Molecule().fromAdjacencyList("""
1 C u0 p0 c0 {2,B} {3,S} {4,B}
2 C u0 p0 c0 {1,B} {5,B} {6,S}
3 C u0 p0 c0 {1,S} {7,D} {11,S}
4 C u0 p0 c0 {1,B} {8,B} {12,S}
5 C u0 p0 c0 {2,B} {9,B} {15,S}
6 C u0 p0 c0 {2,S} {7,S} {16,S} {17,S}
7 C u0 p0 c0 {3,D} {6,S} {10,S}
8 C u0 p0 c0 {4,B} {9,B} {13,S}
9 C u0 p0 c0 {5,B} {8,B} {14,S}
10 H u0 p0 c0 {7,S}
11 H u0 p0 c0 {3,S}
12 H u0 p0 c0 {4,S}
13 H u0 p0 c0 {8,S}
14 H u0 p0 c0 {9,S}
15 H u0 p0 c0 {5,S}
16 H u0 p0 c0 {6,S}
17 H u0 p0 c0 {6,S}
""")
saturated_molecule = indenyl.copy(deep=True)
saturated_molecule.saturate()
self.assertTrue(saturated_molecule.isIsomorphic(indene))
def testMalformedAugmentedInChI(self):
"""Test that augmented inchi without InChI layer raises Exception."""
from .inchi import InchiException
malform_aug_inchi = 'foo'
with self.assertRaises(InchiException):
mol = Molecule().fromAugmentedInChI(malform_aug_inchi)
def testMalformedAugmentedInChI_Wrong_InChI_Layer(self):
"""Test that augmented inchi with wrong layer is caught."""
malform_aug_inchi = 'InChI=1S/CH3/h1H2'
with self.assertRaises(Exception):
mol = Molecule().fromAugmentedInChI(malform_aug_inchi)
def testMalformedAugmentedInChI_Wrong_Mult(self):
"""Test that augmented inchi with wrong layer is caught."""
malform_aug_inchi = 'InChI=1S/CH3/h1H3'
with self.assertRaises(Exception):
mol = Molecule().fromAugmentedInChI(malform_aug_inchi)
def testMalformedAugmentedInChI_Wrong_Indices(self):
"""Test that augmented inchi with wrong layer is caught."""
malform_aug_inchi = 'InChI=1S/C6H6/c1-3-5-6-4-2/h1,6H,2,5H2/u4,1'
with self.assertRaises(Exception):
mol = Molecule().fromAugmentedInChI(malform_aug_inchi)
def testRDKitMolAtomMapping(self):
"""
Test that the atom mapping returned by toRDKitMol contains the correct
atom indices of the atoms of the molecule when hydrogens are removed.
"""
from .generator import toRDKitMol
adjlist = '''
1 H u0 p0 c0 {2,S}
2 C u0 p0 c0 {1,S} {3,S} {4,S} {5,S}
3 H u0 p0 c0 {2,S}
4 H u0 p0 c0 {2,S}
5 O u0 p2 c0 {2,S} {6,S}
6 H u0 p0 c0 {5,S}
'''
mol = Molecule().fromAdjacencyList(adjlist)
rdkitmol, rdAtomIndices = toRDKitMol(mol, removeHs=True, returnMapping=True)
heavy_atoms = [at for at in mol.atoms if at.number != 1]
for at1 in heavy_atoms:
for at2 in heavy_atoms:
if mol.hasBond(at1, at2):
try:
rdkitmol.GetBondBetweenAtoms(rdAtomIndices[at1],rdAtomIndices[at2])
except RuntimeError:
self.fail("RDKit failed in finding the bond in the original atom!")
def testUpdateLonePairs(self):
adjlist = """
1 Si u0 p1 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
"""
mol = Molecule().fromAdjacencyList(adjlist)
mol.updateLonePairs()
lp = 0
for atom in mol.atoms:
lp += atom.lonePairs
self.assertEqual(lp, 1)
def testLargeMolUpdate(self):
adjlist = """
1 C u0 p0 c0 {7,S} {33,S} {34,S} {35,S}
2 C u0 p0 c0 {8,S} {36,S} {37,S} {38,S}
3 C u0 p0 c0 {5,S} {9,D} {39,S}
4 C u0 p0 c0 {6,S} {10,D} {40,S}
5 C u0 p0 c0 {3,S} {17,S} {41,S} {85,S}
6 C u0 p0 c0 {4,S} {18,D} {42,S}
7 C u0 p0 c0 {1,S} {11,S} {43,S} {44,S}
8 C u0 p0 c0 {2,S} {12,S} {45,S} {46,S}
9 C u0 p0 c0 {3,D} {31,S} {47,S}
10 C u0 p0 c0 {4,D} {32,S} {48,S}
11 C u0 p0 c0 {7,S} {19,S} {51,S} {52,S}
12 C u0 p0 c0 {8,S} {20,S} {53,S} {54,S}
13 C u0 p0 c0 {18,S} {32,S} {50,S} {86,S}
14 C u0 p0 c0 {17,D} {31,S} {49,S}
15 C u0 p0 c0 {17,S} {25,S} {63,S} {64,S}
16 C u0 p0 c0 {18,S} {26,S} {65,S} {66,S}
17 C u0 p0 c0 {5,S} {14,D} {15,S}
18 C u0 p0 c0 {6,D} {13,S} {16,S}
19 C u0 p0 c0 {11,S} {23,S} {55,S} {56,S}
20 C u0 p0 c0 {12,S} {24,S} {57,S} {58,S}
21 C u0 p0 c0 {25,S} {29,S} {75,S} {76,S}
22 C u0 p0 c0 {26,S} {30,S} {77,S} {78,S}
23 C u0 p0 c0 {19,S} {27,S} {71,S} {72,S}
24 C u0 p0 c0 {20,S} {28,S} {73,S} {74,S}
25 C u0 p0 c0 {15,S} {21,S} {59,S} {60,S}
26 C u0 p0 c0 {16,S} {22,S} {61,S} {62,S}
27 C u0 p0 c0 {23,S} {29,S} {79,S} {80,S}
28 C u0 p0 c0 {24,S} {30,S} {81,S} {82,S}
29 C u0 p0 c0 {21,S} {27,S} {67,S} {68,S}
30 C u0 p0 c0 {22,S} {28,S} {69,S} {70,S}
31 C u0 p0 c0 {9,S} {14,S} {32,S} {83,S}
32 C u0 p0 c0 {10,S} {13,S} {31,S} {84,S}
33 H u0 p0 c0 {1,S}
34 H u0 p0 c0 {1,S}
35 H u0 p0 c0 {1,S}
36 H u0 p0 c0 {2,S}
37 H u0 p0 c0 {2,S}
38 H u0 p0 c0 {2,S}
39 H u0 p0 c0 {3,S}
40 H u0 p0 c0 {4,S}
41 H u0 p0 c0 {5,S}
42 H u0 p0 c0 {6,S}
43 H u0 p0 c0 {7,S}
44 H u0 p0 c0 {7,S}
45 H u0 p0 c0 {8,S}
46 H u0 p0 c0 {8,S}
47 H u0 p0 c0 {9,S}
48 H u0 p0 c0 {10,S}
49 H u0 p0 c0 {14,S}
50 H u0 p0 c0 {13,S}
51 H u0 p0 c0 {11,S}
52 H u0 p0 c0 {11,S}
53 H u0 p0 c0 {12,S}
54 H u0 p0 c0 {12,S}
55 H u0 p0 c0 {19,S}
56 H u0 p0 c0 {19,S}
57 H u0 p0 c0 {20,S}
58 H u0 p0 c0 {20,S}
59 H u0 p0 c0 {25,S}
60 H u0 p0 c0 {25,S}
61 H u0 p0 c0 {26,S}
62 H u0 p0 c0 {26,S}
63 H u0 p0 c0 {15,S}
64 H u0 p0 c0 {15,S}
65 H u0 p0 c0 {16,S}
66 H u0 p0 c0 {16,S}
67 H u0 p0 c0 {29,S}
68 H u0 p0 c0 {29,S}
69 H u0 p0 c0 {30,S}
70 H u0 p0 c0 {30,S}
71 H u0 p0 c0 {23,S}
72 H u0 p0 c0 {23,S}
73 H u0 p0 c0 {24,S}
74 H u0 p0 c0 {24,S}
75 H u0 p0 c0 {21,S}
76 H u0 p0 c0 {21,S}
77 H u0 p0 c0 {22,S}
78 H u0 p0 c0 {22,S}
79 H u0 p0 c0 {27,S}
80 H u0 p0 c0 {27,S}
81 H u0 p0 c0 {28,S}
82 H u0 p0 c0 {28,S}
83 H u0 p0 c0 {31,S}
84 H u0 p0 c0 {32,S}
85 H u0 p0 c0 {5,S}
86 H u0 p0 c0 {13,S}
"""
mol = Molecule().fromAdjacencyList(adjlist)
mol.resetConnectivityValues()
try:
mol.updateConnectivityValues()
except OverflowError:
self.fail("updateConnectivityValues() raised OverflowError unexpectedly!")
def testLargeMolCreation(self):
"""
Test molecules between C1 to C201 in 10 carbon intervals to make
sure that overflow errors are not generated.
"""
for i in xrange(1,202,10):
smi = 'C'*i
try:
m = Molecule(SMILES=smi)
except OverflowError:
self.fail('Creation of C{} failed!'.format(i))
def testGetPolycyclicRings(self):
"""
Test that polycyclic rings within a molecule are returned properly in the function
`Graph().getPolycyclicRings()`
"""
# norbornane
m1 = Molecule(SMILES='C1CC2CCC1C2')
polyrings1 = m1.getPolycyclicRings()
self.assertEqual(len(polyrings1), 1)
ring = polyrings1[0]
self.assertEqual(len(ring),7) # 7 carbons in cycle
# dibenzyl
m2 = Molecule(SMILES='C1=CC=C(C=C1)CCC1C=CC=CC=1')
polyrings2 = m2.getPolycyclicRings()
self.assertEqual(len(polyrings2), 0)
# spiro[2.5]octane
m3 = Molecule(SMILES='C1CCC2(CC1)CC2')
polyrings3 = m3.getPolycyclicRings()
self.assertEqual(len(polyrings3), 1)
ring = polyrings3[0]
self.assertEqual(len(ring),8)
# 1-phenyl norbornane
m4 = Molecule(SMILES='C1=CC=C(C=C1)C12CCC(CC1)C2')
polyrings4 = m4.getPolycyclicRings()
self.assertEqual(len(polyrings4), 1)
ring = polyrings4[0]
self.assertEqual(len(ring),7)
def testGetMonocyclicRings(self):
"""
Test that monocyclic rings within a molecule are returned properly in the function
`Graph().getMonocyclicRings()`
"""
m1 = Molecule(SMILES='C(CCCC1CCCCC1)CCCC1CCCC1')
monorings = m1.getMonocyclicRings()
self.assertEqual(len(monorings),2)
m2 = Molecule(SMILES='C(CCC1C2CCC1CC2)CC1CCC1')
monorings = m2.getMonocyclicRings()
self.assertEqual(len(monorings),1)
self.assertEqual(len(monorings[0]),4)
m3 = Molecule(SMILES='CCCCC')
monorings = m3.getMonocyclicRings()
self.assertEqual(len(monorings),0)
def testGetDisparateRings(self):
"""
Test that monocyclic rings within a molecule are returned properly in the function
`Graph().getDisparateRings()`
"""
# norbornane
m1 = Molecule(SMILES='C1CC2CCC1C2')
monorings, polyrings = m1.getDisparateRings()
self.assertEqual(len(monorings), 0)
self.assertEqual(len(polyrings), 1)
self.assertEqual(len(polyrings[0]),7) # 7 carbons in cycle
m2 = Molecule(SMILES='C(CCC1C2CCC1CC2)CC1CCC1')
monorings, polyrings = m2.getDisparateRings()
self.assertEqual(len(monorings),1)
self.assertEqual(len(polyrings),1)
self.assertEqual(len(monorings[0]),4)
self.assertEqual(len(polyrings[0]),7)
m3 = Molecule(SMILES='C1CCC2(CC1)CC2CCCCC1CCC1')
monorings, polyrings = m3.getDisparateRings()
self.assertEqual(len(polyrings), 1)
self.assertEqual(len(monorings),1)
self.assertEqual(len(monorings[0]),4)
self.assertEqual(len(polyrings[0]),8)
m4 = Molecule(SMILES='CCCC')
monorings, polyrings = m4.getDisparateRings()
self.assertEqual(len(monorings),0)
self.assertEqual(len(polyrings),0)
m5 = Molecule(SMILES='C1=CC=C(CCCC2CC2)C(=C1)CCCCCC1CC1')
monorings, polyrings = m5.getDisparateRings()
self.assertEqual(len(monorings),3)
self.assertEqual(len(polyrings),0)
def testGetSmallestSetOfSmallestRings(self):
"""
Test that SSSR within a molecule are returned properly in the function
`Graph().getSmallestSetOfSmallestRings()`
"""
m1 = Molecule(SMILES='C12CCC1C3CC2CC3')
sssr1 = m1.getSmallestSetOfSmallestRings()
sssr1_sizes = sorted([len(ring) for ring in sssr1])
sssr1_sizes_expected = [4, 5, 5]
self.assertEqual(sssr1_sizes, sssr1_sizes_expected)
m2 = Molecule(SMILES='C1(CC2)C(CC3)CC3C2C1')
sssr2 = m2.getSmallestSetOfSmallestRings()
sssr2_sizes = sorted([len(ring) for ring in sssr2])
sssr2_sizes_expected = [5, 5, 6]
self.assertEqual(sssr2_sizes, sssr2_sizes_expected)
m3 = Molecule(SMILES='C1(CC2)C2C(CCCC3)C3C1')
sssr3 = m3.getSmallestSetOfSmallestRings()
sssr3_sizes = sorted([len(ring) for ring in sssr3])
sssr3_sizes_expected = [4, 5, 6]
self.assertEqual(sssr3_sizes, sssr3_sizes_expected)
m4 = Molecule(SMILES='C12=CC=CC=C1C3=C2C=CC=C3')
sssr4 = m4.getSmallestSetOfSmallestRings()
sssr4_sizes = sorted([len(ring) for ring in sssr4])
sssr4_sizes_expected = [4, 6, 6]
self.assertEqual(sssr4_sizes, sssr4_sizes_expected)
m5 = Molecule(SMILES='C12=CC=CC=C1CC3=C(C=CC=C3)C2')
sssr5 = m5.getSmallestSetOfSmallestRings()
sssr5_sizes = sorted([len(ring) for ring in sssr5])
sssr5_sizes_expected = [6, 6, 6]
self.assertEqual(sssr5_sizes, sssr5_sizes_expected)
def testToGroup(self):
"""
Test if we can convert a Molecule object into a Group object.
"""
mol = Molecule().fromSMILES('CC(C)CCCC(C)C1CCC2C3CC=C4CC(O)CCC4(C)C3CCC12C')#cholesterol
group = mol.toGroup()
self.assertTrue(isinstance(group, Group))
self.assertEquals(len(mol.atoms), len(group.atoms))
molbondcount = sum([1 for atom in mol.atoms for bondedAtom, bond in atom.edges.iteritems()])
groupbondcount = sum([1 for atom in group.atoms for bondedAtom, bond in atom.edges.iteritems()])
self.assertEquals(molbondcount, groupbondcount)
for i, molAt in enumerate(mol.atoms):
groupAtom = group.atoms[i]
atomTypes = [groupAtomType.equivalent(molAt.atomType) for groupAtomType in groupAtom.atomType]
self.assertTrue(any(atomTypes))
def testToAdjacencyListWithIsotopes(self):
"""
Test the Molecule.toAdjacencyList() method works for atoms with unexpected isotopes.
"""
mol = Molecule().fromSMILES('CC')
mol.atoms[0].element = getElement('C', 13)
adjlist = mol.toAdjacencyList().translate(None, '\n ')
adjlistExp = """
1 C u0 p0 c0 i13 {2,S} {3,S} {4,S} {5,S}
2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {2,S}
""".translate(None, '\n ')
self.assertEquals(adjlist, adjlistExp)
mol = Molecule().fromSMILES('CC')
mol.atoms[2].element = getElement('H', 2)
adjlist = mol.toAdjacencyList().translate(None, '\n ')
adjlistExp = """
1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}
2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}
3 H u0 p0 c0 i2 {1,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {2,S}
""".translate(None, '\n ')
self.assertEquals(adjlist, adjlistExp)
mol = Molecule().fromSMILES('OC')
mol.atoms[0].element = getElement('O', 18)
adjlist = mol.toAdjacencyList().translate(None, '\n ')
adjlistExp = """
1 O u0 p2 c0 i18 {2,S} {3,S}
2 C u0 p0 c0 {1,S} {4,S} {5,S} {6,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {2,S}
5 H u0 p0 c0 {2,S}
6 H u0 p0 c0 {2,S}
""".translate(None, '\n ')
self.assertEquals(adjlist, adjlistExp)
def testFromAdjacencyListWithIsotopes(self):
"""
Test the Molecule.fromAdjacencyList() method works for atoms with unexpected isotopes.
"""
exp = Molecule().fromSMILES('CC')
exp.atoms[0].element = getElement('C', 13)
adjlistCalc = """
1 C u0 p0 c0 i13 {2,S} {3,S} {4,S} {5,S}
2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {2,S}
"""
calc = Molecule().fromAdjacencyList(adjlistCalc)
self.assertTrue(exp.isIsomorphic(calc))
exp = Molecule().fromSMILES('CC')
exp.atoms[2].element = getElement('H', 2)
adjlistCalc = """
1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}
2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}
3 H u0 p0 c0 i2 {1,S}
4 H u0 p0 c0 {1,S}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {2,S}
7 H u0 p0 c0 {2,S}
8 H u0 p0 c0 {2,S}
"""
calc = Molecule().fromAdjacencyList(adjlistCalc)
self.assertTrue(exp.isIsomorphic(calc))
exp = Molecule().fromSMILES('OC')
exp.atoms[0].element = getElement('O', 18)
adjlistCalc = """
1 O u0 p2 c0 i18 {2,S} {3,S}
2 C u0 p0 c0 {1,S} {4,S} {5,S} {6,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {2,S}
5 H u0 p0 c0 {2,S}
6 H u0 p0 c0 {2,S}
"""
calc = Molecule().fromAdjacencyList(adjlistCalc)
self.assertTrue(exp.isIsomorphic(calc))
################################################################################
if __name__ == '__main__':
unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
pierrelb/RMG-Py
|
rmgpy/molecule/moleculeTest.py
|
Python
|
mit
| 61,794
|
[
"RDKit"
] |
622257ec273171726ed11d5aef0d83d863087b121ff20ba8a9dbcb2fb082a0e3
|
#!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
import os
import random
import uuid
from pathlib import Path
from typing import Any, Iterable, Optional, Sequence, Tuple, Type, TypeVar
from unittest.mock import Mock, call
import pytest
import paleomix.node
from paleomix.common.command import AtomicCmd, InputFile, OutputFile, TempOutputFile
from paleomix.common.utilities import safe_coerce_to_frozenset
from paleomix.common.versions import Requirement
from paleomix.node import (
CmdNodeError,
CommandNode,
Node,
NodeError,
NodeUnhandledException,
)
T = TypeVar("T")
def test_dir():
return os.path.dirname(__file__)
def test_file(*args: str):
return os.path.join(test_dir(), "data", *args)
def choice(values: Iterable[T]) -> T:
return random.choice(tuple(values))
class CommandNodeWithDefaultCommand(CommandNode):
def __init__(
self,
description: Optional[str] = None,
threads: int = 1,
dependencies: Iterable[Node] = (),
):
super().__init__(AtomicCmd("true"), description, threads, dependencies)
_NODE_TYPES: Tuple[Type[Node], ...] = (Node, CommandNodeWithDefaultCommand)
_DESCRIPTION = "My description of a node"
_IN_FILES = frozenset((test_file("empty_file_1"), test_file("empty_file_2")))
_OUT_FILES = frozenset(
(test_file("missing_out_file_1"), test_file("missing_out_file_2"))
)
_EXEC_FILES = frozenset(("ls", "sh"))
_AUX_FILES = frozenset((test_file("rCRS.fasta"), test_file("rCRS.fasta.fai")))
_REQUIREMENT_1 = Requirement(["bwa"], r"", "")
_REQUIREMENT_2 = Requirement(["bowtie"], r"", "")
_REQUIREMENTS = frozenset((_REQUIREMENT_1, _REQUIREMENT_2))
_EMPTY_FILE = test_file("empty_file_1")
def _build_cmd_mock(
input_files: Iterable[str] = _IN_FILES,
output_files: Iterable[str] = (),
executables: Iterable[str] = (),
auxiliary_files: Iterable[str] = (),
requirements: Iterable[str] = (),
optional_temp_files: Iterable[str] = (),
return_codes: Sequence[int] = (0,),
):
cmd = Mock(
input_files=frozenset(input_files),
output_files=frozenset(output_files),
executables=frozenset(executables),
auxiliary_files=frozenset(auxiliary_files),
requirements=frozenset(requirements),
expected_temp_files=frozenset(os.path.basename(f) for f in output_files),
optional_temp_files=frozenset(optional_temp_files),
)
cmd.join.return_value = return_codes
return cmd
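# A sketch of intended use: _build_cmd_mock(output_files=["out/result.txt"]) returns
# a mock whose expected_temp_files is {"result.txt"}, presumably mirroring how
# AtomicCmd tracks output files by basename in the temporary directory (the path
# here is purely illustrative).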
###############################################################################
###############################################################################
# Node: Constructor: File sets
_CONSTRUCTOR_SINGLE_VALUES = (
# Single values
("input_files", choice(_IN_FILES)),
("output_files", choice(_OUT_FILES)),
("executables", choice(_EXEC_FILES)),
("auxiliary_files", choice(_AUX_FILES)),
# Single value in list
("input_files", [choice(_IN_FILES)]),
("output_files", [choice(_OUT_FILES)]),
("executables", [choice(_EXEC_FILES)]),
("auxiliary_files", [choice(_AUX_FILES)]),
# Multiple values in list
("input_files", _IN_FILES),
("output_files", _OUT_FILES),
("executables", _EXEC_FILES),
("auxiliary_files", _AUX_FILES),
)
@pytest.mark.parametrize("key, value", _CONSTUCTOR_SINGLE_VALUES)
def test_constructor(key: str, value: str):
defaults = {"input_files": _EMPTY_FILE}
defaults[key] = value
node = Node(**defaults)
expected = safe_coerce_to_frozenset(value)
assert getattr(node, key) == expected
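# Node evidently coerces each of these constructor arguments with
# safe_coerce_to_frozenset, which is why a bare string, a single-element list,
# and a multi-element collection above are all expected to compare equal to a
# frozenset.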
_CONSTRUCTOR_INVALID_VALUES = (
("input_files", [id]),
("output_files", [-1]),
("executables", [{}]),
("auxiliary_files", [1.3]),
)
@pytest.mark.parametrize("key, value", _CONSTUCTOR_INVALID_VALUES)
def test_constructor__invalid_values(key: str, value: Any):
with pytest.raises(TypeError):
Node(**{key: value})
###############################################################################
###############################################################################
# Node: Constructor: Requirements
def test_constructor__requirements():
node = Node(requirements=_REQUIREMENT_1)
assert node.requirements == frozenset([_REQUIREMENT_1])
node = Node(requirements=[_REQUIREMENT_1])
assert node.requirements == frozenset([_REQUIREMENT_1])
node = Node(requirements=[_REQUIREMENT_1, _REQUIREMENT_2])
assert node.requirements == frozenset([_REQUIREMENT_1, _REQUIREMENT_2])
@pytest.mark.parametrize("value", (17, "867-5309"))
def test_constructor__requirements__wrong_type(value: Any):
with pytest.raises(TypeError):
Node(requirements=value) # type: ignore
###############################################################################
###############################################################################
# Node: Constructor: Dependencies
def test_constructor__nodes_is_none():
with pytest.raises(TypeError):
Node(dependencies=None) # type: ignore
def test_constructor__single_node():
sub_node = Node()
my_node = Node(dependencies=sub_node)
assert my_node.dependencies == frozenset([sub_node])
def test_constructor__iterable():
sub_nodes = [Node(), Node()]
my_node = Node(dependencies=iter(sub_nodes))
assert my_node.dependencies == frozenset(sub_nodes)
def test_constructor__not_a_node():
with pytest.raises(TypeError):
Node(dependencies=(1,)) # type: ignore
###############################################################################
###############################################################################
# *Node: Description
@pytest.mark.parametrize("cls", _NODE_TYPES)
def test_constructor__description(cls: Type[Node]):
my_node = cls(description=_DESCRIPTION)
assert str(my_node) == _DESCRIPTION
@pytest.mark.parametrize("cls", _NODE_TYPES)
def test_constructor__description__default(cls: Type[Node]):
my_node = cls()
assert str(my_node) == repr(my_node)
@pytest.mark.parametrize("cls", _NODE_TYPES)
@pytest.mark.parametrize("value", (1, {}))
def test_constructor__description__non_string(cls: Type[Node], value: Any):
with pytest.raises(TypeError):
cls(description=value) # type: ignore
###############################################################################
###############################################################################
# *Node: Constructor tests: #threads
@pytest.mark.parametrize("cls", _NODE_TYPES)
@pytest.mark.parametrize("nthreads", (1, 3))
def test_constructor__threads(cls: Type[Node], nthreads: int):
node = cls(threads=nthreads)
assert node.threads == nthreads
@pytest.mark.parametrize("cls", _NODE_TYPES)
@pytest.mark.parametrize("nthreads", (-1, 0))
def test_constructor__threads_invalid_range(cls: Type[Node], nthreads: int):
with pytest.raises(ValueError):
cls(threads=nthreads)
@pytest.mark.parametrize("cls", _NODE_TYPES)
@pytest.mark.parametrize("nthreads", ("1", {}, 2.7))
def test_constructor__threads_invalid_type(cls: Type[Node], nthreads: int):
with pytest.raises(TypeError):
cls(threads=nthreads)
###############################################################################
###############################################################################
# Node: Run
_DUMMY_TEMP_ROOT = "/xyz/tmp"
_DUMMY_TEMP = os.path.join(_DUMMY_TEMP_ROOT, "xTMPx")
def test_run__order():
node_mock = Mock()
node = Node()
node._create_temp_dir = node_mock._create_temp_dir
node._create_temp_dir.return_value = _DUMMY_TEMP
node._setup = node_mock._setup
node._run = node_mock._run
node._teardown = node_mock._teardown
node._remove_temp_dir = node_mock._remove_temp_dir
node.run(_DUMMY_TEMP_ROOT)
    assert node_mock.mock_calls == [
call._create_temp_dir(_DUMMY_TEMP_ROOT),
call._setup(_DUMMY_TEMP),
call._run(_DUMMY_TEMP),
call._teardown(_DUMMY_TEMP),
call._remove_temp_dir(_DUMMY_TEMP),
]
_EXCEPTIONS = (
(TypeError("The castle AAARGH!"), NodeUnhandledException),
(NodeError("He's a very naughty boy!"), NodeError),
)
@pytest.mark.parametrize("key", ("_setup", "_run", "_teardown"))
@pytest.mark.parametrize("exception, expectation", _EXCEPTIONS)
def test_run__exceptions(key, exception, expectation):
mock = Mock()
node = Node()
node._create_temp_dir = mock._create_temp_dir
node._create_temp_dir.return_value = _DUMMY_TEMP
setattr(node, key, getattr(mock, key))
getattr(node, key).side_effect = exception
with pytest.raises(expectation):
node.run(_DUMMY_TEMP_ROOT)
assert mock.mock_calls == [
call._create_temp_dir(_DUMMY_TEMP_ROOT),
getattr(call, key)(_DUMMY_TEMP),
]
def test_run__exception__create_temp_dir():
node_mock = Node()
node_mock._create_temp_dir = Mock()
node_mock._create_temp_dir.side_effect = OSError()
with pytest.raises(NodeUnhandledException):
node_mock.run(_DUMMY_TEMP_ROOT)
assert node_mock._create_temp_dir.mock_calls == [call(_DUMMY_TEMP_ROOT)]
def test_run__exception__remove_temp_dir():
mock = Mock()
node_mock = Node()
node_mock._create_temp_dir = mock._create_temp_dir
node_mock._create_temp_dir.return_value = _DUMMY_TEMP
node_mock._remove_temp_dir = mock._remove_temp_dir
node_mock._remove_temp_dir.side_effect = OSError()
with pytest.raises(NodeUnhandledException):
node_mock.run(_DUMMY_TEMP_ROOT)
assert mock.mock_calls == [
call._create_temp_dir(_DUMMY_TEMP_ROOT),
call._remove_temp_dir(_DUMMY_TEMP),
]
@pytest.mark.parametrize("exception", (NodeError, OSError))
def test_run__error_log__node_error(tmp_path, exception):
temp = tmp_path / "xTMPx"
mock = Mock()
node_mock = Node()
node_mock._create_temp_dir = mock._create_temp_dir
node_mock._create_temp_dir.return_value = str(temp)
node_mock._run = mock._run
node_mock._run.side_effect = exception("ARGH!")
temp.mkdir()
with pytest.raises(NodeError):
node_mock.run(tmp_path)
log_file = tmp_path / "xTMPx" / "pipe.errors"
assert log_file.exists()
error_text = log_file.read_text()
assert "Errors =" in error_text
assert paleomix.__version__ in error_text
assert mock.mock_calls == [
call._create_temp_dir(tmp_path),
call._run(str(temp)),
]
###############################################################################
###############################################################################
# Node: _setup / _teardown
_INPUT_FILES_EXIST = (
{"executables": ("ls", "sh")},
{"input_files": _IN_FILES},
{"auxiliary_files": _IN_FILES},
)
@pytest.mark.parametrize("kwargs", _INPUT_FILES_EXIST)
def test__setup__input_files(kwargs):
Node(**kwargs)._setup(None)
_INPUT_FILES_MISSING = (
{"executables": ("ls", "shxxxx")},
{"input_files": _OUT_FILES},
{"auxiliary_files": _OUT_FILES},
)
@pytest.mark.parametrize("kwargs", _INPUT_FILES_MISSING)
def test__setup__input_files_missing(kwargs):
with pytest.raises(NodeError):
Node(**kwargs)._setup(None)
def test__teardown__output_files():
Node(input_files=_EMPTY_FILE, output_files=_IN_FILES)._teardown(None)
def test__teardown__output_files_missing():
node = Node(input_files=_EMPTY_FILE, output_files=_OUT_FILES)
with pytest.raises(NodeError):
node._teardown(None)
###############################################################################
# Node._remove_temp_dir
def test_node_remove_temp_dir__empty_dir(tmp_path, caplog):
with caplog.at_level(logging.WARNING):
node = Node()
node._remove_temp_dir(tmp_path)
assert not tmp_path.exists()
assert not caplog.messages
def test_node_remove_temp_dir__extraneous_files(tmp_path, caplog):
tmp_file = tmp_path / str(uuid.uuid4())
tmp_file.touch()
with caplog.at_level(logging.WARNING):
node = Node()
node._remove_temp_dir(tmp_path)
assert not tmp_path.exists()
assert (
paleomix.node.__name__,
logging.WARNING,
"Unexpected file in temporary directory: %r" % (str(tmp_file),),
) in caplog.record_tuples
###############################################################################
# Node._collect_files
def test_node_collect_files__empty_folder(tmp_path: Path):
assert list(Node._collect_files(tmp_path)) == []
def test_node_collect_files__root_files(tmp_path: Path):
(tmp_path / "foo.txt").touch()
(tmp_path / "bar.txt").touch()
assert sorted(Node._collect_files(tmp_path)) == [
"bar.txt",
"foo.txt",
]
def test_node_collect_files__files_and_folders(tmp_path: Path):
(tmp_path / "foo1.txt").touch()
(tmp_path / "bar1").mkdir()
(tmp_path / "bar1" / "foo2.txt").touch()
(tmp_path / "bar1" / "bar2").mkdir()
(tmp_path / "bar1" / "bar2" / "foo3.txt").touch()
(tmp_path / "bar1" / "bar2" / "foo4.txt").touch()
assert sorted(Node._collect_files(tmp_path)) == [
"bar1/bar2/foo3.txt",
"bar1/bar2/foo4.txt",
"bar1/foo2.txt",
"foo1.txt",
]
###############################################################################
###############################################################################
# CommandNode: Constructor
_SIMPLE_DEPS = Node()
_SIMPLE_CMD_MOCK = Mock(
input_files=_IN_FILES,
output_files=_OUT_FILES,
executables=_EXEC_FILES,
auxiliary_files=_AUX_FILES,
requirements=_REQUIREMENTS,
)
_SIMPLE_CMD_NODE = CommandNode(command=_SIMPLE_CMD_MOCK, dependencies=_SIMPLE_DEPS)
def test_commandnode_constructor__input_files():
assert _SIMPLE_CMD_NODE.input_files == _IN_FILES
def test_commandnode_constructor__output_files():
assert _SIMPLE_CMD_NODE.output_files == _OUT_FILES
def test_commandnode_constructor__auxiliary_files():
assert _SIMPLE_CMD_NODE.auxiliary_files == _AUX_FILES
def test_commandnode_constructor__executables():
assert _SIMPLE_CMD_NODE.executables == _EXEC_FILES
def test_commandnode_constructor__requirements():
assert _SIMPLE_CMD_NODE.requirements == _REQUIREMENTS
def test_commandnode_constructor__dependencies():
assert _SIMPLE_CMD_NODE.dependencies == frozenset([_SIMPLE_DEPS])
def test_commandnode_constructor__dependencies__default():
cmd_mock = CommandNode(command=_SIMPLE_CMD_MOCK)
assert cmd_mock.dependencies == frozenset()
###############################################################################
###############################################################################
# CommandNode: run
def test_command_node__run():
mock = _build_cmd_mock()
node_mock = CommandNode(mock)
node_mock._create_temp_dir = mock._test_node_._create_temp_dir
node_mock._create_temp_dir.return_value = _DUMMY_TEMP
node_mock._setup = mock._test_node_._setup
node_mock._teardown = mock._test_node_._teardown
node_mock._remove_temp_dir = mock._test_node_._remove_temp_dir
node_mock.run(_DUMMY_TEMP_ROOT)
assert mock.mock_calls == [
call._test_node_._create_temp_dir(_DUMMY_TEMP_ROOT),
call._test_node_._setup(_DUMMY_TEMP),
call.run(_DUMMY_TEMP),
call.join(),
call._test_node_._teardown(_DUMMY_TEMP),
call._test_node_._remove_temp_dir(_DUMMY_TEMP),
]
###############################################################################
###############################################################################
# CommandNode: _setup()
_SETUP_FILES_EXIST = (
{"executables": ("ls", "sh")},
{"input_files": _IN_FILES},
{"auxiliary_files": _IN_FILES},
)
@pytest.mark.parametrize("kwargs", _SETUP_FILES_EXIST)
def test_commandnode_setup__files_exist(kwargs):
cmd_mock = _build_cmd_mock(**kwargs)
node = CommandNode(cmd_mock)
node._setup(None)
_SETUP_FILES_MISSING = (
{"executables": ("ls", "shxxxxxxxxxxx")},
{"input_files": _OUT_FILES},
{"auxiliary_files": _OUT_FILES},
)
@pytest.mark.parametrize("kwargs", _SETUP_FILES_MISSING)
def test_commandnode_setup__files_missing(kwargs):
cmd_mock = _build_cmd_mock(**kwargs)
node = CommandNode(cmd_mock)
with pytest.raises(NodeError):
node._setup(None)
###############################################################################
###############################################################################
# CommandNode: _run()
def test_commandnode_run__call_order():
cmd_mock = _build_cmd_mock()
node = CommandNode(cmd_mock)
node._run("xTMPx")
assert cmd_mock.mock_calls == [call.run("xTMPx"), call.join()]
def test_commandnode_run__exception_on_error():
cmd_mock = _build_cmd_mock(return_codes=(1,))
node = CommandNode(cmd_mock)
with pytest.raises(CmdNodeError):
node._run("xTMPx")
assert cmd_mock.mock_calls == [call.run("xTMPx"), call.join()]
###############################################################################
###############################################################################
# CommandNode: _teardown
def _setup_temp_folders(tmp_path: Path):
destination = tmp_path / "dst"
tmp_path = tmp_path / "tmp"
tmp_path.mkdir(parents=True, exist_ok=True)
destination.mkdir(parents=True, exist_ok=True)
return destination, tmp_path
# Commit is called on the command obj
def test_commandnode_teardown__commit(tmp_path: Path):
cmd_mock = _build_cmd_mock()
node = CommandNode(cmd_mock)
node._teardown(tmp_path)
assert cmd_mock.mock_calls == [call.commit(tmp_path)]
# Files exist in temp folder, and in destination after commit
def test_commandnode_teardown(tmp_path: Path):
destination, tmp_path = _setup_temp_folders(tmp_path)
cmd = AtomicCmd(
("echo", "-n", "1 2 3"),
extra_files=[InputFile(_EMPTY_FILE)],
stdout=str(destination / "foo.txt"),
)
cmd.run(tmp_path)
assert cmd.join() == [0]
node = CommandNode(cmd)
assert (tmp_path / "foo.txt").exists()
assert not (destination / "foo.txt").exists()
node._teardown(tmp_path)
assert not (tmp_path / "foo.txt").exists()
assert (destination / "foo.txt").exists()
# Not all required files have been generated (atomic)
def test_commandnode_teardown__missing_files_in_temp(tmp_path: Path):
destination, tmp_path = _setup_temp_folders(tmp_path)
cmd = AtomicCmd(
("echo", "-n", "1 2 3"),
extra_files=[
InputFile(_EMPTY_FILE),
OutputFile(str(destination / "bar.txt")),
],
stdout=str(destination / "foo.txt"),
)
cmd.run(tmp_path)
assert cmd.join() == [0]
node = CommandNode(cmd)
temp_files_before = set(tmp_path.iterdir())
dest_files_before = set(destination.iterdir())
with pytest.raises(CmdNodeError):
node._teardown(tmp_path)
assert temp_files_before == set(tmp_path.iterdir())
assert dest_files_before == set(destination.iterdir())
# Not all specified TEMP_ files exist at _teardown (allowed)
def test_commandnode_teardown__missing_optional_files(tmp_path: Path):
destination, tmp_path = _setup_temp_folders(tmp_path)
cmd = AtomicCmd(
("echo", "-n", "1 2 3"),
extra_files=[
InputFile(_EMPTY_FILE),
TempOutputFile("bar.txt"),
],
stdout=str(destination / "foo.txt"),
)
cmd.run(tmp_path)
assert cmd.join() == [0]
node = CommandNode(cmd)
node._teardown(tmp_path)
assert os.listdir(str(tmp_path)) == []
assert os.listdir(str(destination)) == ["foo.txt"]
# Not all required files were in place after commit
def test_commandnode_teardown__missing_files_in_dest(tmp_path: Path):
destination, tmp_path = _setup_temp_folders(tmp_path)
class _CmdMock(AtomicCmd):
def commit(self, temp):
AtomicCmd.commit(self, temp)
(destination / "foo.txt").unlink()
cmd = _CmdMock(
(
"touch",
OutputFile(str(destination / "foo.txt")),
OutputFile(str(destination / "bar.txt")),
),
extra_files=[InputFile(_EMPTY_FILE)],
)
cmd.run(tmp_path)
assert cmd.join() == [0]
node = CommandNode(cmd)
with pytest.raises(NodeError):
node._teardown(tmp_path)
def test_commandnode_teardown__extra_files_in_temp(tmp_path: Path):
destination, tmp_path = _setup_temp_folders(tmp_path)
unexpected_file = tmp_path / "unexpected_file.txt"
unexpected_file.write_text("1 2 3")
cmd = AtomicCmd(
("echo", "1 2 3"),
extra_files=[InputFile(_EMPTY_FILE)],
stdout=str(destination / "foo.txt"),
)
node = CommandNode(cmd)
node._run(tmp_path)
node._teardown(tmp_path)
assert list(tmp_path.iterdir()) == [unexpected_file]
assert list(destination.iterdir()) == [destination / "foo.txt"]
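###############################################################################
###############################################################################
# Node: Lifecycle sketch
# A supplementary example (not part of the original suite): it exercises the
# full lifecycle covered piecewise above, where run() chains _create_temp_dir,
# _setup, _run, _teardown and _remove_temp_dir, and removes the temporary
# sub-directory again on success. The subclass below is an illustrative
# assumption, not paleomix API documentation.
def test_run__lifecycle_sketch(tmp_path: Path):
    observed_temp_dirs = []

    class _ExampleNode(Node):
        def _run(self, temp):
            # record the temporary working directory handed to the work step
            observed_temp_dirs.append(temp)

    _ExampleNode().run(tmp_path)
    assert len(observed_temp_dirs) == 1
    # on success the temporary sub-directory has been cleaned up again
    assert list(tmp_path.iterdir()) == []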
|
MikkelSchubert/paleomix
|
tests/node_test.py
|
Python
|
mit
| 22,024
|
[
"BWA",
"Bowtie"
] |
ac6dc20f5c16f95c99ec63223d2c401dbc99549ee55897de621dd38390a19690
|
"""Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Modified by: Pete Green <p.l.green@liverpool.ac.uk>
# License: BSD 3 clause
import sys
import numpy as np
from scipy.optimize import approx_fprime
import pytest
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.gaussian_process.kernels import DotProduct, ExpSineSquared
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing \
import (assert_array_less,
assert_almost_equal, assert_raise_message,
assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_warns_message)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
non_fixed_kernels = [kernel for kernel in kernels
if kernel != fixed_kernel]
@pytest.mark.parametrize('kernel', kernels)
def test_gpr_interpolation(kernel):
if sys.maxsize <= 2 ** 32 and sys.version_info[:2] == (3, 6):
pytest.xfail("This test may fail on 32bit Py3.6")
# Test the interpolating property for different kernels.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_pred, y)
assert_almost_equal(np.diag(y_cov), 0.)
def test_gpr_interpolation_structured():
# Test the interpolating property for different kernels.
kernel = MiniSeqKernel(baseline_similarity_bounds='fixed')
X = ['A', 'B', 'C']
y = np.array([1, 2, 3])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(kernel(X, eval_gradient=True)[1].ravel(),
(1 - np.eye(len(X))).ravel())
assert_almost_equal(y_pred, y)
assert_almost_equal(np.diag(y_cov), 0.)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_lml_improving(kernel):
if sys.maxsize <= 2 ** 32 and sys.version_info[:2] == (3, 6):
pytest.xfail("This test may fail on 32bit Py3.6")
# Test that hyperparameter-tuning improves log-marginal likelihood.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert (gpr.log_marginal_likelihood(gpr.kernel_.theta) >
gpr.log_marginal_likelihood(kernel.theta))
@pytest.mark.parametrize('kernel', kernels)
def test_lml_precomputed(kernel):
# Test that lml of optimized kernel is stored correctly.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert (gpr.log_marginal_likelihood(gpr.kernel_.theta) ==
gpr.log_marginal_likelihood())
@pytest.mark.parametrize('kernel', kernels)
def test_lml_without_cloning_kernel(kernel):
    # Test that clone_kernel=False writes the supplied theta back to the kernel.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
input_theta = np.ones(gpr.kernel_.theta.shape, dtype=np.float64)
gpr.log_marginal_likelihood(input_theta, clone_kernel=False)
assert_almost_equal(gpr.kernel_.theta, input_theta, 7)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
# Test that we are in local maximum after hyperparameter-optimization.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1]))
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_solution_inside_bounds(kernel):
    # Test that hyperparameter-optimization remains in bounds.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
@pytest.mark.parametrize('kernel', kernels)
def test_lml_gradient(kernel):
# Compare analytic and numeric gradient of log marginal likelihood.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
@pytest.mark.parametrize('kernel', kernels)
def test_prior(kernel):
# Test that GP prior has mean 0 and identical variances.
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
@pytest.mark.parametrize('kernel', kernels)
def test_sample_statistics(kernel):
# Test that statistics of samples drawn from GP are correct.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
# Test that kernel parameters are unmodified when optimizer is None.
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert np.exp(gpr.kernel_.theta) == 1.0
@pytest.mark.parametrize('kernel', kernels)
def test_predict_cov_vs_std(kernel):
if sys.maxsize <= 2 ** 32 and sys.version_info[:2] == (3, 6):
pytest.xfail("This test may fail on 32bit Py3.6")
# Test that predicted std.-dev. is consistent with cov's diagonal.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
# Test that GPR can identify meaningful anisotropic length-scales.
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert (np.exp(gpr.kernel_.theta[1]) >
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert lml > last_lml - np.finfo(np.float32).eps
last_lml = lml
@pytest.mark.parametrize('kernel', kernels)
def test_y_normalization(kernel):
"""
Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results. Note that, here,
'normalized y' refers to y that has been made zero mean and unit
variance.
"""
y_mean = np.mean(y)
y_std = np.std(y)
y_norm = (y - y_mean) / y_std
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_pred * y_std + y_mean
y_pred_std = y_pred_std * y_std
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
y_cov = y_cov * y_std**2
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_large_variance_y():
"""
    Here we test that, when normalize_y=True, our GP can produce a
sensible fit to training data whose variance is significantly
larger than unity. This test was made in response to issue #15612.
GP predictions are verified against predictions that were made
using GPy which, here, is treated as the 'gold standard'. Note that we
only investigate the RBF kernel here, as that is what was used in the
GPy implementation.
The following code can be used to recreate the GPy data:
--------------------------------------------------------------------------
import GPy
kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
gpy.optimize()
y_pred_gpy, y_var_gpy = gpy.predict(X2)
y_pred_std_gpy = np.sqrt(y_var_gpy)
--------------------------------------------------------------------------
"""
# Here we utilise a larger variance version of the training data
y_large = 10 * y
# Standard GP with normalize_y=True
RBF_params = {'length_scale': 1.0}
kernel = RBF(**RBF_params)
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y_large)
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
# 'Gold standard' mean predictions from GPy
y_pred_gpy = np.array([15.16918303,
-27.98707845,
-39.31636019,
14.52605515,
69.18503589])
# 'Gold standard' std predictions from GPy
y_pred_std_gpy = np.array([7.78860962,
3.83179178,
0.63149951,
0.52745188,
0.86170042])
# Based on numerical experiments, it's reasonable to expect our
# GP's mean predictions to get within 7% of predictions of those
# made by GPy.
assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0)
# Based on numerical experiments, it's reasonable to expect our
# GP's std predictions to get within 15% of predictions of those
# made by GPy.
assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0)
def test_y_multioutput():
# Test that GPR can deal with multi-dimensional target values
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_custom_optimizer(kernel):
# Test that GPR can use externally defined optimizers.
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert (gpr.log_marginal_likelihood(gpr.kernel_.theta) >
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_gpr_correct_error_message():
X = np.arange(12).reshape(6, -1)
y = np.ones(6)
kernel = DotProduct()
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
assert_raise_message(np.linalg.LinAlgError,
"The kernel, %s, is not returning a "
"positive definite matrix. Try gradually increasing "
"the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% kernel, gpr.fit, X, y)
@pytest.mark.parametrize('kernel', kernels)
def test_duplicate_input(kernel):
# Test GPR can handle two different output-values for the same input.
gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
def test_no_fit_default_predict():
    # Test that GPR predictions without fit do not break by default.
default_kernel = (C(1.0, constant_value_bounds="fixed") *
RBF(1.0, length_scale_bounds="fixed"))
gpr1 = GaussianProcessRegressor()
_, y_std1 = gpr1.predict(X, return_std=True)
_, y_cov1 = gpr1.predict(X, return_cov=True)
gpr2 = GaussianProcessRegressor(kernel=default_kernel)
_, y_std2 = gpr2.predict(X, return_std=True)
_, y_cov2 = gpr2.predict(X, return_cov=True)
assert_array_almost_equal(y_std1, y_std2)
assert_array_almost_equal(y_cov1, y_cov2)
@pytest.mark.parametrize('kernel', kernels)
def test_K_inv_reset(kernel):
y2 = f(X2).ravel()
# Test that self._K_inv is reset after a new fit
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert hasattr(gpr, '_K_inv')
assert gpr._K_inv is None
gpr.predict(X, return_std=True)
assert gpr._K_inv is not None
gpr.fit(X2, y2)
assert gpr._K_inv is None
gpr.predict(X2, return_std=True)
gpr2 = GaussianProcessRegressor(kernel=kernel).fit(X2, y2)
gpr2.predict(X2, return_std=True)
# the value of K_inv should be independent of the first fit
assert_array_equal(gpr._K_inv, gpr2._K_inv)
def test_warning_bounds():
kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
gpr = GaussianProcessRegressor(kernel=kernel)
assert_warns_message(ConvergenceWarning, "The optimal value found for "
"dimension 0 of parameter "
"length_scale is close to "
"the specified upper bound "
"0.001. Increasing the bound "
"and calling fit again may "
"find a better value.",
gpr.fit, X, y)
kernel_sum = (WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) +
RBF(length_scale_bounds=[1e3, 1e5]))
gpr_sum = GaussianProcessRegressor(kernel=kernel_sum)
with pytest.warns(None) as record:
gpr_sum.fit(X, y)
assert len(record) == 2
assert record[0].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"k1__noise_level is close to the "
"specified upper bound 0.001. "
"Increasing the bound and calling "
"fit again may find a better value.")
assert record[1].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"k2__length_scale is close to the "
"specified lower bound 1000.0. "
"Decreasing the bound and calling "
"fit again may find a better value.")
X_tile = np.tile(X, 2)
kernel_dims = RBF(length_scale=[1., 2.],
length_scale_bounds=[1e1, 1e2])
gpr_dims = GaussianProcessRegressor(kernel=kernel_dims)
with pytest.warns(None) as record:
gpr_dims.fit(X_tile, y)
assert len(record) == 2
assert record[0].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"length_scale is close to the "
"specified lower bound 10.0. "
"Decreasing the bound and calling "
"fit again may find a better value.")
assert record[1].message.args[0] == ("The optimal value found for "
"dimension 1 of parameter "
"length_scale is close to the "
"specified lower bound 10.0. "
"Decreasing the bound and calling "
"fit again may find a better value.")
def test_bound_check_fixed_hyperparameter():
# Regression test for issue #17943
# Check that having a hyperparameter with fixed bounds doesn't cause an
# error
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
kernel = k1 + k2
GaussianProcessRegressor(kernel=kernel).fit(X, y)
|
bnaul/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
Python
|
bsd-3-clause
| 21,004
|
[
"Gaussian"
] |
d9ec4d96d905b6b1e580f10e83c780e41e1db6d7fa4f31e8a3dbb6298bf9f606
|
#########
# firstVideo.py
# This program is part of the online PS-Drone-API-tutorial on www.playsheep.de/drone.
# It shows the general usage of the video-function of a Parrot AR.Drone 2.0 using the PS-Drone-API.
# The drone will stay on the ground.
# Dependencies: a POSIX OS, openCV2, PS-Drone-API 2.0 beta or higher.
# (w) J. Philipp de Graaff, www.playsheep.de, 2014
##########
# LICENCE:
# Artistic License 2.0 as seen on http://opensource.org/licenses/artistic-license-2.0 (retrieved December 2014)
# Visit www.playsheep.de/drone or see the PS-Drone-API-documentation for an abstract from the Artistic License 2.0.
###########
##### Suggested clean drone startup sequence #####
import time, sys
import api.ps_drone as ps_drone # Import PS-Drone
drone = ps_drone.Drone() # Start using drone
drone.startup() # Connects to drone and starts subprocesses
drone.reset() # Sets drone's status to good (LEDs turn green when red)
while (drone.getBattery()[0] == -1): time.sleep(0.1) # Waits until drone has done its reset
print "Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1]) # Gives a battery-status
drone.useDemoMode(True) # Just give me 15 basic datasets per second (is default anyway)
##### Mainprogram begin #####
drone.setConfigAllID() # Go to multiconfiguration-mode
drone.sdVideo() # Choose lower resolution (hdVideo() for...well, guess it)
drone.frontCam() # Choose front view
CDC = drone.ConfigDataCount
while CDC == drone.ConfigDataCount: time.sleep(0.0001) # Wait until it is done (after resync is done)
drone.startVideo() # Start video-function
drone.showVideo() # Display the video
##### And action !
print "Use <space> to toggle front- and groundcamera, any other key to stop"
IMC = drone.VideoImageCount # Number of encoded videoframes
stop = False
ground = False
while not stop:
while drone.VideoImageCount == IMC: time.sleep(0.01) # Wait until the next video-frame
IMC = drone.VideoImageCount
key = drone.getKey() # Gets a pressed key
if key==" ":
if ground: ground = False
else: ground = True
drone.groundVideo(ground) # Toggle between front- and groundcamera. Hint: options work for all videocommands
elif key and key != " ": stop = True
|
reixd/ps-drone
|
tutorials/firstVideo.py
|
Python
|
artistic-2.0
| 2,806
|
[
"VisIt"
] |
e8373d262970bb734fc739fbfb18a443b2135a2735a13043361f38aa942c1fea
|
from mayavi import mlab
import numpy as np
# visualize
# =========
def isosurface(volume, lattice=None, **kwargs):
    if lattice is None:
return mlab.contour3d(volume, **kwargs)
xx, yy, zz = lattice
return mlab.contour3d(xx, yy, zz, volume, **kwargs)
def mesh(vertices, faces, **kwargs):
xx, yy, zz = vertices
return mlab.triangular_mesh(xx, yy, zz, faces, **kwargs)
def line(point0, point1, **kwargs):
xx, yy, zz = np.asarray([point0, point1]).T
return mlab.plot3d(xx, yy, zz, **kwargs)
def dots(points, **kwargs):
xx, yy, zz = np.asarray(points).T
return mlab.points3d(xx, yy, zz, **kwargs)
def clear():
return mlab.clf()
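# A minimal usage sketch (an addition, not part of the original module): it
# renders an isosurface of a Gaussian blob together with a line and two dots.
# The grid spacing, contour level and styling values are illustrative choices.
if __name__ == '__main__':
    xx, yy, zz = np.mgrid[-2:2:40j, -2:2:40j, -2:2:40j]
    volume = np.exp(-(xx ** 2 + yy ** 2 + zz ** 2))  # Gaussian blob
    clear()
    isosurface(volume, lattice=(xx, yy, zz), contours=[0.5])
    line((0.0, 0.0, 0.0), (2.0, 0.0, 0.0), tube_radius=0.02)
    dots([(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)], scale_factor=0.1)
    mlab.show()  # blocks until the scene window is closed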
|
towle-lab/electrode-registration-app
|
core/visualize.py
|
Python
|
gpl-2.0
| 646
|
[
"Mayavi"
] |
254e28202b80b18c24ab1622a208b6a827c0b9f9527579aadb0b2aef23acc7bb
|
""" Main class for doing consistency checks, between files in:
- File Catalog
- TransformationSystem
Should be extended to include the Storage (in DIRAC)
"""
import os
import time
import sys
import types
import re
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient
from DIRAC.Resources.Utilities import checkArgumentFormat
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.Core.Utilities.Adler import compareAdler
class ConsistencyInspector( object ):
""" A class for handling some consistency checks
"""
def __init__( self, interactive = True, transClient = None, dm = None, fc = None ):
""" c'tor
        interactive: Data Manager (True) or DIRAC Agent (False)
transClient: TransformationClient() if None, else transClient params
dm: DataManager() if None, else dm params
fc: FileCatalog() if None, else fc params
One object for every production/directoriesList...
"""
self.interactive = interactive
self.transClient = TransformationClient() if transClient is None else transClient
self.dm = DataManager() if dm is None else dm
self.fc = FileCatalog() if fc is None else fc
self.dirac = Dirac()
# Base elements from which to start the consistency checks
self._prod = 0
self._bkQuery = None
self._fileType = []
self._fileTypesExcluded = []
self._lfns = []
self.noLFC = False
self.directories = []
# Accessory elements
self.runsList = []
self.runStatus = None
self.fromProd = None
self.transType = ''
self.cachedReplicas = {}
self.prcdWithDesc = []
self.prcdWithoutDesc = []
self.prcdWithMultDesc = []
self.nonPrcdWithDesc = []
self.nonPrcdWithoutDesc = []
self.nonPrcdWithMultDesc = []
self.descForPrcdLFNs = []
self.descForNonPrcdLFNs = []
self.removedFiles = []
self.absentLFNsInFC = []
self.existLFNsNoSE = {}
self.existLFNsBadReplicas = {}
self.existLFNsBadFiles = {}
self.commonAncestors = {}
self.multipleDescendants = {}
self.ancestors = {}
################################################################################
def getReplicasPresence( self, lfns ):
""" get the replicas using the standard DataManager.getReplicas()
"""
lfns = checkArgumentFormat(lfns)
present = set()
notPresent = set()
chunkSize = 1000
printProgress = ( len( lfns ) > chunkSize )
startTime = time.time()
self.__write( "Checking replicas for %d files%s" %
( len( lfns ), ( ' (chunks of %d)' % chunkSize ) if printProgress else '... ' ) )
for chunk in breakListIntoChunks( lfns, chunkSize ):
if printProgress:
self.__write( '.' )
for _ in range( 1, 10 ):
res = self.dm.getReplicas( chunk )
if res['OK']:
present.update( res['Value']['Successful'] )
self.cachedReplicas.update( res['Value']['Successful'] )
notPresent.update( res['Value']['Failed'] )
break
else:
gLogger.error( "\nError getting replicas from FC, retry", res['Message'] )
time.sleep( 0.1 )
self.__write( ' (%.1f seconds)\n' % ( time.time() - startTime ) )
gLogger.info( "Found %d files with replicas and %d without" % ( len( present ), len( notPresent ) ) )
return list( present ), list( notPresent )
################################################################################
def getReplicasPresenceFromDirectoryScan( self, lfns ):
""" Get replicas scanning the directories. Might be faster.
"""
lfns = checkArgumentFormat(lfns)
dirs = {}
present = []
notPresent = []
compare = True
for lfn in lfns:
dirN = os.path.dirname( lfn )
if lfn == dirN + '/':
compare = False
dirs.setdefault( dirN, [] ).append( lfn )
if compare:
self.__write( "Checking File Catalog for %d files from %d directories " % ( len( lfns ), len( dirs ) ) )
else:
self.__write( "Getting files from %d directories " % len( dirs ) )
startTime = time.time()
for dirN in sorted( dirs ):
startTime1 = time.time()
self.__write( '.' )
lfnsFound = self._getFilesFromDirectoryScan( dirN )
gLogger.verbose( "Obtained %d files in %.1f seconds" % ( len( lfnsFound ), time.time() - startTime1 ) )
if compare:
pr, notPr = self.__compareLFNLists( dirs[dirN], lfnsFound )
notPresent += notPr
present += pr
else:
present += lfnsFound
self.__write( ' (%.1f seconds)\n' % ( time.time() - startTime ) )
gLogger.info( "Found %d files with replicas and %d without" % ( len( present ), len( notPresent ) ) )
return present, notPresent
################################################################################
@staticmethod
def __compareLFNLists( lfns, lfnsFound ):
""" return files in both lists and files in lfns and not in lfnsFound
"""
lfns = checkArgumentFormat(lfns)
present = []
notPresent = lfns
startTime = time.time()
gLogger.verbose( "Comparing list of %d LFNs with second list of %d" % ( len( lfns ), len( lfnsFound ) ) )
if lfnsFound:
setLfns = set( lfns )
setLfnsFound = set( lfnsFound )
present = list( setLfns & setLfnsFound )
notPresent = list( setLfns - setLfnsFound )
gLogger.verbose( "End of comparison: %.1f seconds" % ( time.time() - startTime ) )
return present, notPresent
def _getFilesFromDirectoryScan( self, dirs ):
""" calls dm.getFilesFromDirectory
"""
level = gLogger.getLevel()
gLogger.setLevel( 'FATAL' )
res = self.dm.getFilesFromDirectory( dirs )
gLogger.setLevel( level )
if not res['OK']:
if 'No such file or directory' not in res['Message']:
gLogger.error( "Error getting files from directories %s:" % dirs, res['Message'] )
return []
if res['Value']:
lfnsFound = res['Value']
else:
lfnsFound = []
return lfnsFound
################################################################################
def _getTSFiles( self ):
""" Helper function - get files from the TS
"""
selectDict = { 'TransformationID': self.prod}
if self._lfns:
selectDict['LFN'] = self._lfns
elif self.runStatus and self.fromProd:
res = self.transClient.getTransformationRuns( {'TransformationID': self.fromProd, 'Status':self.runStatus} )
if not res['OK']:
gLogger.error( "Failed to get runs for transformation %d" % self.prod )
else:
if res['Value']:
self.runsList.extend( [run['RunNumber'] for run in res['Value'] if run['RunNumber'] not in self.runsList] )
gLogger.always( "%d runs selected" % len( res['Value'] ) )
elif not self.runsList:
gLogger.always( "No runs selected, check completed" )
DIRAC.exit( 0 )
if not self._lfns and self.runsList:
selectDict['RunNumber'] = self.runsList
res = self.transClient.getTransformation( self.prod )
if not res['OK']:
gLogger.error( "Failed to find transformation %s" % self.prod )
return [], [], []
status = res['Value']['Status']
if status not in ( 'Active', 'Stopped', 'Completed', 'Idle' ):
gLogger.always( "Transformation %s in status %s, will not check if files are processed" % ( self.prod, status ) )
processedLFNs = []
nonProcessedLFNs = []
nonProcessedStatuses = []
if self._lfns:
processedLFNs = self._lfns
else:
res = self.transClient.getTransformationFiles( selectDict )
if not res['OK']:
gLogger.error( "Failed to get files for transformation %d" % self.prod, res['Message'] )
return [], [], []
else:
processedLFNs = [item['LFN'] for item in res['Value'] if item['Status'] == 'Processed']
nonProcessedLFNs = [item['LFN'] for item in res['Value'] if item['Status'] != 'Processed']
nonProcessedStatuses = list( set( [item['Status'] for item in res['Value'] if item['Status'] != 'Processed'] ) )
return processedLFNs, nonProcessedLFNs, nonProcessedStatuses
################################################################################
def __write( self, text ):
if self.interactive:
sys.stdout.write( text )
sys.stdout.flush()
################################################################################
def _selectByFileType( self, lfnDict, fileTypes = None, fileTypesExcluded = None ):
""" Select only those files from the values of lfnDict that have a certain type
"""
if not lfnDict:
return {}
if not fileTypes:
fileTypes = self.fileType
if not fileTypesExcluded:
fileTypesExcluded = self.fileTypesExcluded
else:
fileTypesExcluded += [ft for ft in self.fileTypesExcluded if ft not in fileTypesExcluded]
    # lfnDict is a dictionary of dictionaries including the metadata; copy it two
    # levels deep so entries can be removed without mutating the input
    ancDict = dict( ( anc, dict( descs ) ) for anc, descs in lfnDict.items() )
if fileTypes == ['']:
fileTypes = []
# and loop on the original dictionaries
for ancestor in lfnDict:
for desc in lfnDict[ancestor]:
ft = lfnDict[ancestor][desc]['FileType']
if ft in fileTypesExcluded or ( fileTypes and ft not in fileTypes ):
ancDict[ancestor].pop( desc )
if not len( ancDict[ancestor] ):
ancDict.pop( ancestor )
return ancDict
@staticmethod
def _getFileTypesCount( lfnDict ):
""" return file types count
"""
ft_dict = {}
for ancestor in lfnDict:
t_dict = {}
for desc in lfnDict[ancestor]:
ft = lfnDict[ancestor][desc]['FileType']
t_dict[ft] = t_dict.setdefault( ft, 0 ) + 1
ft_dict[ancestor] = t_dict
return ft_dict
def __getLFNsFromFC( self ):
if not self.lfns:
directories = []
for dirName in self.__getDirectories():
if not dirName.endswith( '/' ):
dirName += '/'
directories.append( dirName )
present, notPresent = self.getReplicasPresenceFromDirectoryScan( directories )
gLogger.always( '%d files found in the FC' % len( present ) )
else:
present, notPresent = self.getReplicasPresence( self.lfns )
gLogger.always( 'Out of %d files, %d are in the FC, %d are not' \
% ( len( self.lfns ), len( present ), len( notPresent ) ) )
return present, notPresent
def compareChecksum( self, lfns ):
"""compare the checksum of the file in the FC and the checksum of the physical replicas.
Returns a dictionary containing 3 sub-dictionaries: one with files with missing PFN, one with
files with all replicas corrupted, and one with files with some replicas corrupted and at least
one good replica
"""
retDict = {'AllReplicasCorrupted' : {}, 'SomeReplicasCorrupted': {}, 'MissingPFN':{}, 'NoReplicas':{}}
chunkSize = 1000
replicas = {}
setLfns = set( lfns )
cachedLfns = setLfns & set( self.cachedReplicas )
for lfn in cachedLfns:
replicas[lfn] = self.cachedReplicas[lfn]
lfnsLeft = list( setLfns - cachedLfns )
startTime = time.time()
if lfnsLeft:
self.__write( "Get replicas for %d files (chunks of %d): " % ( len( lfnsLeft ), chunkSize ) )
for lfnChunk in breakListIntoChunks( lfnsLeft, chunkSize ):
self.__write( '.' )
replicasRes = self.dm.getReplicas( lfnChunk )
if not replicasRes['OK']:
gLogger.error( "error: %s" % replicasRes['Message'] )
S_ERROR("error: %s" % replicasRes['Message']) # raise RuntimeError( "error: %s" % replicasRes['Message'] )
replicasRes = replicasRes['Value']
if replicasRes['Failed']:
retDict['NoReplicas'].update( replicasRes['Failed'] )
replicas.update( replicasRes['Successful'] )
self.__write( ' (%.1f seconds)\n' % ( time.time() - startTime ) )
self.__write( "Get FC metadata for %d files to be checked: " % len( lfns ) )
metadata = {}
for lfnChunk in breakListIntoChunks( replicas.keys(), chunkSize ):
self.__write( '.' )
res = self.fc.getFileMetadata( lfnChunk )
print "res", res
if not res['OK']:
S_ERROR("error %s" % res['Message']) # raise RuntimeError( "error %s" % res['Message'] )
metadata.update( res['Value']['Successful'] )
self.__write( ' (%.1f seconds)\n' % ( time.time() - startTime ) )
gLogger.always( "Check existence and compare checksum file by file..." )
csDict = {}
seFiles = {}
surlLfn = {}
startTime = time.time()
# Reverse the LFN->SE dictionary
for lfn in replicas:
csDict.setdefault( lfn, {} )[ 'LFCChecksum' ] = metadata.get( lfn, {} ).get( 'Checksum' )
replicaDict = replicas[ lfn ]
for se in replicaDict:
surl = replicaDict[ se ]
surlLfn[surl] = lfn
seFiles.setdefault( se, [] ).append( surl )
checkSum = {}
self.__write( 'Getting checksum of %d replicas in %d SEs (chunks of %d): ' % ( len( surlLfn ), len( seFiles ), chunkSize ) )
pfnNotAvailable = {}
logLevel = gLogger.getLevel()
gLogger.setLevel( 'FATAL' )
for num, se in enumerate( sorted( seFiles ) ):
self.__write( '\n%d. At %s (%d files): ' % ( num, se, len( seFiles[se] ) ) )
oSe = StorageElement( se )
for surlChunk in breakListIntoChunks( seFiles[se], chunkSize ):
self.__write( '.' )
surlRes = oSe.getFileMetadata( surlChunk )
if not surlRes['OK']:
gLogger.error( "error StorageElement.getFileMetadata returns %s" % ( surlRes['Message'] ) )
raise RuntimeError( "error StorageElement.getFileMetadata returns %s" % ( surlRes['Message'] ) )
surlRes = surlRes['Value']
for surl in surlRes['Failed']:
lfn = surlLfn[surl]
gLogger.info( "SURL was not found at %s! %s " % ( se, surl ) )
pfnNotAvailable.setdefault( lfn, [] ).append( se )
for surl in surlRes['Successful']:
lfn = surlLfn[surl]
checkSum.setdefault( lfn, {} )[se] = surlRes['Successful'][ surl ]['Checksum']
self.__write( ' (%.1f seconds)\n' % ( time.time() - startTime ) )
gLogger.setLevel( logLevel )
retDict[ 'MissingPFN'] = {}
startTime = time.time()
self.__write( 'Verifying checksum of %d files (chunks of %d) ' % ( len( replicas ), chunkSize ) )
for num, lfn in enumerate( replicas ):
      # get the file catalog checksum (i.e. the LFN checksum from the LFC)
if not num % chunkSize:
self.__write( '.' )
replicaDict = replicas[ lfn ]
oneGoodReplica = False
allGoodReplicas = True
lfcChecksum = csDict[ lfn ].pop( 'LFCChecksum' )
for se in replicaDict:
# If replica doesn't exist skip check
if se in pfnNotAvailable.get( lfn, [] ):
allGoodReplicas = False
continue
surl = replicaDict[ se ]
# get the surls metadata and compare the checksum
surlChecksum = checkSum.get( lfn, {} ).get( se, '' )
if not surlChecksum or not compareAdler( lfcChecksum , surlChecksum ):
# if lfcChecksum does not match surlChecksum
csDict[ lfn ][ se ] = {'SURL':surl, 'PFNChecksum': surlChecksum}
gLogger.info( "ERROR!! checksum mismatch at %s for LFN %s: LFC checksum: %s , PFN checksum : %s "
% ( se, lfn, lfcChecksum, surlChecksum ) )
allGoodReplicas = False
else:
oneGoodReplica = True
if not oneGoodReplica:
if lfn in pfnNotAvailable:
gLogger.info( "=> All replicas are missing" )
retDict['MissingPFN'][ lfn] = 'All'
else:
gLogger.info( "=> All replicas have bad checksum" )
retDict['AllReplicasCorrupted'][ lfn ] = csDict[ lfn ]
elif not allGoodReplicas:
if lfn in pfnNotAvailable:
gLogger.info( "=> At least one replica missing" )
retDict['MissingPFN'][lfn] = pfnNotAvailable[lfn]
else:
gLogger.info( "=> At least one replica with good Checksum" )
retDict['SomeReplicasCorrupted'][ lfn ] = csDict[ lfn ]
self.__write( ' (%.1f seconds)\n' % ( time.time() - startTime ) )
return retDict
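  # A usage sketch (illustrative only, not part of the class): a typical
  # interactive checksum verification over a handful of LFNs; the LFNs below
  # are placeholders.
  #
  #   ci = ConsistencyInspector( interactive = True )
  #   result = ci.compareChecksum( [ '/some/lfn/1', '/some/lfn/2' ] )
  #   allBad = result['AllReplicasCorrupted']
  #   someBad = result['SomeReplicasCorrupted']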
################################################################################
# properties
def set_prod( self, value ):
""" Setter """
if value:
value = int( value )
res = self.transClient.getTransformation( value, extraParams = False )
if not res['OK']:
S_ERROR("Couldn't find transformation %d: %s" % ( value, res['Message'] )) #raise RuntimeError( "Couldn't find transformation %d: %s" % ( value, res['Message'] ) )
else:
self.transType = res['Value']['Type']
if self.interactive:
gLogger.info( "Production %d has type %s" % ( value, self.transType ) )
else:
value = 0
self._prod = value
def get_prod( self ):
""" Getter """
return self._prod
prod = property( get_prod, set_prod )
def set_fileType( self, value ):
""" Setter """
self._fileType = [ft.upper() for ft in value]
def get_fileType( self ):
""" Getter """
return self._fileType
fileType = property( get_fileType, set_fileType )
def set_fileTypesExcluded( self, value ):
""" Setter """
self._fileTypesExcluded = [ft.upper() for ft in value]
def get_fileTypesExcluded( self ):
""" Getter """
return self._fileTypesExcluded
fileTypesExcluded = property( get_fileTypesExcluded, set_fileTypesExcluded )
def set_lfns( self, value ):
""" Setter """
    if isinstance( value, types.StringTypes ):
value = [value]
value = [v.replace( ' ', '' ).replace( '//', '/' ) for v in value]
self._lfns = value
def get_lfns( self ):
""" Getter """
return self._lfns
lfns = property( get_lfns, set_lfns )
###############################################################################################
#
# This part was backported from DataIntegrityClient
#
#
# This section contains the specific methods for File Catalog->SE checks
#
def catalogDirectoryToSE( self, lfnDir ):
""" This obtains the replica and metadata information from the catalog for the supplied directory and checks against the storage elements.
"""
gLogger.info( "-" * 40 )
gLogger.info( "Performing the FC->SE check" )
gLogger.info( "-" * 40 )
if type( lfnDir ) in types.StringTypes:
lfnDir = [lfnDir]
res = self.__getCatalogDirectoryContents( lfnDir )
if not res['OK']:
return res
replicas = res['Value']['Replicas']
catalogMetadata = res['Value']['Metadata']
res = self.__checkPhysicalFiles( replicas, catalogMetadata )
if not res['OK']:
return res
resDict = {'CatalogMetadata':catalogMetadata, 'CatalogReplicas':replicas}
return S_OK( resDict )
def catalogFileToSE( self, lfns ):
""" This obtains the replica and metadata information from the catalog and checks against the storage elements.
"""
gLogger.info( "-" * 40 )
gLogger.info( "Performing the FC->SE check" )
gLogger.info( "-" * 40 )
if type( lfns ) in types.StringTypes:
lfns = [lfns]
res = self.__getCatalogMetadata( lfns )
if not res['OK']:
return res
catalogMetadata = res['Value']
res = self.__getCatalogReplicas( catalogMetadata.keys() )
if not res['OK']:
return res
replicas = res['Value']
res = self.__checkPhysicalFiles( replicas, catalogMetadata )
if not res['OK']:
return res
resDict = {'CatalogMetadata':catalogMetadata, 'CatalogReplicas':replicas}
return S_OK( resDict )
def checkPhysicalFiles( self, replicas, catalogMetadata, ses = None ):
""" This obtains takes the supplied replica and metadata information obtained from the catalog and checks against the storage elements.
"""
gLogger.info( "-" * 40 )
gLogger.info( "Performing the LFC->SE check" )
gLogger.info( "-" * 40 )
return self.__checkPhysicalFiles( replicas, catalogMetadata, ses = ses )
def __checkPhysicalFiles( self, replicas, catalogMetadata, ses = None ):
""" This obtains the physical file metadata and checks the metadata against the catalog entries
"""
seLfns = {}
for lfn, replicaDict in replicas.items():
for se, _url in replicaDict.items():
if ( ses ) and ( se not in ses ):
continue
seLfns.setdefault( se, [] ).append( lfn )
gLogger.info( '%s %s' % ( 'Storage Element'.ljust( 20 ), 'Replicas'.rjust( 20 ) ) )
for se in sorted( seLfns ):
files = len( seLfns[se] )
gLogger.info( '%s %s' % ( se.ljust( 20 ), str( files ).rjust( 20 ) ) )
lfns = seLfns[se]
sizeMismatch = []
res = self.__checkPhysicalFileMetadata( lfns, se )
if not res['OK']:
gLogger.error( 'Failed to get physical file metadata.', res['Message'] )
return res
for lfn, metadata in res['Value'].items():
if lfn in catalogMetadata:
if ( metadata['Size'] != catalogMetadata[lfn]['Size'] ): # and ( metadata['Size'] != 0 ):
sizeMismatch.append( ( lfn, 'deprecatedUrl', se, 'CatalogPFNSizeMismatch' ) )
if sizeMismatch:
self.__reportProblematicReplicas( sizeMismatch, se, 'CatalogPFNSizeMismatch' )
return S_OK()
def __checkPhysicalFileMetadata( self, lfns, se ):
""" Check obtain the physical file metadata and check the files are available
"""
gLogger.info( 'Checking the integrity of %s physical files at %s' % ( len( lfns ), se ) )
res = StorageElement( se ).getFileMetadata( lfns )
if not res['OK']:
gLogger.error( 'Failed to get metadata for lfns.', res['Message'] )
return res
lfnMetadataDict = res['Value']['Successful']
# If the replicas are completely missing
missingReplicas = []
for lfn, reason in res['Value']['Failed'].items():
if re.search( 'File does not exist', reason ):
missingReplicas.append( ( lfn, 'deprecatedUrl', se, 'PFNMissing' ) )
if missingReplicas:
self.__reportProblematicReplicas( missingReplicas, se, 'PFNMissing' )
lostReplicas = []
unavailableReplicas = []
zeroSizeReplicas = []
# If the files are not accessible
for lfn, lfnMetadata in lfnMetadataDict.items():
if lfnMetadata['Lost']:
lostReplicas.append( ( lfn, 'deprecatedUrl', se, 'PFNLost' ) )
if lfnMetadata['Unavailable']:
unavailableReplicas.append( ( lfn, 'deprecatedUrl', se, 'PFNUnavailable' ) )
if not lfnMetadata['Size']:
zeroSizeReplicas.append( ( lfn, 'deprecatedUrl', se, 'PFNZeroSize' ) )
if lostReplicas:
self.__reportProblematicReplicas( lostReplicas, se, 'PFNLost' )
if unavailableReplicas:
self.__reportProblematicReplicas( unavailableReplicas, se, 'PFNUnavailable' )
if zeroSizeReplicas:
self.__reportProblematicReplicas( zeroSizeReplicas, se, 'PFNZeroSize' )
gLogger.info( 'Checking the integrity of physical files at %s complete' % se )
return S_OK( lfnMetadataDict )
##########################################################################
#
# This section contains the specific methods for SE->File Catalog checks
#
def storageDirectoryToCatalog( self, lfnDir, storageElement ):
""" This obtains the file found on the storage element in the supplied directories and determines whether they exist in the catalog and checks their metadata elements
"""
gLogger.info( "-" * 40 )
gLogger.info( "Performing the SE->FC check at %s" % storageElement )
gLogger.info( "-" * 40 )
if type( lfnDir ) in types.StringTypes:
lfnDir = [lfnDir]
res = self.getStorageDirectoryContents( lfnDir, storageElement )
if not res['OK']:
return res
storageFileMetadata = res['Value']
if storageFileMetadata:
return self.__checkCatalogForSEFiles( storageFileMetadata, storageElement )
return S_OK( {'CatalogMetadata':{}, 'StorageMetadata':{}} )
def __checkCatalogForSEFiles( self, storageMetadata, storageElement ):
gLogger.info( 'Checking %s storage files exist in the catalog' % len( storageMetadata ) )
res = self.fc.getReplicas( storageMetadata )
if not res['OK']:
gLogger.error( "Failed to get replicas for LFN", res['Message'] )
return res
failedLfns = res['Value']['Failed']
successfulLfns = res['Value']['Successful']
notRegisteredLfns = []
for lfn in storageMetadata:
if lfn in failedLfns:
if 'No such file or directory' in failedLfns[lfn]:
notRegisteredLfns.append( ( lfn, 'deprecatedUrl', storageElement, 'LFNNotRegistered' ) )
failedLfns.pop( lfn )
elif storageElement not in successfulLfns[lfn]:
notRegisteredLfns.append( ( lfn, 'deprecatedUrl', storageElement, 'LFNNotRegistered' ) )
if notRegisteredLfns:
self.__reportProblematicReplicas( notRegisteredLfns, storageElement, 'LFNNotRegistered' )
if failedLfns:
return S_ERROR( 'Failed to obtain replicas' )
# For the LFNs found to be registered obtain the file metadata from the catalog and verify against the storage metadata
res = self.__getCatalogMetadata( storageMetadata )
if not res['OK']:
return res
catalogMetadata = res['Value']
sizeMismatch = []
for lfn, lfnCatalogMetadata in catalogMetadata.items():
lfnStorageMetadata = storageMetadata[lfn]
if ( lfnStorageMetadata['Size'] != lfnCatalogMetadata['Size'] ) and ( lfnStorageMetadata['Size'] != 0 ):
sizeMismatch.append( ( lfn, 'deprecatedUrl', storageElement, 'CatalogPFNSizeMismatch' ) )
if sizeMismatch:
self.__reportProblematicReplicas( sizeMismatch, storageElement, 'CatalogPFNSizeMismatch' )
gLogger.info( 'Checking storage files exist in the catalog complete' )
resDict = {'CatalogMetadata':catalogMetadata, 'StorageMetadata':storageMetadata}
return S_OK( resDict )
def getStorageDirectoryContents( self, lfnDir, storageElement ):
""" This obtains takes the supplied lfn directories and recursively obtains the files in the supplied storage element
"""
gLogger.info( 'Obtaining the contents for %s directories at %s' % ( len( lfnDir ), storageElement ) )
se = StorageElement( storageElement )
res = se.exists( lfnDir )
if not res['OK']:
gLogger.error( "Failed to obtain existance of directories", res['Message'] )
return res
for directory, error in res['Value']['Failed'].items():
      gLogger.error( 'Failed to determine existence of directory', '%s %s' % ( directory, error ) )
if res['Value']['Failed']:
      return S_ERROR( 'Failed to determine existence of directory' )
directoryExists = res['Value']['Successful']
activeDirs = []
for directory in sorted( directoryExists ):
exists = directoryExists[directory]
if exists:
activeDirs.append( directory )
allFiles = {}
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = se.listDirectory( currentDir )
activeDirs.remove( currentDir )
if not res['OK']:
gLogger.error( 'Failed to get directory contents', res['Message'] )
return res
elif currentDir in res['Value']['Failed']:
gLogger.error( 'Failed to get directory contents', '%s %s' % ( currentDir, res['Value']['Failed'][currentDir] ) )
return S_ERROR( res['Value']['Failed'][currentDir] )
else:
dirContents = res['Value']['Successful'][currentDir]
activeDirs.extend( se.getLFNFromURL( dirContents['SubDirs'] ).get( 'Value', {} ).get( 'Successful', [] ) )
fileURLMetadata = dirContents['Files']
fileMetadata = {}
res = se.getLFNFromURL( fileURLMetadata )
if not res['OK']:
gLogger.error( 'Failed to get directory content LFNs', res['Message'] )
return res
for url, error in res['Value']['Failed'].items():
gLogger.error( "Failed to get LFN for URL", "%s %s" % ( url, error ) )
if res['Value']['Failed']:
return S_ERROR( "Failed to get LFNs for PFNs" )
urlLfns = res['Value']['Successful']
for urlLfn, lfn in urlLfns.items():
fileMetadata[lfn] = fileURLMetadata[urlLfn]
allFiles.update( fileMetadata )
zeroSizeFiles = []
for lfn in sorted( allFiles ):
if os.path.basename( lfn ) == 'dirac_directory':
allFiles.pop( lfn )
else:
metadata = allFiles[lfn]
if not metadata['Size']:
zeroSizeFiles.append( ( lfn, 'deprecatedUrl', storageElement, 'PFNZeroSize' ) )
if zeroSizeFiles:
self.__reportProblematicReplicas( zeroSizeFiles, storageElement, 'PFNZeroSize' )
    gLogger.info( 'Obtained a total of %s files for directories at %s' % ( len( allFiles ), storageElement ) )
return S_OK( allFiles )
def __getCatalogDirectoryContents( self, lfnDir ):
""" Obtain the contents of the supplied directory
"""
gLogger.info( 'Obtaining the catalog contents for %s directories' % len( lfnDir ) )
activeDirs = lfnDir
allFiles = {}
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = self.fc.listDirectory( currentDir )
activeDirs.remove( currentDir )
if not res['OK']:
gLogger.error( 'Failed to get directory contents', res['Message'] )
return res
      elif currentDir in res['Value']['Failed']:
gLogger.error( 'Failed to get directory contents', '%s %s' % ( currentDir, res['Value']['Failed'][currentDir] ) )
else:
dirContents = res['Value']['Successful'][currentDir]
activeDirs.extend( dirContents['SubDirs'] )
allFiles.update( dirContents['Files'] )
zeroReplicaFiles = []
zeroSizeFiles = []
allReplicaDict = {}
allMetadataDict = {}
for lfn, lfnDict in allFiles.items():
lfnReplicas = {}
for se, replicaDict in lfnDict['Replicas'].items():
lfnReplicas[se] = replicaDict['PFN']
if not lfnReplicas:
zeroReplicaFiles.append( lfn )
allReplicaDict[lfn] = lfnReplicas
allMetadataDict[lfn] = lfnDict['MetaData']
if not lfnDict['MetaData']['Size']:
zeroSizeFiles.append( lfn )
if zeroReplicaFiles:
if not self.interactive:
self.__reportProblematicFiles( zeroReplicaFiles, 'LFNZeroReplicas' )
if zeroSizeFiles:
if not self.interactive:
self.__reportProblematicFiles( zeroSizeFiles, 'LFNZeroSize' )
    gLogger.info( 'Obtained a total of %s files for the supplied directories' % len( allMetadataDict ) )
resDict = {'Metadata':allMetadataDict, 'Replicas':allReplicaDict}
return S_OK( resDict )
def __getCatalogReplicas( self, lfns ):
""" Obtain the file replicas from the catalog while checking that there are replicas
"""
gLogger.info( 'Obtaining the replicas for %s files' % len( lfns ) )
zeroReplicaFiles = []
res = self.fc.getReplicas( lfns, allStatus = True )
if not res['OK']:
gLogger.error( 'Failed to get catalog replicas', res['Message'] )
return res
allReplicas = res['Value']['Successful']
for lfn, error in res['Value']['Failed'].items():
if re.search( 'File has zero replicas', error ):
zeroReplicaFiles.append( lfn )
if zeroReplicaFiles:
if not self.interactive:
self.__reportProblematicFiles( zeroReplicaFiles, 'LFNZeroReplicas' )
gLogger.info( 'Obtaining the replicas for files complete' )
return S_OK( allReplicas )
def __getCatalogMetadata( self, lfns ):
""" Obtain the file metadata from the catalog while checking they exist
"""
if not lfns:
return S_OK( {} )
gLogger.info( 'Obtaining the catalog metadata for %s files' % len( lfns ) )
missingCatalogFiles = []
zeroSizeFiles = []
res = self.fc.getFileMetadata( lfns )
if not res['OK']:
gLogger.error( 'Failed to get catalog metadata', res['Message'] )
return res
allMetadata = res['Value']['Successful']
for lfn, error in res['Value']['Failed'].items():
if re.search( 'No such file or directory', error ):
missingCatalogFiles.append( lfn )
if missingCatalogFiles:
if not self.interactive:
self.__reportProblematicFiles( missingCatalogFiles, 'LFNCatalogMissing' )
for lfn, metadata in allMetadata.items():
if metadata['Size'] == 0:
zeroSizeFiles.append( lfn )
if zeroSizeFiles:
if not self.interactive:
self.__reportProblematicFiles( zeroSizeFiles, 'LFNZeroSize' )
gLogger.info( 'Obtaining the catalog metadata complete' )
return S_OK( allMetadata )
def __reportProblematicFiles( self, lfns, reason ):
""" Simple wrapper function around setFileProblematic """
gLogger.info( 'The following %s files were found with %s' % ( len( lfns ), reason ) )
for lfn in sorted( lfns ):
gLogger.info( lfn )
res = DataIntegrityClient.setFileProblematic( lfns, reason, sourceComponent = 'DataIntegrityClient' )
if not res['OK']:
gLogger.info( 'Failed to update integrity DB with files', res['Message'] )
else:
gLogger.info( 'Successfully updated integrity DB with files' )
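# A minimal usage sketch of the SE->FC check defined above. The instance
# name, directory and storage-element strings are purely illustrative
# assumptions, not taken from the original code:
#
#   inspector = ConsistencyInspector()
#   res = inspector.storageDirectoryToCatalog( '/some/lfn/dir', 'SOME-SE' )
#   if res['OK']:
#     catalogMeta = res['Value']['CatalogMetadata']
#     storageMeta = res['Value']['StorageMetadata']
#   else:
#     gLogger.error( res['Message'] )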
|
vmendez/DIRAC
|
DataManagementSystem/Client/ConsistencyInspector.py
|
Python
|
gpl-3.0
| 33,838
|
[
"DIRAC"
] |
1842d3e6e6e9a3a66b57e198eba5eb0d34733133cec598d988dd90477bc529ea
|
#!/usr/bin/env python
"""buildpkg.py -- Build OS X packages for Apple's Installer.app.
This is an experimental command-line tool for building packages to be
installed with the Mac OS X Installer.app application.
It is much inspired by Apple's GUI tool called PackageMaker.app, which
seems to be part of the OS X developer tools installed in the folder
/Developer/Applications. But apparently there are other free tools that
do the same thing and are also named PackageMaker, like Brian Hill's:
http://personalpages.tds.net/~brian_hill/packagemaker.html
Beware of the multi-package features of Installer.app (which are not
yet supported here) that can potentially screw up your installation
and are discussed in these articles on Stepwise:
http://www.stepwise.com/Articles/Technical/Packages/InstallerWoes.html
http://www.stepwise.com/Articles/Technical/Packages/InstallerOnX.html
Besides using the PackageMaker class directly, by importing it inside
another module, say, there are additional ways of using this module:
the top-level buildPackage() function provides a shortcut to the same
feature and is also called when using this module from the command
line.
****************************************************************
NOTE: For now you should be able to run this even on a non-OS X
system and get something similar to a package, but without
the real archive (needs pax) and bom files (needs mkbom)
inside! This is only for providing a chance for testing to
folks without OS X.
****************************************************************
TODO:
- test pre-process and post-process scripts (Python ones?)
- handle multi-volume packages (?)
- integrate into distutils (?)
Dinu C. Gherman,
gherman@europemail.com
November 2001
!! USE AT YOUR OWN RISK !!
"""
__version__ = 0.2
__license__ = "FreeBSD"
import os, sys, glob, fnmatch, shutil, string, copy, getopt
from os.path import basename, dirname, join, islink, isdir, isfile
Error = "buildpkg.Error"
PKG_INFO_FIELDS = """\
Title
Version
Description
DefaultLocation
DeleteWarning
NeedsAuthorization
DisableStop
UseUserMask
Application
Relocatable
Required
InstallOnly
RequiresReboot
RootVolumeOnly
LongFilenames
LibrarySubdirectory
AllowBackRev
OverwritePermissions
InstallFat\
"""
######################################################################
# Helpers
######################################################################
# Convenience class, as suggested by /F.
class GlobDirectoryWalker:
"A forward iterator that traverses files in a directory tree."
def __init__(self, directory, pattern="*"):
self.stack = [directory]
self.pattern = pattern
self.files = []
self.index = 0
def __getitem__(self, index):
while 1:
try:
file = self.files[self.index]
self.index = self.index + 1
except IndexError:
# pop next directory from stack
self.directory = self.stack.pop()
self.files = os.listdir(self.directory)
self.index = 0
else:
# got a filename
fullname = join(self.directory, file)
if isdir(fullname) and not islink(fullname):
self.stack.append(fullname)
if fnmatch.fnmatch(file, self.pattern):
return fullname
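# A minimal sketch of driving the walker above: because it implements
# __getitem__ over successive integer indices, a plain for-loop iterates it
# until the directory stack runs dry and IndexError escapes, which Python's
# old iteration protocol treats as the end. The path is illustrative only:
#
#   for name in GlobDirectoryWalker("/tmp/sometree", "*.txt"):
#       print name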
######################################################################
# The real thing
######################################################################
class PackageMaker:
"""A class to generate packages for Mac OS X.
This is intended to create OS X packages (with extension .pkg)
containing archives of arbitrary files that the Installer.app
will be able to handle.
As of now, PackageMaker instances need to be created with the
title, version and description of the package to be built.
The package is built after calling the instance method
build(root, **options). It has the same name as the constructor's
title argument plus a '.pkg' extension and is located in the same
parent folder that contains the root folder.
E.g. this will create a package folder /my/space/distutils.pkg/:
pm = PackageMaker("distutils", "1.0.2", "Python distutils.")
pm.build("/my/space/distutils")
"""
packageInfoDefaults = {
'Title': None,
'Version': None,
'Description': '',
'DefaultLocation': '/',
'DeleteWarning': '',
'NeedsAuthorization': 'NO',
'DisableStop': 'NO',
'UseUserMask': 'YES',
'Application': 'NO',
'Relocatable': 'YES',
'Required': 'NO',
'InstallOnly': 'NO',
'RequiresReboot': 'NO',
'RootVolumeOnly' : 'NO',
'InstallFat': 'NO',
'LongFilenames': 'YES',
'LibrarySubdirectory': 'Standard',
'AllowBackRev': 'YES',
'OverwritePermissions': 'NO',
}
def __init__(self, title, version, desc):
"Init. with mandatory title/version/description arguments."
info = {"Title": title, "Version": version, "Description": desc}
self.packageInfo = copy.deepcopy(self.packageInfoDefaults)
self.packageInfo.update(info)
# variables set later
self.packageRootFolder = None
self.packageResourceFolder = None
self.sourceFolder = None
self.resourceFolder = None
def build(self, root, resources=None, **options):
"""Create a package for some given root folder.
With no 'resources' argument set it is assumed to be the same
as the root directory. Option items replace the default ones
in the package info.
"""
# set folder attributes
self.sourceFolder = root
        if resources is None:
self.resourceFolder = root
else:
self.resourceFolder = resources
# replace default option settings with user ones if provided
        fields = self.packageInfoDefaults.keys()
for k, v in options.items():
if k in fields:
self.packageInfo[k] = v
elif not k in ["OutputDir"]:
raise Error, "Unknown package option: %s" % k
# Check where we should leave the output. Default is current directory
outputdir = options.get("OutputDir", os.getcwd())
packageName = self.packageInfo["Title"]
        self.packageRootFolder = os.path.join(outputdir, packageName + ".pkg")
# do what needs to be done
self._makeFolders()
self._addInfo()
self._addBom()
self._addArchive()
self._addResources()
self._addSizes()
self._addLoc()
def _makeFolders(self):
"Create package folder structure."
# Not sure if the package name should contain the version or not...
# packageName = "%s-%s" % (self.packageInfo["Title"],
# self.packageInfo["Version"]) # ??
        contFolder = join(self.packageRootFolder, "Contents")
        self.packageResourceFolder = join(contFolder, "Resources")
        os.mkdir(self.packageRootFolder)
os.mkdir(contFolder)
os.mkdir(self.packageResourceFolder)
def _addInfo(self):
"Write .info file containing installing options."
# Not sure if options in PKG_INFO_FIELDS are complete...
info = ""
for f in string.split(PKG_INFO_FIELDS, "\n"):
if self.packageInfo.has_key(f):
info = info + "%s %%(%s)s\n" % (f, f)
info = info % self.packageInfo
base = self.packageInfo["Title"] + ".info"
path = join(self.packageResourceFolder, base)
f = open(path, "w")
f.write(info)
def _addBom(self):
"Write .bom file containing 'Bill of Materials'."
# Currently ignores if the 'mkbom' tool is not available.
try:
base = self.packageInfo["Title"] + ".bom"
bomPath = join(self.packageResourceFolder, base)
cmd = "mkbom %s %s" % (self.sourceFolder, bomPath)
res = os.system(cmd)
except:
pass
def _addArchive(self):
"Write .pax.gz file, a compressed archive using pax/gzip."
# Currently ignores if the 'pax' tool is not available.
cwd = os.getcwd()
# create archive
os.chdir(self.sourceFolder)
base = basename(self.packageInfo["Title"]) + ".pax"
self.archPath = join(self.packageResourceFolder, base)
cmd = "pax -w -f %s %s" % (self.archPath, ".")
res = os.system(cmd)
# compress archive
cmd = "gzip %s" % self.archPath
res = os.system(cmd)
os.chdir(cwd)
def _addResources(self):
"Add Welcome/ReadMe/License files, .lproj folders and scripts."
# Currently we just copy everything that matches the allowed
# filenames. So, it's left to Installer.app to deal with the
# same file available in multiple formats...
if not self.resourceFolder:
return
# find candidate resource files (txt html rtf rtfd/ or lproj/)
allFiles = []
for pat in string.split("*.txt *.html *.rtf *.rtfd *.lproj", " "):
pattern = join(self.resourceFolder, pat)
allFiles = allFiles + glob.glob(pattern)
# find pre-process and post-process scripts
# naming convention: packageName.{pre,post}_{upgrade,install}
# Alternatively the filenames can be {pre,post}_{upgrade,install}
# in which case we prepend the package name
packageName = self.packageInfo["Title"]
for pat in ("*upgrade", "*install", "*flight"):
pattern = join(self.resourceFolder, packageName + pat)
pattern2 = join(self.resourceFolder, pat)
allFiles = allFiles + glob.glob(pattern)
allFiles = allFiles + glob.glob(pattern2)
# check name patterns
files = []
for f in allFiles:
for s in ("Welcome", "License", "ReadMe"):
if string.find(basename(f), s) == 0:
files.append((f, f))
if f[-6:] == ".lproj":
files.append((f, f))
elif basename(f) in ["pre_upgrade", "pre_install", "post_upgrade", "post_install"]:
files.append((f, packageName+"."+basename(f)))
elif basename(f) in ["preflight", "postflight"]:
files.append((f, f))
elif f[-8:] == "_upgrade":
files.append((f,f))
elif f[-8:] == "_install":
files.append((f,f))
# copy files
for src, dst in files:
src = basename(src)
dst = basename(dst)
f = join(self.resourceFolder, src)
if isfile(f):
shutil.copy(f, os.path.join(self.packageResourceFolder, dst))
elif isdir(f):
# special case for .rtfd and .lproj folders...
d = join(self.packageResourceFolder, dst)
os.mkdir(d)
files = GlobDirectoryWalker(f)
for file in files:
shutil.copy(file, d)
def _addSizes(self):
"Write .sizes file with info about number and size of files."
# Not sure if this is correct, but 'installedSize' and
# 'zippedSize' are now in Bytes. Maybe blocks are needed?
# Well, Installer.app doesn't seem to care anyway, saying
# the installation needs 100+ MB...
numFiles = 0
installedSize = 0
zippedSize = 0
files = GlobDirectoryWalker(self.sourceFolder)
for f in files:
numFiles = numFiles + 1
installedSize = installedSize + os.lstat(f)[6]
try:
zippedSize = os.stat(self.archPath+ ".gz")[6]
except OSError: # ignore error
pass
base = self.packageInfo["Title"] + ".sizes"
        f = open(join(self.packageResourceFolder, base), "w")
        format = "NumFiles %d\nInstalledSize %d\nCompressedSize %d\n"
        f.write(format % (numFiles, installedSize, zippedSize))
        f.close()
def _addLoc(self):
"Write .loc file."
base = self.packageInfo["Title"] + ".loc"
        f = open(join(self.packageResourceFolder, base), "w")
        f.write('/')
        f.close()
# Shortcut function interface
def buildPackage(*args, **options):
"A Shortcut function for building a package."
o = options
title, version, desc = o["Title"], o["Version"], o["Description"]
pm = PackageMaker(title, version, desc)
apply(pm.build, list(args), options)
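# A minimal sketch of calling the shortcut above, mirroring the call pattern
# used in the tests below; the root path and package metadata are
# illustrative (the root folder is assumed to exist):
#
#   buildPackage("/tmp/myproj",
#                Title="myproj",
#                Version="0.1",
#                Description="An example package.")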
######################################################################
# Tests
######################################################################
def test0():
"Vanilla test for the distutils distribution."
pm = PackageMaker("distutils2", "1.0.2", "Python distutils package.")
pm.build("/Users/dinu/Desktop/distutils2")
def test1():
"Test for the reportlab distribution with modified options."
pm = PackageMaker("reportlab", "1.10",
"ReportLab's Open Source PDF toolkit.")
pm.build(root="/Users/dinu/Desktop/reportlab",
DefaultLocation="/Applications/ReportLab",
Relocatable="YES")
def test2():
"Shortcut test for the reportlab distribution with modified options."
buildPackage(
"/Users/dinu/Desktop/reportlab",
Title="reportlab",
Version="1.10",
Description="ReportLab's Open Source PDF toolkit.",
DefaultLocation="/Applications/ReportLab",
Relocatable="YES")
######################################################################
# Command-line interface
######################################################################
def printUsage():
"Print usage message."
format = "Usage: %s <opts1> [<opts2>] <root> [<resources>]"
print format % basename(sys.argv[0])
print
print " with arguments:"
print " (mandatory) root: the package root folder"
print " (optional) resources: the package resources folder"
print
print " and options:"
print " (mandatory) opts1:"
mandatoryKeys = string.split("Title Version Description", " ")
for k in mandatoryKeys:
print " --%s" % k
print " (optional) opts2: (with default values)"
pmDefaults = PackageMaker.packageInfoDefaults
optionalKeys = pmDefaults.keys()
for k in mandatoryKeys:
optionalKeys.remove(k)
optionalKeys.sort()
maxKeyLen = max(map(len, optionalKeys))
for k in optionalKeys:
format = " --%%s:%s %%s"
format = format % (" " * (maxKeyLen-len(k)))
print format % (k, repr(pmDefaults[k]))
def main():
"Command-line interface."
shortOpts = ""
keys = PackageMaker.packageInfoDefaults.keys()
longOpts = map(lambda k: k+"=", keys)
try:
opts, args = getopt.getopt(sys.argv[1:], shortOpts, longOpts)
except getopt.GetoptError, details:
print details
printUsage()
return
optsDict = {}
for k, v in opts:
optsDict[k[2:]] = v
ok = optsDict.keys()
if not (1 <= len(args) <= 2):
print "No argument given!"
elif not ("Title" in ok and \
"Version" in ok and \
"Description" in ok):
print "Missing mandatory option!"
else:
apply(buildPackage, args, optsDict)
return
printUsage()
# sample use:
# buildpkg.py --Title=distutils \
# --Version=1.0.2 \
# --Description="Python distutils package." \
# /Users/dinu/Desktop/distutils
if __name__ == "__main__":
main()
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Mac/scripts/buildpkg.py
|
Python
|
gpl-2.0
| 15,904
|
[
"Brian"
] |
cb0bab6fd63e3071c4c29b020c39eaf182666dc1fd42068cfaac5f99e4c6974e
|
# -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from hyperspy._signals.eels import EELSSpectrum
from hyperspy._signals.spectrum_simulation import SpectrumSimulation
class EELSSpectrumSimulation(SpectrumSimulation, EELSSpectrum):
pass
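# A minimal, HyperSpy-independent sketch of why the empty class body above is
# enough: Python's method resolution order lets the combined class inherit
# behaviour from both parents without redefining anything. The toy classes
# below are illustrative only:
#
#   class Simulation(object):
#       def simulate(self): return "simulated"
#   class Spectrum(object):
#       def plot(self): return "plotted"
#   class SimulatedSpectrum(Simulation, Spectrum):
#       pass
#   s = SimulatedSpectrum()
#   s.simulate(), s.plot()   # -> ('simulated', 'plotted')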
# @auto_replot
# def add_energy_instability(self, std):
# """Introduce random energy instability
#
# Parameters
# ----------
# std : float
# std in energy units of the energy instability.
# See also
# --------
# Spectrum.simulate
# """
# if abs(std) > 0:
# delta_map = np.random.normal(
# size = (self.xdimension, self.ydimension),
# scale = abs(std))
# else:
# delta_map = np.zeros((self.xdimension,
# self.ydimension))
# for edge in self.edges:
# edge.delta.map = delta_map
# edge.delta.already_set_map = np.ones((self.xdimension,
# self.ydimension), dtype = 'Bool')
# return delta_map
# def simulate(self, maps = None, energy_instability = 0,
# min_intensity = 0., max_intensity = 1.):
# """Create a simulated SI.
#
# If an image is provided, it will use each RGB color channel as the
# intensity map of each three elements that must be previously defined as
# a set in self.elements. Otherwise it will create a random map for each
# element defined.
#
# Parameters
# ----------
# maps : list/tuple of arrays
# A list with as many arrays as elements are defined.
# energy_instability : float
# standard deviation in energy units of the energy instability.
# min_intensity : float
# minimum edge intensity
# max_intensity : float
# maximum edge intensity
#
# Returns
# -------
#
# If energy_instability != 0 it returns the energy shift map
# """
# if maps is not None:
# self.xdimension = maps[0].shape[0]
# self.ydimension = maps[0].shape[1]
# self.xscale = 1.
# self.yscale = 1.
# i = 0
# if energy_instability > 0:
# delta_map = np.random.normal(np.zeros((self.xdimension,
# self.ydimension)), energy_instability)
# for edge in self.edges:
# edge.fine_structure_active = False
# if not edge.intensity.twin:
# edge.intensity.map = maps[i]
# edge.intensity.already_set_map = np.ones((
# self.xdimension, self.ydimension), dtype = 'Bool')
# i += 1
# if energy_instability != 0:
# instability_map = self.add_energy_instability(energy_instability)
# for edge in self.edges:
# edge.fetch_stored_values(0,0)
# self.create_data_cube()
# self.model = Model(self, auto_background=False)
# self.model.charge()
# self.model.generate_data_from_model()
# self.data_cube = self.model.model_cube
# self.type = 'simulation'
# else:
# print "No image defined. Producing a gaussian mixture image of the \
# elements"
# i = 0
# if energy_instability:
# delta_map = np.random.normal(np.zeros((self.xdimension,
# self.ydimension)), energy_instability)
# print delta_map.shape
# size = self.xdimension * self.ydimension
# for edge in self.edges:
# edge.fine_structure_active = False
# if not edge.intensity.twin:
# edge.intensity.map = np.random.uniform(0, max_intensity,
# size).reshape(self.xdimension, self.ydimension)
# edge.intensity.already_set_map = np.ones((self.xdimension,
# self.ydimension), dtype = 'Bool')
# if energy_instability:
# edge.delta.map = delta_map
# edge.delta.already_set_map = np.ones((self.xdimension,
# self.ydimension), dtype = 'Bool')
# i += 1
# self.create_data_cube()
# self.model = Model(self, auto_background=False)
# self.model.generate_data_from_model()
# self.data_cube = self.model.model_cube
# self.type = 'simulation'
# if energy_instability != 0:
# return instability_map
|
sillvan/hyperspy
|
hyperspy/_signals/eels_spectrum_simulation.py
|
Python
|
gpl-3.0
| 5,221
|
[
"Gaussian"
] |
d33763e3c5605c1ee01c9d6f8ce7e27436b41d390d3759f2b92c99ed2e82fb79
|
# Copyright 2002 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/dbdefs/__init__.py
|
Python
|
apache-2.0
| 225
|
[
"Biopython"
] |
f301855cc671e02677a10a34fb15c04b2926da5460bb37b4aed3fccbcad6f85f
|
"""Ordinary Least Squares regression classes."""
__author__ = "Luc Anselin luc.anselin@asu.edu, David C. Folch david.folch@asu.edu"
import numpy as np
import copy as COPY
import numpy.linalg as la
import user_output as USER
import summary_output as SUMMARY
import robust as ROBUST
from utils import spdot, sphstack, RegressionPropsY, RegressionPropsVM
__all__ = ["OLS"]
class BaseOLS(RegressionPropsY, RegressionPropsVM):
"""
Ordinary least squares (OLS) (note: no consistency checks, diagnostics or
constant added)
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
robust : string
If 'white', then a White consistent estimator of the
variance-covariance matrix is given. If 'hac', then a
HAC consistent estimator of the variance-covariance
matrix is given. Default set to None.
gwk : pysal W object
Kernel spatial weights needed for HAC estimation. Note:
matrix must have ones along the main diagonal.
sig2n_k : boolean
If True, then use n-k to estimate sigma^2. If False, use n.
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
utu : float
Sum of squared residuals
sig2 : float
Sigma squared used in computations
sig2n : float
Sigma squared (computed with n in the denominator)
sig2n_k : float
Sigma squared (computed with n-k in the denominator)
xtx : float
X'X
xtxi : float
(X'X)^-1
Examples
--------
>>> import numpy as np
>>> import pysal
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("CRIME"))
>>> X = np.array(X).T
>>> X = np.hstack((np.ones(y.shape),X))
>>> ols=BaseOLS(y,X)
>>> ols.betas
array([[ 46.42818268],
[ 0.62898397],
[ -0.48488854]])
>>> ols.vm
array([[ 1.74022453e+02, -6.52060364e+00, -2.15109867e+00],
[ -6.52060364e+00, 2.87200008e-01, 6.80956787e-02],
[ -2.15109867e+00, 6.80956787e-02, 3.33693910e-02]])
"""
def __init__(self, y, x, robust=None, gwk=None, sig2n_k=True):
self.x = x
self.xtx = spdot(self.x.T, self.x)
xty = spdot(self.x.T, y)
self.xtxi = la.inv(self.xtx)
self.betas = np.dot(self.xtxi, xty)
predy = spdot(self.x, self.betas)
u = y-predy
self.u = u
self.predy = predy
self.y = y
self.n, self.k = self.x.shape
if robust:
self.vm = ROBUST.robust_vm(reg=self, gwk=gwk)
self._cache = {}
if sig2n_k:
self.sig2 = self.sig2n_k
else:
self.sig2 = self.sig2n
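# A minimal numpy-only sketch of the estimator computed in BaseOLS.__init__
# above, i.e. the normal equations betas = (X'X)^-1 (X'y); the data below is
# random and purely illustrative:
#
#   import numpy as np
#   X = np.hstack((np.ones((20, 1)), np.random.random((20, 2))))
#   y = np.random.random((20, 1))
#   betas = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y))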
class OLS(BaseOLS):
"""
Ordinary least squares with results and diagnostics.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : pysal W object
Spatial weights object (required if running spatial
diagnostics)
robust : string
If 'white', then a White consistent estimator of the
variance-covariance matrix is given. If 'hac', then a
HAC consistent estimator of the variance-covariance
matrix is given. Default set to None.
gwk : pysal W object
Kernel spatial weights needed for HAC estimation. Note:
matrix must have ones along the main diagonal.
sig2n_k : boolean
If True, then use n-k to estimate sigma^2. If False, use n.
nonspat_diag : boolean
If True, then compute non-spatial diagnostics on
the regression.
spat_diag : boolean
If True, then compute Lagrange multiplier tests (requires
w). Note: see moran for further tests.
moran : boolean
If True, compute Moran's I on the residuals. Note:
requires spat_diag=True.
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
robust : string
Adjustment for robust standard errors
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
r2 : float
R squared
ar2 : float
Adjusted R squared
utu : float
Sum of squared residuals
sig2 : float
Sigma squared used in computations
sig2ML : float
Sigma squared (maximum likelihood)
f_stat : tuple
Statistic (float), p-value (float)
logll : float
Log likelihood
aic : float
Akaike information criterion
schwarz : float
Schwarz information criterion
std_err : array
1xk array of standard errors of the betas
t_stat : list of tuples
t statistic; each tuple contains the pair (statistic,
p-value), where each is a float
mulColli : float
Multicollinearity condition number
jarque_bera : dictionary
'jb': Jarque-Bera statistic (float); 'pvalue': p-value
(float); 'df': degrees of freedom (int)
breusch_pagan : dictionary
'bp': Breusch-Pagan statistic (float); 'pvalue': p-value
(float); 'df': degrees of freedom (int)
koenker_bassett : dictionary
'kb': Koenker-Bassett statistic (float); 'pvalue':
p-value (float); 'df': degrees of freedom (int)
white : dictionary
'wh': White statistic (float); 'pvalue': p-value (float);
'df': degrees of freedom (int)
lm_error : tuple
Lagrange multiplier test for spatial error model; tuple
contains the pair (statistic, p-value), where each is a
float
lm_lag : tuple
Lagrange multiplier test for spatial lag model; tuple
contains the pair (statistic, p-value), where each is a
float
rlm_error : tuple
Robust lagrange multiplier test for spatial error model;
tuple contains the pair (statistic, p-value), where each
is a float
rlm_lag : tuple
Robust lagrange multiplier test for spatial lag model;
tuple contains the pair (statistic, p-value), where each
is a float
lm_sarma : tuple
Lagrange multiplier test for spatial SARMA model; tuple
contains the pair (statistic, p-value), where each is a
float
moran_res : tuple
Moran's I for the residuals; tuple containing the triple
(Moran's I, standardized Moran's I, p-value)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
sig2n : float
Sigma squared (computed with n in the denominator)
sig2n_k : float
Sigma squared (computed with n-k in the denominator)
xtx : float
X'X
xtxi : float
(X'X)^-1
Examples
--------
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; also, the actual OLS class
requires data to be passed in as numpy arrays so the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an nx1 numpy array.
>>> hoval = db.by_col("HOVAL")
>>> y = np.array(hoval)
>>> y.shape = (len(hoval), 1)
Extract CRIME (crime) and INC (income) vectors from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). pysal.spreg.OLS adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("CRIME"))
>>> X = np.array(X).T
The minimum parameters needed to run an ordinary least squares regression
are the two numpy arrays containing the independent variable and dependent
variables respectively. To make the printed results more meaningful, the
user can pass in explicit names for the variables used; this is optional.
>>> ols = OLS(y, X, name_y='home value', name_x=['income','crime'], name_ds='columbus')
pysal.spreg.OLS computes the regression coefficients and their standard
errors, t-stats and p-values. It also computes a large battery of
diagnostics on the regression. All of these results can be independently
accessed as attributes of the regression object created by running
pysal.spreg.OLS. They can also be accessed at one time by printing the
summary attribute of the regression object. In the example below, the
parameter on crime is -0.4849, with a t-statistic of -2.6544 and p-value
of 0.01087.
>>> ols.betas
array([[ 46.42818268],
[ 0.62898397],
[ -0.48488854]])
>>> print ols.t_stat[2][0]
-2.65440864272
>>> print ols.t_stat[2][1]
0.0108745049098
>>> ols.r2
0.34951437785126105
Or we can easily obtain a full summary of all the results nicely formatted and
ready to be printed:
>>> print ols.summary
REGRESSION
----------
SUMMARY OF OUTPUT: ORDINARY LEAST SQUARES
-----------------------------------------
Data set : columbus
Dependent Variable : home value Number of Observations: 49
Mean dependent var : 38.4362 Number of Variables : 3
S.D. dependent var : 18.4661 Degrees of Freedom : 46
<BLANKLINE>
R-squared : 0.349514
Adjusted R-squared : 0.3212
Sum squared residual: 10647.015 F-statistic : 12.3582
Sigma-square : 231.457 Prob(F-statistic) : 5.064e-05
S.E. of regression : 15.214 Log likelihood : -201.368
Sigma-square ML : 217.286 Akaike info criterion : 408.735
S.E of regression ML: 14.7406 Schwarz criterion : 414.411
<BLANKLINE>
------------------------------------------------------------------------------------
Variable Coefficient Std.Error t-Statistic Probability
------------------------------------------------------------------------------------
CONSTANT 46.4281827 13.1917570 3.5194844 0.0009867
crime -0.4848885 0.1826729 -2.6544086 0.0108745
income 0.6289840 0.5359104 1.1736736 0.2465669
------------------------------------------------------------------------------------
<BLANKLINE>
REGRESSION DIAGNOSTICS
MULTICOLLINEARITY CONDITION NUMBER 12.537555
<BLANKLINE>
TEST ON NORMALITY OF ERRORS
TEST DF VALUE PROB
Jarque-Bera 2 39.706155 0.0000000
<BLANKLINE>
DIAGNOSTICS FOR HETEROSKEDASTICITY
RANDOM COEFFICIENTS
TEST DF VALUE PROB
Breusch-Pagan test 2 5.766791 0.0559445
Koenker-Bassett test 2 2.270038 0.3214160
<BLANKLINE>
SPECIFICATION ROBUST TEST
TEST DF VALUE PROB
White 5 2.906067 0.7144648
================================ END OF REPORT =====================================
If the optional parameters w and spat_diag are passed to pysal.spreg.OLS,
spatial diagnostics will also be computed for the regression. These
include Lagrange multiplier tests and Moran's I of the residuals. The w
parameter is a PySAL spatial weights matrix. In this example, w is built
directly from the shapefile columbus.shp, but w can also be read in from a
GAL or GWT file. In this case a rook contiguity weights matrix is built,
but PySAL also offers queen contiguity, distance weights and k nearest
neighbor weights among others. In the example, the Moran's I of the
residuals is 0.2037 with a standardized value of 2.5918 and a p-value of
0.009547.
>>> w = pysal.weights.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
>>> ols = OLS(y, X, w, spat_diag=True, moran=True, name_y='home value', name_x=['income','crime'], name_ds='columbus')
>>> ols.betas
array([[ 46.42818268],
[ 0.62898397],
[ -0.48488854]])
>>> print ols.moran_res[0]
0.20373540938
>>> print ols.moran_res[1]
2.59180452208
>>> print ols.moran_res[2]
0.00954740031251
"""
def __init__(self, y, x,\
w=None,\
robust=None, gwk=None, sig2n_k=True,\
nonspat_diag=True, spat_diag=False, moran=False,\
vm=False, name_y=None, name_x=None,\
name_w=None, name_gwk=None, name_ds=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y)
USER.check_robust(robust, gwk)
USER.check_spat_diag(spat_diag, w)
x_constant = USER.check_constant(x)
BaseOLS.__init__(self, y=y, x=x_constant, robust=robust,\
gwk=gwk, sig2n_k=sig2n_k)
self.title = "ORDINARY LEAST SQUARES"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.robust = USER.set_robust(robust)
self.name_w = USER.set_name_w(name_w, w)
self.name_gwk = USER.set_name_w(name_gwk, gwk)
SUMMARY.OLS(reg=self, vm=vm, w=w, nonspat_diag=nonspat_diag,\
spat_diag=spat_diag, moran=moran)
def _test():
import doctest
# the following line could be used to define an alternative to the '<BLANKLINE>' flag
#doctest.BLANKLINE_MARKER = 'something better than <BLANKLINE>'
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
import numpy as np
import pysal
db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
y_var = 'CRIME'
y = np.array([db.by_col(y_var)]).reshape(49,1)
x_var = ['INC','HOVAL']
x = np.array([db.by_col(name) for name in x_var]).T
w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
w.transform = 'r'
ols = OLS(y, x, w=w, nonspat_diag=True, spat_diag=True, name_y=y_var, name_x=x_var, name_ds='columbus', name_w='columbus.gal')
print ols.summary
|
AlanZatarain/pysal
|
pysal/spreg/ols.py
|
Python
|
bsd-3-clause
| 19,340
|
[
"COLUMBUS"
] |
e25921cb5ad34c3bd358e7116f55e65995b5be9b7d668e6fea0b95352bf8dea0
|
#!/usr/bin/python
# Copyright(c) 2009, Gentoo Foundation
# Copyright 2010 Brian Dolbec <brian.dolbec@gmail.com>
#
# License: GPL2/BSD
# $Header$
from __future__ import print_function
from tempfile import NamedTemporaryFile, mkdtemp
import unittest
import re
from gentoolkit.test.eclean.distsupport import *
import gentoolkit.eclean.search as search
from gentoolkit.eclean.search import DistfilesSearch
from gentoolkit.eclean.exclude import parseExcludeFile
"""Tests for eclean's distfiles search functions."""
class DistLimits(DistfilesSearch):
"""subclass the DistfilesSearch class in order to override a number of
functions to isolate & test"""
def __init__(self,
output=lambda x: None,
portdb=None,
vardb=None,
):
DistfilesSearch.__init__(self, output, portdb, vardb)
self.data = None
def set_data(self, data):
"""sets the data for the functions to return for
the test being performed"""
self.data = data
class TestCheckLimits(unittest.TestCase):
"""Test the eclean.search.DistfilesSearch._check_limits() group.
it will test [ _get_default_checks(), _check_limits(),
_isreg_check_(), _size_check_(), _time_check_(), _filenames_check_()]
"""
test_excludes = {
'blank': {},
'filenames': {
'filenames': {'help2man-1.37.1.tar.gz': re.compile('help2man-1.37.1.tar.gz')}
}
}
def setUp(self):
self.testdata = [
# test is_reg_limit alone, will give a base clean_me
{ 'test': 'is_reg_limit',
'params': (0, 0, self.test_excludes['blank']),
'results': FILES[:],
'output': [" - skipping size limit check",
" - skipping time limit check",
" - skipping exclude filenames check"
]
},
# test size_limit trip
{ 'test': 'size_limit',
'params': (1024000, 0, self.test_excludes['blank']),
'results': FILES[:3] + FILES[4:],
'output': [
" - skipping time limit check",
" - skipping exclude filenames check"
]
},
# test time_limit trip
{ 'test': 'time_limit',
'params': (0,1112671872, self.test_excludes['blank']),
'results': [FILES[4]], # + FILES[5:],
'output': [" - skipping size limit check",
" - skipping exclude filenames check"
]
},
# test filenames_limit trip
{ 'test': 'filenames_limit',
'params': (0, 0, self.test_excludes['filenames']),
'results': FILES[:1] + FILES[2:],
'output': [" - skipping size limit check",
" - skipping time limit check",
]
}
]
self.testwork = TestDisfiles()
self.testwork.setUp()
self.workdir = self.testwork.workdir
self.target_class = DistLimits() #DistCheckLimits()
self.output = OutputSimulator(self.callback)
self.target_class.output = self.output
self.callback_data = []
self.test_index = 0
def tearDown(self):
self.testwork.tearDown()
#pass
def get_test(self, num):
return self.testdata[num]
def callback(self, id, data):
self.callback_data.append(data)
def set_limits(self, test):
limit = {}
#set is_reg always to testdata[0]
t1 = self.testdata[0]
limit[t1['test']] = {}
name = test['test']
limit[name] = {}
limits = test['limits']
for i in range(6):
file = self.testwork.files[i]
limit[t1['test']][file] = t1['limits'][i]
if name != t1['test']:
limit[name][file] = limits[i]
return limit
def test_check_limits(self):
"""Testing DistfilesSearch._check_limits()"""
# pass in output=self.output.einfo
self.target_class.output = self.output.einfo
run_callbacks = []
run_results = []
print()
# run the tests
for i in range(4):
clean_me = {}
test = self.get_test(i)
#print("test =", test['test'])
if not test:
print("Error getting test data for index:", i)
#self.target_class.set_data(self.set_limits(test))
size_chk, time_chk, exclude = test["params"]
checks = self.target_class._get_default_checks(size_chk, time_chk, exclude, False)
clean_me = self.target_class._check_limits(self.workdir, checks, clean_me)
results = sorted(clean_me)
run_results.append(results)
self.callback_data.sort()
run_callbacks.append(self.callback_data)
self.callback_data = []
results = None
# check results
for i in range(4):
test = self.get_test(i)
print("test =", test['test'])
if not test:
print("Error getting test data for index:", i)
test['results'].sort()
#print("actual=", run_results[i])
#print("should-be=", test['results'])
self.failUnlessEqual(run_results[i], test["results"],
"/ntest_check_limits, test# %d, test=%s, diff=%s"
%(i, test['test'], str(set(run_results[i]).difference(test['results'])))
)
test['output'].sort()
self.failUnlessEqual(run_callbacks[i], test['output'])
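# A minimal sketch of running only the test case above in isolation (assumes
# this module can be imported; standard-library unittest calls only):
#
#   suite = unittest.TestLoader().loadTestsFromTestCase(TestCheckLimits)
#   unittest.TextTestRunner(verbosity=2).run(suite)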
class TestFetchRestricted(unittest.TestCase):
"""Tests eclean.search.DistfilesSearch._fetch_restricted and _unrestricted
functions
"""
def setUp(self):
self.vardb = Dbapi(cp_all=[], cpv_all=CPVS,
props=PROPS, cp_list=[], name="FAKE VARDB")
self.portdb = Dbapi(cp_all=[], cpv_all=CPVS[:4],
props=get_props(CPVS[:4]), cp_list=[], name="FAKE PORTDB")
# set a fetch restricted pkg
self.portdb._props[CPVS[0]]["RESTRICT"] = 'fetch'
self.callback_data = []
        self.output = OutputSimulator(self.callback)
self.target_class = DistfilesSearch(self.output.einfo, self.portdb, self.vardb)
        self.target_class.portdb = self.portdb
self.results = {}
self.testdata = {
'fetch_restricted1':{
'deprecated':
{'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz'
},
'pkgs':
{'sys-auth/consolekit-0.4.1': 'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2'
},
'output': [
'!!! "Deprecation Warning: Installed package: app-emulation/emul-linux-x86-baselibs-20100220\n\tIs no longer in the tree or an installed overlay\n'
]
},
'fetch_restricted2':{
'deprecated':
{'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz'
},
'pkgs':
{'sys-auth/consolekit-0.4.1': 'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2'
},
'output': [
'!!! "Deprecation Warning: Installed package: app-emulation/emul-linux-x86-baselibs-20100220\n\tIs no longer in the tree or an installed overlay\n',
' - Key Error looking up: app-portage/deprecated-pkg-1.0.0'
]
},
'unrestricted1':{
'deprecated':{
'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz'
},
'pkgs': {
'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz',
'sys-apps/help2man-1.37.1': 'mirror://gnu/help2man/help2man-1.37.1.tar.gz',
'sys-auth/consolekit-0.4.1': 'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2',
'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz',
'media-libs/sdl-pango-0.1.2': 'mirror://sourceforge/sdlpango/SDL_Pango-0.1.2.tar.gz http://zarb.org/~gc/t/SDL_Pango-0.1.2-API-adds.patch'
},
'output': [
'!!! "Deprecation Warning: Installed package: app-emulation/emul-linux-x86-baselibs-20100220\n\tIs no longer in the tree or an installed overlay\n',
]
},
'unrestricted2':{
'deprecated':{
'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz'
},
'pkgs': {
'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz',
'sys-apps/help2man-1.37.1': 'mirror://gnu/help2man/help2man-1.37.1.tar.gz',
'sys-auth/consolekit-0.4.1': 'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2',
'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz',
'media-libs/sdl-pango-0.1.2': 'mirror://sourceforge/sdlpango/SDL_Pango-0.1.2.tar.gz http://zarb.org/~gc/t/SDL_Pango-0.1.2-API-adds.patch'
},
'output': [
'!!! "Deprecation Warning: Installed package: app-emulation/emul-linux-x86-baselibs-20100220\n\tIs no longer in the tree or an installed overlay\n',
' - Key Error looking up: app-portage/deprecated-pkg-1.0.0'
]
}
}
def callback(self, id, data):
self.callback_data.append(data)
def test__fetch_restricted(self):
self.results = {}
pkgs, deprecated = self.target_class._fetch_restricted(None, CPVS)
self.record_results('fetch_restricted1', pkgs, deprecated)
self.callback_data = []
cpvs = CPVS[:]
cpvs.append('app-portage/deprecated-pkg-1.0.0')
pkgs, deprecated = self.target_class._fetch_restricted(None, cpvs)
self.record_results('fetch_restricted2', pkgs, deprecated)
self.check_results("test_fetch_restricted")
def test_unrestricted(self):
self.results = {}
pkgs, deprecated = self.target_class._unrestricted(None, CPVS)
self.record_results('unrestricted1', pkgs, deprecated)
self.callback_data = []
cpvs = CPVS[:]
cpvs.append('app-portage/deprecated-pkg-1.0.0')
pkgs, deprecated = self.target_class._unrestricted(None, cpvs)
self.record_results('unrestricted2', pkgs, deprecated)
self.check_results("test_unrestricted")
def check_results(self, test_name):
print("\nChecking results for %s,............" %test_name)
for key in sorted(self.results):
testdata = self.testdata[key]
results = self.results[key]
for item in sorted(testdata):
if sorted(results[item]) == sorted(testdata[item]):
test = "OK"
else:
test = "FAILED"
print("comparing %s, %s" %(key, item), test)
self.failUnlessEqual(sorted(testdata[item]), sorted(results[item]),
"\n%s: %s %s data does not match\nresult=" %(test_name, key, item) +\
str(results[item]) + "\ntestdata=" + str(testdata[item]))
def record_results(self, test, pkgs, deprecated):
self.results[test] = {'pkgs': pkgs,
'deprecated': deprecated,
'output': self.callback_data
}
def tearDown(self):
del self.portdb, self.vardb
class TestNonDestructive(unittest.TestCase):
"""Tests eclean.search.DistfilesSearch._non_destructive and _destructive
    functions, with additional usage tests of _fetch_restricted() and _unrestricted()
"""
def setUp(self):
self.vardb = Dbapi(cp_all=[], cpv_all=CPVS,
props=PROPS, cp_list=[], name="FAKE VARDB")
self.portdb = Dbapi(cp_all=[], cpv_all=CPVS[:4],
props=get_props(CPVS[:4]), cp_list=[], name="FAKE PORTDB")
print(self.portdb)
# set a fetch restricted pkg
self.portdb._props[CPVS[0]]["RESTRICT"] = 'fetch'
self.callback_data = []
self.output = OutputSimulator(self.callback)
self.target_class = DistfilesSearch(self.output.einfo, self.portdb, self.vardb)
search.exclDictExpand = self.exclDictExpand
self.exclude = parseExcludeFile(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'distfiles.exclude'), self.output.einfo)
#print(self.callback_data)
#print(self.exclude)
self.callback_data = []
self.results = {}
self.testdata = {
'non_destructive1':{
'deprecated':
{'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz'
},
'pkgs': {
'sys-auth/consolekit-0.4.1': 'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2',
'sys-apps/help2man-1.37.1': 'mirror://gnu/help2man/help2man-1.37.1.tar.gz',
'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz',
'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz',
'media-libs/sdl-pango-0.1.2': 'mirror://sourceforge/sdlpango/SDL_Pango-0.1.2.tar.gz http://zarb.org/~gc/t/SDL_Pango-0.1.2-API-adds.patch'
},
'output': [
' - getting complete ebuild list',
' - getting source file names for 5 ebuilds',
'!!! "Deprecation Warning: Installed package: app-emulation/emul-linux-x86-baselibs-20100220\n\tIs no longer in the tree or an installed overlay\n'
]
},
'non_destructive2':{
'deprecated': {
},
'pkgs': {
'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz',
'sys-auth/consolekit-0.4.1': 'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2',
'media-libs/sdl-pango-0.1.2': 'mirror://sourceforge/sdlpango/SDL_Pango-0.1.2.tar.gz http://zarb.org/~gc/t/SDL_Pango-0.1.2-API-adds.patch'
},
'output': [
' - getting complete ebuild list',
' - getting source file names for 3 installed ebuilds',
' - getting fetch-restricted source file names for 2 remaining ebuilds'
]
},
'non_destructive3':{
'deprecated':{
},
'pkgs': {
'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz',
'sys-auth/consolekit-0.4.1': 'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2',
'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz',
},
'output': [
' - getting complete ebuild list',
' - getting source file names for 2 installed ebuilds',
' - getting fetch-restricted source file names for 3 remaining ebuilds'
]
},
'destructive1':{
'deprecated':{
'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz'
},
'pkgs': {
'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz',
'sys-apps/help2man-1.37.1': 'mirror://gnu/help2man/help2man-1.37.1.tar.gz',
'sys-auth/consolekit-0.4.1': 'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2',
'app-emulation/emul-linux-x86-baselibs-20100220': 'mirror://gentoo/emul-linux-x86-baselibs-20100220.tar.gz',
'media-libs/sdl-pango-0.1.2': 'mirror://sourceforge/sdlpango/SDL_Pango-0.1.2.tar.gz http://zarb.org/~gc/t/SDL_Pango-0.1.2-API-adds.patch'
},
'output': [
' - processing 5 installed ebuilds', ' - processing excluded',
' - (5 of 0 total) additional excluded packages to get source filenames for',
'!!! "Deprecation Warning: Installed package: app-emulation/emul-linux-x86-baselibs-20100220\n\tIs no longer in the tree or an installed overlay\n'
]
},
'destructive2':{
'deprecated':{
},
'pkgs': {
},
'output': [
' - processing 0 installed packages',
' - processing excluded', ' - (0 of 0 total) additional excluded packages to get source filenames for'
]
},
'destructive3':{
'deprecated':{
},
'pkgs': {
'app-portage/gentoolkit-0.3.0_rc8-r1': 'mirror://gentoo/gentoolkit-0.3.0_rc8.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.3.0_rc8.tar.gz',
'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz',
'app-portage/gentoolkit-0.3.0_rc8': 'mirror://gentoo/gentoolkit-0.3.0_rc8.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.3.0_rc8.tar.gz',
'app-portage/gentoolkit-0.2.4.6-r1': 'mirror://gentoo/gentoolkit-0.2.4.6.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.2.4.6.tar.gz',
'app-portage/gentoolkit-0.3.0_rc7': 'mirror://gentoo/gentoolkit-0.3.0_rc7.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.3.0_rc7.tar.gz',
'app-portage/gentoolkit-0.2.4.6': 'mirror://gentoo/gentoolkit-0.2.4.6.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.2.4.6.tar.gz',
'app-portage/eix-0.19.2': 'mirror://sourceforge/eix/eix-0.19.2.tar.xz',
'app-portage/gentoolkit-0.2.4.5': 'mirror://gentoo/gentoolkit-0.2.4.5.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.2.4.5.tar.gz',
'app-portage/gentoolkit-0.3.0_rc9': 'mirror://gentoo/gentoolkit-0.3.0_rc9.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.3.0_rc9.tar.gz',
'app-portage/eix-0.20.1': 'mirror://sourceforge/eix/eix-0.20.1.tar.xz',
'app-portage/eix-0.20.2': 'mirror://berlios/eix/eix-0.20.2.tar.xz'
},
'output': [
' - processing excluded',
' - (10 of 10 total) additional excluded packages to get source filenames for'
]
},
'destructive4':{
'deprecated':{
},
'pkgs': {
'sys-auth/consolekit-0.4.1':
'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2',
'sys-apps/devicekit-power-014':
'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz',
'media-libs/sdl-pango-0.1.2':
'mirror://sourceforge/sdlpango/SDL_Pango-0.1.2.tar.gz http://zarb.org/~gc/t/SDL_Pango-0.1.2-API-adds.patch'
},
'output': [
' - processing 3 installed ebuilds',
' - processing excluded',
' - (3 of 0 total) additional excluded packages to get source filenames for'
]
},
'destructive5':{
'deprecated':{
},
'pkgs': {
'x11-base/xorg-server-1.7.5': 'http://xorg.freedesktop.org/releases/individual/xserver/xorg-server-1.7.5.tar.bz2',
'app-portage/gentoolkit-0.3.0_rc8-r1': 'mirror://gentoo/gentoolkit-0.3.0_rc8.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.3.0_rc8.tar.gz',
'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz',
'x11-misc/util-macros-1.6.0': 'http://xorg.freedesktop.org/releases/individual/util/util-macros-1.6.0.tar.bz2',
'app-portage/eix-0.19.2': 'mirror://sourceforge/eix/eix-0.19.2.tar.xz',
'app-portage/gentoolkit-0.3.0_rc8': 'mirror://gentoo/gentoolkit-0.3.0_rc8.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.3.0_rc8.tar.gz',
'app-portage/gentoolkit-0.2.4.6-r1': 'mirror://gentoo/gentoolkit-0.2.4.6.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.2.4.6.tar.gz',
'app-portage/gentoolkit-0.3.0_rc7': 'mirror://gentoo/gentoolkit-0.3.0_rc7.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.3.0_rc7.tar.gz',
'sys-auth/consolekit-0.4.1': 'http://www.freedesktop.org/software/ConsoleKit/dist/ConsoleKit-0.4.1.tar.bz2',
'app-portage/gentoolkit-0.2.4.6': 'mirror://gentoo/gentoolkit-0.2.4.6.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.2.4.6.tar.gz',
'media-libs/sdl-pango-0.1.2': 'mirror://sourceforge/sdlpango/SDL_Pango-0.1.2.tar.gz http://zarb.org/~gc/t/SDL_Pango-0.1.2-API-adds.patch',
'x11-libs/pixman-0.16.4': 'http://xorg.freedesktop.org/releases/individual/lib/pixman-0.16.4.tar.bz2',
'app-portage/gentoolkit-0.2.4.5': 'mirror://gentoo/gentoolkit-0.2.4.5.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.2.4.5.tar.gz',
'app-portage/gentoolkit-0.3.0_rc9': 'mirror://gentoo/gentoolkit-0.3.0_rc9.tar.gz http://dev.gentoo.org/~fuzzyray/distfiles/gentoolkit-0.3.0_rc9.tar.gz',
'app-portage/eix-0.20.1': 'mirror://sourceforge/eix/eix-0.20.1.tar.xz',
'app-portage/eix-0.20.2': 'mirror://berlios/eix/eix-0.20.2.tar.xz'
},
'output': [
' - processing 6 installed ebuilds',
' - processing excluded',
' - (16 of 10 total) additional excluded packages to get source filenames for'
]
}
}
def callback(self, id, data):
self.callback_data.append(data)
def exclDictExpand(self, exclude):
#print("Using Fake Testing exclDictExpand()")
return [
#'app-portage/layman',
'app-portage/eix',
'app-portage/gentoolkit',
#app-portage/portage-utils',
]
def test_non_destructive(self):
self.results = {}
pkgs, deprecated = self.target_class._non_destructive(destructive=False,
fetch_restricted=False, pkgs_=None)
self.record_results('non_destructive1', pkgs, deprecated)
pkgs = None
deprecated = None
self.callback_data = []
self.vardb._cpv_all=CPVS[:3]
self.vardb._props=get_props(CPVS[:3])
self.portdb._cpv_all=CPVS[:]
self.portdb._props=get_props(CPVS)
self.target_class.installed_cpvs = None
pkgs, deprecated = self.target_class._non_destructive(destructive=True,
fetch_restricted=True, pkgs_=None)
self.record_results('non_destructive2', pkgs, deprecated)
pkgs = None
deprecated = None
self.callback_data = []
self.vardb._cpv_all=CPVS[:2]
self.vardb._props=get_props(CPVS[:2])
self.portdb._cpv_all=CPVS[:]
self.portdb._props=get_props(CPVS)
# set a fetch restricted pkg
self.portdb._props[CPVS[4]]["RESTRICT"] = 'fetch'
pkgs = {'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz'}
pkgs, deprecated = self.target_class._non_destructive(destructive=True,
fetch_restricted=True, pkgs_=pkgs)
self.record_results('non_destructive3', pkgs, deprecated)
self.check_results("test_non_destructive")
def check_results(self, test_name):
print("\nChecking results for %s,............" %test_name)
for key in sorted(self.results):
testdata = self.testdata[key]
results = self.results[key]
for item in sorted(testdata):
if sorted(results[item]) == sorted(testdata[item]):
test = "OK"
else:
test = "FAILED"
print("comparing %s, %s..." %(key, item), test)
if test == "FAILED":
print("", sorted(results[item]), "\n", sorted(testdata[item]))
self.failUnlessEqual(sorted(testdata[item]), sorted(results[item]),
"\n%s: %s, %s data does not match\n"
%(test_name, key, item) + \
"result=" + str(results[item]) + "\ntestdata=" + str(testdata[item])
)
def record_results(self, test, pkgs, deprecated):
self.results[test] = {'pkgs': pkgs,
'deprecated': deprecated,
'output': self.callback_data
}
def test_destructive(self):
self.results = {}
pkgs, deprecated = self.target_class._destructive(package_names=False,
exclude={}, pkgs_=None, installed_included=False )
self.record_results('destructive1', pkgs, deprecated)
self.callback_data = []
self.vardb._cpv_all=CPVS[:3]
self.vardb._props=get_props(CPVS[:3])
self.portdb._cpv_all=CPVS[:]
self.portdb._props=get_props(CPVS)
pkgs, deprecated = self.target_class._destructive(package_names=True,
exclude={}, pkgs_=None, installed_included=False )
self.record_results('destructive2', pkgs, deprecated)
self.callback_data = []
cpvs = CPVS[2:4]
cpvs.extend(CPVS3)
self.vardb._cpv_all=sorted(cpvs)
# dict.update() returns None; merge first, then assign
PROPS.update(get_props(CPVS3))
self.vardb._props = PROPS
self.portdb._cpv_all=sorted(CPVS + CPVS2)
self.portdb._props=get_props(CPVS+CPVS2)
# set a fetch restricted pkg
self.portdb._props[CPVS[4]]["RESTRICT"] = 'fetch'
pkgs = {'sys-apps/devicekit-power-014': 'http://hal.freedesktop.org/releases/DeviceKit-power-014.tar.gz'}
pkgs, deprecated = self.target_class._destructive(package_names=True,
exclude={}, pkgs_=pkgs, installed_included=True )
self.record_results('destructive3', pkgs, deprecated)
self.callback_data = []
self.vardb._cpv_all=CPVS[:3]
self.vardb._props=get_props(CPVS[:3])
self.portdb._cpv_all=CPVS[:]
self.portdb._props=get_props(CPVS)
pkgs, deprecated = self.target_class._destructive(package_names=False,
exclude=self.exclude, pkgs_=None, installed_included=False )
self.record_results('destructive4', pkgs, deprecated)
self.check_results("test_destructive")
self.callback_data = []
self.vardb._cpv_all=CPVS[:3]
self.vardb._cpv_all.extend(CPVS3)
self.vardb._props=get_props(self.vardb._cpv_all)
self.portdb._cpv_all=CPVS2
#self.portdb._cpv_all.extend(CPVS2)
self.portdb._props=PROPS
pkgs, deprecated = self.target_class._destructive(package_names=False,
exclude=self.exclude, pkgs_=None, installed_included=False )
self.record_results('destructive5', pkgs, deprecated)
self.check_results("test_destructive")
def tearDown(self):
del self.portdb, self.vardb
class TestRemoveProtected(unittest.TestCase):
"""tests the eclean.search.DistfilesSearch._remove_protected()
"""
def setUp(self):
self.target_class = DistfilesSearch(lambda x: None)
self.results = {'layman-1.2.5.tar.gz': '/path/to/some/where/layman-1.2.5.tar.gz'}
def test_remove_protected(self):
results = self.target_class._remove_protected(PKGS, CLEAN_ME)
self.failUnlessEqual(results, self.results,
"\ntest_remove_protected: data does not match\nresult=" +\
str(results) + "\ntestdata=" + str(self.results))
def test_main():
# TestLoader.loadTestsFromTestCase() returns a suite rather than
# registering it, so collect the returned suites before running them
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(TestCheckLimits))
suite.addTests(loader.loadTestsFromTestCase(TestFetchRestricted))
suite.addTests(loader.loadTestsFromTestCase(TestNonDestructive))
suite.addTests(loader.loadTestsFromTestCase(TestRemoveProtected))
unittest.TextTestRunner(verbosity=2).run(suite)
test_main.__test__ = False
if __name__ == '__main__':
test_main()
|
dol-sen/gentoolkit
|
pym/gentoolkit/test/eclean/test_search.py
|
Python
|
gpl-2.0
| 24,909
|
[
"Brian"
] |
d0c99353526d4d12c994593f3becd48275823024d51c8682c8e14ef2316bc441
|
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# ancestor.py - generic DAG ancestor algorithm for mercurial
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import collections
import heapq
from .node import nullrev
from .pycompat import range
def commonancestorsheads(pfunc, *nodes):
"""Returns a set with the heads of all common ancestors of all nodes,
heads(::nodes[0] and ::nodes[1] and ...).
pfunc must return a list of parent vertices for a given vertex.
"""
# Each node in 'nodes' is given a unique marker implemented as bit masks.
# A node iterates over its parents, passing its set of markers on to them.
# When a node receives multiple markers, that indicates it is an ancestor of
# the corresponding nodes. A node that receives all the markers is
# a common ancestor.
#
# This process continues until all potential ancestors have been found, and
# those which are not heads are ruled out.
if not isinstance(nodes, set):
nodes = set(nodes)
if nullrev in nodes:
return set()
if len(nodes) <= 1:
return nodes
allseen = (1 << len(nodes)) - 1
seen = [0] * (max(nodes) + 1)
for i, n in enumerate(nodes):
seen[n] = 1 << i
poison = 1 << (i + 1)
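# 'poison' is the first bit above every node marker: once a vertex's
# seen-value includes it, that vertex is a known non-head ancestor and
# only propagates the poison further down.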
gca = set()
interesting = len(nodes)
nv = len(seen) - 1
while nv >= 0 and interesting:
v = nv
nv -= 1
if not seen[v]:
continue
sv = seen[v]
if sv < poison:
interesting -= 1
if sv == allseen:
gca.add(v)
sv |= poison
if v in nodes:
# history is linear
return {v}
if sv < poison:
for p in pfunc(v):
sp = seen[p]
if p == nullrev:
continue
if sp == 0:
seen[p] = sv
interesting += 1
elif sp != sv:
seen[p] |= sv
else:
for p in pfunc(v):
if p == nullrev:
continue
sp = seen[p]
if sp and sp < poison:
interesting -= 1
seen[p] = sv
return gca
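# Illustrative sketch (toy data, not part of the original module):
#
#       0
#      / \
#     1   2        parent lists: 1, 2 -> [0]; 3 -> [1, 2]; 0 -> [nullrev]
#      \ /
#       3
#
# _toy_parents = {0: [nullrev], 1: [0], 2: [0], 3: [1, 2]}
# commonancestorsheads(_toy_parents.__getitem__, 1, 2) returns {0}: node 0
# is the only vertex reachable from both 1 and 2, hence the sole head of
# their common ancestors.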
def ancestors(pfunc, *orignodes):
"""
Returns the common ancestors of the given nodes that are furthest
from a root (as measured by longest path).
pfunc must return a list of parent vertices for a given vertex.
"""
def deepest(nodes):
interesting = {}
count = max(nodes) + 1
depth = [0] * count
seen = [0] * count
mapping = []
for (i, n) in enumerate(sorted(nodes)):
depth[n] = 1
b = 1 << i
seen[n] = b
interesting[b] = 1
mapping.append((b, n))
nv = count - 1
while nv >= 0 and len(interesting) > 1:
v = nv
nv -= 1
dv = depth[v]
if dv == 0:
continue
sv = seen[v]
for p in pfunc(v):
if p == nullrev:
continue
dp = depth[p]
nsp = sp = seen[p]
if dp <= dv:
depth[p] = dv + 1
if sp != sv:
interesting[sv] += 1
nsp = seen[p] = sv
if sp:
interesting[sp] -= 1
if interesting[sp] == 0:
del interesting[sp]
elif dv == dp - 1:
nsp = sp | sv
if nsp == sp:
continue
seen[p] = nsp
interesting.setdefault(nsp, 0)
interesting[nsp] += 1
interesting[sp] -= 1
if interesting[sp] == 0:
del interesting[sp]
interesting[sv] -= 1
if interesting[sv] == 0:
del interesting[sv]
if len(interesting) != 1:
return []
k = 0
for i in interesting:
k |= i
return set(n for (i, n) in mapping if k & i)
gca = commonancestorsheads(pfunc, *orignodes)
if len(gca) <= 1:
return gca
return deepest(gca)
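# Illustrative sketch (toy data, not part of the original module): with
# parents = {0: [nullrev], 1: [0], 2: [0], 3: [1], 4: [3, 2], 5: [3, 2]},
# commonancestorsheads() finds two heads, {2, 3}, for nodes 4 and 5;
# deepest() then keeps only {3}, because 3 lies a longest-path step
# further from the root than 2.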
class incrementalmissingancestors(object):
"""persistent state used to calculate missing ancestors incrementally
Although similar in spirit to lazyancestors below, this is a separate class
because trying to support contains and missingancestors operations with the
same internal data structures adds needless complexity."""
def __init__(self, pfunc, bases):
self.bases = set(bases)
if not self.bases:
self.bases.add(nullrev)
self.pfunc = pfunc
def hasbases(self):
"""whether the common set has any non-trivial bases"""
return self.bases and self.bases != {nullrev}
def addbases(self, newbases):
"""grow the ancestor set by adding new bases"""
self.bases.update(newbases)
def removeancestorsfrom(self, revs):
"""remove all ancestors of bases from the set revs (in place)"""
bases = self.bases
pfunc = self.pfunc
revs.difference_update(bases)
# nullrev is always an ancestor
revs.discard(nullrev)
if not revs:
return
# anything in revs > start is definitely not an ancestor of bases
# revs <= start needs to be investigated
start = max(bases)
keepcount = sum(1 for r in revs if r > start)
if len(revs) == keepcount:
# no revs to consider
return
for curr in range(start, min(revs) - 1, -1):
if curr not in bases:
continue
revs.discard(curr)
bases.update(pfunc(curr))
if len(revs) == keepcount:
# no more potential revs to discard
break
def missingancestors(self, revs):
"""return all the ancestors of revs that are not ancestors of self.bases
This may include elements from revs.
Equivalent to the revset (::revs - ::self.bases). Revs are returned in
revision number order, which is a topological order."""
revsvisit = set(revs)
basesvisit = self.bases
pfunc = self.pfunc
bothvisit = revsvisit.intersection(basesvisit)
revsvisit.difference_update(bothvisit)
if not revsvisit:
return []
start = max(max(revsvisit), max(basesvisit))
# At this point, we hold the invariants that:
# - revsvisit is the set of nodes we know are an ancestor of at least
# one of the nodes in revs
# - basesvisit is the same for bases
# - bothvisit is the set of nodes we know are ancestors of at least one
# of the nodes in revs and one of the nodes in bases. bothvisit and
# revsvisit are mutually exclusive, but bothvisit is a subset of
# basesvisit.
# Now we walk down in reverse topo order, adding parents of nodes
# already visited to the sets while maintaining the invariants. When a
# node is found in both revsvisit and basesvisit, it is removed from
# revsvisit and added to bothvisit. When revsvisit becomes empty, there
# are no more ancestors of revs that aren't also ancestors of bases, so
# exit.
missing = []
for curr in range(start, nullrev, -1):
if not revsvisit:
break
if curr in bothvisit:
bothvisit.remove(curr)
# curr's parents might have made it into revsvisit through
# another path
for p in pfunc(curr):
revsvisit.discard(p)
basesvisit.add(p)
bothvisit.add(p)
continue
if curr in revsvisit:
missing.append(curr)
revsvisit.remove(curr)
thisvisit = revsvisit
othervisit = basesvisit
elif curr in basesvisit:
thisvisit = basesvisit
othervisit = revsvisit
else:
# not an ancestor of revs or bases: ignore
continue
for p in pfunc(curr):
if p == nullrev:
pass
elif p in othervisit or p in bothvisit:
# p is implicitly in thisvisit. This means p is or should be
# in bothvisit
revsvisit.discard(p)
basesvisit.add(p)
bothvisit.add(p)
else:
# visit later
thisvisit.add(p)
missing.reverse()
return missing
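# Illustrative sketch (toy data, not part of the original module): on a
# linear history 0 <- 1 <- 2 <- 3 with bases = [1],
#   inc = incrementalmissingancestors(parents.__getitem__, [1])
#   inc.missingancestors([3])  ->  [2, 3]
# i.e. everything reachable from 3 that is not already an ancestor of the
# bases, in revision-number (topological) order.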
class lazyancestors(object):
def __init__(self, pfunc, revs, stoprev=0, inclusive=False):
"""Create a new object generating ancestors for the given revs. Does
not generate revs lower than stoprev.
This is computed lazily starting from revs. The object supports
iteration and membership.
cl should be a changelog and revs should be an iterable. inclusive is
a boolean that indicates whether revs should be included. Revs lower
than stoprev will not be generated.
Result does not include the null revision."""
self._parentrevs = pfunc
self._initrevs = revs
self._stoprev = stoprev
self._inclusive = inclusive
# Initialize data structures for __contains__.
# For __contains__, we use a heap rather than a deque because
# (a) it minimizes the number of parentrevs calls made
# (b) it makes the loop termination condition obvious
# Python's heap is a min-heap. Multiply all values by -1 to convert it
# into a max-heap.
self._containsvisit = [-rev for rev in revs]
heapq.heapify(self._containsvisit)
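# e.g. revs = [5, 3, 9] heapifies to a min-heap rooted at -9, so
# -heappop(visit) below always yields the largest pending revision first.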
if inclusive:
self._containsseen = set(revs)
else:
self._containsseen = set()
def __nonzero__(self):
"""False if the set is empty, True otherwise."""
try:
next(iter(self))
return True
except StopIteration:
return False
__bool__ = __nonzero__
def __iter__(self):
"""Generate the ancestors of _initrevs in reverse topological order.
If inclusive is False, yield a sequence of revision numbers starting
with the parents of each revision in revs, i.e., each revision is *not*
considered an ancestor of itself. Results are in breadth-first order:
parents of each rev in revs, then parents of those, etc.
If inclusive is True, yield all the revs first (ignoring stoprev),
then yield all the ancestors of revs as when inclusive is False.
If an element in revs is an ancestor of a different rev it is not
yielded again."""
seen = set()
revs = self._initrevs
if self._inclusive:
for rev in revs:
yield rev
seen.update(revs)
parentrevs = self._parentrevs
stoprev = self._stoprev
visit = collections.deque(revs)
see = seen.add
schedule = visit.append
while visit:
for parent in parentrevs(visit.popleft()):
if parent >= stoprev and parent not in seen:
schedule(parent)
see(parent)
yield parent
def __contains__(self, target):
"""Test whether target is an ancestor of self._initrevs."""
# Trying to do both __iter__ and __contains__ using the same visit
# heap and seen set is complex enough that it slows down both. Keep
# them separate.
seen = self._containsseen
if target in seen:
return True
parentrevs = self._parentrevs
visit = self._containsvisit
stoprev = self._stoprev
heappop = heapq.heappop
heappush = heapq.heappush
see = seen.add
targetseen = False
while visit and -visit[0] > target and not targetseen:
for parent in parentrevs(-heappop(visit)):
if parent < stoprev or parent in seen:
continue
# We need to make sure we push all parents into the heap so
# that we leave it in a consistent state for future calls.
heappush(visit, -parent)
see(parent)
if parent == target:
targetseen = True
return targetseen
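# Illustrative sketch (toy data, not part of the original module):
# lazyancestors only walks as far as a query requires.
#   parents = {0: [nullrev], 1: [0], 2: [1], 3: [2]}
#   lazy = lazyancestors(parents.__getitem__, [3])
#   1 in lazy    ->  True   (pops the heap down to rev 1)
#   list(lazy)   ->  [2, 1, 0]   (parents first; the revs themselves are
#                                 excluded since inclusive=False)
#   0 in lazyancestors(parents.__getitem__, [3], stoprev=2)  ->  False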
|
facebookexperimental/eden
|
eden/scm/edenscm/mercurial/ancestor.py
|
Python
|
gpl-2.0
| 13,106
|
[
"VisIt"
] |
700ed7d43a5114f124b3d8413075a99b2d67705cf44d438e5d6c1e2a66984856
|
#!/usr/bin/env python
'''
Fly ArduPlane in SITL
AP_FLAKE8_CLEAN
'''
from __future__ import print_function
import math
import os
import signal
import sys
import time
from pymavlink import quaternion
from pymavlink import mavextra
from pymavlink import mavutil
from common import AutoTest
from common import AutoTestTimeoutException
from common import NotAchievedException
from common import PreconditionFailedException
from pymavlink.rotmat import Vector3
from pysim import vehicleinfo
import operator
# get location of scripts
testdir = os.path.dirname(os.path.realpath(__file__))
SITL_START_LOCATION = mavutil.location(-35.362938, 149.165085, 585, 354)
WIND = "0,180,0.2" # speed,direction,variance
class AutoTestPlane(AutoTest):
@staticmethod
def get_not_armable_mode_list():
return []
@staticmethod
def get_not_disarmed_settable_modes_list():
return ["FOLLOW"]
@staticmethod
def get_no_position_not_settable_modes_list():
return []
@staticmethod
def get_position_armable_modes_list():
return ["GUIDED", "AUTO"]
@staticmethod
def get_normal_armable_modes_list():
return ["MANUAL", "STABILIZE", "ACRO"]
def log_name(self):
return "ArduPlane"
def test_filepath(self):
return os.path.realpath(__file__)
def sitl_start_location(self):
return SITL_START_LOCATION
def defaults_filepath(self):
return os.path.join(testdir, 'default_params/plane-jsbsim.parm')
def set_current_test_name(self, name):
self.current_test_name_directory = "ArduPlane_Tests/" + name + "/"
def default_frame(self):
return "plane-elevrev"
def apply_defaultfile_parameters(self):
# plane passes in a defaults_filepath in place of applying
# parameters afterwards.
pass
def is_plane(self):
return True
def get_stick_arming_channel(self):
return int(self.get_parameter("RCMAP_YAW"))
def get_disarm_delay(self):
return int(self.get_parameter("LAND_DISARMDELAY"))
def set_autodisarm_delay(self, delay):
self.set_parameter("LAND_DISARMDELAY", delay)
def takeoff(self, alt=150, alt_max=None, relative=True):
"""Takeoff to altitude."""
if alt_max is None:
alt_max = alt + 30
self.change_mode("FBWA")
self.wait_ready_to_arm()
self.arm_vehicle()
# some rudder to counteract the prop torque
self.set_rc(4, 1700)
# some up elevator to keep the tail down
self.set_rc(2, 1200)
# get it moving a bit first
self.set_rc(3, 1300)
self.wait_groundspeed(6, 100)
# a bit faster again, straighten rudder
self.set_rc_from_map({
3: 1600,
4: 1500,
})
self.wait_groundspeed(12, 100)
# hit the gas harder now, and give it some more elevator
self.set_rc_from_map({
2: 1100,
3: 2000,
})
# gain a bit of altitude
self.wait_altitude(alt, alt_max, timeout=30, relative=relative)
# level off
self.set_rc(2, 1500)
self.progress("TAKEOFF COMPLETE")
def fly_left_circuit(self):
"""Fly a left circuit, 200m on a side."""
self.change_mode('FBWA')
self.set_rc(3, 2000)
self.wait_level_flight()
self.progress("Flying left circuit")
# do 4 turns
for i in range(0, 4):
# hard left
self.progress("Starting turn %u" % i)
self.set_rc(1, 1000)
self.wait_heading(270 - (90*i), accuracy=10)
self.set_rc(1, 1500)
self.progress("Starting leg %u" % i)
self.wait_distance(100, accuracy=20)
self.progress("Circuit complete")
def fly_RTL(self):
"""Fly to home."""
self.progress("Flying home in RTL")
self.change_mode('RTL')
self.wait_location(self.homeloc,
accuracy=120,
target_altitude=self.homeloc.alt+100,
height_accuracy=20,
timeout=180)
self.progress("RTL Complete")
def test_need_ekf_to_arm(self):
"""Loiter where we are."""
self.progress("Ensuring we need EKF to be healthy to arm")
self.reboot_sitl()
self.context_collect("STATUSTEXT")
tstart = self.get_sim_time()
success = False
while not success:
if self.get_sim_time_cached() - tstart > 60:
raise NotAchievedException("Did not get correct failure reason")
self.send_mavlink_arm_command()
try:
self.wait_statustext(".*(AHRS not healthy|AHRS: Not healthy).*", timeout=1, check_context=True, regex=True)
success = True
continue
except AutoTestTimeoutException:
pass
if self.armed():
raise NotAchievedException("Armed unexpectedly")
def fly_LOITER(self, num_circles=4):
"""Loiter where we are."""
self.progress("Testing LOITER for %u turns" % num_circles)
self.change_mode('LOITER')
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
initial_alt = m.alt
self.progress("Initial altitude %u\n" % initial_alt)
while num_circles > 0:
self.wait_heading(0, accuracy=10, timeout=60)
self.wait_heading(180, accuracy=10, timeout=60)
num_circles -= 1
self.progress("Loiter %u circles left" % num_circles)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
final_alt = m.alt
self.progress("Final altitude %u initial %u\n" %
(final_alt, initial_alt))
self.change_mode('FBWA')
if abs(final_alt - initial_alt) > 20:
raise NotAchievedException("Failed to maintain altitude")
self.progress("Completed Loiter OK")
def fly_CIRCLE(self, num_circles=1):
"""Circle where we are."""
self.progress("Testing CIRCLE for %u turns" % num_circles)
self.change_mode('CIRCLE')
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
initial_alt = m.alt
self.progress("Initial altitude %u\n" % initial_alt)
while num_circles > 0:
self.wait_heading(0, accuracy=10, timeout=60)
self.wait_heading(180, accuracy=10, timeout=60)
num_circles -= 1
self.progress("CIRCLE %u circles left" % num_circles)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
final_alt = m.alt
self.progress("Final altitude %u initial %u\n" %
(final_alt, initial_alt))
self.change_mode('FBWA')
if abs(final_alt - initial_alt) > 20:
raise NotAchievedException("Failed to maintain altitude")
self.progress("Completed CIRCLE OK")
def wait_level_flight(self, accuracy=5, timeout=30):
"""Wait for level flight."""
tstart = self.get_sim_time()
self.progress("Waiting for level flight")
self.set_rc(1, 1500)
self.set_rc(2, 1500)
self.set_rc(4, 1500)
while self.get_sim_time_cached() < tstart + timeout:
m = self.mav.recv_match(type='ATTITUDE', blocking=True)
roll = math.degrees(m.roll)
pitch = math.degrees(m.pitch)
self.progress("Roll=%.1f Pitch=%.1f" % (roll, pitch))
if math.fabs(roll) <= accuracy and math.fabs(pitch) <= accuracy:
self.progress("Attained level flight")
return
raise NotAchievedException("Failed to attain level flight")
def change_altitude(self, altitude, accuracy=30):
"""Get to a given altitude."""
self.change_mode('FBWA')
alt_error = self.mav.messages['VFR_HUD'].alt - altitude
if alt_error > 0:
self.set_rc(2, 2000)
else:
self.set_rc(2, 1000)
self.wait_altitude(altitude-accuracy/2, altitude+accuracy/2)
self.set_rc(2, 1500)
self.progress("Reached target altitude at %u" %
self.mav.messages['VFR_HUD'].alt)
return self.wait_level_flight()
def axial_left_roll(self, count=1):
"""Fly a left axial roll."""
# full throttle!
self.set_rc(3, 2000)
self.change_altitude(self.homeloc.alt+300)
# fly the roll in manual
self.change_mode('MANUAL')
while count > 0:
self.progress("Starting roll")
self.set_rc(1, 1000)
try:
self.wait_roll(-150, accuracy=90)
self.wait_roll(150, accuracy=90)
self.wait_roll(0, accuracy=90)
except Exception as e:
self.set_rc(1, 1500)
raise e
count -= 1
# back to FBWA
self.set_rc(1, 1500)
self.change_mode('FBWA')
self.set_rc(3, 1700)
return self.wait_level_flight()
def inside_loop(self, count=1):
"""Fly a inside loop."""
# full throttle!
self.set_rc(3, 2000)
self.change_altitude(self.homeloc.alt+300)
# fly the loop in manual
self.change_mode('MANUAL')
while count > 0:
self.progress("Starting loop")
self.set_rc(2, 1000)
self.wait_pitch(-60, accuracy=20)
self.wait_pitch(0, accuracy=20)
count -= 1
# back to FBWA
self.set_rc(2, 1500)
self.change_mode('FBWA')
self.set_rc(3, 1700)
return self.wait_level_flight()
def set_attitude_target(self, tolerance=10):
"""Test setting of attitude target in guided mode."""
self.change_mode("GUIDED")
# self.set_parameter("STALL_PREVENTION", 0)
state_roll_over = "roll-over"
state_stabilize_roll = "stabilize-roll"
state_hold = "hold"
state_roll_back = "roll-back"
state_done = "done"
tstart = self.get_sim_time()
try:
state = state_roll_over
while state != state_done:
m = self.mav.recv_match(type='ATTITUDE',
blocking=True,
timeout=0.1)
now = self.get_sim_time_cached()
if now - tstart > 20:
raise AutoTestTimeoutException("Manuevers not completed")
if m is None:
continue
r = math.degrees(m.roll)
if state == state_roll_over:
target_roll_degrees = 60
if abs(r - target_roll_degrees) < tolerance:
state = state_stabilize_roll
stabilize_start = now
elif state == state_stabilize_roll:
# just give it a little time to sort itself out
if now - stabilize_start > 2:
state = state_hold
hold_start = now
elif state == state_hold:
target_roll_degrees = 60
if now - hold_start > tolerance:
state = state_roll_back
if abs(r - target_roll_degrees) > tolerance:
raise NotAchievedException("Failed to hold attitude")
elif state == state_roll_back:
target_roll_degrees = 0
if abs(r - target_roll_degrees) < tolerance:
state = state_done
else:
raise ValueError("Unknown state %s" % str(state))
m_nav = self.mav.messages['NAV_CONTROLLER_OUTPUT']
self.progress("%s Roll: %f desired=%f set=%f" %
(state, r, m_nav.nav_roll, target_roll_degrees))
time_boot_millis = 0 # FIXME
target_system = 1 # FIXME
target_component = 1 # FIXME
type_mask = 0b10000001 ^ 0xFF # FIXME
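# Set bits tell the autopilot which fields to ignore; 0b10000001 ^ 0xFF
# (== 0b01111110) clears only ATTITUDE_IGNORE (128) and
# BODY_ROLL_RATE_IGNORE (1), so the quaternion and body roll rate sent
# below should be honoured and the remaining rate/throttle fields ignored.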
# attitude in radians:
q = quaternion.Quaternion([math.radians(target_roll_degrees),
0,
0])
roll_rate_radians = 0.5
pitch_rate_radians = 0
yaw_rate_radians = 0
thrust = 1.0
self.mav.mav.set_attitude_target_send(time_boot_millis,
target_system,
target_component,
type_mask,
q,
roll_rate_radians,
pitch_rate_radians,
yaw_rate_radians,
thrust)
except Exception as e:
self.change_mode('FBWA')
self.set_rc(3, 1700)
raise e
# back to FBWA
self.change_mode('FBWA')
self.set_rc(3, 1700)
self.wait_level_flight()
def test_stabilize(self, count=1):
"""Fly stabilize mode."""
# full throttle!
self.set_rc(3, 2000)
self.set_rc(2, 1300)
self.change_altitude(self.homeloc.alt+300)
self.set_rc(2, 1500)
self.change_mode('STABILIZE')
while count > 0:
self.progress("Starting roll")
self.set_rc(1, 2000)
self.wait_roll(-150, accuracy=90)
self.wait_roll(150, accuracy=90)
self.wait_roll(0, accuracy=90)
count -= 1
self.set_rc(1, 1500)
self.wait_roll(0, accuracy=5)
# back to FBWA
self.change_mode('FBWA')
self.set_rc(3, 1700)
return self.wait_level_flight()
def test_acro(self, count=1):
"""Fly ACRO mode."""
# full throttle!
self.set_rc(3, 2000)
self.set_rc(2, 1300)
self.change_altitude(self.homeloc.alt+300)
self.set_rc(2, 1500)
self.change_mode('ACRO')
while count > 0:
self.progress("Starting roll")
self.set_rc(1, 1000)
self.wait_roll(-150, accuracy=90)
self.wait_roll(150, accuracy=90)
self.wait_roll(0, accuracy=90)
count -= 1
self.set_rc(1, 1500)
# back to FBWA
self.change_mode('FBWA')
self.wait_level_flight()
self.change_mode('ACRO')
count = 2
while count > 0:
self.progress("Starting loop")
self.set_rc(2, 1000)
self.wait_pitch(-60, accuracy=20)
self.wait_pitch(0, accuracy=20)
count -= 1
self.set_rc(2, 1500)
# back to FBWA
self.change_mode('FBWA')
self.set_rc(3, 1700)
return self.wait_level_flight()
def test_FBWB(self, mode='FBWB'):
"""Fly FBWB or CRUISE mode."""
self.change_mode(mode)
self.set_rc(3, 1700)
self.set_rc(2, 1500)
# lock in the altitude by asking for an altitude change then releasing
self.set_rc(2, 1000)
self.wait_distance(50, accuracy=20)
self.set_rc(2, 1500)
self.wait_distance(50, accuracy=20)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
initial_alt = m.alt
self.progress("Initial altitude %u\n" % initial_alt)
self.progress("Flying right circuit")
# do 4 turns
for i in range(0, 4):
# hard right
self.progress("Starting turn %u" % i)
self.set_rc(1, 1800)
try:
self.wait_heading(0 + (90*i), accuracy=20, timeout=60)
except Exception as e:
self.set_rc(1, 1500)
raise e
self.set_rc(1, 1500)
self.progress("Starting leg %u" % i)
self.wait_distance(100, accuracy=20)
self.progress("Circuit complete")
self.progress("Flying rudder left circuit")
# do 4 turns
for i in range(0, 4):
# hard left
self.progress("Starting turn %u" % i)
self.set_rc(4, 1900)
try:
self.wait_heading(360 - (90*i), accuracy=20, timeout=60)
except Exception as e:
self.set_rc(4, 1500)
raise e
self.set_rc(4, 1500)
self.progress("Starting leg %u" % i)
self.wait_distance(100, accuracy=20)
self.progress("Circuit complete")
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
final_alt = m.alt
self.progress("Final altitude %u initial %u\n" %
(final_alt, initial_alt))
# back to FBWA
self.change_mode('FBWA')
if abs(final_alt - initial_alt) > 20:
raise NotAchievedException("Failed to maintain altitude")
return self.wait_level_flight()
def fly_mission(self, filename, mission_timeout=60.0, strict=True, quadplane=False):
"""Fly a mission from a file."""
self.progress("Flying mission %s" % filename)
num_wp = self.load_mission(filename, strict=strict)-1
self.set_current_waypoint(0, check_afterwards=False)
self.change_mode('AUTO')
self.wait_waypoint(1, num_wp, max_dist=60, timeout=mission_timeout)
self.wait_groundspeed(0, 0.5, timeout=mission_timeout)
if quadplane:
self.wait_statustext("Throttle disarmed", timeout=200)
else:
self.wait_statustext("Auto disarmed", timeout=60)
self.progress("Mission OK")
def fly_do_reposition(self):
self.progress("Takeoff")
self.takeoff(alt=50)
self.set_rc(3, 1500)
self.progress("Entering guided and flying somewhere constant")
self.change_mode("GUIDED")
loc = self.mav.location()
self.location_offset_ne(loc, 500, 500)
new_alt = 100
self.run_cmd_int(
mavutil.mavlink.MAV_CMD_DO_REPOSITION,
0,
0,
0,
0,
int(loc.lat * 1e7),
int(loc.lng * 1e7),
new_alt, # alt
frame=mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
)
self.wait_altitude(new_alt-10, new_alt, timeout=30, relative=True)
self.fly_home_land_and_disarm()
def fly_deepstall(self):
# self.fly_deepstall_absolute()
self.fly_deepstall_relative()
def fly_deepstall_absolute(self):
self.start_subtest("DeepStall Relative Absolute")
self.set_parameter("LAND_TYPE", 1)
deepstall_elevator_pwm = 1661
self.set_parameter("LAND_DS_ELEV_PWM", deepstall_elevator_pwm)
self.load_mission("plane-deepstall-mission.txt")
self.change_mode("AUTO")
self.wait_ready_to_arm()
self.arm_vehicle()
self.progress("Waiting for deepstall messages")
self.wait_text("Deepstall: Entry: ", timeout=240)
# assume elevator is on channel 2:
self.wait_servo_channel_value(2, deepstall_elevator_pwm)
self.disarm_wait(timeout=120)
self.progress("Flying home")
self.takeoff(10)
self.set_parameter("LAND_TYPE", 0)
self.fly_home_land_and_disarm()
def fly_deepstall_relative(self):
self.start_subtest("DeepStall Relative")
self.set_parameter("LAND_TYPE", 1)
deepstall_elevator_pwm = 1661
self.set_parameter("LAND_DS_ELEV_PWM", deepstall_elevator_pwm)
self.load_mission("plane-deepstall-relative-mission.txt")
self.change_mode("AUTO")
self.wait_ready_to_arm()
self.arm_vehicle()
self.progress("Waiting for deepstall messages")
self.wait_text("Deepstall: Entry: ", timeout=240)
# assume elevator is on channel 2:
self.wait_servo_channel_value(2, deepstall_elevator_pwm)
self.disarm_wait(timeout=120)
self.progress("Flying home")
self.takeoff(100)
self.set_parameter("LAND_TYPE", 0)
self.fly_home_land_and_disarm(timeout=240)
def SmartBattery(self):
self.set_parameters({
"BATT_MONITOR": 16, # Maxell battery monitor
})
# Must reboot sitl after setting monitor type for the SMBus parameters
# to be set, due to the dynamic parameter group
self.reboot_sitl()
self.set_parameters({
"BATT_I2C_BUS": 2, # specified in SIM_I2C.cpp
"BATT_I2C_ADDR": 11, # specified in SIM_I2C.cpp
})
self.reboot_sitl()
self.wait_ready_to_arm()
m = self.mav.recv_match(type='BATTERY_STATUS', blocking=True, timeout=10)
if m is None:
raise NotAchievedException("Did not get BATTERY_STATUS message")
if m.voltages_ext[0] == 65535: # UINT16_MAX is the "value not available" flag
raise NotAchievedException("Flag value rather than voltage")
if abs(m.voltages_ext[0] - 1000) > 300:
raise NotAchievedException("Did not get good ext voltage (got=%f)" %
(m.voltages_ext[0],))
self.arm_vehicle()
self.delay_sim_time(5)
self.disarm_vehicle()
if not self.current_onboard_log_contains_message("BCL2"):
raise NotAchievedException("Expected BCL2 message")
def fly_do_change_speed(self):
# the following lines ensure we revert these parameter values
# - DO_CHANGE_SPEED is a permanent vehicle change!
self.set_parameters({
"TRIM_ARSPD_CM": self.get_parameter("TRIM_ARSPD_CM"),
"MIN_GNDSPD_CM": self.get_parameter("MIN_GNDSPD_CM"),
})
self.progress("Takeoff")
self.takeoff(alt=100)
self.set_rc(3, 1500)
# ensure we know what the airspeed is:
self.progress("Entering guided and flying somewhere constant")
self.change_mode("GUIDED")
self.run_cmd_int(
mavutil.mavlink.MAV_CMD_DO_REPOSITION,
0,
0,
0,
0,
12345, # lat* 1e7
12345, # lon* 1e7
100 # alt
)
self.delay_sim_time(10)
self.progress("Ensuring initial speed is known and relatively constant")
initial_speed = 21.5
timeout = 10
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > timeout:
break
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
self.progress("GroundSpeed: %f want=%f" %
(m.groundspeed, initial_speed))
if abs(initial_speed - m.groundspeed) > 1:
raise NotAchievedException("Initial speed not as expected (want=%f got=%f" % (initial_speed, m.groundspeed))
self.progress("Setting groundspeed")
new_target_groundspeed = initial_speed + 5
self.run_cmd(
mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED,
1, # groundspeed
new_target_groundspeed,
-1, # throttle / no change
0, # absolute values
0,
0,
0)
self.wait_groundspeed(new_target_groundspeed-0.5, new_target_groundspeed+0.5, timeout=40)
self.progress("Adding some wind, ensuring groundspeed holds")
self.set_parameter("SIM_WIND_SPD", 5)
self.delay_sim_time(5)
self.wait_groundspeed(new_target_groundspeed-0.5, new_target_groundspeed+0.5, timeout=40)
self.set_parameter("SIM_WIND_SPD", 0)
self.progress("Setting airspeed")
new_target_airspeed = initial_speed + 5
self.run_cmd(
mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED,
0, # airspeed
new_target_airspeed,
-1, # throttle / no change
0, # absolute values
0,
0,
0)
self.wait_groundspeed(new_target_airspeed-0.5, new_target_airspeed+0.5)
self.progress("Adding some wind, hoping groundspeed increases/decreases")
self.set_parameters({
"SIM_WIND_SPD": 5,
"SIM_WIND_DIR": 270,
})
self.delay_sim_time(5)
timeout = 10
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Did not achieve groundspeed delta")
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
delta = abs(m.airspeed - m.groundspeed)
want_delta = 4
self.progress("groundspeed and airspeed should be different (have=%f want=%f)" % (delta, want_delta))
if delta > want_delta:
break
self.fly_home_land_and_disarm(timeout=240)
def fly_home_land_and_disarm(self, timeout=120):
filename = "flaps.txt"
self.progress("Using %s to fly home" % filename)
self.load_generic_mission(filename)
self.change_mode("AUTO")
# don't set current waypoint to 8 until we're distant from it;
# otherwise we arrive instantly and never see it as our current
# waypoint:
self.wait_distance_to_waypoint(8, 100, 10000000)
self.set_current_waypoint(8)
self.drain_mav()
# TODO: reflect on file to find this magic waypoint number?
# self.wait_waypoint(7, num_wp-1, timeout=500) # we
# tend to miss the final waypoint by a fair bit, and
# this is probably too noisy anyway?
self.wait_disarmed(timeout=timeout)
def fly_flaps(self):
"""Test flaps functionality."""
filename = "flaps.txt"
self.context_push()
ex = None
try:
flaps_ch = 5
flaps_ch_min = 1000
flaps_ch_trim = 1500
flaps_ch_max = 2000
servo_ch = 5
servo_ch_min = 1200
servo_ch_trim = 1300
servo_ch_max = 1800
self.set_parameters({
"SERVO%u_FUNCTION" % servo_ch: 3, # flapsauto
"RC%u_OPTION" % flaps_ch: 208, # Flaps RCx_OPTION
"LAND_FLAP_PERCNT": 50,
"LOG_DISARMED": 1,
"RC%u_MIN" % flaps_ch: flaps_ch_min,
"RC%u_MAX" % flaps_ch: flaps_ch_max,
"RC%u_TRIM" % flaps_ch: flaps_ch_trim,
"SERVO%u_MIN" % servo_ch: servo_ch_min,
"SERVO%u_MAX" % servo_ch: servo_ch_max,
"SERVO%u_TRIM" % servo_ch: servo_ch_trim,
})
self.progress("check flaps are not deployed")
self.set_rc(flaps_ch, flaps_ch_min)
self.wait_servo_channel_value(servo_ch, servo_ch_min, timeout=3)
self.progress("deploy the flaps")
self.set_rc(flaps_ch, flaps_ch_max)
tstart = self.get_sim_time()
self.wait_servo_channel_value(servo_ch, servo_ch_max)
tstop = self.get_sim_time_cached()
delta_time = tstop - tstart
delta_time_min = 0.5
delta_time_max = 1.5
if delta_time < delta_time_min or delta_time > delta_time_max:
raise NotAchievedException((
"Flaps Slew not working (%f seconds)" % (delta_time,)))
self.progress("undeploy flaps")
self.set_rc(flaps_ch, flaps_ch_min)
self.wait_servo_channel_value(servo_ch, servo_ch_min)
self.progress("Flying mission %s" % filename)
self.load_mission(filename)
self.set_current_waypoint(1)
self.change_mode('AUTO')
self.wait_ready_to_arm()
self.arm_vehicle()
last_mission_current_msg = 0
last_seq = None
while self.armed():
m = self.mav.recv_match(type='MISSION_CURRENT', blocking=True)
time_delta = (self.get_sim_time_cached() -
last_mission_current_msg)
if (time_delta > 1 or m.seq != last_seq):
dist = None
x = self.mav.messages.get("NAV_CONTROLLER_OUTPUT", None)
if x is not None:
dist = x.wp_dist
self.progress("MISSION_CURRENT.seq=%u (dist=%s)" %
(m.seq, str(dist)))
last_mission_current_msg = self.get_sim_time_cached()
last_seq = m.seq
# flaps should undeploy at the end
self.wait_servo_channel_value(servo_ch, servo_ch_min, timeout=30)
# do a short flight in FBWA, watching for flaps
# self.mavproxy.send('switch 4\n')
# self.wait_mode('FBWA')
# self.delay_sim_time(10)
# self.mavproxy.send('switch 6\n')
# self.wait_mode('MANUAL')
# self.delay_sim_time(10)
self.progress("Flaps OK")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex:
if self.armed():
self.disarm_vehicle()
raise ex
def test_rc_relay(self):
'''test toggling channel 12 toggles relay'''
self.set_parameter("RC12_OPTION", 28) # Relay On/Off
self.set_rc(12, 1000)
self.reboot_sitl() # needed for RC12_OPTION to take effect
off = self.get_parameter("SIM_PIN_MASK")
if off:
raise PreconditionFailedException("SIM_MASK_PIN off")
# allow time for the RC library to register initial value:
self.delay_sim_time(1)
self.set_rc(12, 2000)
self.wait_heartbeat()
self.wait_heartbeat()
on = self.get_parameter("SIM_PIN_MASK")
if not on:
raise NotAchievedException("SIM_PIN_MASK doesn't reflect ON")
self.set_rc(12, 1000)
self.wait_heartbeat()
self.wait_heartbeat()
off = self.get_parameter("SIM_PIN_MASK")
if off:
raise NotAchievedException("SIM_PIN_MASK doesn't reflect OFF")
def test_rc_option_camera_trigger(self):
'''test toggling channel 12 takes picture'''
self.set_parameter("RC12_OPTION", 9) # CameraTrigger
self.reboot_sitl() # needed for RC12_OPTION to take effect
x = self.mav.messages.get("CAMERA_FEEDBACK", None)
if x is not None:
raise PreconditionFailedException("Receiving CAMERA_FEEDBACK?!")
self.set_rc(12, 2000)
tstart = self.get_sim_time()
while self.get_sim_time_cached() - tstart < 10:
x = self.mav.messages.get("CAMERA_FEEDBACK", None)
if x is not None:
break
self.wait_heartbeat()
self.set_rc(12, 1000)
if x is None:
raise NotAchievedException("No CAMERA_FEEDBACK message received")
self.wait_ready_to_arm()
original_alt = self.get_altitude()
takeoff_alt = 30
self.takeoff(takeoff_alt)
self.set_rc(12, 2000)
self.delay_sim_time(1)
self.set_rc(12, 1000)
x = self.mav.messages.get("CAMERA_FEEDBACK", None)
if abs(x.alt_rel - takeoff_alt) > 10:
raise NotAchievedException("Bad relalt (want=%f vs got=%f)" % (takeoff_alt, x.alt_rel))
if abs(x.alt_msl - (original_alt+30)) > 10:
raise NotAchievedException("Bad absalt (want=%f vs got=%f)" % (original_alt+30, x.alt_msl))
self.fly_home_land_and_disarm()
def test_throttle_failsafe(self):
self.change_mode('MANUAL')
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
receiver_bit = mavutil.mavlink.MAV_SYS_STATUS_SENSOR_RC_RECEIVER
self.progress("Testing receiver enabled")
if (not (m.onboard_control_sensors_enabled & receiver_bit)):
raise PreconditionFailedException()
self.progress("Testing receiver present")
if (not (m.onboard_control_sensors_present & receiver_bit)):
raise PreconditionFailedException()
self.progress("Testing receiver health")
if (not (m.onboard_control_sensors_health & receiver_bit)):
raise PreconditionFailedException()
self.progress("Ensure we know original throttle value")
self.wait_rc_channel_value(3, 1000)
self.set_parameter("THR_FS_VALUE", 960)
self.progress("Failing receiver (throttle-to-950)")
self.context_collect("HEARTBEAT")
self.set_parameter("SIM_RC_FAIL", 2) # throttle-to-950
self.wait_mode('RTL') # long failsafe
if (not self.get_mode_from_mode_mapping("CIRCLE") in
[x.custom_mode for x in self.context_stop_collecting("HEARTBEAT")]):
raise NotAchievedException("Did not go via circle mode")
self.progress("Ensure we've had our throttle squashed to 950")
self.wait_rc_channel_value(3, 950)
self.drain_mav_unparsed()
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
self.progress("Got (%s)" % str(m))
self.progress("Testing receiver enabled")
if (not (m.onboard_control_sensors_enabled & receiver_bit)):
raise NotAchievedException("Receiver not enabled")
self.progress("Testing receiver present")
if (not (m.onboard_control_sensors_present & receiver_bit)):
raise NotAchievedException("Receiver not present")
# skip this until RC is fixed
# self.progress("Testing receiver health")
# if (m.onboard_control_sensors_health & receiver_bit):
# raise NotAchievedException("Sensor healthy when it shouldn't be")
self.set_parameter("SIM_RC_FAIL", 0)
self.drain_mav_unparsed()
# have to allow time for RC to be fetched from SITL
self.delay_sim_time(0.5)
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
self.progress("Testing receiver enabled")
if (not (m.onboard_control_sensors_enabled & receiver_bit)):
raise NotAchievedException("Receiver not enabled")
self.progress("Testing receiver present")
if (not (m.onboard_control_sensors_present & receiver_bit)):
raise NotAchievedException("Receiver not present")
self.progress("Testing receiver health")
if (not (m.onboard_control_sensors_health & receiver_bit)):
raise NotAchievedException("Receiver not healthy2")
self.change_mode('MANUAL')
self.progress("Failing receiver (no-pulses)")
self.context_collect("HEARTBEAT")
self.set_parameter("SIM_RC_FAIL", 1) # no-pulses
self.wait_mode('RTL') # long failsafe
if (not self.get_mode_from_mode_mapping("CIRCLE") in
[x.custom_mode for x in self.context_stop_collecting("HEARTBEAT")]):
raise NotAchievedException("Did not go via circle mode")
self.drain_mav_unparsed()
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
self.progress("Got (%s)" % str(m))
self.progress("Testing receiver enabled")
if (not (m.onboard_control_sensors_enabled & receiver_bit)):
raise NotAchievedException("Receiver not enabled")
self.progress("Testing receiver present")
if (not (m.onboard_control_sensors_present & receiver_bit)):
raise NotAchievedException("Receiver not present")
self.progress("Testing receiver health")
if (m.onboard_control_sensors_health & receiver_bit):
raise NotAchievedException("Sensor healthy when it shouldn't be")
self.progress("Making RC work again")
self.set_parameter("SIM_RC_FAIL", 0)
# have to allow time for RC to be fetched from SITL
self.progress("Giving receiver time to recover")
self.delay_sim_time(0.5)
self.drain_mav_unparsed()
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
self.progress("Testing receiver enabled")
if (not (m.onboard_control_sensors_enabled & receiver_bit)):
raise NotAchievedException("Receiver not enabled")
self.progress("Testing receiver present")
if (not (m.onboard_control_sensors_present & receiver_bit)):
raise NotAchievedException("Receiver not present")
self.progress("Testing receiver health")
if (not (m.onboard_control_sensors_health & receiver_bit)):
raise NotAchievedException("Receiver not healthy")
self.change_mode('MANUAL')
self.progress("Ensure long failsafe can trigger when short failsafe disabled")
self.context_push()
self.context_collect("STATUSTEXT")
ex = None
try:
self.set_parameters({
"FS_SHORT_ACTN": 3, # 3 means disabled
"SIM_RC_FAIL": 1,
})
self.wait_statustext("Long event on", check_context=True)
self.wait_mode("RTL")
# self.context_clear_collection("STATUSTEXT")
self.set_parameter("SIM_RC_FAIL", 0)
self.wait_text("Long event off", check_context=True)
self.change_mode("MANUAL")
self.progress("Trying again with THR_FS_VALUE")
self.set_parameters({
"THR_FS_VALUE": 960,
"SIM_RC_FAIL": 2,
})
self.wait_statustext("Long event on", check_context=True)
self.wait_mode("RTL")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
def test_throttle_failsafe_fence(self):
fence_bit = mavutil.mavlink.MAV_SYS_STATUS_GEOFENCE
self.progress("Checking fence is not present before being configured")
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
self.progress("Got (%s)" % str(m))
if (m.onboard_control_sensors_enabled & fence_bit):
raise NotAchievedException("Fence enabled before being configured")
self.change_mode('MANUAL')
self.wait_ready_to_arm()
self.load_fence("CMAC-fence.txt")
self.set_parameter("RC7_OPTION", 11) # AC_Fence uses Aux switch functionality
self.set_parameter("FENCE_ACTION", 4) # Fence action Brake
self.set_rc_from_map({
3: 1000,
7: 2000,
}) # Turn fence on with aux function
m = self.mav.recv_match(type='FENCE_STATUS', blocking=True, timeout=2)
self.progress("Got (%s)" % str(m))
if m is None:
raise NotAchievedException("Got FENCE_STATUS unexpectedly")
self.progress("Checking fence is initially OK")
self.wait_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_GEOFENCE,
present=True,
enabled=True,
healthy=True,
verbose=True,
timeout=30)
self.set_parameter("THR_FS_VALUE", 960)
self.progress("Failing receiver (throttle-to-950)")
self.set_parameter("SIM_RC_FAIL", 2) # throttle-to-950
self.wait_mode("CIRCLE")
self.delay_sim_time(1) # give the fence status time to propagate
self.drain_mav_unparsed()
self.progress("Checking fence is OK after receiver failure (bind-values)")
fence_bit = mavutil.mavlink.MAV_SYS_STATUS_GEOFENCE
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
self.progress("Got (%s)" % str(m))
if (not (m.onboard_control_sensors_enabled & fence_bit)):
raise NotAchievedException("Fence not enabled after RC fail")
self.do_fence_disable() # Ensure the fence is disabled after test
def test_gripper_mission(self):
self.context_push()
ex = None
try:
self.load_mission("plane-gripper-mission.txt")
self.set_current_waypoint(1)
self.change_mode('AUTO')
self.wait_ready_to_arm()
self.arm_vehicle()
self.wait_statustext("Gripper Grabbed", timeout=60)
self.wait_statustext("Gripper Released", timeout=60)
self.wait_statustext("Auto disarmed", timeout=60)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
def assert_fence_sys_status(self, present, enabled, health):
self.delay_sim_time(1)
self.drain_mav_unparsed()
m = self.mav.recv_match(type='SYS_STATUS', blocking=True, timeout=1)
if m is None:
raise NotAchievedException("Did not receive SYS_STATUS")
tests = [
("present", present, m.onboard_control_sensors_present),
("enabled", enabled, m.onboard_control_sensors_enabled),
("health", health, m.onboard_control_sensors_health),
]
bit = mavutil.mavlink.MAV_SYS_STATUS_GEOFENCE
for test in tests:
(name, want, field) = test
got = (field & bit) != 0
if want != got:
raise NotAchievedException("fence status incorrect; %s want=%u got=%u" %
(name, want, got))
def wait_circling_point_with_radius(self, loc, want_radius, epsilon=5.0, min_circle_time=5, timeout=120):
on_radius_start_heading = None
average_radius = 0.0
circle_time_start = 0
done_time = False
done_angle = False
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > timeout:
raise AutoTestTimeoutException("Did not get onto circle")
here = self.mav.location()
got_radius = self.get_distance(loc, here)
average_radius = 0.95*average_radius + 0.05*got_radius
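# exponential moving average with smoothing factor 0.05: each sample
# nudges the radius estimate by 5% of the difference, damping jitter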
on_radius = abs(got_radius - want_radius) < epsilon
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
heading = m.heading
on_string = "off"
got_angle = ""
if on_radius_start_heading is not None:
got_angle = "%0.2f" % abs(on_radius_start_heading - heading) # FIXME
on_string = "on"
want_angle = 180 # we don't actually get this (angle-subtraction issue), but we get enough...
self.progress("wait-circling: got-r=%0.2f want-r=%f avg-r=%f %s want-a=%0.1f got-a=%s" %
(got_radius, want_radius, average_radius, on_string, want_angle, got_angle))
if on_radius:
if on_radius_start_heading is None:
on_radius_start_heading = heading
average_radius = got_radius
circle_time_start = self.get_sim_time()
continue
if abs(on_radius_start_heading - heading) > want_angle: # FIXME
done_angle = True
if self.get_sim_time() - circle_time_start > min_circle_time:
done_time = True
if done_time and done_angle:
return
continue
if on_radius_start_heading is not None:
average_radius = 0.0
on_radius_start_heading = None
circle_time_start = 0
def test_fence_static(self):
ex = None
try:
self.progress("Checking for bizarre healthy-when-not-present-or-enabled")
self.set_parameter("FENCE_TYPE", 4) # Start by only setting polygon fences, otherwise fence will report present
self.assert_fence_sys_status(False, False, True)
self.load_fence("CMAC-fence.txt")
m = self.mav.recv_match(type='FENCE_STATUS', blocking=True, timeout=2)
if m is not None:
raise NotAchievedException("Got FENCE_STATUS unexpectedly")
self.drain_mav_unparsed()
self.set_parameter("FENCE_ACTION", 0) # report only
self.assert_fence_sys_status(True, False, True)
self.set_parameter("FENCE_ACTION", 1) # RTL
self.assert_fence_sys_status(True, False, True)
self.do_fence_enable()
self.assert_fence_sys_status(True, True, True)
m = self.mav.recv_match(type='FENCE_STATUS', blocking=True, timeout=2)
if m is None:
raise NotAchievedException("Did not get FENCE_STATUS")
if m.breach_status:
raise NotAchievedException("Breached fence unexpectedly (%u)" %
(m.breach_status))
self.do_fence_disable()
self.assert_fence_sys_status(True, False, True)
self.set_parameter("FENCE_ACTION", 1)
self.assert_fence_sys_status(True, False, True)
self.set_parameter("FENCE_ACTION", 0)
self.assert_fence_sys_status(True, False, True)
self.clear_fence()
if self.get_parameter("FENCE_TOTAL") != 0:
raise NotAchievedException("Expected zero points remaining")
self.assert_fence_sys_status(False, False, True)
self.progress("Trying to enable fence with no points")
self.do_fence_enable(want_result=mavutil.mavlink.MAV_RESULT_FAILED)
# test a rather unfortunate behaviour:
self.progress("Killing a live fence with fence-clear")
self.load_fence("CMAC-fence.txt")
self.set_parameter("FENCE_ACTION", 1) # AC_FENCE_ACTION_RTL_AND_LAND == 1. mavutil.mavlink.FENCE_ACTION_RTL == 4
self.do_fence_enable()
self.assert_fence_sys_status(True, True, True)
self.clear_fence()
self.wait_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_GEOFENCE, False, False, True)
if self.get_parameter("FENCE_TOTAL") != 0:
raise NotAchievedException("Expected zero points remaining")
self.assert_fence_sys_status(False, False, True)
self.do_fence_disable()
# ensure that a fence is present if it is tin can, min alt or max alt
self.progress("Test other fence types (tin-can, min alt, max alt")
self.set_parameter("FENCE_TYPE", 1) # max alt
self.assert_fence_sys_status(True, False, True)
self.set_parameter("FENCE_TYPE", 8) # min alt
self.assert_fence_sys_status(True, False, True)
self.set_parameter("FENCE_TYPE", 2) # tin can
self.assert_fence_sys_status(True, False, True)
# Test cannot arm if outside of fence and fence is enabled
self.progress("Test Arming while vehicle below FENCE_ALT_MIN")
default_fence_alt_min = self.get_parameter("FENCE_ALT_MIN")
self.set_parameter("FENCE_ALT_MIN", 50)
self.set_parameter("FENCE_TYPE", 8) # Enables minimum altitude breaches
self.do_fence_enable()
self.delay_sim_time(2) # Allow breach to propagate
self.assert_fence_enabled()
self.try_arm(False, "vehicle outside fence")
self.do_fence_disable()
self.set_parameter("FENCE_ALT_MIN", default_fence_alt_min)
# Test arming outside inclusion zone
self.progress("Test arming while vehicle outside of inclusion zone")
self.set_parameter("FENCE_TYPE", 4) # Enables polygon fence types
locs = [
mavutil.location(1.000, 1.000, 0, 0),
mavutil.location(1.000, 1.001, 0, 0),
mavutil.location(1.001, 1.001, 0, 0),
mavutil.location(1.001, 1.000, 0, 0)
]
self.upload_fences_from_locations(
mavutil.mavlink.MAV_CMD_NAV_FENCE_POLYGON_VERTEX_INCLUSION,
[
locs
]
)
self.delay_sim_time(10) # let fence check run so it loads-from-eeprom
self.do_fence_enable()
self.assert_fence_enabled()
self.delay_sim_time(2) # Allow breach to propagate
self.try_arm(False, "vehicle outside fence")
self.do_fence_disable()
self.clear_fence()
self.progress("Test arming while vehicle inside exclusion zone")
self.set_parameter("FENCE_TYPE", 4) # Enables polygon fence types
home_loc = self.mav.location()
locs = [
mavutil.location(home_loc.lat - 0.001, home_loc.lng - 0.001, 0, 0),
mavutil.location(home_loc.lat - 0.001, home_loc.lng + 0.001, 0, 0),
mavutil.location(home_loc.lat + 0.001, home_loc.lng + 0.001, 0, 0),
mavutil.location(home_loc.lat + 0.001, home_loc.lng - 0.001, 0, 0),
]
self.upload_fences_from_locations(
mavutil.mavlink.MAV_CMD_NAV_FENCE_POLYGON_VERTEX_EXCLUSION,
[
locs
]
)
self.delay_sim_time(10) # let fence check run so it loads-from-eeprom
self.do_fence_enable()
self.assert_fence_enabled()
self.delay_sim_time(2) # Allow breach to propagate
self.try_arm(False, "vehicle outside fence")
self.do_fence_disable()
self.clear_fence()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.clear_fence()
if ex is not None:
raise ex
def test_fence_breach_circle_at(self, loc, disable_on_breach=False):
ex = None
try:
self.load_fence("CMAC-fence.txt")
want_radius = 100
# when ArduPlane is fixed, remove this fudge factor
REALLY_BAD_FUDGE_FACTOR = 1.16
expected_radius = REALLY_BAD_FUDGE_FACTOR * want_radius
self.set_parameters({
"RTL_RADIUS": want_radius,
"NAVL1_LIM_BANK": 60,
"FENCE_ACTION": 1, # AC_FENCE_ACTION_RTL_AND_LAND == 1. mavutil.mavlink.FENCE_ACTION_RTL == 4
})
self.wait_ready_to_arm() # need an origin to load fence
self.do_fence_enable()
self.assert_fence_sys_status(True, True, True)
self.takeoff(alt=45, alt_max=300)
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > 30:
raise NotAchievedException("Did not breach fence")
m = self.mav.recv_match(type='FENCE_STATUS', blocking=True, timeout=2)
if m is None:
raise NotAchievedException("Did not get FENCE_STATUS")
if m.breach_status == 0:
continue
# we've breached; check our state;
if m.breach_type != mavutil.mavlink.FENCE_BREACH_BOUNDARY:
raise NotAchievedException("Unexpected breach type %u" %
(m.breach_type,))
if m.breach_count == 0:
raise NotAchievedException("Unexpected breach count %u" %
(m.breach_count,))
self.assert_fence_sys_status(True, True, False)
break
if disable_on_breach:
self.do_fence_disable()
self.wait_circling_point_with_radius(loc, expected_radius)
self.disarm_vehicle(force=True)
self.reboot_sitl()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.clear_fence()
if ex is not None:
raise ex
def test_fence_rtl(self):
self.progress("Testing FENCE_ACTION_RTL no rally point")
# have to disable the fence once we've breached or we breach
# it as part of the loiter-at-home!
self.test_fence_breach_circle_at(self.home_position_as_mav_location(),
disable_on_breach=True)
def test_fence_rtl_rally(self):
ex = None
target_system = 1
target_component = 1
try:
self.progress("Testing FENCE_ACTION_RTL with rally point")
self.wait_ready_to_arm()
loc = self.home_relative_loc_ne(50, -50)
self.set_parameter("RALLY_TOTAL", 1)
self.mav.mav.rally_point_send(target_system,
target_component,
0, # sequence number
1, # total count
int(loc.lat * 1e7),
int(loc.lng * 1e7),
15,
0, # "break" alt?!
0, # "land dir"
0) # flags
self.delay_sim_time(1)
if self.mavproxy is not None:
self.mavproxy.send("rally list\n")
self.test_fence_breach_circle_at(loc)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.clear_mission(mavutil.mavlink.MAV_MISSION_TYPE_RALLY)
if ex is not None:
raise ex
def test_fence_ret_rally(self):
""" Tests the FENCE_RET_RALLY flag, either returning to fence return point,
or rally point """
target_system = 1
target_component = 1
self.progress("Testing FENCE_ACTION_RTL with fence rally point")
self.wait_ready_to_arm()
self.homeloc = self.mav.location()
# Grab a location for fence return point, and upload it.
fence_loc = self.home_position_as_mav_location()
self.location_offset_ne(fence_loc, 50, 50)
fence_return_mission_items = [
self.mav.mav.mission_item_int_encode(
target_system,
target_component,
0, # seq
mavutil.mavlink.MAV_FRAME_GLOBAL_INT,
mavutil.mavlink.MAV_CMD_NAV_FENCE_RETURN_POINT,
0, # current
0, # autocontinue
0, # p1
0, # p2
0, # p3
0, # p4
int(fence_loc.lat * 1e7), # latitude
int(fence_loc.lng * 1e7), # longitude
0, # altitude
mavutil.mavlink.MAV_MISSION_TYPE_FENCE
)
]
self.upload_using_mission_protocol(mavutil.mavlink.MAV_MISSION_TYPE_FENCE,
fence_return_mission_items)
self.delay_sim_time(1)
# Grab a location for rally point, and upload it.
rally_loc = self.home_relative_loc_ne(-50, 50)
self.set_parameter("RALLY_TOTAL", 1)
self.mav.mav.rally_point_send(target_system,
target_component,
0, # sequence number
1, # total count
int(rally_loc.lat * 1e7),
int(rally_loc.lng * 1e7),
15,
0, # "break" alt?!
0, # "land dir"
0) # flags
self.delay_sim_time(1)
return_radius = 100
return_alt = 80
self.set_parameters({
"RTL_RADIUS": return_radius,
"FENCE_ACTION": 6, # Set Fence Action to Guided
"FENCE_TYPE": 8, # Only use fence floor
"FENCE_RET_ALT": return_alt,
})
self.do_fence_enable()
self.assert_fence_enabled()
self.takeoff(alt=50, alt_max=300)
# Trigger fence breach, fly to rally location
self.set_parameters({
"FENCE_RET_RALLY": 1,
"FENCE_ALT_MIN": 60,
})
self.wait_circling_point_with_radius(rally_loc, return_radius)
self.set_parameter("FENCE_ALT_MIN", 0) # Clear fence breach
# Fly up before re-triggering fence breach. Fly to fence return point
self.change_altitude(self.homeloc.alt+30)
self.set_parameters({
"FENCE_RET_RALLY": 0,
"FENCE_ALT_MIN": 60,
})
self.wait_altitude(altitude_min=return_alt-3,
altitude_max=return_alt+3,
relative=True)
self.wait_circling_point_with_radius(fence_loc, return_radius)
self.do_fence_disable() # Disable fence so we can land
self.fly_home_land_and_disarm() # Pack it up, we're going home.
def test_parachute(self):
self.set_rc(9, 1000)
self.set_parameters({
"CHUTE_ENABLED": 1,
"CHUTE_TYPE": 10,
"SERVO9_FUNCTION": 27,
"SIM_PARA_ENABLE": 1,
"SIM_PARA_PIN": 9,
})
self.load_mission("plane-parachute-mission.txt")
self.set_current_waypoint(1)
self.change_mode('AUTO')
self.wait_ready_to_arm()
self.arm_vehicle()
self.wait_statustext("BANG", timeout=60)
self.disarm_vehicle(force=True)
self.reboot_sitl()
def test_parachute_sinkrate(self):
self.set_rc(9, 1000)
self.set_parameters({
"CHUTE_ENABLED": 1,
"CHUTE_TYPE": 10,
"SERVO9_FUNCTION": 27,
"SIM_PARA_ENABLE": 1,
"SIM_PARA_PIN": 9,
"CHUTE_CRT_SINK": 9,
})
self.progress("Takeoff")
self.takeoff(alt=300)
self.progress("Diving")
self.set_rc(2, 2000)
self.wait_statustext("BANG", timeout=60)
self.disarm_vehicle(force=True)
self.reboot_sitl()
def run_subtest(self, desc, func):
self.start_subtest(desc)
func()
def check_attitudes_match(self, a, b):
        '''make sure ahrs2 and simstate and ATTITUDE_QUATERNION all match'''
# these are ordered to bookend the list with timestamps (which
# both attitude messages have):
get_names = ['ATTITUDE', 'SIMSTATE', 'AHRS2', 'ATTITUDE_QUATERNION']
msgs = self.get_messages_frame(get_names)
for get_name in get_names:
self.progress("%s: %s" % (get_name, msgs[get_name]))
simstate = msgs['SIMSTATE']
attitude = msgs['ATTITUDE']
ahrs2 = msgs['AHRS2']
attitude_quaternion = msgs['ATTITUDE_QUATERNION']
# check ATTITUDE
want = math.degrees(simstate.roll)
got = math.degrees(attitude.roll)
if abs(mavextra.angle_diff(want, got)) > 20:
raise NotAchievedException("ATTITUDE.Roll looks bad (want=%f got=%f)" %
(want, got))
want = math.degrees(simstate.pitch)
got = math.degrees(attitude.pitch)
if abs(mavextra.angle_diff(want, got)) > 20:
raise NotAchievedException("ATTITUDE.Pitch looks bad (want=%f got=%f)" %
(want, got))
# check AHRS2
want = math.degrees(simstate.roll)
got = math.degrees(ahrs2.roll)
if abs(mavextra.angle_diff(want, got)) > 20:
raise NotAchievedException("AHRS2.Roll looks bad (want=%f got=%f)" %
(want, got))
want = math.degrees(simstate.pitch)
got = math.degrees(ahrs2.pitch)
if abs(mavextra.angle_diff(want, got)) > 20:
raise NotAchievedException("AHRS2.Pitch looks bad (want=%f got=%f)" %
(want, got))
# check ATTITUDE_QUATERNION
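        # MAVLink's ATTITUDE_QUATERNION orders components q1..q4 as
        # (w, x, y, z), which matches pymavlink's Quaternion([w, x, y, z])
        # constructor, so the fields can be passed through in order.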
q = quaternion.Quaternion([
attitude_quaternion.q1,
attitude_quaternion.q2,
attitude_quaternion.q3,
attitude_quaternion.q4
])
euler = q.euler
self.progress("attquat:%s q:%s euler:%s" % (
str(attitude_quaternion), q, euler))
want = math.degrees(simstate.roll)
got = math.degrees(euler[0])
        if abs(mavextra.angle_diff(want, got)) > 20:
raise NotAchievedException("quat roll differs from attitude roll; want=%f got=%f" %
(want, got))
want = math.degrees(simstate.pitch)
got = math.degrees(euler[1])
        if abs(mavextra.angle_diff(want, got)) > 20:
raise NotAchievedException("quat pitch differs from attitude pitch; want=%f got=%f" %
(want, got))
def fly_ahrs2_test(self):
'''check secondary estimator is looking OK'''
ahrs2 = self.mav.recv_match(type='AHRS2', blocking=True, timeout=1)
if ahrs2 is None:
raise NotAchievedException("Did not receive AHRS2 message")
self.progress("AHRS2: %s" % str(ahrs2))
# check location
gpi = self.mav.recv_match(
type='GLOBAL_POSITION_INT',
blocking=True,
timeout=5
)
if gpi is None:
raise NotAchievedException("Did not receive GLOBAL_POSITION_INT message")
self.progress("GPI: %s" % str(gpi))
if self.get_distance_int(gpi, ahrs2) > 10:
raise NotAchievedException("Secondary location looks bad")
self.check_attitudes_match(1, 2)
def test_main_flight(self):
self.change_mode('MANUAL')
self.progress("Asserting we do support transfer of fence via mission item protocol")
self.assert_capability(mavutil.mavlink.MAV_PROTOCOL_CAPABILITY_MISSION_FENCE)
# grab home position:
self.mav.recv_match(type='HOME_POSITION', blocking=True)
self.homeloc = self.mav.location()
self.run_subtest("Takeoff", self.takeoff)
self.run_subtest("Set Attitude Target", self.set_attitude_target)
self.run_subtest("Fly left circuit", self.fly_left_circuit)
self.run_subtest("Left roll", lambda: self.axial_left_roll(1))
self.run_subtest("Inside loop", self.inside_loop)
self.run_subtest("Stablize test", self.test_stabilize)
self.run_subtest("ACRO test", self.test_acro)
self.run_subtest("FBWB test", self.test_FBWB)
self.run_subtest("CRUISE test", lambda: self.test_FBWB(mode='CRUISE'))
self.run_subtest("RTL test", self.fly_RTL)
self.run_subtest("LOITER test", self.fly_LOITER)
self.run_subtest("CIRCLE test", self.fly_CIRCLE)
self.run_subtest("AHRS2 test", self.fly_ahrs2_test)
self.run_subtest("Mission test",
lambda: self.fly_mission("ap1.txt", strict=False))
def airspeed_autocal(self):
self.progress("Ensure no AIRSPEED_AUTOCAL on ground")
self.set_parameter("ARSPD_AUTOCAL", 1)
m = self.mav.recv_match(type='AIRSPEED_AUTOCAL',
blocking=True,
timeout=5)
if m is not None:
raise NotAchievedException("Got autocal on ground")
mission_filepath = "flaps.txt"
num_wp = self.load_mission(mission_filepath)
self.wait_ready_to_arm()
self.arm_vehicle()
self.change_mode("AUTO")
self.progress("Ensure AIRSPEED_AUTOCAL in air")
        m = self.mav.recv_match(type='AIRSPEED_AUTOCAL',
                                blocking=True,
                                timeout=5)
        if m is None:
            raise NotAchievedException("Did not get AIRSPEED_AUTOCAL in air")
self.wait_waypoint(7, num_wp-1, max_dist=5, timeout=500)
self.wait_disarmed(timeout=120)
def deadreckoning_main(self, disable_airspeed_sensor=False):
self.wait_ready_to_arm()
self.gpi = None
self.simstate = None
self.last_print = 0
self.max_divergence = 0
def validate_global_position_int_against_simstate(mav, m):
if m.get_type() == 'GLOBAL_POSITION_INT':
self.gpi = m
elif m.get_type() == 'SIMSTATE':
self.simstate = m
if self.gpi is None:
return
if self.simstate is None:
return
divergence = self.get_distance_int(self.gpi, self.simstate)
max_allowed_divergence = 200
if (time.time() - self.last_print > 1 or
divergence > self.max_divergence):
self.progress("position-estimate-divergence=%fm" % (divergence,))
self.last_print = time.time()
if divergence > self.max_divergence:
self.max_divergence = divergence
if divergence > max_allowed_divergence:
raise NotAchievedException(
"global-position-int diverged from simstate by %fm (max=%fm" %
(divergence, max_allowed_divergence,))
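        # once installed, validate_global_position_int_against_simstate runs
        # for every incoming MAVLink message, so the divergence between
        # GLOBAL_POSITION_INT (the EKF estimate) and SIMSTATE (simulator
        # ground truth) is checked continuously; the 200m allowance leaves
        # room for dead-reckoning drift once the GPS is disabled below.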
self.install_message_hook(validate_global_position_int_against_simstate)
try:
# wind is from the West:
self.set_parameter("SIM_WIND_DIR", 270)
# light winds:
self.set_parameter("SIM_WIND_SPD", 10)
if disable_airspeed_sensor:
self.set_parameter("ARSPD_USE", 0)
self.takeoff(50)
loc = self.mav.location()
self.location_offset_ne(loc, 500, 500)
self.run_cmd_int(
mavutil.mavlink.MAV_CMD_DO_REPOSITION,
0,
mavutil.mavlink.MAV_DO_REPOSITION_FLAGS_CHANGE_MODE,
0,
0,
int(loc.lat * 1e7),
int(loc.lng * 1e7),
100, # alt
frame=mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
)
self.wait_location(loc, accuracy=100)
self.progress("Stewing")
self.delay_sim_time(20)
self.set_parameter("SIM_GPS_DISABLE", 1)
self.progress("Roasting")
self.delay_sim_time(20)
self.change_mode("RTL")
self.wait_distance_to_home(100, 200, timeout=200)
self.set_parameter("SIM_GPS_DISABLE", 0)
self.delay_sim_time(10)
self.set_rc(3, 1000)
self.fly_home_land_and_disarm()
self.progress("max-divergence: %fm" % (self.max_divergence,))
finally:
self.remove_message_hook(validate_global_position_int_against_simstate)
def deadreckoning(self):
self.deadreckoning_main()
def deadreckoning_no_airspeed_sensor(self):
self.deadreckoning_main(disable_airspeed_sensor=True)
def climb_before_turn(self):
self.wait_ready_to_arm()
self.set_parameters({
"FLIGHT_OPTIONS": 0,
"ALT_HOLD_RTL": 8000,
})
takeoff_alt = 10
self.takeoff(alt=takeoff_alt)
self.change_mode("CRUISE")
self.wait_distance_to_home(500, 1000, timeout=60)
self.change_mode("RTL")
expected_alt = self.get_parameter("ALT_HOLD_RTL") / 100.0
home = self.home_position_as_mav_location()
distance = self.get_distance(home, self.mav.location())
self.wait_altitude(expected_alt - 10, expected_alt + 10, relative=True)
new_distance = self.get_distance(home, self.mav.location())
# We should be closer to home.
if new_distance > distance:
raise NotAchievedException(
"Expected to be closer to home (was %fm, now %fm)."
% (distance, new_distance)
)
self.fly_home_land_and_disarm()
self.change_mode("MANUAL")
self.set_rc(3, 1000)
self.wait_ready_to_arm()
self.set_parameters({
"FLIGHT_OPTIONS": 16,
"ALT_HOLD_RTL": 10000,
})
self.takeoff(alt=takeoff_alt)
self.change_mode("CRUISE")
self.wait_distance_to_home(500, 1000, timeout=60)
self.change_mode("RTL")
home = self.home_position_as_mav_location()
distance = self.get_distance(home, self.mav.location())
self.wait_altitude(expected_alt - 10, expected_alt + 10, relative=True)
new_distance = self.get_distance(home, self.mav.location())
        # We should be farther from home.
if new_distance < distance:
raise NotAchievedException(
"Expected to be farther from home (was %fm, now %fm)."
% (distance, new_distance)
)
self.fly_home_land_and_disarm(timeout=240)
def rtl_climb_min(self):
self.wait_ready_to_arm()
rtl_climb_min = 100
self.set_parameter("RTL_CLIMB_MIN", rtl_climb_min)
takeoff_alt = 50
self.takeoff(alt=takeoff_alt)
self.change_mode('CRUISE')
self.wait_distance_to_home(1000, 1500, timeout=60)
post_cruise_alt = self.get_altitude(relative=True)
self.change_mode('RTL')
expected_alt = self.get_parameter("ALT_HOLD_RTL")/100.0
if expected_alt == -1:
expected_alt = self.get_altitude(relative=True)
# ensure we're about half-way-down at the half-way-home stage:
self.wait_distance_to_nav_target(
0,
500,
timeout=120,
)
alt = self.get_altitude(relative=True)
expected_halfway_alt = expected_alt + (post_cruise_alt + rtl_climb_min - expected_alt)/2.0
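        # Illustrative numbers (not asserted by the test): with
        # expected_alt=100, post_cruise_alt=120 and rtl_climb_min=100 the
        # vehicle first climbs to 220m, so half-way home it should be at
        # 100 + (120 + 100 - 100)/2 = 160m, within the 30m tolerance below.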
if abs(alt - expected_halfway_alt) > 30:
raise NotAchievedException("Not half-way-down and half-way-home (want=%f got=%f" %
(expected_halfway_alt, alt))
self.progress("Half-way-down at half-way-home (want=%f vs got=%f)" %
(expected_halfway_alt, alt))
rtl_radius = self.get_parameter("RTL_RADIUS")
if rtl_radius == 0:
rtl_radius = self.get_parameter("WP_LOITER_RAD")
self.wait_distance_to_nav_target(
0,
rtl_radius,
timeout=120,
)
alt = self.get_altitude(relative=True)
if abs(alt - expected_alt) > 10:
raise NotAchievedException(
"Expected to have %fm altitude at end of RTL (got %f)" %
(expected_alt, alt))
self.fly_home_land_and_disarm()
def sample_enable_parameter(self):
return "Q_ENABLE"
def test_rangefinder(self):
ex = None
self.context_push()
self.progress("Making sure we don't ordinarily get RANGEFINDER")
m = None
try:
m = self.mav.recv_match(type='RANGEFINDER',
blocking=True,
timeout=5)
except Exception as e:
self.print_exception_caught(e)
if m is not None:
raise NotAchievedException("Received unexpected RANGEFINDER msg")
try:
self.set_analog_rangefinder_parameters()
self.reboot_sitl()
            # ensure rangefinder gives height-above-ground
self.load_mission("plane-gripper-mission.txt") # borrow this
self.set_current_waypoint(1)
self.change_mode('AUTO')
self.wait_ready_to_arm()
self.arm_vehicle()
self.wait_waypoint(5, 5, max_dist=100)
rf = self.mav.recv_match(type="RANGEFINDER", timeout=1, blocking=True)
if rf is None:
raise NotAchievedException("Did not receive rangefinder message")
gpi = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
if gpi is None:
raise NotAchievedException("Did not receive GLOBAL_POSITION_INT message")
if abs(rf.distance - gpi.relative_alt/1000.0) > 3:
raise NotAchievedException(
"rangefinder alt (%s) disagrees with global-position-int.relative_alt (%s)" %
(rf.distance, gpi.relative_alt/1000.0))
self.wait_statustext("Auto disarmed", timeout=60)
self.progress("Ensure RFND messages in log")
if not self.current_onboard_log_contains_message("RFND"):
raise NotAchievedException("No RFND messages in log")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def rc_defaults(self):
ret = super(AutoTestPlane, self).rc_defaults()
ret[3] = 1000
ret[8] = 1800
return ret
def initial_mode_switch_mode(self):
return "MANUAL"
def default_mode(self):
return "MANUAL"
def test_pid_tuning(self):
self.change_mode("FBWA") # we don't update PIDs in MANUAL
super(AutoTestPlane, self).test_pid_tuning()
def test_setting_modes_via_auxswitches(self):
self.set_parameter("FLTMODE1", 1) # circle
self.set_rc(8, 950)
self.wait_mode("CIRCLE")
self.set_rc(9, 1000)
self.set_rc(10, 1000)
self.set_parameters({
"RC9_OPTION": 4, # RTL
"RC10_OPTION": 55, # guided
})
self.set_rc(9, 1900)
self.wait_mode("RTL")
self.set_rc(10, 1900)
self.wait_mode("GUIDED")
self.progress("resetting both switches - should go back to CIRCLE")
self.set_rc(9, 1000)
self.set_rc(10, 1000)
self.wait_mode("CIRCLE")
self.set_rc(9, 1900)
self.wait_mode("RTL")
self.set_rc(10, 1900)
self.wait_mode("GUIDED")
self.progress("Resetting switch should repoll mode switch")
self.set_rc(10, 1000) # this re-polls the mode switch
self.wait_mode("CIRCLE")
self.set_rc(9, 1000)
def wait_for_collision_threat_to_clear(self):
'''wait to get a "clear" collision message", then slurp remaining
messages'''
last_collision = self.get_sim_time()
while True:
now = self.get_sim_time()
if now - last_collision > 5:
return
self.progress("Waiting for collision message")
m = self.mav.recv_match(type='COLLISION', blocking=True, timeout=1)
self.progress("Got (%s)" % str(m))
if m is None:
continue
last_collision = now
def SimADSB(self):
'''trivial tests to ensure simulated ADSB sensor continues to
function'''
self.set_parameters({
"SIM_ADSB_COUNT": 1,
"ADSB_TYPE": 1,
})
self.reboot_sitl()
self.assert_receive_message('ADSB_VEHICLE', timeout=30)
def test_adsb(self):
self.context_push()
ex = None
try:
# message ADSB_VEHICLE 37 -353632614 1491652305 0 584070 0 0 0 "bob" 3 1 255 17
self.set_parameter("RC12_OPTION", 38) # avoid-adsb
self.set_rc(12, 2000)
self.set_parameters({
"ADSB_TYPE": 1,
"AVD_ENABLE": 1,
"AVD_F_ACTION": mavutil.mavlink.MAV_COLLISION_ACTION_RTL,
})
self.reboot_sitl()
self.wait_ready_to_arm()
here = self.mav.location()
self.change_mode("FBWA")
self.delay_sim_time(2) # TODO: work out why this is required...
self.test_adsb_send_threatening_adsb_message(here)
self.progress("Waiting for collision message")
m = self.mav.recv_match(type='COLLISION', blocking=True, timeout=4)
if m is None:
raise NotAchievedException("Did not get collision message")
if m.threat_level != 2:
raise NotAchievedException("Expected some threat at least")
if m.action != mavutil.mavlink.MAV_COLLISION_ACTION_RTL:
raise NotAchievedException("Incorrect action; want=%u got=%u" %
(mavutil.mavlink.MAV_COLLISION_ACTION_RTL, m.action))
self.wait_mode("RTL")
self.progress("Sending far-away ABSD_VEHICLE message")
self.mav.mav.adsb_vehicle_send(
37, # ICAO address
                int((here.lat + 1) * 1e7),  # one degree of latitude away - well out of threat range
int(here.lng * 1e7),
mavutil.mavlink.ADSB_ALTITUDE_TYPE_PRESSURE_QNH,
int(here.alt*1000 + 10000), # 10m up
0, # heading in cdeg
0, # horizontal velocity cm/s
0, # vertical velocity cm/s
"bob".encode("ascii"), # callsign
mavutil.mavlink.ADSB_EMITTER_TYPE_LIGHT,
1, # time since last communication
65535, # flags
17 # squawk
)
self.wait_for_collision_threat_to_clear()
self.change_mode("FBWA")
self.progress("Disabling ADSB-avoidance with RC channel")
self.set_rc(12, 1000)
self.delay_sim_time(1) # let the switch get polled
self.test_adsb_send_threatening_adsb_message(here)
m = self.mav.recv_match(type='COLLISION', blocking=True, timeout=4)
self.progress("Got (%s)" % str(m))
if m is not None:
raise NotAchievedException("Got collision message when I shouldn't have")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def fly_do_guided_request(self, target_system=1, target_component=1):
self.progress("Takeoff")
self.takeoff(alt=50)
self.set_rc(3, 1500)
self.start_subtest("Ensure command bounced outside guided mode")
desired_relative_alt = 33
loc = self.mav.location()
self.location_offset_ne(loc, 300, 300)
loc.alt += desired_relative_alt
self.mav.mav.mission_item_int_send(
target_system,
target_component,
0, # seq
mavutil.mavlink.MAV_FRAME_GLOBAL,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
2, # current - guided-mode request
0, # autocontinue
0, # p1
0, # p2
0, # p3
0, # p4
int(loc.lat * 1e7), # latitude
int(loc.lng * 1e7), # longitude
loc.alt, # altitude
mavutil.mavlink.MAV_MISSION_TYPE_MISSION)
m = self.mav.recv_match(type='MISSION_ACK', blocking=True, timeout=5)
if m is None:
raise NotAchievedException("Did not get MISSION_ACK")
if m.type != mavutil.mavlink.MAV_MISSION_ERROR:
raise NotAchievedException("Did not get appropriate error")
self.start_subtest("Enter guided and flying somewhere constant")
self.change_mode("GUIDED")
self.mav.mav.mission_item_int_send(
target_system,
target_component,
0, # seq
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
2, # current - guided-mode request
0, # autocontinue
0, # p1
0, # p2
0, # p3
0, # p4
int(loc.lat * 1e7), # latitude
int(loc.lng * 1e7), # longitude
desired_relative_alt, # altitude
mavutil.mavlink.MAV_MISSION_TYPE_MISSION)
m = self.mav.recv_match(type='MISSION_ACK', blocking=True, timeout=5)
if m is None:
raise NotAchievedException("Did not get MISSION_ACK")
if m.type != mavutil.mavlink.MAV_MISSION_ACCEPTED:
raise NotAchievedException("Did not get accepted response")
self.wait_location(loc, accuracy=100) # based on loiter radius
self.delay_sim_time(20)
self.wait_altitude(altitude_min=desired_relative_alt-3,
altitude_max=desired_relative_alt+3,
relative=True)
self.fly_home_land_and_disarm()
def LOITER(self):
self.takeoff(alt=200)
self.set_rc(3, 1500)
self.change_mode("LOITER")
self.progress("Doing a bit of loitering to start with")
tstart = self.get_sim_time()
while True:
now = self.get_sim_time_cached()
if now - tstart > 60:
break
m = self.mav.recv_match(type='VFR_HUD', blocking=True, timeout=5)
if m is None:
raise NotAchievedException("Did not get VFR_HUD")
new_throttle = m.throttle
alt = m.alt
m = self.mav.recv_match(type='ATTITUDE', blocking=True, timeout=5)
if m is None:
raise NotAchievedException("Did not get ATTITUDE")
pitch = math.degrees(m.pitch)
self.progress("Pitch:%f throttle:%u alt:%f" % (pitch, new_throttle, alt))
m = self.mav.recv_match(type='VFR_HUD', blocking=True, timeout=5)
if m is None:
raise NotAchievedException("Did not get VFR_HUD")
initial_throttle = m.throttle
initial_alt = m.alt
self.progress("Initial throttle: %u" % initial_throttle)
# pitch down, ensure throttle decreases:
rc2_max = self.get_parameter("RC2_MAX")
self.set_rc(2, int(rc2_max))
tstart = self.get_sim_time()
while True:
now = self.get_sim_time_cached()
            # stick-mixing is pushing the aircraft down. It doesn't want to
            # go down (the target loiter altitude hasn't changed), so it
            # tries to add energy by increasing the throttle.
if now - tstart > 60:
raise NotAchievedException("Did not see increase in throttle")
m = self.mav.recv_match(type='VFR_HUD', blocking=True, timeout=5)
if m is None:
raise NotAchievedException("Did not get VFR_HUD")
new_throttle = m.throttle
alt = m.alt
m = self.mav.recv_match(type='ATTITUDE', blocking=True, timeout=5)
if m is None:
raise NotAchievedException("Did not get ATTITUDE")
pitch = math.degrees(m.pitch)
self.progress("Pitch:%f throttle:%u alt:%f" % (pitch, new_throttle, alt))
if new_throttle - initial_throttle > 20:
self.progress("Throttle delta achieved")
break
self.progress("Centering elevator and ensuring we get back to loiter altitude")
self.set_rc(2, 1500)
self.wait_altitude(initial_alt-1, initial_alt+1)
self.fly_home_land_and_disarm()
def CPUFailsafe(self):
'''In lockup Plane should copy RC inputs to RC outputs'''
self.plane_CPUFailsafe()
def test_large_missions(self):
self.load_mission("Kingaroy-vlarge.txt", strict=False)
self.load_mission("Kingaroy-vlarge2.txt", strict=False)
def fly_soaring(self):
model = "plane-soaring"
self.customise_SITL_commandline(
[],
model=model,
defaults_filepath=self.model_defaults_filepath(model),
wipe=True)
self.load_mission('CMAC-soar.txt', strict=False)
self.set_current_waypoint(1)
self.change_mode('AUTO')
self.wait_ready_to_arm()
self.arm_vehicle()
# Enable thermalling RC
rc_chan = 0
for i in range(8):
rcx_option = self.get_parameter('RC{0}_OPTION'.format(i+1))
if rcx_option == 88:
rc_chan = i+1
break
if rc_chan == 0:
raise NotAchievedException("Did not find soaring enable channel option.")
self.set_rc_from_map({
rc_chan: 1900,
3: 1500, # Use trim airspeed.
})
# Wait to detect thermal
self.progress("Waiting for thermal")
self.wait_mode('THERMAL', timeout=600)
self.set_parameter("SOAR_VSPEED", 0.6)
# Wait to climb to SOAR_ALT_MAX
self.progress("Waiting for climb to max altitude")
alt_max = self.get_parameter('SOAR_ALT_MAX')
self.wait_altitude(alt_max-10, alt_max, timeout=600, relative=True)
# Wait for AUTO
self.progress("Waiting for AUTO mode")
self.wait_mode('AUTO')
# Disable thermals
self.set_parameter("SIM_THML_SCENARI", 0)
# Wait to descend to SOAR_ALT_MIN
self.progress("Waiting for glide to min altitude")
alt_min = self.get_parameter('SOAR_ALT_MIN')
self.wait_altitude(alt_min-10, alt_min, timeout=600, relative=True)
self.progress("Waiting for throttle up")
self.wait_servo_channel_value(3, 1200, timeout=2, comparator=operator.gt)
self.progress("Waiting for climb to cutoff altitude")
alt_ctf = self.get_parameter('SOAR_ALT_CUTOFF')
self.wait_altitude(alt_ctf-10, alt_ctf, timeout=600, relative=True)
# Allow time to suppress throttle and start descent.
self.delay_sim_time(20)
# Now set FBWB mode
self.change_mode('FBWB')
self.delay_sim_time(5)
# Now disable soaring (should hold altitude)
self.set_parameter("SOAR_ENABLE", 0)
self.delay_sim_time(10)
# And reenable. This should force throttle-down
self.set_parameter("SOAR_ENABLE", 1)
self.delay_sim_time(10)
# Now wait for descent and check throttle up
self.wait_altitude(alt_min-10, alt_min, timeout=600, relative=True)
self.progress("Waiting for climb")
self.wait_altitude(alt_ctf-10, alt_ctf, timeout=600, relative=True)
# Back to auto
self.change_mode('AUTO')
# Reenable thermals
self.set_parameter("SIM_THML_SCENARI", 1)
# Disable soaring using RC channel.
self.set_rc(rc_chan, 1100)
# Wait to get back to waypoint before thermal.
self.progress("Waiting to get back to position")
self.wait_current_waypoint(3, timeout=1200)
        # Enable soaring with mode changes suppressed
self.set_rc(rc_chan, 1500)
# Make sure this causes throttle down.
self.wait_servo_channel_value(3, 1200, timeout=2, comparator=operator.lt)
self.progress("Waiting for next WP with no thermalling")
self.wait_waypoint(4, 4, timeout=1200, max_dist=120)
# Disarm
self.disarm_vehicle()
self.progress("Mission OK")
def fly_soaring_speed_to_fly(self):
model = "plane-soaring"
self.customise_SITL_commandline(
[],
model=model,
defaults_filepath=self.model_defaults_filepath(model),
wipe=True)
self.load_mission('CMAC-soar.txt', strict=False)
        # Turn off environmental thermals.
self.set_parameter("SIM_THML_SCENARI", 0)
# Get thermalling RC channel
rc_chan = 0
for i in range(8):
rcx_option = self.get_parameter('RC{0}_OPTION'.format(i+1))
if rcx_option == 88:
rc_chan = i+1
break
if rc_chan == 0:
raise NotAchievedException("Did not find soaring enable channel option.")
# Disable soaring
self.set_rc(rc_chan, 1100)
self.set_current_waypoint(1)
self.change_mode('AUTO')
self.wait_ready_to_arm()
self.arm_vehicle()
        # Wait to climb to 400m before starting.
self.wait_altitude(390, 400, timeout=600, relative=True)
        # Wait 30s to stabilize.
self.delay_sim_time(30)
# Enable soaring (no automatic thermalling)
self.set_rc(rc_chan, 1500)
# Enable speed to fly.
self.set_parameter("SOAR_CRSE_ARSPD", -1)
# Set appropriate McCready.
self.set_parameter("SOAR_VSPEED", 1)
self.set_parameter("SIM_WIND_SPD", 0)
# Wait a few seconds before determining the "trim" airspeed.
self.delay_sim_time(20)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
trim_airspeed = m.airspeed
min_airspeed = self.get_parameter("ARSPD_FBW_MIN")
max_airspeed = self.get_parameter("ARSPD_FBW_MAX")
# Add updraft
self.set_parameter("SIM_WIND_SPD", 1)
self.set_parameter('SIM_WIND_DIR_Z', 90)
self.delay_sim_time(20)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
        if not (m.airspeed < trim_airspeed) and trim_airspeed > min_airspeed:
raise NotAchievedException("Airspeed did not reduce in updraft")
# Add downdraft
self.set_parameter('SIM_WIND_DIR_Z', -90)
self.delay_sim_time(20)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
        if not (m.airspeed > trim_airspeed) and trim_airspeed < max_airspeed:
raise NotAchievedException("Airspeed did not increase in downdraft")
# Zero the wind and increase McCready.
self.set_parameter("SIM_WIND_SPD", 0)
self.set_parameter("SOAR_VSPEED", 2)
self.delay_sim_time(20)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
        if not (m.airspeed > trim_airspeed) and trim_airspeed < max_airspeed:
raise NotAchievedException("Airspeed did not increase with higher SOAR_VSPEED")
# Reduce McCready.
self.set_parameter("SOAR_VSPEED", 0)
self.delay_sim_time(20)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
        if not (m.airspeed < trim_airspeed) and trim_airspeed > min_airspeed:
raise NotAchievedException("Airspeed did not reduce with lower SOAR_VSPEED")
# Disarm
self.disarm_vehicle()
self.progress("Mission OK")
def test_airspeed_drivers(self):
airspeed_sensors = [
("MS5525", 3, 1),
("DLVR", 7, 2),
]
for (name, t, bus) in airspeed_sensors:
self.context_push()
if bus is not None:
self.set_parameter("ARSPD2_BUS", bus)
self.set_parameter("ARSPD2_TYPE", t)
self.reboot_sitl()
self.wait_ready_to_arm()
self.arm_vehicle()
# insert listener to compare airspeeds:
airspeed = [None, None]
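            # a mutable list (rather than two locals) lets the nested hook
            # update these values without needing Python 3's `nonlocal`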
def check_airspeeds(mav, m):
m_type = m.get_type()
if (m_type == 'NAMED_VALUE_FLOAT' and
m.name == 'AS2'):
airspeed[1] = m.value
elif m_type == 'VFR_HUD':
airspeed[0] = m.airspeed
else:
return
if airspeed[0] is None or airspeed[1] is None:
return
delta = abs(airspeed[0] - airspeed[1])
if delta > 2:
raise NotAchievedException("Airspeed mismatch (as1=%f as2=%f)" % (airspeed[0], airspeed[1]))
self.install_message_hook_context(check_airspeeds)
self.fly_mission("ap1.txt", strict=False)
if airspeed[0] is None:
raise NotAchievedException("Never saw an airspeed1")
if airspeed[1] is None:
raise NotAchievedException("Never saw an airspeed2")
self.context_pop()
self.reboot_sitl()
def fly_terrain_mission(self):
self.wait_ready_to_arm()
self.arm_vehicle()
self.fly_mission("ap-terrain.txt", mission_timeout=600)
def test_loiter_terrain(self):
default_rad = self.get_parameter("WP_LOITER_RAD")
self.set_parameters({
"TERRAIN_FOLLOW": 1, # enable terrain following in loiter
"WP_LOITER_RAD": 2000, # set very large loiter rad to get some terrain changes
})
alt = 200
self.takeoff(alt*0.9, alt*1.1)
self.set_rc(3, 1500)
self.change_mode("LOITER")
self.progress("loitering at %um" % alt)
tstart = self.get_sim_time()
while True:
now = self.get_sim_time_cached()
if now - tstart > 60*15: # enough time to do one and a bit circles
break
terrain = self.mav.recv_match(
type='TERRAIN_REPORT',
blocking=True,
timeout=1
)
if terrain is None:
raise NotAchievedException("Did not get TERRAIN_REPORT message")
rel_alt = terrain.current_height
self.progress("%um above terrain" % rel_alt)
if rel_alt > alt*1.2 or rel_alt < alt * 0.8:
raise NotAchievedException("Not terrain following")
self.progress("Returning home")
self.set_parameters({
"TERRAIN_FOLLOW": 0,
"WP_LOITER_RAD": default_rad,
})
self.fly_home_land_and_disarm(240)
def fly_external_AHRS(self, sim, eahrs_type, mission):
"""Fly with external AHRS (VectorNav)"""
self.customise_SITL_commandline(["--uartE=sim:%s" % sim])
self.set_parameters({
"EAHRS_TYPE": eahrs_type,
"SERIAL4_PROTOCOL": 36,
"SERIAL4_BAUD": 230400,
"GPS_TYPE": 21,
"AHRS_EKF_TYPE": 11,
"INS_GYR_CAL": 1,
})
self.reboot_sitl()
self.progress("Running accelcal")
self.run_cmd(mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION,
0, 0, 0, 0, 4, 0, 0,
timeout=5)
self.wait_ready_to_arm()
self.arm_vehicle()
self.fly_mission(mission)
def test_vectornav(self):
self.fly_external_AHRS("VectorNav", 1, "ap1.txt")
def test_lord(self):
self.fly_external_AHRS("LORD", 2, "ap1.txt")
def get_accelvec(self, m):
return Vector3(m.xacc, m.yacc, m.zacc) * 0.001 * 9.81
def get_gyrovec(self, m):
return Vector3(m.xgyro, m.ygyro, m.zgyro) * 0.001 * math.degrees(1)
def test_imu_tempcal(self):
self.progress("Setting up SITL temperature profile")
self.set_parameters({
"SIM_IMUT1_ENABLE" : 1,
"SIM_IMUT1_ACC1_X" : 120000.000000,
"SIM_IMUT1_ACC1_Y" : -190000.000000,
"SIM_IMUT1_ACC1_Z" : 1493.864746,
"SIM_IMUT1_ACC2_X" : -51.624416,
"SIM_IMUT1_ACC2_Y" : 10.364172,
"SIM_IMUT1_ACC2_Z" : -7878.000000,
"SIM_IMUT1_ACC3_X" : -0.514242,
"SIM_IMUT1_ACC3_Y" : 0.862218,
"SIM_IMUT1_ACC3_Z" : -234.000000,
"SIM_IMUT1_GYR1_X" : -5122.513817,
"SIM_IMUT1_GYR1_Y" : -3250.470428,
"SIM_IMUT1_GYR1_Z" : -2136.346676,
"SIM_IMUT1_GYR2_X" : 30.720505,
"SIM_IMUT1_GYR2_Y" : 17.778447,
"SIM_IMUT1_GYR2_Z" : 0.765997,
"SIM_IMUT1_GYR3_X" : -0.003572,
"SIM_IMUT1_GYR3_Y" : 0.036346,
"SIM_IMUT1_GYR3_Z" : 0.015457,
"SIM_IMUT1_TMAX" : 70.0,
"SIM_IMUT1_TMIN" : -20.000000,
"SIM_IMUT2_ENABLE" : 1,
"SIM_IMUT2_ACC1_X" : -160000.000000,
"SIM_IMUT2_ACC1_Y" : 198730.000000,
"SIM_IMUT2_ACC1_Z" : 27812.000000,
"SIM_IMUT2_ACC2_X" : 30.658159,
"SIM_IMUT2_ACC2_Y" : 32.085022,
"SIM_IMUT2_ACC2_Z" : 1572.000000,
"SIM_IMUT2_ACC3_X" : 0.102912,
"SIM_IMUT2_ACC3_Y" : 0.229734,
"SIM_IMUT2_ACC3_Z" : 172.000000,
"SIM_IMUT2_GYR1_X" : 3173.925644,
"SIM_IMUT2_GYR1_Y" : -2368.312836,
"SIM_IMUT2_GYR1_Z" : -1796.497177,
"SIM_IMUT2_GYR2_X" : 13.029696,
"SIM_IMUT2_GYR2_Y" : -10.349280,
"SIM_IMUT2_GYR2_Z" : -15.082653,
"SIM_IMUT2_GYR3_X" : 0.004831,
"SIM_IMUT2_GYR3_Y" : -0.020528,
"SIM_IMUT2_GYR3_Z" : 0.009469,
"SIM_IMUT2_TMAX" : 70.000000,
"SIM_IMUT2_TMIN" : -20.000000,
"SIM_IMUT_END" : 45.000000,
"SIM_IMUT_START" : 3.000000,
"SIM_IMUT_TCONST" : 75.000000,
"SIM_DRIFT_SPEED" : 0,
"INS_GYR_CAL" : 0,
})
self.set_parameter("SIM_IMUT_FIXED", 12)
self.progress("Running accel cal")
self.run_cmd(mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION,
0, 0, 0, 0, 4, 0, 0,
timeout=5)
self.progress("Running gyro cal")
self.run_cmd(mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION,
0, 0, 0, 0, 1, 0, 0,
timeout=5)
self.set_parameters({
"SIM_IMUT_FIXED": 0,
"INS_TCAL1_ENABLE": 2,
"INS_TCAL1_TMAX": 42,
"INS_TCAL2_ENABLE": 2,
"INS_TCAL2_TMAX": 42,
"SIM_SPEEDUP": 200,
})
self.set_parameter("LOG_DISARMED", 1)
self.reboot_sitl()
self.progress("Waiting for IMU temperature")
self.assert_reach_imu_temperature(43, timeout=600)
if self.get_parameter("INS_TCAL1_ENABLE") != 1.0:
raise NotAchievedException("TCAL1 did not complete")
if self.get_parameter("INS_TCAL2_ENABLE") != 1.0:
raise NotAchievedException("TCAL2 did not complete")
self.progress("Logging with calibration enabled")
self.reboot_sitl()
self.assert_reach_imu_temperature(43, timeout=600)
self.progress("Testing with compensation enabled")
test_temperatures = range(10, 45, 5)
corrected = {}
uncorrected = {}
for temp in test_temperatures:
self.progress("Testing temperature %.1f" % temp)
self.set_parameter("SIM_IMUT_FIXED", temp)
self.delay_sim_time(2)
for msg in ['RAW_IMU', 'SCALED_IMU2']:
m = self.mav.recv_match(type=msg, blocking=True, timeout=2)
if m is None:
raise NotAchievedException(msg)
temperature = m.temperature*0.01
if abs(temperature - temp) > 0.2:
raise NotAchievedException("incorrect %s temperature %.1f should be %.1f" % (msg, temperature, temp))
accel = self.get_accelvec(m)
gyro = self.get_gyrovec(m)
accel2 = accel + Vector3(0, 0, 9.81)
corrected[temperature] = (accel2, gyro)
self.progress("Testing with compensation disabled")
self.set_parameters({
"INS_TCAL1_ENABLE": 0,
"INS_TCAL2_ENABLE": 0,
})
gyro_threshold = 0.2
accel_threshold = 0.2
for temp in test_temperatures:
self.progress("Testing temperature %.1f" % temp)
self.set_parameter("SIM_IMUT_FIXED", temp)
self.wait_heartbeat()
self.wait_heartbeat()
for msg in ['RAW_IMU', 'SCALED_IMU2']:
m = self.mav.recv_match(type=msg, blocking=True, timeout=2)
if m is None:
raise NotAchievedException(msg)
temperature = m.temperature*0.01
if abs(temperature - temp) > 0.2:
raise NotAchievedException("incorrect %s temperature %.1f should be %.1f" % (msg, temperature, temp))
accel = self.get_accelvec(m)
gyro = self.get_gyrovec(m)
accel2 = accel + Vector3(0, 0, 9.81)
uncorrected[temperature] = (accel2, gyro)
for temp in test_temperatures:
(accel, gyro) = corrected[temp]
self.progress("Corrected gyro at %.1f %s" % (temp, gyro))
self.progress("Corrected accel at %.1f %s" % (temp, accel))
for temp in test_temperatures:
(accel, gyro) = uncorrected[temp]
self.progress("Uncorrected gyro at %.1f %s" % (temp, gyro))
self.progress("Uncorrected accel at %.1f %s" % (temp, accel))
bad_value = False
for temp in test_temperatures:
(accel, gyro) = corrected[temp]
if gyro.length() > gyro_threshold:
raise NotAchievedException("incorrect corrected at %.1f gyro %s" % (temp, gyro))
if accel.length() > accel_threshold:
raise NotAchievedException("incorrect corrected at %.1f accel %s" % (temp, accel))
(accel, gyro) = uncorrected[temp]
if gyro.length() > gyro_threshold*2:
bad_value = True
if accel.length() > accel_threshold*2:
bad_value = True
if not bad_value:
raise NotAchievedException("uncompensated IMUs did not vary enough")
# the above tests change the internal persistent state of the
# vehicle in ways that autotest doesn't track (magically set
# parameters). So wipe the vehicle's eeprom:
self.reset_SITL_commandline()
def ekf_lane_switch(self):
self.context_push()
ex = None
        # new lane switch available only with EK3
self.set_parameters({
"EK3_ENABLE": 1,
"EK2_ENABLE": 0,
"AHRS_EKF_TYPE": 3,
"EK3_AFFINITY": 15, # enable affinity for all sensors
"EK3_IMU_MASK": 3, # use only 2 IMUs
"GPS_TYPE2": 1,
"SIM_GPS2_DISABLE": 0,
"SIM_BARO_COUNT": 2,
"SIM_BAR2_DISABLE": 0,
"ARSPD2_TYPE": 2,
"ARSPD2_USE": 1,
"ARSPD2_PIN": 2,
})
# some parameters need reboot to take effect
self.reboot_sitl()
self.lane_switches = []
# add an EKF lane switch hook
def statustext_hook(mav, message):
if message.get_type() != 'STATUSTEXT':
return
# example msg: EKF3 lane switch 1
if not message.text.startswith("EKF3 lane switch "):
return
newlane = int(message.text[-1])
self.lane_switches.append(newlane)
self.install_message_hook(statustext_hook)
# get flying
self.takeoff(alt=50)
self.change_mode('CIRCLE')
try:
###################################################################
self.progress("Checking EKF3 Lane Switching trigger from all sensors")
###################################################################
self.start_subtest("ACCELEROMETER: Change z-axis offset")
# create an accelerometer error by changing the Z-axis offset
self.context_collect("STATUSTEXT")
old_parameter = self.get_parameter("INS_ACCOFFS_Z")
            self.wait_statustext(
                text="EKF3 lane switch",
                timeout=30,
                # the_function must be a callable; calling set_parameter
                # directly here would change the parameter before the wait
                # starts and pass None as the_function
                the_function=lambda: self.set_parameter("INS_ACCOFFS_Z", old_parameter + 5),
                check_context=True)
if self.lane_switches != [1]:
raise NotAchievedException("Expected lane switch 1, got %s" % str(self.lane_switches[-1]))
# Cleanup
self.set_parameter("INS_ACCOFFS_Z", old_parameter)
self.context_clear_collection("STATUSTEXT")
self.wait_heading(0, accuracy=10, timeout=60)
self.wait_heading(180, accuracy=10, timeout=60)
###################################################################
self.start_subtest("BAROMETER: Freeze to last measured value")
self.context_collect("STATUSTEXT")
# create a barometer error by inhibiting any pressure change while changing altitude
old_parameter = self.get_parameter("SIM_BAR2_FREEZE")
self.set_parameter("SIM_BAR2_FREEZE", 1)
self.wait_statustext(
text="EKF3 lane switch",
timeout=30,
the_function=lambda: self.set_rc(2, 2000),
check_context=True)
if self.lane_switches != [1, 0]:
raise NotAchievedException("Expected lane switch 0, got %s" % str(self.lane_switches[-1]))
# Cleanup
self.set_rc(2, 1500)
self.set_parameter("SIM_BAR2_FREEZE", old_parameter)
self.context_clear_collection("STATUSTEXT")
self.wait_heading(0, accuracy=10, timeout=60)
self.wait_heading(180, accuracy=10, timeout=60)
###################################################################
self.start_subtest("GPS: Apply GPS Velocity Error in NED")
self.context_push()
self.context_collect("STATUSTEXT")
# create a GPS velocity error by adding a random 2m/s
# noise on each axis
def sim_gps_verr():
self.set_parameters({
"SIM_GPS_VERR_X": self.get_parameter("SIM_GPS_VERR_X") + 2,
"SIM_GPS_VERR_Y": self.get_parameter("SIM_GPS_VERR_Y") + 2,
"SIM_GPS_VERR_Z": self.get_parameter("SIM_GPS_VERR_Z") + 2,
})
self.wait_statustext(text="EKF3 lane switch", timeout=30, the_function=sim_gps_verr, check_context=True)
if self.lane_switches != [1, 0, 1]:
raise NotAchievedException("Expected lane switch 1, got %s" % str(self.lane_switches[-1]))
# Cleanup
self.context_pop()
self.context_clear_collection("STATUSTEXT")
self.wait_heading(0, accuracy=10, timeout=60)
self.wait_heading(180, accuracy=10, timeout=60)
###################################################################
self.start_subtest("MAGNETOMETER: Change X-Axis Offset")
self.context_collect("STATUSTEXT")
# create a magnetometer error by changing the X-axis offset
old_parameter = self.get_parameter("SIM_MAG2_OFS_X")
            self.wait_statustext(
                text="EKF3 lane switch",
                timeout=30,
                the_function=lambda: self.set_parameter("SIM_MAG2_OFS_X", old_parameter + 150),
                check_context=True)
if self.lane_switches != [1, 0, 1, 0]:
raise NotAchievedException("Expected lane switch 0, got %s" % str(self.lane_switches[-1]))
# Cleanup
self.set_parameter("SIM_MAG2_OFS_X", old_parameter)
self.context_clear_collection("STATUSTEXT")
self.wait_heading(0, accuracy=10, timeout=60)
self.wait_heading(180, accuracy=10, timeout=60)
###################################################################
self.start_subtest("AIRSPEED: Fail to constant value")
self.context_push()
self.context_collect("STATUSTEXT")
old_parameter = self.get_parameter("SIM_ARSPD_FAIL")
def fail_speed():
self.change_mode("GUIDED")
loc = self.mav.location()
self.run_cmd_int(
mavutil.mavlink.MAV_CMD_DO_REPOSITION,
0,
0,
0,
0,
int(loc.lat * 1e7),
int(loc.lng * 1e7),
50 # alt
)
self.delay_sim_time(5)
# create an airspeed sensor error by freezing to the
# current airspeed then changing the airspeed demand
# to a higher value and waiting for the TECS speed
# loop to diverge
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
self.set_parameter("SIM_ARSPD_FAIL", m.airspeed)
self.run_cmd(
mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED,
0, # airspeed
30,
-1, # throttle / no change
0, # absolute values
0,
0,
0
)
self.wait_statustext(text="EKF3 lane switch", timeout=30, the_function=fail_speed, check_context=True)
if self.lane_switches != [1, 0, 1, 0, 1]:
raise NotAchievedException("Expected lane switch 1, got %s" % str(self.lane_switches[-1]))
# Cleanup
self.set_parameter("SIM_ARSPD_FAIL", old_parameter)
self.change_mode('CIRCLE')
self.context_pop()
self.context_clear_collection("STATUSTEXT")
self.wait_heading(0, accuracy=10, timeout=60)
self.wait_heading(180, accuracy=10, timeout=60)
###################################################################
self.progress("GYROSCOPE: Change Y-Axis Offset")
self.context_collect("STATUSTEXT")
# create a gyroscope error by changing the Y-axis offset
old_parameter = self.get_parameter("INS_GYR2OFFS_Y")
            self.wait_statustext(
                text="EKF3 lane switch",
                timeout=30,
                the_function=lambda: self.set_parameter("INS_GYR2OFFS_Y", old_parameter + 1),
                check_context=True)
if self.lane_switches != [1, 0, 1, 0, 1, 0]:
raise NotAchievedException("Expected lane switch 0, got %s" % str(self.lane_switches[-1]))
# Cleanup
self.set_parameter("INS_GYR2OFFS_Y", old_parameter)
self.context_clear_collection("STATUSTEXT")
###################################################################
self.disarm_vehicle()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.remove_message_hook(statustext_hook)
self.context_pop()
# some parameters need reboot to take effect
self.reboot_sitl()
if ex is not None:
raise ex
def test_fence_alt_ceil_floor(self):
fence_bit = mavutil.mavlink.MAV_SYS_STATUS_GEOFENCE
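        # While a fence breach is active ArduPilot clears this bit in
        # SYS_STATUS.onboard_control_sensors_health, so below a cleared
        # bit means "breached" and a set bit means "healthy".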
self.set_parameters({
"FENCE_TYPE": 9, # Set fence type to max and min alt
"FENCE_ACTION": 0, # Set action to report
"FENCE_ALT_MAX": 200,
"FENCE_ALT_MIN": 100,
})
# Grab Home Position
self.mav.recv_match(type='HOME_POSITION', blocking=True)
self.homeloc = self.mav.location()
cruise_alt = 150
self.takeoff(cruise_alt)
self.do_fence_enable()
self.progress("Fly above ceiling and check for breach")
self.change_altitude(self.homeloc.alt + cruise_alt + 80)
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
self.progress("Got (%s)" % str(m))
if ((m.onboard_control_sensors_health & fence_bit)):
raise NotAchievedException("Fence Ceiling did not breach")
self.progress("Return to cruise alt and check for breach clear")
self.change_altitude(self.homeloc.alt + cruise_alt)
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
self.progress("Got (%s)" % str(m))
if (not (m.onboard_control_sensors_health & fence_bit)):
raise NotAchievedException("Fence breach did not clear")
self.progress("Fly below floor and check for breach")
self.change_altitude(self.homeloc.alt + cruise_alt - 80)
m = self.mav.recv_match(type='SYS_STATUS', blocking=True)
self.progress("Got (%s)" % str(m))
if ((m.onboard_control_sensors_health & fence_bit)):
raise NotAchievedException("Fence Floor did not breach")
self.do_fence_disable()
self.fly_home_land_and_disarm(timeout=150)
def test_fence_breached_change_mode(self):
""" Attempts to change mode while a fence is breached.
This should revert to the mode specified by the fence action. """
self.set_parameters({
"FENCE_ACTION": 1,
"FENCE_TYPE": 4,
})
home_loc = self.mav.location()
locs = [
mavutil.location(home_loc.lat - 0.001, home_loc.lng - 0.001, 0, 0),
mavutil.location(home_loc.lat - 0.001, home_loc.lng + 0.001, 0, 0),
mavutil.location(home_loc.lat + 0.001, home_loc.lng + 0.001, 0, 0),
mavutil.location(home_loc.lat + 0.001, home_loc.lng - 0.001, 0, 0),
]
self.upload_fences_from_locations(
mavutil.mavlink.MAV_CMD_NAV_FENCE_POLYGON_VERTEX_INCLUSION,
[
locs
]
)
self.delay_sim_time(1)
self.wait_ready_to_arm()
self.takeoff(alt=50)
self.change_mode("CRUISE")
self.wait_distance(90, accuracy=15)
self.progress("Enable fence and initiate fence action")
self.do_fence_enable()
self.assert_fence_enabled()
self.wait_mode("RTL") # We should RTL because of fence breach
self.progress("User mode change to cruise should retrigger fence action")
self.change_mode("CRUISE")
self.wait_mode("RTL", timeout=5)
self.progress("Test complete, disable fence and come home")
self.do_fence_disable()
self.fly_home_land_and_disarm()
def test_fence_breach_no_return_point(self):
""" Attempts to change mode while a fence is breached.
This should revert to the mode specified by the fence action. """
want_radius = 100 # Fence Return Radius
self.set_parameters({
"FENCE_ACTION": 6,
"FENCE_TYPE": 4,
"RTL_RADIUS": want_radius,
"NAVL1_LIM_BANK": 60,
})
home_loc = self.mav.location()
locs = [
mavutil.location(home_loc.lat - 0.003, home_loc.lng - 0.001, 0, 0),
mavutil.location(home_loc.lat - 0.003, home_loc.lng + 0.003, 0, 0),
mavutil.location(home_loc.lat + 0.001, home_loc.lng + 0.003, 0, 0),
mavutil.location(home_loc.lat + 0.001, home_loc.lng - 0.001, 0, 0),
]
self.upload_fences_from_locations(
mavutil.mavlink.MAV_CMD_NAV_FENCE_POLYGON_VERTEX_INCLUSION,
[
locs
]
)
self.delay_sim_time(1)
self.wait_ready_to_arm()
self.takeoff(alt=50)
self.change_mode("CRUISE")
self.wait_distance(150, accuracy=20)
self.progress("Enable fence and initiate fence action")
self.do_fence_enable()
self.assert_fence_enabled()
self.wait_mode("GUIDED", timeout=120) # We should RTL because of fence breach
self.delay_sim_time(60)
items = self.download_using_mission_protocol(mavutil.mavlink.MAV_MISSION_TYPE_FENCE)
if len(items) != 4:
raise NotAchievedException("Unexpected fencepoint count (want=%u got=%u)" % (4, len(items)))
# Check there are no fence return points specified still
for fence_loc in items:
if fence_loc.command == mavutil.mavlink.MAV_CMD_NAV_FENCE_RETURN_POINT:
raise NotAchievedException(
"Unexpected fence return point found (%u) got %u" %
(fence_loc.command,
mavutil.mavlink.MAV_CMD_NAV_FENCE_RETURN_POINT))
# Work out the approximate return point when no fence return point present
# Logic taken from AC_PolyFence_loader.cpp
min_loc = self.mav.location()
max_loc = self.mav.location()
for new_loc in locs:
if new_loc.lat < min_loc.lat:
min_loc.lat = new_loc.lat
if new_loc.lng < min_loc.lng:
min_loc.lng = new_loc.lng
if new_loc.lat > max_loc.lat:
max_loc.lat = new_loc.lat
if new_loc.lng > max_loc.lng:
max_loc.lng = new_loc.lng
# Generate the return location based on min and max locs
ret_lat = (min_loc.lat + max_loc.lat) / 2
ret_lng = (min_loc.lng + max_loc.lng) / 2
ret_loc = mavutil.location(ret_lat, ret_lng, 0, 0)
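        # Illustrative example (values are hypothetical): for corner
        # latitudes -35.366/-35.362 and longitudes 149.164/149.168 the
        # computed return point is the bounding-box centre
        # (-35.364, 149.166), mirroring AC_PolyFence_loader's fallback.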
self.progress("Return loc: (%s)" % str(ret_loc))
# Wait for guided return to vehicle calculated fence return location
self.wait_distance_to_location(ret_loc, 90, 110)
self.wait_circling_point_with_radius(ret_loc, 92)
self.progress("Test complete, disable fence and come home")
self.do_fence_disable()
self.fly_home_land_and_disarm()
def test_fence_breach_no_return_point_no_inclusion(self):
""" Test result when a breach occurs and No fence return point is present and
no inclusion fence is present and exclusion fence is present """
want_radius = 100 # Fence Return Radius
self.set_parameters({
"FENCE_ACTION": 6,
"FENCE_TYPE": 2,
"FENCE_RADIUS": 300,
"RTL_RADIUS": want_radius,
"NAVL1_LIM_BANK": 60,
})
self.clear_fence()
self.delay_sim_time(1)
self.wait_ready_to_arm()
home_loc = self.mav.location()
self.takeoff(alt=50)
self.change_mode("CRUISE")
self.wait_distance(150, accuracy=20)
self.progress("Enable fence and initiate fence action")
self.do_fence_enable()
self.assert_fence_enabled()
self.wait_mode("GUIDED") # We should RTL because of fence breach
self.delay_sim_time(30)
items = self.download_using_mission_protocol(mavutil.mavlink.MAV_MISSION_TYPE_FENCE)
if len(items) != 0:
raise NotAchievedException("Unexpected fencepoint count (want=%u got=%u)" % (0, len(items)))
# Check there are no fence return points specified still
for fence_loc in items:
if fence_loc.command == mavutil.mavlink.MAV_CMD_NAV_FENCE_RETURN_POINT:
raise NotAchievedException(
"Unexpected fence return point found (%u) got %u" %
(fence_loc.command,
mavutil.mavlink.MAV_CMD_NAV_FENCE_RETURN_POINT))
# Wait for guided return to vehicle calculated fence return location
self.wait_distance_to_location(home_loc, 90, 110)
self.wait_circling_point_with_radius(home_loc, 92)
self.progress("Test complete, disable fence and come home")
self.do_fence_disable()
self.fly_home_land_and_disarm()
def test_fence_disable_under_breach_action(self):
""" Fence breach will cause the vehicle to enter guided mode.
Upon breach clear, check the vehicle is in the expected mode"""
self.set_parameters({
"FENCE_ALT_MIN": 50, # Sets the fence floor
"FENCE_TYPE": 8, # Only use fence floor for breaches
})
self.wait_ready_to_arm()
def attempt_fence_breached_disable(start_mode, end_mode, expected_mode, action):
self.set_parameter("FENCE_ACTION", action) # Set Fence Action to Guided
self.change_mode(start_mode)
self.arm_vehicle()
self.do_fence_enable()
self.assert_fence_enabled()
self.wait_mode(expected_mode)
self.do_fence_disable()
self.assert_fence_disabled()
self.wait_mode(end_mode)
self.disarm_vehicle(force=True)
attempt_fence_breached_disable(start_mode="FBWA", end_mode="RTL", expected_mode="RTL", action=1)
attempt_fence_breached_disable(start_mode="FBWA", end_mode="FBWA", expected_mode="GUIDED", action=6)
attempt_fence_breached_disable(start_mode="FBWA", end_mode="FBWA", expected_mode="GUIDED", action=7)
def run_auxfunc(self,
function,
level,
want_result=mavutil.mavlink.MAV_RESULT_ACCEPTED):
self.run_cmd(
mavutil.mavlink.MAV_CMD_DO_AUX_FUNCTION,
function, # p1
level, # p2
0, # p3
0, # p4
0, # p5
0, # p6
0, # p7
want_result=want_result
)
def fly_aux_function(self):
self.context_collect('STATUSTEXT')
self.run_auxfunc(64, 2) # 64 == reverse throttle
self.wait_statustext("RevThrottle: ENABLE", check_context=True)
self.run_auxfunc(64, 0)
self.wait_statustext("RevThrottle: DISABLE", check_context=True)
self.run_auxfunc(65, 2) # 65 == GPS_DISABLE
self.start_subtest("Bad auxfunc")
self.run_auxfunc(
65231,
2,
want_result=mavutil.mavlink.MAV_RESULT_FAILED
)
self.start_subtest("Bad switchpos")
self.run_auxfunc(
62,
17,
want_result=mavutil.mavlink.MAV_RESULT_DENIED
)
def fly_each_frame(self):
vinfo = vehicleinfo.VehicleInfo()
vinfo_options = vinfo.options[self.vehicleinfo_key()]
known_broken_frames = {
"firefly": "falls out of sky after transition",
"plane-tailsitter": "does not take off; immediately emits 'AP: Transition VTOL done' while on ground",
"quadplane-cl84": "falls out of sky instead of transitioning",
"quadplane-tilttri": "falls out of sky instead of transitioning",
"quadplane-tilttrivec": "loses attitude control and crashes",
}
for frame in sorted(vinfo_options["frames"].keys()):
self.start_subtest("Testing frame (%s)" % str(frame))
if frame in known_broken_frames:
self.progress("Actually, no I'm not - it is known-broken (%s)" %
(known_broken_frames[frame]))
continue
frame_bits = vinfo_options["frames"][frame]
print("frame_bits: %s" % str(frame_bits))
if frame_bits.get("external", False):
self.progress("Actually, no I'm not - it is an external simulation")
continue
model = frame_bits.get("model", frame)
# the model string for Callisto has crap in it.... we
# should really have another entry in the vehicleinfo data
# to carry the path to the JSON.
actual_model = model.split(":")[0]
defaults = self.model_defaults_filepath(actual_model)
            if not isinstance(defaults, list):
defaults = [defaults]
self.customise_SITL_commandline(
["--defaults", ','.join(defaults), ],
model=model,
wipe=True,
)
mission_file = "basic.txt"
quadplane = self.get_parameter('Q_ENABLE')
if quadplane:
mission_file = "basic-quadplane.txt"
tailsitter = self.get_parameter('Q_TAILSIT_ENABLE')
if tailsitter:
# tailsitter needs extra re-boot to pick up the rotated AHRS view
self.reboot_sitl()
self.wait_ready_to_arm()
self.arm_vehicle()
self.fly_mission(mission_file, strict=False, quadplane=quadplane, mission_timeout=400.0)
self.wait_disarmed()
def RCDisableAirspeedUse(self):
self.set_parameter("RC9_OPTION", 106)
self.delay_sim_time(5)
self.set_rc(9, 1000)
self.wait_sensor_state(
mavutil.mavlink.MAV_SYS_STATUS_SENSOR_DIFFERENTIAL_PRESSURE,
True,
True,
True)
self.set_rc(9, 2000)
self.wait_sensor_state(
mavutil.mavlink.MAV_SYS_STATUS_SENSOR_DIFFERENTIAL_PRESSURE,
True,
False,
True)
self.set_rc(9, 1000)
self.wait_sensor_state(
mavutil.mavlink.MAV_SYS_STATUS_SENSOR_DIFFERENTIAL_PRESSURE,
True,
True,
True)
def WatchdogHome(self):
if self.gdb:
# we end up signalling the wrong process. I think.
# Probably need to have a "sitl_pid()" method to get the
# ardupilot process's PID.
self.progress("######## Skipping WatchdogHome test under GDB")
return
ex = None
try:
self.progress("Enabling watchdog")
self.set_parameter("BRD_OPTIONS", 1 << 0)
self.reboot_sitl()
self.wait_ready_to_arm()
self.progress("Explicitly setting home to a known location")
orig_home = self.poll_home_position()
new_home = orig_home
new_home.latitude = new_home.latitude + 1000
new_home.longitude = new_home.longitude + 2000
new_home.altitude = new_home.altitude + 300000 # 300 metres
self.run_cmd_int(
mavutil.mavlink.MAV_CMD_DO_SET_HOME,
0, # p1,
0, # p2,
0, # p3,
0, # p4,
new_home.latitude,
new_home.longitude,
new_home.altitude/1000.0, # mm => m
)
old_bootcount = self.get_parameter('STAT_BOOTCNT')
self.progress("Forcing watchdog reset")
os.kill(self.sitl.pid, signal.SIGALRM)
self.detect_and_handle_reboot(old_bootcount)
self.wait_statustext("WDG:")
self.wait_statustext("IMU1 is using GPS") # won't be come armable
self.progress("Verifying home position")
post_reboot_home = self.poll_home_position()
delta = self.get_distance_int(new_home, post_reboot_home)
max_delta = 1
if delta > max_delta:
raise NotAchievedException(
"New home not where it should be (dist=%f) (want=%s) (got=%s)" %
(delta, str(new_home), str(post_reboot_home)))
except Exception as e:
self.print_exception_caught(e)
ex = e
self.reboot_sitl()
if ex is not None:
raise ex
def AUTOTUNE(self):
self.takeoff(100)
self.change_mode('AUTOTUNE')
self.context_collect('STATUSTEXT')
tstart = self.get_sim_time()
axis = "Roll"
rc_value = 1000
while True:
timeout = 600
if self.get_sim_time() - tstart > timeout:
raise NotAchievedException("Did not complete within %u seconds" % timeout)
try:
m = self.wait_statustext("%s: Finished" % axis, check_context=True, timeout=0.1)
self.progress("Got %s" % str(m))
if axis == "Roll":
axis = "Pitch"
elif axis == "Pitch":
break
else:
raise ValueError("Bug: %s" % axis)
except AutoTestTimeoutException:
pass
self.delay_sim_time(1)
            if rc_value == 1000:
                rc_value = 2000
            elif rc_value == 2000:
                rc_value = 1000
            else:
                raise ValueError("Bug")
if axis == "Roll":
self.set_rc(1, rc_value)
self.set_rc(2, 1500)
elif axis == "Pitch":
self.set_rc(1, 1500)
self.set_rc(2, rc_value)
else:
raise ValueError("Bug")
tdelta = self.get_sim_time() - tstart
self.progress("Finished in %0.1f seconds" % (tdelta,))
self.set_rc(1, 1500)
self.set_rc(2, 1500)
self.change_mode('FBWA')
self.fly_home_land_and_disarm(timeout=tdelta+240)
def fly_landing_baro_drift(self):
self.customise_SITL_commandline([], wipe=True)
self.set_analog_rangefinder_parameters()
self.set_parameters({
"SIM_BARO_DRIFT": -0.02,
"SIM_TERRAIN": 0,
"RNGFND_LANDING": 1,
"LAND_SLOPE_RCALC": 2,
"LAND_ABORT_DEG": 1,
})
self.reboot_sitl()
self.wait_ready_to_arm()
self.arm_vehicle()
self.fly_mission("ap-circuit.txt", mission_timeout=1200)
def DCMFallback(self):
self.reboot_sitl()
self.delay_sim_time(30)
self.wait_ready_to_arm()
self.arm_vehicle()
self.takeoff(50)
self.change_mode('CIRCLE')
self.context_collect('STATUSTEXT')
self.set_parameters({
"EK3_POS_I_GATE": 0,
"SIM_GPS_HZ": 1,
"SIM_GPS_LAG_MS": 1000,
})
self.wait_statustext("DCM Active", check_context=True, timeout=60)
self.wait_statustext("EKF3 Active", check_context=True)
self.wait_statustext("DCM Active", check_context=True)
self.wait_statustext("EKF3 Active", check_context=True)
self.wait_statustext("DCM Active", check_context=True)
self.wait_statustext("EKF3 Active", check_context=True)
self.context_stop_collecting('STATUSTEXT')
self.fly_home_land_and_disarm()
def ForcedDCM(self):
self.wait_ready_to_arm()
self.arm_vehicle()
self.takeoff(50)
self.context_collect('STATUSTEXT')
self.set_parameter("AHRS_EKF_TYPE", 0)
self.wait_statustext("DCM Active", check_context=True)
self.context_stop_collecting('STATUSTEXT')
self.fly_home_land_and_disarm()
def MegaSquirt(self):
self.assert_not_receiving_message('EFI_STATUS')
self.set_parameters({
'SIM_EFI_TYPE': 1,
'EFI_TYPE': 1,
'SERIAL5_PROTOCOL': 24,
})
self.customise_SITL_commandline(["--uartF=sim:megasquirt"])
self.delay_sim_time(5)
m = self.assert_receive_message('EFI_STATUS')
mavutil.dump_message_verbose(sys.stdout, m)
if m.throttle_out != 0:
raise NotAchievedException("Expected zero throttle")
if m.health != 1:
raise NotAchievedException("Not healthy")
if m.intake_manifold_temperature < 20:
raise NotAchievedException("Bad intake manifold temperature")
def test_glide_slope_threshold(self):
# Test that GLIDE_SLOPE_THRESHOLD correctly controls re-planning glide slope
# in the scenario that aircraft is above planned slope and slope is positive (climbing).
#
#
# Behaviour with GLIDE_SLOPE_THRESH = 0 (no slope replanning)
# (2).. __(4)
# | \..__/
# | __/
# (3)
#
# Behaviour with GLIDE_SLOPE_THRESH = 5 (slope replanning when >5m error)
# (2)........__(4)
# | __/
# | __/
# (3)
# Solid is plan, dots are actual flightpath.
self.load_mission('rapid-descent-then-climb.txt', strict=False)
self.set_current_waypoint(1)
self.change_mode('AUTO')
self.wait_ready_to_arm()
self.arm_vehicle()
#
# Initial run with GLIDE_SLOPE_THR = 5 (default).
#
self.set_parameter("GLIDE_SLOPE_THR", 5)
# Wait for waypoint commanding rapid descent, followed by climb.
self.wait_current_waypoint(5, timeout=1200)
# Altitude should not descend significantly below the initial altitude
init_altitude = self.get_altitude(relative=True, timeout=2)
timeout = 600
wpnum = 7
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > timeout:
raise AutoTestTimeoutException("Did not get wanted current waypoint")
if (self.get_altitude(relative=True, timeout=2) - init_altitude) < -10:
raise NotAchievedException("Descended >10m before reaching desired waypoint,\
indicating slope was not replanned")
seq = self.mav.waypoint_current()
self.progress("Waiting for wp=%u current=%u" % (wpnum, seq))
if seq == wpnum:
break
self.set_current_waypoint(2)
#
# Second run with GLIDE_SLOPE_THR = 0 (no re-plan).
#
self.set_parameter("GLIDE_SLOPE_THR", 0)
# Wait for waypoint commanding rapid descent, followed by climb.
self.wait_current_waypoint(5, timeout=1200)
# This time altitude should descend significantly below the initial altitude
init_altitude = self.get_altitude(relative=True, timeout=2)
timeout = 600
wpnum = 7
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > timeout:
raise AutoTestTimeoutException("Did not get wanted altitude")
seq = self.mav.waypoint_current()
self.progress("Waiting for wp=%u current=%u" % (wpnum, seq))
if seq == wpnum:
raise NotAchievedException("Reached desired waypoint without first decending 10m,\
indicating slope was replanned unexpectedly")
if (self.get_altitude(relative=True, timeout=2) - init_altitude) < -10:
break
# Disarm
self.wait_disarmed(timeout=600)
self.progress("Mission OK")
def tests(self):
'''return list of all tests'''
ret = super(AutoTestPlane, self).tests()
ret.extend([
("AuxModeSwitch",
"Set modes via auxswitches",
self.test_setting_modes_via_auxswitches),
("TestRCCamera",
"Test RC Option - Camera Trigger",
self.test_rc_option_camera_trigger),
("TestRCRelay", "Test Relay RC Channel Option", self.test_rc_relay),
("ThrottleFailsafe",
"Fly throttle failsafe",
self.test_throttle_failsafe),
("NeedEKFToArm",
"Ensure we need EKF to be healthy to arm",
self.test_need_ekf_to_arm),
("ThrottleFailsafeFence",
"Fly fence survives throttle failsafe",
self.test_throttle_failsafe_fence),
("TestFlaps", "Flaps", self.fly_flaps),
("DO_CHANGE_SPEED", "Test mavlink DO_CHANGE_SPEED command", self.fly_do_change_speed),
("DO_REPOSITION",
"Test mavlink DO_REPOSITION command",
self.fly_do_reposition),
("GuidedRequest",
"Test handling of MISSION_ITEM in guided mode",
self.fly_do_guided_request),
("MainFlight",
"Lots of things in one flight",
self.test_main_flight),
("TestGripperMission",
"Test Gripper mission items",
self.test_gripper_mission),
("Parachute", "Test Parachute", self.test_parachute),
("ParachuteSinkRate", "Test Parachute (SinkRate triggering)", self.test_parachute_sinkrate),
("AIRSPEED_AUTOCAL", "Test AIRSPEED_AUTOCAL", self.airspeed_autocal),
("RangeFinder",
"Test RangeFinder Basic Functionality",
self.test_rangefinder),
("FenceStatic",
"Test Basic Fence Functionality",
self.test_fence_static),
("FenceRTL",
"Test Fence RTL",
self.test_fence_rtl),
("FenceRTLRally",
"Test Fence RTL Rally",
self.test_fence_rtl_rally),
("FenceRetRally",
"Test Fence Ret_Rally",
self.test_fence_ret_rally),
("FenceAltCeilFloor",
"Tests the fence ceiling and floor",
self.test_fence_alt_ceil_floor),
("FenceBreachedChangeMode",
"Tests retrigger of fence action when changing of mode while fence is breached",
self.test_fence_breached_change_mode),
("FenceNoFenceReturnPoint",
"Tests calculated return point during fence breach when no fence return point present",
self.test_fence_breach_no_return_point),
("FenceNoFenceReturnPointInclusion",
"Tests using home as fence return point when none is present, and no inclusion fence is uploaded",
self.test_fence_breach_no_return_point_no_inclusion),
("FenceDisableUnderAction",
"Tests Disabling fence while undergoing action caused by breach",
self.test_fence_disable_under_breach_action),
("ADSB",
"Test ADSB",
self.test_adsb),
("SimADSB",
"Test SIM_ADSB",
self.SimADSB),
("Button",
"Test Buttons",
self.test_button),
("FRSkySPort",
"Test FrSky SPort mode",
self.test_frsky_sport),
("FRSkyPassThrough",
"Test FrSky PassThrough serial output",
self.test_frsky_passthrough),
("FRSkyMAVlite",
"Test FrSky MAVlite serial output",
self.test_frsky_mavlite),
("FRSkyD",
"Test FrSkyD serial output",
self.test_frsky_d),
("LTM",
"Test LTM serial output",
self.test_ltm),
("DEVO",
"Test DEVO serial output",
self.DEVO),
("AdvancedFailsafe",
"Test Advanced Failsafe",
self.test_advanced_failsafe),
("LOITER",
"Test Loiter mode",
self.LOITER),
("DeepStall",
"Test DeepStall Landing",
self.fly_deepstall),
("WatchdogHome",
"Ensure home is restored after watchdog reset",
self.WatchdogHome),
("LargeMissions",
"Test Manipulation of Large missions",
self.test_large_missions),
("Soaring",
"Test Soaring feature",
self.fly_soaring),
("Terrain",
"Test terrain following in mission",
self.fly_terrain_mission),
("Terrain-loiter",
"Test terrain following in loiter",
self.test_loiter_terrain),
("VectorNavEAHRS",
"Test VectorNav EAHRS support",
self.test_vectornav),
("LordEAHRS",
"Test LORD Microstrain EAHRS support",
self.test_lord),
("Deadreckoning",
"Test deadreckoning support",
self.deadreckoning),
("DeadreckoningNoAirSpeed",
"Test deadreckoning support with no airspeed sensor",
self.deadreckoning_no_airspeed_sensor),
("EKFlaneswitch",
"Test EKF3 Affinity and Lane Switching",
self.ekf_lane_switch),
("AirspeedDrivers",
"Test AirSpeed drivers",
self.test_airspeed_drivers),
("RTL_CLIMB_MIN",
"Test RTL_CLIMB_MIN",
self.rtl_climb_min),
("ClimbBeforeTurn",
"Test climb-before-turn",
self.climb_before_turn),
("IMUTempCal",
"Test IMU temperature calibration",
self.test_imu_tempcal),
("MAV_DO_AUX_FUNCTION",
"Test triggering Auxillary Functions via mavlink",
self.fly_aux_function),
("SmartBattery",
"Test smart battery logging etc",
self.SmartBattery),
("FlyEachFrame",
"Fly each supported internal frame",
self.fly_each_frame),
("RCDisableAirspeedUse",
"Test RC DisableAirspeedUse option",
self.RCDisableAirspeedUse),
("AHRS_ORIENTATION",
"Test AHRS_ORIENTATION parameter",
self.AHRS_ORIENTATION),
("AHRSTrim",
"AHRS trim testing",
self.ahrstrim),
("Landing-Drift",
"Circuit with baro drift",
self.fly_landing_baro_drift),
("ForcedDCM",
"Switch to DCM mid-flight",
self.ForcedDCM),
("DCMFallback",
"Really annoy the EKF and force fallback",
self.DCMFallback),
("MAVFTP",
"Test MAVProxy can talk FTP to autopilot",
self.MAVFTP),
("AUTOTUNE",
"Test AutoTune mode",
self.AUTOTUNE),
("MegaSquirt",
"Test MegaSquirt EFI",
self.MegaSquirt),
("MSP_DJI",
"Test MSP DJI serial output",
self.test_msp_dji),
("SpeedToFly",
"Test soaring speed-to-fly",
self.fly_soaring_speed_to_fly),
("GlideSlopeThresh",
"Test rebuild glide slope if above and climbing",
self.test_glide_slope_threshold),
("LogUpload",
"Log upload",
self.log_upload),
("HIGH_LATENCY2",
"Set sending of HIGH_LATENCY2",
self.HIGH_LATENCY2),
])
return ret
def disabled_tests(self):
return {
"Terrain-loiter": "Loading of terrain data is not reliable",
}
|
samuelctabor/ardupilot
|
Tools/autotest/arduplane.py
|
Python
|
gpl-3.0
| 141,356
|
[
"Firefly"
] |
9fb56372afb56b475e8830d421c71bdfc910ad59f947eacb91508e0f4d2bd71a
|
import numpy as np
from numpy.random import normal as randn
import tspy as ts
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
def gen_ar(T, coeffs):
c = np.flipud(coeffs)
p = c.size
x = np.zeros((T+p))
    for t in range(p, T + p):  # fill all T returned samples; stopping at T left the tail at zero
x[t] = np.dot(x[t-p:t], c) + randn()
return x[p:]
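# A minimal usage sketch (illustrative): draw T samples from the AR(2)
# recursion x_t = 0.3*x_{t-1} - 0.6*x_{t-2} + e_t, as used in main() below.
#
#     x = gen_ar(500, [0.3, -0.6])
#     assert x.shape == (500,)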
def main():
T = 500
N = 2048
e = np.zeros((T,))
for t in range(T):
e[t] = randn()
a1 = 0.7
b1, b2 = 0.3, -0.6
x = gen_ar(T, [a1])
y = gen_ar(T, [b1, b2])
w, method = 201, 'hamming'
settings0 = {'smooth': False}
settings1 = {'smooth': True, 'smooth_window': w, 'smooth_method': method}
se = ts.spectrum(e, num=N, **settings0)
sx = ts.spectrum(x, num=N, **settings0)
sy = ts.spectrum(y, num=N, **settings0)
sse = ts.spectrum(e, num=N, **settings1)
ssx = ts.spectrum(x, num=N, **settings1)
ssy = ts.spectrum(y, num=N, **settings1)
freq = np.fft.rfftfreq(N, d=1/(2*np.pi))
tse = (1 / (2 * np.pi)) * np.ones(freq.shape)
tsx = (1 / (2 * np.pi)) * (1 / (1 + a1 ** 2 - 2 * a1 * np.cos(freq)))
tsy = (1 / (2 * np.pi)) * (1 / (1 + b1 ** 2 + b2 ** 2
- 2 * b1 * np.cos(freq)
- 2 * b2 * np.cos(2 * freq)
+ 2 * b1 * b2 * np.cos(freq)))
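    # `tse`, `tsx` and `tsy` are the theoretical spectral densities. For an
    # AR(p) process x_t = sum_k phi_k x_{t-k} + e_t with unit noise variance,
    # f(w) = (1/(2*pi)) / |1 - sum_k phi_k * exp(-1j*k*w)|**2; expanding the
    # squared modulus for p = 2 yields exactly the
    # 1 + b1**2 + b2**2 - 2*b1*cos(w) - 2*b2*cos(2*w) + 2*b1*b2*cos(w)
    # denominator above, and p = 1 reduces to the `tsx` expression.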
ticks = [0, np.pi/4, np.pi/2, 3*np.pi/4, np.pi]
tick_labels = ['0', r'$\frac{1}{4} \pi$', r'$\frac{2}{4} \pi$',
r'$\frac{3}{4} \pi$', r'$\pi$']
tpl_conf = {'color': 'red', 'linestyle': 'dashed', 'linewidth': 2,
'label': 'Theoretical'}
pl_conf = {'color': 'black', 'alpha': 0.4, 'linewidth':0.5,
'label': 'Estimated'}
spl_conf = {'color': 'black', 'linewidth': 2,
'label': 'Estimated (smooth)'}
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(12, 7))
ax[0, 0].plot(e, color='black', linewidth=0.5)
ax[0, 0].grid(alpha=0.3)
ax[0, 0].set_xlabel('Time')
ax[0, 0].set_ylabel('Value')
ax[0, 0].set_title('Gaussian white noise - sample realization')
ax[0, 1].plot(x, color='black', linewidth=0.5)
ax[0, 1].grid(alpha=0.3)
ax[0, 1].set_xlabel('Time')
ax[0, 1].set_ylabel('Value')
ax[0, 1].set_title('AR(1) process - sample realization')
ax[0, 2].plot(y, color='black', linewidth=0.5)
ax[0, 2].grid(alpha=0.3)
ax[0, 2].set_xlabel('Time')
ax[0, 2].set_ylabel('Value')
ax[0, 2].set_title('AR(2) process - sample realization')
ax[1, 0].plot(freq, tse, **tpl_conf)
ax[1, 0].plot(freq, se, **pl_conf)
ax[1, 0].plot(freq, sse, **spl_conf)
ax[1, 0].legend()
ax[1, 0].grid(alpha=0.3)
ax[1, 0].xaxis.set_ticks(ticks)
ax[1, 0].xaxis.set_ticklabels(tick_labels)
ax[1, 0].set_xlabel('Frequency')
ax[1, 0].set_ylabel('Power density')
ax[1, 0].set_title('Gaussian white noise - spectrum')
ax[1, 1].plot(freq, tsx, **tpl_conf)
ax[1, 1].plot(freq, sx, **pl_conf)
ax[1, 1].plot(freq, ssx, **spl_conf)
ax[1, 1].legend()
ax[1, 1].grid(alpha=0.3)
ax[1, 1].xaxis.set_ticks(ticks)
ax[1, 1].xaxis.set_ticklabels(tick_labels)
ax[1, 1].set_xlabel('Frequency')
ax[1, 1].set_ylabel('Power density')
ax[1, 1].set_title('AR(1) process - spectrum')
ax[1, 2].plot(freq, tsy, **tpl_conf)
ax[1, 2].plot(freq, sy, **pl_conf)
ax[1, 2].plot(freq, ssy, **spl_conf)
ax[1, 2].legend()
ax[1, 2].grid(alpha=0.3)
ax[1, 2].xaxis.set_ticks(ticks)
ax[1, 2].xaxis.set_ticklabels(tick_labels)
ax[1, 2].set_xlabel('Frequency')
ax[1, 2].set_ylabel('Power density')
ax[1, 2].set_title('AR(2) process - spectrum')
plt.tight_layout()
fig.savefig('../t_spectrum.pdf')
fig.savefig('../t_spectrum.png')
# plt.show()
if __name__ == '__main__':
main()
|
apsql/time_series
|
tests/t_spectrum.py
|
Python
|
mit
| 3,836
|
[
"Gaussian"
] |
ff0ab4d3169775bbc37de2900dad39a25e1d61246078e65027358db22390ca80
|
#!/usr/bin/python3
from __future__ import print_function
from vizdoom import *
import sys
import threading
import math
from random import choice
from time import sleep
from matplotlib import pyplot as plt
sys.path.append('../../deep_feedback_learning')
import numpy as np
import cv2
import deep_feedback_learning
# Create DoomGame instance. It will run the game and communicate with you.
game = DoomGame()
# Now it's time for configuration!
# load_config could be used to load configuration instead of doing it here with code.
# If load_config is used, in-code configuration will also work - the most recent changes will add to previous ones.
# game.load_config("../../scenarios/basic.cfg")
# Sets path to additional resources wad file which is basically your scenario wad.
# If not specified default maps will be used and it's pretty much useless... unless you want to play good old Doom.
game.set_doom_scenario_path("./basic.wad")
# Sets map to start (scenario .wad files can contain many maps).
game.set_doom_map("map01")
# Sets resolution. Default is 320X240
game.set_screen_resolution(ScreenResolution.RES_640X480)
# create masks for left and right visual fields - note that these only cover the upper half of the image
# this is to help prevent the tracking getting confused by the floor pattern
width = 640
widthNet = 320
height = 480
heightNet = 240
# Sets the screen buffer format. Not used here but now you can change it. Default is CRCGCB.
game.set_screen_format(ScreenFormat.RGB24)
# Enables depth buffer.
game.set_depth_buffer_enabled(True)
# Enables labeling of in game objects labeling.
game.set_labels_buffer_enabled(True)
# Enables buffer with top down map of the current episode/level.
game.set_automap_buffer_enabled(True)
# Sets other rendering options
game.set_render_hud(False)
game.set_render_minimal_hud(False) # If hud is enabled
game.set_render_crosshair(True)
game.set_render_weapon(False)
game.set_render_decals(False)
game.set_render_particles(False)
game.set_render_effects_sprites(False)
game.set_render_messages(False)
game.set_render_corpses(False)
# Adds buttons that will be allowed.
# game.add_available_button(Button.MOVE_LEFT)
# game.add_available_button(Button.MOVE_RIGHT)
game.add_available_button(Button.MOVE_LEFT_RIGHT_DELTA, 50)
game.add_available_button(Button.ATTACK)
game.add_available_button(Button.TURN_LEFT_RIGHT_DELTA)
# Adds game variables that will be included in state.
game.add_available_game_variable(GameVariable.AMMO2)
# Causes episodes to finish after 500 tics (actions)
game.set_episode_timeout(500)
# Makes episodes start after 10 tics (~after raising the weapon)
game.set_episode_start_time(10)
# Makes the window appear (turned on by default)
game.set_window_visible(True)
# Turns on the sound. (turned off by default)
game.set_sound_enabled(True)
# Sets the living reward (for each move) to -1
game.set_living_reward(-1)
# Sets ViZDoom mode (PLAYER, ASYNC_PLAYER, SPECTATOR, ASYNC_SPECTATOR, PLAYER mode is default)
game.set_mode(Mode.PLAYER)
# Enables engine output to console.
#game.set_console_enabled(True)
nFiltersInput = 3
nFiltersHidden = 3
minT = 3
maxT = 30
nHidden0 = 4
nHidden1 = 2
net = deep_feedback_learning.DeepFeedbackLearning(widthNet*heightNet,[nHidden0*nHidden0,nHidden1*nHidden1], 1, nFiltersInput, nFiltersHidden, minT,maxT)
net.getLayer(0).setConvolution(widthNet,heightNet)
net.getLayer(1).setConvolution(nHidden0,nHidden0)
net.initWeights(0.5, 0, deep_feedback_learning.Neuron.MAX_OUTPUT_RANDOM)
net.setLearningRate(0)
net.setAlgorithm(deep_feedback_learning.DeepFeedbackLearning.ico)
# net.getLayer(0).setInputNorm2ZeroMean(128,256)
net.getLayer(0).setLearningRate(1E-10)
net.getLayer(1).setLearningRate(0.00001)
net.getLayer(2).setLearningRate(0.001)
#net.getLayer(1).setNormaliseWeights(True)
#net.getLayer(2).setNormaliseWeights(True)
net.setUseDerivative(1)
net.setBias(0)
# Initialize the game. Further configuration won't take any effect from now on.
game.init()
# Run this many episodes
episodes = 1000
# Sets time that will pause the engine after each action (in seconds)
# Without this everything would go too fast for you to keep track of what's happening.
sleep_time = 1.0 / DEFAULT_TICRATE # = 0.028
delta2 = 0
dontshoot = 1
deltaZeroCtr = 1
inp = np.zeros(widthNet*heightNet)
sharpen = np.array((
[0, 1, 0],
[1, 4, 1],
[0, 1, 0]), dtype="int")
edge = np.array((
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]), dtype="int")
plt.ion()
plt.show()
ln1 = False
ln2 = [False,False,False,False]
def getWeights2D(neuron):
n_neurons = net.getLayer(0).getNneurons()
n_inputs = net.getLayer(0).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
if net.getLayer(0).getNeuron(neuron).getMask(i):
weights[i] = net.getLayer(0).getNeuron(neuron).getAvgWeight(i)
else:
weights[i] = np.nan
return weights.reshape(heightNet,widthNet)
def getWeights1D(layer,neuron):
n_neurons = net.getLayer(layer).getNneurons()
n_inputs = net.getLayer(layer).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
weights[i] = net.getLayer(layer).getNeuron(neuron).getAvgWeight(i)
return weights
def plotWeights():
global ln1
global ln2
while True:
if ln1:
ln1.remove()
plt.figure(1)
w1 = getWeights2D(0)
for i in range(1,net.getLayer(0).getNneurons()):
w2 = getWeights2D(i)
w1 = np.where(np.isnan(w2),w1,w2)
ln1 = plt.imshow(w1,cmap='gray')
plt.draw()
plt.pause(0.1)
for j in range(1,3):
if ln2[j]:
ln2[j].remove()
plt.figure(j+1)
w1 = np.zeros( (net.getLayer(j).getNneurons(),net.getLayer(j).getNeuron(0).getNinputs()) )
for i in range(0,net.getLayer(j).getNneurons()):
w1[i,:] = getWeights1D(j,i)
ln2[j] = plt.imshow(w1,cmap='gray')
plt.draw()
plt.pause(0.1)
t1 = threading.Thread(target=plotWeights)
t1.start()
for i in range(episodes):
print("Episode #" + str(i + 1))
# Starts a new episode. It is not needed right after init() but it doesn't cost much. At least the loop is nicer.
game.new_episode()
while not game.is_episode_finished():
# Gets the state
state = game.get_state()
# Which consists of:
n = state.number
vars = state.game_variables
screen_buf = state.screen_buffer
depth_buf = state.depth_buffer
labels_buf = state.labels_buffer
automap_buf = state.automap_buffer
labels = state.labels
        midlinex = int(width/2)
        midliney = int(height*0.75)
        crcb = screen_buf
        screen_left = screen_buf[100:midliney, 0:midlinex-1, 2]
        screen_right = screen_buf[100:midliney, midlinex+1:(width-1), 2]
        screen_left = cv2.filter2D(screen_left, -1, sharpen)
        screen_right = cv2.filter2D(screen_right, -1, sharpen)
# cv2.imwrite('/tmp/left.png',screen_left)
# cv2.imwrite('/tmp/right.png',screen_right)
lavg = np.average(screen_left)
ravg = np.average(screen_right)
delta = (lavg - ravg)*15
dd = delta - delta2
delta2 = delta
# print(delta)
# Makes a random action and get remember reward.
shoot = 0
if (dontshoot > 1) :
dontshoot = dontshoot - 1
else :
if (abs(dd) < 10) :
shoot = 1
dontshoot = 60
deltaZeroCtr = 4
if deltaZeroCtr>0:
deltaZeroCtr = deltaZeroCtr - 1
delta = 0
        blue = cv2.resize(crcb, (widthNet, heightNet))
        blue = blue[:, :, 2]
        blue = cv2.filter2D(blue, -1, edge)
        err = np.linspace(delta, delta, nHidden0*nHidden0)
net.doStep(blue.flatten()/512-0.5,err)
#weightsplot.set_xdata(np.append(weightsplot.get_xdata(),n))
#weightsplot.set_ydata(np.append(weightsplot.get_ydata(),net.getLayer(0).getWeightDistanceFromInitialWeights()))
output = net.getOutput(0)*5
print(delta,output,
net.getLayer(0).getWeightDistanceFromInitialWeights(),"\t",
net.getLayer(1).getWeightDistanceFromInitialWeights(),"\t",
net.getLayer(2).getWeightDistanceFromInitialWeights())
# action[0] is translating left/right; action[2] is rotating/aiming
# action = [ delta+output , shoot, 0. ]
action = [ 0., shoot, (delta+output)*0.1 ]
r = game.make_action(action)
# if sleep_time > 0:
# sleep(sleep_time)
# Check how the episode went.
print("Episode finished.")
print("Total reward:", game.get_total_reward())
print("************************")
sleep(1)
# It will be done automatically anyway but sometimes you need to do it in the middle of the program...
game.close()
|
nlholdem/icodoom
|
ICO1/deep_feedback_learning_old/vizdoom/ico.py
|
Python
|
gpl-3.0
| 8,982
|
[
"NEURON"
] |
81fc1a09cbaf8e4e6e966dcbf2a6397edb99a71ad7cbb5865e171420e4b55fbc
|
##
# Copyright 2015-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Molpro, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil
import re
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.binary import Binary
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.filetools import apply_regex_substitutions, mkdir, read_file
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_Molpro(ConfigureMake, Binary):
"""Support for building and installing Molpro."""
@staticmethod
def extra_options():
"""Define custom easyconfig parameters for Molpro."""
# Combine extra variables from Binary and ConfigureMake easyblocks as
# well as those needed for Molpro specifically
extra_vars = Binary.extra_options()
extra_vars = ConfigureMake.extra_options(extra_vars)
extra_vars.update({
'precompiled_binaries': [False, "Are we installing precompiled binaries?", CUSTOM],
})
return EasyBlock.extra_options(extra_vars)
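    # Illustrative easyconfig usage of the custom parameter defined above
    # (a sketch, not taken from a real easyconfig):
    #
    #     precompiled_binaries = True                # install vendor binaries
    #     license_file = '/path/to/molpro/token'     # standard EasyBuild parameter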
def __init__(self, *args, **kwargs):
"""Easyblock constructor, initialize class variables specific to Molpro and check on license token."""
super(EB_Molpro, self).__init__(*args, **kwargs)
        self.full_prefix = ''  # not None, to keep the easyblock compatible with --module-only
self.orig_launcher = None
self.cleanup_token_symlink = False
self.license_token = os.path.join(os.path.expanduser('~'), '.molpro', 'token')
def extract_step(self):
"""Extract Molpro source files, or just copy in case of binary install."""
if self.cfg['precompiled_binaries']:
Binary.extract_step(self)
else:
ConfigureMake.extract_step(self)
def configure_step(self):
"""Custom configuration procedure for Molpro: use 'configure -batch'."""
if not os.path.isfile(self.license_token):
if self.cfg['license_file'] is not None and os.path.isfile(self.cfg['license_file']):
# put symlink in place to specified license file in $HOME/.molpro/token
# other approaches (like defining $MOLPRO_KEY) don't seem to work
self.cleanup_token_symlink = True
mkdir(os.path.dirname(self.license_token))
try:
os.symlink(self.cfg['license_file'], self.license_token)
self.log.debug("Symlinked %s to %s", self.cfg['license_file'], self.license_token)
                except OSError, err:
                    raise EasyBuildError("Failed to create symlink for license token at %s: %s",
                                         self.license_token, err)
else:
self.log.warning("No licence token found at either {0} or via 'license_file'".format(self.license_token))
# Only do the rest of the configuration if we're building from source
if not self.cfg['precompiled_binaries']:
# installation prefix
self.cfg.update('configopts', "-prefix %s" % self.installdir)
            # compilers & MPI
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', "-%s -%s" % (os.environ['CC_SEQ'], os.environ['F90_SEQ']))
if 'MPI_INC_DIR' in os.environ:
self.cfg.update('configopts', "-mpp -mppbase %s" % os.environ['MPI_INC_DIR'])
else:
raise EasyBuildError("$MPI_INC_DIR not defined")
else:
self.cfg.update('configopts', "-%s -%s" % (os.environ['CC'], os.environ['F90']))
# BLAS/LAPACK
if 'BLAS_LIB_DIR' in os.environ:
self.cfg.update('configopts', "-blas -blaspath %s" % os.environ['BLAS_LIB_DIR'])
else:
raise EasyBuildError("$BLAS_LIB_DIR not defined")
if 'LAPACK_LIB_DIR' in os.environ:
self.cfg.update('configopts', "-lapack -lapackpath %s" % os.environ['LAPACK_LIB_DIR'])
else:
raise EasyBuildError("$LAPACK_LIB_DIR not defined")
# 32 vs 64 bit
if self.toolchain.options.get('32bit', None):
self.cfg.update('configopts', '-i4')
else:
self.cfg.update('configopts', '-i8')
run_cmd("./configure -batch %s" % self.cfg['configopts'])
cfgfile = os.path.join(self.cfg['start_dir'], 'CONFIG')
cfgtxt = read_file(cfgfile)
# determine original LAUNCHER value
launcher_regex = re.compile('^LAUNCHER=(.*)$', re.M)
res = launcher_regex.search(cfgtxt)
if res:
self.orig_launcher = res.group(1)
self.log.debug("Found original value for LAUNCHER: %s", self.orig_launcher)
else:
raise EasyBuildError("Failed to determine LAUNCHER value")
# determine full installation prefix
prefix_regex = re.compile('^PREFIX=(.*)$', re.M)
res = prefix_regex.search(cfgtxt)
if res:
self.full_prefix = res.group(1)
self.log.debug("Found full installation prefix: %s", self.full_prefix)
else:
raise EasyBuildError("Failed to determine full installation prefix")
# determine MPI launcher command that can be used during build/test
# obtain command with specific number of cores (required by mpi_cmd_for), then replace that number with '%n'
launcher = self.toolchain.mpi_cmd_for('%x', self.cfg['parallel'])
launcher = launcher.replace(' %s' % self.cfg['parallel'], ' %n')
# patch CONFIG file to change LAUNCHER definition, in order to avoid having to start mpd
apply_regex_substitutions(cfgfile, [(r"^(LAUNCHER\s*=\s*).*$", r"\1 %s" % launcher)])
# reread CONFIG and log contents
cfgtxt = read_file(cfgfile)
self.log.info("Contents of CONFIG file:\n%s", cfgtxt)
def build_step(self):
"""Custom build procedure for Molpro, unless it is a binary install."""
if not self.cfg['precompiled_binaries']:
super(EB_Molpro, self).build_step()
def test_step(self):
"""
Custom test procedure for Molpro.
Run 'make quicktest, make test', but only for source install and if license is available.
"""
# Only bother to check if the licence token is available
if os.path.isfile(self.license_token) and not self.cfg['precompiled_binaries']:
# check 'main routes' only
run_cmd("make quicktest")
if build_option('mpi_tests'):
# extensive test
run_cmd("make MOLPRO_OPTIONS='-n%s' test" % self.cfg['parallel'])
else:
self.log.info("Skipping extensive testing of Molpro since MPI testing is disabled")
def install_step(self):
"""
Custom install procedure for Molpro.
For source install:
* put license token in place in $installdir/.token
* run 'make tuning'
* install with 'make install'
For binary install:
* run interactive installer
"""
if self.cfg['precompiled_binaries']:
"""Build by running the command with the inputfiles"""
try:
os.chdir(self.cfg['start_dir'])
except OSError, err:
raise EasyBuildError("Failed to move (back) to %s: %s", self.cfg['start_dir'], err)
for src in self.src:
if LooseVersion(self.version) >= LooseVersion('2015'):
# install dir must be non-existent
shutil.rmtree(self.installdir)
cmd = "./{0} -batch -prefix {1}".format(src['name'], self.installdir)
else:
cmd = "./{0} -batch -instbin {1}/bin -instlib {1}/lib".format(src['name'], self.installdir)
# questions whose text must match exactly as asked
qa = {
"Please give your username for accessing molpro\n": '',
"Please give your password for accessing molpro\n": '',
}
# questions whose text may be matched as a regular expression
stdqa = {
r"Enter installation directory for executable files \[.*\]\n": os.path.join(self.installdir, 'bin'),
r"Enter installation directory for library files \[.*\]\n": os.path.join(self.installdir, 'lib'),
r"directory .* does not exist, try to create [Y]/n\n": '',
}
run_cmd_qa(cmd, qa=qa, std_qa=stdqa, log_all=True, simple=True)
else:
if os.path.isfile(self.license_token):
run_cmd("make tuning")
super(EB_Molpro, self).install_step()
# put original LAUNCHER definition back in place in bin/molpro that got installed,
            # since the value used during installation points to temporary files
molpro_path = os.path.join(self.full_prefix, 'bin', 'molpro')
apply_regex_substitutions(molpro_path, [(r"^(LAUNCHER\s*=\s*).*$", r"\1 %s" % self.orig_launcher)])
if self.cleanup_token_symlink:
try:
os.remove(self.license_token)
self.log.debug("Symlink to license token %s removed", self.license_token)
except OSError, err:
raise EasyBuildError("Failed to remove %s: %s", self.license_token, err)
def make_module_req_guess(self):
"""Customize $PATH guesses for Molpro module."""
guesses = super(EB_Molpro, self).make_module_req_guess()
guesses.update({
'PATH': [os.path.join(os.path.basename(self.full_prefix), x) for x in ['bin', 'utilities']],
})
return guesses
def sanity_check_step(self):
"""Custom sanity check for Molpro."""
prefix_subdir = os.path.basename(self.full_prefix)
files_to_check = ['bin/molpro']
dirs_to_check = []
if LooseVersion(self.version) >= LooseVersion('2015') or not self.cfg['precompiled_binaries']:
files_to_check.extend(['bin/molpro.exe'])
dirs_to_check.extend(['doc', 'examples', 'utilities'])
custom_paths = {
'files': [os.path.join(prefix_subdir, x) for x in files_to_check],
'dirs': [os.path.join(prefix_subdir, x) for x in dirs_to_check],
}
super(EB_Molpro, self).sanity_check_step(custom_paths=custom_paths)
|
ULHPC/easybuild-easyblocks
|
easybuild/easyblocks/m/molpro.py
|
Python
|
gpl-2.0
| 11,923
|
[
"Molpro"
] |
333a9c80a9a98bb1c41041fb17b8752ea697cc9fd346307041c29fafe4b0262f
|
#! /usr/bin/env python
"""
April 2013
eeesh GSEA does NOT respect the mode flag!
Now realise that the creation of the input rank file for gsea needs to take the lowest p value for duplicate
feature names. To make Ish's life easier, remove duplicate gene ids from any gene set to stop GSEA from
barfing.
October 14 2012
Amazingly long time to figure out that GSEA fails with useless error message if any filename contains a dash "-"
eesh.
Added history .gmt source - requires passing a faked name to gsea
Wrapper for GSEA http://www.broadinstitute.org/gsea/index.jsp
Started Feb 22
Copyright 2012 Ross Lazarus
All rights reserved
Licensed under the LGPL
called eg as
#!/bin/sh
GALAXY_LIB="/data/extended/galaxy/lib"
if [ "$GALAXY_LIB" != "None" ]; then
if [ -n "$PYTHONPATH" ]; then
PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"
else
PYTHONPATH="$GALAXY_LIB"
fi
export PYTHONPATH
fi
cd /data/extended/galaxy/database/job_working_directory/027/27311
python /data/extended/galaxy/tools/rgenetics/rgGSEA.py --input_tab "/data/extended/galaxy/database/files/033/dataset_33806.dat" --adjpvalcol "5" --signcol "2"
--idcol "1" --outhtml "/data/extended/galaxy/database/files/034/dataset_34455.dat" --input_name "actaearly-Controlearly-actalate-Controllate_topTable.xls"
--setMax "500" --setMin "15" --nPerm "1000" --plotTop "20"
--gsea_jar "/data/extended/galaxy/tool-data/shared/jars/gsea2-2.0.12.jar"
--output_dir "/data/extended/galaxy/database/job_working_directory/027/27311/dataset_34455_files" --mode "Max_probe"
--title " actaearly-Controlearly-actalate-Controllate_interpro_GSEA" --builtin_gmt "/data/genomes/gsea/3.1/IPR_DOMAIN.gmt"
"""
import optparse
import tempfile
import os
import sys
import subprocess
import time
import shutil
import glob
import math
import re
KEEPSELECTION = False # detailed records for selection of multiple probes
def timenow():
"""return current time as a string
"""
return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
def fix_subdir(adir,destdir):
""" Galaxy wants everything in the same files_dir
if os.path.exists(adir):
for (d,dirs,files) in os.path.walk(adir):
for f in files:
sauce = os.path.join(d,f)
shutil.copy(sauce,destdir)
"""
def fixAffycrap(apath=''):
"""class='richTable'>RUNNING ES</th><th class='richTable'>CORE ENRICHMENT</th><tr><td class='lessen'>1</td>
<td><a href='https://www.affymetrix.com/LinkServlet?probeset=LBR'>LBR</a></td><td></td><td></td><td>1113</td>
<td>0.194</td><td>-0.1065</td><td>No</td></tr><tr><td class='lessen'>2</td><td>
<a href='https://www.affymetrix.com/LinkServlet?probeset=GGPS1'>GGPS1</a></td><td></td><td></td><td>4309</td><td>0.014</td><td>-0.4328</td>
<td>No</td></tr>
"""
html = []
try:
html = open(apath,'r').readlines()
except:
return html
for i,row in enumerate(html):
row = re.sub('https\:\/\/www.affymetrix.com\/LinkServlet\?probeset=',"http://www.genecards.org/index.php?path=/Search/keyword/",row)
html[i] = row
return html
cleanup = False
if os.path.exists(adir):
flist = os.listdir(adir) # get all files created
for f in flist:
apath = os.path.join(adir,f)
dest = os.path.join(destdir,f)
if not os.path.isdir(apath):
if os.path.splitext(f)[1].lower() == '.html':
html = fixAffycrap(apath)
fixed = open(apath,'w')
fixed.write('\n'.join(html))
fixed.write('\n')
fixed.close()
if not os.path.isfile(dest):
shutil.copy(apath,dest)
else:
fix_subdir(apath,destdir)
if cleanup:
try:
shutil.rmtree(path=adir,ignore_errors=True)
except:
pass
def getFileString(fpath, outpath):
"""
format a nice file size string
"""
size = ''
fp = os.path.join(outpath, fpath)
s = fpath
if os.path.isfile(fp):
n = float(os.path.getsize(fp))
if n > 2**20:
size = ' (%1.1f MB)' % (n/2**20)
elif n > 2**10:
size = ' (%1.1f KB)' % (n/2**10)
elif n > 0:
size = ' (%d B)' % (int(n))
s = '%s %s' % (fpath, size)
return s
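# Example (illustrative): for a 1.5 MB file 'report.html' inside outpath,
# getFileString('report.html', outpath) returns "report.html  (1.5 MB)".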
class gsea_wrapper:
"""
GSEA java desktop client has a CL interface. CL can be gleaned by clicking the 'command line' button after setting up an analysis
We don't want gsea to do the analysis but it can read .rnk files containing rows of identifiers and an evidence weight such as the signed t statistic from limma for differential expression
(vgalaxy)rlazarus@iaas1:~/public_html/idle_illumina_analysis$ cat gseaHumanREFSEQ.sh
#!/bin/bash
for RNK in `ls *.rnk`
do
DIRNAME=${RNK%.*}
echo $DIRNAME
qsub -cwd -b y java -Xmx4096m -cp /data/app/bin/gsea2-2.07.jar xtools.gsea.GseaPreranked -gmx ../msigdb.v3.0.symbols.gmt -collapse true -mode Max_probe -norm meandiv
-nperm 1000 -rnk $RNK -scoring_scheme weighted -rpt_label $RNK -chip ../RefSeq_human.chip -include_only_symbols true -make_sets true -plot_top_x 20 -rnd_seed timestamp
-set_max 500 -set_min 15 -zip_report false -out gseaout/${DIRNAME} -gui false
done
"""
def __init__(self,myName=None,opts=None):
""" setup cl for gsea
"""
self.idcol = 0
self.signcol = 0
self.adjpvalcol = 0
self.progname=myName
self.opts = opts
remove_duplicates=True
if not os.path.isdir(opts.output_dir):
try:
os.makedirs(opts.output_dir)
except:
print >> sys.stderr,'##Error: GSEA wrapper unable to create or find output directory %s. Stopping' % (opts.output_dir)
if not os.path.isdir(opts.output_dir):
print >> sys.stderr,'##Error: 2nd try GSEA wrapper unable to create or find output directory %s. Stopping' % (opts.output_dir)
fakeBase = re.sub('[^a-zA-Z0-9_]+', '', opts.input_name) # gives a more useful title for the GSEA report
fakeGMT = os.path.join(opts.output_dir,fakeBase)
fakeGMT = os.path.abspath(fakeGMT)
# print ['mkdir ',opts.output_dir]
# try:
# subprocess.call(['mkdir',opts.output_dir])
# except:
# pass
fakeRanks = '%s.rnk' % fakeGMT
fakeRankBase = '%s.rnk' % fakeBase
if not fakeGMT.endswith('.gmt'):
fakeGMT = '%s.gmt' % fakeGMT
if not fakeBase.endswith('.gmt'):
fakeGMTBase = '%s.gmt' % fakeBase
if opts.builtin_gmt and opts.history_gmt:
newfile = open(fakeGMT,'w')
print >> sys.stderr , 'cat %s %s > %s ' % (opts.builtin_gmt,opts.history_gmt,newfile)
subprocess.call(['cat',opts.builtin_gmt,opts.history_gmt],stdout=newfile)
newfile.close()
elif opts.history_gmt:
#print >> sys.stderr , 'cp1 %s %s ' % (opts.history_gmt,fakeGMT)
subprocess.call(['cp',opts.history_gmt,fakeGMT])
else:
subprocess.call(['cp',opts.builtin_gmt,fakeGMT])
# remove dupes from each gene set
gmt = open(fakeGMT,'r').readlines()
gmt = [x for x in gmt if len(x.split('\t')) > 3]
ugmt = []
for i,row in enumerate(gmt):
rows = row.rstrip().split('\t')
gmtname = rows[0]
gmtcomment = rows[1]
            glist = list(set(rows[2:]))  # drop duplicate gene ids within a set, per the module docstring
newgmt = [gmtname,gmtcomment]
newgmt += glist
ugmt.append('\t'.join(newgmt))
gmt_out = open(fakeGMT,'w')
gmt_out.write('\n'.join(ugmt))
gmt_out.write('\n')
gmt_out.close()
if opts.input_ranks:
infname = opts.input_ranks
rdat = open(opts.input_ranks,'r').readlines() # suck in and remove blank ids that cause gsea to barf rml april 10 2012
rdat = [x.rstrip().split('\t') for x in rdat[1:]] # ignore head
try:
dat = [[x[0],x[1],x[1]] for x in rdat]
except:
print >> sys.stderr, '## error converting row', x
pass
# fake same structure as input tabular file
try:
pvals = [float(x[1]) for x in dat]
signs = [float(x[1]) for x in dat]
except:
print >> sys.stderr, '## error converting floating point - cannot process this input'
pass
#sys.exit(4)
else: # read tabular
self.idcol = int(opts.idcol) - 1
self.signcol = int(opts.signcol) - 1
self.adjpvalcol = int(opts.adjpvalcol) - 1
maxcol = max(self.idcol,self.signcol,self.adjpvalcol)
infname = opts.input_tab
indat = open(opts.input_tab,'r').readlines()
dat = [x.rstrip().split('\t') for x in indat[1:]]
dat = [x for x in dat if len(x) > maxcol]
dat = [[x[self.idcol],x[self.adjpvalcol],x[self.signcol]] for x in dat] # reduce to rank form
pvals = [float(x[1]) for x in dat]
outofrange = [x for x in pvals if ((x < 0.0) or (x > 1.0))]
assert len(outofrange) == 0, '## p values outside 0-1 encountered - was that the right column for adjusted p value?'
signs = [float(x[2]) for x in dat]
            outofrange = [i for i, x in enumerate(signs) if (not x) and (dat[i][2] <> '0')]  # rows were reduced to [id, adjp, sign] above
bad = [dat[x][2] for x in outofrange]
assert len(outofrange) == 0, '## null numeric values encountered for sign - was that the right column? %s' % bad
ids = [x[0] for x in dat]
res = []
self.comments = []
useme = []
for i,row in enumerate(dat):
if row[1].upper() != 'NA' and row[2].upper() != 'NA' and row[0] != '' :
useme.append(i)
lost = len(dat) - len(useme)
if lost <> 0:
newdat = [dat[x] for x in useme]
del dat
dat = newdat
print >> sys.stdout, '## %d lost - NA values or null id' % lost
if remove_duplicates:
uids = list(set(ids)) # complex procedure to get min pval for each unique id
if len(uids) <> len(ids): # dupes - deal with mode
print >> sys.stdout,'## Dealing with %d uids in %d ids' % (len(uids),len(ids))
ures = {}
for i,id in enumerate(ids):
p = pvals[i]
ures.setdefault(id,[])
ures[id].append((p,signs[i]))
for id in uids:
tlist = ures[id]
tp = [x[0] for x in tlist]
ts = [x[1] for x in tlist]
if len(tp) == 1:
p = tp[0]
sign = ts[0]
else:
if opts.mode == "Max_probe":
p = min(tp)
sign = ts[tp.index(p)]
else: # guess median - too bad if even count
tp.sort()
ltp = len(tp)
                            ind = ltp // 2  # true median for odd counts; upper-middle value for evens
p = tp[ind]
sign = ts[ind]
if KEEPSELECTION:
self.comments.append('## for id=%s, got tp=%s, ts=%s, chose p=%f, sign =%f'\
% (id,str(tp),str(ts),p,sign))
if opts.input_ranks: # must be a rank file
res.append((id,'%f' % p))
else:
if p == 0.0:
p = 1e-99
try:
lp = -math.log10(p) # large positive if low p value
except ValueError:
lp = 0.0
if sign < 0:
lp = -lp # if negative, swap p to negative
res.append((id,'%f' % lp))
else: # no duplicates
for i,row in enumerate(dat):
(id,p,sign) = (row[0],float(row[1]),float(row[2]))
if opts.input_ranks: # must be a rank file
res.append((id,'%f' % p))
else:
if p == 0.0:
p = 1e-99
try:
lp = -math.log10(p) # large positive if low p value
except ValueError:
lp = 0.0
if sign < 0:
lp = -lp # if negative, swap p to negative
res.append((id,'%f' % lp))
else:
for i,row in enumerate(dat):
(id,p,sign) = (row[0],float(row[1]),float(row[2]))
if opts.input_ranks: # must be a rank file
res.append((id,'%f' % p))
else:
if p == 0.0:
p = 1e-99
try:
lp = -math.log10(p) # large positive if low p value
except ValueError:
lp = 0.0
if sign < 0:
lp = -lp # if negative, swap p to negative
res.append((id,'%f' % lp))
len1 = len(ids)
ranks = res # [x for x in res if len(x[0]) > 0 and len(x[1].strip()) > 0 and x[0].upper() <> 'NA']
# rd = dict(zip([x[0] for x in res],res)) # this does something mysterious to dupes - probably keeps last one
# ranks = rd.values()
len2 = len(ranks)
delta = len1 - len2
if delta <> 0:
print >> sys.stdout,'NOTE: %d of %d rank input file %s rows deleted - dup, null or NA IDs, pvals or logFCs' % (delta,len1,infname)
ranks = [(float(x[1]),x) for x in ranks] # decorate
ranks.sort()
ranks.reverse()
ranks = [x[1] for x in ranks] # undecorate
if opts.chip == '': # if mouse - need HUGO
ranks = [[x[0].upper(),x[1]] for x in ranks]
print >> sys.stderr, '## Fixed any lower case - now have',','.join([x[0] for x in ranks[:5]])
ranks = ['\t'.join(x) for x in ranks]
# if len(ranks) < 2:
# print >> sys.stderr,'Input %s has 1 or less rows with two tab delimited fields - please check the tool documentation' % infname
# sys.exit(3)
#print >> sys.stderr, '### opening %s and writing %s' % (fakeRanks,str(ranks[:10]))
rclean = open(fakeRanks,'w')
rclean.write('contig\tscore\n')
rclean.write('\n'.join(ranks))
rclean.write('\n')
rclean.close()
cl = []
a = cl.append
a('java -Xmx6G -cp')
a(opts.gsea_jar)
a('xtools.gsea.GseaPreranked')
# NOTE: changed from `fakeGMTBase` to `opts.builtin_gmt` to fix bug
print fakeGMTBase
a('-gmx %s' % opts.builtin_gmt) # ensure .gmt extension as required by GSEA - gene sets to use
a('-gui false') # use preranked file mode and no gui
a('-make_sets true -rnd_seed timestamp') # more things from the GUI command line display
a('-norm meandiv -zip_report true -scoring_scheme weighted') # ? need to set these?
# NOTE: changed from `fakeRankBase` to `opts.input_tab` to fix bug
a('-rnk %s' % opts.input_tab) # input ranks file symbol (the chip file is the crosswalk for ids in first column)
a('-out .' )
#a('-out' % opts.output_dir)
a('-set_max %s' % opts.setMax)
a('-set_min %s' % opts.setMin)
a('-mode %s' % opts.mode)
if opts.chip > '':
#a('-chip %s -collapse true -include_only_symbols true' % opts.chip)
a('-chip %s -collapse true' % opts.chip)
else:
a("-collapse false") # needed if no chip
a('-nperm %s' % opts.nPerm)
a('-rpt_label %s' % opts.title)
a('-plot_top_x %s' % opts.plotTop)
self.cl = cl
self.comments.append('## GSEA command line:')
self.comments.append(' '.join(self.cl))
print >> sys.stderr, 'CMD: ',' '.join(self.cl)
self.fakeRanks = fakeRanks
self.fakeGMT = fakeGMT
def grepIds(self):
"""
"""
found = []
allids = open(self.opts.input_ranks,'r').readlines()
allids = [x.strip().split() for x in allids]
allids = [x[0] for x in allids] # list of ids
gmtpath = os.path.split(self.opts.fakeGMT)[0] # get path to all chip
def run(self):
"""
"""
tlog = os.path.join(self.opts.output_dir,"gsea_runner.log")
sto = open(tlog,'w')
x = subprocess.Popen(' '.join(self.cl),shell=True,stdout=sto,stderr=sto,cwd=self.opts.output_dir)
retval = x.wait()
if retval <> 0:
print >> sys.stdout, 'GSEA return value = %d' % (retval)
sto.close()
d = glob.glob(os.path.join(self.opts.output_dir,'%s*' % self.opts.title))
dir_list = glob.glob(os.path.join(self.opts.output_dir,'*Gsea*' ))
for f in dir_list:
print >> sys.stdout, 'file : '+f
gsea_dir = "./"
if len(d) > 0:
fix_subdir(d[0],self.opts.output_dir)
htmlfname = os.path.join(self.opts.output_dir,'index.html')
# try:
# html = open(htmlfname,'r').readlines()
# html = [x.strip() for x in html if len(x.strip()) > 0]
# if len(self.comments) > 0:
# s = ['<pre>']
# s += self.comments
# s.append('</pre>')
# try:
# i = html.index('<div id="footer">')
# except:
# i = len(html) - 7 # fudge
# html = html[:i] + s + html[i:]
# except:
# html = []
# htmlhead = '<html><head></head><body>'
# html.append('## Galaxy GSEA wrapper failure')
# html.append('## Unable to find index.html in %s - listdir=%s' % (d,' '.join(os.listdir(self.opts.output_dir))))
# html.append('## Command line was %s' % (' '.join(self.cl)))
# html.append('## commonly caused by mismatched ID/chip selection')
# glog = open(os.path.join(self.opts.output_dir,'gsea_runner.log'),'r').readlines()
# html.append('## gsea_runner.log=%s' % '\n'.join(glog))
# #tryme = self.grepIds()
# #retval = 1
# print >> sys.stderr,'\n'.join(html)
# html = ['%s<br/>' % x for x in html]
# html.insert(0,htmlhead)
# html.append('</body></html>')
htmlf = file(self.opts.outhtml,'w')
htmlf.write("<html><HEAD> <meta HTTP-EQUIV='REFRESH' content='0; url="+gsea_dir+"/index.html'> </HEAD><body><H2><a href='"+gsea_dir+"/index.html'>Click to open GSEA result</a></H2></body></html>")
#htmlf.write('\n'.join(html))
htmlf.write('\n')
htmlf.close()
#os.unlink(self.fakeRanks)
#os.unlink(self.fakeGMT)
if opts.outtab_neg:
tabs = glob.glob(os.path.join(opts.output_dir,"gsea_report_for_*.xls"))
if len(tabs) > 0:
for tabi,t in enumerate(tabs):
tkind = os.path.basename(t).split('_')[4].lower()
if tkind == 'neg':
outtab = opts.outtab_neg
elif tkind == 'pos':
outtab = opts.outtab_pos
else:
print >> sys.stderr, '## tab file matched %s which is not "neg" or "pos" in 4th segment %s' % (t,tkind)
sys.exit(2)
content = open(t).readlines()
tabf = open(outtab,'w')
tabf.write(''.join(content))
tabf.close()
else:
print >> sys.stdout, 'Odd, maketab = %s but no matches - tabs = %s' % (opts.outtab_neg,tabs)
return retval
if __name__ == "__main__":
"""
called as:
<command interpreter="python">rgGSEA.py --input_ranks "$input1" --outhtml "$html_file"
--setMax "$setMax" --setMin "$setMin" --nPerm "$nPerm" --plotTop "$plotTop" --gsea_jar "$GALAXY_DATA_INDEX_DIR/shared/jars/gsea2-2.07.jar"
--output_dir "$html_file.files_path" --use_gmt ""${use_gmt.fields.path}"" --chip "${use_chip.fields.path}"
</command>
"""
op = optparse.OptionParser()
a = op.add_option
a('--input_ranks',default=None)
a('--input_tab',default=None)
a('--input_name',default=None)
a('--use_gmt',default=None)
a('--history_gmt',default=None)
a('--builtin_gmt',default=None)
a('--history_gmt_name',default=None)
a('--setMax',default="500")
a('--setMin',default="15")
a('--nPerm',default="1000")
a('--title',default="GSEA report")
a('--chip',default='')
a('--plotTop',default='20')
a('--outhtml',default=None)
a('--makeTab',default=None)
a('--output_dir',default=None)
a('--outtab_neg',default=None)
a('--outtab_pos',default=None)
a('--adjpvalcol',default=None)
a('--signcol',default=None)
a('--idcol',default=None)
a('--mode',default='Max_probe')
a('-j','--gsea_jar',default='gsea2-2.0.12.jar')
opts, args = op.parse_args()
#assert os.path.isfile(opts.gsea_jar),'## GSEA runner unable to find supplied gsea java desktop executable file %s' % opts.gsea_jar
if opts.input_ranks:
inpf = opts.input_ranks
else:
inpf = opts.input_tab
assert opts.idcol <> None, '## GSEA runner needs an id column if a tabular file provided'
assert opts.signcol <> None, '## GSEA runner needs a sign column if a tabular file provided'
assert opts.adjpvalcol <> None, '## GSEA runner needs an adjusted p value column if a tabular file provided'
assert os.path.isfile(inpf),'## GSEA runner unable to open supplied input file %s' % inpf
if opts.chip > '':
assert os.path.isfile(opts.chip),'## GSEA runner unable to open supplied chip file %s' % opts.chip
some = None
if opts.history_gmt <> None:
some = 1
assert os.path.isfile(opts.history_gmt),'## GSEA runner unable to open supplied history gene set matrix (.gmt) file %s' % opts.history_gmt
if opts.builtin_gmt <> None:
some = 1
assert os.path.isfile(opts.builtin_gmt),'## GSEA runner unable to open supplied history gene set matrix (.gmt) file %s' % opts.builtin_gmt
assert some, '## GSEA runner needs a gene set matrix file - none chosen?'
opts.title = re.sub('[^a-zA-Z0-9_]+', '', opts.title)
myName=os.path.split(sys.argv[0])[-1]
gse = gsea_wrapper(myName, opts=opts)
retcode = gse.run()
if retcode <> 0:
sys.exit(retcode) # indicate failure to job runner
|
UCSC-MedBook/MedBook_
|
tools/gsea/rgGSEA.py
|
Python
|
bsd-3-clause
| 23,186
|
[
"Galaxy"
] |
530831affc4399d516e029e8902f6fb8209ef776ebef2d98532149a662347b44
|
import numpy as np
from skimage.transform import AffineTransform, SimilarityTransform, PolynomialTransform, warp
from skimage.filters import gaussian
from skimage import exposure, img_as_float
from scipy.misc import imresize
import Augmentor
center_shift = 256 / 2
tf_center = SimilarityTransform(translation=-center_shift)
tf_uncenter = SimilarityTransform(translation=center_shift)
def apply_chain(chain):
def call(x):
for fn in chain:
x = fn(x)
return x
return call
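# Note: transforms are applied left to right, i.e.
# apply_chain([f, g])(x) == g(f(x)); a runnable sketch is at the bottom
# of this module.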
def resize(output_size):
def call(x):
x = imresize(x, output_size) # x[::256//output_size, ::256//output_size, :]
return x
return call
def random_crop(size):
def call(x):
height, width, d = x.shape
r1 = np.random.random()
r2 = np.random.random()
left = round(r1 * (width - size))
right = round((1 - r1) * (width - size))
top = round(r2 * (height - size))
bottom = round((1 - r2) * (height - size))
crop = x[top:height-bottom, left:width-right, :]
return crop
return call
def center_crop(size):
def call(x):
height, width, d = x.shape
a = int(0.5 * (height - size))
b = int(0.5 * (width - size))
top, bottom = a, (height - size) - a
        left, right = b, (width - size) - b  # fixed: was derived from height, which breaks non-square inputs
crop = x[top:height-bottom, left:width-right, :]
return crop
return call
def crop_top_left(size):
def call(x):
crop = x[:size, :size, :]
return crop
return call
def crop_top_right(size):
def call(x):
crop = x[:size, x.shape[1] - size:, :]
return crop
return call
def crop_bottom_left(size):
def call(x):
crop = x[x.shape[0] - size:, :size, :]
return crop
return call
def crop_bottom_right(size):
def call(x):
crop = x[x.shape[0] - size:, x.shape[1] - size:, :]
return crop
return call
def image_to_array(x):
return np.array(x)
def to_float(x):
return img_as_float(x)
def blur(sigma=0.1):
def call(x):
x = gaussian(x, sigma=sigma, preserve_range=True, multichannel=True)
return x
return call
def random_blur(sigma=lambda: np.random.random_sample()*1):
def call(x):
x = gaussian(x, sigma=sigma(), preserve_range=True, multichannel=True)
return x
return call
def random_gamma(gamma=lambda: np.random.rand() * 0.4 + 0.8):
def call(x):
return exposure.adjust_gamma(x, gamma())
return call
def random_contrast(weight=lambda: np.random.rand() * 0.3 + 0.7):
def call(x):
w = weight()
return x * w + (1 - w) * exposure.rescale_intensity(x)
return call
def augment_color(weight=0.1):
def call(x):
height, width, channels = x.shape
img_rgb_col = x.reshape(height*width, channels)
cov = np.cov(img_rgb_col.T)
eigvals, eigvects = np.linalg.eigh(cov)
random_eigvals = np.sqrt(eigvals) * np.random.randn(channels) * weight
scaled_eigvects = np.dot(eigvects, random_eigvals)
x = np.clip(x + scaled_eigvects, 0, 1)
return x
return call
def augment_color_deterministic(weight=0.1):
def call(x):
height, width, channels = x.shape
img_rgb_col = x.reshape(height*width, channels)
cov = np.cov(img_rgb_col.T)
eigvals, eigvects = np.linalg.eigh(cov)
random_eigvals = np.sqrt(eigvals) * np.array([1, 1, 1]) * weight
scaled_eigvects = np.dot(eigvects, random_eigvals)
x = np.clip(x + scaled_eigvects, 0, 1)
return x
return call
def distort():
p = Augmentor.Pipeline()
p.random_distortion(probability=1, grid_width=5, grid_height=5, magnitude=8)
def call(x):
x = p.sample_with_array(x.astype('uint8'), False)
return x
return call
def random_zoom_range(zoom_range=[1/1.2, 1.2]):
def call():
#https://github.com/benanne/kaggle-galaxies/blob/master/realtime_augmentation.py#L147
#log_zoom_range = [np.log(z) for z in zoom_range]
#zoom = np.exp(np.random.uniform(*log_zoom_range))
zoom = np.random.uniform(zoom_range[0], zoom_range[1])
return zoom, zoom
return call
def augment(
rotation_fn=lambda: np.random.random_integers(0, 360),
translation_fn=lambda: (np.random.random_integers(-20, 20), np.random.random_integers(-20, 20)),
scale_factor_fn=random_zoom_range(),
shear_fn=lambda: np.random.random_integers(-10, 10)
):
def call(x):
rotation = rotation_fn()
translation = translation_fn()
scale = scale_factor_fn()
shear = shear_fn()
tf_augment = AffineTransform(scale=scale, rotation=np.deg2rad(rotation), translation=translation, shear=np.deg2rad(shear))
tf = tf_center + tf_augment + tf_uncenter
x = warp(x, tf, order=1, preserve_range=True, mode='symmetric')
return x
return call
def rotate_90(k=1):
def call(x):
x = np.rot90(x, k).copy()
return x
return call
def fliplr():
def call(x):
x = np.fliplr(x).copy()
return x
return call
def flipud():
def call(x):
x = np.flipud(x).copy()
return x
return call
def random_fliplr():
def call(x):
if np.random.randint(2) > 0:
x = np.fliplr(x).copy()
return x
return call
def random_flipud():
def call(x):
if np.random.randint(2) > 0:
x = np.flipud(x).copy()
return x
return call
def augment_deterministic(
rotation=0,
translation=0,
scale_factor=1,
shear=0
):
def call(x):
scale = scale_factor, scale_factor
rotation_tmp = rotation
tf_augment = AffineTransform(
scale=scale,
rotation=np.deg2rad(rotation_tmp),
translation=translation,
shear=np.deg2rad(shear)
)
tf = tf_center + tf_augment + tf_uncenter
x = warp(x, tf, order=1, preserve_range=True, mode='symmetric')
return x
return call
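# A minimal runnable sketch (assumes only the helpers above; `gaussian` is
# called with `multichannel=True`, which newer skimage releases rename to
# `channel_axis`): build a small deterministic pipeline and check the shape.
if __name__ == '__main__':
    _img = np.random.rand(256, 256, 3)
    _pipeline = apply_chain([to_float, crop_top_left(224), fliplr(), blur(sigma=0.5)])
    print(_pipeline(_img).shape)  # -> (224, 224, 3)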
|
Mctigger/KagglePlanetPytorch
|
transforms.py
|
Python
|
mit
| 6,150
|
[
"Gaussian"
] |
c6cb87f4d470d0c861b1d8caa971b27c2a8096ac52a4c1e89b98571acdb511e9
|
## Copyright 2004-2009 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
import types
import unittest
from timtools.tools import tsttools
from timtools.tools.my_import import my_import
from timtools.console import Application
from timtools.console import syscon
class StoppingTestResult(unittest._TextTestResult):
def stopTest(self, test):
"Called when the given test has been run"
if len(self.errors) or len(self.failures):
self.stop()
class StoppingTestRunner(unittest.TextTestRunner):
def _makeResult(self):
return StoppingTestResult(self.stream,
self.descriptions,
self.verbosity)
class Runtests(Application):
name = "Lino/runtests"
copyright = """\
Copyright (c) 2004-2009 Luc Saffre.
This software comes with ABSOLUTELY NO WARRANTY and is
distributed under the terms of the GNU General Public License.
See file COPYING.txt for more information."""
usage = "usage: %prog [options] [TESTS]"
description = """\
scan a directory tree for .py files containing test cases and run
them. TESTS specifies the tests to run. Default is all. Other
possible values e.g. `1` or `1-7`.
"""
configfile = "runtests.ini"
configdefaults = dict(
postscript_printer = "psfile"
# a valid Windows printer name
)
def setupConfigParser(self,parser):
parser.add_option("postscript_printer",
help="""\
a valid Windows printer name.
""",
dest="postscript_printer",
default=None,
metavar="NAME")
Application.setupConfigParser(self,parser)
def setupOptionParser(self,parser):
Application.setupOptionParser(self,parser)
parser.add_option("-i", "--ignore-failures",
help="""\
continue testing even if failures or errors occur""",
action="store_true",
dest="ignore",
default=False)
def makeSuite(self,argv,top='.'):
self.status("Collecting test cases")
suites=[]
cases = []
#skipped=[]
for root, dirs, files in os.walk(top):
sys.path.append(root)
if '.svn' in dirs:
dirs.remove(".svn") # don't visit CVS directories
for filename in files:
modname,ext = os.path.splitext(filename)
if ext == '.py':
self.status(filename)
doit = (len(argv) == 0)
for arg in argv:
a = arg.split('-')
if len(a) == 2:
if a[0].isdigit() and a[1].isdigit():
if modname.isdigit():
if int(modname) >= int(a[0]) \
and int(modname) <= int(a[1]):
doit = True
else:
if modname >= a[0] and modname <= a[1]:
doit = True
elif len(a) == 1:
if modname == a[0]:
doit = True
else:
self.warning("Unrecognized argument %s",
arg)
if doit:
self.verbose("Loading cases from %s...",
modname)
self.findTestCases(modname,cases,suites)
sys.path.remove(root)
self.notice("found %d cases and %d suites.",
len(cases),len(suites))
for tcl in cases:
if hasattr(tcl,"todo"):
self.notice("Todo %s : %s", tcl.__module__,tcl.todo)
else:
suites.append(unittest.makeSuite(tcl))
return unittest.TestSuite(suites)
def findTestCases(self,modname,cases,suites):
try:
mod = my_import(modname)
except ImportError,e:
self.notice("could not import %s : %s",modname,e)
return
#cases=[]
if hasattr(mod,"suite"):
#print modname + ".suite()"
suites.append(mod.suite())
for (k,v) in mod.__dict__.items():
# Python 2.2 if type(v) == types.ClassType:
if type(v) == types.TypeType: # since 2.3
if issubclass(v,unittest.TestCase) \
and v != unittest.TestCase \
and v != tsttools.TestCase:
if hasattr(v,"skip") and v.skip:
self.notice("Skipping %s.%s",
modname,v.__name__)
else:
v.runtests = self
cases.append(v)
return cases
def showfile(self,filename):
assert os.path.exists(filename)
def run(self):
suite = self.makeSuite(self.args)
stream = self.toolkit.stdout
if self.options.ignore:
runner = unittest.TextTestRunner(stream=stream)
else:
runner = StoppingTestRunner(stream=stream)
# print "foo", suite
result = runner.run(suite)
def tests(case):
if hasattr(case,'_tests'):
for c in case._tests:
for d in tests(c):
yield d
yield case
for case in tests(suite):
if hasattr(case,'afterRun'):
case.afterRun(self)
def main(*args,**kw):
Runtests().main(*args,**kw)
if __name__ == '__main__': main()
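# Example invocation (a sketch): run test modules 1 through 7 found under the
# current directory, continuing past failures or errors:
#
#     python runtests.py -i 1-7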
|
lsaffre/timtools
|
timtools/scripts/runtests.py
|
Python
|
bsd-2-clause
| 6,831
|
[
"VisIt"
] |
f706b614c8c0d801c812fb4e4431fadfeaccfc0cc78a3d7b60c0ef6f381f1521
|
"""
Module for rendering axes.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import vtk
class Axes(object):
"""
Axes object.
"""
def __init__(self, renWinInteract):
# create axes
self._axes = vtk.vtkAxesActor()
self._axes.SetShaftTypeToCylinder()
self._axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetColor(1, 0, 0)
self._axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetFontFamilyToArial()
self._axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
self._axes.GetYAxisCaptionActor2D().GetCaptionTextProperty().SetColor(0, 1, 0)
self._axes.GetYAxisCaptionActor2D().GetCaptionTextProperty().SetFontFamilyToArial()
self._axes.GetYAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
self._axes.GetZAxisCaptionActor2D().GetCaptionTextProperty().SetColor(0, 0, 1)
self._axes.GetZAxisCaptionActor2D().GetCaptionTextProperty().SetFontFamilyToArial()
self._axes.GetZAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
# create axes marker
self._marker = vtk.vtkOrientationMarkerWidget()
self._marker.SetInteractor(renWinInteract)
self._marker.SetOrientationMarker(self._axes)
self._marker.SetViewport(0, 0, 0.25, 0.25)
self._marker.SetEnabled(0)
self._enabled = False
def isEnabled(self):
"""Returns True if the axes is enabled."""
return self._enabled
def toggle(self):
"""Toggle axes visibilty."""
if self.isEnabled():
self.remove()
else:
self.add()
def add(self):
"""Add the axis label."""
if not self.isEnabled():
self._marker.SetEnabled(1)
self._enabled = True
def remove(self):
"""Remove the axis label."""
if self.isEnabled():
self._marker.SetEnabled(0)
self._enabled = False
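# Minimal usage sketch (hedged; assumes an existing vtkRenderWindowInteractor
# named `iren` supplied by the surrounding application):
#   axes = Axes(iren)
#   axes.toggle()  # show the orientation marker in the lower-left viewport
#   axes.toggle()  # hide it again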
|
chrisdjscott/Atoman
|
atoman/rendering/axes.py
|
Python
|
mit
| 2,023
|
[
"VTK"
] |
d08029ac921f160818e17d874a1100f9637b201f895f11339b9100715361c8b9
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Note that since July 2004, all patents on the LZW compression algorithm have
expired. Therefore the GIF format may now be used freely.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
  the global palette (if it's the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
Many thanks to Alex Robinson for implementing the concept of subrectangles,
which (depending on image content) can give a very significant reduction in
file size.
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Useful links
-------------
* http://tronche.com/computer-graphics/gif/
* http://en.wikipedia.org/wiki/Graphics_Interchange_Format
* http://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
# todo: This module should be part of imageio (or at least based on it)
import os, time
try:
import Image
PIL = Image
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
def get_cKDTree():
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
return cKDTree
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image):
            # We assume PIL images are all right
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def intToBin(i):
""" Integer to two bytes """
# divide in two parts (bytes)
i1 = i % 256
i2 = int( i/256)
# make string (little endian)
return chr(i1) + chr(i2)
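# Illustrative check: 300 == 0x012C, so the little-endian two-byte encoding is
# intToBin(300) == chr(0x2C) + chr(0x01) == '\x2c\x01'.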
class GifWriter:
""" GifWriter()
Class that contains methods for helping write the animated GIF file.
"""
def getheaderAnim(self, im):
""" getheaderAnim(im)
Get animation header. To replace PILs getheader()[0]
"""
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
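    # Example: for a 320x200 image this yields
    # "GIF89a" + '\x40\x01' + '\xc8\x00' + '\x87\x00\x00'
    # (320 == 0x0140 and 200 == 0x00C8, both written little endian).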
def getImageDescriptor(self, im, xy=None):
""" getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
        Otherwise the global color table applies to all frames, irrespective of
        whether additional colors come into play that would require a redefined
        palette. Still a maximum of 256 colors per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in January 2011 to implement subrectangles.
"""
# Default use full image and place at upper left
if xy is None:
xy = (0,0)
# Image separator,
bb = '\x2C'
# Image position and size
bb += intToBin( xy[0] ) # Left position
bb += intToBin( xy[1] ) # Top position
bb += intToBin( im.size[0] ) # image width
bb += intToBin( im.size[1] ) # image height
# packed field: local color table flag1, interlace0, sorted table0,
# reserved00, lct size111=7=2^(7+1)=256.
bb += '\x87'
        # LZW minimum size code now comes later, at the beginning of [image data] blocks
return bb
def getAppExt(self, loops=float('inf')):
""" getAppExt(loops=float('inf'))
Application extension. This part specifies the amount of loops.
If loops is 0 or inf, it goes on infinitely.
"""
if loops==0 or loops==float('inf'):
loops = 2**16-1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
bb += intToBin(loops)
bb += '\x00' # end
return bb
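    # Example: loops=0 (or inf) is mapped to 2**16-1 == 65535, so the loop
    # count bytes are intToBin(65535) == '\xff\xff' and the block ends in '\x00'.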
def getGraphicsControlExt(self, duration=0.1, dispose=2):
""" getGraphicsControlExt(duration=0.1, dispose=2)
Graphics Control Extension. A sort of header at the start of
each image. Specifies duration and transparency.
Dispose
-------
* 0 - No disposal specified.
* 1 - Do not dispose. The graphic is to be left in place.
* 2 - Restore to background color. The area used by the graphic
must be restored to the background color.
* 3 - Restore to previous. The decoder is required to restore the
area overwritten by the graphic with what was there prior to
rendering the graphic.
* 4-7 -To be defined.
"""
bb = '\x21\xF9\x04'
        bb += chr((dispose & 3) << 2)  # low bit 1 == transparency,
        # 2nd bit 1 == user input; the next 3 bits (of which the low two are
        # used here) encode the disposal method.
bb += intToBin( int(duration*100) ) # in 100th of seconds
bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
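    # Example: duration=0.5, dispose=2 gives
    # '\x21\xF9\x04' + chr((2 & 3) << 2) + intToBin(50) + '\x00' + '\x00',
    # i.e. a packed field of 0x08 and a delay of 50 hundredths of a second.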
def handleSubRectangles(self, images, subRectangles):
""" handleSubRectangles(images)
Handle the sub-rectangle stuff. If the rectangles are given by the
user, the values are checked. Otherwise the subrectangles are
calculated automatically.
"""
if isinstance(subRectangles, (tuple,list)):
# xy given directly
# Check xy
xy = subRectangles
if xy is None:
xy = (0,0)
if hasattr(xy, '__len__'):
if len(xy) == len(images):
xy = [xxyy for xxyy in xy]
else:
raise ValueError("len(xy) doesn't match amount of images.")
else:
xy = [xy for im in images]
xy[0] = (0,0)
else:
# Calculate xy using some basic image processing
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to use auto-subRectangles.")
# First make numpy arrays if required
for i in range(len(images)):
im = images[i]
if isinstance(im, Image.Image):
tmp = im.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
images[i] = a
# Determine the sub rectangles
images, xy = self.getSubRectangles(images)
# Done
return images, xy
def getSubRectangles(self, ims):
""" getSubRectangles(ims)
Calculate the minimal rectangles that need updating each frame.
Returns a two-element tuple containing the cropped images and a
list of x-y positions.
        Calculating the subrectangles takes extra time, obviously. However,
        if the image sizes were reduced, the actual writing of the GIF
        goes faster. In some cases applying this method produces the GIF faster overall.
"""
# Check image count
if len(ims) < 2:
return ims, [(0,0) for i in ims]
# We need numpy
if np is None:
raise RuntimeError("Need Numpy to calculate sub-rectangles. ")
# Prepare
ims2 = [ims[0]]
xy = [(0,0)]
t0 = time.time()
# Iterate over images
prev = ims[0]
for im in ims[1:]:
# Get difference, sum over colors
            diff = np.abs(im.astype(np.int16) - prev.astype(np.int16))  # signed to avoid uint8 wraparound
if diff.ndim==3:
diff = diff.sum(2)
# Get begin and end for both dimensions
X = np.argwhere(diff.sum(0))
Y = np.argwhere(diff.sum(1))
# Get rect coordinates
if X.size and Y.size:
x0, x1 = X[0], X[-1]+1
y0, y1 = Y[0], Y[-1]+1
else: # No change ... make it minimal
x0, x1 = 0, 2
y0, y1 = 0, 2
# Cut out and store
im2 = im[y0:y1,x0:x1]
prev = im
ims2.append(im2)
xy.append((x0,y0))
# Done
#print('%1.2f seconds to determine subrectangles of %i images' %
# (time.time()-t0, len(ims2)) )
return ims2, xy
def convertImagesToPIL(self, images, dither, nq=0):
""" convertImagesToPIL(images, nq=0)
Convert images to Paletted PIL images, which can then be
written to a single animated GIF.
"""
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim==3 and im.shape[2]==3:
im = Image.fromarray(im,'RGB')
elif im.ndim==3 and im.shape[2]==4:
im = Image.fromarray(im[:,:,:3],'RGB')
elif im.ndim==2:
im = Image.fromarray(im,'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
nqInstance = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=nqInstance.paletteImage())
else:
im = nqInstance.quantize(im) # Use to quantize the image itself
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
for im in images:
im = im.convert('P', palette=AD, dither=dither)
images2.append(im)
# Done
return images2
def writeGifToFile(self, fp, images, durations, loops, xys, disposes):
""" writeGifToFile(fp, images, durations, loops, xys, disposes)
Given a set of images writes the bytes to the specified stream.
"""
# Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
palettes.append( getheader(im)[1] )
for palette in palettes:
occur.append( palettes.count( palette ) )
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = self.getheaderAnim(im)
appext = self.getAppExt(loops)
# Write
fp.write(header)
fp.write(globalPalette)
fp.write(appext)
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
graphext = self.getGraphicsControlExt(durations[frames],
disposes[frames])
# Make image descriptor suitable for using 256 local color palette
lid = self.getImageDescriptor(im, xys[frames])
# Write local header
if (palette != globalPalette) or (disposes[frames] != 2):
# Use local color palette
fp.write(graphext)
fp.write(lid) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08') # LZW minimum size code
else:
# Use global color palette
fp.write(graphext)
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";") # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
    repeat : bool or integer
        The number of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed. When using this option,
better results are usually obtained when subRectangles is False.
subRectangles : False, True, or a list of 2-element tuples
Whether to use sub-rectangles. If True, the minimal rectangle that
is required to update each frame is automatically detected. This
can give significant reductions in file size, particularly if only
        a part of the image changes. You can also give a list of x-y
        coordinates to do the cropping yourself. The default is True.
dispose : int
How to dispose each frame. 1 means that each frame is to be left
in place. 2 means the background color should be restored after
each frame. 3 means the decoder should restore the previous frame.
If subRectangles==False, the default is 2, otherwise it is 1.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Instantiate writer object
gifWriter = GifWriter()
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images):
duration = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
duration = [duration for im in images]
# Check subrectangles
if subRectangles:
images, xy = gifWriter.handleSubRectangles(images, subRectangles)
defaultDispose = 1 # Leave image in place
else:
# Normal mode
xy = [(0,0) for im in images]
defaultDispose = 2 # Restore to background color.
# Check dispose
if dispose is None:
dispose = defaultDispose
if hasattr(dispose, '__len__'):
if len(dispose) != len(images):
raise ValueError("len(xy) doesn't match amount of images.")
else:
dispose = [dispose for im in images]
# Make images in a format that we can write easy
images = gifWriter.convertImagesToPIL(images, dither, nq)
# Write
fp = open(filename, 'wb')
try:
gifWriter.writeGifToFile(fp, images, duration, loops, xy, dispose)
finally:
fp.close()
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
    arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: '+str(filename))
# Load file using PIL
pilIm = PIL.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell()+1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append( PIL.fromarray(im) )
# Done
return images
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
    samplefac should be an integer of 1 or higher, 1
    being the highest quality, but the slowest performance.
    With a value of 10, one tenth of all pixels are used during
    training. This value seems a nice tradeoff between speed
    and quality.
    colors is the number of colors to reduce the image to. This
    should best be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
world-wide, paid up, royalty-free, nonexclusive right and license to deal
in this software and documentation files (the "Software"), including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons who receive
copies from any such party to do so, with the only requirement being
that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
colormap = None # The network itself
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The network itself
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
if image.mode != "RGBA":
raise IOError("Image mode should be RGBA.")
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
            bb = self.colormap[i, 0]
            gg = self.colormap[i, 1]
            rr = self.colormap[i, 2]
outstream.write(rr if rgb else bb)
outstream.write(gg)
outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0,0] = 0.0 # Black
self.network[0,1] = 0.0
self.network[0,2] = 0.0
self.network[1,0] = 255.0 # White
self.network[1,1] = 255.0
self.network[1,2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha*(n[0] - b))
n[1] -= (alpha*(n[1] - g))
n[2] -= (alpha*(n[2] - r))
def geta(self, alpha, rad):
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad*2-1
mid = length/2
q = np.array(list(range(mid-1,-1,-1))+list(range(-1,mid)))
a = alpha*(rad*rad - q*q)/(rad*rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i+rad <= self.NETSIZE:
hi = i+rad
end = rad*2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i+rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo+1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def contest(self, b, g, r):
""" Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
        i = 0
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print("Beginning 1D learning: samplepixels = %1.2f rad = %i" %
(samplepixels, rad) )
step = 0
pos = 0
if lengthcount%NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount%NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount%NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i%100 == 99:
tmp = '\b'*len(printed_string)
printed_string = str((i+1)*100/samplepixels)+"%\n"
print(tmp + printed_string)
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p ) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos+step)%lengthcount
i += 1
if i%delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
finalAlpha = (1.0*alpha)/self.INITALPHA
print("Finished 1D learning: final alpha = %1.2f!" % finalAlpha)
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i,j])
x = max(0, x)
x = min(255, x)
self.colormap[i,j] = x
self.colormap[i,3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i+1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:],q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos+i) >> 1
for j in range(previouscol+1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
for j in range(previouscol+1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
def paletteImage(self):
""" PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. """
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0]*(256-self.NETSIZE)*3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if get_cKDTree():
return self.quantize_with_scipy(image)
else:
print('Scipy not available, falling back to slower version.')
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w,h = image.size
px = np.asarray(image).copy()
px2 = px[:,:,:3].reshape((w*h,3))
cKDTree = get_cKDTree()
kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print("Distance: %1.2f" % (result[0].sum()/(w*h)) )
px2[:] = self.colormap[colorindex,:3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def quantize_without_scipy(self, image):
"""" This function can be used if no scipy is availabe.
It's 7 times slower though.
"""
w,h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i,j,0],px[i,j,1],px[i,j,2])
try:
val = memo[key]
except KeyError:
val = self.convert(*key)
memo[key] = val
px[i,j,0],px[i,j,1],px[i,j,2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, *color):
i = self.inxsearch(*color)
return self.colormap[i,:3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:,:3] - np.array([r,g,b]))
        a = np.argmin((dists * dists).sum(1))
return a
if __name__ == '__main__':
im = np.zeros((200,200), dtype=np.uint8)
im[10:30,:] = 100
im[:,80:120] = 255
im[-50:-40,:] = 50
images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
writeGif('lala3.gif',images, duration=0.5, dither=0)
|
schaul/py-vgdl
|
external_libs/images2gif.py
|
Python
|
bsd-3-clause
| 36,933
|
[
"NEURON"
] |
e8cc8e9cd1661bdb4265e020795862e1e9bbcf6af115414277a13843fd3de5d6
|
"""Sensitive variant calling using VarDict.
Defaults to using the faster, equally sensitive Java port:
https://github.com/AstraZeneca-NGS/VarDictJava
if 'vardict' or 'vardict-java' is specified in the configuration. To use the
VarDict perl version:
https://github.com/AstraZeneca-NGS/VarDict
specify 'vardict-perl'.
"""
from decimal import *
from distutils.version import LooseVersion
import os
import sys
from six.moves import zip
import six
import toolz as tz
import pybedtools
from bcbio import bam, broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import chromhacks
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, programs
from bcbio.variation import bamprep, bedutils, vcfutils
def _is_bed_file(target):
return target and isinstance(target, six.string_types) and os.path.isfile(target)
def _vardict_options_from_config(items, config, out_file, target=None, is_rnaseq=False):
var2vcf_opts = []
opts = ["-c 1", "-S 2", "-E 3", "-g 4"]
# ["-z", "-F", "-c", "1", "-S", "2", "-E", "3", "-g", "4", "-x", "0",
# "-k", "3", "-r", "4", "-m", "8"]
cores = dd.get_num_cores(items[0])
if cores and cores > 1:
opts += ["-th", str(cores)]
# Disable SV calling for vardict, causes issues with regional analysis
# by detecting SVs outside of target regions, which messes up merging
# SV calling will be worked on as a separate step
vardict_cl = get_vardict_command(items[0])
version = programs.get_version_manifest(vardict_cl)
if (vardict_cl and version and
((vardict_cl == "vardict-java" and LooseVersion(version) >= LooseVersion("1.5.5")) or
(vardict_cl == "vardict" and LooseVersion(version) >= LooseVersion("2018.07.25")))):
opts += ["--nosv"]
if (vardict_cl and version and
(vardict_cl == "vardict-java" and LooseVersion(version) >= LooseVersion("1.5.6"))):
opts += ["--deldupvar"]
# remove low mapping quality reads
if not is_rnaseq:
opts += ["-Q", "10"]
# Remove QCfail reads, avoiding high depth repetitive regions
opts += ["-F", "0x700"]
resources = config_utils.get_resources("vardict", config)
if resources.get("options"):
opts += [str(x) for x in resources["options"]]
resources = config_utils.get_resources("var2vcf", config)
if resources.get("options"):
var2vcf_opts += [str(x) for x in resources["options"]]
if target and _is_bed_file(target):
target = _enforce_max_region_size(target, items[0])
opts += [target] # this must be the last option
_add_freq_options(config, opts, var2vcf_opts)
return " ".join(opts), " ".join(var2vcf_opts)
def _add_freq_options(config, opts, var2vcf_opts):
""" Setting -f option for vardict and var2vcf_valid
Prioritizing settings in resources/vardict/options, then algorithm/min_allele_fraction:
min_allele_fraction "-f" in opts var2vcfopts -> vardict -f var2vcf -f
yes yes yes opts var2vcfopts
yes yes - opts -
yes - yes min_allele_fraction var2vcfopts
yes - - min_allele_fraction min_allele_fraction
default yes yes opts var2vcfopts
default yes - opts -
default - yes min_allele_fraction var2vcfopts
default - - min_allele_fraction min_allele_fraction
"""
if "-f" not in opts:
freq = Decimal(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / Decimal(100.0)
opts.extend(["-f", str(freq)])
if "-f" not in var2vcf_opts:
var2vcf_opts.extend(["-f", str(freq)])
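# Worked example of the precedence above: with algorithm/min_allele_fraction = 10
# and no "-f" in either options list, freq = Decimal(10) / Decimal(100.0) = 0.1,
# so "-f 0.1" is appended to both opts and var2vcf_opts.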
def _enforce_max_region_size(in_file, data):
"""Ensure we don't have any chunks in the region greater than 20kb.
VarDict memory usage depends on size of individual windows in the input
file. This breaks regions into 20kb chunks with 250bp overlaps. 20kb gives
~1Gb/core memory usage and the overlaps avoid missing indels spanning a
gap. Downstream VarDict merging sorts out any variants across windows.
https://github.com/AstraZeneca-NGS/VarDictJava/issues/64
"""
max_size = 20000
overlap_size = 250
def _has_larger_regions(f):
return any(r.stop - r.start > max_size for r in pybedtools.BedTool(f))
out_file = "%s-regionlimit%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
if _has_larger_regions(in_file):
with file_transaction(data, out_file) as tx_out_file:
pybedtools.BedTool().window_maker(w=max_size,
s=max_size - overlap_size,
b=pybedtools.BedTool(in_file)).saveas(tx_out_file)
else:
utils.symlink_plus(in_file, out_file)
return out_file
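# Worked example (hedged): a single 50kb region is rewritten by window_maker
# with w=20000 and s=19750 into overlapping windows starting at 0, 19750 and
# 39500, so an indel near a 20kb boundary still falls entirely inside one window.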
def run_vardict(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run VarDict variant calling.
"""
items = shared.add_highdepth_genome_exclusion(items)
if vcfutils.is_paired_analysis(align_bams, items):
call_file = _run_vardict_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file
def _get_jvm_opts(data, out_file):
"""Retrieve JVM options when running the Java version of VarDict.
"""
if get_vardict_command(data) == "vardict-java":
resources = config_utils.get_resources("vardict", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return "export VAR_DICT_OPTS='%s' && " % " ".join(jvm_opts)
else:
return ""
def _run_vardict_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect SNPs and indels with VarDict.
var2vcf_valid uses -A flag which reports all alleles and improves sensitivity:
https://github.com/AstraZeneca-NGS/VarDict/issues/35#issuecomment-276738191
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(
vrs, region, out_file, items=items, do_merge=False)
num_bams = len(align_bams)
sample_vcf_names = [] # for individual sample names, given batch calling may be required
for bamfile, item in zip(align_bams, items):
# prepare commands
sample = dd.get_sample_name(item)
vardict = get_vardict_command(items[0])
opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target)
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if tx_out_file.endswith("gz") else ""
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
remove_dup = vcfutils.remove_dup_cl()
py_cl = os.path.join(utils.get_bcbio_bin(), "py")
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports())
contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file)
use_lowfreq_filter = config["algorithm"].get("use_lowfreq_filter")
if use_lowfreq_filter is False:
lowfreq_filter = " | "
else:
lowfreq_filter = " | " + _lowfreq_linear_filter(0, False) + " | "
teststrandbias = config_utils.get_program("teststrandbias.R", config)
var2vcf_valid = config_utils.get_program("var2vcf_valid.pl", config)
fudge_vcf_version = "sed 's/^##fileformat=VCFv4.3/##fileformat=VCFv4.2/'"
cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} "
"-N {sample} -b {bamfile} {opts} "
"| {teststrandbias} "
"| {var2vcf_valid} -A -N {sample} -E {var2vcf_opts} "
"| {contig_cl} | bcftools filter -i 'QUAL >= 0' {lowfreq_filter} "
"{fix_ambig_ref} | {fix_ambig_alt} | {fudge_vcf_version} | {remove_dup} | {vcfstreamsort} {compress_cmd}")
if num_bams > 1:
temp_file_prefix = out_file.replace(".gz", "").replace(".vcf", "") + item["name"][1]
tmp_out = temp_file_prefix + ".temp.vcf"
tmp_out += ".gz" if out_file.endswith("gz") else ""
sample_vcf_names.append(tmp_out)
with file_transaction(item, tmp_out) as tx_tmp_file:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_tmp_file, config, samples=[sample])
else:
cmd += " > {tx_tmp_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
else:
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config, samples=[sample])
else:
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
if num_bams > 1:
# N.B. merge_variant_files wants region in 1-based end-inclusive
# coordinates. Thus use bamprep.region_to_gatk
vcfutils.merge_variant_files(orig_files=sample_vcf_names,
out_file=tx_out_file, ref_file=ref_file,
config=config, region=bamprep.region_to_gatk(region))
return out_file
def _lowfreq_linear_filter(tumor_index, is_paired):
"""Linear classifier for removing low frequency false positives.
    Uses a logistic classifier based on 0.5% tumor-only variants from the smcounter2 paper:
https://github.com/bcbio/bcbio_validations/tree/master/somatic-lowfreq
The classifier uses strand bias (SBF) and read mismatches (NM) and
applies only for low frequency (<2%) and low depth (<30) variants.
"""
if is_paired:
sbf = "FORMAT/SBF[%s]" % tumor_index
nm = "FORMAT/NM[%s]" % tumor_index
else:
sbf = "INFO/SBF"
nm = "INFO/NM"
cmd = ("""bcftools filter --soft-filter 'LowFreqBias' --mode '+' """
"""-e 'FORMAT/AF[{tumor_index}:0] < 0.02 && FORMAT/VD[{tumor_index}] < 30 """
"""&& {sbf} < 0.1 && {nm} >= 2.0'""")
return cmd.format(**locals())
def add_db_germline_flag(line):
"""Adds a DB flag for Germline filters, allowing downstream compatibility with PureCN.
"""
if line.startswith("#CHROM"):
headers = ['##INFO=<ID=DB,Number=0,Type=Flag,Description="Likely germline variant">']
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
if parts[7].find("STATUS=Germline") >= 0:
parts[7] += ";DB"
return "\t".join(parts)
def depth_freq_filter(line, tumor_index, aligner):
"""Command line to filter VarDict calls based on depth, frequency and quality.
Looks at regions with low depth for allele frequency (AF * DP < 6, the equivalent
of < 13bp for heterogygote calls, but generalized. Within these calls filters if a
calls has:
- Low mapping quality and multiple mismatches in a read (NM)
For bwa only: MQ < 55.0 and NM > 1.0 or MQ < 60.0 and NM > 2.0
- Low depth (DP < 10)
- Low QUAL (QUAL < 45)
Also filters in low allele frequency regions with poor quality, if all of these are
true:
- Allele frequency < 0.2
- Quality < 55
- P-value (SSF) > 0.06
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=LowAlleleDepth,Description="Low depth per allele frequency '
'along with poor depth, quality, mapping quality and read mismatches.">'),
('##FILTER=<ID=LowFreqQuality,Description="Low frequency read with '
'poor quality and p-value (SSF).">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
qual = utils.safe_to_float(parts[5])
dp = utils.safe_to_float(sample_ft.get("DP"))
af = utils.safe_to_float(sample_ft.get("AF"))
nm = utils.safe_to_float(sample_ft.get("NM"))
mq = utils.safe_to_float(sample_ft.get("MQ"))
ssfs = [x for x in parts[7].split(";") if x.startswith("SSF=")]
pval = utils.safe_to_float(ssfs[0].split("=")[-1] if ssfs else None)
fname = None
if not chromhacks.is_sex(parts[0]) and dp is not None and af is not None:
if dp * af < 6:
if aligner == "bwa" and nm is not None and mq is not None:
if (mq < 55.0 and nm > 1.0) or (mq < 60.0 and nm > 2.0):
fname = "LowAlleleDepth"
if dp < 10:
fname = "LowAlleleDepth"
if qual is not None and qual < 45:
fname = "LowAlleleDepth"
if af is not None and qual is not None and pval is not None:
if af < 0.2 and qual < 45 and pval > 0.06:
fname = "LowFreqQuality"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line
def _run_vardict_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect variants with Vardict.
This is used for paired tumor / normal samples.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
vrs = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(vrs, region,
out_file, items=items, do_merge=True)
paired = vcfutils.get_paired_bams(align_bams, items)
if not _is_bed_file(target):
vcfutils.write_empty_vcf(tx_out_file, config,
samples=[x for x in [paired.tumor_name, paired.normal_name] if x])
else:
if not paired.normal_bam:
ann_file = _run_vardict_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return ann_file
vardict = get_vardict_command(items[0])
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
# merge bed file regions as amplicon VarDict is only supported in single sample mode
opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target)
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
remove_dup = vcfutils.remove_dup_cl()
if any("vardict_somatic_filter" in tz.get_in(("config", "algorithm", "tools_off"), data, [])
for data in items):
somatic_filter = ""
freq_filter = ""
else:
var2vcf_opts += " -M " # this makes VarDict soft filter non-differential variants
somatic_filter = ("| sed 's/\\\\.*Somatic\\\\/Somatic/' "
"| sed 's/REJECT,Description=\".*\">/REJECT,Description=\"Not Somatic via VarDict\">/' "
"""| %s -c 'from bcbio.variation import freebayes; """
"""freebayes.call_somatic("%s", "%s")' """
% (sys.executable, paired.tumor_name, paired.normal_name))
freq_filter = ("| bcftools filter -m '+' -s 'REJECT' -e 'STATUS !~ \".*Somatic\"' 2> /dev/null "
"| %s -x 'bcbio.variation.vardict.add_db_germline_flag(x)' "
"| %s "
"| %s -x 'bcbio.variation.vardict.depth_freq_filter(x, %s, \"%s\")'" %
(os.path.join(os.path.dirname(sys.executable), "py"),
_lowfreq_linear_filter(0, True),
os.path.join(os.path.dirname(sys.executable), "py"),
0, bam.aligner_from_header(paired.tumor_bam)))
jvm_opts = _get_jvm_opts(items[0], tx_out_file)
py_cl = os.path.join(utils.get_bcbio_bin(), "py")
setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports())
contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file)
fudge_vcf_version = "sed 's/^##fileformat=VCFv4.3/##fileformat=VCFv4.2/'"
cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} "
"-N {paired.tumor_name} -b \"{paired.tumor_bam}|{paired.normal_bam}\" {opts} "
"| awk 'NF>=48' | testsomatic.R "
"| var2vcf_paired.pl -P 0.9 -m 4.25 {var2vcf_opts} "
"-N \"{paired.tumor_name}|{paired.normal_name}\" "
"| {contig_cl} {freq_filter} "
"| bcftools filter -i 'QUAL >= 0' "
"{somatic_filter} | {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} | "
"{fudge_vcf_version} {compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
return out_file
def get_vardict_command(data):
"""
convert variantcaller specification to proper vardict command, handling
string or list specification
"""
vcaller = dd.get_variantcaller(data)
if isinstance(vcaller, list):
vardict = [x for x in vcaller if "vardict" in x]
if not vardict:
return None
vardict = vardict[0]
elif not vcaller:
return None
else:
vardict = vcaller
vardict = "vardict-java" if not vardict.endswith("-perl") else "vardict"
return vardict
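# Mapping examples for get_vardict_command, per the logic above:
#   "vardict"                     -> "vardict-java"
#   "vardict-java"                -> "vardict-java"
#   "vardict-perl"                -> "vardict"
#   ["gatk-haplotype", "vardict"] -> "vardict-java"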
|
chapmanb/bcbio-nextgen
|
bcbio/variation/vardict.py
|
Python
|
mit
| 19,796
|
[
"BWA"
] |
c83cd583354721961cb6a3aafda85526802af7b2e0444f45822295a0a7369abf
|
""" A computing element class using singularity containers.
This computing element will start the job in the container set by
the "ContainerRoot" config option.
    DIRAC will then be re-installed within the container; extra flags can
be given to the dirac-install command with the "ContainerExtraOpts"
option.
    See the Configuration/Resources/Computing documentation for details on
where to set the option parameters.
"""
import os
import sys
import shutil
import tempfile
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
from DIRAC.ConfigurationSystem.Client.Helpers import Operations
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.WorkloadManagementSystem.Utilities.Utils import createRelocatedJobWrapper
__RCSID__ = "$Id$"
DIRAC_INSTALL = os.path.join(DIRAC.rootPath, 'DIRAC', 'Core', 'scripts', 'dirac-install.py')
# Default container to use if it isn't specified in the CE options
CONTAINER_DEFROOT = "/cvmfs/cernvm-prod.cern.ch/cvm3"
CONTAINER_WORKDIR = "containers"
CONTAINER_INNERDIR = "/tmp"
CONTAINER_WRAPPER = """#!/bin/bash
echo "Starting inner container wrapper scripts at `date`."
set -x
cd /tmp
# Install DIRAC
./dirac-install.py %(install_args)s
source bashrc
dirac-configure -F %(config_args)s -I
# Run next wrapper (to start actual job)
bash %(next_wrapper)s
# Write the payload errorcode to a file for the outer scripts
echo $? > retcode
chmod 644 retcode
echo "Finishing inner continer wrapper scripts at `date`."
"""
class SingularityComputingElement(ComputingElement):
""" A Computing Element for running a job within a Singularity container.
"""
def __init__(self, ceUniqueID):
""" Standard constructor.
"""
super(SingularityComputingElement, self).__init__(ceUniqueID)
self.__submittedJobs = 0
self.__runningJobs = 0
self.__root = CONTAINER_DEFROOT
if 'ContainerRoot' in self.ceParameters:
self.__root = self.ceParameters['ContainerRoot']
self.__workdir = CONTAINER_WORKDIR
self.__innerdir = CONTAINER_INNERDIR
self.__singularityBin = 'singularity'
self.log = gLogger.getSubLogger('Singularity')
def __hasSingularity(self):
""" Search the current PATH for an exectuable named singularity.
Returns True if it is found, False otherwise.
"""
if self.ceParameters.get('ContainerBin'):
binPath = self.ceParameters['ContainerBin']
if os.path.isfile(binPath) and os.access(binPath, os.X_OK):
self.__singularityBin = binPath
self.log.debug('Use singularity from "%s"' % self.__singularityBin)
return True
if "PATH" not in os.environ:
return False # Hmm, PATH not set? How unusual...
for searchPath in os.environ["PATH"].split(os.pathsep):
binPath = os.path.join(searchPath, 'singularity')
if os.path.isfile(binPath):
        # File found, check it's executable to be certain:
if os.access(binPath, os.X_OK):
self.log.debug('Find singularity from PATH "%s"' % binPath)
return True
    # No suitable binaries found
return False
def __getInstallFlags(self):
""" Get the flags to pass to dirac-install.py inside the container.
Returns a string containing the command line flags.
"""
instOpts = []
setup = gConfig.getValue("/DIRAC/Setup", "unknown")
opsHelper = Operations.Operations(setup=setup)
installationName = opsHelper.getValue("Pilot/Installation", "")
if installationName:
instOpts.append('-V %s' % installationName)
    diracVersions = opsHelper.getValue("Pilot/Version", [])
    if diracVersions:  # guard against an unset Pilot/Version (avoids IndexError)
      instOpts.append("-r '%s'" % diracVersions[0])
pyVer = "%u%u" % (sys.version_info.major, sys.version_info.minor)
instOpts.append("-i %s" % pyVer)
pilotExtensionsList = opsHelper.getValue("Pilot/Extensions", [])
extensionsList = []
if pilotExtensionsList:
if pilotExtensionsList[0] != 'None':
extensionsList = pilotExtensionsList
else:
extensionsList = CSGlobals.getCSExtensions()
if extensionsList:
instOpts.append("-e '%s'" % ','.join([ext for ext in extensionsList if 'Web' not in ext]))
lcgVer = opsHelper.getValue("Pilot/LCGBundleVersion", None)
if lcgVer:
instOpts.append("-g %s" % lcgVer)
if 'ContainerExtraOpts' in self.ceParameters:
instOpts.append(self.ceParameters['ContainerExtraOpts'])
return ' '.join(instOpts)
@staticmethod
def __getConfigFlags():
""" Get the flags for dirac-configure inside the container.
Returns a string containing the command line flags.
"""
cfgOpts = []
setup = gConfig.getValue("/DIRAC/Setup", "unknown")
if setup:
cfgOpts.append("-S '%s'" % setup)
csServers = gConfig.getValue("/DIRAC/Configuration/Servers", [])
cfgOpts.append("-C '%s'" % ','.join(csServers))
cfgOpts.append("-n '%s'" % DIRAC.siteName())
return ' '.join(cfgOpts)
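  # Example output (hedged, hypothetical values): with /DIRAC/Setup set to
  # "MySetup", one configuration server and a site name, this returns e.g.
  # "-S 'MySetup' -C 'dips://cs.example.org:9135/Configuration/Server' -n 'SITE.Example.org'"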
def __createWorkArea(self, proxy, jobDesc, log, logLevel):
""" Creates a directory for the container and populates it with the
template directories, scripts & proxy.
"""
    # Create the directory for our container area
try:
os.mkdir(self.__workdir)
except OSError:
if not os.path.isdir(self.__workdir):
result = S_ERROR("Failed to create container base directory '%s'" % self.__workdir)
result['ReschedulePayload'] = True
return result
# Otherwise, directory probably just already exists...
baseDir = None
try:
baseDir = tempfile.mkdtemp(prefix="job%s_" % jobDesc["jobID"], dir=self.__workdir)
except OSError:
result = S_ERROR("Failed to create container work directory in '%s'" % self.__workdir)
result['ReschedulePayload'] = True
return result
self.log.debug('Use singularity workarea: %s' % baseDir)
for subdir in ["home", "tmp", "var_tmp"]:
os.mkdir(os.path.join(baseDir, subdir))
tmpDir = os.path.join(baseDir, "tmp")
# Now we have a directory, we can stage in the proxy and scripts
# Proxy
proxyLoc = os.path.join(tmpDir, "proxy")
rawfd = os.open(proxyLoc, os.O_WRONLY | os.O_CREAT, 0o600)
fd = os.fdopen(rawfd, "w")
fd.write(proxy)
fd.close()
# dirac-install.py
install_loc = os.path.join(tmpDir, "dirac-install.py")
shutil.copyfile(DIRAC_INSTALL, install_loc)
os.chmod(install_loc, 0o755)
# Job Wrapper (Standard DIRAC wrapper)
result = createRelocatedJobWrapper(tmpDir, self.__innerdir,
log=log, logLevel=logLevel, **jobDesc)
if not result['OK']:
result['ReschedulePayload'] = True
return result
wrapperPath = result['Value']
# Extra Wrapper (Container DIRAC installer)
wrapSubs = {'next_wrapper': wrapperPath,
'install_args': self.__getInstallFlags(),
'config_args': self.__getConfigFlags(),
}
wrapLoc = os.path.join(tmpDir, "dirac_container.sh")
rawfd = os.open(wrapLoc, os.O_WRONLY | os.O_CREAT, 0o700)
fd = os.fdopen(rawfd, "w")
fd.write(CONTAINER_WRAPPER % wrapSubs)
fd.close()
ret = S_OK()
ret['baseDir'] = baseDir
ret['tmpDir'] = tmpDir
ret['proxyLocation'] = proxyLoc
return ret
def __deleteWorkArea(self, baseDir):
""" Deletes the container work area (baseDir path) unless 'KeepWorkArea'
option is set. Returns None.
"""
if self.ceParameters.get('KeepWorkArea', False):
return
# We can't really do anything about errors: The tree should be fully owned
# by the pilot user, so we don't expect any permissions problems.
shutil.rmtree(baseDir, ignore_errors=True)
def __getEnv(self):
""" Gets the environment for use within the container.
We blank almost everything to prevent contamination from the host system.
"""
payloadEnv = {}
if 'TERM' in os.environ:
payloadEnv['TERM'] = os.environ['TERM']
payloadEnv['TMP'] = '/tmp'
payloadEnv['TMPDIR'] = '/tmp'
payloadEnv['X509_USER_PROXY'] = os.path.join(self.__innerdir, "proxy")
return payloadEnv
@staticmethod
def __checkResult(tmpDir):
""" Gets the result of the payload command and returns it. """
# The wrapper writes the inner job return code to "retcode"
# in the working directory.
try:
fd = open(os.path.join(tmpDir, "retcode"), "r")
retCode = int(fd.read())
fd.close()
except (IOError, ValueError):
# Something failed while trying to get the return code
result = S_ERROR("Failed to get return code from inner wrapper")
result['ReschedulePayload'] = True
return result
result = S_OK()
if retCode:
# This is the one case where we don't reschedule:
# An actual failure of the inner payload for some reason
result = S_ERROR("Command failed with exit code %d" % retCode)
return result
# pylint: disable=unused-argument,arguments-differ
def submitJob(self, executableFile, proxy, jobDesc, log, logLevel, **kwargs):
""" Start a container for a job.
executableFile is ignored. A new wrapper suitable for running in a
container is created from jobDesc.
"""
rootImage = self.__root
# Check that singularity is available
if not self.__hasSingularity():
self.log.error('Singularity is not installed on PATH.')
result = S_ERROR("Failed to find singularity ")
result['ReschedulePayload'] = True
return result
self.log.info('Creating singularity container')
# Start by making the directory for the container
ret = self.__createWorkArea(proxy, jobDesc, log, logLevel)
if not ret['OK']:
return ret
baseDir = ret['baseDir']
tmpDir = ret['tmpDir']
proxyLoc = ret['proxyLocation']
# Now we have to set-up proxy renewal for the container
# This is fairly easy as it remains visible on the host filesystem
ret = getProxyInfo()
if not ret['OK']:
pilotProxy = None
else:
pilotProxy = ret['Value']['path']
result = gThreadScheduler.addPeriodicTask(self.proxyCheckPeriod, self._monitorProxy,
taskArgs=(pilotProxy, proxyLoc),
executions=0, elapsedTime=0)
renewTask = None
if result['OK']:
renewTask = result['Value']
else:
self.log.warn('Failed to start proxy renewal task')
# Very simple accounting
self.__submittedJobs += 1
self.__runningJobs += 1
    # Now prepare to start singularity
    # Mount /cvmfs if it exists on the host
withCVMFS = os.path.isdir("/cvmfs")
innerCmd = os.path.join(self.__innerdir, "dirac_container.sh")
cmd = [self.__singularityBin, "exec"]
cmd.extend(["-c", "-i", "-p"])
cmd.extend(["-W", baseDir])
if withCVMFS:
cmd.extend(["-B", "/cvmfs"])
if 'ContainerBind' in self.ceParameters:
bindPaths = self.ceParameters['ContainerBind'].split(',')
for bindPath in bindPaths:
cmd.extend(["-B", bindPath.strip()])
if 'ContainerOptions' in self.ceParameters:
containerOpts = self.ceParameters['ContainerOptions'].split(',')
for opt in containerOpts:
cmd.extend([opt.strip()])
cmd.extend([rootImage, innerCmd])
self.log.debug('Execute singularity command: %s' % cmd)
self.log.debug('Execute singularity env: %s' % self.__getEnv())
result = systemCall(0, cmd, callbackFunction=self.sendOutput, env=self.__getEnv())
self.__runningJobs -= 1
if not result["OK"]:
if renewTask:
gThreadScheduler.removeTask(renewTask)
self.__deleteWorkArea(baseDir)
result = S_ERROR("Error running singularity command")
result['ReschedulePayload'] = True
return result
result = self.__checkResult(tmpDir)
if renewTask:
gThreadScheduler.removeTask(renewTask)
self.__deleteWorkArea(baseDir)
return result
def getCEStatus(self, jobIDList=None):
""" Method to return information on running and pending jobs.
"""
result = S_OK()
result['SubmittedJobs'] = self.__submittedJobs
result['RunningJobs'] = self.__runningJobs
result['WaitingJobs'] = 0
return result
|
andresailer/DIRAC
|
Resources/Computing/SingularityComputingElement.py
|
Python
|
gpl-3.0
| 12,438
|
[
"DIRAC"
] |
225644d0968cbb233bc033593f2f161c0064d604649019653399f0b44c55c90a
|
# kchans.py ---
#
# Filename: kchans.py
# Description:
# Author: subhasis ray
# Maintainer:
# Created: Fri Apr 17 23:58:49 2009 (+0530)
# Version:
# Last-Updated: Sat Dec 8 15:52:42 2012 (+0530)
# By: subha
# Update #: 1057
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
#
# Code:
import moose
from channelbase import *
from numpy import where, linspace, exp, arange, ones, zeros, array
import numpy as np
class KChannel(ChannelBase):
"""This is a dummy base class to keep type information."""
annotation = {'cno': 'cno_0000047'}
abstract = True
Ek = -95e-3
X = 0.0
def __init__(self, path):
ChannelBase.__init__(self, path)
class KDR(KChannel):
"""Delayed rectifier current
`In hippocampal pyramidal neurons, however, it has been reported
    to have relatively slow activation, with a time to peak of some
50-100 msec and even slower inactivation. Such a slow activation
would make it ill suited to participate in the repolarization of
the AP.... An equation that can describe IK(DR) in cortical
neurons is
IK(DR) = m^3 * h * gbar_K(DR) * (Vm - EK)
where m and h depend on voltage and time.`
- Johnston & Wu, Foundations of Cellular Neurophysiology (1995).
But in Traub 2005, the equation used is:
IK(DR) = m^4 * gbar_K(DR) * (Vm - EK)
"""
annotation = {'cno': 'cno_0000105'}
abstract = False
Xpower = 4
tau_x = where(v_array < -10e-3, \
1e-3 * (0.25 + 4.35 * exp((v_array + 10.0e-3) / 10.0e-3)), \
1e-3 * (0.25 + 4.35 * exp((- v_array - 10.0e-3) / 10.0e-3)))
inf_x = 1.0 / (1.0 + exp((- v_array - 29.5e-3) / 10e-3))
def __init__(self, path):
KChannel.__init__(self, path)
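# Illustrative sketch (not part of the original model): evaluating the
# Traub 2005 steady-state form IK(DR) = m^4 * gbar_K(DR) * (Vm - EK) at a
# single voltage. The helper name and the gbar value are hypothetical.
def _kdr_steady_state_current(vm, gbar=1e-9, ek=KDR.Ek):
    """Return the steady-state KDR current (A) at membrane potential vm (V)."""
    m_inf = 1.0 / (1.0 + np.exp((- vm - 29.5e-3) / 10e-3))
    return m_inf ** 4 * gbar * (vm - ek)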
class KDR_FS(KDR):
"""KDR for fast spiking neurons"""
annotation = {'cno': 'cno_0000105'}
abstract = False
Ek = -100e-3
inf_x = 1.0 / (1.0 + exp((- v_array - 27e-3) / 11.5e-3))
tau_x = where(v_array < -10e-3, \
1e-3 * (0.25 + 4.35 * exp((v_array + 10.0e-3) / 10.0e-3)), \
1e-3 * (0.25 + 4.35 * exp((- v_array - 10.0e-3) / 10.0e-3)))
def __init__(self, path):
KChannel.__init__(self, path)
class KA(KChannel):
"""A type K+ channel"""
annotation = {'cno': 'cno_0000105'}
abstract = False
Xpower = 4
Ypower = 1
inf_x = 1 / ( 1 + exp( ( - v_array - 60e-3 ) / 8.5e-3 ) )
tau_x = 1e-3 * (0.185 + 0.5 / ( exp( ( v_array + 35.8e-3 ) / 19.7e-3 ) + exp( ( - v_array - 79.7e-3 ) / 12.7e-3 ) ))
inf_y = 1 / ( 1 + exp( ( v_array + 78e-3 ) / 6e-3 ) )
tau_y = where( v_array <= -63e-3,\
1e-3 * 0.5 / ( exp( ( v_array + 46e-3 ) / 5e-3 ) + exp( ( - v_array - 238e-3 ) / 37.5e-3 ) ), \
9.5e-3)
def __init__(self, path):
KChannel.__init__(self, path)
class KA_IB(KA):
"""A type K+ channel for tufted intrinsically bursting cells -
multiplies tau_h of KA by 2.6"""
annotation = {'cno': 'cno_0000105'}
abstract = False
inf_y = 1 / ( 1 + exp( ( v_array + 78e-3 ) / 6e-3 ) )
tau_y = 2.6 * KA.tau_y
def __init__(self, path):
KChannel.__init__(self, path)
class K2(KChannel):
annotation = {'cno': 'cno_0000105'}
Xpower = 1
Ypower = 1
inf_x = 1 / ( 1 + exp( ( - v_array *1e3 - 10 ) / 17 ) )
tau_x = 1e-3 * (4.95 + 0.5 / ( exp( ( v_array * 1e3 - 81 ) / 25.6 ) + exp( ( - v_array * 1e3 - 132 ) / 18 ) ))
inf_y = 1 / ( 1 + exp( ( v_array*1e3 + 58 ) / 10.6 ) )
tau_y = 1e-3 * (60 + 0.5 / ( exp( ( v_array*1e3 - 1.33 ) / 200 ) + exp( ( - v_array*1e3 - 130 ) / 7.1 ) ))
def __init__(self, path):
KChannel.__init__(self, path)
class KM(KChannel):
"""Mascarinic sensitive K channel"""
annotation = {'cno': 'cno_0000105'}
abstract = False
Xpower = 1
alpha_x = 1e3 * 0.02 / ( 1 + exp((-v_array - 20e-3 ) / 5e-3))
beta_x = 1e3 * 0.01 * exp((-v_array - 43e-3) / 18e-3)
def __init__(self, path):
KChannel.__init__(self, path)
class KCaChannel(KChannel):
"""[Ca+2] dependent K+ channel base class."""
annotation = {'cno': 'cno_0000047'}
abstract = True
Zpower = 1
mstring = ('addmsg1', '../CaPool concOut . concen')
def __init__(self, path):
KChannel.__init__(self, path)
class KAHPBase(KCaChannel):
annotation = {'cno': 'cno_0000108'}
abstract = True
Z = 0.0
def __init__(self, path):
KCaChannel.__init__(self, path)
class KAHP(KAHPBase):
"""AHP type K+ current"""
annotation = {'cno': 'cno_0000108'}
abstract = False
alpha_z = where(ca_conc < 100.0, 0.1 * ca_conc, 10.0)
beta_z = ones(ca_divs + 1) * 10.0
def __init__(self, path):
KAHPBase.__init__(self, path)
class KAHP_SLOWER(KAHPBase):
annotation = {'cno': 'cno_0000108'}
abstract = False
alpha_z = where(ca_conc < 500.0, 1e3 * ca_conc / 50000, 10.0)
beta_z = ones(ca_divs + 1) * 1.0
def __init__(self, path):
KAHPBase.__init__(self, path)
class KAHP_DP(KAHPBase):
"""KAHP for deep pyramidal cell"""
annotation = {'cno': 'cno_0000108'}
abstract = False
alpha_z = where(ca_conc < 100.0, 1e-1 * ca_conc, 10.0)
beta_z = ones(ca_divs + 1)
def __init__(self, path):
KAHPBase.__init__(self, path)
class KC(KCaChannel):
"""C type K+ channel
"""
annotation = {'cno': 'cno_0000106'}
abstract = False
Xpower = 1
Zpower = 1
tableA_z = where(ca_conc < 250.0, ca_conc / 250.0, 1.0)
tableB_z = ones(ca_divs + 1)
tableA_x = where(v_array < -10e-3,
2e3 / 37.95 * ( exp( ( v_array * 1e3 + 50 ) / 11 - ( v_array * 1e3 + 53.5 ) / 27 ) ),
2e3 * exp(( - v_array * 1e3 - 53.5) / 27))
tableB_x = where(v_array < -10e-3,
2e3 * exp(( - v_array * 1e3 - 53.5) / 27),
0.0)
instant = 4
def __init__(self, path):
KCaChannel.__init__(self, path)
class KC_FAST(KC):
"""Fast KC channel
"""
annotation = {'cno': 'cno_0000106'}
abstract = False
tableA_x = KC.tableA_x * 2
tableB_x = KC.tableB_x * 2
def __init__(self, path):
KC.__init__(self, path)
def initKChannelPrototypes(libpath='/library'):
channel_names = ['KDR',
'KDR_FS',
'KA',
'KA_IB',
'K2',
'KM',
'KAHP',
'KAHP_SLOWER',
'KAHP_DP',
'KC',
'KC_FAST']
return dict([(key, eval('%s("%s")' % (key, prototypes[key].path))) for key in channel_names])
#
# kchans.py ends here
|
dilawar/moose-full
|
moose-examples/traub_2005/py/kchans.py
|
Python
|
gpl-2.0
| 6,886
|
[
"MOOSE"
] |
d392765892fc810df712a98634560c655cf90f4dbb75428ec2ee29e178d54d65
|
#
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import numpy as np
import espressomd.shapes
class ShapeTests(ut.TestCase):
def test_Union(self):
union = espressomd.shapes.Union()
wall1 = espressomd.shapes.Wall(normal=[0, 0, 1], dist=0)
wall2 = espressomd.shapes.Wall(normal=[0, 0, -1], dist=-10)
self.assertTrue(union.call_method('empty'))
union.add([wall1, wall2])
self.assertFalse(union.call_method('empty'))
self.assertEqual(union.size(), 2)
# check object retrieval
pwall1, pwall2 = union.call_method('get_elements')
self.assertIsInstance(pwall1, espressomd.shapes.Wall)
self.assertIsInstance(pwall2, espressomd.shapes.Wall)
np.testing.assert_almost_equal(
np.copy(pwall1.normal), np.copy(wall1.normal))
np.testing.assert_almost_equal(
np.copy(pwall2.normal), np.copy(wall2.normal))
np.testing.assert_almost_equal(pwall1.dist, wall1.dist)
np.testing.assert_almost_equal(pwall2.dist, wall2.dist)
self.assertAlmostEqual(union.calc_distance(
position=[1, 2, 4.5])[0], 4.5)
self.assertAlmostEqual(union.calc_distance(
position=[1, 2, 5.0])[0], 5.0)
self.assertAlmostEqual(union.calc_distance(
position=[1, 2, 6.5])[0], 3.5)
# negative distances are not well-defined for a union of shapes
with self.assertRaises(ValueError):
union.calc_distance(position=[1, 2, 11.5])
union.clear()
self.assertTrue(union.call_method('empty'))
self.assertEqual(union.size(), 0)
self.assertEqual(union.calc_distance(position=[1, 2, 6.5])[0], np.inf)
union.add([wall1, wall2])
union.remove(wall2)
self.assertAlmostEqual(union.calc_distance(
position=[1, 2, 6.5])[0], 6.5)
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/python/shapes.py
|
Python
|
gpl-3.0
| 2,593
|
[
"ESPResSo"
] |
0c73209a065e4e14122aac55331e1e9a8b56f5c35b7e93e9cd268330247a17c9
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
"""
Guidelines for writing new hacking checks
- Use only for Cinder specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to
cinder/tests/test_hacking.py
"""
# NOTE(thangp): Ignore N323 pep8 error caused by importing cinder objects
UNDERSCORE_IMPORT_FILES = ['./cinder/objects/__init__.py']
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
"\(\s*_\(\s*('|\")")
string_translation = re.compile(r"(.)*_\(\s*('|\")")
vi_header_re = re.compile(r"^#\s+vim?:.+")
underscore_import_check = re.compile(r"(.)*i18n\s+import\s+_(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
no_audit_log = re.compile(r"(.)*LOG\.audit(.)*")
no_print_statements = re.compile(r"\s*print\s*\(.+\).*")
# NOTE(jsbryant): When other oslo libraries switch over non-namespaced
# imports, we will need to add them to the regex below.
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](concurrency|db"
"|config|utils|serialization|log)")
no_contextlib_nested = re.compile(r"\s*with (contextlib\.)?nested\(")
log_translation_LI = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LE = re.compile(
r"(.)*LOG\.(exception|error)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
# Need to disable pylint check here as it doesn't catch CHECK_DESC
# being defined in the subclasses.
message = message or self.CHECK_DESC # pylint: disable=E1101
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
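# Illustrative sketch (not one of the registered Cinder checks): a minimal
# BaseASTChecker subclass written the way the class docstring above
# describes. The rule number and the name 'forbidden_call' are hypothetical.
class CheckForForbiddenCall(BaseASTChecker):
    CHECK_DESC = 'N399: Example check; do not call forbidden_call()'

    def visit_Call(self, node):
        # Flag any bare call to the (hypothetical) forbidden_call().
        if self._check_call_names(node, ['forbidden_call']):
            self.add_error(node)
        super(CheckForForbiddenCall, self).generic_visit(node)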
def no_vi_headers(physical_line, line_number, lines):
"""Check for vi editor configuration in source files.
By default vi modelines can only appear in the first or
last 5 lines of a source file.
N314
"""
# NOTE(gilliard): line_number is 1-indexed
if line_number <= 5 or line_number > len(lines) - 5:
if vi_header_re.match(physical_line):
return 0, "N314: Don't put vi configuration in source files"
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
N319
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "N319 Don't translate debug level logs")
def no_mutable_default_args(logical_line):
msg = "N322: Method's default argument shouldn't be mutable!"
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
if mutable_default_args.match(logical_line):
yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
    can't trust unit tests to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif(translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield(0, "N323: Found use of _() without explicit import of _ !")
class CheckForStrUnicodeExc(BaseASTChecker):
"""Checks for the use of str() or unicode() on an exception.
This currently only handles the case where str() or unicode()
is used in the scope of an exception handler. If the exception
is passed into a function, returned from an assertRaises, or
used on an exception created in the same scope, this does not
catch it.
"""
CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
'exception. Remove or use six.text_type()')
def __init__(self, tree, filename):
super(CheckForStrUnicodeExc, self).__init__(tree, filename)
self.name = []
self.already_checked = []
def visit_TryExcept(self, node):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name.id)
super(CheckForStrUnicodeExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrUnicodeExc, self).generic_visit(node)
def visit_Call(self, node):
if self._check_call_names(node, ['str', 'unicode']):
if node not in self.already_checked:
self.already_checked.append(node)
if isinstance(node.args[0], ast.Name):
if node.args[0].id in self.name:
self.add_error(node.args[0])
super(CheckForStrUnicodeExc, self).generic_visit(node)
def check_assert_called_once(logical_line, filename):
msg = ("N327: assert_called_once is a no-op. please use assert_called_"
"once_with to test with explicit parameters or an assertEqual with"
" call_count.")
    if 'cinder/tests/functional' in filename or 'cinder/tests/unit' in filename:
pos = logical_line.find('.assert_called_once(')
if pos != -1:
yield (pos, msg)
def validate_log_translations(logical_line, filename):
# TODO(smcginnis): The following is temporary as a series
# of patches are done to address these issues. It should be
# removed completely when bug 1433216 is closed.
ignore_dirs = [
"cinder/openstack",
"cinder/volume"]
for directory in ignore_dirs:
if directory in filename:
return
# Translations are not required in the test directory.
# This will not catch all instances of violations, just direct
# misuse of the form LOG.info('Message').
if "cinder/tests" in filename:
return
msg = "N328: LOG.info messages require translations `_LI()`!"
if log_translation_LI.match(logical_line):
yield (0, msg)
msg = ("N329: LOG.exception and LOG.error messages require "
"translations `_LE()`!")
if log_translation_LE.match(logical_line):
yield (0, msg)
msg = "N330: LOG.warning messages require translations `_LW()`!"
if log_translation_LW.match(logical_line):
yield (0, msg)
def check_oslo_namespace_imports(logical_line):
if re.match(oslo_namespace_imports, logical_line):
msg = ("N333: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
def check_datetime_now(logical_line, noqa):
if noqa:
return
msg = ("C301: Found datetime.now(). "
"Please use timeutils.utcnow() from oslo_utils.")
if 'datetime.now' in logical_line:
yield(0, msg)
def check_unicode_usage(logical_line, noqa):
if noqa:
return
msg = "C302: Found unicode() call. Please use six.text_type()."
if 'unicode(' in logical_line:
yield(0, msg)
def check_no_print_statements(logical_line, filename, noqa):
# The files in cinder/cmd do need to use 'print()' so
# we don't need to check those files. Other exemptions
# should use '# noqa' to avoid failing here.
if "cinder/cmd" not in filename and not noqa:
if re.match(no_print_statements, logical_line):
msg = ("C303: print() should not be used. "
"Please use LOG.[info|error|warning|exception|debug]. "
"If print() must be used, use '# noqa' to skip this check.")
yield(0, msg)
def check_no_log_audit(logical_line):
"""Ensure that we are not using LOG.audit messages
Plans are in place going forward as discussed in the following
spec (https://review.openstack.org/#/c/91446/) to take out
LOG.audit messages. Given that audit was a concept invented
for OpenStack we can enforce not using it.
"""
if no_audit_log.match(logical_line):
yield(0, "C304: Found LOG.audit. Use LOG.info instead.")
def check_no_contextlib_nested(logical_line):
msg = ("C305: contextlib.nested is deprecated. With Python 2.7 and later "
"the with-statement supports multiple nested objects. See https://"
"docs.python.org/2/library/contextlib.html#contextlib.nested "
"for more information.")
if no_contextlib_nested.match(logical_line):
yield(0, msg)
def check_timeutils_strtime(logical_line):
msg = ("C306: Found timeutils.strtime(). "
"Please use oslo_utils.timeutils.isotime() or datetime.strftime()")
if 'timeutils.strtime' in logical_line:
yield(0, msg)
def factory(register):
register(no_vi_headers)
register(no_translate_debug_logs)
register(no_mutable_default_args)
register(check_explicit_underscore_import)
register(CheckForStrUnicodeExc)
register(check_assert_called_once)
register(check_oslo_namespace_imports)
register(check_datetime_now)
register(check_timeutils_strtime)
register(validate_log_translations)
register(check_unicode_usage)
register(check_no_print_statements)
register(check_no_log_audit)
register(check_no_contextlib_nested)
|
rakeshmi/cinder
|
cinder/hacking/checks.py
|
Python
|
apache-2.0
| 11,942
|
[
"VisIt"
] |
b914fe94bd9bba6479eb46a8ded9f2a6a10711bf37486c60cdd0d90e24630a47
|
# Copyright (C) 2018 Jaguar Land Rover
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Authors:
# * Guillaume Tucker <guillaume.tucker@collabora.com>
import importlib
import select
def load(name, *args, **kwargs):
"""Load an IPC class and return an instance of it.
The provided name needs to be a module and a class name in a dotted format.
For example: ipc.something.MyIPC
An instance of this class will be created using any arbitrary args and
kwargs passed to it. The resulting object will be returned.
"""
module_name, _, cls_name = name.rpartition('.')
module = importlib.import_module(module_name)
return getattr(module, cls_name)(*args, **kwargs)
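# Example usage (the dotted path below is hypothetical):
#   backend = load('ipc.zeromq.ZeromqIPC', port=5555)
# which is equivalent to:
#   from ipc.zeromq import ZeromqIPC
#   backend = ZeromqIPC(port=5555)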
class IPC(object):
"""The IPC module interface
All the VSM IPC modules should inherit from this class and implement the
necessary methods to enable the VSM to use them.
"""
def __init__(self):
"""Connect to the IPC system."""
pass
def close(self):
"""Close connection to the IPC system."""
pass
def send(self, signal, value):
"""Send signal to the IPC system.
Both signal and value arguments are strings, with the signal name and
which value to send. The IPC module should take care of reformatting
these arguments into a message to send via its IPC mechanism. It
should also filter out unknown signals.
"""
raise NotImplementedError("IPC.send")
def receive(self):
"""Receive signal from the IPC system.
When called, this method should block while the IPC module is waiting
for an incoming message to be read. It should then handle the data and
reformat it to return a (signal, value) 2-tuple with strings, in the
same format as for the send(signal, value) method arguments.
"""
raise NotImplementedError("IPC.receive")
class FilenoIPC(IPC):
"""Interface for IPC modules that read from a file descriptor.
In order to be able to wait for an input from multiple modules, each IPC
module needs to provide a file descriptor via the fileno() method. This
will then be used directly with the select() standard library function.
"""
def fileno(self):
"""Return the file descriptor to use to read incoming data."""
raise NotImplementedError("IPC.fileno")
class IPCList(IPC):
"""List of multiple IPC modules to use in parallel.
    This will instantiate a list of class names and use them as a list of IPC
modules. Each signal that the VSM needs to send will be sent through all
the modules. Likewise, a signal received from any module will be used by
the VSM. Each module that needs to be able to receive signals must
implement the FilenoIPC interface (essentially the fileno() method) for
this purpose. Modules without this method will only be able to send
signals, not receive any.
"""
def __init__(self, names):
self._list = list(load(name) for name in names)
self._inputs = list(i for i in self._list if hasattr(i, 'fileno'))
self._read = list()
def close(self):
for i in self._list:
i.close()
def send(self, *args, **kw):
for i in self._list:
i.send(*args, **kw)
def receive(self, *args, **kw):
if not self._read:
self._read, _, _ = select.select(self._inputs, [], [])
return self._read.pop().receive(*args, **kw)
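# Example usage of IPCList (module paths and signal names are hypothetical):
#   ipcs = IPCList(['ipc.zeromq.ZeromqIPC', 'ipc.dbus.DBusIPC'])
#   ipcs.send('engine.speed', '2200')   # fan out to every backend
#   signal, value = ipcs.receive()      # block until any backend has data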
|
GENIVI/vehicle_signal_manager
|
ipc/__init__.py
|
Python
|
mpl-2.0
| 3,647
|
[
"Jaguar"
] |
f236f0b9df9dbee79f1877d6a3ce530109e46d5f54063b81c67ac38182dd9993
|
"""MDTraj: Read, write and analyze MD trajectories with only a few lines of Python code.
MDTraj is a python library that allows users to manipulate molecular dynamics
(MD) trajectories and perform a variety of analyses, including fast RMSD,
solvent accessible surface area, hydrogen bonding, etc. A highlight of MDTraj
is the wide variety of molecular dynamics trajectory file formats which are
supported, including RCSB pdb, GROMACS xtc and trr, CHARMM / NAMD dcd, AMBER
binpos, AMBER NetCDF, AMBER mdcrd, TINKER arc and MDTraj HDF5.
"""
from __future__ import print_function, absolute_import
DOCLINES = __doc__.split("\n")
import sys
from setuptools import setup, Extension
sys.path.insert(0, '.')
from basesetup import (find_packages, write_version_py, build_ext,
StaticLibrary, CompilerDetection)
try:
import numpy
except ImportError:
print('Building and running mdtraj requires numpy', file=sys.stderr)
sys.exit(1)
try:
import Cython
if Cython.__version__ < '0.19':
raise ImportError
from Cython.Build import cythonize
except ImportError:
print('Building from source requires cython >= 0.19', file=sys.stderr)
exit(1)
try:
# add an optional --disable-openmp to disable OpenMP support
sys.argv.remove('--disable-openmp')
disable_openmp = True
except ValueError:
disable_openmp = False
try:
# add an optional command line flag --no-install-deps to setup.py
# to turn off setuptools automatic downloading of dependencies
sys.argv.remove('--no-install-deps')
no_install_deps = True
except ValueError:
no_install_deps = False
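# For example, a hypothetical invocation combining both custom flags:
#   python setup.py build_ext --disable-openmp --no-install-deps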
if sys.version_info[0] == 2:
    # required to fix cythonize() for old versions of setuptools on
    # python 2
m = sys.modules['setuptools.extension']
m.Extension.__dict__ = m._Extension.__dict__
##########################
VERSION = "1.4.0.dev0"
ISRELEASED = False
__version__ = VERSION
##########################
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
# Global info about compiler
compiler = CompilerDetection(disable_openmp)
extra_cpp_libraries = []
if sys.platform == 'darwin':
extra_cpp_libraries.append('stdc++')
if sys.platform == 'win32':
extra_cpp_libraries.append('Ws2_32')
# For determining if a path is relative (for dtr)
extra_cpp_libraries.append('Shlwapi')
################################################################################
# Declaration of the compiled extension modules (cython + c)
################################################################################
xtc = Extension('mdtraj.formats.xtc',
sources=['mdtraj/formats/xtc/src/xdrfile.c',
'mdtraj/formats/xtc/src/xdrfile_xtc.c',
'mdtraj/formats/xtc/xtc.pyx'],
include_dirs=['mdtraj/formats/xtc/include/',
'mdtraj/formats/xtc/', numpy.get_include()])
trr = Extension('mdtraj.formats.trr',
sources=['mdtraj/formats/xtc/src/xdrfile.c',
'mdtraj/formats/xtc/src/xdrfile_trr.c',
'mdtraj/formats/xtc/trr.pyx'],
include_dirs=['mdtraj/formats/xtc/include/',
'mdtraj/formats/xtc/', numpy.get_include()])
dcd = Extension('mdtraj.formats.dcd',
sources=['mdtraj/formats/dcd/src/dcdplugin.c',
'mdtraj/formats/dcd/dcd.pyx'],
include_dirs=["mdtraj/formats/dcd/include/",
'mdtraj/formats/dcd/', numpy.get_include()])
binpos = Extension('mdtraj.formats.binpos',
sources=['mdtraj/formats/binpos/src/binposplugin.c',
'mdtraj/formats/binpos/binpos.pyx'],
include_dirs=['mdtraj/formats/binpos/include/',
'mdtraj/formats/binpos/', numpy.get_include()])
dtr = Extension('mdtraj.formats.dtr',
sources=['mdtraj/formats/dtr/src/dtrplugin.cxx',
'mdtraj/formats/dtr/dtr.pyx'],
include_dirs=['mdtraj/formats/dtr/include/',
'mdtraj/formats/dtr/', numpy.get_include()],
define_macros = [('DESRES_READ_TIMESTEP2', 1)],
language='c++',
libraries=extra_cpp_libraries)
def rmsd_extensions():
compiler_args = (compiler.compiler_args_openmp + compiler.compiler_args_sse2 +
compiler.compiler_args_sse3 + compiler.compiler_args_opt)
compiler_libraries = compiler.compiler_libraries_openmp
libtheobald = StaticLibrary(
'mdtraj.core.lib.libtheobald',
sources=[
'mdtraj/rmsd/src/theobald_rmsd.c',
'mdtraj/rmsd/src/center.c'],
include_dirs=[
'mdtraj/rmsd/include'],
export_include=['mdtraj/rmsd/include/theobald_rmsd.h',
'mdtraj/rmsd/include/center.h'],
# don't enable OpenMP
extra_compile_args=(compiler.compiler_args_sse2 +
compiler.compiler_args_sse3 +
compiler.compiler_args_opt))
rmsd = Extension('mdtraj._rmsd',
sources=[
'mdtraj/rmsd/src/theobald_rmsd.c',
'mdtraj/rmsd/src/rotation.c',
'mdtraj/rmsd/src/center.c',
'mdtraj/rmsd/_rmsd.pyx'],
include_dirs=[
'mdtraj/rmsd/include', numpy.get_include()],
extra_compile_args=compiler_args,
libraries=compiler_libraries)
lprmsd = Extension('mdtraj._lprmsd',
sources=[
'mdtraj/rmsd/src/theobald_rmsd.c',
'mdtraj/rmsd/src/rotation.c',
'mdtraj/rmsd/src/center.c',
'mdtraj/rmsd/src/fancy_index.cpp',
'mdtraj/rmsd/src/Munkres.cpp',
'mdtraj/rmsd/src/euclidean_permutation.cpp',
'mdtraj/rmsd/_lprmsd.pyx'],
language='c++',
include_dirs=[
'mdtraj/rmsd/include', numpy.get_include()],
extra_compile_args=compiler_args,
libraries=compiler_libraries + extra_cpp_libraries)
return rmsd, lprmsd, libtheobald
def geometry_extensions():
compiler_args = (compiler.compiler_args_sse2 + compiler.compiler_args_sse3 +
compiler.compiler_args_opt)
define_macros = None
return [
Extension('mdtraj.geometry._geometry',
sources=['mdtraj/geometry/src/geometry.c',
'mdtraj/geometry/src/sasa.c',
'mdtraj/geometry/src/dssp.cpp',
'mdtraj/geometry/src/_geometry.pyx'],
include_dirs=['mdtraj/geometry/include',
'mdtraj/geometry/src/kernels',
numpy.get_include()],
define_macros=define_macros,
extra_compile_args=compiler_args,
libraries=extra_cpp_libraries,
language='c++'),
Extension('mdtraj.geometry.drid',
sources=["mdtraj/geometry/drid.pyx",
"mdtraj/geometry/src/dridkernels.c",
"mdtraj/geometry/src/cephes/cbrt.c",
"mdtraj/geometry/src/cephes/isnan.c",
"mdtraj/geometry/src/moments.c"],
include_dirs=["mdtraj/geometry/include",
"mdtraj/geometry/include/cephes",
numpy.get_include()],
define_macros=define_macros,
extra_compile_args=compiler_args),
Extension('mdtraj.geometry.neighbors',
sources=["mdtraj/geometry/neighbors.pyx",
"mdtraj/geometry/src/neighbors.cpp"],
include_dirs=["mdtraj/geometry/include",],
define_macros=define_macros,
extra_compile_args=compiler_args,
language='c++'),
]
extensions = [xtc, trr, dcd, binpos, dtr]
extensions.extend(rmsd_extensions())
extensions.extend(geometry_extensions())
write_version_py(VERSION, ISRELEASED)
setup(name='mdtraj',
author='Robert McGibbon',
author_email='rmcgibbo@gmail.com',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
version=__version__,
license='LGPLv2.1+',
url='http://mdtraj.org',
download_url = "https://github.com/rmcgibbo/mdtraj/releases/latest",
platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],
classifiers=CLASSIFIERS.splitlines(),
packages=find_packages(),
cmdclass={'build_ext': build_ext},
ext_modules=cythonize(extensions),
package_data={'mdtraj.formats.pdb': ['data/*'],
'mdtraj.testing': ['reference/*',
'reference/ala_dipeptide_trj/*',
'reference/ala_dipeptide_trj/not_hashed/*',
'reference/frame0.dtr/*',
'reference/frame0.dtr/not_hashed/*',],
'mdtraj.html': ['static/*']},
exclude_package_data={'mdtraj.testing': ['reference/ala_dipeptide_trj',
'reference/ala_dipeptide_trj/not_hashed',
'reference/frame0.dtr',
'reference/frame0.dtr/not_hashed',]},
zip_safe=False,
entry_points={'console_scripts':
['mdconvert = mdtraj.scripts.mdconvert:entry_point',
'mdinspect = mdtraj.scripts.mdinspect:entry_point']},
)
|
kyleabeauchamp/mdtraj
|
setup.py
|
Python
|
lgpl-2.1
| 10,342
|
[
"Amber",
"CHARMM",
"Gromacs",
"MDTraj",
"NAMD",
"NetCDF",
"TINKER"
] |
1d88097c635c07c2a1b631dbcbdb5e08852d45c4e22efcf245e3474c4576e4d6
|
# Copyright 2016-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from ..base import InputData, MeanAndVariance, Parameter, RegressionData
from ..conditionals import conditional
from ..config import default_float, default_jitter
from ..kernels import Kernel
from ..likelihoods import Likelihood
from ..mean_functions import MeanFunction
from ..utilities import to_default_float
from .model import GPModel
from .training_mixins import InternalDataTrainingLossMixin
from .util import data_input_to_tensor
class GPMC(GPModel, InternalDataTrainingLossMixin):
def __init__(
self,
data: RegressionData,
kernel: Kernel,
likelihood: Likelihood,
mean_function: Optional[MeanFunction] = None,
num_latent_gps: Optional[int] = None,
):
"""
        data is a tuple of (X, Y), where X is a data matrix of size [N, D] and Y is a data matrix of size [N, R]
kernel, likelihood, mean_function are appropriate GPflow objects
This is a vanilla implementation of a GP with a non-Gaussian
likelihood. The latent function values are represented by centered
(whitened) variables, so
v ~ N(0, I)
f = Lv + m(x)
with
L L^T = K
"""
if num_latent_gps is None:
num_latent_gps = self.calc_num_latent_gps_from_data(data, kernel, likelihood)
super().__init__(kernel, likelihood, mean_function, num_latent_gps)
self.data = data_input_to_tensor(data)
self.num_data = self.data[0].shape[0]
self.V = Parameter(np.zeros((self.num_data, self.num_latent_gps)))
self.V.prior = tfp.distributions.Normal(
loc=to_default_float(0.0), scale=to_default_float(1.0)
)
# type-ignore is because of changed method signature:
def log_posterior_density(self) -> tf.Tensor: # type: ignore
return self.log_likelihood() + self.log_prior_density()
# type-ignore is because of changed method signature:
def _training_loss(self) -> tf.Tensor: # type: ignore
return -self.log_posterior_density()
# type-ignore is because of changed method signature:
def maximum_log_likelihood_objective(self) -> tf.Tensor: # type: ignore
return self.log_likelihood()
def log_likelihood(self) -> tf.Tensor:
r"""
Construct a tf function to compute the likelihood of a general GP
model.
\log p(Y | V, theta).
"""
X_data, Y_data = self.data
K = self.kernel(X_data)
L = tf.linalg.cholesky(
K + tf.eye(tf.shape(X_data)[0], dtype=default_float()) * default_jitter()
)
F = tf.linalg.matmul(L, self.V) + self.mean_function(X_data)
return tf.reduce_sum(self.likelihood.log_prob(F, Y_data))
def predict_f(
self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
) -> MeanAndVariance:
"""
        Xnew is a data matrix of the points at which we want to predict
This method computes
p(F* | (F=LV) )
where F* are points on the GP at Xnew, F=LV are points on the GP at X.
"""
X_data, _Y_data = self.data
mu, var = conditional(
Xnew, X_data, self.kernel, self.V, full_cov=full_cov, q_sqrt=None, white=True
)
return mu + self.mean_function(Xnew), var
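# Illustrative sketch (not part of GPflow): the whitened parameterization
# described in the GPMC docstring, spelled out in plain NumPy. All values
# below are hypothetical; the guard keeps the demo from running on import.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = np.linspace(0.0, 1.0, 5)[:, None]
    # A toy squared-exponential kernel matrix, with jitter for stability.
    K_demo = np.exp(-0.5 * (X_demo - X_demo.T) ** 2 / 0.1 ** 2) + 1e-6 * np.eye(5)
    L_demo = np.linalg.cholesky(K_demo)   # L L^T = K
    v_demo = rng.standard_normal((5, 1))  # v ~ N(0, I)
    f_demo = L_demo @ v_demo              # f = L v  (+ m(X) for a nonzero mean)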
|
GPflow/GPflow
|
gpflow/models/gpmc.py
|
Python
|
apache-2.0
| 4,024
|
[
"Gaussian"
] |
f52c5a77da38231963366c6c66be5fc2ba2f88d6542c14857778080bef32748c
|
import os
import math
import numpy as np
import cPickle as pickle
from ase import Atoms
from ase.data import chemical_symbols
from ase.cluster.base import ClusterBase
from ase.cluster.clusteratom import ClusterAtom
class Cluster(Atoms, ClusterBase):
_datasyn = {'numbers': ('number', int, () ),
'positions': ('position', float, (3,)),
'tags': ('tag', int, () ),
'momenta': ('momentum', float, (3,)),
'masses': ('mass', float, () ),
'magmoms': ('magmom', float, () ),
'charges': ('charge', float, () ),
}
symmetry = None
center = None
surfaces = None
lattice_basis = None
resiproc_basis = None
atomic_basis = None
multiplicity = 1
def __getitem__(self, i):
c = ClusterAtom(atoms=self, index=i)
return c
def __setitem__(self, i, atom):
        #raise Warning('Use direct assignment like atoms[i].type = x!')
        #If implemented, make sure that all values are cleared before being copied
if not isinstance(atom, ClusterAtom):
raise Warning('The added atom is not a ClusterAtom instance!')
for name in self.arrays.keys():
singular, dtype, shape = self._datasyn[name]
self[i]._set(singular, np.zeros(shape, dtype))
self[i]._set('number', atom._get('number', True))
for name in atom._data:
self[i]._set(name, atom._get(name, True))
def append(self, atom):
if not isinstance(atom, ClusterAtom):
raise Warning('The added atom is not a ClusterAtom instance!')
n = len(self)
for name, a in self.arrays.items():
b = np.zeros((n + 1,) + a.shape[1:], a.dtype)
b[:n] = a
self.arrays[name] = b
for name in atom._data:
self[-1]._set(name, atom._get(name, True))
def extend(self, atoms):
if not isinstance(atoms, Cluster):
            raise Warning('The added atoms are not a Cluster instance!')
for atom in atoms:
self.append(atom)
def copy(self):
cluster = Atoms.copy(self)
cluster.symmetry = self.symmetry
cluster.center = self.center.copy()
cluster.surfaces = self.surfaces.copy()
cluster.lattice_basis = self.lattice_basis.copy()
cluster.atomic_basis = self.atomic_basis.copy()
cluster.resiproc_basis = self.resiproc_basis.copy()
return cluster
def get_surfaces(self):
"""Returns the miller indexs of the stored surfaces of the cluster."""
        if self.surfaces is not None:
return self.surfaces.copy()
else:
return None
def get_layers(self):
"""Returns the number of atomic layers in the stored surfaces directions."""
layers = []
for s in self.surfaces:
n = self.miller_to_direction(s)
r = np.dot(self.get_positions() - self.center, n).max()
d = self.get_layer_distance(s, 2)
l = 2 * np.round(r / d).astype(int)
ls = np.arange(l-1,l+2)
ds = np.array([self.get_layer_distance(s, i) for i in ls])
mask = (np.abs(ds - r) < 1e-10)
layers.append(ls[mask][0])
return np.array(layers, int)
def get_diameter(self, method='volume'):
"""Returns an estimate of the cluster diameter based on two different
methods.
method = 'volume': Returns the diameter of a sphere with the
same volume as the atoms. (Default)
method = 'shape': Returns the averaged diameter calculated from the
directions given by the defined surfaces.
"""
if method == 'shape':
pos = self.get_positions() - self.center
d = 0.0
for s in self.surfaces:
n = self.miller_to_direction(s)
r = np.dot(pos, n)
d += r.max() - r.min()
return d / len(self.surfaces)
elif method == 'volume':
V_cell = np.abs(np.linalg.det(self.lattice_basis))
N_cell = len(self.atomic_basis)
N = len(self)
return 2.0 * (3.0 * N * V_cell / (4.0 * math.pi * N_cell)) ** (1.0/3.0)
else:
return 0.0
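    # Worked example for method='volume' (hypothetical fcc cell with
    # V_cell = 47.2 A^3 and N_cell = 4 basis atoms): a 1000-atom cluster
    # gives 2 * (3 * 1000 * 47.2 / (4 * pi * 4)) ** (1/3) ~= 28.2 A.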
#Functions to store the cluster
def write(self, filename=None):
if not isinstance(filename, str):
raise Warning('You must specify a valid filename.')
if os.path.isfile(filename):
os.rename(filename, filename + '.bak')
d = {'symmetry': self.symmetry,
'center': self.get_center(),
'surfaces': self.surfaces,
'lattice_basis': self.lattice_basis,
'resiproc_basis': self.resiproc_basis,
'atomic_basis': self.atomic_basis,
'multiplicity': self.multiplicity,
'cell': self.get_cell(),
'pbc': self.get_pbc()}
f = open(filename, 'w')
f.write('Cluster')
pickle.dump(d, f)
pickle.dump(self.arrays, f)
f.close()
def read(self, filename):
if not os.path.isfile(filename):
            raise Warning('The file specified does not exist.')
f = open(filename, 'r')
try:
if f.read(len('Cluster')) != 'Cluster':
raise Warning('This is not a compatible file.')
d = pickle.load(f)
self.arrays = pickle.load(f)
except EOFError:
            raise Warning('Bad file.')
f.close()
if 'multiplicity' in d:
self.multiplicity = d['multiplicity']
else:
self.multiplicity = 1
self.symmetry = d['symmetry']
self.center = d['center']
self.surfaces = d['surfaces']
self.lattice_basis = d['lattice_basis']
self.resiproc_basis = d['resiproc_basis']
self.atomic_basis = d['atomic_basis']
self.set_cell(d['cell'])
self.set_pbc(d['pbc'])
self.set_constraint()
self.adsorbate_info = {}
self.calc = None
|
slabanja/ase
|
ase/cluster/cluster.py
|
Python
|
gpl-2.0
| 6,238
|
[
"ASE"
] |
736dbd0ed702b4b3219c9519e896a37b90c7a402045acacb2b43dee7a8f5b206
|
# coding: utf-8
from __future__ import unicode_literals
import binascii
import collections
import email
import getpass
import io
import optparse
import os
import re
import shlex
import shutil
import socket
import struct
import subprocess
import sys
import itertools
import xml.etree.ElementTree
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
import Cookie as compat_cookies
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try: # Python >= 3.3
compat_html_entities_html5 = compat_html_entities.html5
except AttributeError:
# Copied from CPython 3.5.1 html/entities.py
compat_html_entities_html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': '\u0395',
'epsilon;': '\u03b5',
'epsiv;': '\u03f5',
'eqcirc;': '\u2256',
'eqcolon;': '\u2255',
'eqsim;': '\u2242',
'eqslantgtr;': '\u2a96',
'eqslantless;': '\u2a95',
'Equal;': '\u2a75',
'equals;': '=',
'EqualTilde;': '\u2242',
'equest;': '\u225f',
'Equilibrium;': '\u21cc',
'equiv;': '\u2261',
'equivDD;': '\u2a78',
'eqvparsl;': '\u29e5',
'erarr;': '\u2971',
'erDot;': '\u2253',
'Escr;': '\u2130',
'escr;': '\u212f',
'esdot;': '\u2250',
'Esim;': '\u2a73',
'esim;': '\u2242',
'Eta;': '\u0397',
'eta;': '\u03b7',
'ETH': '\xd0',
'eth': '\xf0',
'ETH;': '\xd0',
'eth;': '\xf0',
'Euml': '\xcb',
'euml': '\xeb',
'Euml;': '\xcb',
'euml;': '\xeb',
'euro;': '\u20ac',
'excl;': '!',
'exist;': '\u2203',
'Exists;': '\u2203',
'expectation;': '\u2130',
'ExponentialE;': '\u2147',
'exponentiale;': '\u2147',
'fallingdotseq;': '\u2252',
'Fcy;': '\u0424',
'fcy;': '\u0444',
'female;': '\u2640',
'ffilig;': '\ufb03',
'fflig;': '\ufb00',
'ffllig;': '\ufb04',
'Ffr;': '\U0001d509',
'ffr;': '\U0001d523',
'filig;': '\ufb01',
'FilledSmallSquare;': '\u25fc',
'FilledVerySmallSquare;': '\u25aa',
'fjlig;': 'fj',
'flat;': '\u266d',
'fllig;': '\ufb02',
'fltns;': '\u25b1',
'fnof;': '\u0192',
'Fopf;': '\U0001d53d',
'fopf;': '\U0001d557',
'ForAll;': '\u2200',
'forall;': '\u2200',
'fork;': '\u22d4',
'forkv;': '\u2ad9',
'Fouriertrf;': '\u2131',
'fpartint;': '\u2a0d',
'frac12': '\xbd',
'frac12;': '\xbd',
'frac13;': '\u2153',
'frac14': '\xbc',
'frac14;': '\xbc',
'frac15;': '\u2155',
'frac16;': '\u2159',
'frac18;': '\u215b',
'frac23;': '\u2154',
'frac25;': '\u2156',
'frac34': '\xbe',
'frac34;': '\xbe',
'frac35;': '\u2157',
'frac38;': '\u215c',
'frac45;': '\u2158',
'frac56;': '\u215a',
'frac58;': '\u215d',
'frac78;': '\u215e',
'frasl;': '\u2044',
'frown;': '\u2322',
'Fscr;': '\u2131',
'fscr;': '\U0001d4bb',
'gacute;': '\u01f5',
'Gamma;': '\u0393',
'gamma;': '\u03b3',
'Gammad;': '\u03dc',
'gammad;': '\u03dd',
'gap;': '\u2a86',
'Gbreve;': '\u011e',
'gbreve;': '\u011f',
'Gcedil;': '\u0122',
'Gcirc;': '\u011c',
'gcirc;': '\u011d',
'Gcy;': '\u0413',
'gcy;': '\u0433',
'Gdot;': '\u0120',
'gdot;': '\u0121',
'gE;': '\u2267',
'ge;': '\u2265',
'gEl;': '\u2a8c',
'gel;': '\u22db',
'geq;': '\u2265',
'geqq;': '\u2267',
'geqslant;': '\u2a7e',
'ges;': '\u2a7e',
'gescc;': '\u2aa9',
'gesdot;': '\u2a80',
'gesdoto;': '\u2a82',
'gesdotol;': '\u2a84',
'gesl;': '\u22db\ufe00',
'gesles;': '\u2a94',
'Gfr;': '\U0001d50a',
'gfr;': '\U0001d524',
'Gg;': '\u22d9',
'gg;': '\u226b',
'ggg;': '\u22d9',
'gimel;': '\u2137',
'GJcy;': '\u0403',
'gjcy;': '\u0453',
'gl;': '\u2277',
'gla;': '\u2aa5',
'glE;': '\u2a92',
'glj;': '\u2aa4',
'gnap;': '\u2a8a',
'gnapprox;': '\u2a8a',
'gnE;': '\u2269',
'gne;': '\u2a88',
'gneq;': '\u2a88',
'gneqq;': '\u2269',
'gnsim;': '\u22e7',
'Gopf;': '\U0001d53e',
'gopf;': '\U0001d558',
'grave;': '`',
'GreaterEqual;': '\u2265',
'GreaterEqualLess;': '\u22db',
'GreaterFullEqual;': '\u2267',
'GreaterGreater;': '\u2aa2',
'GreaterLess;': '\u2277',
'GreaterSlantEqual;': '\u2a7e',
'GreaterTilde;': '\u2273',
'Gscr;': '\U0001d4a2',
'gscr;': '\u210a',
'gsim;': '\u2273',
'gsime;': '\u2a8e',
'gsiml;': '\u2a90',
'GT': '>',
'gt': '>',
'GT;': '>',
'Gt;': '\u226b',
'gt;': '>',
'gtcc;': '\u2aa7',
'gtcir;': '\u2a7a',
'gtdot;': '\u22d7',
'gtlPar;': '\u2995',
'gtquest;': '\u2a7c',
'gtrapprox;': '\u2a86',
'gtrarr;': '\u2978',
'gtrdot;': '\u22d7',
'gtreqless;': '\u22db',
'gtreqqless;': '\u2a8c',
'gtrless;': '\u2277',
'gtrsim;': '\u2273',
'gvertneqq;': '\u2269\ufe00',
'gvnE;': '\u2269\ufe00',
'Hacek;': '\u02c7',
'hairsp;': '\u200a',
'half;': '\xbd',
'hamilt;': '\u210b',
'HARDcy;': '\u042a',
'hardcy;': '\u044a',
'hArr;': '\u21d4',
'harr;': '\u2194',
'harrcir;': '\u2948',
'harrw;': '\u21ad',
'Hat;': '^',
'hbar;': '\u210f',
'Hcirc;': '\u0124',
'hcirc;': '\u0125',
'hearts;': '\u2665',
'heartsuit;': '\u2665',
'hellip;': '\u2026',
'hercon;': '\u22b9',
'Hfr;': '\u210c',
'hfr;': '\U0001d525',
'HilbertSpace;': '\u210b',
'hksearow;': '\u2925',
'hkswarow;': '\u2926',
'hoarr;': '\u21ff',
'homtht;': '\u223b',
'hookleftarrow;': '\u21a9',
'hookrightarrow;': '\u21aa',
'Hopf;': '\u210d',
'hopf;': '\U0001d559',
'horbar;': '\u2015',
'HorizontalLine;': '\u2500',
'Hscr;': '\u210b',
'hscr;': '\U0001d4bd',
'hslash;': '\u210f',
'Hstrok;': '\u0126',
'hstrok;': '\u0127',
'HumpDownHump;': '\u224e',
'HumpEqual;': '\u224f',
'hybull;': '\u2043',
'hyphen;': '\u2010',
'Iacute': '\xcd',
'iacute': '\xed',
'Iacute;': '\xcd',
'iacute;': '\xed',
'ic;': '\u2063',
'Icirc': '\xce',
'icirc': '\xee',
'Icirc;': '\xce',
'icirc;': '\xee',
'Icy;': '\u0418',
'icy;': '\u0438',
'Idot;': '\u0130',
'IEcy;': '\u0415',
'iecy;': '\u0435',
'iexcl': '\xa1',
'iexcl;': '\xa1',
'iff;': '\u21d4',
'Ifr;': '\u2111',
'ifr;': '\U0001d526',
'Igrave': '\xcc',
'igrave': '\xec',
'Igrave;': '\xcc',
'igrave;': '\xec',
'ii;': '\u2148',
'iiiint;': '\u2a0c',
'iiint;': '\u222d',
'iinfin;': '\u29dc',
'iiota;': '\u2129',
'IJlig;': '\u0132',
'ijlig;': '\u0133',
'Im;': '\u2111',
'Imacr;': '\u012a',
'imacr;': '\u012b',
'image;': '\u2111',
'ImaginaryI;': '\u2148',
'imagline;': '\u2110',
'imagpart;': '\u2111',
'imath;': '\u0131',
'imof;': '\u22b7',
'imped;': '\u01b5',
'Implies;': '\u21d2',
'in;': '\u2208',
'incare;': '\u2105',
'infin;': '\u221e',
'infintie;': '\u29dd',
'inodot;': '\u0131',
'Int;': '\u222c',
'int;': '\u222b',
'intcal;': '\u22ba',
'integers;': '\u2124',
'Integral;': '\u222b',
'intercal;': '\u22ba',
'Intersection;': '\u22c2',
'intlarhk;': '\u2a17',
'intprod;': '\u2a3c',
'InvisibleComma;': '\u2063',
'InvisibleTimes;': '\u2062',
'IOcy;': '\u0401',
'iocy;': '\u0451',
'Iogon;': '\u012e',
'iogon;': '\u012f',
'Iopf;': '\U0001d540',
'iopf;': '\U0001d55a',
'Iota;': '\u0399',
'iota;': '\u03b9',
'iprod;': '\u2a3c',
'iquest': '\xbf',
'iquest;': '\xbf',
'Iscr;': '\u2110',
'iscr;': '\U0001d4be',
'isin;': '\u2208',
'isindot;': '\u22f5',
'isinE;': '\u22f9',
'isins;': '\u22f4',
'isinsv;': '\u22f3',
'isinv;': '\u2208',
'it;': '\u2062',
'Itilde;': '\u0128',
'itilde;': '\u0129',
'Iukcy;': '\u0406',
'iukcy;': '\u0456',
'Iuml': '\xcf',
'iuml': '\xef',
'Iuml;': '\xcf',
'iuml;': '\xef',
'Jcirc;': '\u0134',
'jcirc;': '\u0135',
'Jcy;': '\u0419',
'jcy;': '\u0439',
'Jfr;': '\U0001d50d',
'jfr;': '\U0001d527',
'jmath;': '\u0237',
'Jopf;': '\U0001d541',
'jopf;': '\U0001d55b',
'Jscr;': '\U0001d4a5',
'jscr;': '\U0001d4bf',
'Jsercy;': '\u0408',
'jsercy;': '\u0458',
'Jukcy;': '\u0404',
'jukcy;': '\u0454',
'Kappa;': '\u039a',
'kappa;': '\u03ba',
'kappav;': '\u03f0',
'Kcedil;': '\u0136',
'kcedil;': '\u0137',
'Kcy;': '\u041a',
'kcy;': '\u043a',
'Kfr;': '\U0001d50e',
'kfr;': '\U0001d528',
'kgreen;': '\u0138',
'KHcy;': '\u0425',
'khcy;': '\u0445',
'KJcy;': '\u040c',
'kjcy;': '\u045c',
'Kopf;': '\U0001d542',
'kopf;': '\U0001d55c',
'Kscr;': '\U0001d4a6',
'kscr;': '\U0001d4c0',
'lAarr;': '\u21da',
'Lacute;': '\u0139',
'lacute;': '\u013a',
'laemptyv;': '\u29b4',
'lagran;': '\u2112',
'Lambda;': '\u039b',
'lambda;': '\u03bb',
'Lang;': '\u27ea',
'lang;': '\u27e8',
'langd;': '\u2991',
'langle;': '\u27e8',
'lap;': '\u2a85',
'Laplacetrf;': '\u2112',
'laquo': '\xab',
'laquo;': '\xab',
'Larr;': '\u219e',
'lArr;': '\u21d0',
'larr;': '\u2190',
'larrb;': '\u21e4',
'larrbfs;': '\u291f',
'larrfs;': '\u291d',
'larrhk;': '\u21a9',
'larrlp;': '\u21ab',
'larrpl;': '\u2939',
'larrsim;': '\u2973',
'larrtl;': '\u21a2',
'lat;': '\u2aab',
'lAtail;': '\u291b',
'latail;': '\u2919',
'late;': '\u2aad',
'lates;': '\u2aad\ufe00',
'lBarr;': '\u290e',
'lbarr;': '\u290c',
'lbbrk;': '\u2772',
'lbrace;': '{',
'lbrack;': '[',
'lbrke;': '\u298b',
'lbrksld;': '\u298f',
'lbrkslu;': '\u298d',
'Lcaron;': '\u013d',
'lcaron;': '\u013e',
'Lcedil;': '\u013b',
'lcedil;': '\u013c',
'lceil;': '\u2308',
'lcub;': '{',
'Lcy;': '\u041b',
'lcy;': '\u043b',
'ldca;': '\u2936',
'ldquo;': '\u201c',
'ldquor;': '\u201e',
'ldrdhar;': '\u2967',
'ldrushar;': '\u294b',
'ldsh;': '\u21b2',
'lE;': '\u2266',
'le;': '\u2264',
'LeftAngleBracket;': '\u27e8',
'LeftArrow;': '\u2190',
'Leftarrow;': '\u21d0',
'leftarrow;': '\u2190',
'LeftArrowBar;': '\u21e4',
'LeftArrowRightArrow;': '\u21c6',
'leftarrowtail;': '\u21a2',
'LeftCeiling;': '\u2308',
'LeftDoubleBracket;': '\u27e6',
'LeftDownTeeVector;': '\u2961',
'LeftDownVector;': '\u21c3',
'LeftDownVectorBar;': '\u2959',
'LeftFloor;': '\u230a',
'leftharpoondown;': '\u21bd',
'leftharpoonup;': '\u21bc',
'leftleftarrows;': '\u21c7',
'LeftRightArrow;': '\u2194',
'Leftrightarrow;': '\u21d4',
'leftrightarrow;': '\u2194',
'leftrightarrows;': '\u21c6',
'leftrightharpoons;': '\u21cb',
'leftrightsquigarrow;': '\u21ad',
'LeftRightVector;': '\u294e',
'LeftTee;': '\u22a3',
'LeftTeeArrow;': '\u21a4',
'LeftTeeVector;': '\u295a',
'leftthreetimes;': '\u22cb',
'LeftTriangle;': '\u22b2',
'LeftTriangleBar;': '\u29cf',
'LeftTriangleEqual;': '\u22b4',
'LeftUpDownVector;': '\u2951',
'LeftUpTeeVector;': '\u2960',
'LeftUpVector;': '\u21bf',
'LeftUpVectorBar;': '\u2958',
'LeftVector;': '\u21bc',
'LeftVectorBar;': '\u2952',
'lEg;': '\u2a8b',
'leg;': '\u22da',
'leq;': '\u2264',
'leqq;': '\u2266',
'leqslant;': '\u2a7d',
'les;': '\u2a7d',
'lescc;': '\u2aa8',
'lesdot;': '\u2a7f',
'lesdoto;': '\u2a81',
'lesdotor;': '\u2a83',
'lesg;': '\u22da\ufe00',
'lesges;': '\u2a93',
'lessapprox;': '\u2a85',
'lessdot;': '\u22d6',
'lesseqgtr;': '\u22da',
'lesseqqgtr;': '\u2a8b',
'LessEqualGreater;': '\u22da',
'LessFullEqual;': '\u2266',
'LessGreater;': '\u2276',
'lessgtr;': '\u2276',
'LessLess;': '\u2aa1',
'lesssim;': '\u2272',
'LessSlantEqual;': '\u2a7d',
'LessTilde;': '\u2272',
'lfisht;': '\u297c',
'lfloor;': '\u230a',
'Lfr;': '\U0001d50f',
'lfr;': '\U0001d529',
'lg;': '\u2276',
'lgE;': '\u2a91',
'lHar;': '\u2962',
'lhard;': '\u21bd',
'lharu;': '\u21bc',
'lharul;': '\u296a',
'lhblk;': '\u2584',
'LJcy;': '\u0409',
'ljcy;': '\u0459',
'Ll;': '\u22d8',
'll;': '\u226a',
'llarr;': '\u21c7',
'llcorner;': '\u231e',
'Lleftarrow;': '\u21da',
'llhard;': '\u296b',
'lltri;': '\u25fa',
'Lmidot;': '\u013f',
'lmidot;': '\u0140',
'lmoust;': '\u23b0',
'lmoustache;': '\u23b0',
'lnap;': '\u2a89',
'lnapprox;': '\u2a89',
'lnE;': '\u2268',
'lne;': '\u2a87',
'lneq;': '\u2a87',
'lneqq;': '\u2268',
'lnsim;': '\u22e6',
'loang;': '\u27ec',
'loarr;': '\u21fd',
'lobrk;': '\u27e6',
'LongLeftArrow;': '\u27f5',
'Longleftarrow;': '\u27f8',
'longleftarrow;': '\u27f5',
'LongLeftRightArrow;': '\u27f7',
'Longleftrightarrow;': '\u27fa',
'longleftrightarrow;': '\u27f7',
'longmapsto;': '\u27fc',
'LongRightArrow;': '\u27f6',
'Longrightarrow;': '\u27f9',
'longrightarrow;': '\u27f6',
'looparrowleft;': '\u21ab',
'looparrowright;': '\u21ac',
'lopar;': '\u2985',
'Lopf;': '\U0001d543',
'lopf;': '\U0001d55d',
'loplus;': '\u2a2d',
'lotimes;': '\u2a34',
'lowast;': '\u2217',
'lowbar;': '_',
'LowerLeftArrow;': '\u2199',
'LowerRightArrow;': '\u2198',
'loz;': '\u25ca',
'lozenge;': '\u25ca',
'lozf;': '\u29eb',
'lpar;': '(',
'lparlt;': '\u2993',
'lrarr;': '\u21c6',
'lrcorner;': '\u231f',
'lrhar;': '\u21cb',
'lrhard;': '\u296d',
'lrm;': '\u200e',
'lrtri;': '\u22bf',
'lsaquo;': '\u2039',
'Lscr;': '\u2112',
'lscr;': '\U0001d4c1',
'Lsh;': '\u21b0',
'lsh;': '\u21b0',
'lsim;': '\u2272',
'lsime;': '\u2a8d',
'lsimg;': '\u2a8f',
'lsqb;': '[',
'lsquo;': '\u2018',
'lsquor;': '\u201a',
'Lstrok;': '\u0141',
'lstrok;': '\u0142',
'LT': '<',
'lt': '<',
'LT;': '<',
'Lt;': '\u226a',
'lt;': '<',
'ltcc;': '\u2aa6',
'ltcir;': '\u2a79',
'ltdot;': '\u22d6',
'lthree;': '\u22cb',
'ltimes;': '\u22c9',
'ltlarr;': '\u2976',
'ltquest;': '\u2a7b',
'ltri;': '\u25c3',
'ltrie;': '\u22b4',
'ltrif;': '\u25c2',
'ltrPar;': '\u2996',
'lurdshar;': '\u294a',
'luruhar;': '\u2966',
'lvertneqq;': '\u2268\ufe00',
'lvnE;': '\u2268\ufe00',
'macr': '\xaf',
'macr;': '\xaf',
'male;': '\u2642',
'malt;': '\u2720',
'maltese;': '\u2720',
'Map;': '\u2905',
'map;': '\u21a6',
'mapsto;': '\u21a6',
'mapstodown;': '\u21a7',
'mapstoleft;': '\u21a4',
'mapstoup;': '\u21a5',
'marker;': '\u25ae',
'mcomma;': '\u2a29',
'Mcy;': '\u041c',
'mcy;': '\u043c',
'mdash;': '\u2014',
'mDDot;': '\u223a',
'measuredangle;': '\u2221',
'MediumSpace;': '\u205f',
'Mellintrf;': '\u2133',
'Mfr;': '\U0001d510',
'mfr;': '\U0001d52a',
'mho;': '\u2127',
'micro': '\xb5',
'micro;': '\xb5',
'mid;': '\u2223',
'midast;': '*',
'midcir;': '\u2af0',
'middot': '\xb7',
'middot;': '\xb7',
'minus;': '\u2212',
'minusb;': '\u229f',
'minusd;': '\u2238',
'minusdu;': '\u2a2a',
'MinusPlus;': '\u2213',
'mlcp;': '\u2adb',
'mldr;': '\u2026',
'mnplus;': '\u2213',
'models;': '\u22a7',
'Mopf;': '\U0001d544',
'mopf;': '\U0001d55e',
'mp;': '\u2213',
'Mscr;': '\u2133',
'mscr;': '\U0001d4c2',
'mstpos;': '\u223e',
'Mu;': '\u039c',
'mu;': '\u03bc',
'multimap;': '\u22b8',
'mumap;': '\u22b8',
'nabla;': '\u2207',
'Nacute;': '\u0143',
'nacute;': '\u0144',
'nang;': '\u2220\u20d2',
'nap;': '\u2249',
'napE;': '\u2a70\u0338',
'napid;': '\u224b\u0338',
'napos;': '\u0149',
'napprox;': '\u2249',
'natur;': '\u266e',
'natural;': '\u266e',
'naturals;': '\u2115',
'nbsp': '\xa0',
'nbsp;': '\xa0',
'nbump;': '\u224e\u0338',
'nbumpe;': '\u224f\u0338',
'ncap;': '\u2a43',
'Ncaron;': '\u0147',
'ncaron;': '\u0148',
'Ncedil;': '\u0145',
'ncedil;': '\u0146',
'ncong;': '\u2247',
'ncongdot;': '\u2a6d\u0338',
'ncup;': '\u2a42',
'Ncy;': '\u041d',
'ncy;': '\u043d',
'ndash;': '\u2013',
'ne;': '\u2260',
'nearhk;': '\u2924',
'neArr;': '\u21d7',
'nearr;': '\u2197',
'nearrow;': '\u2197',
'nedot;': '\u2250\u0338',
'NegativeMediumSpace;': '\u200b',
'NegativeThickSpace;': '\u200b',
'NegativeThinSpace;': '\u200b',
'NegativeVeryThinSpace;': '\u200b',
'nequiv;': '\u2262',
'nesear;': '\u2928',
'nesim;': '\u2242\u0338',
'NestedGreaterGreater;': '\u226b',
'NestedLessLess;': '\u226a',
'NewLine;': '\n',
'nexist;': '\u2204',
'nexists;': '\u2204',
'Nfr;': '\U0001d511',
'nfr;': '\U0001d52b',
'ngE;': '\u2267\u0338',
'nge;': '\u2271',
'ngeq;': '\u2271',
'ngeqq;': '\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from html.parser import HTMLParser as compat_HTMLParser
except ImportError: # Python 2
from HTMLParser import HTMLParser as compat_HTMLParser
try: # Python 2
from HTMLParser import HTMLParseError as compat_HTMLParseError
except ImportError: # Python <3.4
try:
from html.parser import HTMLParseError as compat_HTMLParseError
except ImportError: # Python >3.4
# HTMLParseError was deprecated in Python 3.3 and removed in
# Python 3.5. Introduce a dummy exception for Python 3.5+ to keep
# cross-version exception handling compatible and uniform.
class compat_HTMLParseError(Exception):
pass
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
import http.server as compat_http_server
except ImportError:
import BaseHTTPServer as compat_http_server
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError: # Python 2
_asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
else re.compile(r'([\x00-\x7f]+)'))
# HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
# implementations from cpython 3.4.3's stdlib. Python 2's version
# is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244)
def compat_urllib_parse_unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if the string
# contains unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, compat_str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(compat_urllib_parse._hextochr[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return compat_urllib_parse_unquote(string, encoding, errors)
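# Illustrative check (not part of the original module): both branches
# decode UTF-8 percent-escapes identically, e.g.
#   compat_urllib_parse_unquote('%E4%B8%AD%E6%96%87') -> '中文'
#   compat_urllib_parse_unquote_plus('a+b%20c') -> 'a b c'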
try:
from urllib.parse import urlencode as compat_urllib_parse_urlencode
except ImportError: # Python 2
# Python 2's urlencode chokes on a mixture of byte and unicode strings.
# Possible solutions are to either port the Python 3 version with all of
# its helpers, or to manually ensure the input query contains only byte
# strings. We take the latter route, recursively encoding the whole query.
def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
def encode_elem(e):
if isinstance(e, dict):
e = encode_dict(e)
elif isinstance(e, (list, tuple,)):
list_e = encode_list(e)
e = tuple(list_e) if isinstance(e, tuple) else list_e
elif isinstance(e, compat_str):
e = e.encode(encoding)
return e
def encode_dict(d):
return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
def encode_list(l):
return [encode_elem(e) for e in l]
return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
# Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
def data_open(self, req):
# data URLs as specified in RFC 2397.
#
# ignores POSTed data
#
# syntax:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
url = req.get_full_url()
scheme, data = url.split(':', 1)
mediatype, data = data.split(',', 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
if mediatype.endswith(';base64'):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
mediatype = 'text/plain;charset=US-ASCII'
headers = email.message_from_string(
'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
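# Illustrative note (not part of the original module): with this handler
# installed in an opener chain, an RFC 2397 URL such as
#   'data:text/plain;base64,SGVsbG8='
# resolves to the body b'Hello' with Content-type 'text/plain'.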
try:
compat_basestring = basestring # Python 2
except NameError:
compat_basestring = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
etree = xml.etree.ElementTree
class _TreeBuilder(etree.TreeBuilder):
def doctype(self, name, pubid, system):
pass
if sys.version_info[0] >= 3:
def compat_etree_fromstring(text):
return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))
else:
# python 2.x tries to encode unicode strings with ascii (see the
# XMLParser._fixtext method)
try:
_etree_iter = etree.Element.iter
except AttributeError: # Python <=2.6
def _etree_iter(root):
for el in root.findall('*'):
yield el
for sub in _etree_iter(el):
yield sub
# on 2.6, etree.XML does not accept a parser argument; this function is
# copied from the CPython 2.7 source
def _XML(text, parser=None):
if not parser:
parser = etree.XMLParser(target=_TreeBuilder())
parser.feed(text)
return parser.close()
def _element_factory(*args, **kwargs):
el = etree.Element(*args, **kwargs)
for k, v in el.items():
if isinstance(v, bytes):
el.set(k, v.decode('utf-8'))
return el
def compat_etree_fromstring(text):
doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
for el in _etree_iter(doc):
if el.text is not None and isinstance(el.text, bytes):
el.text = el.text.decode('utf-8')
return doc
if hasattr(etree, 'register_namespace'):
compat_etree_register_namespace = etree.register_namespace
else:
def compat_etree_register_namespace(prefix, uri):
"""Register a namespace prefix.
The registry is global, and any existing mapping for either the
given prefix or the namespace URI will be removed.
*prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
attributes in this namespace will be serialized with prefix if possible.
ValueError is raised if prefix is reserved or is invalid.
"""
if re.match(r"ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(etree._namespace_map.items()):
if k == uri or v == prefix:
del etree._namespace_map[k]
etree._namespace_map[uri] = prefix
if sys.version_info < (2, 7):
# Here comes the crazy part: in 2.6, if the xpath is a unicode string,
# .//node does not match if a node is a direct child of . !
def compat_xpath(xpath):
if isinstance(xpath, compat_str):
xpath = xpath.encode('ascii')
return xpath
else:
compat_xpath = lambda xpath: xpath
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, compat_str
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
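# Illustrative check (not part of the original module): repeated keys are
# accumulated into lists, matching the Python 3 stdlib behaviour:
#   compat_parse_qs('a=1&a=2&b=%E4%B8%AD') -> {'a': ['1', '2'], 'b': ['中']}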
compat_os_name = os._name if os.name == 'java' else os.name
if compat_os_name == 'nt':
def compat_shlex_quote(s):
return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
else:
try:
from shlex import quote as compat_shlex_quote
except ImportError: # Python < 3.3
def compat_shlex_quote(s):
if re.match(r'^[-_\w./]+$', s):
return s
else:
return "'" + s.replace("'", "'\"'\"'") + "'"
try:
args = shlex.split('中文')
assert (isinstance(args, list) and
isinstance(args[0], compat_str) and
args[0] == '中文')
compat_shlex_split = shlex.split
except (AssertionError, UnicodeEncodeError):
# Working around shlex issue with unicode strings on some python 2
# versions (see http://bugs.python.org/issue1548891)
def compat_shlex_split(s, comments=False, posix=True):
if isinstance(s, compat_str):
s = s.encode('utf-8')
return list(map(lambda s: s.decode('utf-8'), shlex.split(s, comments, posix)))
def compat_ord(c):
if type(c) is int:
return c
else:
return ord(c)
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
def compat_setenv(key, value, env=os.environ):
env[key] = value
else:
# Environment variables should be decoded with the filesystem encoding.
# Otherwise lookups fail if any non-ASCII characters are present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
def compat_setenv(key, value, env=os.environ):
def encode(v):
from .utils import get_filesystem_encoding
return v.encode(get_filesystem_encoding()) if isinstance(v, compat_str) else v
env[encode(key)] = encode(value)
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variable decoding.
if compat_os_name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif compat_os_name in ('nt', 'ce'):
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif 'HOMEPATH' not in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: # ~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
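# Illustrative note (not part of the original module): the adjusted POSIX
# implementation reads $HOME through compat_getenv, so a home directory
# with non-ASCII characters expands to a proper unicode path, e.g.
#   compat_expanduser(u'~/videos') -> u'/home/andrés/videos'
# where the stock Python 2 os.path.expanduser would return undecoded bytes.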
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert isinstance(s, compat_str)
print(s)
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
try:
compat_input = raw_input
except NameError: # Python 3
compat_input = input
# Python < 2.6.5 requires kwargs to be bytes
try:
def _testfunc(x):
pass
_testfunc(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
try:
compat_numeric_types = (int, float, long, complex)
except NameError: # Python 3
compat_numeric_types = (int, float, complex)
if sys.version_info < (2, 7):
def compat_socket_create_connection(address, timeout, source_address=None):
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
else:
compat_socket_create_connection = socket.create_connection
# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
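# Illustrative usage (not part of the original module): the workaround
# monkey-patches optparse.OptionGroup.add_option in place, so it must run
# once before any real option groups are populated:
#   workaround_optparse_bug9161()
#   parser = optparse.OptionParser()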
if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
compat_get_terminal_size = shutil.get_terminal_size
else:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
def compat_get_terminal_size(fallback=(80, 24)):
columns = compat_getenv('COLUMNS')
if columns:
columns = int(columns)
else:
columns = None
lines = compat_getenv('LINES')
if lines:
lines = int(lines)
else:
lines = None
if columns is None or lines is None or columns <= 0 or lines <= 0:
try:
sp = subprocess.Popen(
['stty', 'size'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
_lines, _columns = map(int, out.split())
except Exception:
_columns, _lines = _terminal_size(*fallback)
if columns is None or columns <= 0:
columns = _columns
if lines is None or lines <= 0:
lines = _lines
return _terminal_size(columns, lines)
try:
itertools.count(start=0, step=1)
compat_itertools_count = itertools.count
except TypeError: # Python 2.6
def compat_itertools_count(start=0, step=1):
n = start
while True:
yield n
n += step
if sys.version_info >= (3, 0):
from tokenize import tokenize as compat_tokenize_tokenize
else:
from tokenize import generate_tokens as compat_tokenize_tokenize
try:
struct.pack('!I', 0)
except TypeError:
# In Python 2.6 and 2.7.x < 2.7.7, struct requires a bytes argument
# See https://bugs.python.org/issue19099
def compat_struct_pack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.pack(spec, *args)
def compat_struct_unpack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.unpack(spec, *args)
else:
compat_struct_pack = struct.pack
compat_struct_unpack = struct.unpack
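# Illustrative check (not part of the original module): the spec may be
# given as a native or unicode string on every supported interpreter:
#   compat_struct_pack('!I', 1) -> b'\x00\x00\x00\x01'
#   compat_struct_unpack('!I', b'\x00\x00\x00\x01') -> (1,)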
__all__ = [
'compat_HTMLParseError',
'compat_HTMLParser',
'compat_HTTPError',
'compat_basestring',
'compat_chr',
'compat_cookiejar',
'compat_cookies',
'compat_etree_fromstring',
'compat_etree_register_namespace',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_entities_html5',
'compat_http_client',
'compat_http_server',
'compat_input',
'compat_itertools_count',
'compat_kwargs',
'compat_numeric_types',
'compat_ord',
'compat_os_name',
'compat_parse_qs',
'compat_print',
'compat_setenv',
'compat_shlex_quote',
'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_struct_pack',
'compat_struct_unpack',
'compat_subprocess_get_DEVNULL',
'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlencode',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'compat_xpath',
'workaround_optparse_bug9161',
]
|
coreynicholson/youtube-dl
|
youtube_dl/compat.py
|
Python
|
unlicense
| 90,357
|
[
"Bowtie"
] |
27ac0b37ab6d5d740c92cf94ac83050c112c24457cd7ac7b1c98311055b71454
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import random
from flumotion.twisted import defer
from flumotion.common.identity import RemoteIdentity
from flumotion.component.plugs import base
__version__ = "$Rev$"
class IdentityProviderPlug(base.ManagerPlug):
"""
Base class for plugs that can calculate an identity for a remote host. See
L{flumotion.manager.manager.Vishnu.computeIdentity} for more
information.
"""
def computeIdentity(self, keycard, remoteHost):
"""
@param keycard: the keycard that the remote host used to log in.
@type keycard: L{flumotion.common.keycards.Keycard}
@param remoteHost: the ip of the remote host
@type remoteHost: str
@rtype: a deferred that will fire a
L{flumotion.common.identity.RemoteIdentity}
"""
raise NotImplementedError
class IdentityProviderExamplePlug(IdentityProviderPlug):
"""
Example implementation of the IdentityProviderPlug socket that randomly
chooses an identity for the remote host.
"""
def computeIdentity(self, keycard, remoteHost):
i = RemoteIdentity(random.choice(['larry', 'curly', 'moe']),
random.choice(['chicago', 'detroit']))
return defer.succeed(i)
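class IdentityProviderHostPlug(IdentityProviderPlug):
    """
    Hypothetical sketch (not part of the original file): a deterministic
    counterpart to the example plug above. It derives the identity from
    the remote address, assuming RemoteIdentity accepts the same two
    positional fields used by IdentityProviderExamplePlug.
    """

    def computeIdentity(self, keycard, remoteHost):
        # The same host always maps to the same identity.
        return defer.succeed(RemoteIdentity('guest', remoteHost))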
|
flumotion-mirror/flumotion
|
flumotion/component/plugs/identity.py
|
Python
|
lgpl-2.1
| 1,893
|
[
"MOE"
] |
a1b7434161a1e3b02723f29e5828c2a9cf10776ef0e059c776dbfd3e8d0aa7a1
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.gto import mole
from pyscf.data import nist
# Nuclear g-factors are taken from http://easyspin.org/documentation/isotopetable.html
# isotope mass, spin, nuclear g-factor
ISOTOPE_GYRO = (
((0 , 0. , 0.0),),
((1 , 1./2, 5.58569468),), # H
((3 , 1./2, -4.25499544),), # He
((7 , 3./2, 2.170951),), # Li
((9 , 3./2, -0.78495),), # Be
((11 , 3./2, 1.7924326),), # B
((13 , 1./2, 1.4048236),), # C
((14 , 1. , 0.40376100),), # N
((17 , 5./2, -0.757516),), # O
((19 , 1./2, 5.257736),), # F
((21 , 3./2, -0.441198),), # Ne
((23 , 3./2, 1.478348),), # Na
((25 , 5./2, -0.34218),), # Mg
((27 , 5./2, 1.4566028),), # Al
((29 , 1./2, -1.11058),), # Si
((31 , 1./2, 2.26320),), # P
((33 , 3./2, 0.429214),), # S
((35 , 3./2, 0.5479162),), # Cl
((40 , 0 , 0.0),), # Ar
((39 , 3./2, 0.26098),), # K
((43 , 7./2, -0.37637),), # Ca
((45 , 7./2, 1.35899),), # Sc
((47 , 5./2, -0.31539),), # Ti
((51 , 7./2, 1.47106),), # V
((53 , 3./2, -0.31636),), # Cr
((55 , 5./2, 1.3813),), # Mn
((57 , 1./2, 0.1809),), # Fe
((59 , 7./2, 1.322),), # Co
((61 , 3./2, -0.50001),), # Ni
((63 , 3./2, 1.4824),), # Cu
((67 , 5./2, 0.350192),), # Zn
((69 , 3./2, 1.34439),), # Ga
((73 , 9./2, -0.1954373),), # Ge
((75 , 3./2, 0.95965),), # As
((77 , 1./2, 1.07008),), # Se
((79 , 3./2, 1.404267),), # Br
((83 , 9./2, -0.215704),), # Kr
((85 , 5./2, 0.541192),), # Rb
((87 , 9./2, -0.24284),), # Sr
((89 , 1./2, -0.2748308),), # Y
((91 , 5./2, -0.521448),), # Zr
((93 , 9./2, 1.3712),), # Nb
((95 , 5./2, -0.3657),), # Mo
((99 , 9./2, 1.2632),), # Tc
((101, 5./2, -0.288),), # Ru
((103, 1./2, -0.1768),), # Rh
((105, 5./2, -0.257),), # Pd
((107, 1./2, -0.22714),), # Ag
((111, 1./2, -1.18977),), # Cd
((115, 9./2, 1.2313),), # In
((119, 1./2, -2.09456),), # Sn
((121, 5./2, 1.3454),), # Sb
((125, 1./2, -1.7770102),), # Te
((127, 5./2, 1.12531),), # I
((129, 1./2, -1.55595),), # Xe
((133, 7./2, 0.7377214),), # Cs
((137, 3./2, 0.62491),), # Ba
((139, 7./2, 0.795156),), # La
((140, 0 , 0.0),), # Ce
((141, 5./2, 1.7102),), # Pr
((143, 7./2, -0.3043),), # Nd
((147, 7./2, 0.737),), # Pm
((147, 7./2, -0.232),), # Sm
((153, 5./2, 0.6134),), # Eu
((157, 3./2, -0.2265),), # Gd
((159, 3./2, 1.343),), # Tb
((161, 5./2, -0.192),), # Dy
((165, 7./2, 1.668),), # Ho
((167, 7./2, -0.1611),), # Er
((169, 1./2, -0.462),), # Tm
((171, 1./2, 0.98734),), # Yb
((175, 7./2, 0.6378),), # Lu
((177, 7./2, 0.2267),), # Hf
((181, 7./2, 0.67729),), # Ta
((183, 1./2, 0.2355695),), # W
((187, 5./2, 1.2879),), # Re
((187, 1./2, 0.1293038),), # Os
((193, 3./2, 0.1091),), # Ir
((195, 1./2, 1.2190),), # Pt
((197, 3./2, 0.097164),), # Au
((199, 1./2, 1.011771),), # Hg
((205, 1./2, 3.2764292),), # Tl
((207, 1./2, 1.18512),), # Pb
((209, 9./2, 0.9134),), # Bi
((209, 1./2, 1.5),), # Po
((210, 0. , 0.0),), # At
((209, 0. , 0.0),), # Rn
((223, 0. , 0.0),), # Fr
((223, 0. , 0.0),), # Ra
((227, 3./2, 0.73),), # Ac
((229, 5./2, 0.18),), # Th
((231, 3./2, 0.0),), # Pa
((235, 7./2, -0.109),), # U
((237, 5./2, 1.256),), # Np
((239, 1./2, 0.406),), # Pu
((243, 5./2, 0.6),), # Am
((247, 9./2, 0.0),), # Cm
)
def g_factor_to_gyromagnetic_ratio(g):
'''Gyromagnetic ratio in Hz/T, i.e. the Larmor frequency per tesla'''
return nist.NUC_MAGNETON/nist.PLANCK * g
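# Worked example (not part of the original module): for the proton,
# g = 5.58569468, so
#   g_factor_to_gyromagnetic_ratio(5.58569468) -> ~4.2577e7 Hz/T
# which is the familiar ~42.577 MHz/T proton NMR frequency.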
def get_nuc_g_factor(symb_or_charge, mass=None):
if isinstance(symb_or_charge, str):
Z = mole.charge(symb_or_charge)
else:
Z = symb_or_charge
# g factor of other isotopes can be found in file nuclear_g_factor.dat
if mass is None:
# The default isotopes
nuc_spin, g_nuc = ISOTOPE_GYRO[Z][0][1:3]
else:
for isotop_mass, nuc_spin, g_nuc in ISOTOPE_GYRO[Z]:
if isotop_mass == mass:
break
else:
raise ValueError('mass=%s not found in isotopes of %s' %
(mass, symb_or_charge))
#gyromag = g_factor_to_gyromagnetic_ratio(g_nuc)
return g_nuc
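# Illustrative usage (not part of the original module):
#   get_nuc_g_factor('H') -> 5.58569468 (default isotope, 1H)
#   get_nuc_g_factor('C', 13) -> 1.4048236 (explicit isotope mass)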
# Nuclear electric quadrupole moments
# http://dx.doi.org/10.1016/j.adt.2015.12.002
# Q in units of the barn (1 barn = 10^-28 m^2).
ISOTOPE_QUAD_MOMENT = (
(0 , 0. , 0. ),
(2 , 1. , 0.002862 ), # H
(0. , 0. , 0. ), # He
(7 , 3./2, 0.04003 ), # Li
(9 , 3./2, 0.05294 ), # Be
(11 , 3./2, 0.0405910), # B
(11 , 3./2, 0.03332 ), # C
(14 , 1. , 0.020443 ), # N
(17 , 5./2, 0.02562 ), # O
(19 , 0. , 0. ), # F
(21 , 3./2, 0.1028 ), # Ne
(23 , 3./2, 0.1041 ), # Na
(25 , 5./2, 0.1992 ), # Mg
(27 , 5./2, 0.146610 ), # Al
(28 , 0. , 0. ), # Si
(31 , 0. , 0. ), # P
(33 , 3./2, 0.067813 ), # S
(35 , 3./2, 0.08178 ), # Cl
(37 , 3./2, 0.0769 ), # Ar
(39 , 3./2, 0.05856 ), # K
(43 , 7./2, 0.04088 ), # Ca
(45 , 7./2, 0.2202 ), # Sc
(47 , 5./2, 0.30210 ), # Ti
(50 , 6. , 0.214 ), # V
(53 , 3./2, 0.155 ), # Cr
(55 , 5./2, 0.33010 ), # Mn
(57 , 0. , 0. ), # Fe
(59 , 7./2, 0.423 ), # Co
(61 , 3./2, 0.16215 ), # Ni
(63 , 3./2, 0.22015 ), # Cu
(67 , 5./2, 0.15015 ), # Zn
(69 , 3./2, 0.1712 ), # Ga
(73 , 9./2, 0.1961 ), # Ge
(75 , 3./2, 0.3146 ), # As
(77 , 5./2, 0.765 ), # Se
(79 , 3./2, 0.3133 ), # Br
(83 , 9./2, 0.2591 ), # Kr
(85 , 5./2, 0.2761 ), # Rb
(87 , 9./2, 0.3052 ), # Sr
(90 , 2. , 0.12511 ), # Y
(91 , 5./2, 0.1763 ), # Zr
(93 , 9./2, 0.322 ), # Nb
(95 , 5./2, 0.0221 ), # Mo
(99 , 9./2, 0.1296 ), # Tc
(101, 5./2, 0.462 ), # Ru
(100, 2. , 0.15318 ), # Rh
(105, 5./2, 0.66011 ), # Pd
(107, 0. , 0. ), # Ag
(111, 0. , 0. ), # Cd
(113, 9./2, 0.7598 ), # In
(119, 0. , 0. ), # Sn
(121, 0. , 0. ), # Sb
(125, 0. , 0. ), # Te
(127, 5./2, 0.69612 ), # I
(131, 3./2, 0.1141 ), # Xe
(133, 7./2, 0.0034310), # Cs
(137, 3./2, 0.2454 ), # Ba
(139, 7./2, 0.2006 ), # La
(140, 0. , 0. ), # Ce
(141, 5./2, 0.0776 ), # Pr
(143, 7./2, 0.612 ), # Nd
(147, 7./2, 0.7420 ), # Pm
(147, 7./2, 0.263 ), # Sm
(153, 5./2, 2.412 ), # Eu
(157, 3./2, 1.353 ), # Gd
(159, 3./2, 1.4328 ), # Tb
(163, 5./2, 2.652 ), # Dy
(165, 7./2, 3.582 ), # Ho
(167, 7./2, 3.573 ), # Er
(169, 0. , 0. ), # Tm
(173, 5./2, 2.804 ), # Yb
(175, 2. , 3.492 ), # Lu
(177, 2. , 3.373 ), # Hf
(181, 2. , 3.172 ), # Ta
(183, 0. , 0. ), # W
(187, 5./2, 2.072 ), # Re
(188, 2. , 1.464 ), # Os
(191, 3./2, 0.8169 ), # Ir
(195, 0. , 0. ), # Pt
(197, 3./2, 0.54716 ), # Au
(201, 3./2, 0.3876 ), # Hg
(205, 0. , 0. ), # Tl
(209, 9./2, 0.2717 ), # Pb
(209, 9./2, 0.51615 ), # Bi
(209, 0. , 0. ), # Po
(210, 0. , 0. ), # At
(209, 0. , 0. ), # Rn
(223, 3./2, 1.171 ), # Fr
(223, 3./2, 1.213 ), # Ra
(227, 0. , 0. ), # Ac
(229, 0. , 0. ), # Th
(231, 0. , 0. ), # Pa
(235, 7./2, 4.9366 ), # U
(237, 5./2, 3.8866 ), # Np
(239, 3./2, 2.3197 ), # Pu
(241, 5./2, 4.345 ), # Am
(247, 0. , 0. ), # Cm
)
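# Illustrative sketch (added commentary, not in the original file): the table
# above is indexed by nuclear charge Z, each row being
# (mass number, spin, Q/barn). For example, Z=1 selects the deuteron entry:
#
#   mass, spin, q_barn = ISOTOPE_QUAD_MOMENT[1]   # (2, 1., 0.002862)
#   q_si = q_barn * 1e-28                         # convert barn -> m^2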
|
gkc1000/pyscf
|
pyscf/data/nucprop.py
|
Python
|
apache-2.0
| 8,817
|
[
"PySCF"
] |
bdb205350c3b20f5ceee5594f2ce5928f4a30c91ace4f56d14d1af8509b7aea8
|
from miro import app
from miro import prefs
from miro.devices import DeviceInfo, MultipleDeviceInfo
from miro.gtcache import gettext as _
defaults = {
'audio_conversion': 'mp3',
'container_types': 'mp3 wav asf isom ogg mpeg avi'.split(),
'audio_types': 'mp* wmav* aac pcm* vorbis'.split(),
'video_types': 'theora h264 mpeg* wmv*'.split(),
'mount_instructions': _("Your phone must be in 'USB storage mode' in "
"order for %(shortappname)s to sync files to it.\n"
"To mount your phone, select 'Turn on USB "
"storage' from the notifications.",
{'shortappname':
app.config.get(prefs.SHORT_APP_NAME)}),
'video_path': u'Miro',
'audio_path': u'Miro'
}
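# Note (added commentary, not in the original file): 'defaults' is not applied
# implicitly. Devices registered on their own (htc_desire, nexus_one, ...) and
# the MultipleDeviceInfo container below pass **defaults (or **tablet_defaults)
# explicitly; child DeviceInfo entries presumably inherit these values from
# their parent container.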
tablet_defaults = defaults.copy()
tablet_defaults['mount_instructions'] = _(
"Your tablet must be in 'USB storage mode' in "
"order for %(shortappname)s to sync files to it.\n"
"To mount your phone, select 'Turn on USB "
"storage' from the notifications.",
{'shortappname':
app.config.get(prefs.SHORT_APP_NAME)})
htc_hero = DeviceInfo(u'HTC Hero',
video_conversion='hero',
video_path=u'Video',
audio_path=u'Music')
htc_evo = DeviceInfo(u'HTC EVO',
video_conversion='epic',
video_path=u'Video',
audio_path=u'Music')
htc_evo_4g = DeviceInfo(u'HTC EVO 4G',
video_conversion='epic')
htc_evo_3d = DeviceInfo(u'HTC EVO 3D',
video_conversion='sensationevo3d')
htc_legend = DeviceInfo(u'HTC Legend',
video_conversion='dreamg1',
video_path=u'/media/video',
audio_path=u'/media/audio')
tmobile_g1 = DeviceInfo(u'T-Mobile G1',
video_conversion='dreamg1')
tmobile_g2 = DeviceInfo(u'T-Mobile G2',
video_conversion='g2')
htc_vision = DeviceInfo(u'HTC Vision',
video_conversion='g2')
htc_desire_z = DeviceInfo(u'HTC Desire Z',
video_conversion='g2')
htc_incredible = DeviceInfo(u'HTC Droid Incredible',
video_conversion='epic')
htc_incredible_2 = DeviceInfo(u'HTC Droid Incredible 2',
video_conversion='epic')
htc_sensation = DeviceInfo(u'HTC Sensation',
video_conversion='epic')
htc_aria = DeviceInfo(u'HTC Aria',
video_conversion='hero')
generic_htc = DeviceInfo(_('Generic %(name)s Device', {'name': 'HTC'}),
video_conversion='hero')
htc_android_device = MultipleDeviceInfo(
'HTC Android Phone', [htc_hero, htc_evo, htc_evo_4g, htc_evo_3d,
htc_legend,
tmobile_g1, tmobile_g2, htc_vision, htc_desire_z,
htc_incredible, htc_incredible_2, htc_sensation,
htc_aria,
generic_htc],
vendor_id=0x0bb4,
product_id=0x0ff9,
**defaults)
htc_desire = DeviceInfo(u'HTC Desire',
vendor_id=0x0bb4,
product_id=0x0c87,
device_name='HTC Android Phone',
video_conversion='epic',
**defaults)
htc_desire_hd = DeviceInfo(u'HTC Desire HD',
vendor_id=0x0bb4,
product_id=0x0ca2,
device_name='HTC Android Phone',
video_conversion='epic',
**defaults)
htc_thunderbolt = DeviceInfo(u'HTC Thunderbolt',
vendor_id=0x0bb4,
product_id=0x0ca4,
device_name='HTC Android Phone',
video_conversion='epic',
**defaults)
htc_sensation = DeviceInfo(u'HTC Sensation',
vendor_id=0x0bb4,
product_id=0x0c86,
device_name='HTC Android Phone',
video_conversion='sensationevo3d',
**defaults)
nexus_one = DeviceInfo(u'Nexus One',
vendor_id=0x18d1,
product_id=0x4e11,
device_name='Google, Inc.Nexus One',
video_conversion='nexusone',
**defaults)
# the Droid apparently can have two different USB IDs
motorola_droid_one = DeviceInfo(u'Motorola Droid',
vendor_id=0x22b8,
product_id=0x41db,
device_name='Motorola A855',
video_conversion='droid',
**defaults)
motorola_droid_two = DeviceInfo(u'Motorola Droid',
vendor_id=0x22b8,
product_id=0x41d9,
device_name='Motorola A855',
video_conversion='droid',
**defaults)
motorola_droid2 = DeviceInfo(u'Motorola Droid 2',
vendor_id=0x22b8,
product_id=0x42a3,
device_name='Motorola A955',
video_conversion='droid',
**defaults)
motorola_droidx = DeviceInfo(u'Motorola Droid X',
vendor_id=0x22b8,
product_id=0x4285,
device_name='Motorola MB810',
video_conversion='droid',
**defaults)
motorola_xoom = DeviceInfo(u'Motorola Xoom',
vendor_id=0x18d1,
product_id=0x70a8,
device_name='Motorola MZ604',
video_conversion='xoom',
**tablet_defaults)
galaxy_s2 = DeviceInfo(u'Galaxy S2',
vendor_id=0x04e8,
product_id=0x685e,
device_name='Android UMS Composite',
video_conversion='epic',
**defaults)
galaxy_tab = DeviceInfo(u'Galaxy Tab',
vendor_id=0x04e8,
product_id=0x681d,
device_name='SAMSUNG SGH-T849',
video_conversion='galaxytab',
**tablet_defaults)
epic = DeviceInfo(u'Epic',
vendor_id=0x04e8,
product_id=0x6601,
device_name="SAMSUNG SPH-D700 Card",
video_conversion='epic',
**defaults)
lg_optimus_2x = DeviceInfo(u'Optimus 2x',
vendor_id=0x1004,
product_id=0x618e,
device_name='LGE P990',
video_conversion='epic',
**defaults)
lg_optimus_s = DeviceInfo(
u'Optimus S',
vendor_id=0x1004,
product_id=0x618E,
device_name='GOOGLE Mass storage',
video_conversion='hero',
audio_conversion='mp3',
container_types='mp3 wav asf isom ogg mpeg avi'.split(),
audio_types='mp* wmav* aac pcm* vorbis'.split(),
video_types='theora h264 mpeg* wmv*'.split(),
mount_instructions=_("Your phone must be in 'USB storage mode' in "
"order for %(shortappname)s to sync files to it.\n"
"To mount your phone, select 'Turn on USB "
"storage' from the notifications.",
{'shortappname':
app.config.get(prefs.SHORT_APP_NAME)}),
video_path=u'Media/Video',
audio_path=u'Media/Audio')
nookcolor = DeviceInfo(
name=u'MyNOOKColor',
device_name='B&N Ebook Disk',
vendor_id=0x2080,
product_id=0x0002,
# FIXME - the Nook Color has no way to play videos, so this should
# really be disabled.
video_conversion='copy',
video_path=u'My Files/Video',
audio_conversion='mp3',
audio_path=u'My Files/Music',
container_types=['mp3', 'isom'],
audio_types=['mp*', 'aac'],
video_types=[],
mount_instructions=_('Your Nook Color must be connected to your computer '
'and in USB Mode to sync files to it.\n')
)
toshiba_thrive = DeviceInfo(
u'Toshiba Thrive',
vendor_id=0x18d1,
product_id=0x7102,
device_name='AT100',
video_conversion='xoom',
audio_conversion='mp3',
container_types='mp3 wav asf isom ogg mpeg avi'.split(),
audio_types='mp* wmav* aac pcm* vorbis'.split(),
video_types='theora h264 mpeg* wmv*'.split(),
mount_instructions=_("Your tablet must be in 'USB storage mode' in "
"order for %(shortappname)s to sync files to it.\n"
"To mount your phone, select 'Turn on USB "
"storage' from the notifications.",
{'shortappname':
app.config.get(prefs.SHORT_APP_NAME)}),
video_path=u'Movies',
audio_path=u'Music')
devices = [htc_android_device, htc_desire, htc_desire_hd, htc_thunderbolt,
htc_sensation, nexus_one,
motorola_droid_one, motorola_droid_two, motorola_droid2,
motorola_droidx, motorola_xoom, lg_optimus_2x, lg_optimus_s,
galaxy_s2, galaxy_tab, epic, nookcolor, toshiba_thrive]
|
debugger06/MiroX
|
tv/resources/devices/android.py
|
Python
|
gpl-2.0
| 9,764
|
[
"Galaxy"
] |
7ed51770c23bd6f55c0ee9af6b3cb12c4ccb72dd04f25281a0db61c52a74833a
|
""" FileManagerBase is a base class for all the specific File Managers
"""
__RCSID__ = "$Id$"
# pylint: disable=protected-access
import os
import stat
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.List import intListToString
from DIRAC.Core.Utilities.Pfn import pfnunparse
class FileManagerBase(object):
""" Base class for all the specific File Managers
"""
def __init__(self, database=None):
self.db = database
self.statusDict = {}
def _getConnection(self, connection):
if connection:
return connection
res = self.db._getConnection()
if res['OK']:
return res['Value']
gLogger.warn("Failed to get MySQL connection", res['Message'])
return connection
def setDatabase(self, database):
self.db = database
def getFileCounters(self, connection=False):
""" Get a number of counters to verify the sanity of the Files in the catalog
"""
connection = self._getConnection(connection)
resultDict = {}
req = "SELECT COUNT(*) FROM FC_Files;"
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Files'] = res['Value'][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_Replicas )"
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Files w/o Replicas'] = res['Value'][0][0]
req = "SELECT COUNT(RepID) FROM FC_Replicas WHERE FileID NOT IN ( SELECT FileID FROM FC_Files )"
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Replicas w/o Files'] = res['Value'][0][0]
treeTable = self.db.dtree.getTreeTable()
req = "SELECT COUNT(FileID) FROM FC_Files WHERE DirID NOT IN ( SELECT DirID FROM %s)" % treeTable
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Orphan Files'] = res['Value'][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_FileInfo)"
res = self.db._query(req, connection)
if not res['OK']:
resultDict['Files w/o FileInfo'] = 0
else:
resultDict['Files w/o FileInfo'] = res['Value'][0][0]
req = "SELECT COUNT(FileID) FROM FC_FileInfo WHERE FileID NOT IN ( SELECT FileID FROM FC_Files)"
res = self.db._query(req, connection)
if not res['OK']:
resultDict['FileInfo w/o Files'] = 0
else:
resultDict['FileInfo w/o Files'] = res['Value'][0][0]
return S_OK(resultDict)
def getReplicaCounters(self, connection=False):
""" Get a number of counters to verify the sanity of the Replicas in the catalog
"""
connection = self._getConnection(connection)
req = "SELECT COUNT(*) FROM FC_Replicas;"
res = self.db._query(req, connection)
if not res['OK']:
return res
return S_OK({'Replicas': res['Value'][0][0]})
######################################################
#
# File write methods
#
def _insertFiles(self, lfns, uid, gid, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _deleteFiles(self, toPurge, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _insertReplicas(self, lfns, master=False, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _findFiles(self, lfns, metadata=["FileID"], allStatus=False, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _getFileReplicas(self, fileIDs, fields_input=['PFN'], allStatus=False, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _getFileIDFromGUID(self, guid, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def getLFNForGUID(self, guids, connection=False):
"""Returns the LFN matching a given GUID
"""
return S_ERROR("To be implemented on derived class")
def _setFileParameter(self, fileID, paramName, paramValue, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _deleteReplicas(self, lfns, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _setReplicaStatus(self, fileID, se, status, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _setReplicaHost(self, fileID, se, newSE, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryFiles(self, dirID, fileNames, metadata, allStatus=False, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryFileIDs(self, dirID, requestString=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _findFileIDs(self, lfns, connection=False):
""" To be implemented on derived class
Should return following the successful/failed convention
Successful is a dictionary with keys the lfn, and values the FileID"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryReplicas(self, dirID, allStatus=False, connection=False):
""" To be implemented on derived class
Should return with only one value, being a list of all the replicas (FileName,FileID,SEID,PFN)
"""
return S_ERROR("To be implemented on derived class")
def countFilesInDir(self, dirId):
""" Count how many files there is in a given Directory
:param int dirID: directory id
:returns: S_OK(value) or S_ERROR
"""
return S_ERROR("To be implemented on derived class")
def _getFileLFNs(self, fileIDs):
""" Get the file LFNs for a given list of file IDs
"""
stringIDs = intListToString(fileIDs)
treeTable = self.db.dtree.getTreeTable()
req = "SELECT F.FileID, CONCAT(D.DirName,'/',F.FileName) from FC_Files as F,\
%s as D WHERE F.FileID IN ( %s ) AND F.DirID=D.DirID" % (
treeTable, stringIDs)
result = self.db._query(req)
if not result['OK']:
return result
fileNameDict = {}
for row in result['Value']:
fileNameDict[row[0]] = row[1]
failed = {}
successful = fileNameDict
if len(fileNameDict) != len(fileIDs):
for id_ in fileIDs:
if id_ not in fileNameDict:
failed[id_] = "File ID not found"
return S_OK({'Successful': successful, 'Failed': failed})
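# Illustrative sketch (added): like most methods in this class, _getFileLFNs
# follows the Successful/Failed convention. Assuming file ID 1 resolves and
# file ID 2 does not, the return value would look like:
#
#   S_OK({'Successful': {1: '/some/dir/file.dat'},
#         'Failed': {2: 'File ID not found'}})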
def addFile(self, lfns, credDict, connection=False):
""" Add files to the catalog
:param dict lfns: dict{ lfn : info}. 'info' is a dict containing PFN, SE, Size and Checksum
the SE parameter can be a list if we have several replicas to register
"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ['PFN', 'SE', 'Size', 'Checksum'])
if not res['OK']:
failed[lfn] = res['Message']
lfns.pop(lfn)
res = self._addFiles(lfns, credDict, connection=connection)
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
return S_OK({'Successful': successful, 'Failed': failed})
def _addFiles(self, lfns, credDict, connection=False):
""" Main file adding method
"""
connection = self._getConnection(connection)
successful = {}
result = self.db.ugManager.getUserAndGroupID(credDict)
if not result['OK']:
return result
uid, gid = result['Value']
# prepare lfns with master replicas - the first in the list or a unique replica
masterLfns = {}
extraLfns = {}
for lfn in lfns:
masterLfns[lfn] = dict(lfns[lfn])
if isinstance(lfns[lfn].get('SE'), list):
masterLfns[lfn]['SE'] = lfns[lfn]['SE'][0]
if len(lfns[lfn]['SE']) > 1:
extraLfns[lfn] = dict(lfns[lfn])
extraLfns[lfn]['SE'] = lfns[lfn]['SE'][1:]
# Check whether the supplied files have been registered already
res = self._getExistingMetadata(masterLfns.keys(), connection=connection)
if not res['OK']:
return res
existingMetadata, failed = res['Value']
if existingMetadata:
success, fail = self._checkExistingMetadata(existingMetadata, masterLfns)
successful.update(success)
failed.update(fail)
for lfn in (success.keys() + fail.keys()):
masterLfns.pop(lfn)
# If GUIDs are supposed to be unique check their pre-existance
if self.db.uniqueGUID:
fail = self._checkUniqueGUID(masterLfns, connection=connection)
failed.update(fail)
for lfn in fail:
masterLfns.pop(lfn)
# If we have files left to register
if masterLfns:
# Create the directories for the supplied files and store their IDs
directories = self._getFileDirectories(masterLfns.keys())
for directory, fileNames in directories.items():
res = self.db.dtree.makeDirectories(directory, credDict)
if not res['OK']:
for fileName in fileNames:
lfn = os.path.join(directory, fileName)
failed[lfn] = res['Message']
masterLfns.pop(lfn)
continue
for fileName in fileNames:
if not fileName:
failed[directory] = "Is no a valid file"
masterLfns.pop(directory)
continue
lfn = "%s/%s" % (directory, fileName)
lfn = lfn.replace('//', '/')
# This condition should never be true, we would not be here otherwise...
if not res['OK']:
failed[lfn] = "Failed to create directory for file"
masterLfns.pop(lfn)
else:
masterLfns[lfn]['DirID'] = res['Value']
# If we still have files left to register
if masterLfns:
res = self._insertFiles(masterLfns, uid, gid, connection=connection)
if not res['OK']:
for lfn in masterLfns.keys(): # pylint: disable=consider-iterating-dictionary
failed[lfn] = res['Message']
masterLfns.pop(lfn)
else:
for lfn, error in res['Value']['Failed'].items():
failed[lfn] = error
masterLfns.pop(lfn)
masterLfns = res['Value']['Successful']
# Add the ancestors
if masterLfns:
res = self._populateFileAncestors(masterLfns, connection=connection)
toPurge = []
if not res['OK']:
for lfn in masterLfns.keys():
failed[lfn] = "Failed while registering ancestors"
toPurge.append(masterLfns[lfn]['FileID'])
else:
failed.update(res['Value']['Failed'])
for lfn, error in res['Value']['Failed'].items():
toPurge.append(masterLfns[lfn]['FileID'])
if toPurge:
self._deleteFiles(toPurge, connection=connection)
# Register the replicas
newlyRegistered = {}
if masterLfns:
res = self._insertReplicas(masterLfns, master=True, connection=connection)
toPurge = []
if not res['OK']:
for lfn in masterLfns.keys():
failed[lfn] = "Failed while registering replica"
toPurge.append(masterLfns[lfn]['FileID'])
else:
newlyRegistered = res['Value']['Successful']
successful.update(newlyRegistered)
failed.update(res['Value']['Failed'])
for lfn, error in res['Value']['Failed'].items():
toPurge.append(masterLfns[lfn]['FileID'])
if toPurge:
self._deleteFiles(toPurge, connection=connection)
# Add extra replicas for successfully registered LFNs
for lfn in extraLfns.keys(): # pylint: disable=consider-iterating-dictionary
if lfn not in successful:
extraLfns.pop(lfn)
if extraLfns:
res = self._findFiles(extraLfns.keys(), ['FileID', 'DirID'], connection=connection)
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = 'Failed while registering extra replicas'
successful.pop(lfn)
extraLfns.pop(lfn)
else:
failed.update(res['Value']['Failed'])
for lfn in res['Value']['Failed'].keys():
successful.pop(lfn)
extraLfns.pop(lfn)
for lfn, fileDict in res['Value']['Successful'].items():
extraLfns[lfn]['FileID'] = fileDict['FileID']
extraLfns[lfn]['DirID'] = fileDict['DirID']
if extraLfns:
res = self._insertReplicas(extraLfns, master=False, connection=connection)
if not res['OK']:
for lfn in extraLfns.keys(): # pylint: disable=consider-iterating-dictionary
failed[lfn] = "Failed while registering extra replicas"
successful.pop(lfn)
else:
newlyRegistered = res['Value']['Successful']
successful.update(newlyRegistered)
failed.update(res['Value']['Failed'])
return S_OK({'Successful': successful, 'Failed': failed})
def _updateDirectoryUsage(self, directorySEDict, change, connection=False):
connection = self._getConnection(connection)
for directoryID in directorySEDict.keys():
result = self.db.dtree.getPathIDsByID(directoryID)
if not result['OK']:
return result
parentIDs = result['Value']
dirDict = directorySEDict[directoryID]
for seID in dirDict.keys():
seDict = dirDict[seID]
files = seDict['Files']
size = seDict['Size']
insertTuples = []
for dirID in parentIDs:
insertTuples.append('(%d,%d,%d,%d,UTC_TIMESTAMP())' % (dirID, seID, size, files))
req = "INSERT INTO FC_DirectoryUsage (DirID,SEID,SESize,SEFiles,LastUpdate) "
req += "VALUES %s" % ','.join(insertTuples)
req += " ON DUPLICATE KEY UPDATE SESize=SESize%s%d, SEFiles=SEFiles%s%d, LastUpdate=UTC_TIMESTAMP() " \
% (change, size, change, files)
res = self.db._update(req)
if not res['OK']:
gLogger.warn("Failed to update FC_DirectoryUsage", res['Message'])
return S_OK()
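# Illustrative sketch (added): for change='+', one directory and one SE, the
# statement built above would read roughly:
#
#   INSERT INTO FC_DirectoryUsage (DirID,SEID,SESize,SEFiles,LastUpdate)
#   VALUES (3,7,1048576,2,UTC_TIMESTAMP())
#   ON DUPLICATE KEY UPDATE SESize=SESize+1048576, SEFiles=SEFiles+2,
#   LastUpdate=UTC_TIMESTAMP()
#
# i.e. usage rows are upserted for the directory and each of its parents.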
def _populateFileAncestors(self, lfns, connection=False):
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, lfnDict in lfns.items():
originalFileID = lfnDict['FileID']
originalDepth = lfnDict.get('AncestorDepth', 1)
ancestors = lfnDict.get('Ancestors', [])
if isinstance(ancestors, basestring):
ancestors = [ancestors]
if lfn in ancestors:
ancestors.remove(lfn)
if not ancestors:
successful[lfn] = True
continue
res = self._findFiles(ancestors, connection=connection)
if res['Value']['Failed']:
failed[lfn] = "Failed to resolve ancestor files"
continue
ancestorIDs = res['Value']['Successful']
fileIDLFNs = {}
toInsert = {}
for ancestor in ancestorIDs.keys():
fileIDLFNs[ancestorIDs[ancestor]['FileID']] = ancestor
toInsert[ancestorIDs[ancestor]['FileID']] = originalDepth
res = self._getFileAncestors(fileIDLFNs.keys())
if not res['OK']:
failed[lfn] = "Failed to obtain all ancestors"
continue
fileIDAncestorDict = res['Value']
for fileIDDict in fileIDAncestorDict.values():
for ancestorID, relativeDepth in fileIDDict.items():
toInsert[ancestorID] = relativeDepth + originalDepth
res = self._insertFileAncestors(originalFileID, toInsert, connection=connection)
if not res['OK']:
if "Duplicate" in res['Message']:
failed[lfn] = "Failed to insert ancestor files: duplicate entry"
else:
failed[lfn] = "Failed to insert ancestor files"
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def _insertFileAncestors(self, fileID, ancestorDict, connection=False):
connection = self._getConnection(connection)
ancestorTuples = []
for ancestorID, depth in ancestorDict.items():
ancestorTuples.append("(%d,%d,%d)" % (fileID, ancestorID, depth))
if not ancestorTuples:
return S_OK()
req = "INSERT INTO FC_FileAncestors (FileID, AncestorID, AncestorDepth) VALUES %s" \
% intListToString(ancestorTuples)
return self.db._update(req, connection)
def _getFileAncestors(self, fileIDs, depths=[], connection=False):
connection = self._getConnection(connection)
req = "SELECT FileID, AncestorID, AncestorDepth FROM FC_FileAncestors WHERE FileID IN (%s)" \
% intListToString(fileIDs)
if depths:
req = "%s AND AncestorDepth IN (%s);" % (req, intListToString(depths))
res = self.db._query(req, connection)
if not res['OK']:
return res
fileIDAncestors = {}
for fileID, ancestorID, depth in res['Value']:
if fileID not in fileIDAncestors:
fileIDAncestors[fileID] = {}
fileIDAncestors[fileID][ancestorID] = depth
return S_OK(fileIDAncestors)
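# Illustrative sketch (added): the ancestor map is keyed by FileID, then by
# AncestorID, with the relative depth as value. With hypothetical IDs:
#
#   {10: {4: 1, 2: 2}}   # file 10 has parent 4 (depth 1) and grandparent 2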
def _getFileDescendents(self, fileIDs, depths, connection=False):
connection = self._getConnection(connection)
req = "SELECT AncestorID, FileID, AncestorDepth FROM FC_FileAncestors WHERE AncestorID IN (%s)" \
% intListToString(fileIDs)
if depths:
req = "%s AND AncestorDepth IN (%s);" % (req, intListToString(depths))
res = self.db._query(req, connection)
if not res['OK']:
return res
fileIDAncestors = {}
for ancestorID, fileID, depth in res['Value']:
if ancestorID not in fileIDAncestors:
fileIDAncestors[ancestorID] = {}
fileIDAncestors[ancestorID][fileID] = depth
return S_OK(fileIDAncestors)
def addFileAncestors(self, lfns, connection=False):
""" Add file ancestors to the catalog """
connection = self._getConnection(connection)
failed = {}
successful = {}
result = self._findFiles(lfns.keys(), connection=connection)
if not result['OK']:
return result
if result['Value']['Failed']:
failed.update(result['Value']['Failed'])
for lfn in result['Value']['Failed']:
lfns.pop(lfn)
if not lfns:
return S_OK({'Successful': successful, 'Failed': failed})
for lfn in result['Value']['Successful']:
lfns[lfn]['FileID'] = result['Value']['Successful'][lfn]['FileID']
result = self._populateFileAncestors(lfns, connection)
if not result['OK']:
return result
failed.update(result['Value']['Failed'])
successful = result['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def _getFileRelatives(self, lfns, depths, relation, connection=False):
connection = self._getConnection(connection)
failed = {}
successful = {}
result = self._findFiles(lfns.keys(), connection=connection)
if not result['OK']:
return result
if result['Value']['Failed']:
failed.update(result['Value']['Failed'])
for lfn in result['Value']['Failed']:
lfns.pop(lfn)
if not lfns:
return S_OK({'Successful': successful, 'Failed': failed})
inputIDDict = {}
for lfn in result['Value']['Successful']:
inputIDDict[result['Value']['Successful'][lfn]['FileID']] = lfn
inputIDs = inputIDDict.keys()
if relation == 'ancestor':
result = self._getFileAncestors(inputIDs, depths, connection)
else:
result = self._getFileDescendents(inputIDs, depths, connection)
if not result['OK']:
return result
failed = {}
successful = {}
relDict = result['Value']
for id_ in inputIDs:
if id_ in relDict:
aList = relDict[id_].keys()
result = self._getFileLFNs(aList)
if not result['OK']:
failed[inputIDDict[id_]] = "Failed to find %s" % relation
else:
if result['Value']['Successful']:
resDict = {}
for aID in result['Value']['Successful']:
resDict[result['Value']['Successful'][aID]] = relDict[id_][aID]
successful[inputIDDict[id_]] = resDict
for aID in result['Value']['Failed']:
failed[inputIDDict[id_]] = "Failed to get the ancestor LFN"
else:
successful[inputIDDict[id_]] = {}
return S_OK({'Successful': successful, 'Failed': failed})
def getFileAncestors(self, lfns, depths, connection=False):
return self._getFileRelatives(lfns, depths, 'ancestor', connection)
def getFileDescendents(self, lfns, depths, connection=False):
return self._getFileRelatives(lfns, depths, 'descendent', connection)
def _getExistingMetadata(self, lfns, connection=False):
connection = self._getConnection(connection)
# Check whether the files already exist before adding
res = self._findFiles(lfns, ['FileID', 'Size', 'Checksum', 'GUID'], connection=connection)
if not res['OK']:
return res
successful = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn, error in res['Value']['Failed'].items():
if error == 'No such file or directory':
failed.pop(lfn)
return S_OK((successful, failed))
def _checkExistingMetadata(self, existingLfns, lfns):
failed = {}
successful = {}
fileIDLFNs = {}
for lfn, fileDict in existingLfns.items():
fileIDLFNs[fileDict['FileID']] = lfn
# For those that exist get the replicas to determine whether they are already registered
res = self._getFileReplicas(fileIDLFNs.keys())
if not res['OK']:
for lfn in fileIDLFNs.itervalues():
failed[lfn] = 'Failed checking pre-existing replicas'
else:
replicaDict = res['Value']
for fileID, lfn in fileIDLFNs.items():
fileMetadata = existingLfns[lfn]
existingGuid = fileMetadata['GUID']
existingSize = fileMetadata['Size']
existingChecksum = fileMetadata['Checksum']
newGuid = lfns[lfn]['GUID']
newSize = lfns[lfn]['Size']
newChecksum = lfns[lfn]['Checksum']
# Ensure that the key file metadata is the same
if (existingGuid != newGuid) or \
(existingSize != newSize) or \
(existingChecksum != newChecksum):
failed[lfn] = "File already registered with alternative metadata"
# If the DB does not have replicas for this file return an error
elif fileID not in replicaDict or not replicaDict[fileID]:
failed[lfn] = "File already registered with no replicas"
# If the supplied SE is not in the existing replicas return an error
elif not lfns[lfn]['SE'] in replicaDict[fileID].keys():
failed[lfn] = "File already registered with alternative replicas"
# If we get here the file being registered already exists exactly in the DB
else:
successful[lfn] = True
return successful, failed
def _checkUniqueGUID(self, lfns, connection=False):
connection = self._getConnection(connection)
guidLFNs = {}
failed = {}
for lfn, fileDict in lfns.items():
guidLFNs[fileDict['GUID']] = lfn
res = self._getFileIDFromGUID(guidLFNs.keys(), connection=connection)
if not res['OK']:
return dict.fromkeys(lfns, res['Message'])
for guid, fileID in res['Value'].items():
# resolve this to LFN
failed[guidLFNs[guid]] = "GUID already registered for another file %s" % fileID
return failed
def removeFile(self, lfns, connection=False):
""" Remove file from the catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
res = self._findFiles(lfns, ['DirID', 'FileID', 'Size'], connection=connection)
if not res['OK']:
return res
for lfn, error in res['Value']['Failed'].items():
if error == 'No such file or directory':
successful[lfn] = True
else:
failed[lfn] = error
fileIDLfns = {}
lfns = res['Value']['Successful']
for lfn, lfnDict in lfns.items():
fileIDLfns[lfnDict['FileID']] = lfn
res = self._computeStorageUsageOnRemoveFile(lfns, connection=connection)
if not res['OK']:
return res
directorySESizeDict = res['Value']
# Now do removal
res = self._deleteFiles(fileIDLfns.keys(), connection=connection)
if not res['OK']:
for lfn in fileIDLfns.values():
failed[lfn] = res['Message']
else:
# Update the directory usage
self._updateDirectoryUsage(directorySESizeDict, '-', connection=connection)
for lfn in fileIDLfns.values():
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def _computeStorageUsageOnRemoveFile(self, lfns, connection=False):
# Resolve the replicas to calculate reduction in storage usage
fileIDLfns = {}
for lfn, lfnDict in lfns.items():
fileIDLfns[lfnDict['FileID']] = lfn
res = self._getFileReplicas(fileIDLfns.keys(), connection=connection)
if not res['OK']:
return res
directorySESizeDict = {}
for fileID, seDict in res['Value'].items():
dirID = lfns[fileIDLfns[fileID]]['DirID']
size = lfns[fileIDLfns[fileID]]['Size']
directorySESizeDict.setdefault(dirID, {})
directorySESizeDict[dirID].setdefault(0, {'Files': 0, 'Size': 0})
directorySESizeDict[dirID][0]['Size'] += size
directorySESizeDict[dirID][0]['Files'] += 1
for seName in seDict.keys():
res = self.db.seManager.findSE(seName)
if not res['OK']:
return res
seID = res['Value']
size = lfns[fileIDLfns[fileID]]['Size']
directorySESizeDict[dirID].setdefault(seID, {'Files': 0, 'Size': 0})
directorySESizeDict[dirID][seID]['Size'] += size
directorySESizeDict[dirID][seID]['Files'] += 1
return S_OK(directorySESizeDict)
def setFileStatus(self, lfns, connection=False):
""" Get set the group for the supplied files """
connection = self._getConnection(connection)
res = self._findFiles(lfns, ['FileID', 'UID'], connection=connection)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful'].keys():
status = lfns[lfn]
if isinstance(status, basestring):
if status not in self.db.validFileStatus:
failed[lfn] = 'Invalid file status %s' % status
continue
result = self._getStatusInt(status, connection=connection)
if not result['OK']:
failed[lfn] = result['Message']
continue
status = result['Value']
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileParameter(fileID, "Status", status, connection=connection)
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
######################################################
#
# Replica write methods
#
def addReplica(self, lfns, connection=False):
""" Add replica to the catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ['PFN', 'SE'])
if not res['OK']:
failed[lfn] = res['Message']
lfns.pop(lfn)
res = self._addReplicas(lfns, connection=connection)
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
return S_OK({'Successful': successful, 'Failed': failed})
def _addReplicas(self, lfns, connection=False):
connection = self._getConnection(connection)
successful = {}
res = self._findFiles(lfns.keys(), ['DirID', 'FileID', 'Size'], connection=connection)
if not res['OK']:
return res
failed = res['Value']['Failed']
for lfn in failed.keys():
lfns.pop(lfn)
lfnFileIDDict = res['Value']['Successful']
for lfn, fileDict in lfnFileIDDict.items():
lfns[lfn].update(fileDict)
res = self._insertReplicas(lfns, connection=connection)
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
successful = res['Value']['Successful']
failed.update(res['Value']['Failed'])
return S_OK({'Successful': successful, 'Failed': failed})
def removeReplica(self, lfns, connection=False):
""" Remove replica from catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ['SE'])
if not res['OK']:
failed[lfn] = res['Message']
lfns.pop(lfn)
res = self._deleteReplicas(lfns, connection=connection)
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
return S_OK({'Successful': successful, 'Failed': failed})
def setReplicaStatus(self, lfns, connection=False):
""" Set replica status in the catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ['SE', 'Status'])
if not res['OK']:
failed[lfn] = res['Message']
continue
status = info['Status']
se = info['SE']
res = self._findFiles([lfn], ['FileID'], connection=connection)
if lfn not in res['Value']['Successful']:
failed[lfn] = res['Value']['Failed'][lfn]
continue
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setReplicaStatus(fileID, se, status, connection=connection)
if res['OK']:
successful[lfn] = res['Value']
else:
failed[lfn] = res['Message']
return S_OK({'Successful': successful, 'Failed': failed})
def setReplicaHost(self, lfns, connection=False):
""" Set replica host in the catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ['SE', 'NewSE'])
if not res['OK']:
failed[lfn] = res['Message']
continue
newSE = info['NewSE']
se = info['SE']
res = self._findFiles([lfn], ['FileID'], connection=connection)
if lfn not in res['Value']['Successful']:
failed[lfn] = res['Value']['Failed'][lfn]
continue
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setReplicaHost(fileID, se, newSE, connection=connection)
if res['OK']:
successful[lfn] = res['Value']
else:
failed[lfn] = res['Message']
return S_OK({'Successful': successful, 'Failed': failed})
######################################################
#
# File read methods
#
def exists(self, lfns, connection=False):
""" Determine whether a file exists in the catalog """
connection = self._getConnection(connection)
res = self._findFiles(lfns, allStatus=True, connection=connection)
if not res['OK']:
return res
successful = res['Value']['Successful']
origFailed = res['Value']['Failed']
for lfn in successful:
successful[lfn] = lfn
failed = {}
if self.db.uniqueGUID:
guidList = []
val = None
# Try to identify if the GUID is given
# We consider only 2 options :
# either {lfn : guid}
# or { lfn : {PFN : .., GUID : ..} }
if isinstance(lfns, dict):
val = lfns.values()
# We have values, take the first to identify the type
if val:
val = val[0]
if isinstance(val, dict) and 'GUID' in val:
# We are in the case {lfn : {PFN:.., GUID:..}}
guidList = [lfns[lfn]['GUID'] for lfn in lfns]
elif isinstance(val, basestring):
# We hope that it is the GUID which is given
guidList = lfns.values()
if guidList:
# A dict { guid: lfn to which it is supposed to be associated }
guidToGivenLfn = dict(zip(guidList, lfns))
res = self.getLFNForGUID(guidList, connection)
if not res['OK']:
return res
guidLfns = res['Value']['Successful']
for guid, realLfn in guidLfns.items():
successful[guidToGivenLfn[guid]] = realLfn
for lfn, error in origFailed.items():
# It could be in successful because the guid exists with another lfn
if lfn in successful:
continue
if error == 'No such file or directory':
successful[lfn] = False
else:
failed[lfn] = error
return S_OK({"Successful": successful, "Failed": failed})
def isFile(self, lfns, connection=False):
""" Determine whether a path is a file in the catalog """
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
return self.exists(lfns, connection=connection)
def getFileSize(self, lfns, connection=False):
""" Get file size from the catalog """
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
res = self._findFiles(lfns, ['Size'], connection=connection)
if not res['OK']:
return res
totalSize = 0
for lfn in res['Value']['Successful'].keys():
size = res['Value']['Successful'][lfn]['Size']
res['Value']['Successful'][lfn] = size
totalSize += size
res['TotalSize'] = totalSize
return res
def getFileMetadata(self, lfns, connection=False):
""" Get file metadata from the catalog """
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
return self._findFiles(lfns, ['Size', 'Checksum',
'ChecksumType', 'UID',
'GID', 'GUID',
'CreationDate', 'ModificationDate',
'Mode', 'Status'], connection=connection)
def getPathPermissions(self, paths, credDict, connection=False):
""" Get the permissions for the supplied paths """
connection = self._getConnection(connection)
res = self.db.ugManager.getUserAndGroupID(credDict)
if not res['OK']:
return res
uid, gid = res['Value']
res = self._findFiles(paths, metadata=['Mode', 'UID', 'GID'], connection=connection)
if not res['OK']:
return res
successful = {}
for dirName, dirDict in res['Value']['Successful'].items():
mode = dirDict['Mode']
p_uid = dirDict['UID']
p_gid = dirDict['GID']
successful[dirName] = {}
if p_uid == uid:
successful[dirName]['Read'] = mode & stat.S_IRUSR
successful[dirName]['Write'] = mode & stat.S_IWUSR
successful[dirName]['Execute'] = mode & stat.S_IXUSR
elif p_gid == gid:
successful[dirName]['Read'] = mode & stat.S_IRGRP
successful[dirName]['Write'] = mode & stat.S_IWGRP
successful[dirName]['Execute'] = mode & stat.S_IXGRP
else:
successful[dirName]['Read'] = mode & stat.S_IROTH
successful[dirName]['Write'] = mode & stat.S_IWOTH
successful[dirName]['Execute'] = mode & stat.S_IXOTH
return S_OK({'Successful': successful, 'Failed': res['Value']['Failed']})
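# Illustrative sketch (added): the bitmask tests above return plain integers,
# not booleans. Assuming mode 0o754 (rwxr-xr--), an owner read check gives
# mode & stat.S_IRUSR == 0o400 (truthy), while an "others" write check gives
# mode & stat.S_IWOTH == 0 (falsy).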
######################################################
#
# Replica read methods
#
def __getReplicasForIDs(self, fileIDLfnDict, allStatus, connection=False):
""" Get replicas for files with already resolved IDs
"""
replicas = {}
if fileIDLfnDict:
fields = []
if not self.db.lfnPfnConvention or self.db.lfnPfnConvention == "Weak":
fields = ['PFN']
res = self._getFileReplicas(fileIDLfnDict.keys(), fields_input=fields,
allStatus=allStatus, connection=connection)
if not res['OK']:
return res
for fileID, seDict in res['Value'].items():
lfn = fileIDLfnDict[fileID]
replicas[lfn] = {}
for se, repDict in seDict.items():
pfn = repDict.get('PFN', '')
# if not pfn or self.db.lfnPfnConvention:
# res = self._resolvePFN( lfn, se )
# if res['OK']:
# pfn = res['Value']
replicas[lfn][se] = pfn
result = S_OK(replicas)
return result
def getReplicas(self, lfns, allStatus, connection=False):
""" Get file replicas from the catalog """
connection = self._getConnection(connection)
# Get FileID <-> LFN correspondence first
res = self._findFileIDs(lfns, connection=connection)
if not res['OK']:
return res
failed = res['Value']['Failed']
fileIDLFNs = {}
for lfn, fileID in res['Value']['Successful'].items():
fileIDLFNs[fileID] = lfn
result = self.__getReplicasForIDs(fileIDLFNs, allStatus, connection)
if not result['OK']:
return result
replicas = result['Value']
result = S_OK({"Successful": replicas, 'Failed': failed})
if self.db.lfnPfnConvention:
sePrefixDict = {}
resSE = self.db.seManager.getSEPrefixes()
if resSE['OK']:
sePrefixDict = resSE['Value']
result['Value']['SEPrefixes'] = sePrefixDict
return result
def getReplicasByMetadata(self, metaDict, path, allStatus, credDict, connection=False):
""" Get file replicas for files corresponding to the given metadata """
connection = self._getConnection(connection)
# Get FileID <-> LFN correspondence first
failed = {}
result = self.db.fmeta.findFilesByMetadata(metaDict, path, credDict)
if not result['OK']:
return result
idLfnDict = result['Value']
result = self.__getReplicasForIDs(idLfnDict, allStatus, connection)
if not result['OK']:
return result
replicas = result['Value']
result = S_OK({"Successful": replicas, 'Failed': failed})
if self.db.lfnPfnConvention:
sePrefixDict = {}
resSE = self.db.seManager.getSEPrefixes()
if resSE['OK']:
sePrefixDict = resSE['Value']
result['Value']['SEPrefixes'] = sePrefixDict
return result
def _resolvePFN(self, lfn, se):
resSE = self.db.seManager.getSEDefinition(se)
if not resSE['OK']:
return resSE
pfnDict = dict(resSE['Value']['SEDict'])
if "PFNPrefix" in pfnDict:
return S_OK(pfnDict['PFNPrefix'] + lfn)
pfnDict['FileName'] = lfn
return pfnunparse(pfnDict)
def getReplicaStatus(self, lfns, connection=False):
""" Get replica status from the catalog """
connection = self._getConnection(connection)
res = self._findFiles(lfns, connection=connection)
if not res['OK']:
return res
failed = res['Value']['Failed']
fileIDLFNs = {}
for lfn, fileDict in res['Value']['Successful'].iteritems():
fileID = fileDict['FileID']
fileIDLFNs[fileID] = lfn
successful = {}
if fileIDLFNs:
res = self._getFileReplicas(fileIDLFNs.keys(), allStatus=True, connection=connection)
if not res['OK']:
return res
for fileID, seDict in res['Value'].items():
lfn = fileIDLFNs[fileID]
requestedSE = lfns[lfn]
if not requestedSE:
failed[lfn] = "Replica info not supplied"
elif requestedSE not in seDict:
failed[lfn] = "No replica at supplied site"
else:
successful[lfn] = seDict[requestedSE]['Status']
return S_OK({'Successful': successful, 'Failed': failed})
######################################################
#
# General usage methods
#
def _getStatusInt(self, status, connection=False):
connection = self._getConnection(connection)
req = "SELECT StatusID FROM FC_Statuses WHERE Status = '%s';" % status
res = self.db._query(req, connection)
if not res['OK']:
return res
if res['Value']:
return S_OK(res['Value'][0][0])
req = "INSERT INTO FC_Statuses (Status) VALUES ('%s');" % status
res = self.db._update(req, connection)
if not res['OK']:
return res
return S_OK(res['lastRowId'])
def _getIntStatus(self, statusID, connection=False):
if statusID in self.statusDict:
return S_OK(self.statusDict[statusID])
connection = self._getConnection(connection)
req = "SELECT StatusID,Status FROM FC_Statuses"
res = self.db._query(req, connection)
if not res['OK']:
return res
if res['Value']:
for row in res['Value']:
self.statusDict[int(row[0])] = row[1]
if statusID in self.statusDict:
return S_OK(self.statusDict[statusID])
return S_OK('Unknown')
def getFileIDsInDirectory(self, dirID, requestString=False):
""" Get a list of IDs for all the files stored in given directories or their
subdirectories
:param dirID: single directory ID or a list of directory IDs
:type dirID: int or python:list[int]
:param bool requestString: if True return result as a SQL SELECT string
:return: list of file IDs or SELECT string
"""
return self._getDirectoryFileIDs(dirID, requestString=requestString)
def getFilesInDirectory(self, dirID, verbose=False, connection=False):
connection = self._getConnection(connection)
files = {}
res = self._getDirectoryFiles(dirID, [], ['FileID', 'Size', 'GUID',
'Checksum', 'ChecksumType',
'Type', 'UID',
'GID', 'CreationDate',
'ModificationDate', 'Mode',
'Status'], connection=connection)
if not res['OK']:
return res
if not res['Value']:
return S_OK(files)
fileIDNames = {}
for fileName, fileDict in res['Value'].items():
files[fileName] = {}
files[fileName]['MetaData'] = fileDict
fileIDNames[fileDict['FileID']] = fileName
if verbose:
result = self._getFileReplicas(fileIDNames.keys(), connection=connection)
if not result['OK']:
return result
for fileID, seDict in result['Value'].items():
fileName = fileIDNames[fileID]
files[fileName]['Replicas'] = seDict
return S_OK(files)
def getDirectoryReplicas(self, dirID, path, allStatus=False, connection=False):
""" Get the replicas for all the Files in the given Directory
:param int dirID: ID of the directory
:param path: unused, kept for interface compatibility
:param bool allStatus: whether all replicas and file status are considered
If False, take the visibleFileStatus and visibleReplicaStatus values from the configuration
"""
connection = self._getConnection(connection)
result = self._getDirectoryReplicas(dirID, allStatus, connection)
if not result['OK']:
return result
resultDict = {}
seDict = {}
for fileName, fileID, seID, pfn in result['Value']:
resultDict.setdefault(fileName, {})
if seID not in seDict:
res = self.db.seManager.getSEName(seID)
if not res['OK']:
seDict[seID] = 'Unknown'
else:
seDict[seID] = res['Value']
se = seDict[seID]
resultDict[fileName][se] = pfn
return S_OK(resultDict)
def _getFileDirectories(self, lfns):
""" For a list of lfn, returns a dictionary with key the directory, and value
the files in that directory. It does not make any query, just splits the names
:param lfns: list of lfns
:type lfns: python:list
"""
dirDict = {}
for lfn in lfns:
lfnDir = os.path.dirname(lfn)
lfnFile = os.path.basename(lfn)
dirDict.setdefault(lfnDir, [])
dirDict[lfnDir].append(lfnFile)
return dirDict
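# Illustrative sketch (added): a purely lexical split, no catalog query, e.g.
#
#   _getFileDirectories(['/a/b/f1', '/a/b/f2', '/a/c/f3'])
#   # -> {'/a/b': ['f1', 'f2'], '/a/c': ['f3']}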
def _checkInfo(self, info, requiredKeys):
if not info:
return S_ERROR("Missing parameters")
for key in requiredKeys:
if key not in info:
return S_ERROR("Missing '%s' parameter" % key)
return S_OK()
# def _checkLFNPFNConvention( self, lfn, pfn, se ):
# """ Check that the PFN corresponds to the LFN-PFN convention """
# if pfn == lfn:
# return S_OK()
# if ( len( pfn ) < len( lfn ) ) or ( pfn[-len( lfn ):] != lfn ) :
# return S_ERROR( 'PFN does not correspond to the LFN convention' )
# return S_OK()
def changeFileGroup(self, lfns):
""" Get set the group for the supplied files
:param lfns: dictionary < lfn : group >
:param int/str newGroup: optional new group/groupID the same for all the supplied lfns
"""
res = self._findFiles(lfns, ['FileID', 'GID'])
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful'].keys():
group = lfns[lfn]
if isinstance(group, basestring):
groupRes = self.db.ugManager.findGroup(group)
if not groupRes['OK']:
return groupRes
group = groupRes['Value']
currentGroup = res['Value']['Successful'][lfn]['GID']
if int(group) == int(currentGroup):
successful[lfn] = True
else:
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileParameter(fileID, "GID", group)
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def changeFileOwner(self, lfns):
""" Set the owner for the supplied files
:param lfns: dictionary < lfn : owner >, where owner may be a user name or a numeric UID
"""
res = self._findFiles(lfns, ['FileID', 'UID'])
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful'].keys():
owner = lfns[lfn]
if isinstance(owner, basestring):
userRes = self.db.ugManager.findUser(owner)
if not userRes['OK']:
return userRes
owner = userRes['Value']
currentOwner = res['Value']['Successful'][lfn]['UID']
if int(owner) == int(currentOwner):
successful[lfn] = True
else:
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileParameter(fileID, "UID", owner)
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def changeFileMode(self, lfns):
"""" Set the mode for the supplied files
:param lfns: dictionary < lfn : mode >
:param int newMode: optional new mode the same for all the supplied lfns
"""
res = self._findFiles(lfns, ['FileID', 'Mode'])
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful'].keys():
mode = lfns[lfn]
currentMode = res['Value']['Successful'][lfn]['Mode']
if int(currentMode) == int(mode):
successful[lfn] = True
else:
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileParameter(fileID, "Mode", mode)
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def setFileOwner(self, path, owner):
""" Set the file owner
:param path: file path as a string or int or list of ints or select statement
:type path: str, int or python:list[int]
:param owner: new user as a string or int uid
:type owner: str or int
"""
result = self.db.ugManager.findUser(owner)
if not result['OK']:
return result
uid = result['Value']
return self._setFileParameter(path, 'UID', uid)
def setFileGroup(self, path, gname):
""" Set the file group
:param path: file path as a string or int or list of ints or select statement
:type path: str, int or python:list[int]
:param gname: new group as a string or int gid
:type gname: str or int
"""
result = self.db.ugManager.findGroup(gname)
if not result['OK']:
return result
gid = result['Value']
return self._setFileParameter(path, 'GID', gid)
def setFileMode(self, path, mode):
""" Set the file mode
:param path: file path as a string or int or list of ints or select statement
:type path: str, int or python:list[int]
:param int mode: new mode
"""
return self._setFileParameter(path, 'Mode', mode)
def getSEDump(self, seName):
"""
Return all the files at a given SE, together with checksum and size
:param seName: name of the StorageElement
:returns: S_OK with list of tuples (lfn, checksum, size)
"""
return S_ERROR("To be implemented on derived class")
|
fstagni/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/FileManagerBase.py
|
Python
|
gpl-3.0
| 49,156
|
[
"DIRAC"
] |
7d57997e006659e8f219729d2e8aca6945eeb9864b74c62e39acb99799df401e
|
import os
import sys
from threading import Thread
from pyelectro import analysis
import numpy
import math
import pprint
pp = pprint.PrettyPrinter(indent=4)
def alpha_normalised_cost_function(value,target,base=10):
"""Fitness of a value-target pair from 0 to 1
.. warning::
I've found that this cost function is producing some odd behaviour.
It is best avoided until this is investigated
For any value/target pair this gives a normalised measure of
agreement: 1 is a complete value-target match and 0 is no match.
A mirrored exponential function is used.
The fitness is given by the expression :math:`fitness = base^{-x}`
where:
.. math::
x = \left(\dfrac{value-target}{target + 0.01}\right)^2
:param value: value measured
:param target: the target value
:param base: the value 'base' in the above mathematical expression for x
:return: fitness - a real number from 0 to 1
"""
value = float(value)
target = float(target)
x=((value-target)/(target+0.01))**2 #the 0.01 thing is a bit of a hack at the moment.
fitness=base**(-x)
return fitness
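# Worked example (added): with base=10 a perfect match gives x=0 and fitness
# 10**0 = 1.0, while value=2, target=1 gives x = (1/1.01)**2 ~ 0.980 and
# fitness ~ 10**-0.980 ~ 0.105, so fitness decays quickly away from the target.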
def normalised_cost_function(value,target,Q=None):
""" Returns fitness of a value-target pair from 0 to 1
For any value/target pair this gives a normalised measure of
agreement: 0 is a complete value-target match and 1 is "no" match.
If no Q is assigned, it is set such that it satisfies the condition
fitness=0.7 when (target-value)=10*target. This is essentially
empirical and seems to work. Mathematical derivation is on Mike Vella's
Lab Book 1 p.42 (page dated 15/12/11).
:param value: value measured
:param target: the target value
:param Q: This is the sharpness of the cost function, higher values correspond
to a sharper cost function. A high Q-Value may lead an optimizer to a solution
quickly once it nears the solution.
:return: fitness value from 0 to 1
"""
value = float(value)
target = float(target)
if Q is None:
if target != 0:
Q=7/(300*(target**2))
else:
Q=0.023333 # PG: Gives fitness = 0.023333 when value = 1; fitness = 0.7 when value = 10
fitness=1-1/(Q*(target-value)**2+1)
return fitness
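# Worked example (added): for target=1 the default Q is 7/300 ~ 0.02333, so
# value=11 (i.e. target-value = 10*target) gives
# fitness = 1 - 1/(0.02333*100 + 1) = 1 - 1/3.333 = 0.7, matching the
# condition described in the docstring.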
class __CandidateData(object):
"""Container for information about a candidate (chromosome)"""
def __init__(self,chromosome):
self.chromosome=chromosome
def set_dbpath(self,dbpath):
self.dbpath=dbpath
def set_exp_id(self,exp_id):
self.exp_id=exp_id
def set_job_num(self,job_num):
self.job_num=job_num
class __Evaluator(object):
"""Base class for Evaluators"""
def __init__(self,parameters,weights,targets,controller):
self.parameters=parameters
self.weights=weights
self.targets=targets
self.controller=controller
'''
PG: Disabling these until they're tested again...
class __CondorContext(object):
"""manager for dealing with a condor-based grid"""
def __split_list(self,alist, wanted_parts=1):
length = len(alist)
return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts]
for i in range(wanted_parts) ]
def __prepare_candidates(self,candidates,candidates_per_job=1):
#Split candidate list into smaller ones (jobs):
#and make a job list
if optimizer_params.candidates_in_job != None:
candidates_in_job=optimizer_params.candidates_in_job
else:
candidates_in_job=candidates_per_job
num_candidates=len(candidates)
ids=range(num_candidates)
enumerated_candidates=zip(candidates,ids)
num_jobs=num_candidates/candidates_in_job
self.num_jobs=num_jobs
self.job_list=self.__split_list(enumerated_candidates,wanted_parts=self.num_jobs)
def __make_job_file(self,job,job_number):
#write the header:
filepath = os.path.join(self.tmpdir, 'run' + str(job_number) + '.sh')
run_shell = open(filepath, 'w')
run_shell.write('#!/bin/bash\n')
run_shell.write('reldir=`dirname $0`\n')
run_shell.write('cd $reldir\n')
run_shell.write('directory=`pwd`\n')
run_shell.write('pndirectory=$directory\n')
run_shell.write('#Untar the file:\n')
run_shell.write('/bin/tar xzf ./portable-neuron.tar.gz\n')
tarfile_name=optimizer_params.tarred_nrnproj
run_shell.write('/bin/tar xzf ./'+tarfile_name+'\n')
#CandidateData_list=[]
for enumerated_candidate in job:
chromosome = enumerated_candidate[0]
candidate_info = CandidateData(chromosome)
exp_id = enumerated_candidate[1]
candidate_info.set_exp_id(exp_id)
candidate_info.set_job_num(job_number)
self.CandidateData_list.append(candidate_info)
nproj = controllers.NrnProjSimRun(optimizer_params.project_path, chromosome)
run_shell.write('#issue the commands\n')
run_shell.write('$pndirectory/pnpython.sh \
$directory/src/simrunner.py "sim_var[\'exp_id\'] \
= ' + str(exp_id) + '\" ' + '"sim_var[\'''dbname''\'] \
= \'outputdb' + str(job_number) + '.sqlite\'"' +
nproj.sim_var_string + '\n')
run_shell.write('echo \'done\'\n')
run_shell.write('cp $directory/sims/outputdb' + str(job_number) + '.sqlite $directory\n')
#self.CandidateData_list=CandidateData_list
run_shell.close()
def __make_submit_file(self):
#now we write the submit file
filepath = os.path.join(self.tmpdir, 'submitfile.submit')
submit_file=open(filepath,'w')
submit_file.write('universe = vanilla\n')
submit_file.write('log = pneuron.log\n')
submit_file.write('Error = err.$(Process)\n')
submit_file.write('Output = out.$(Process)\n')
submit_file.write('requirements = GLIBC == "2.11"\n')
tarfile_name=optimizer_params.tarred_nrnproj
submit_file.write('transfer_input_files = portable-neuron.tar.gz,'+tarfile_name+'\n')
submit_file.write('should_transfer_files = yes\n')
submit_file.write('when_to_transfer_output = on_exit_or_evict\n')
#this is where you have to do the clever stuff:
for shellno in range(self.num_jobs):
submit_file.write('executable = run'+str(shellno)+'.sh\n')
submit_file.write('queue\n')
#finally close the submit file
submit_file.close()
def __build_condor_files(self,candidates,candidates_per_job=100):
#prepare list of candidates to be farmed on grid:
self.__prepare_candidates(candidates,candidates_per_job=100)
#make the job files (shell scripts to be executed on the execute nodes)
job_number=0 #run shell script number
for job in self.job_list:
self.__make_job_file(job,job_number)
job_number+=1
#now make the submit file
self.__make_submit_file()
def __delete_remote_files(self,host):
import ssh_utils
command='rm -rf ./*'
ssh_utils.issue_command(host, command)
def __put_multiple_files(self,host,filelist,localdir='/',remotedir='/'):
import ssh_utils
for file in filelist:
localpath=os.path.join(localdir,file)
remotepath=os.path.join(remotedir,file)
ssh_utils.put_file(host,localpath,remotepath)
'''
class DumbEvaluator(__Evaluator):
"""
The simulations themselves report their fitness. The evaluator
just reads them from a file. Requires the appropriate controller.
"""
def __init__(self,controller,fitness_filename_prefix,threads_number=1):
self.controller = controller
self.fitness_filename_prefix = fitness_filename_prefix
self.threads_number = threads_number
def evaluate(self,candidates,args):
threads_number = int(self.threads_number)
candidates_per_thread = (len(candidates)) / threads_number
remainder_candidates = len(candidates) % threads_number
chunk_begin = 0
chunk_end = candidates_per_thread
if remainder_candidates != 0:
chunk_end += 1
threads = []
try:
for i in range(0, threads_number):
#if fitness file exists need to destroy it:
file_name = self.fitness_filename_prefix + str(i)
if os.path.exists(file_name):
os.remove(file_name)
#run the candidates:
candidate_section=candidates[chunk_begin:chunk_end]
threads.append(Thread(target=self.controller.run, args=(candidate_section,args,file_name,)))
threads[i].daemon=True
threads[i].start()
chunk_begin = chunk_end
chunk_end += candidates_per_thread
if i < (remainder_candidates - 1):
chunk_end += 1
fitness = []
for i in range(0, threads_number):
# we should let the main thread handle keyboard interrupts
while True:
threads[i].join(1)
if not threads[i].isAlive():
break
#get their fitness from the file
file_name = self.fitness_filename_prefix + str(i)
threads[i].join()
fitness = fitness + [float(line) for line in open(file_name).readlines()]
os.remove(file_name)
except (KeyboardInterrupt, SystemExit):
sys.exit("Interrupted by ctrl+c\n")
return fitness
class IClampEvaluator(__Evaluator):
"""
Locally-evaluates (not using cluster or grid computing) a model.
The evaluate routine runs the model and returns its fitness value
"""
def __init__(self,
analysis_start_time,
controller,
analysis_end_time,
target_data_path,
parameters,
analysis_var,
weights,
targets=None,
automatic=False):
super(IClampEvaluator, self).__init__(parameters,
weights,
targets,
controller)
self.analysis_start_time=analysis_start_time
self.analysis_end_time=analysis_end_time
self.target_data_path=target_data_path
self.analysis_var=analysis_var
print('target data path in evaluator:'+target_data_path)
if automatic == True:
t , v_raw = analysis.load_csv_data(target_data_path)
v = numpy.array(v_raw)
v_smooth = list(analysis.smooth(v))
ic_analysis = analysis.IClampAnalysis(v_smooth,
t,
analysis_var,
start_analysis=analysis_start_time,
end_analysis=analysis_end_time)
ic_analysis.analyse()
self.targets = ic_analysis.analysis_results
print('Obtained targets are:')
print(self.targets)
def evaluate(self,candidates,args):
print("\n>>>>> Evaluating: ")
for cand in candidates: print(">>>>> %s"%cand)
simulations_data = self.controller.run(candidates,
self.parameters)
fitness = []
for data in simulations_data:
times = data[0]
samples = data[1]
data_analysis=analysis.IClampAnalysis(samples,
times,
self.analysis_var,
start_analysis=self.analysis_start_time,
end_analysis=self.analysis_end_time,
target_data_path=self.target_data_path)
try:
data_analysis.analyse()
except:
data_analysis.analysable_data = False
fitness_value = self.evaluate_fitness(data_analysis,
self.targets,
self.weights,
cost_function=analysis.normalised_cost_function)
fitness.append(fitness_value)
print('Fitness: %s\n'%fitness_value)
return fitness
def evaluate_fitness(self,
data_analysis,
target_dict={},
target_weights=None,
cost_function=normalised_cost_function):
"""
Return the estimated fitness of the data, based on the cost function being used.
:param data_analysis: IClampAnalysis instance
:param target_dict: key-value pairs for targets
:param target_weights: key-value pairs for target weights
:param cost_function: cost function (callback) to assign individual targets sub-fitness.
"""
#calculate max fitness value (TODO: there may be a more pythonic way to do this..)
worst_cumulative_fitness=0
for target in target_dict.keys():
if target_weights == None:
target_weight = 1
else:
if target in target_weights.keys():
target_weight = target_weights[target]
else:
target_weight = 1.0
worst_cumulative_fitness += target_weight
#if we have 1 or 0 peaks we won't conduct any analysis
if data_analysis.analysable_data == False:
print('Data is non-analysable')
return worst_cumulative_fitness
else:
fitness = 0
for target in target_dict.keys():
target_value=target_dict[target]
if target_weights == None:
target_weight = 1
else:
if target in target_weights.keys():
target_weight = target_weights[target]
else:
target_weight = 1.0
if target_weight > 0:
value = data_analysis.analysis_results[target]
#let function pick Q automatically
inc = target_weight*cost_function(value,target_value)
fitness += inc
print('Target %s (weight %s): target val: %s, actual: %s, fitness increment: %s'%(target, target_weight, target_value, value, inc))
return fitness
class NetworkEvaluator(__Evaluator):
"""
Locally-evaluates (not using cluster or grid computing) a model.
The evaluate routine runs the model and returns its fitness value
"""
def __init__(self,
analysis_start_time,
controller,
analysis_end_time,
parameters,
analysis_var,
weights,
targets=None):
super(NetworkEvaluator, self).__init__(parameters,
weights,
targets,
controller)
self.analysis_start_time=analysis_start_time
self.analysis_end_time=analysis_end_time
self.analysis_var=analysis_var
self.targets=targets
def evaluate(self,candidates,args):
print("\n>>>>> Evaluating: ")
for cand in candidates: print(">>>>> %s"%cand)
simulations_data = self.controller.run(candidates,
self.parameters)
fitness = []
for data in simulations_data:
times = data[0]
volts = data[1]
data_analysis=analysis.NetworkAnalysis(volts,
times,
self.analysis_var,
start_analysis=self.analysis_start_time,
end_analysis=self.analysis_end_time)
data_analysis.analyse(self.targets)
fitness_value = self.evaluate_fitness(data_analysis,
self.targets,
self.weights,
cost_function=normalised_cost_function)
fitness.append(fitness_value)
print('Fitness: %s\n'%fitness_value)
return fitness
def evaluate_fitness(self,
data_analysis,
target_dict={},
target_weights=None,
cost_function=normalised_cost_function):
"""
Return the estimated fitness of the data, based on the cost function being used.
:param data_analysis: NetworkAnalysis instance
:param target_dict: key-value pairs for targets
:param target_weights: key-value pairs for target weights
:param cost_function: cost function (callback) to assign individual targets sub-fitness.
"""
fitness = 0
for target in target_dict.keys():
target_value=target_dict[target]
if target_weights == None:
target_weight = 1
else:
if target in target_weights.keys():
target_weight = target_weights[target]
else:
target_weight = 0 # If it's not mentioned, assume weight = 0!
if target_weight > 0:
inc = target_weight # default...
if target in data_analysis.analysis_results:
value = data_analysis.analysis_results[target]
if not math.isnan(value):
#let function pick Q automatically
inc = target_weight*cost_function(value,target_value)
else:
value = '<<NaN value!>>'
inc = target_weight
else:
value = '<<cannot be calculated! (only: %s)>>'%data_analysis.analysis_results.keys()
fitness += inc
print('Target %s (weight %s): target val: %s, actual: %s, fitness increment: %s'%(target, target_weight, target_value, value, inc))
return fitness
'''
class IClampCondorEvaluator(IClampEvaluator):
"""
Evaluate simulations and return their fitness on a condor grid.
Tested and known to work on CamGrid
(http://www.escience.cam.ac.uk/projects/camgrid/)
WARNING:
this entire class should now be considered obsolete, the evaluator
is just an IClampEvaluator and everything here that is different
from that class needs to become its own controller
"""
def __init__(self,local_analysis=False):
super(IClampCondorEvaluator,self).__init__()
#other things like the number of nodes to divide the work onto and
#host connection parameters need to go into this constructor
if local_analysis:
self.evaluate=self.__local_evaluate
else:
self.evaluate=self.__condor_evaluate
def __condor_evaluate(self,candidates,args):
"""
Run simulations on grid and analyse data locally
WARNING: (???I'm quite confused here...there is a mistake somewhere
as the name doesn't match the description - which method is which?)
Once each generation has finished, all data is pulled to local
workstation in form of sqlite databases (1 database per job)
and these are analysed and the fitness estimated sequentially
the fitness array is then returned.
"""
import time
import ssh_utils
self.CandidateData_list=[]
self.__build_condor_files(candidates) #Build submit and runx.sh files, exp_id now corresponds to position in chromosome and fitness arrays
messagehost=ssh_utils.host(optimizer_params.host,optimizer_params.username,optimizer_params.password,optimizer_params.port)
self.__delete_remote_files(messagehost)#delete everything in the directory you're about to put files in
filelist=os.listdir(self.tmpdir)
self.__put_multiple_files(messagehost,filelist,localdir=self.tmpdir,remotedir=optimizer_params.remotedir)#copy local files over
filelist=os.listdir(self.portableswdir)
self.__put_multiple_files(messagehost,filelist,localdir=self.portableswdir,remotedir=optimizer_params.remotedir)#copy local files over
ssh_utils.issue_command(messagehost,'export PATH=/opt/Condor/release/bin:$PATH\ncondor_submit submitfile.submit')
self.jobdbnames=[]
for job_num in range(self.num_jobs): #make a list of the databases we need:
jobdbname='outputdb'+str(job_num)+'.sqlite'
self.jobdbnames.append(jobdbname)
#wait till you know file exists:
dbs_created=False
pulled_dbs=[] # list of databases which have been extracted from remote server
while (dbs_created==False):
print('waiting..')
time.sleep(20)
print('checking if dbs created:')
command='ls'
remote_filelist=ssh_utils.issue_command(messagehost, command)
for jobdbname in self.jobdbnames:
db_exists=jobdbname+'\n' in remote_filelist
if (db_exists==False):
print(jobdbname +' has not been generated')
dbs_created=False
elif db_exists==True and jobdbname not in pulled_dbs:
print(jobdbname +' has been generated')
remotefile=optimizer_params.remotedir+jobdbname
localpath=os.path.join(self.datadir,str(self.generation)+jobdbname)
ssh_utils.get_file(messagehost,remotefile,localpath)
pulled_dbs.append(jobdbname) #so that it is not extracted more than once
#here pop-in the fitness evaluation
if len(pulled_dbs)==len(self.jobdbnames):
dbs_created=True
fitness=[]
for CandidateData in self.CandidateData_list:
job_num = CandidateData.job_num
dbname=str(self.generation)+'outputdb'+str(job_num)+'.sqlite'
dbpath=os.path.join(self.datadir,dbname)
exp_id=CandidateData.exp_id
connection=sqldbutils.db_connect(dbpath) #establish a database connection
query='SELECT numerical_value\
FROM output_params WHERE experiment_id=\
'+str(exp_id)+' AND parameter="fitness"'
exp_fitness=sqldbutils.execute_query(connection,query)
exp_fitness=exp_fitness.fetchall()
exp_fitness=exp_fitness[0][0]
#print('fitness: %s'%exp_fitness)
fitness.append(exp_fitness)
self.generation+=1
return fitness
def __local_evaluate(self,candidates,args):
import time
self.CandidateData_list=[]
analysis_var=self.analysis_var
#Build submitfile.submit and runx.sh files:
self.__build_condor_files(candidates) #exp_id now corresponds to position in chromosome/fitness array
fitness=[]
#submit the jobs to the grid
os.chdir(self.tmpdir)
os.system('condor_submit submitfile.submit')
#wait till you know file exists:
dbs_created=False
while (dbs_created==False):
print('checking if dbs created:')
for job_num in range(self.num_jobs):
jobdbname='outputdb'+str(job_num)+'.sqlite'
jobdbpath=os.path.join(self.datadir,jobdbname)
print(jobdbpath)
db_exists=os.path.exists(jobdbpath)
if (db_exists==False):
time.sleep(60)
dbs_created=False
break
dbs_created=True
for CandidateData in self.CandidateData_list:
job_num = CandidateData.job_num
dbname='/outputdb'+str(job_num)+'.sqlite'
dbpath=self.datadir+dbname
exp_id=CandidateData.exp_id
exp_data=sqldbutils.sim_data(dbpath,exp_id)
data_analysis=analysis.IClampAnalysis(exp_data.samples,exp_data.t,analysis_var,5000,10000)
exp_fitness=data_analysis.evaluate_fitness(optimizer_params.targets,optimizer_params.weights,cost_function=analysis.normalised_cost_function)
fitness.append(exp_fitness)
for job_num in range(self.num_jobs):
jobdbname='outputdb'+str(job_num)+'.sqlite'
jobdbpath=os.path.join(self.datadir,jobdbname)
print(jobdbpath)
os.remove(jobdbpath)
return fitness
'''
class PointBasedAnalysis(object):
def __init__(self, v, t):
self.v = numpy.array(v)
self.t = numpy.array(t)
def analyse(self, targets):
analysis_results = {}
for target in targets:
target_time = float(target.split('_')[1])
value = self.v[0]
i = 0
while i < len(self.t) and self.t[i] < target_time:
value = self.v[i]
i += 1
analysis_results[target] = value
return analysis_results
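#Usage sketch (hypothetical target name): each target is a string whose second
#'_'-separated token is a time, so 'v_100.0' samples the trace just before t=100.0:
#  pba = PointBasedAnalysis(v, t)
#  results = pba.analyse(['v_100.0'])  # -> {'v_100.0': last v recorded before t=100.0}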
class PointValueEvaluator(__Evaluator):
"""
Locally-evaluates (not using cluster or grid computing) a model.
The evaluate routine runs the model and returns its fitness value
"""
def __init__(self,
controller,
parameters,
weights,
targets=None):
super(PointValueEvaluator, self).__init__(parameters,
weights,
targets,
controller)
def evaluate(self,candidates,args):
print("\n>>>>> Evaluating: ")
for cand in candidates: print(">>>>> %s"%cand)
simulations_data = self.controller.run(candidates,
self.parameters)
fitness = []
for data in simulations_data:
times = data[0]
samples = data[1]
data_analysis = PointBasedAnalysis(samples,
times)
fitness_value = self.evaluate_fitness(data_analysis,
self.targets,
self.weights)
fitness.append(fitness_value)
print('Fitness: %s\n'%fitness_value)
return fitness
def evaluate_fitness(self,
data_analysis,
target_dict={},
target_weights=None,
cost_function=normalised_cost_function):
"""
Return the estimated fitness of the data, based on the cost function being used.
:param data_analysis: PointBasedAnalysis instance
:param target_dict: key-value pairs for targets
:param target_weights: key-value pairs for target weights
:param cost_function: cost function (callback) to assign individual targets sub-fitness.
"""
fitness = 0
analysed = data_analysis.analyse(target_dict)
for target in target_dict.keys():
target_value=target_dict[target]
if target_weights == None:
target_weight = 1
else:
if target in target_weights.keys():
target_weight = target_weights[target]
else:
target_weight = 1.0
if target_weight > 0:
#let function pick Q automatically
inc = target_weight * cost_function(analysed[target], target_value)
fitness += inc
print('Target %s (weight %s): target val: %s, actual: %s, fitness increment: %s'%(target, target_weight, target_value, analysed[target], inc))
return fitness
|
pgleeson/neurotune
|
neurotune/evaluators.py
|
Python
|
bsd-3-clause
| 29,790
|
[
"NEURON"
] |
1b56d2ac7a654a41d9917013244a726ab75f1d100bae428f8e420dcbfd068c7c
|
import math
import numpy as np
from scipy.cluster.vq import kmeans2
from groupmembers import print_group_members
from plots import plots
def main():
print_group_members()
""" The below line of code is needed to calculate the optimal hyperparameters for a given range. Set the range
values inside the function in the file parametertuning """
# compute_optimal_hyperparameters(training_input, training_target, validation_input, validation_target)
""" the hyperparameters are obtained after computing the search on range of M (1 to 100) and lambda (0 to 1, 0.1)"""
model_complexity = 56
regularizer_lambda = 0
compute_linear_reg_letor(model_complexity, regularizer_lambda)
model_complexity = 33
regularizer_lambda = 0
compute_linear_reg_synthetic_data(model_complexity, regularizer_lambda)
"""
:description computes the weights using closed form solution and SGD to find the outpute y. The data used for this is
LeToR dataset provided by microsoft for research.
:returns none
"""
def compute_linear_reg_letor(model_complexity, regularizer_lambda):
# the input data file does not contain the reference label (our target), nor the qid (column 2);
# basically the file contains the cleaned data: only the feature values
letor_input = np.genfromtxt('Querylevelnorm_X.csv', delimiter=',')
global num_of_observations, num_of_features
num_of_observations, num_of_features = letor_input.shape
# the target datafile contains the reference label, which is our target value and which determines the relationship
# between query and document. The higher the value, the better the relation.
letor_target = np.genfromtxt('Querylevelnorm_t.csv', delimiter=',')
partition_data(letor_input, letor_target)
compute_display_result(model_complexity, regularizer_lambda)
"""
:description computes the weights using closed form solution and SGD to find the outpute y. The data used for this is
synthetically generated.
:returns none
"""
def compute_linear_reg_synthetic_data(model_complexity, regularizer_lambda):
synthetic_input = np.genfromtxt('input.csv', delimiter=',')
global num_of_observations, num_of_features
num_of_observations, num_of_features = synthetic_input.shape
synthetic_target = np.genfromtxt('output.csv', delimiter=',')
partition_data(synthetic_input, synthetic_target)
compute_display_result(model_complexity, regularizer_lambda)
"""
:description the function partitions the data into training, validation and test. Global variables are used to save the
partitioned data
:returns none
"""
def partition_data(input_data, target_data):
training_data_row_limit = math.floor(0.8 * num_of_observations)
validation_data_row_limit = training_data_row_limit + math.floor(0.1 * num_of_observations)
global training_input, training_target, validation_input, validation_target, test_input, test_target
training_input = input_data[:training_data_row_limit]
training_target = target_data[:training_data_row_limit]
validation_input = input_data[training_data_row_limit: validation_data_row_limit]
validation_target = target_data[training_data_row_limit: validation_data_row_limit]
test_input = input_data[validation_data_row_limit: num_of_observations]
test_target = target_data[validation_data_row_limit: num_of_observations]
"""
:description: the function generates the variance of the clustered data
:returns variance matrix
"""
def get_inverse_covariance_matrix(k_clusters, label):
cluster_points = [[]] * k_clusters
cluster_variance = [[]] * k_clusters
for i in range(k_clusters):
cluster_points[i] = (training_input[np.where(label == i)])
cluster_variance[i] = np.linalg.pinv(np.identity(num_of_features) * np.var(cluster_points[i], axis=0, ddof=1))
return np.array(cluster_variance)
"""
:description the function computes the gaussian radial basis function for each parameter vector
:returns design matrix of order N * (M + 1)
"""
def compute_design_matrix(input_data, cluster_centers, spread):
cluster_centers = np.array([cluster_centers]).T
broadcast = np.broadcast(input_data.T, cluster_centers)
yj = np.empty(broadcast.shape)
yj.flat = [x - u for (x, u) in broadcast]
design_matrix = np.exp(
np.sum(
(np.einsum('ndm,mdd->ndm', yj.T, spread) * yj.T), axis=1)
/ (-2)
)
design_matrix = np.insert(design_matrix, 0, 1, axis=1)
return design_matrix
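# Shape sketch (illustrative): for N observations and M cluster centres the
# returned design matrix is N x (M + 1) -- one Gaussian RBF column per centre
# plus the leading bias column of ones, e.g.
#   phi = compute_design_matrix(training_input, centroids.T, inv_cov)  # (N, M + 1)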
"""
:description the function calculates the weights using the closed form solution.
the regularizer term is used to minimize the overfitting issue
:parameter regularizer lambda, design matrix, target data
:returns the weights calculated
"""
def closed_form_solution(design_matrix, target_data, regularizer_lambda):
first_term = np.dot(regularizer_lambda, np.identity(len(design_matrix[0])))
second_term = np.matmul(design_matrix.T, design_matrix)
third_term = np.matmul(design_matrix.T, target_data)
weights = np.linalg.solve(first_term + second_term, third_term).flatten()
return weights
"""
:description compute the root mean squared error of the data
:returns root mean squared error
"""
def compute_sum_of_squared_error(design_matrix, target_data, weights, regularizer_lambda):
error_term = np.sum(np.square(target_data - np.matmul(design_matrix, weights))) / 2
regularized_term = (np.matmul(weights.T, weights) * regularizer_lambda) / 2
sum_of_squares_error = error_term + regularized_term
e_rms = np.sqrt(2 * sum_of_squares_error / len(design_matrix))
return e_rms
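# Note: despite its name, the function above returns the root-mean-square error
# E_RMS = sqrt(2 * E(w) / N), where E(w) is the regularized sum-of-squares error
# and N is the number of rows in the design matrix.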
"""
:description computes the differentiation error
:return gradient error
"""
def compute_gradient_error(design_matrix, target_data, weights, regularizer_lambda):
yj = np.matmul(design_matrix, weights.T)
difference = (yj - target_data).T
e_d = np.matmul(difference, design_matrix)
differentiation_error = (e_d + regularizer_lambda * weights) / mini_batch_size
return differentiation_error
"""
:description compute the weights using the gradient descent method with early stopping implemented
:returns weights calculated using stochastic gradient descent
"""
def compute_SGD(design_matrix, validation_design_matrix, regularizer_lambda):
N, _ = design_matrix.shape
patience = 25 # this is our patience level!
min_validation_error = np.inf
weights = np.zeros(design_matrix.shape[1])
optimal_weights = np.zeros(design_matrix.shape[1])
j = 0
steps = int(N / mini_batch_size)
for epoch in range(num_epochs):
while j < patience:
for i in range(steps):
lower_bound = i * mini_batch_size
upper_bound = min((i + 1) * mini_batch_size, N)
phi = design_matrix[lower_bound:upper_bound, :]
t = training_target[lower_bound: upper_bound]
differentiation_error = compute_gradient_error(phi, t, weights, regularizer_lambda)
weights = weights - learning_rate * differentiation_error
validation_error_rms = compute_sum_of_squared_error(validation_design_matrix, validation_target, weights,
regularizer_lambda)
if validation_error_rms < min_validation_error:
j = 0
min_validation_error = validation_error_rms
optimal_weights = weights
else:
j = j + 1
return optimal_weights
"""
:description computes the weights and design matrix and then eventually the root mean squared error
:returns none
"""
def compute_display_result(model_complexity, regularizer_lambda):
centroids, label = kmeans2(training_input, model_complexity, minit='points')
inv_covariance_matrix = get_inverse_covariance_matrix(model_complexity, label)
design_matrix = compute_design_matrix(training_input, centroids.T, inv_covariance_matrix)
closed_form_weights = closed_form_solution(design_matrix, training_target, regularizer_lambda)
# Gradient Descent Section
validation_design_matrix = compute_design_matrix(validation_input, centroids.T, inv_covariance_matrix)
sgd_weights = compute_SGD(design_matrix, validation_design_matrix, regularizer_lambda)
# Gradient Descent Section
compute_test_result(centroids, inv_covariance_matrix, closed_form_weights, sgd_weights, regularizer_lambda)
compute_validation_result(centroids, inv_covariance_matrix, closed_form_weights, sgd_weights, regularizer_lambda)
compute_training_result(centroids, inv_covariance_matrix, closed_form_weights, sgd_weights, regularizer_lambda)
def compute_test_result(centroids, inverse_covariance_matrix, closed_form_weights, sgd_weights, regularizer_lambda):
test_design_matrix = compute_design_matrix(test_input, centroids.T, inverse_covariance_matrix)
test_error_rms_cf = compute_sum_of_squared_error(test_design_matrix, test_target, closed_form_weights,
regularizer_lambda)
print('Test closed form weights: ', closed_form_weights)
print('Test Error rms (closed form weights): ', test_error_rms_cf)
test_error_rms_sgd = compute_sum_of_squared_error(test_design_matrix, test_target, sgd_weights, regularizer_lambda)
print('Test SGD weights: ', sgd_weights)
print('Test Error rms (stochastic gradient descent): ', test_error_rms_sgd)
y_closed_form = np.matmul(test_design_matrix, closed_form_weights)
y_sgd = np.matmul(test_design_matrix, sgd_weights)
# print(y_closed_form)
def compute_validation_result(centroids, inv_covariance_matrix, closed_form_weights, sgd_weights, regularizer_lambda):
validation_design_matrix = compute_design_matrix(validation_input, centroids.T, inv_covariance_matrix)
validation_error_rms_cf = compute_sum_of_squared_error(validation_design_matrix, validation_target, closed_form_weights,
regularizer_lambda)
print('Validation closed form weights: ', closed_form_weights)
print('Validation Error rms (closed form weights): ', validation_error_rms_cf)
validation_error_rms_sgd = compute_sum_of_squared_error(validation_design_matrix, validation_target, sgd_weights,
regularizer_lambda)
print('Validation SGD weights: ', sgd_weights)
print('Validation Error rms (stochastic gradient descent): ', validation_error_rms_sgd)
y_closed_form = np.matmul(validation_design_matrix, closed_form_weights)
y_sgd = np.matmul(validation_design_matrix, sgd_weights)
# print(y_closed_form)
def compute_training_result(centroids, inv_covariance_matrix, closed_form_weights, sgd_weights, regularizer_lambda):
training_design_matrix = compute_design_matrix(training_input, centroids.T, inv_covariance_matrix)
training_error_rms_cf = compute_sum_of_squared_error(training_design_matrix, training_target,
closed_form_weights, regularizer_lambda)
print('Training closed form weights: ', closed_form_weights)
print('Training Error rms (closed form weights): ', training_error_rms_cf)
training_error_rms_sgd = compute_sum_of_squared_error(training_design_matrix, training_target, sgd_weights,
regularizer_lambda)
print('Training SGD weights: ', sgd_weights)
print('Training Error rms (stochastic gradient descent): ', training_error_rms_sgd)
y_closed_form = np.matmul(training_design_matrix, closed_form_weights)
y_sgd = np.matmul(training_design_matrix, sgd_weights)
# print(y_closed_form)
"""
:description compute the optimal hyper-parameters for the range of model complexity and lambda
:returns the optimal model complexity (M) for Gaussian RBF
"""
def compute_optimal_hyperparameters(training_input, training_target, validation_input, validation_target):
k_clusters = 100
min_lambda = 0
max_lambda = 1
lambda_step_size = 0.1
optimal_M = np.inf
optimal_lambda = np.inf
optimal_error_rms = np.inf
error_rms_grid = np.zeros([k_clusters, int(max_lambda / lambda_step_size)])
test_lambda = np.arange(min_lambda, max_lambda, lambda_step_size)
for k_cluster in range(1, k_clusters):
centroids, label = kmeans2(training_input, k_cluster, minit='points')
inv_covariance_matrix = get_inverse_covariance_matrix(k_cluster, label)
design_matrix = compute_design_matrix(training_input, centroids.T, inv_covariance_matrix)
validation_design_matrix = compute_design_matrix(validation_input, centroids.T, inv_covariance_matrix)
for index, _lambda in enumerate(test_lambda):
closed_form_weights = closed_form_solution(design_matrix, training_target, _lambda)
error_rms = compute_sum_of_squared_error(validation_design_matrix, validation_target, closed_form_weights, _lambda)
error_rms_grid[k_cluster, index] = error_rms
print('Error for M = {0} and lambda = {1} is {2}'.format(k_cluster, _lambda, error_rms))
if error_rms < optimal_error_rms:
optimal_error_rms = error_rms
optimal_M = k_cluster
optimal_lambda = _lambda
print('Optimal model complexity (M) for M in 1 to {} is: {}'.format(k_clusters, optimal_M))
print('optimal regularizer constant lambda in {} to {} is: {}'.format(min_lambda, max_lambda, optimal_lambda))
if __name__ == '__main__':
# initialize common variables that are going to be used through-out
learning_rate = 1
num_epochs = 100
mini_batch_size = 500
num_of_observations, num_of_features = 0, 0
training_input = 0
training_target = 0
validation_input = None
validation_target = None
test_input = None
test_target = None
main()
plots()
|
kautuk-desai/CSE-574-Intro-to-Machine-Learning
|
project2/main.py
|
Python
|
mit
| 13,999
|
[
"Gaussian"
] |
080abe9ae828a5bcf1e47364055d8a31fc2e4fcb291a66cf0da017e8c497ff31
|
import vtk
import numpy as np
import os
import sys
from pyCellAnalyst import CellMech
import pickle
def writePoly(name, tri):
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName(name)
writer.SetInputData(tri.GetOutput())
writer.Write()
def writeSTL(name, tri):
writer = vtk.vtkSTLWriter()
writer.SetFileName(name)
writer.SetInputData(tri.GetOutput())
writer.Write()
try:
os.mkdir("MaterialCase")
except:
pass
try:
os.mkdir("SpatialCase")
except:
pass
if len(sys.argv) == 1:
seed = np.random.randint(0, sys.maxint)
print("New seed generated for RNG: {:d}".format(seed))
elif len(sys.argv) == 2:
seed = int(sys.argv[-1])
print("RNG seed specified by user: {:d}".format(seed))
else:
raise SystemExit(
"Too many arguments: expected 0 (generate new random seed) "
"or 1 (user-specified seed). Exiting...")
np.random.seed(seed)
N = 1000
truth = np.zeros((N, 3, 3), np.float64)
for i in xrange(N):
a = np.random.uniform(3.0, 5.0)
b = np.random.uniform(2.0, 3.0)
c = np.random.uniform(2.0, 3.0)
N1 = np.random.uniform(0.8, 1.2)
N2 = np.random.uniform(0.8, 1.2)
#principal stretches
lam1 = np.random.uniform(0.8, 1.2)
lam2 = np.random.uniform(0.8, 1.2)
lam3 = np.random.uniform(0.8, 1.2)
#Euler angles for rotation from the Cartesian basis.
#The aim is to randomly define an eigenbasis for the
#principal stretches that this rotation maps to the
#identity basis {e_i}, generating a U containing dilatation
#and shear with no risk of losing positive definiteness.
#Euler angle definition (extrinsic):
# alpha - rotation about reference z
# beta - rotation about reference x
# gamma - rotation about z
alpha = np.random.uniform(0, np.pi / 4.0)
beta = np.random.uniform(0, np.pi / 4.0)
gamma = np.random.uniform(0, np.pi / 4.0)
Q = np.zeros((3, 3), np.float64)
Q[0, 0] = np.cos(alpha) * np.cos(gamma) - np.cos(beta) * np.sin(alpha) * np.sin(gamma)
Q[0, 1] = -np.cos(alpha) * np.sin(gamma) - np.cos(beta) * np.cos(gamma) * np.sin(alpha)
Q[0, 2] = np.sin(alpha) * np.sin(beta)
Q[1, 0] = np.cos(gamma) * np.sin(alpha) + np.cos(alpha) * np.cos(beta) * np.sin(gamma)
Q[1, 1] = np.cos(alpha) * np.cos(beta) * np.cos(gamma) - np.sin(alpha) * np.sin(gamma)
Q[1, 2] = -np.cos(alpha) * np.sin(beta)
Q[2, 0] = np.sin(beta) * np.sin(gamma)
Q[2, 1] = np.cos(gamma) * np.sin(beta)
Q[2, 2] = np.cos(beta)
#$U = \sum_{i=1}^3 \lambda_i \mathbf{r}_i \otimes \mathbf{r}_i$
U = np.zeros((3, 3), np.float64)
l = [lam1, lam2, lam3]
for j in xrange(3):
r = np.dot(Q, np.eye(3)[:, j])
U += np.outer(l[j] * r, r)
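# U is symmetric positive definite by construction: it equals Q.diag(lam).Q^T
# with all principal stretches lam1, lam2, lam3 drawn positive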
ellipFunc = vtk.vtkParametricSuperEllipsoid()
ellipFunc.SetXRadius(a)
ellipFunc.SetYRadius(b)
ellipFunc.SetZRadius(c)
ellipFunc.SetN1(N1)
ellipFunc.SetN2(N2)
ellip = vtk.vtkParametricFunctionSource()
ellip.SetParametricFunction(ellipFunc)
ellip.SetUResolution(50)
ellip.SetVResolution(30)
ellip.SetWResolution(30)
ellip.SetScalarModeToNone()
ellip.Update()
d = np.pi / 15 * lam1 * a
resample = vtk.vtkPolyDataPointSampler()
resample.SetInputData(ellip.GetOutput())
resample.SetDistance(d)
resample.Update()
delaunay = vtk.vtkDelaunay3D()
delaunay.SetInputData(resample.GetOutput())
delaunay.Update()
geo = vtk.vtkGeometryFilter()
geo.SetInputData(delaunay.GetOutput())
geo.Update()
decim = vtk.vtkDecimatePro()
decim.SetInputData(geo.GetOutput())
decim.SetTargetReduction(.2)
decim.Update()
writeSTL("MaterialCase/cell{:04d}.stl".format(i + 1), decim)
transform = vtk.vtkTransform()
tmp = np.eye(4)
tmp[0:3, 0:3] = U
transform.SetMatrix(tmp.ravel())
transform.Update()
spatial = vtk.vtkTransformPolyDataFilter()
spatial.SetTransform(transform)
spatial.SetInputData(decim.GetOutput())
spatial.Update()
writeSTL("SpatialCase/cell{:04d}.stl".format(i + 1), spatial)
truth[i, :, :] = 0.5 * (np.dot(U.T, U) - np.eye(3))
mech = CellMech(
ref_dir="MaterialCase",
def_dir="SpatialCase",
rigidInitial=False,
deformable=False,
saveFEA=False,
display=False)
residual = np.array(mech.cell_strains) - truth
results = {"residual": residual,
"truth": truth}
fid = open("results.pkl", "wb")
pickle.dump(results, fid, 2)
fid.close()
|
siboles/pyCellAnalyst
|
src/testing/Homogeneous/homogeneous.py
|
Python
|
mit
| 4,450
|
[
"VTK"
] |
363c0009e1c72712fc374b31815dca80afb120d20723b20b208eed938bf2f503
|
#!/usr/bin/env python
from pyDFTutils.vasp.vasp_dos import plot_all_pdos
import argparse
def plot_pdos():
parser=argparse.ArgumentParser(description='Plot the partial dos')
parser.add_argument('-f','--filename',type=str,help='DOSCAR filename',default='DOSCAR')
parser.add_argument('-n','--ispin',type=int,help='number of spins, 1 or 2',default=2)
parser.add_argument('-o','--output',type=str,help='output dir',default='PDOS')
parser.add_argument('--xmin',type=float,help='xmin',default=-15.0)
parser.add_argument('--xmax',type=float,help='xmax',default=5.0)
parser.add_argument('--ymin',type=float,help='ymin',default=-2.0)
parser.add_argument('--ymax',type=float,help='ymax',default=2.0)
parser.add_argument('-s','--sites',type=str,help='sites',default=['s','p','eg','t2g'],nargs='+')
parser.add_argument('-e','--element',type=str,help='element',default=None,nargs='+')
args=parser.parse_args()
plot_all_pdos(element_types=args.element,filename=args.filename,ispin=args.ispin,ymin=args.ymin,ymax=args.ymax,xmin=args.xmin,xmax=args.xmax,output_dir=args.output,orbs=args.sites)
if __name__=='__main__':
plot_pdos()
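# Example invocation (hypothetical files/elements):
#   plotpdos.py -f DOSCAR -n 2 -e Fe O -s eg t2g --xmin -10 --xmax 5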
|
mailhexu/pyDFTutils
|
scripts/plotpdos.py
|
Python
|
lgpl-3.0
| 1,169
|
[
"VASP"
] |
76ebc2823e3245aa8acb9160024e6c970e7866acd9966c9d6fe079899738c1b9
|
"""Import basic exposure of libzmq C API as a backend"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .select import public_api, select_backend
try:
_ns = select_backend('zmq.backend.cython')
except ImportError:
_ns = select_backend('zmq.backend.cffi')
globals().update(_ns)
__all__ = public_api
|
skycucumber/Messaging-Gateway
|
webapp/venv/lib/python2.7/site-packages/zmq/backend/__init__.py
|
Python
|
gpl-2.0
| 827
|
[
"Brian"
] |
27b3c92a7edc79b068863466e1a8a6054fad6ac744ec1fb9502f0c095f9c2b5f
|
#!/usr/bin/python
#
# Regression test driver for cmd-line tools
#
# Usage: test_cmdline_tool.py [<options>] <tool> <arguments>
#
# If the -g option is given or the TEST_GENERATE environment variable is set to 1,
# *-expected.<suffix> files will be generated instead of running the tests.
#
# Any generated output is written to the file `basename <argument>`-actual.<suffix>
# Any warning or errors are written to stderr.
#
# The test is run with OPENSCAD_FONT_PATH set to the testdata/ttf directory. This
# should ensure we fetch the fonts from there even if they are also installed
# on the system. (E.g. the C glyph is actually different from Debian/Jessie
# installation and what we ship as Liberation-2.00.1).
#
# Returns 0 on passed test
# 1 on error
# 2 on invalid cmd-line options
#
# Author: Marius Kintel <marius@kintel.net>
#
import sys
import os
import glob
import subprocess
import re
import getopt
import shutil
import platform
import string
import difflib
#_debug_tcct = True
_debug_tcct = False
def debug(*args):
global _debug_tcct
if _debug_tcct:
print 'test_cmdline_tool:',
for a in args: print a,
print
def initialize_environment():
if not options.generate: options.generate = bool(os.getenv("TEST_GENERATE"))
return True
def init_expected_filename():
global expecteddir, expectedfilename # fixme - globals are hard to use
expected_testname = options.testname
if hasattr(options, "expecteddir"):
expected_dirname = options.expecteddir
else:
expected_dirname = expected_testname
expecteddir = os.path.join(options.regressiondir, expected_dirname)
expectedfilename = os.path.join(expecteddir, options.filename + "-expected." + options.suffix)
expectedfilename = os.path.normpath(expectedfilename)
def init_actual_filename():
global actualdir, actualfilename # fixme - globals are hard to use
cmdname = os.path.split(options.cmd)[1]
actualdir = os.path.join(os.getcwd(), options.testname + "-output")
actualfilename = os.path.join(actualdir, options.filename + "-actual." + options.suffix)
actualfilename = os.path.normpath(actualfilename)
def verify_test(testname, cmd):
global expectedfilename, actualfilename
if not options.generate:
if not os.path.isfile(expectedfilename):
print >> sys.stderr, "Error: test '%s' is missing expected output in %s" % (testname, expectedfilename)
# next 2 imgs parsed by test_pretty_print.py
print >> sys.stderr, ' actual image: ' + actualfilename + '\n'
print >> sys.stderr, ' expected image: ' + expectedfilename + '\n'
return False
return True
def execute_and_redirect(cmd, params, outfile):
retval = -1
try:
proc = subprocess.Popen([cmd] + params, stdout=outfile, stderr=subprocess.STDOUT)
out = proc.communicate()[0]
retval = proc.wait()
except:
print >> sys.stderr, "Error running subprocess: ", sys.exc_info()[1]
print >> sys.stderr, " cmd:", cmd
print >> sys.stderr, " params:", params
print >> sys.stderr, " outfile:", outfile
if outfile == subprocess.PIPE: return (retval, out)
else: return retval
def normalize_string(s):
"""Apply all modifications to an output string which would have been
applied if OPENSCAD_TESTING was defined at build time of the executable.
This truncates all floats, removes ', timestamp = ...' parts. The function
is idempotent.
This also normalizes away import paths from 'file = ' arguments."""
s = re.sub(', timestamp = [0-9]+', '', s)
def floatrep(match):
value = float(match.groups()[0])
if abs(value) < 10**-12:
return "0"
if abs(value) >= 10**6:
return "%d"%value
return "%.6g"%value
s = re.sub('(-?[0-9]+\\.[0-9]+(e[+-][0-9]+)?)', floatrep, s)
def pathrep(match):
return match.groups()[0] + match.groups()[2]
s = re.sub('(file = ")([^"/]*/)*([^"]*")', pathrep, s)
return s
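# Illustrative example: normalize_string('x = 1.23456789, timestamp = 1418')
# returns 'x = 1.23457' -- the timestamp field is removed and the float is
# truncated to 6 significant digits.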
def get_normalized_text(filename):
try:
f = open(filename)
text = f.read()
except:
text = ''
text = normalize_string(text)
return text.strip("\r\n").replace("\r\n", "\n") + "\n"
def compare_text(expected, actual):
return get_normalized_text(expected) == get_normalized_text(actual)
def compare_default(resultfilename):
print >> sys.stderr, 'text comparison: '
print >> sys.stderr, ' expected textfile: ', expectedfilename
print >> sys.stderr, ' actual textfile: ', resultfilename
expected_text = get_normalized_text(expectedfilename)
actual_text = get_normalized_text(resultfilename)
if not expected_text == actual_text:
if resultfilename:
differences = difflib.unified_diff(
[line.strip() for line in expected_text.splitlines()],
[line.strip() for line in actual_text.splitlines()])
line = None
for line in differences: sys.stderr.write(line + '\n')
if not line: return True
return False
return True
def compare_png(resultfilename):
compare_method = 'pixel'
#args = [expectedfilename, resultfilename, "-alpha", "Off", "-compose", "difference", "-composite", "-threshold", "10%", "-blur", "2", "-threshold", "30%", "-format", "%[fx:w*h*mean]", "info:"]
args = [expectedfilename, resultfilename, "-alpha", "Off", "-compose", "difference", "-composite", "-threshold", "10%", "-morphology", "Erode", "Square", "-format", "%[fx:w*h*mean]", "info:"]
# for systems with older imagemagick that doesn't support '-morphology'
# http://www.imagemagick.org/Usage/morphology/#alturnative
if options.comparator == 'old':
args = [expectedfilename, resultfilename, "-alpha", "Off", "-compose", "difference", "-composite", "-threshold", "10%", "-gaussian-blur","3x65535", "-threshold", "99.99%", "-format", "%[fx:w*h*mean]", "info:"]
if options.comparator == 'ncc':
# for systems where imagemagick crashes when using the above comparators
args = [expectedfilename, resultfilename, "-alpha", "Off", "-compose", "difference", "-metric", "NCC", "tmp.png"]
options.comparison_exec = 'compare'
compare_method = 'NCC'
if options.comparator == 'diffpng':
# alternative to imagemagick based on Yee's algorithm
# Writing the 'difference image' with --output is very useful for debugging but takes a long time
# args = [expectedfilename, resultfilename, "--output", resultfilename+'.diff.png']
args = [expectedfilename, resultfilename]
compare_method = 'diffpng'
print >> sys.stderr, 'Image comparison cmdline: '
print >> sys.stderr, '["'+str(options.comparison_exec) + '"],' + str(args)
# these two lines are parsed by the test_pretty_print.py
print >> sys.stderr, ' actual image: ' + resultfilename + '\n'
print >> sys.stderr, ' expected image: ' + expectedfilename + '\n'
if not resultfilename:
print >> sys.stderr, "Error: Error during test image generation"
return False
(retval, output) = execute_and_redirect(options.comparison_exec, args, subprocess.PIPE)
print "Image comparison return:", retval, "output:", output
if retval == 0:
if compare_method=='pixel':
pixelerr = int(float(output.strip()))
if pixelerr < 32: return True
else: print >> sys.stderr, pixelerr, ' pixel errors'
elif compare_method=='NCC':
thresh = 0.95
ncc_err = float(output.strip())
if ncc_err > thresh or ncc_err==0.0: return True
else: print >> sys.stderr, ncc_err, ' Images differ: NCC comparison < ', thresh
elif compare_method=='diffpng':
if 'MATCHES:' in output: return True
if 'DIFFERS:' in output: return False
return False
def compare_with_expected(resultfilename):
if not options.generate:
if "compare_" + options.suffix in globals(): return globals()["compare_" + options.suffix](resultfilename)
else: return compare_default(resultfilename)
return True
def run_test(testname, cmd, args):
cmdname = os.path.split(options.cmd)[1]
if options.generate:
if not os.path.exists(expecteddir):
try:
os.makedirs(expecteddir)
except OSError as e:
if e.errno != 17: raise e # catch File Exists to allow parallel runs
outputname = expectedfilename
else:
if not os.path.exists(actualdir):
try:
os.makedirs(actualdir)
except OSError as e:
if e.errno != 17: raise e # catch File Exists to allow parallel runs
outputname = actualfilename
outputname = os.path.normpath(outputname)
outfile = open(outputname, "wb")
try:
cmdline = [cmd] + args + [outputname]
print 'run_test() cmdline:',cmdline
fontdir = os.path.join(os.path.dirname(cmd), "testdata")
fontenv = os.environ.copy()
fontenv["OPENSCAD_FONT_PATH"] = fontdir
print 'using font directory:', fontdir
sys.stdout.flush()
proc = subprocess.Popen(cmdline, env = fontenv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
comresult = proc.communicate()
stdouttext, errtext = comresult[0],comresult[1]
if errtext != None and len(errtext) > 0:
print >> sys.stderr, "stderr output: " + errtext
if stdouttext != None and len(stdouttext) > 0:
print >> sys.stderr, "stdout output: " + stdouttext
outfile.close()
if proc.returncode != 0:
print >> sys.stderr, "Error: %s failed with return code %d" % (cmdname, proc.returncode)
return None
return outputname
except OSError, err:
print >> sys.stderr, "Error: %s \"%s\"" % (err.strerror, cmd)
return None
class Options:
def __init__(self):
self.__dict__['options'] = {}
def __setattr__(self, name, value):
self.options[name] = value
def __getattr__(self, name):
return self.options[name]
def usage():
print >> sys.stderr, "Usage: " + sys.argv[0] + " [<options>] <cmdline-tool> <argument>"
print >> sys.stderr, "Options:"
print >> sys.stderr, " -g, --generate Generate expected output for the given tests"
print >> sys.stderr, " -s, --suffix=<suffix> Write -expected and -actual files with the given suffix instead of .txt"
print >> sys.stderr, " -e, --expected-dir=<dir> Use -expected files from the given dir (to share files between test drivers)"
print >> sys.stderr, " -t, --test=<name> Specify test name instead of deducting it from the argument (defaults to basename <exe>)"
print >> sys.stderr, " -f, --file=<name> Specify test file instead of deducting it from the argument (default to basename <first arg>)"
print >> sys.stderr, " -c, --convexec=<name> Path to ImageMagick 'convert' executable"
if __name__ == '__main__':
# Handle command-line arguments
try:
debug('args:'+str(sys.argv))
opts, args = getopt.getopt(sys.argv[1:], "gs:e:c:t:f:m:", ["generate", "suffix=", "expected-dir=", "test=", "file=", "compare-exec=", "comparator="])
debug('getopt args:'+str(sys.argv))
except getopt.GetoptError, err:
usage()
sys.exit(2)
global options
options = Options()
options.regressiondir = os.path.join(os.path.split(sys.argv[0])[0], "regression")
options.generate = False
options.suffix = "txt"
options.comparator = ""
for o, a in opts:
if o in ("-g", "--generate"): options.generate = True
elif o in ("-s", "--suffix"):
if a[0] == '.': options.suffix = a[1:]
else: options.suffix = a
elif o in ("-e", "--expected-dir"):
options.expecteddir = a
elif o in ("-t", "--test"):
options.testname = a
elif o in ("-f", "--file"):
options.filename = a
elif o in ("-c", "--compare-exec"):
options.comparison_exec = os.path.normpath( a )
elif o in ("-m", "--comparator"):
options.comparator = a
# <cmdline-tool> and <argument>
if len(args) < 2:
usage()
sys.exit(2)
options.cmd = args[0]
# If only one test file, we can usually deduce the test name from the file
if len(args) == 2:
basename = os.path.splitext(args[1])[0]
path, options.filename = os.path.split(basename)
print >> sys.stderr, basename
print >> sys.stderr, path, options.filename
print >> sys.stderr, options.filename
if not hasattr(options, "filename"):
print >> sys.stderr, "Filename cannot be deducted from arguments. Specify test filename using the -f option"
sys.exit(2)
if not hasattr(options, "testname"):
options.testname = os.path.split(args[0])[1]
# Initialize and verify run-time environment
if not initialize_environment(): sys.exit(1)
init_expected_filename()
init_actual_filename()
# Verify test environment
verification = verify_test(options.testname, options.cmd)
resultfile = run_test(options.testname, options.cmd, args[1:])
if not resultfile: exit(1)
if not verification or not compare_with_expected(resultfile): exit(1)
|
clothbot/openscad
|
tests/test_cmdline_tool.py
|
Python
|
gpl-2.0
| 13,380
|
[
"Gaussian"
] |
b6d6cea77c0282518d8220c27b7f27fab4c65d9b0aed1a3f7ac54f3530f054a9
|
#youtube stuff imported
import httplib2
import re
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
import time
import googleapiclient
import sys, os
from utils import config
from utils import Object
from utils import logger
from utils import messageFormatter
from utils import fileIO
import asyncio
import datetime
class Youtube:
def __init__(self):
self.pageToken = ""
self.youtube = ""
self.serviceStarted = False
self.secretsFilePath = "config{0}auth{0}client_secrets.json".format(os.sep)
self.oauthFilePath = "config{0}auth{0}oauth.json".format(os.sep)
self.l = logger.logs("Youtube")
fileIO.checkFolder("config{0}auth{0}".format(os.sep),"auth",self.l)
fileIO.checkFile("config-example{0}auth{0}youtube.json".format(os.sep),"config{0}auth{0}youtube.json".format(os.sep),"youtube.json",self.l)
self.enabled = fileIO.loadConf("config{0}auth{0}youtube.json".format(os.sep))["Enabled"]
self.pageToken = fileIO.loadConf("config{0}auth{0}youtube.json".format(os.sep))["pageToken"]
self.oldMessageList = [] #keeps track of old messages to filter out
self.messageFrequency = 0
if (self.enabled):
secretsExist = self.checkFile(self.secretsFilePath,"client_secrets.json",self.l)
self.msgCheckList = fileIO.loadConf("config{0}auth{0}youtube.json")["selfMsgFilter"]
if (secretsExist):
self.l.logger.info("Starting")
self.initAuth()
config.events.onMessageSend += self.sendLiveChat
else:
self.l.logger.info("Please make sure the oauth and client secret files exist")
#sys.exit()
def checkFile(self,filePath,fileName,logger):
return (os.path.isfile(filePath))
def initAuth(self):
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
self.CLIENT_SECRETS_FILE = self.secretsFilePath
# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user's account.
self.YOUTUBE_READ_WRITE_SCOPE = "https://www.googleapis.com/auth/youtube"
self.YOUTUBE_API_SERVICE_NAME = "youtube"
self.YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
self.MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
self.CLIENT_SECRETS_FILE))
async def get_authenticated_service(self,args):
flow = flow_from_clientsecrets(self.CLIENT_SECRETS_FILE,
scope=self.YOUTUBE_READ_WRITE_SCOPE,
message=self.MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage(self.oauthFilePath)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
return build(self.YOUTUBE_API_SERVICE_NAME, self.YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
# Retrieve a list of the liveStream resources associated with the currently
# authenticated user's channel.
async def getLiveId(self): #this gets the live chat id
list_streams_request = self.youtube.liveBroadcasts().list( #checks for the live chat id through this
part="snippet", #this is what we look through to get the live chat id
broadcastStatus="all", #we need both of these to get the live chat id
broadcastType="all"
).execute() #executes it so its not just some object
self.liveChatId = list_streams_request["items"][0]["snippet"]["liveChatId"]#sifts through the output to get the live chat id and saves it
self.botUserID = list_streams_request["items"][0]["snippet"]["channelId"] #saves the bots channel user id that we will use as a identifier
self.l.logger.debug("liveID {0}".format(self.liveChatId)) #print the live chat id
self.l.logger.debug("BotID" + str(self.botUserID))
async def listChat(self):
try:
continuation = True
try:
list_chatmessages = self.youtube.liveChatMessages().list( #lists the chat messages
part="id,snippet,authorDetails", #gets the author details needed and the snippet all of which giving me the message and username
liveChatId=self.liveChatId,
maxResults=200,
pageToken=self.pageToken #gives the previous token so it loads a new section of the chat
).execute() #executes it so its not just some object
self.pageToken = list_chatmessages["nextPageToken"] #page token for next use
except googleapiclient.errors.HttpError:
self.l.logger.info("Some Google API Error Occured")
await self.Login()
continuation = False
amount = 0
if continuation == True:
for temp in list_chatmessages["items"]: #goes through all the stuff in the list messages list
message = temp["snippet"]["displayMessage"] #gets the display message
username = temp["authorDetails"]["displayName"] #gets the users name
userID = temp["authorDetails"]["channelId"]
profilePic = temp["authorDetails"]["profileImageUrl"]
if message != "" and username != "": #this makes sure that the message and username slot arent empty before putting this to the discord chat
self.l.logger.debug(temp)
fileIO.fileSave("youtubeMsgJson.json", temp)
self.l.logger.debug(userID)
self.l.logger.debug(self.botUserID)
if (userID != self.botUserID):#await self.weedMsg(userId,message)):
self.l.logger.info("{0} {1}".format(username,message))
await self.processMsg(username=username,message=message,roleList=await self.youtubeRoles(temp["authorDetails"]),profilePicture=profilePic)
amount = amount + 1
else: #check if the message was sent by the bot or not
msgFound = False
for oldMsg in self.oldMessageList:
if oldMsg["Message"].strip() == message.strip():
msgFound = True
if not msgFound: #if message not sent by bot then send it
self.l.logger.info("{0} {1}".format(username,message))
await self.processMsg(username=username,message=message,roleList=await self.youtubeRoles(temp["authorDetails"]),profilePicture=profilePic)
amount = amount + 1
# if userID != self.botUserID:
# self.l.logger.info("{0} {1}".format(username,message))
# await self.processMsg(username=username,message=message,roleList=await self.youtubeRoles(temp["authorDetails"]))
# elif userID == self.botUserID: #if the userId is the bots then check the message to see if the bot sent it.
# try:
# if (message.find("[B]")==-1): #Checks the message against this to see if it was sent by the bot or a user
# self.l.logger.info("{0} {1}".format(username,message))
# await self.processMsg(username=username,message=message,roleList=await self.youtubeRoles(temp["authorDetails"]))
# except AttributeError as error:
# self.l.logger.info("{0} {1}".format(username,message))
# await self.processMsg(username=username,message=message,roleList=await self.youtubeRoles(temp["authorDetails"]))
self.messageFrequency = amount
except ConnectionResetError:
await self.Login()
self.l.logger.info('Connection error, reconnecting')
async def weedMsg(self,userID,message):
# False means its a safe message
# true means it should be weeded out
##This isn't really needed anymore
if userID == self.botUserID:
for i in self.msgCheckList:
if (message.find(i) == -1):
return False
return True
else:
return False
async def clearMsgList(self):
oldTime = datetime.datetime.now() - datetime.timedelta(minutes=15)
for msg in self.oldMessageList:
if msg["Time"] < oldTime:
self.oldMessageList.remove(msg)
async def processMsg(self,username,message,roleList,profilePicture):
formatOptions = {"%authorName%": username, "%channelFrom%": "Youtube", "%serverFrom%": "Youtube", "%serviceFrom%": "youtube","%message%":"message","%roles%":roleList}
message = Object.ObjectLayout.message(Author=username,Contents=message,Server="Youtube",Channel="Youtube",Service="Youtube",Roles=roleList,profilePicture=profilePicture)
objDeliveryDetails = Object.ObjectLayout.DeliveryDetails(Module="Site",ModuleTo="Modules",Service="Modules",Server="Modules",Channel="Modules")
objSendMsg = Object.ObjectLayout.sendMsgDeliveryDetails(Message=message, DeliveryDetails=objDeliveryDetails, FormattingOptions=formatOptions,messageUnchanged="None")
config.events.onMessage(message=objSendMsg)
async def youtubeRoles(self,authorDetails):
roles = {}
if authorDetails["isChatModerator"] == True:
roles.update({"Mod":2})
if authorDetails["isChatOwner"] == True:
roles.update({"Owner":3})
if authorDetails["isChatSponsor"] == True:
roles.update({"Sponsor":1})
roles.update({"Normal": 0})
self.l.logger.debug("roles {0}".format(roles))
return roles
async def listLiveStreams(self):
try:
x = list_streams_request = self.youtube.liveStreams().list(
part="id,snippet",
mine=True,
maxResults=50
).execute()
fileIO.fileSave("youtubeliveStreamsJson.json", x)
except:
await self.Login()
self.l.logger.info('Connection Error reconnecting')
async def listLiveBroadcasts(self):
try:
x = self.youtube.liveBroadcasts().list(
broadcastStatus="all",
part="id,snippet",
maxResults=50
).execute()
fileIO.fileSave("youtubeliveBroadcastsJson.json", x)
except:
await self.Login()
self.l.logger.info('Connection Error reconnecting')
async def sendLiveChat(self,sndMessage): #sends messages to youtube live chat
while self.serviceStarted != True:
await asyncio.sleep(0.2)
if sndMessage.DeliveryDetails.ModuleTo == "Site" and sndMessage.DeliveryDetails.Service == "Youtube": #determines if its the right service and supposed to be here
msg = await messageFormatter.formatter(sndMessage,formattingOptions=sndMessage.formattingSettings,formatType=sndMessage.formatType)
time = datetime.datetime.now()
self.oldMessageList.append({"Time":time, "Message":msg}) #keeps track of old messages so that we can check and not listen to these
list_chatmessages_inset = self.youtube.liveChatMessages().insert(
part = "snippet",
body = dict (
snippet = dict(
liveChatId = self.liveChatId,
type = "textMessageEvent",
textMessageDetails = dict(
messageText = msg
)
)
)
)
list_chatmessages_inset.execute()
#print(list_chatmessages_inset.execute()) #debug for sending live chat messages
async def Login(self):
if "__main__" == "__main__":
self.l.logger.info("Logging In")
args = argparser.parse_args()
self.youtube = await self.get_authenticated_service(args) #authenticates the api and saves it to youtube
await self.getLiveId()
self.l.logger.info("Logged in")
self.serviceStarted = True
async def youtubeChatControl(self):
self.l.logger.info("Started")
counter = 0
while True:
if self.serviceStarted == True:
#try:
await self.listChat()
#await self.listLiveStreams()
#await self.listLiveBroadcasts()
await self.clearMsgList()
if counter == 5:
filePath = "config{0}auth{0}youtube.json".format(os.sep)
data = {"Enabled": self.enabled, "pageToken": self.pageToken, "selfMsgFilter": self.msgCheckList}
fileIO.fileSave(filePath,data)
counter=0
self.l.logger.debug("Saving")
counter+=1
#except googleapiclient.errors.HttpError:
#youtube = self.Login()
#self.l.logger.info('Connection Error reconnecting')
if self.messageFrequency == 0: #this should prevent overuse of the google api quota slowing down the bot during times of low use and speeding it up during times of high use
await asyncio.sleep(8)
elif self.messageFrequency == 1:
await asyncio.sleep(5)
elif self.messageFrequency > 1:
await asyncio.sleep(1)
y = Youtube()
if (y.enabled):
loop = asyncio.get_event_loop()
loop.create_task(y.Login())
loop.create_task(y.youtubeChatControl())
|
popcorn9499/chatBot
|
sites/youtube.py
|
Python
|
gpl-3.0
| 15,563
|
[
"VisIt"
] |
81ffe8bde1cc0e8c592eb1f8d03f811b389dd7c62a4c57323d540ef25d4fced3
|
#!/usr/bin/python -*- coding: utf-8 -*-
#
# Merlin - Almost Native Python Machine Learning Library: Naive Bayes Classifier
#
# Copyright (C) 2014-2015 alvations
# URL:
# For license information, see LICENSE.md
import numpy as np
import linear_classifier as lc
from gaussian import *
class NaiveBayes(lc.LinearClassifier):
def __init__(self,xtype="gaussian"):
lc.LinearClassifier.__init__(self)
self.trained = False
self.xtype = xtype
def train(self,x,y):
nr_x,nr_f = x.shape
nr_c = np.unique(y).shape[0]
        if self.xtype == "gaussian":
            params = self.train_gaussian(x,y,nr_x,nr_f,nr_c)
        elif self.xtype.lower() == "multinomial":
            print "Training a multinomial"
            params = self.train_multinomial(x,y,nr_x,nr_f,nr_c)
        else:
            raise ValueError("Naive Bayes not implemented for this type of model of P(X|Y)")
        self.trained = True
return params
##########################
### Estimate mean and variance of
### Gaussian Distributions
    ### Note that each class ends up with a single
    ### variance shared across all of its features
##########################
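    ##########################
    ### With a per-class variance shared across features, the log-posterior
    ### is linear in x (dropping the class-independent x.x term):
    ###   log P(y=c|x) ~ log P(c) - ||x - mu_c||^2 / (2 * var_c)
    ###               = [log P(c) - mu_c.mu_c / (2 * var_c)] + (mu_c / var_c) . x
    ### The bracketed bias becomes params[0,c] and the weight vector
    ### params[1:,c], exactly as assembled in train_gaussian below.
    ##########################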
def train_gaussian(self,x,y,nr_x,nr_f,nr_c):
prior = np.zeros(nr_c)
likelihood = np.zeros((nr_f,nr_c))
classes = np.unique(y)
means = np.zeros((nr_c,nr_f))
variances = np.zeros((nr_c,nr_f))
for i in xrange(nr_c):
idx,_ = np.nonzero(y == classes[i])
prior[i] = 1.0*len(idx)/len(y)
for f in xrange(nr_f):
g = estimate_gaussian(x[idx,f])
means[i,f] = g.mean
variances[i,f] = g.variance
        ## Average the variances across features so each class shares a single variance
variances = np.mean(variances,1)
params = np.zeros((nr_f+1,nr_c))
for i in xrange(nr_c):
params[0,i] = -1/(2*variances[i]) * np.dot(means[i,:],means[i,:]) + np.log(prior[i])
params[1:,i] = (1/variances[i] * means[i]).transpose()
return params
##########################
    ### Train a multinomial distribution
### Has one multinomial for each feature
##########################
def train_multinomial(self,x,y,nr_x,nr_f,nr_c):
prior = np.zeros(nr_c)
ind_per_class = {}
classes = np.unique(y)
for i in xrange(nr_c):
idx,_ = np.nonzero(y == classes[i])
            ind_per_class[i] = idx
likelihood = np.zeros((nr_f,nr_c))
sums = np.zeros((nr_f,1))
for i in xrange(nr_c):
idx,_ = np.nonzero(y == classes[i])
prior[i] = 1.0*len(idx)/len(y)
value = x[idx,:].sum(0)
sums[:,0] += value
likelihood[:,i] = value
for f in xrange(nr_f):
for i in xrange(nr_c):
likelihood[f,i] = likelihood[f,i]/sums[f,0]
params = np.zeros((nr_f+1,nr_c))
for i in xrange(nr_c):
params[0,i] = np.log(prior[i])
params[1:,i] = np.nan_to_num(np.log(likelihood[:,i]))
return params
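# Illustrative usage sketch (commented out; assumes the sibling
# `linear_classifier` and `gaussian` modules are importable and that labels
# arrive as an (n, 1) column vector, which is what the np.nonzero(y == c)
# calls above expect):
#
#     x = np.vstack([np.random.randn(20, 3) + 2, np.random.randn(20, 3) - 2])
#     y = np.vstack([np.zeros((20, 1)), np.ones((20, 1))])
#     nb = NaiveBayes(xtype="gaussian")
#     params = nb.train(x, y)   # shape: (n_features + 1, n_classes)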
|
meetvaruni/GitFiles
|
pywsd-master/pywsd-master/merlin/naive_bayes.py
|
Python
|
gpl-2.0
| 3,103
|
[
"Gaussian"
] |
49c6063c8f1ad45945b8d422ae23da8187df17c01506b22dc2ef8a2d738b79f2
|
# -*- coding: utf-8 -*-
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
"""
/***************************************************************************
MeshNetCDF
A QGIS plugin
Create Gmsh mesh from NetCDF (.nc) file where the z-coordinate is a metric for the mesh size.
-------------------
begin : 2012-07-25
***************************************************************************/
/***************************************************************************
* *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def name():
return "Mesh NetCDF"
def description():
return "Create Gmsh mesh from NetCDF (.nc) file where the z-coordinate is a metric for the mesh size."
def version():
return "Version 0.1"
def icon():
return "icon.png"
def qgisMinimumVersion():
return "1.0"
def classFactory(iface):
# load MeshNetCDF class from file MeshNetCDF
from meshnetcdf import MeshNetCDF
return MeshNetCDF(iface)
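# QGIS imports this module and calls classFactory(iface) once when the plugin
# is loaded; the returned MeshNetCDF instance then receives the usual
# initGui()/unload() plugin lifecycle calls.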
|
adamcandy/qgis-plugins-meshing-initial
|
release/mesh_netcdf/__init__.py
|
Python
|
lgpl-2.1
| 2,448
|
[
"NetCDF"
] |
2e37e6d652864f5d7550171866071b0a43c2ca1e94e804fa7ee8479d59aac8ab
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A runner implementation that submits a job for remote execution.
The runner will create a JSON description of the job graph and then submit it
to the Dataflow Service for remote execution by a worker.
"""
import logging
import threading
import time
import traceback
import urllib
from collections import defaultdict
import apache_beam as beam
from apache_beam import coders
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal.gcp import json_value
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.pvalue import AsSideInput
from apache_beam.runners.dataflow.dataflow_metrics import DataflowMetrics
from apache_beam.runners.dataflow.internal import names
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.dataflow.internal.names import PropertyNames
from apache_beam.runners.dataflow.internal.names import TransformNames
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms.display import DisplayData
from apache_beam.typehints import typehints
from apache_beam.utils.plugin import BeamPlugin
__all__ = ['DataflowRunner']
class DataflowRunner(PipelineRunner):
"""A runner that creates job graphs and submits them for remote execution.
Every execution of the run() method will submit an independent job for
remote execution that consists of the nodes reachable from the passed in
node argument or entire graph if node is None. The run() method returns
after the service created the job and will not wait for the job to finish
if blocking is set to False.
"""
# A list of PTransformOverride objects to be applied before running a pipeline
# using DataflowRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal SDK use only. This should not be updated by Beam pipeline
# authors.
# Imported here to avoid circular dependencies.
# TODO: Remove the apache_beam.pipeline dependency in CreatePTransformOverride
from apache_beam.runners.dataflow.ptransform_overrides import CreatePTransformOverride
_PTRANSFORM_OVERRIDES = [
CreatePTransformOverride(),
]
def __init__(self, cache=None):
# Cache of CloudWorkflowStep protos generated while the runner
# "executes" a pipeline.
self._cache = cache if cache is not None else PValueCache()
self._unique_step_id = 0
def _get_unique_step_name(self):
self._unique_step_id += 1
return 's%s' % self._unique_step_id
@staticmethod
def poll_for_job_completion(runner, result, duration):
"""Polls for the specified job to finish running (successfully or not).
Updates the result with the new job information before returning.
Args:
runner: DataflowRunner instance to use for polling job state.
result: DataflowPipelineResult instance used for job information.
duration (int): The time to wait (in milliseconds) for job to finish.
If it is set to :data:`None`, it will wait indefinitely until the job
is finished.
"""
last_message_time = None
last_message_hash = None
last_error_rank = float('-inf')
last_error_msg = None
last_job_state = None
# How long to wait after pipeline failure for the error
# message to show up giving the reason for the failure.
# It typically takes about 30 seconds.
final_countdown_timer_secs = 50.0
sleep_secs = 5.0
# Try to prioritize the user-level traceback, if any.
def rank_error(msg):
if 'work item was attempted' in msg:
return -1
elif 'Traceback' in msg:
return 1
return 0
if duration:
start_secs = time.time()
duration_secs = duration / 1000
job_id = result.job_id()
while True:
response = runner.dataflow_client.get_job(job_id)
# If get() is called very soon after Create() the response may not contain
# an initialized 'currentState' field.
if response.currentState is not None:
if response.currentState != last_job_state:
logging.info('Job %s is in state %s', job_id, response.currentState)
last_job_state = response.currentState
if str(response.currentState) != 'JOB_STATE_RUNNING':
# Stop checking for new messages on timeout, explanatory
# message received, success, or a terminal job state caused
# by the user that therefore doesn't require explanation.
if (final_countdown_timer_secs <= 0.0
or last_error_msg is not None
or str(response.currentState) == 'JOB_STATE_DONE'
or str(response.currentState) == 'JOB_STATE_CANCELLED'
or str(response.currentState) == 'JOB_STATE_UPDATED'
or str(response.currentState) == 'JOB_STATE_DRAINED'):
break
# The job has failed; ensure we see any final error messages.
sleep_secs = 1.0 # poll faster during the final countdown
final_countdown_timer_secs -= sleep_secs
time.sleep(sleep_secs)
# Get all messages since beginning of the job run or since last message.
page_token = None
while True:
messages, page_token = runner.dataflow_client.list_messages(
job_id, page_token=page_token, start_time=last_message_time)
for m in messages:
message = '%s: %s: %s' % (m.time, m.messageImportance, m.messageText)
m_hash = hash(message)
if last_message_hash is not None and m_hash == last_message_hash:
# Skip the first message if it is the last message we got in the
# previous round. This can happen because we use the
# last_message_time as a parameter of the query for new messages.
continue
last_message_time = m.time
last_message_hash = m_hash
# Skip empty messages.
if m.messageImportance is None:
continue
logging.info(message)
if str(m.messageImportance) == 'JOB_MESSAGE_ERROR':
if rank_error(m.messageText) >= last_error_rank:
last_error_rank = rank_error(m.messageText)
last_error_msg = m.messageText
if not page_token:
break
if duration:
passed_secs = time.time() - start_secs
if passed_secs > duration_secs:
logging.warning('Timing out on waiting for job %s after %d seconds',
job_id, passed_secs)
break
result._job = response
runner.last_error_msg = last_error_msg
@staticmethod
def group_by_key_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class GroupByKeyInputVisitor(PipelineVisitor):
"""A visitor that replaces `Any` element type for input `PCollection` of
a `GroupByKey` or `_GroupByKeyOnly` with a `KV` type.
      TODO(BEAM-115): Once Python SDK is compatible with the new Runner API,
we could directly replace the coder instead of mutating the element type.
"""
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.core import GroupByKey, _GroupByKeyOnly
if isinstance(transform_node.transform, (GroupByKey, _GroupByKeyOnly)):
pcoll = transform_node.inputs[0]
input_type = pcoll.element_type
# If input_type is not specified, then treat it as `Any`.
if not input_type:
input_type = typehints.Any
def coerce_to_kv_type(element_type):
if isinstance(element_type, typehints.TupleHint.TupleConstraint):
if len(element_type.tuple_types) == 2:
return element_type
else:
                raise ValueError(
                    "Tuple input to GroupByKey must have two components. "
                    "Found %s for %s" % (element_type, pcoll))
            elif isinstance(element_type, typehints.AnyTypeConstraint):
# `Any` type needs to be replaced with a KV[Any, Any] to
# force a KV coder as the main output coder for the pcollection
# preceding a GroupByKey.
return typehints.KV[typehints.Any, typehints.Any]
elif isinstance(element_type, typehints.UnionConstraint):
union_types = [
coerce_to_kv_type(t) for t in element_type.union_types]
return typehints.KV[
typehints.Union[tuple(t.tuple_types[0] for t in union_types)],
typehints.Union[tuple(t.tuple_types[1] for t in union_types)]]
else:
# TODO: Possibly handle other valid types.
raise ValueError(
"Input to GroupByKey must be of Tuple or Any type. "
"Found %s for %s" % (element_type, pcoll))
pcoll.element_type = coerce_to_kv_type(input_type)
return GroupByKeyInputVisitor()
@staticmethod
def flatten_input_visitor():
# Imported here to avoid circular dependencies.
from apache_beam.pipeline import PipelineVisitor
class FlattenInputVisitor(PipelineVisitor):
"""A visitor that replaces the element type for input ``PCollections``s of
a ``Flatten`` transform with that of the output ``PCollection``.
"""
def visit_transform(self, transform_node):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam import Flatten
if isinstance(transform_node.transform, Flatten):
output_pcoll = transform_node.outputs[None]
for input_pcoll in transform_node.inputs:
input_pcoll.element_type = output_pcoll.element_type
return FlattenInputVisitor()
def run(self, pipeline):
"""Remotely executes entire pipeline or parts reachable from node."""
# Import here to avoid adding the dependency for local running scenarios.
try:
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
raise ImportError(
'Google Cloud Dataflow runner not available, '
'please install apache_beam[gcp]')
# Snapshot the pipeline in a portable proto before mutating it
proto_pipeline = pipeline.to_runner_api()
# Performing configured PTransform overrides.
pipeline.replace_all(DataflowRunner._PTRANSFORM_OVERRIDES)
# Add setup_options for all the BeamPlugin imports
setup_options = pipeline._options.view_as(SetupOptions)
plugins = BeamPlugin.get_all_plugin_paths()
if setup_options.beam_plugins is not None:
plugins = list(set(plugins + setup_options.beam_plugins))
setup_options.beam_plugins = plugins
self.job = apiclient.Job(pipeline._options, proto_pipeline)
# Dataflow runner requires a KV type for GBK inputs, hence we enforce that
# here.
pipeline.visit(self.group_by_key_input_visitor())
# Dataflow runner requires output type of the Flatten to be the same as the
# inputs, hence we enforce that here.
pipeline.visit(self.flatten_input_visitor())
# The superclass's run will trigger a traversal of all reachable nodes.
super(DataflowRunner, self).run(pipeline)
test_options = pipeline._options.view_as(TestOptions)
# If it is a dry run, return without submitting the job.
if test_options.dry_run:
return None
# Get a Dataflow API client and set its options
self.dataflow_client = apiclient.DataflowApplicationClient(
pipeline._options)
# Create the job description and send a request to the service. The result
# can be None if there is no need to send a request to the service (e.g.
# template creation). If a request was sent and failed then the call will
# raise an exception.
result = DataflowPipelineResult(
self.dataflow_client.create_job(self.job), self)
self._metrics = DataflowMetrics(self.dataflow_client, result, self.job)
result.metric_results = self._metrics
return result
def _get_typehint_based_encoding(self, typehint, window_coder):
"""Returns an encoding based on a typehint object."""
return self._get_cloud_encoding(self._get_coder(typehint,
window_coder=window_coder))
@staticmethod
def _get_coder(typehint, window_coder):
"""Returns a coder based on a typehint object."""
if window_coder:
return coders.WindowedValueCoder(
coders.registry.get_coder(typehint),
window_coder=window_coder)
return coders.registry.get_coder(typehint)
def _get_cloud_encoding(self, coder):
"""Returns an encoding based on a coder object."""
if not isinstance(coder, coders.Coder):
raise TypeError('Coder object must inherit from coders.Coder: %s.' %
str(coder))
return coder.as_cloud_object()
def _get_side_input_encoding(self, input_encoding):
"""Returns an encoding for the output of a view transform.
Args:
input_encoding: encoding of current transform's input. Side inputs need
this because the service will check that input and output types match.
Returns:
An encoding that matches the output and input encoding. This is essential
for the View transforms introduced to produce side inputs to a ParDo.
"""
return {
'@type': input_encoding['@type'],
'component_encodings': [input_encoding]
}
def _get_encoded_output_coder(self, transform_node, window_value=True):
"""Returns the cloud encoding of the coder for the output of a transform."""
if (len(transform_node.outputs) == 1
and transform_node.outputs[None].element_type is not None):
# TODO(robertwb): Handle type hints for multi-output transforms.
element_type = transform_node.outputs[None].element_type
else:
# TODO(silviuc): Remove this branch (and assert) when typehints are
# propagated everywhere. Returning an 'Any' as type hint will trigger
# usage of the fallback coder (i.e., cPickler).
element_type = typehints.Any
if window_value:
window_coder = (
transform_node.outputs[None].windowing.windowfn.get_window_coder())
else:
window_coder = None
return self._get_typehint_based_encoding(
element_type, window_coder=window_coder)
def _add_step(self, step_kind, step_label, transform_node, side_tags=()):
"""Creates a Step object and adds it to the cache."""
# Import here to avoid adding the dependency for local running scenarios.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(step_kind, self._get_unique_step_name())
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, step_label)
# Cache the node/step association for the main output of the transform node.
self._cache.cache_output(transform_node, None, step)
# If side_tags is not () then this is a multi-output transform node and we
# need to cache the (node, tag, step) for each of the tags used to access
# the outputs. This is essential because the keys used to search in the
# cache always contain the tag.
for tag in side_tags:
self._cache.cache_output(transform_node, tag, step)
# Finally, we add the display data items to the pipeline step.
# If the transform contains no display data then an empty list is added.
step.add_property(
PropertyNames.DISPLAY_DATA,
[item.get_dict() for item in
DisplayData.create_from(transform_node.transform).items])
return step
def _add_singleton_step(self, label, full_label, tag, input_step):
"""Creates a CollectionToSingleton step used to handle ParDo side inputs."""
# Import here to avoid adding the dependency for local running scenarios.
from apache_beam.runners.dataflow.internal import apiclient
step = apiclient.Step(TransformNames.COLLECTION_TO_SINGLETON, label)
self.job.proto.steps.append(step.proto)
step.add_property(PropertyNames.USER_NAME, full_label)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(tag)})
step.encoding = self._get_side_input_encoding(input_step.encoding)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (full_label, PropertyNames.OUTPUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
return step
def run_Impulse(self, transform_node):
standard_options = (
transform_node.outputs[None].pipeline._options.view_as(StandardOptions))
if standard_options.streaming:
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
step.add_property(PropertyNames.FORMAT, 'pubsub')
step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION, '_starting_signal/')
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (
transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
else:
      raise ValueError('Impulse source for batch pipelines has not been defined.')
def run_Flatten(self, transform_node):
step = self._add_step(TransformNames.FLATTEN,
transform_node.full_label, transform_node)
inputs = []
for one_input in transform_node.inputs:
input_step = self._cache.get_pvalue(one_input)
inputs.append(
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(one_input.tag)})
step.add_property(PropertyNames.INPUTS, inputs)
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def apply_WriteToBigQuery(self, transform, pcoll):
# Make sure this is the WriteToBigQuery class that we expected
if not isinstance(transform, beam.io.WriteToBigQuery):
return self.apply_PTransform(transform, pcoll)
standard_options = pcoll.pipeline._options.view_as(StandardOptions)
if standard_options.streaming:
if (transform.write_disposition ==
beam.io.BigQueryDisposition.WRITE_TRUNCATE):
raise RuntimeError('Can not use write truncation mode in streaming')
return self.apply_PTransform(transform, pcoll)
else:
return pcoll | 'WriteToBigQuery' >> beam.io.Write(
beam.io.BigQuerySink(
transform.table_reference.tableId,
transform.table_reference.datasetId,
transform.table_reference.projectId,
transform.schema,
transform.create_disposition,
transform.write_disposition))
def apply_GroupByKey(self, transform, pcoll):
# Infer coder of parent.
#
# TODO(ccy): make Coder inference and checking less specialized and more
# comprehensive.
parent = pcoll.producer
if parent:
coder = parent.transform._infer_output_coder() # pylint: disable=protected-access
if not coder:
coder = self._get_coder(pcoll.element_type or typehints.Any, None)
if not coder.is_kv_coder():
raise ValueError(('Coder for the GroupByKey operation "%s" is not a '
'key-value coder: %s.') % (transform.label,
coder))
# TODO(robertwb): Update the coder itself if it changed.
coders.registry.verify_deterministic(
coder.key_coder(), 'GroupByKey operation "%s"' % transform.label)
return pvalue.PCollection(pcoll.pipeline)
def run_GroupByKey(self, transform_node):
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.GROUP, transform_node.full_label, transform_node)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
step.encoding = self._get_encoded_output_coder(transform_node)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
windowing = transform_node.transform.get_windowing(
transform_node.inputs)
step.add_property(
PropertyNames.SERIALIZED_FN,
self.serialize_windowing_strategy(windowing))
def run_ParDo(self, transform_node):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
# Attach side inputs.
si_dict = {}
# We must call self._cache.get_pvalue exactly once due to refcounting.
si_labels = {}
full_label_counts = defaultdict(int)
lookup_label = lambda side_pval: si_labels[side_pval]
for side_pval in transform_node.side_inputs:
assert isinstance(side_pval, AsSideInput)
step_number = self._get_unique_step_name()
si_label = 'SideInput-' + step_number
pcollection_label = '%s.%s' % (
side_pval.pvalue.producer.full_label.split('/')[-1],
side_pval.pvalue.tag if side_pval.pvalue.tag else 'out')
si_full_label = '%s/%s(%s.%s)' % (transform_node.full_label,
side_pval.__class__.__name__,
pcollection_label,
full_label_counts[pcollection_label])
# Count the number of times the same PCollection is a side input
# to the same ParDo.
full_label_counts[pcollection_label] += 1
self._add_singleton_step(
si_label, si_full_label, side_pval.pvalue.tag,
self._cache.get_pvalue(side_pval.pvalue))
si_dict[si_label] = {
'@type': 'OutputReference',
PropertyNames.STEP_NAME: si_label,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}
si_labels[side_pval] = si_label
# Now create the step for the ParDo transform being handled.
transform_name = transform_node.full_label.rsplit('/', 1)[-1]
step = self._add_step(
TransformNames.DO,
transform_node.full_label + (
'/{}'.format(transform_name)
if transform_node.side_inputs else ''),
transform_node,
transform_node.transform.output_tags)
fn_data = self._pardo_fn_data(transform_node, lookup_label)
step.add_property(PropertyNames.SERIALIZED_FN, pickler.dumps(fn_data))
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
# Add side inputs if any.
step.add_property(PropertyNames.NON_PARALLEL_INPUTS, si_dict)
# Generate description for the outputs. The output names
# will be 'out' for main output and 'out_<tag>' for a tagged output.
# Using 'out' as a tag will not clash with the name for main since it will
# be transformed into 'out_out' internally.
outputs = []
step.encoding = self._get_encoded_output_coder(transform_node)
# Add the main output to the description.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT})
for side_tag in transform.output_tags:
# The assumption here is that all outputs will have the same typehint
# and coder as the main output. This is certainly the case right now
# but conceivably it could change in the future.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, side_tag)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: (
'%s_%s' % (PropertyNames.OUT, side_tag))})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
@staticmethod
def _pardo_fn_data(transform_node, get_label):
transform = transform_node.transform
si_tags_and_types = [ # pylint: disable=protected-access
(get_label(side_pval), side_pval.__class__, side_pval._view_options())
for side_pval in transform_node.side_inputs]
return (transform.fn, transform.args, transform.kwargs, si_tags_and_types,
transform_node.inputs[0].windowing)
def apply_CombineValues(self, transform, pcoll):
return pvalue.PCollection(pcoll.pipeline)
def run_CombineValues(self, transform_node):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.COMBINE, transform_node.full_label, transform_node)
    # Combiner functions do not take deferred side-inputs (i.e. PValues) and
    # therefore the code to handle extra args/kwargs is simpler than for the
    # DoFns of the ParDo transform. The last, empty argument is where
    # side-input information would go.
fn_data = (transform.fn, transform.args, transform.kwargs, ())
step.add_property(PropertyNames.SERIALIZED_FN,
pickler.dumps(fn_data))
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
# Note that the accumulator must not have a WindowedValue encoding, while
# the output of this step does in fact have a WindowedValue encoding.
accumulator_encoding = self._get_cloud_encoding(
transform_node.transform.fn.get_accumulator_coder())
output_encoding = self._get_encoded_output_coder(transform_node)
step.encoding = output_encoding
step.add_property(PropertyNames.ENCODING, accumulator_encoding)
# Generate description for main output 'out.'
outputs = []
# Add the main output to the description.
outputs.append(
{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT})
step.add_property(PropertyNames.OUTPUT_INFO, outputs)
def run_Read(self, transform_node):
transform = transform_node.transform
step = self._add_step(
TransformNames.READ, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the source specific properties.
if not hasattr(transform.source, 'format'):
# If a format is not set, we assume the source to be a custom source.
source_dict = {}
source_dict['spec'] = {
'@type': names.SOURCE_TYPE,
names.SERIALIZED_SOURCE_KEY: pickler.dumps(transform.source)
}
try:
source_dict['metadata'] = {
'estimated_size_bytes': json_value.get_typed_value_descriptor(
transform.source.estimate_size())
}
except error.RuntimeValueProviderError:
        # Size estimation is best effort, and this error is raised by the
        # value provider.
logging.info(
'Could not estimate size of source %r due to ' + \
'RuntimeValueProviderError', transform.source)
except Exception: # pylint: disable=broad-except
# Size estimation is best effort. So we log the error and continue.
logging.info(
'Could not estimate size of source %r due to an exception: %s',
transform.source, traceback.format_exc())
step.add_property(PropertyNames.SOURCE_STEP_INPUT,
source_dict)
elif transform.source.format == 'text':
step.add_property(PropertyNames.FILE_PATTERN, transform.source.path)
elif transform.source.format == 'bigquery':
step.add_property(PropertyNames.BIGQUERY_EXPORT_FORMAT, 'FORMAT_AVRO')
# TODO(silviuc): Add table validation if transform.source.validate.
if transform.source.table_reference is not None:
step.add_property(PropertyNames.BIGQUERY_DATASET,
transform.source.table_reference.datasetId)
step.add_property(PropertyNames.BIGQUERY_TABLE,
transform.source.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.source.table_reference.projectId is not None:
step.add_property(PropertyNames.BIGQUERY_PROJECT,
transform.source.table_reference.projectId)
elif transform.source.query is not None:
step.add_property(PropertyNames.BIGQUERY_QUERY, transform.source.query)
step.add_property(PropertyNames.BIGQUERY_USE_LEGACY_SQL,
transform.source.use_legacy_sql)
step.add_property(PropertyNames.BIGQUERY_FLATTEN_RESULTS,
transform.source.flatten_results)
else:
        raise ValueError('BigQuery source %r must specify either a table'
                         ' or a query' % (transform.source,))
elif transform.source.format == 'pubsub':
standard_options = (
transform_node.inputs[0].pipeline.options.view_as(StandardOptions))
if not standard_options.streaming:
raise ValueError('PubSubPayloadSource is currently available for use '
'only in streaming pipelines.')
# Only one of topic or subscription should be set.
if transform.source.full_subscription:
step.add_property(PropertyNames.PUBSUB_SUBSCRIPTION,
transform.source.full_subscription)
elif transform.source.full_topic:
step.add_property(PropertyNames.PUBSUB_TOPIC,
transform.source.full_topic)
if transform.source.id_label:
step.add_property(PropertyNames.PUBSUB_ID_LABEL,
transform.source.id_label)
else:
raise ValueError(
'Source %r has unexpected format %s.' % (
transform.source, transform.source.format))
if not hasattr(transform.source, 'format'):
step.add_property(PropertyNames.FORMAT, names.SOURCE_FORMAT)
else:
step.add_property(PropertyNames.FORMAT, transform.source.format)
# Wrap coder in WindowedValueCoder: this is necessary as the encoding of a
# step should be the type of value outputted by each step. Read steps
# automatically wrap output values in a WindowedValue wrapper, if necessary.
# This is also necessary for proper encoding for size estimation.
    # Using a GlobalWindowCoder as a place holder instead of the default
    # PickleCoder because GlobalWindowCoder is a known coder.
# TODO(robertwb): Query the collection for the windowfn to extract the
# correct coder.
coder = coders.WindowedValueCoder(transform._infer_output_coder(),
coders.coders.GlobalWindowCoder()) # pylint: disable=protected-access
step.encoding = self._get_cloud_encoding(coder)
step.add_property(
PropertyNames.OUTPUT_INFO,
[{PropertyNames.USER_NAME: (
'%s.%s' % (transform_node.full_label, PropertyNames.OUT)),
PropertyNames.ENCODING: step.encoding,
PropertyNames.OUTPUT_NAME: PropertyNames.OUT}])
def run__NativeWrite(self, transform_node):
transform = transform_node.transform
input_tag = transform_node.inputs[0].tag
input_step = self._cache.get_pvalue(transform_node.inputs[0])
step = self._add_step(
TransformNames.WRITE, transform_node.full_label, transform_node)
# TODO(mairbek): refactor if-else tree to use registerable functions.
# Initialize the sink specific properties.
if transform.sink.format == 'text':
      # Note that it is important to use typed properties (@type/value dicts)
      # for non-string properties and also for empty strings. For example,
      # in the code below num_shards must be typed, as must file_name_suffix
      # and shard_name_template (which could be empty strings).
step.add_property(
PropertyNames.FILE_NAME_PREFIX, transform.sink.file_name_prefix,
with_type=True)
step.add_property(
PropertyNames.FILE_NAME_SUFFIX, transform.sink.file_name_suffix,
with_type=True)
step.add_property(
PropertyNames.SHARD_NAME_TEMPLATE, transform.sink.shard_name_template,
with_type=True)
if transform.sink.num_shards > 0:
step.add_property(
PropertyNames.NUM_SHARDS, transform.sink.num_shards, with_type=True)
# TODO(silviuc): Implement sink validation.
step.add_property(PropertyNames.VALIDATE_SINK, False, with_type=True)
elif transform.sink.format == 'bigquery':
# TODO(silviuc): Add table validation if transform.sink.validate.
step.add_property(PropertyNames.BIGQUERY_DATASET,
transform.sink.table_reference.datasetId)
step.add_property(PropertyNames.BIGQUERY_TABLE,
transform.sink.table_reference.tableId)
# If project owning the table was not specified then the project owning
# the workflow (current project) will be used.
if transform.sink.table_reference.projectId is not None:
step.add_property(PropertyNames.BIGQUERY_PROJECT,
transform.sink.table_reference.projectId)
step.add_property(PropertyNames.BIGQUERY_CREATE_DISPOSITION,
transform.sink.create_disposition)
step.add_property(PropertyNames.BIGQUERY_WRITE_DISPOSITION,
transform.sink.write_disposition)
if transform.sink.table_schema is not None:
step.add_property(
PropertyNames.BIGQUERY_SCHEMA, transform.sink.schema_as_json())
elif transform.sink.format == 'pubsub':
standard_options = (
transform_node.inputs[0].pipeline.options.view_as(StandardOptions))
if not standard_options.streaming:
raise ValueError('PubSubPayloadSink is currently available for use '
'only in streaming pipelines.')
step.add_property(PropertyNames.PUBSUB_TOPIC, transform.sink.full_topic)
else:
raise ValueError(
'Sink %r has unexpected format %s.' % (
transform.sink, transform.sink.format))
step.add_property(PropertyNames.FORMAT, transform.sink.format)
# Wrap coder in WindowedValueCoder: this is necessary for proper encoding
    # for size estimation. Using a GlobalWindowCoder as a place holder instead
    # of the default PickleCoder because GlobalWindowCoder is a known coder.
# TODO(robertwb): Query the collection for the windowfn to extract the
# correct coder.
coder = coders.WindowedValueCoder(transform.sink.coder,
coders.coders.GlobalWindowCoder())
step.encoding = self._get_cloud_encoding(coder)
step.add_property(PropertyNames.ENCODING, step.encoding)
step.add_property(
PropertyNames.PARALLEL_INPUT,
{'@type': 'OutputReference',
PropertyNames.STEP_NAME: input_step.proto.name,
PropertyNames.OUTPUT_NAME: input_step.get_output(input_tag)})
@classmethod
def serialize_windowing_strategy(cls, windowing):
from apache_beam.runners import pipeline_context
from apache_beam.portability.api import beam_runner_api_pb2
context = pipeline_context.PipelineContext()
windowing_proto = windowing.to_runner_api(context)
return cls.byte_array_to_json_string(
beam_runner_api_pb2.MessageWithComponents(
components=context.to_runner_api(),
windowing_strategy=windowing_proto).SerializeToString())
@classmethod
def deserialize_windowing_strategy(cls, serialized_data):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners import pipeline_context
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms.core import Windowing
proto = beam_runner_api_pb2.MessageWithComponents()
proto.ParseFromString(cls.json_string_to_byte_array(serialized_data))
return Windowing.from_runner_api(
proto.windowing_strategy,
pipeline_context.PipelineContext(proto.components))
@staticmethod
def byte_array_to_json_string(raw_bytes):
"""Implements org.apache.beam.sdk.util.StringUtils.byteArrayToJsonString."""
return urllib.quote(raw_bytes)
@staticmethod
def json_string_to_byte_array(encoded_string):
"""Implements org.apache.beam.sdk.util.StringUtils.jsonStringToByteArray."""
return urllib.unquote(encoded_string)
class DataflowPipelineResult(PipelineResult):
"""Represents the state of a pipeline run on the Dataflow service."""
def __init__(self, job, runner):
"""Initialize a new DataflowPipelineResult instance.
Args:
job: Job message from the Dataflow API. Could be :data:`None` if a job
request was not sent to Dataflow service (e.g. template jobs).
runner: DataflowRunner instance.
"""
self._job = job
self._runner = runner
self.metric_results = None
def _update_job(self):
# We need the job id to be able to update job information. There is no need
# to update the job if we are in a known terminal state.
if self.has_job and not self._is_in_terminal_state():
self._job = self._runner.dataflow_client.get_job(self.job_id())
def job_id(self):
return self._job.id
def metrics(self):
return self.metric_results
@property
def has_job(self):
return self._job is not None
@property
def state(self):
"""Return the current state of the remote job.
Returns:
A PipelineState object.
"""
if not self.has_job:
return PipelineState.UNKNOWN
self._update_job()
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
    # TODO: Move this table to another location.
# Ordered by the enum values.
api_jobstate_map = {
values_enum.JOB_STATE_UNKNOWN: PipelineState.UNKNOWN,
values_enum.JOB_STATE_STOPPED: PipelineState.STOPPED,
values_enum.JOB_STATE_RUNNING: PipelineState.RUNNING,
values_enum.JOB_STATE_DONE: PipelineState.DONE,
values_enum.JOB_STATE_FAILED: PipelineState.FAILED,
values_enum.JOB_STATE_CANCELLED: PipelineState.CANCELLED,
values_enum.JOB_STATE_UPDATED: PipelineState.UPDATED,
values_enum.JOB_STATE_DRAINING: PipelineState.DRAINING,
values_enum.JOB_STATE_DRAINED: PipelineState.DRAINED,
values_enum.JOB_STATE_PENDING: PipelineState.PENDING,
values_enum.JOB_STATE_CANCELLING: PipelineState.CANCELLING,
}
return (api_jobstate_map[self._job.currentState] if self._job.currentState
else PipelineState.UNKNOWN)
def _is_in_terminal_state(self):
if not self.has_job:
return True
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
return self._job.currentState in [
values_enum.JOB_STATE_STOPPED, values_enum.JOB_STATE_DONE,
values_enum.JOB_STATE_FAILED, values_enum.JOB_STATE_CANCELLED,
values_enum.JOB_STATE_DRAINED]
def wait_until_finish(self, duration=None):
if not self._is_in_terminal_state():
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
thread = threading.Thread(
target=DataflowRunner.poll_for_job_completion,
args=(self._runner, self, duration))
# Mark the thread as a daemon thread so a keyboard interrupt on the main
# thread will terminate everything. This is also the reason we will not
# use thread.join() to wait for the polling thread.
thread.daemon = True
thread.start()
while thread.isAlive():
time.sleep(5.0)
# TODO: Merge the termination code in poll_for_job_completion and
# _is_in_terminal_state.
terminated = (str(self._job.currentState) != 'JOB_STATE_RUNNING')
assert duration or terminated, (
        'Job did not reach a terminal state after waiting indefinitely.')
if terminated and self.state != PipelineState.DONE:
      # TODO(BEAM-1290): Consider converting this to an error log based on
      # the resolution of the issue.
raise DataflowRuntimeException(
'Dataflow pipeline failed. State: %s, Error:\n%s' %
(self.state, getattr(self._runner, 'last_error_msg', None)), self)
return self.state
def cancel(self):
if not self.has_job:
raise IOError('Failed to get the Dataflow job id.')
self._update_job()
if self._is_in_terminal_state():
logging.warning(
'Cancel failed because job %s is already terminated in state %s.',
self.job_id(), self.state)
else:
if not self._runner.dataflow_client.modify_job_state(
self.job_id(), 'JOB_STATE_CANCELLED'):
cancel_failed_message = (
'Failed to cancel job %s, please go to the Developers Console to '
'cancel it manually.') % self.job_id()
logging.error(cancel_failed_message)
raise DataflowRuntimeException(cancel_failed_message, self)
return self.state
def __str__(self):
return '<%s %s %s>' % (
self.__class__.__name__,
self.job_id(),
self.state)
def __repr__(self):
return '<%s %s at %s>' % (self.__class__.__name__, self._job, hex(id(self)))
class DataflowRuntimeException(Exception):
"""Indicates an error has occurred in running this pipeline."""
def __init__(self, msg, result):
super(DataflowRuntimeException, self).__init__(msg)
self.result = result
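# Illustrative usage sketch (commented out; option values below are
# placeholders, and a real run needs valid GCP credentials plus
# apache_beam[gcp] installed):
#
#     import apache_beam as beam
#     from apache_beam.options.pipeline_options import PipelineOptions
#     options = PipelineOptions(['--runner=DataflowRunner',
#                                '--project=my-project',
#                                '--temp_location=gs://my-bucket/tmp'])
#     with beam.Pipeline(options=options) as p:
#         p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x + 1)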
|
staslev/incubator-beam
|
sdks/python/apache_beam/runners/dataflow/dataflow_runner.py
|
Python
|
apache-2.0
| 44,142
|
[
"VisIt"
] |
f04a866bd99ca9c1f687be6bddc4c18f5b1677d7fb4343346989303353ccb6c2
|
import csv
import socket
from constants import *
from string import capwords
from subprocess import call
from os import unlink as delete
from traceback import print_exc as stacktrace
interesting = {'rubber cap', 'heat core', 'terra mantle', 'steel boots', 'terra legs', 'dreaded cleaver', "butcher's axe", 'mercenary sword',
'glooth amulet', 'giant shimmering pearl', 'terra amulet', 'terra hood', 'terra boots', 'glooth cape', 'glooth axe',
'glooth club', 'glooth blade', 'glooth bag', 'green gem', 'skull staff', 'metal bat', 'gearwheel chain',
'crown armor', 'royal helmet', 'medusa shield', 'tower shield', 'giant sword', 'sacred tree amulet',
'zaoan armor', 'zaoan helmet', 'zaoan legs', 'zaoan shoes',
'deepling squelcher', 'deepling staff', 'necklace of the deep', 'ornate crossbow', 'guardian axe',
'foxtail', 'heavy trident', "warrior's shield", "warrior's axe",
'magic plate armor', 'golden legs', 'mastermind shield', 'fire axe', 'demon shield', 'giant sword','demon trophy',
'demonrage sword', 'gold ring', 'platinum amulet', 'magma legs', 'amber staff', 'onyx flail', 'fire sword', 'magma monocle',
'magma boots', 'ruthless axe', 'wand of inferno', 'gold ingot',
'cheese'
}
notif_time = 2 # in seconds
with open('Database/pluralMap.csv', mode='r') as pluralFile:
csvFile = csv.reader(pluralFile)
pluralMap = {row[0]: row[1] for row in csvFile}
print 'pluralMap loaded'
sockfile = '/tmp/flarelyzer.sock'
def notify(title, msg):
call(['notify-send', '--urgency=low', '--expire-time=' + str(notif_time * 1000), title, msg])
def quit():
global sockfile
try:
print 'stopping memory scanner'
client.sendall('QUIT')
client.recv(10)
except: pass
finally:
print '--Notification agent closed--'
client.close()
delete(sockfile)
notify('Flarelyzer', 'Closed!')
#print '--Notification agent closed--'
exit()
def process_loot(loot):
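    # Each raw entry looks like ' 3 gold coins' or ' a sword'. Returns the
    # entries with amounts stripped and plurals singularized (via pluralMap
    # or the suffix/word fallbacks), plus a dict of amounts keyed by item,
    # e.g. '3 gold coins' -> {'gold coin': '3'} and 'a sword' -> {'sword': '0'}.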
try:
loot = map(lambda x: x[1:], loot)
loot_amounts = dict()
for i in xrange(len(loot)):
lootables = loot[i].split()
if not lootables:
#print 'skipping strange loot message: ', lootables
continue
loot_start = lootables[0]
if loot_start.isdigit():
loot[i] = loot[i][loot[i].find(' ') + 1:]
if loot[i] in pluralMap:
loot[i] = pluralMap[loot[i]]
else:
                    for suffix in plural_suffixes:
                        if loot[i].endswith(suffix):
                            # str.replace returns a new string, so store it back
                            loot[i] = loot[i].replace(suffix, plural_suffixes[suffix])
                            break
                    else:
                        for word in plural_words:
                            if loot[i].startswith(word):
                                loot[i] = loot[i].replace(word, plural_words[word])
                                break
loot_amounts[loot[i]] = loot_start
else:
if loot_start in ['a', 'an']:
loot[i] = loot[i][loot[i].find(' ') + 1:]
loot_amounts[loot[i]] = '0'
return loot, loot_amounts
except:
print 'loot parser error!'
stacktrace()
interesting = set(map(str.lower, interesting))
print 'Creating temporary socket file...'
agent = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
agent.bind(sockfile)
agent.listen(1)
print 'waiting for client...'
client, addr = agent.accept()
try:
while True:
full_msg = client.recv(1024)
try:
client.sendall('ACK')
except IOError:
quit()
if full_msg == 'ATTACHED':
notify('Flarelyzer', 'Started successfully!')
continue
elif full_msg == 'NEXT':
continue
typeInd = full_msg.find('Loot of ') + 8
monsterInd = typeInd
if full_msg.find('a', typeInd) == -1: # Not a valid loot message
#print 'skipping invalid loot message: ', full_msg
continue
        elif full_msg[typeInd] == 'a': # not an 'a' if it's the loot of a boss
monsterInd = typeInd + 2
monster = full_msg[monsterInd:full_msg.rfind(':')]
loot = full_msg[full_msg.rfind(':') + 1:].split(',')
loot, loot_amounts = process_loot(loot)
loot = map(str.lower, loot)
valuables = interesting.intersection(loot)
if valuables:
lootmsg = ''
for v in valuables:
if loot_amounts[v] != '0':
lootmsg += loot_amounts[v] + ' '
lootmsg += capwords(v) + ', '
else:
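                # for-else: runs once the loop finishes; trims the trailing ', '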
lootmsg = lootmsg[:-2]
notify(monster.title(), lootmsg)
except KeyboardInterrupt:
pass
except Exception, e:
print 'Notification agent error!'+str(e)
stacktrace()
quit()
|
Javieracost/Flarelyzer
|
agent.py
|
Python
|
apache-2.0
| 5,036
|
[
"Amber"
] |
df3d82b3f91754aa308cd677fef66075816575e84de473df80f7f06ec1362519
|
# -*- coding: utf-8 -*-
"""
:author: Rinze de Laat <laat@delmic.com>
:copyright: © 2012-2021 Rinze de Laat, Philip Winkler, Delmic
This file is part of Odemis.
.. license::
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not,
see http://www.gnu.org/licenses/.
This module contains classes needed to construct stream panels.
Stream panels are custom, specialized controls that allow the user to view and manipulate various
data streams coming from the microscope.
"""
from __future__ import division
from builtins import str
from past.builtins import basestring
from decorator import decorator
import logging
from collections import OrderedDict
from odemis import acq, gui
from odemis.gui import FG_COLOUR_EDIT, FG_COLOUR_MAIN, BG_COLOUR_MAIN, BG_COLOUR_STREAM, \
FG_COLOUR_DIS, FG_COLOUR_RADIO_ACTIVE, FG_COLOUR_BUTTON
from odemis.gui import img
from odemis.gui.comp import buttons
from odemis.gui.comp.buttons import ImageTextButton
from odemis.gui.comp.combo import ComboBox, ColorMapComboBox
from odemis.gui.comp.foldpanelbar import FoldPanelItem, FoldPanelBar
from odemis.gui.comp.radio import GraphicalRadioButtonControl
from odemis.gui.comp.slider import UnitFloatSlider, VisualRangeSlider, UnitIntegerSlider, Slider
from odemis.gui.comp.text import SuggestTextCtrl, UnitFloatCtrl, FloatTextCtrl, UnitIntegerCtrl, PatternValidator
from odemis.gui.util import call_in_wx_main
from odemis.gui.util.widgets import VigilantAttributeConnector
from odemis.model import TINT_FIT_TO_RGB, TINT_RGB_AS_IS
import wx
import wx.lib.newevent
from wx.lib.pubsub import pub
from odemis.gui.conf.data import COLORMAPS
import matplotlib.colors as colors
stream_remove_event, EVT_STREAM_REMOVE = wx.lib.newevent.NewEvent()
stream_visible_event, EVT_STREAM_VISIBLE = wx.lib.newevent.NewEvent()
stream_peak_event, EVT_STREAM_PEAK = wx.lib.newevent.NewEvent()
# Values to control which option is available
OPT_NAME_EDIT = 1 # allow the renaming of the stream (for one time only)
OPT_BTN_REMOVE = 2 # remove the stream entry
OPT_BTN_SHOW = 4 # show/hide the stream image
OPT_BTN_UPDATE = 8 # update/stop the stream acquisition
OPT_BTN_TINT = 16 # tint of the stream (if the VA exists)
OPT_BTN_PEAK = 32 # show/hide the peak fitting data
OPT_FIT_RGB = 64 # allow a Fit RGB colormap (for spectrum streams)
OPT_NO_COLORMAPS = 128 # do not allow additional colormaps. Typical for an RGB image
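# The OPT_* values are bit flags; combine them with bitwise OR, e.g.
# options = OPT_BTN_REMOVE | OPT_BTN_SHOW | OPT_BTN_UPDATE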
CAPTION_PADDING_RIGHT = 5
ICON_WIDTH, ICON_HEIGHT = 16, 16
TINT_CUSTOM_TEXT = u"Custom tint…"
@decorator
def control_bookkeeper(f, self, *args, **kwargs):
""" Clear the default message, if needed, and advance the row count """
result = f(self, *args, **kwargs)
# This makes the 2nd column's width variable
if not self.gb_sizer.IsColGrowable(1):
self.gb_sizer.AddGrowableCol(1)
# Redo FoldPanelBar layout
win = self
while not isinstance(win, FoldPanelBar):
win = win.Parent
win.Layout()
self.num_rows += 1
return result
class StreamPanelHeader(wx.Control):
""" This class describes a clickable control responsible for expanding and collapsing the
StreamPanel to which it belongs.
It can also contain various sub buttons that allow for stream manipulation.
"""
BUTTON_SIZE = (18, 18) # The pixel size of the button
BUTTON_BORDER_SIZE = 9 # Border space around the buttons
def __init__(self, parent, wid=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.NO_BORDER):
assert(isinstance(parent, StreamPanel))
super(StreamPanelHeader, self).__init__(parent, wid, pos, size, style)
self.SetBackgroundColour(self.Parent.BackgroundColour)
# This style enables us to draw the background with our own paint event handler
self.SetBackgroundStyle(wx.BG_STYLE_PAINT)
# Callback when the label changes: (string (text) -> None)
self.label_change_callback = None
# Create and add sizer and populate with controls
self._sz = wx.BoxSizer(wx.HORIZONTAL)
# Fold indicator icon, drawn directly in the background in a fixed position
self._foldIcons = wx.ImageList(16, 16)
self._foldIcons.Add(img.getBitmap("icon/arr_down_s.png"))
self._foldIcons.Add(img.getBitmap("icon/arr_right_s.png"))
# Add the needed controls to the sizer
self.btn_remove = self._add_remove_btn() if self.Parent.options & OPT_BTN_REMOVE else None
if self.Parent.options & OPT_NAME_EDIT:
self.ctrl_label = self._add_suggest_ctrl()
else:
self.ctrl_label = self._add_label_ctrl()
self.btn_peak = self._add_peak_btn() if self.Parent.options & OPT_BTN_PEAK else None
self.combo_colormap = self._add_colormap_combo() if self.Parent.options & OPT_BTN_TINT else None
self.btn_show = self._add_visibility_btn() if self.Parent.options & OPT_BTN_SHOW else None
self.btn_update = self._add_update_btn() if self.Parent.options & OPT_BTN_UPDATE else None
# Add spacer for creating padding on the right side of the header panel
self._sz.Add((64, 1), 0)
# Set the sizer of the Control
self.SetSizerAndFit(self._sz)
self.Bind(wx.EVT_SIZE, self.on_size)
self.Layout()
# Control creation methods
def _add_remove_btn(self):
""" Add a button for stream removal """
btn_rem = buttons.ImageButton(self,
bitmap=img.getBitmap("icon/ico_rem_str.png"),
size=self.BUTTON_SIZE)
btn_rem.bmpHover = img.getBitmap("icon/ico_rem_str_h.png")
btn_rem.SetToolTip("Remove stream")
self._add_ctrl(btn_rem)
return btn_rem
def _add_suggest_ctrl(self):
""" Add a suggest control to the header panel """
suggest_ctrl = SuggestTextCtrl(self, id=-1, value=self.Parent.stream.name.value)
suggest_ctrl.SetBackgroundColour(self.Parent.GetBackgroundColour())
suggest_ctrl.SetForegroundColour(FG_COLOUR_EDIT)
suggest_ctrl.Bind(wx.EVT_COMMAND_ENTER, self._on_label_change)
self._add_ctrl(suggest_ctrl, stretch=True)
return suggest_ctrl
def _add_label_ctrl(self):
""" Add a label control to the header panel """
label_ctrl = wx.StaticText(self, -1, self.Parent.stream.name.value)
label_ctrl.SetBackgroundColour(self.Parent.GetBackgroundColour())
label_ctrl.SetForegroundColour(FG_COLOUR_MAIN)
self._add_ctrl(label_ctrl, stretch=True)
return label_ctrl
def _add_colormap_combo(self):
""" Add the colormap combobox (in place of tint btn) """
cbstyle = wx.NO_BORDER | wx.TE_PROCESS_ENTER
# Determine possible choices
if not isinstance(self.Parent.stream.tint.value, colors.Colormap):
custom_tint = self.Parent.stream.tint.value
else:
custom_tint = (0, 0, 0)
if self.Parent.options & OPT_NO_COLORMAPS:
self.colormap_choices = OrderedDict([
("Original", TINT_RGB_AS_IS),
])
else:
self.colormap_choices = OrderedDict([
("Grayscale", (255, 255, 255)),
])
# store the index
self._colormap_original_idx = len(self.colormap_choices) - 1
if self.Parent.options & OPT_FIT_RGB:
self.colormap_choices["Fit to RGB"] = TINT_FIT_TO_RGB
# store the index
self._colormap_fitrgb_idx = len(self.colormap_choices) - 1
self.colormap_choices.update(OrderedDict([
("Red tint", (255, 0, 0)),
("Green tint", (0, 255, 0)),
("Blue tint", (0, 0, 255)),
(TINT_CUSTOM_TEXT, custom_tint),
]))
# store the index
self._colormap_customtint_idx = len(self.colormap_choices) - 1
if not self.Parent.options & OPT_NO_COLORMAPS:
self.colormap_choices.update(COLORMAPS) # add the predefined color maps
colormap_combo = ColorMapComboBox(self, wx.ID_ANY, pos=(0, 0), labels=list(self.colormap_choices.keys()),
choices=list(self.colormap_choices.values()), size=(88, 16),
style=cbstyle)
# determine which value to select
for index, value in enumerate(self.colormap_choices.values()):
if self.Parent.stream.tint.value == value:
if self.Parent.options & OPT_NO_COLORMAPS:
colormap_combo.SetSelection(0)
else:
colormap_combo.SetSelection(index)
break
else:
# Set to grayscale by default
colormap_combo.SetSelection(0)
colormap_combo.Bind(wx.EVT_COMBOBOX, self._on_colormap_click)
self.Parent.stream.tint.subscribe(self._on_colormap_value)
self._add_ctrl(colormap_combo)
return colormap_combo
def _add_peak_btn(self):
""" Add the peak toggle button to the stream panel header """
peak_btn = buttons.ImageStateButton(self, bitmap=img.getBitmap("icon/ico_peak_none.png"))
peak_btn.bmpHover = img.getBitmap("icon/ico_peak_none_h.png")
peak_btn.bmpSelected = [img.getBitmap("icon/ico_peak_%s.png" % (m,)) for m in ("gaussian", "lorentzian")]
peak_btn.bmpSelectedHover = [img.getBitmap("icon/ico_peak_%s_h.png" % (m,)) for m in ("gaussian", "lorentzian")]
peak_btn.SetToolTip("Select peak fitting (Gaussian, Lorentzian, or none)")
self._add_ctrl(peak_btn)
return peak_btn
def _add_visibility_btn(self):
""" Add the visibility toggle button to the stream panel header """
visibility_btn = buttons.ImageToggleButton(self,
bitmap=img.getBitmap("icon/ico_eye_closed.png"))
visibility_btn.bmpHover = img.getBitmap("icon/ico_eye_closed_h.png")
visibility_btn.bmpSelected = img.getBitmap("icon/ico_eye_open.png")
visibility_btn.bmpSelectedHover = img.getBitmap("icon/ico_eye_open_h.png")
visibility_btn.SetToolTip("Toggle stream visibility")
self._add_ctrl(visibility_btn)
return visibility_btn
def _add_update_btn(self):
""" Add a button for (de)activation of the stream """
update_btn = buttons.ImageToggleButton(self,
bitmap=img.getBitmap("icon/ico_pause.png"))
update_btn.bmpHover = img.getBitmap("icon/ico_pause_h.png")
update_btn.bmpSelected = img.getBitmap("icon/ico_play.png")
update_btn.bmpSelectedHover = img.getBitmap("icon/ico_play_h.png")
# TODO: add a tooltip for when selected ("Turn off stream" vs "Activate stream")
# => on ImageToggleButton
update_btn.SetToolTip("Update stream")
self._vac_updated = VigilantAttributeConnector(
self.Parent.stream.should_update,
update_btn,
update_btn.SetToggle,
update_btn.GetToggle,
events=wx.EVT_BUTTON
)
self._add_ctrl(update_btn)
return update_btn
def _add_ctrl(self, ctrl, stretch=False):
""" Add the given control to the header panel
:param ctrl: (wx.Control) Control to add to the header panel
:param stretch: True if the control should expand to fill space
"""
# Only the first element has a left border
border = wx.ALL if self._sz.IsEmpty() else wx.RIGHT
self._sz.Add(
ctrl,
proportion=1 if stretch else 0,
flag=(border | wx.ALIGN_CENTRE_VERTICAL | wx.RESERVE_SPACE_EVEN_IF_HIDDEN),
border=self.BUTTON_BORDER_SIZE
)
# END Control creation methods
# Layout and painting
def on_size(self, event):
""" Handle the wx.EVT_SIZE event for the Expander class """
self.SetSize((self.Parent.GetSize().x, -1))
self.Layout()
self.Refresh()
event.Skip()
def on_draw_expander(self, dc):
""" Draw the expand/collapse arrow icon
It needs to be called from the parent's paint event handler.
"""
win_rect = self.GetRect()
x_pos = win_rect.GetRight() - ICON_WIDTH - CAPTION_PADDING_RIGHT
self._foldIcons.Draw(
1 if self.Parent.collapsed else 0,
dc,
x_pos,
(win_rect.GetHeight() - ICON_HEIGHT) // 2,
wx.IMAGELIST_DRAW_TRANSPARENT
)
# END Layout and painting
# Show/hide/disable controls
def _show_ctrl(self, ctrl, show):
""" Show or hide the given control """
if ctrl:
self._sz.Show(ctrl, show)
self._sz.Layout()
def show_remove_btn(self, show):
""" Show or hide the remove button """
self._show_ctrl(self.btn_remove, show)
def show_updated_btn(self, show):
""" Show or hide the update button """
self._show_ctrl(self.btn_update, show)
def show_peak_btn(self, show):
""" Show or hide the peak button """
self._show_ctrl(self.btn_peak, show)
def show_show_btn(self, show):
""" Show or hide the show button """
self._show_ctrl(self.btn_show, show)
def enable_remove_btn(self, enabled):
""" Enable or disable the remove button """
self.btn_remove.Enable(enabled)
def enable_updated_btn(self, enabled):
""" Enable or disable the update button """
self.btn_update.Enable(enabled)
def enable_show_btn(self, enabled):
""" Enable or disable the show button """
self.btn_show.Enable(enabled)
def enable_peak_btn(self, enabled):
""" Enable or disable the peak button """
self.btn_peak.Enable(enabled)
def enable_colormap_combo(self, enabled):
""" Enable or disable colormap dropdown """
self.combo_colormap.Enable(enabled)
def enable(self, enabled):
""" Enable or disable all buttons that are present """
if self.btn_remove:
self.enable_remove_btn(enabled)
if self.btn_update:
self.enable_updated_btn(enabled)
if self.btn_show:
self.enable_show_btn(enabled)
if self.btn_peak:
self.enable_peak_btn(enabled)
if self.combo_colormap:
self.enable_colormap_combo(enabled)
def to_static_mode(self):
""" Remove or disable the controls not needed for a static view of the stream """
self.show_remove_btn(False)
self.show_updated_btn(False)
if isinstance(self.ctrl_label, SuggestTextCtrl):
self.ctrl_label.Disable()
def to_locked_mode(self):
""" Remove or disable all controls """
self.to_static_mode()
self.show_show_btn(False)
self.show_peak_btn(False)
# END Show/hide/disable controls
# GUI event handlers
def _on_label_change(self, evt):
""" Call the label change callback when the label value changes """
if callable(self.label_change_callback):
self.label_change_callback(self.ctrl_label.GetValue())
@call_in_wx_main
def _on_colormap_value(self, colour):
""" Update the colormap selector to reflect the provided colour """
# determine which value to select
for index, value in enumerate(self.colormap_choices.values()):
if colour == value:
self.combo_colormap.SetSelection(index)
break
elif colour == TINT_FIT_TO_RGB:
self.combo_colormap.SetSelection(self._colormap_fitrgb_idx) # fit to RGB
break
else: # Can't find the colour => it's custom tint
if isinstance(colour, tuple):
self.colormap_choices[TINT_CUSTOM_TEXT] = colour
self.combo_colormap.SetClientData(self._colormap_customtint_idx, colour)
else:
logging.warning("Got unknown colormap, which is not a tint: %s", colour)
self.combo_colormap.SetSelection(self._colormap_customtint_idx) # custom tint
@call_in_wx_main
def _on_colormap_click(self, evt):
""" Handle the mouse click event on the tint button """
# check the value of the colormap
index = self.combo_colormap.GetSelection()
name, tint = list(self.colormap_choices.items())[index]
if name == TINT_CUSTOM_TEXT:
# Set default colour to the current value
cldata = wx.ColourData()
cldata.SetColour(wx.Colour(*tint))
dlg = wx.ColourDialog(self, cldata)
if dlg.ShowModal() == wx.ID_OK:
tint = dlg.ColourData.GetColour().Get(includeAlpha=False) # convert to a 3-tuple
logging.debug("Colour %r selected", tint)
# Setting the VA will automatically update the button's colour
self.colormap_choices[TINT_CUSTOM_TEXT] = tint
self.combo_colormap.SetClientData(index, tint)
else:
self._on_colormap_value(self.Parent.stream.tint.value)
return
self.Parent.stream.tint.value = tint
# END GUI event handlers
def set_label_choices(self, choices):
""" Assign a list of predefined labels to the suggest control form which the user may choose
:param choices: [str]
"""
try:
self.ctrl_label.SetChoices(choices)
except AttributeError:
raise TypeError("SuggestTextCtrl required, %s found!!" % type(self.ctrl_label))
def set_focus_on_label(self):
""" Set the focus on the label (and select the text if it's editable) """
self.ctrl_label.SetFocus()
if self.Parent.options & OPT_NAME_EDIT:
self.ctrl_label.SelectAll()
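# Usage sketch (assumption, not part of the original file): a controller can
# react to header renames by assigning the callback and providing choices
# (the choice strings below are hypothetical):
#
#   header.label_change_callback = lambda txt: logging.info("Renamed to %s", txt)
#   header.set_label_choices(["Secondary electrons", "Backscatter"])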
class StreamPanel(wx.Panel):
""" The StreamPanel class, a special case collapsible panel.
The StreamPanel consists of the following widgets:
StreamPanel
BoxSizer
StreamPanelHeader
Panel
BoxSizer
GridBagSizer
Additional controls can be added to the GridBagSizer in the 'finalize' method.
The controls contained within a StreamPanel are typically connected to the VigilantAttribute
properties of the Stream it's representing.
"""
def __init__(self, parent, stream, options=(OPT_BTN_REMOVE | OPT_BTN_SHOW | OPT_BTN_UPDATE),
wid=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.CP_DEFAULT_STYLE, name="StreamPanel", collapsed=False):
"""
:param parent: (StreamBar) The parent widget.
:param stream: (Stream) The stream data model to be displayed to and
modified by the user.
"""
assert(isinstance(parent, StreamBar))
wx.Panel.__init__(self, parent, wid, pos, size, style, name)
self.options = options
self.stream = stream # TODO: Should this also be moved to the StreamController? YES!
# Dye attributes
self._btn_excitation = None
self._btn_emission = None
# Appearance
self.SetBackgroundColour(BG_COLOUR_STREAM)
self.SetForegroundColour(FG_COLOUR_MAIN)
# State
self._collapsed = collapsed
# Child widgets
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.main_sizer)
self._header = None
self._panel = None
self._prev_drange = None
self.gb_sizer = wx.GridBagSizer()
# Counter that keeps track of the number of rows containing controls inside this panel
self.num_rows = 0
self._create_controls()
def _create_controls(self):
""" Set up the basic structure for the controls that are going to be used """
# Create stream header
self._header = StreamPanelHeader(self)
self._header.Bind(wx.EVT_LEFT_UP, self.on_toggle)
self._header.Bind(wx.EVT_PAINT, self.on_draw_expander)
self.Bind(wx.EVT_BUTTON, self.on_button, self._header)
self._header.btn_remove.Bind(wx.EVT_BUTTON, self.on_remove_btn)
self._header.btn_show.Bind(wx.EVT_BUTTON, self.on_visibility_btn)
if self._header.btn_peak is not None:
self._header.btn_peak.Bind(wx.EVT_BUTTON, self.on_peak_btn)
if wx.Platform == "__WXMSW__":
self._header.Bind(wx.EVT_LEFT_DCLICK, self.on_button)
self.main_sizer.Add(self._header, 0, wx.EXPAND)
# Create the control panel
self._panel = wx.Panel(self, style=wx.TAB_TRAVERSAL | wx.NO_BORDER)
# Add a simple sizer so we can create padding for the panel
border_sizer = wx.BoxSizer(wx.HORIZONTAL)
border_sizer.Add(self.gb_sizer, border=5, flag=wx.ALL | wx.EXPAND, proportion=1)
self._panel.SetSizer(border_sizer)
self._panel.SetBackgroundColour(BG_COLOUR_MAIN)
self._panel.SetForegroundColour(FG_COLOUR_MAIN)
self._panel.SetFont(self.GetFont())
# Simplified version of .collapse()
self._panel.Show(not self._collapsed)
self.main_sizer.Add(self._panel, 0, wx.EXPAND)
@control_bookkeeper
def add_metadata_button(self):
"""
Add a button that opens a dialog with all metadata (for static streams)
"""
metadata_btn = ImageTextButton(self._panel, label="Metadata...", height=16, style=wx.ALIGN_CENTER)
self.gb_sizer.Add(metadata_btn, (self.num_rows, 2), span=(1, 1),
flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.TOP | wx.BOTTOM, border=5)
return metadata_btn
@property
def collapsed(self):
return self._collapsed
@property
def header_change_callback(self):
return self._header.label_change_callback
@header_change_callback.setter
def header_change_callback(self, f):
self._header.label_change_callback = f
def set_header_choices(self, choices):
self._header.set_label_choices(choices)
def flatten(self):
""" Unfold the stream panel and hide the header """
self.collapse(False)
self._header.Show(False)
def set_focus_on_label(self):
""" Focus the text label in the header """
self._header.set_focus_on_label()
def Layout(self, *args, **kwargs):
""" Layout the StreamPanel. """
if not self._header or not self._panel or not self.main_sizer:
return False # we need to complete the creation first!
oursz = self.GetSize()
# move & resize the button and the static line
self.main_sizer.SetDimension(0, 0, oursz.GetWidth(),
self.main_sizer.GetMinSize().GetHeight())
self.main_sizer.Layout()
if not self._collapsed:
# move & resize the container window
yoffset = self.main_sizer.GetSize().GetHeight()
if oursz.y - yoffset > 0:
self._panel.SetSize(0, yoffset, oursz.x, oursz.y - yoffset)
# this is very important to make the pane window layout show
# correctly
self._panel.Show()
self._panel.Layout()
return True
def DoGetBestSize(self, *args, **kwargs):
""" Gets the size which best suits the window
For a control, it would be the minimal size which doesn't truncate the control; for a panel,
the same size as it would have after a call to `Fit()`.
TODO: This method seems deprecated. Test if it's really so.
"""
# do not use GetSize() but rather GetMinSize() since it calculates
# the required space of the sizer
sz = self.main_sizer.GetMinSize()
# when expanded, we need more space
if not self._collapsed:
pbs = self._panel.GetBestSize()
sz.width = max(sz.GetWidth(), pbs.x)
# For wxPython 4 this is no longer needed
# sz.height = sz.y + pbs.y
return sz
def set_visible(self, visible):
""" Set the "visible" toggle button of the stream panel """
self._header.btn_show.SetToggle(visible)
def set_peak(self, state):
""" Set the "peak" toggle button of the stream panel
state (None or 0<=int): None for no peak, 0 for gaussian, 1 for lorentzian
"""
self._header.btn_peak.SetState(state)
def collapse(self, collapse):
""" Collapses or expands the pane window """
if self._collapsed == collapse:
return
self.Freeze()
# update our state
self._panel.Show(not collapse)
self._collapsed = collapse
# Call after is used, so the fit will occur after everything has been hidden or shown
wx.CallAfter(self.Parent.fit_streams)
self.Thaw()
# GUI events: update the stream when the user changes the values
def on_remove_btn(self, evt):
logging.debug("Remove button clicked for '%s'", self.stream.name.value)
# generate EVT_STREAM_REMOVE
event = stream_remove_event(spanel=self)
wx.PostEvent(self, event)
def on_visibility_btn(self, evt):
# generate EVT_STREAM_VISIBLE
event = stream_visible_event(visible=self._header.btn_show.GetToggle())
wx.PostEvent(self, event)
def on_peak_btn(self, evt):
# generate EVT_STREAM_PEAK
event = stream_peak_event(state=self._header.btn_peak.GetState())
wx.PostEvent(self, event)
@staticmethod
def create_text_frame(heading, text):
"""
Create a text frame with a close button (same style as the log frame during backend startup)
heading (String): title of the text box
text (String): text to be displayed
"""
frame = wx.Dialog(None, title=heading, size=(800, 800),
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
text = wx.TextCtrl(frame, value=text, style=wx.TE_MULTILINE | wx.TE_READONLY)
textsizer = wx.BoxSizer()
textsizer.Add(text, 1, flag=wx.ALL | wx.EXPAND)
btnsizer = frame.CreateButtonSizer(wx.CLOSE)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(textsizer, 1, flag=wx.ALL | wx.EXPAND, border=5)
sizer.Add(btnsizer, 0, flag=wx.ALIGN_CENTER_VERTICAL | wx.EXPAND | wx.BOTTOM, border=5)
frame.SetSizer(sizer)
frame.CenterOnScreen()
return frame
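# Usage sketch (assumption): displaying a metadata string with the frame
# created above; "md_as_text" stands for a hypothetical pre-formatted string.
#
#   frame = StreamPanel.create_text_frame("Metadata", md_as_text)
#   frame.ShowModal()
#   frame.Destroy()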
# Manipulate expander buttons
def show_updated_btn(self, show):
self._header.show_updated_btn(show)
def enable_updated_btn(self, enabled):
self._header.enable_updated_btn(enabled)
def show_remove_btn(self, show):
self._header.show_remove_btn(show)
def show_visible_btn(self, show):
self._header.show_show_btn(show)
def show_peak_btn(self, show):
self._header.show_peak_btn(show)
def enable(self, enabled):
self._header.enable(enabled)
def OnSize(self, event):
""" Handles the wx.EVT_SIZE event for StreamPanel
"""
self.Layout()
event.Skip()
def on_toggle(self, evt):
""" Detect click on the collapse button of the StreamPanel """
w = evt.GetEventObject().GetSize().GetWidth()
if evt.GetX() > w * 0.85:
self.collapse(not self._collapsed)
else:
evt.Skip()
def on_button(self, event):
""" Handles the wx.EVT_BUTTON event for StreamPanel """
if event.GetEventObject() != self._header:
event.Skip()
return
self.collapse(not self._collapsed)
def on_draw_expander(self, event):
""" Handle the ``wx.EVT_PAINT`` event for the stream panel
:note: This is a drawing routine to paint the GTK-style expander.
"""
dc = wx.AutoBufferedPaintDC(self._header)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
self._header.on_draw_expander(dc)
def to_static_mode(self):
""" Hide or make read-only any button or data that should not change during acquisition """
self._header.to_static_mode()
def to_locked_mode(self):
""" Hide or make read-only all buttons and data controls"""
self._header.to_static_mode()
self._header.to_locked_mode()
# Setting Control Addition Methods
def _add_side_label(self, label_text, tooltip=None):
""" Add a text label to the control grid
This method should only be called from other methods that add controls to the control grid
:param label_text: (str)
:return: (wx.StaticText)
"""
lbl_ctrl = wx.StaticText(self._panel, -1, label_text)
if tooltip:
lbl_ctrl.SetToolTip(tooltip)
self.gb_sizer.Add(lbl_ctrl, (self.num_rows, 0),
flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL, border=5)
return lbl_ctrl
@control_bookkeeper
def add_autobc_ctrls(self):
""" Create and return controls needed for (auto) brightness and contrast manipulation """
btn_autobc = buttons.ImageTextToggleButton(self._panel, height=24,
icon=img.getBitmap("icon/ico_contrast.png"),
label="Auto", active_colour=FG_COLOUR_RADIO_ACTIVE)
btn_autobc.SetToolTip("Toggle image auto brightness/contrast")
lbl_bc_outliers = wx.StaticText(self._panel, -1, "Outliers")
sld_bc_outliers = UnitFloatSlider(
self._panel,
value=self.stream.auto_bc_outliers.value,
min_val=self.stream.auto_bc_outliers.range[0],
max_val=self.stream.auto_bc_outliers.range[1],
unit="%",
scale="cubic",
accuracy=2
)
sld_bc_outliers.SetToolTip("Amount of dark and bright pixels to ignore")
autobc_sz = wx.BoxSizer(wx.HORIZONTAL)
autobc_sz.Add(btn_autobc, 0, flag=wx.ALIGN_CENTRE_VERTICAL | wx.RIGHT, border=5)
autobc_sz.Add(lbl_bc_outliers, 0, flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT, border=5)
autobc_sz.Add(sld_bc_outliers, 1,
flag=wx.LEFT | wx.EXPAND, border=5)
self.gb_sizer.Add(autobc_sz, (self.num_rows, 0), span=(1, 3),
flag=wx.EXPAND | wx.ALL, border=5)
return btn_autobc, lbl_bc_outliers, sld_bc_outliers
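# Usage sketch (assumption): a controller typically wires the returned
# controls to the stream's VAs, in the same fashion as _vac_updated above;
# the exact connector arguments depend on the control:
#
#   btn, lbl, sld = spanel.add_autobc_ctrls()
#   vac = VigilantAttributeConnector(spanel.stream.auto_bc_outliers, sld,
#                                    events=wx.EVT_SLIDER)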
@control_bookkeeper
def add_outliers_ctrls(self):
""" Add controls for the manipulation of the outlier values """
# TODO: Move min/max to controller too?
hist_min = self.stream.intensityRange.range[0][0]
hist_max = self.stream.intensityRange.range[1][1]
sld_hist = VisualRangeSlider(self._panel, size=(-1, 40),
value=self.stream.intensityRange.value,
min_val=hist_min, max_val=hist_max)
sld_hist.SetBackgroundColour("#000000")
self.gb_sizer.Add(sld_hist, pos=(self.num_rows, 0), span=(1, 3), border=5,
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT)
self.num_rows += 1
# Low/high values are in raw data units. They are typically uint, but can
# be float in some corner cases, so we use float controls with high
# accuracy to avoid rounding.
lbl_lowi = wx.StaticText(self._panel, -1, "Low")
tooltip_txt = "Values below are mapped to black [cts/px]."
lbl_lowi.SetToolTip(tooltip_txt)
txt_lowi = FloatTextCtrl(self._panel, -1,
self.stream.intensityRange.value[0],
style=wx.NO_BORDER, size=(-1, 14),
min_val=hist_min, max_val=hist_max,
key_step_min=1, accuracy=6)
txt_lowi.SetForegroundColour(FG_COLOUR_EDIT)
txt_lowi.SetOwnBackgroundColour(BG_COLOUR_MAIN)
txt_lowi.SetToolTip(tooltip_txt)
lbl_highi = wx.StaticText(self._panel, -1, "High")
tooltip_txt = "Values above are mapped to white [cts/px]."
lbl_highi.SetToolTip(tooltip_txt)
txt_highi = FloatTextCtrl(self._panel, -1,
self.stream.intensityRange.value[1],
style=wx.NO_BORDER, size=(-1, 14),
min_val=hist_min, max_val=hist_max,
key_step_min=1, accuracy=6)
txt_highi.SetBackgroundColour(BG_COLOUR_MAIN)
txt_highi.SetForegroundColour(FG_COLOUR_EDIT)
txt_highi.SetToolTip(tooltip_txt)
# Add controls to sizer for spacing
lh_sz = wx.BoxSizer(wx.HORIZONTAL)
lh_sz.Add(lbl_lowi, 0, border=5, flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT)
lh_sz.Add(txt_lowi, 1, border=5,
flag=wx.EXPAND | wx.RIGHT | wx.LEFT)
lh_sz.Add(lbl_highi, 0, border=5, flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT)
lh_sz.Add(txt_highi, 1, border=5,
flag=wx.EXPAND | wx.RIGHT | wx.LEFT)
# Add spacing sizer to grid sizer
self.gb_sizer.Add(lh_sz, (self.num_rows, 0), span=(1, 3), border=5,
flag=wx.BOTTOM | wx.EXPAND)
return sld_hist, txt_lowi, txt_highi
@control_bookkeeper
def add_hw_setting_ctrl(self, name, value=None):
""" Add a generic number control to manipulate a hardware setting """
lbl_ctrl = self._add_side_label(name)
value_ctrl = FloatTextCtrl(self._panel, -1, value or 0.0, style=wx.NO_BORDER)
value_ctrl.SetForegroundColour(gui.FG_COLOUR_EDIT)
value_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), span=(1, 2),
flag=wx.EXPAND | wx.ALL, border=5)
return lbl_ctrl, value_ctrl
def _add_slider(self, klass, label_text, value, conf):
""" Add a slider of type 'klass' to the settings panel """
if conf is None:
conf = {}
lbl_ctrl = self._add_side_label(label_text)
value_ctrl = klass(self._panel, value=value, **conf)
self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), span=(1, 2),
flag=wx.EXPAND | wx.ALL, border=5)
return lbl_ctrl, value_ctrl
@control_bookkeeper
def add_slider(self, label_text, value=None, conf=None):
""" Add an integer value slider to the settings panel
:param label_text: (str) Label text to display
:param value: (None or int) Value to display
:param conf: (None or dict) Dictionary containing parameters for the control
"""
return self._add_slider(Slider, label_text, value, conf)
@control_bookkeeper
def add_integer_slider(self, label_text, value=None, conf=None):
""" Add an integer value slider to the settings panel
:param label_text: (str) Label text to display
:param value: (None or int) Value to display
:param conf: (None or dict) Dictionary containing parameters for the control
"""
return self._add_slider(UnitIntegerSlider, label_text, value, conf)
@control_bookkeeper
def add_float_slider(self, label_text, value=None, conf=None):
""" Add a float value slider to the settings panel
:param label_text: (str) Label text to display
:param value: (None or float) Value to display
:param conf: (None or dict) Dictionary containing parameters for the control
"""
return self._add_slider(UnitFloatSlider, label_text, value, conf)
@control_bookkeeper
def add_int_field(self, label_text, value=None, conf=None):
""" Add an integer value field to the settings panel
:param label_text: (str) Label text to display
:param value: (None or int) Value to display
:param conf: (None or dict) Dictionary containing parameters for the control
"""
return self._add_num_field(UnitIntegerCtrl, label_text, value, conf)
@control_bookkeeper
def add_float_field(self, label_text, value=None, conf=None):
""" Add a float value field to the settings panel
:param label_text: (str) Label text to display
:param value: (None or float) Value to display
:param conf: (None or dict) Dictionary containing parameters for the control
"""
return self._add_num_field(UnitFloatCtrl, label_text, value, conf)
def _add_num_field(self, klass, label_text, value, conf):
if conf is None:
conf = {}
lbl_ctrl = self._add_side_label(label_text)
value_ctrl = klass(self._panel, value=value, style=wx.NO_BORDER, **conf)
self.gb_sizer.Add(value_ctrl, (self.num_rows, 1),
flag=wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=5)
value_ctrl.SetForegroundColour(gui.FG_COLOUR_EDIT)
value_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
return lbl_ctrl, value_ctrl
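# Usage sketch (assumption): the conf dict is forwarded to the control's
# constructor, so unit and range settings can be passed through, e.g.:
#
#   lbl, ctrl = spanel.add_float_field("Exposure", value=0.1,
#                                      conf={"unit": "s", "min_val": 1e-6, "max_val": 10})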
@control_bookkeeper
def add_combobox_control(self, label_text, value=None, conf=None):
""" Add a combo box to the hardware settings panel
:param label_text: (str) Label text to display
:param value: (None or object) Value to select; if not None, it is converted to str and set on the control
:param conf: (None or dict) Dictionary containing parameters for the control
"""
if conf is None:
conf = {}
lbl_ctrl = self._add_side_label(label_text)
cbstyle = wx.NO_BORDER | wx.TE_PROCESS_ENTER | conf.pop("style", 0)
value_ctrl = ComboBox(self._panel, wx.ID_ANY, pos=(0, 0), size=(-1, 16),
style=cbstyle, **conf)
self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), span=(1, 2),
flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.ALL, border=5)
if value is not None:
value_ctrl.SetValue(str(value))
return lbl_ctrl, value_ctrl
@control_bookkeeper
def add_readonly_field(self, label_text, value=None, selectable=True):
""" Adds a value to the control panel that cannot directly be changed by the user
:param label_text: (str) Label text to display
:param value: (None or object) Value to display next to the label.
If None, only the label will be displayed. The object should be
"stringable", so the safest is to ensure it's a string.
:param selectable: (boolean) whether the value can be selected for copying by the user
:return: (Ctrl, Ctrl or None) Label and value control
"""
lbl_ctrl = self._add_side_label(label_text)
# Convert the value object to str if necessary. If it's already a string,
# it is passed as-is to wx.TextCtrl. None is kept as-is, so that the
# "no value control" branch below remains reachable.
if value is not None and not isinstance(value, str):
value = str(value)
if value is not None:
if selectable:
value_ctrl = wx.TextCtrl(self._panel, value=value,
style=wx.BORDER_NONE | wx.TE_READONLY)
value_ctrl.SetForegroundColour(gui.FG_COLOUR_DIS)
value_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
self.gb_sizer.Add(value_ctrl, (self.num_rows, 1),
flag=wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=5)
else:
value_ctrl = wx.StaticText(self._panel, label=value)
value_ctrl.SetForegroundColour(gui.FG_COLOUR_DIS)
self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), flag=wx.ALL, border=5)
else:
value_ctrl = None
return lbl_ctrl, value_ctrl
@control_bookkeeper
def add_checkbox_control(self, label_text, value=True, conf=None):
""" Add a checkbox to the settings panel
:param label_text: (str) Label text to display
:param value: (bool) Value to display (True == checked)
:param conf: (None or dict) Dictionary containing parameters for the control
"""
if conf is None:
conf = {}
lbl_ctrl = self._add_side_label(label_text)
# wx.ALIGN_RIGHT has the effect of only highlighting the box on hover,
# which makes it less ugly with Ubuntu
value_ctrl = wx.CheckBox(self._panel, wx.ID_ANY,
style=wx.ALIGN_RIGHT | wx.NO_BORDER,
**conf)
self.gb_sizer.Add(value_ctrl, (self.num_rows, 1), span=(1, 2),
flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.TOP | wx.BOTTOM, border=5)
value_ctrl.SetValue(value)
return lbl_ctrl, value_ctrl
@control_bookkeeper
def add_radio_control(self, label_text, value=None, conf=None):
""" Add a series of radio buttons to the settings panel
:param label_text: (str) Label text to display
:param value: (None or float) Value to display
:param conf: (None or dict) Dictionary containing parameters for the control
"""
if conf is None:
conf = {}
lbl_ctrl = self._add_side_label(label_text)
value_ctrl = GraphicalRadioButtonControl(self._panel, -1, style=wx.NO_BORDER,
**conf)
self.gb_sizer.Add(value_ctrl, (self.num_rows, 1),
flag=wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=5)
if value is not None:
value_ctrl.SetValue(value)
return lbl_ctrl, value_ctrl
@control_bookkeeper
def add_text_field(self, label_text, value=None, readonly=False):
""" Add a label and text control to the settings panel
:param label_text: (str) Label text to display
:param value: (None or str) Value to display
:param readonly: (boolean) Whether the value can be changed by the user
:return: (Ctrl, Ctrl) Label and text control
"""
lbl_ctrl = self._add_side_label(label_text)
value_ctrl = wx.TextCtrl(self._panel, value=str(value or ""),
style=wx.TE_PROCESS_ENTER | wx.BORDER_NONE | (wx.TE_READONLY if readonly else 0))
if readonly:
value_ctrl.SetForegroundColour(gui.FG_COLOUR_DIS)
else:
value_ctrl.SetForegroundColour(gui.FG_COLOUR_EDIT)
value_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
self.gb_sizer.Add(value_ctrl, (self.num_rows, 1),
flag=wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, border=5)
return lbl_ctrl, value_ctrl
@control_bookkeeper
def add_run_btn(self, label_text):
"""
Add a generic run button and the corresponding side label to the gridbag sizer.
:param label_text: (str) label text to display
:returns: (wx.StaticText, ImageTextButton) side label and run button
"""
lbl_ctrl = self._add_side_label(label_text)
run_btn = ImageTextButton(self._panel, label="Run...", height=16, style=wx.ALIGN_CENTER)
self.gb_sizer.Add(run_btn, (self.num_rows, 2), span=(1, 1),
flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.TOP | wx.BOTTOM, border=5)
return lbl_ctrl, run_btn
@control_bookkeeper
def add_divider(self):
""" Add a dividing line to the stream panel """
line_ctrl = wx.StaticLine(self._panel, size=(-1, 1))
self.gb_sizer.Add(line_ctrl, (self.num_rows, 0), span=(1, 3),
flag=wx.ALL | wx.EXPAND, border=5)
@control_bookkeeper
def add_dye_excitation_ctrl(self, band, readonly, center_wl_color):
lbl_ctrl, value_ctrl, lbl_exc_peak, btn_excitation = self._add_filter_line("Excitation",
band,
readonly,
center_wl_color)
return lbl_ctrl, value_ctrl, lbl_exc_peak, btn_excitation
@control_bookkeeper
def add_dye_emission_ctrl(self, band, readonly, center_wl_color):
lbl_ctrl, value_ctrl, lbl_em_peak, btn_emission = self._add_filter_line("Emission",
band,
readonly,
center_wl_color)
return lbl_ctrl, value_ctrl, lbl_em_peak, btn_emission
def _add_filter_line(self, name, band, readonly, center_wl_color):
""" Create the controls for dye emission/excitation colour filter setting
:param name: (str): the label name
:param band (str): the current wavelength band to display
:param readonly (bool) read-only when there's no or just one band value
:param center_wl_color: None or (r, g, b) center wavelength color of the
current band of the VA. If None, no button is shown.
:return: (4 wx.Controls) the respective controls created
"""
# Note: va.value is in m, but we present everything in nm
lbl_ctrl = self._add_side_label(name)
# will contain both the combo box and the peak label
exc_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.gb_sizer.Add(exc_sizer, (self.num_rows, 1), flag=wx.EXPAND)
if readonly:
hw_set = wx.TextCtrl(self._panel, value=band, size=(-1, 16),
style=wx.BORDER_NONE | wx.TE_READONLY)
hw_set.SetBackgroundColour(self._panel.BackgroundColour)
hw_set.SetForegroundColour(FG_COLOUR_DIS)
exc_sizer.Add(hw_set, 1, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE_VERTICAL, border=5)
else:
hw_set = ComboBox(self._panel, value=band, size=(-1, 16),
style=wx.CB_READONLY | wx.BORDER_NONE)
# To avoid catching mouse wheel events when scrolling the panel
hw_set.Bind(wx.EVT_MOUSEWHEEL, lambda e: None)
exc_sizer.Add(hw_set, 1, border=5, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL)
# Label for peak information
lbl_peak = wx.StaticText(self._panel)
exc_sizer.Add(lbl_peak, 1, border=5, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_LEFT)
if center_wl_color:
# A button, but not clickable, just to show the wavelength
# If a dye is selected, the colour of the peak is used, otherwise we
# use the hardware setting
btn_color = buttons.ColourButton(self._panel, -1, colour=center_wl_color,
size=(18, 18))
self.gb_sizer.Add(btn_color,
(self.num_rows, 2),
flag=wx.RIGHT | wx.ALIGN_CENTRE_VERTICAL | wx.ALIGN_RIGHT,
border=5)
else:
btn_color = None
return lbl_ctrl, hw_set, lbl_peak, btn_color
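# Usage sketch (assumption): for a dye stream, the excitation line could be
# built with the current band pre-formatted in nm and the band's centre
# colour (the values below are hypothetical):
#
#   ctrls = spanel.add_dye_excitation_ctrl("485/25 nm", readonly=False,
#                                          center_wl_color=(0, 150, 255))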
# END Setting Control Addition Methods
@control_bookkeeper
def add_specbw_ctrls(self):
""" Add controls to manipulate the spectrum data bandwidth
Returns:
(VisualRangeSlider, wx.StaticText, wx.StaticText)
"""
# 1st row, center label, slider and value
wl = self.stream.spectrumBandwidth.value
# TODO: Move min/max to controller too?
wl_rng = (self.stream.spectrumBandwidth.range[0][0],
self.stream.spectrumBandwidth.range[1][1])
sld_spec = VisualRangeSlider(self._panel, size=(-1, 40),
value=wl, min_val=wl_rng[0], max_val=wl_rng[1])
sld_spec.SetBackgroundColour("#000000")
self.gb_sizer.Add(sld_spec, pos=(self.num_rows, 0), span=(1, 3), border=5,
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT)
self.num_rows += 1
# 2nd row, text fields for intensity (ratios)
tooltip_txt = "Center wavelength of the spectrum"
lbl_scenter = wx.StaticText(self._panel, -1, "Center")
lbl_scenter.SetToolTip(tooltip_txt)
txt_scenter = UnitFloatCtrl(self._panel, -1, (wl[0] + wl[1]) / 2,
style=wx.NO_BORDER, size=(-1, 14),
min_val=wl_rng[0], max_val=wl_rng[1],
unit=self.stream.spectrumBandwidth.unit, # m or px
accuracy=3)
txt_scenter.SetBackgroundColour(BG_COLOUR_MAIN)
txt_scenter.SetForegroundColour(FG_COLOUR_EDIT)
txt_scenter.SetToolTip(tooltip_txt)
tooltip_txt = "Bandwidth of the spectrum"
lbl_sbw = wx.StaticText(self._panel, -1, "Bandwidth")
lbl_sbw.SetToolTip(tooltip_txt)
txt_sbw = UnitFloatCtrl(self._panel, -1, (wl[1] - wl[0]),
style=wx.NO_BORDER, size=(-1, 14),
min_val=0, max_val=(wl_rng[1] - wl_rng[0]),
unit=self.stream.spectrumBandwidth.unit,
accuracy=3)
txt_sbw.SetBackgroundColour(BG_COLOUR_MAIN)
txt_sbw.SetForegroundColour(FG_COLOUR_EDIT)
txt_sbw.SetToolTip(tooltip_txt)
cb_wl_sz = wx.BoxSizer(wx.HORIZONTAL)
cb_wl_sz.Add(lbl_scenter, 0,
flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT,
border=5)
cb_wl_sz.Add(txt_scenter, 1,
flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.RIGHT | wx.LEFT,
border=5)
cb_wl_sz.Add(lbl_sbw, 0,
flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT,
border=5)
cb_wl_sz.Add(txt_sbw, 1,
flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.RIGHT | wx.LEFT,
border=5)
self.gb_sizer.Add(cb_wl_sz, (self.num_rows, 0), span=(1, 3), border=5,
flag=wx.BOTTOM | wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND)
return sld_spec, txt_scenter, txt_sbw
@control_bookkeeper
def add_specselwidth_ctrl(self):
""" Add a control to manipulate the spectrum selection width
:return: wx.StaticText, UnitIntegerSlider
"""
# Add the selectionWidth VA
tooltip_txt = "Width of the point or line selected"
lbl_selection_width = self._add_side_label("Width", tooltip_txt)
sld_selection_width = UnitIntegerSlider(
self._panel,
value=self.stream.selectionWidth.value,
min_val=self.stream.selectionWidth.range[0],
max_val=self.stream.selectionWidth.range[1],
unit="px",
)
sld_selection_width.SetToolTip(tooltip_txt)
self.gb_sizer.Add(sld_selection_width, (self.num_rows, 1), span=(1, 2), border=5,
flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.ALL)
return lbl_selection_width, sld_selection_width
class StreamBar(wx.Panel):
"""
The whole panel containing stream panels and a button to add more streams
There are multiple levels of visibility of a stream panel:
* the stream panel is shown in the panel and has the visible icon on:
The current view is compatible with the stream and has it in its list
of streams.
* the stream panel is shown in the panel and has the visible icon off:
The current view is compatible with the stream, but the stream is not
in its list of streams
* the stream panel is not present in the panel (hidden):
The current view is not compatible with the stream
"""
DEFAULT_BORDER = 2
DEFAULT_STYLE = wx.BOTTOM | wx.EXPAND
# the order in which the streams are displayed
STREAM_ORDER = (
acq.stream.ScannerSettingsStream,
acq.stream.SEMStream,
acq.stream.StaticSEMStream,
acq.stream.BrightfieldStream,
acq.stream.StaticStream,
acq.stream.FluoStream,
acq.stream.CLStream,
acq.stream.CameraStream,
acq.stream.FIBStream,
acq.stream.ARSettingsStream,
acq.stream.SpectrumSettingsStream,
acq.stream.ScannedTemporalSettingsStream,
acq.stream.TemporalSpectrumSettingsStream,
acq.stream.MonochromatorSettingsStream,
acq.stream.CameraCountStream,
acq.stream.ScannedTCSettingsStream
)
def __init__(self, *args, **kwargs):
add_btn = kwargs.pop('add_button', False)
wx.Panel.__init__(self, *args, **kwargs)
self.stream_panels = []
self._sz = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._sz)
msg = "No streams available."
self.txt_no_stream = wx.StaticText(self, -1, msg)
self._sz.Add(self.txt_no_stream, 0, wx.ALL | wx.ALIGN_CENTER, 10)
self.btn_add_stream = buttons.PopupImageButton(
self, -1,
label="ADD STREAM",
style=wx.ALIGN_CENTER
)
self.btn_add_stream.SetForegroundColour(FG_COLOUR_BUTTON)
self._sz.Add(self.btn_add_stream, flag=wx.ALL, border=10)
self.btn_add_stream.Show(add_btn)
self.btn_add_overview = buttons.PlusImageButton(
self, -1,
label="ADD OVERVIEW",
style=wx.ALIGN_CENTER,
)
self.btn_add_overview.SetForegroundColour(FG_COLOUR_BUTTON)
self._sz.Add(self.btn_add_overview, flag=wx.ALL, border=15)
self.btn_add_overview.Show(False)
self.fit_streams()
def fit_streams(self):
# When the whole window/app is destroyed, each widget receives a destroy
# event. In such a case, it's not worth re-fitting the streams; it can
# even fail because some other objects have already been destroyed.
if not self or self.IsBeingDeleted():
logging.debug("Stream panelbar is being deleted, not refitting")
return
logging.debug("Refitting stream panels")
self._set_warning()
h = self._sz.GetMinSize().GetHeight()
self.SetSize((-1, h))
p = self.Parent
while not isinstance(p, FoldPanelItem):
p = p.Parent
self.Layout()
p.Refresh()
# TODO: maybe should be provided after init by the controller (like key of
# sorted()), to separate the GUI from the model ?
def _get_stream_order(self, stream):
""" Gives the "order" of the given stream, as defined in STREAM_ORDER.
Args:
stream (Stream): a stream
Returns:
(int >= 0): the order
"""
for i, c in enumerate(self.STREAM_ORDER):
if isinstance(stream, c):
return i
msg = "Stream %s of unknown order type %s"
logging.warning(msg, stream.name.value, stream.__class__.__name__)
return len(self.STREAM_ORDER)
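# Sketch (assumption): since the first matching class wins, a plain SEMStream
# returns 1 (provided it is not a subclass of ScannerSettingsStream), while a
# stream type not listed at all sorts last with len(STREAM_ORDER).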
# === VA handlers
# Moved to stream controller
# === Event Handlers
def on_stream_remove(self, evt):
"""
Called when the user requests to remove a stream via the stream panel
"""
st = evt.spanel.stream
logging.debug("User removed stream (panel) %s", st.name.value)
# delete stream panel
self.remove_stream_panel(evt.spanel)
# Publish removal notification
pub.sendMessage("stream.remove", stream=st)
def on_streamp_destroy(self, evt):
"""
Called when a stream panel is completely removed
"""
self.fit_streams()
# === API of the stream panel
def show_add_button(self):
self.btn_add_stream.Show()
self.fit_streams()
def hide_add_button(self):
self.btn_add_stream.Hide()
self.fit_streams()
def show_overview_button(self):
self.btn_add_overview.Show()
self.fit_streams()
def hide_overview_button(self):
self.btn_add_overview.Hide()
self.fit_streams()
def is_empty(self):
return len(self.stream_panels) == 0
def get_size(self):
""" Return the number of streams contained within the StreamBar """
return len(self.stream_panels)
def add_stream_panel(self, spanel, show=True):
"""
This method adds a stream panel to the stream bar. The appropriate
position is automatically determined.
spanel (StreamPanel): a stream panel
"""
# Insert the spanel in the order of STREAM_ORDER. If there are already
# streams with the same type, insert after them.
ins_pos = 0
order_s = self._get_stream_order(spanel.stream)
for e in self.stream_panels:
order_e = self._get_stream_order(e.stream)
if order_s < order_e:
break
ins_pos += 1
logging.debug("Inserting %s at position %s", spanel.stream.__class__.__name__, ins_pos)
self.stream_panels.insert(ins_pos, spanel)
if self._sz is None:
self._sz = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._sz)
self._sz.Insert(ins_pos, spanel,
flag=self.DEFAULT_STYLE,
border=self.DEFAULT_BORDER)
# TODO: instead of a stream_remove message, just take a callable to call
# when the stream needs to be removed
spanel.Bind(EVT_STREAM_REMOVE, self.on_stream_remove)
spanel.Bind(wx.EVT_WINDOW_DESTROY, self.on_streamp_destroy, source=spanel)
spanel.Layout()
# hide the stream if the current view is not compatible
spanel.Show(show)
self.fit_streams()
def remove_stream_panel(self, spanel):
"""
Removes a stream panel
Deletion of the actual stream must be done separately.
Must be called in the main GUI thread
"""
# Remove it from the sizer explicitly: even though the sizer would
# eventually detect it (via the destroy event), that happens later, and
# until then fit_streams() would not compute the correct size.
self._sz.Detach(spanel)
self.stream_panels.remove(spanel)
spanel.Destroy()
def clear(self):
"""
Remove all stream panels
Must be called in the main GUI thread
"""
for p in list(self.stream_panels):
# Only refit the (empty) bar after all streams are gone
p.Unbind(wx.EVT_WINDOW_DESTROY, source=p, handler=self.on_streamp_destroy)
self.remove_stream_panel(p)
self.fit_streams()
def _set_warning(self):
""" Display a warning text when no streams are present, or show it
otherwise.
"""
self.txt_no_stream.Show(self.is_empty())
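# Usage sketch (assumption): a stream bar is populated by creating panels
# with the bar as parent and letting add_stream_panel() sort them;
# "some_stream" is a hypothetical Stream instance:
#
#   bar = StreamBar(parent, add_button=True)
#   spanel = StreamPanel(bar, some_stream)
#   bar.add_stream_panel(spanel, show=True)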
class FastEMProjectPanelHeader(wx.Control):
"""
A widget for expanding and collapsing the project panel. It also contains a remove button and a
text control for the project name.
"""
BUTTON_SIZE = (18, 18) # The pixel size of the button
BUTTON_BORDER_SIZE = 9 # Border space around the buttons
def __init__(self, parent, name, wid=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.NO_BORDER):
assert (isinstance(parent, FastEMProjectPanel))
super(FastEMProjectPanelHeader, self).__init__(parent, wid, pos, size, style)
self.SetBackgroundColour(self.Parent.BackgroundColour)
# This style enables us to draw the background with our own paint event handler
self.SetBackgroundStyle(wx.BG_STYLE_PAINT)
# Create and add sizer and populate with controls
self._sz = wx.BoxSizer(wx.HORIZONTAL)
# Fold indicator icon, drawn directly in the background in a fixed position
self._foldIcons = wx.ImageList(16, 16)
self._foldIcons.Add(img.getBitmap("icon/arr_down_s.png"))
self._foldIcons.Add(img.getBitmap("icon/arr_right_s.png"))
# Add the needed controls to the sizer
self.btn_remove = self._add_remove_btn()
self.txt_ctrl = self._add_text_ctrl(name)
# Add spacer for creating padding on the right side of the header panel
self._sz.Add((64, 1), 0)
# Set the sizer of the Control
self.SetSizerAndFit(self._sz)
self.Bind(wx.EVT_SIZE, self.on_size)
self.Layout()
# Control creation methods
def _add_remove_btn(self):
""" Add a button for project removal """
btn_rem = buttons.ImageButton(self,
bitmap=img.getBitmap("icon/ico_rem_str.png"),
size=self.BUTTON_SIZE)
btn_rem.bmpHover = img.getBitmap("icon/ico_rem_str_h.png")
btn_rem.SetToolTip("Remove project")
self._add_ctrl(btn_rem)
return btn_rem
def _add_text_ctrl(self, name):
""" Add a label control to the header panel """
txt_ctrl = wx.TextCtrl(self, wx.ID_ANY, name, style=wx.TE_PROCESS_ENTER | wx.BORDER_NONE,
validator=PatternValidator(r'[A-Za-z0-9_()-]+'))
txt_ctrl.SetBackgroundColour(self.Parent.GetBackgroundColour())
txt_ctrl.SetForegroundColour(FG_COLOUR_MAIN)
self._add_ctrl(txt_ctrl, stretch=True)
return txt_ctrl
def _add_ctrl(self, ctrl, stretch=False):
""" Add the given control to the header panel
:param ctrl: (wx.Control) Control to add to the header panel
:param stretch: True if the control should expand to fill space
"""
# Only the first element has a left border
border = wx.ALL if self._sz.IsEmpty() else wx.RIGHT
self._sz.Add(
ctrl,
proportion=1 if stretch else 0,
flag=(border | wx.ALIGN_CENTRE_VERTICAL | wx.RESERVE_SPACE_EVEN_IF_HIDDEN),
border=self.BUTTON_BORDER_SIZE
)
# Layout and painting
def on_size(self, event):
""" Handle the wx.EVT_SIZE event for the Expander class """
self.SetSize((self.Parent.GetSize().x, -1))
self.Layout()
self.Refresh()
event.Skip()
def on_draw_expander(self, dc):
""" Draw the expand/collapse arrow icon
It needs to be called from the parent's paint event handler.
"""
win_rect = self.GetRect()
x_pos = win_rect.GetRight() - ICON_WIDTH - CAPTION_PADDING_RIGHT
self._foldIcons.Draw(
1 if self.Parent.collapsed else 0,
dc,
x_pos,
(win_rect.GetHeight() - ICON_HEIGHT) // 2,
wx.IMAGELIST_DRAW_TRANSPARENT
)
class FastEMProjectBar(wx.Panel):
"""
The whole panel containing project panels and a button to add more projects.
"""
DEFAULT_BORDER = 2
DEFAULT_STYLE = wx.BOTTOM | wx.EXPAND
def __init__(self, *args, **kwargs):
add_btn = kwargs.pop('add_button', False)
wx.Panel.__init__(self, *args, **kwargs)
self.project_panels = []
self._sz = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._sz)
self.btn_add_project = buttons.ImageTextButton(
self, -1,
label="ADD PROJECT",
style=wx.ALIGN_CENTER,
bitmap=img.getBitmap("stream_add_b.png")
)
self.btn_add_project.SetForegroundColour(FG_COLOUR_BUTTON)
self.btn_add_project.SetToolTip("Add a new project. A project can be used to organize "
"regions of acquisition (ROA) of similar type.")
self._sz.Add(self.btn_add_project, flag=wx.ALL, border=10)
self.btn_add_project.Show(add_btn)
self.txt_no_project = wx.StaticText(self, -1, "No projects available.")
self._sz.Add(self.txt_no_project, 0, wx.ALL | wx.ALIGN_CENTER, 10)
self.fit_panels()
# === Event Handlers
def on_project_remove(self, evt):
"""
Called when the user requests to remove a project via the project panel
"""
p = evt.spanel.project
logging.debug("User removed project (panel) %s", p.name.value)
# delete project panel
self.remove_project_panel(evt.ppanel)
def on_projectp_destroy(self, evt):
"""
Called when a project panel is completely removed
"""
self.fit_panels()
# === API of the project panel
def is_empty(self):
return len(self.project_panels) == 0
def get_size(self):
""" Return the number of streams contained within the StreamBar """
return len(self.project_panels)
def add_project_panel(self, ppanel, show=True):
"""
This method adds a project panel to the project bar. The appropriate
position is automatically determined.
ppanel (ProjectPanel): a project panel
"""
ins_pos = len(self.project_panels) + 1
self.project_panels.append(ppanel)
self._sz.Insert(ins_pos, ppanel, flag=self.DEFAULT_STYLE, border=self.DEFAULT_BORDER)
# TODO: instead of a stream_remove message, just take a callable to call
# when the stream needs to be removed
ppanel.Bind(EVT_STREAM_REMOVE, self.on_project_remove)
ppanel.Bind(wx.EVT_WINDOW_DESTROY, self.on_projectp_destroy, source=ppanel)
ppanel.Layout()
# hide the project panel if requested
ppanel.Show(show)
self.fit_panels()
def remove_project_panel(self, ppanel):
"""
Removes a project panel
Deletion of the actual project must be done separately.
Must be called in the main GUI thread
"""
# Remove it from the sizer explicitly: even though the sizer would
# eventually detect it (via the destroy event), that happens later, and
# until then fit_panels() would not compute the correct size.
self._sz.Detach(ppanel)
self.project_panels.remove(ppanel)
ppanel.Destroy()
def enable_buttons(self, enabled):
for p in self.project_panels:
p.btn_add_roa.Enable(enabled)
self.btn_add_project.Enable(enabled)
def fit_panels(self):
# When the whole window/app is destroyed, each widget receives a destroy
# event. In such a case, it's not worth re-fitting the panels; it can
# even fail because some other objects have already been destroyed.
if not self or self.IsBeingDeleted():
logging.debug("Project panelbar is being deleted, not refitting")
return
logging.debug("Refitting project panels")
# Display a warning text when no projects are present
self.txt_no_project.Show(self.is_empty())
h = self._sz.GetMinSize().GetHeight()
self.SetSize((-1, h))
p = self.Parent
while not isinstance(p, FoldPanelItem):
p = p.Parent
p.Refresh()
class FastEMProjectPanel(wx.Panel):
"""
Panel for one project, containing multiple ROAPanels.
"""
DEFAULT_BORDER = 2
DEFAULT_STYLE = wx.BOTTOM | wx.EXPAND
def __init__(self, parent,
wid=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.CP_DEFAULT_STYLE, name="ProjectPanel", collapsed=False):
assert(isinstance(parent, FastEMProjectBar))
wx.Panel.__init__(self, parent, wid, pos, size, style, name)
# Appearance
self.SetBackgroundColour(BG_COLOUR_STREAM)
self.SetForegroundColour(FG_COLOUR_MAIN)
# State
self._collapsed = collapsed
# Counter that keeps track of the number of rows containing controls inside this panel
self.num_rows = 0
self.roa_panels = []
# Create project header
self._header = FastEMProjectPanelHeader(self, name)
self._header.Bind(wx.EVT_LEFT_UP, self.on_toggle)
self._header.Bind(wx.EVT_PAINT, self.on_draw_expander)
self.Bind(wx.EVT_BUTTON, self.on_button, self._header)
# Create the control panel
self._panel = wx.Panel(self, style=wx.TAB_TRAVERSAL | wx.NO_BORDER)
self._panel.SetBackgroundColour(BG_COLOUR_MAIN)
self._panel.SetForegroundColour(FG_COLOUR_MAIN)
self._panel.SetFont(self.GetFont())
# Add a border sizer so we can create padding for the panel
self._border_sizer = wx.BoxSizer(wx.VERTICAL)
self._panel_sizer = wx.BoxSizer(wx.VERTICAL)
self._border_sizer.Add(self._panel_sizer, border=10, flag=wx.BOTTOM | wx.EXPAND, proportion=1)
self._panel.SetSizer(self._border_sizer)
self._main_sizer = wx.BoxSizer(wx.VERTICAL)
self._main_sizer.Add(self._header, 0, wx.EXPAND)
self._main_sizer.Add(self._panel, 0, wx.EXPAND)
self.SetSizer(self._main_sizer)
# Add roi button
self.btn_add_roa = buttons.ImageTextButton(
self._panel, -1,
label="ADD ROA",
style=wx.ALIGN_CENTER,
bitmap=img.getBitmap("stream_add_b.png"),
)
self.btn_add_roa.SetForegroundColour(FG_COLOUR_BUTTON)
self.btn_add_roa.SetToolTip("Add new region of acquisition (ROA) to project.")
self._panel_sizer.Add(self.btn_add_roa, flag=wx.TOP | wx.LEFT | wx.RIGHT, border=10)
self.btn_add_roa.Show(True)
# Make remove button and text control public (for FastEMProjectBarController)
self.btn_remove = self._header.btn_remove
self.txt_ctrl = self._header.txt_ctrl
@property
def collapsed(self):
return self._collapsed
def flatten(self):
""" Unfold the stream panel and hide the header """
self.collapse(False)
self._header.Show(False)
def collapse(self, collapse):
""" Collapses or expands the pane window """
if self._collapsed == collapse:
return
self.Freeze()
# update our state
self._panel.Show(not collapse)
self._collapsed = collapse
# Call after is used, so the fit will occur after everything has been hidden or shown
wx.CallAfter(self.Parent.fit_panels)
self.Thaw()
def OnSize(self, event):
""" Handles the wx.EVT_SIZE event for FastEMProjectPanel """
self.Layout()
event.Skip()
def on_toggle(self, evt):
""" Detect click on the collapse button of the FastEMProjectPanel """
w = evt.GetEventObject().GetSize().GetWidth()
if evt.GetX() > w * 0.85:
self.collapse(not self._collapsed)
else:
evt.Skip()
def on_button(self, event):
""" Handles the wx.EVT_BUTTON event for FastEMProjectPanel """
if event.GetEventObject() != self._header:
event.Skip()
return
self.collapse(not self._collapsed)
def on_draw_expander(self, event):
""" Handle the ``wx.EVT_PAINT`` event for the stream panel
:note: This is a drawing routine to paint the GTK-style expander.
"""
dc = wx.AutoBufferedPaintDC(self._header)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
self._header.on_draw_expander(dc)
def add_roa_panel(self, roa_panel):
""" Add a ROA control panel to the project panel, append .roa_panels.
:param roa_panel: (FastEMROAPanel) panel to be added
"""
self.num_rows += 1
self.roa_panels.append(roa_panel)
self._panel_sizer.Add(roa_panel, border=10, flag=wx.LEFT | wx.RIGHT | wx.EXPAND, proportion=1)
self.fit_panels()
def fit_panels(self):
# When the whole window/app is destroyed, each widget receives a destroy
# event. In such a case, it's not worth re-fitting the panels; it can
# even fail because some other objects have already been destroyed.
if not self or self.IsBeingDeleted():
logging.debug("ROA panelbar is being deleted, not refitting")
return
logging.debug("Refitting ROA panels")
h = self._panel_sizer.GetMinSize().GetHeight()
self.SetSize((-1, h))
p = self.Parent
while not isinstance(p, FoldPanelItem):
p = p.Parent
p.Refresh()
class FastEMROAPanel(wx.Panel):
""" Panel for one region of acquisition. """
BUTTON_SIZE = (18, 18) # The pixel size of the button
BUTTON_BORDER_SIZE = 9 # Border space around the buttons
def __init__(self, parent, name, calibrations, wid=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.CP_DEFAULT_STYLE):
"""
name (str): ROA name, default text for the text control
calibrations (list of str): choices for calibration combobox
"""
assert (isinstance(parent, FastEMProjectPanel))
wx.Panel.__init__(self, parent._panel, wid, pos, size, style, name)
self._parent = parent
self._panel_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetForegroundColour(gui.FG_COLOUR_EDIT)
self.SetBackgroundColour(gui.BG_COLOUR_MAIN)
# Add controls
self.btn_remove = self._add_remove_btn()
self.txt_ctrl = self._add_text_ctrl(name)
self.calibration_ctrl = self._add_combobox(calibrations)
# Fit sizer
self._panel_sizer.AddSpacer(5)
self.SetSizerAndFit(self._panel_sizer)
self.Bind(wx.EVT_SIZE, self._on_size)
self.Layout()
self._parent.Refresh()
def activate(self):
self.SetBackgroundColour(gui.BG_COLOUR_STREAM)
self.txt_ctrl.SetBackgroundColour(gui.BG_COLOUR_STREAM)
self.calibration_ctrl.SetBackgroundColour(gui.BG_COLOUR_STREAM)
def deactivate(self):
self.SetBackgroundColour(gui.BG_COLOUR_MAIN)
self.txt_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
self.calibration_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
def _add_remove_btn(self):
""" Add a button for ROI removal """
btn_rem = buttons.ImageButton(self, bitmap=img.getBitmap("icon/ico_rem_str.png"), size=self.BUTTON_SIZE)
btn_rem.bmpHover = img.getBitmap("icon/ico_rem_str_h.png")
btn_rem.SetToolTip("Remove RoA")
self._add_ctrl(btn_rem)
return btn_rem
def _add_text_ctrl(self, default_text):
""" Add a text ctrl to the control grid
:param default_text: (str)
:return: (wx.TextCtrl)
"""
txt_ctrl = wx.TextCtrl(self, wx.ID_ANY, default_text, style=wx.TE_PROCESS_ENTER | wx.BORDER_NONE,
validator=PatternValidator(r'[A-Za-z0-9_()-]+'))
txt_ctrl.SetForegroundColour(gui.FG_COLOUR_EDIT)
txt_ctrl.SetBackgroundColour(gui.BG_COLOUR_MAIN)
self._add_ctrl(txt_ctrl, True)
return txt_ctrl
def _add_combobox(self, choices):
""" Add a combobox to the control grid
:param choices: (list of str)
:return: (wx.ComboBox)
"""
calibration_ctrl = ComboBox(self, value=choices[0], choices=choices, size=(100, -1),
style=wx.CB_READONLY | wx.BORDER_NONE)
self._add_ctrl(calibration_ctrl)
return calibration_ctrl
def _add_ctrl(self, ctrl, stretch=False):
""" Add the given control to the panel
:param ctrl: (wx.Control) Control to add to the header panel
:param stretch: True if the control should expand to fill space
"""
self._panel_sizer.Add(
ctrl,
proportion=1 if stretch else 0,
flag=(wx.RIGHT | wx.ALIGN_CENTRE_VERTICAL | wx.RESERVE_SPACE_EVEN_IF_HIDDEN),
border=self.BUTTON_BORDER_SIZE
)
def _on_size(self, event):
""" Handle the wx.EVT_SIZE event for the Expander class """
self.SetSize((self._parent.GetSize().x, -1))
self.Layout()
self.Refresh()
event.Skip()
class FastEMCalibrationBar(wx.Panel):
"""
The whole panel containing the panel with the calibration buttons.
"""
DEFAULT_BORDER = 2
DEFAULT_STYLE = wx.BOTTOM | wx.EXPAND
def __init__(self, *args, **kwargs):
kwargs.pop('add_button', False) # remove add_button kwarg
wx.Panel.__init__(self, *args, **kwargs)
self._sz = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._sz)
def add_calibration_panel(self, panel):
"""
This method adds the calibration panel to the calibration bar. Should only be called once.
panel (FastEMCalibrationPanel): the calibration panel
"""
self._sz.Insert(0, panel, flag=self.DEFAULT_STYLE, border=self.DEFAULT_BORDER)
panel.Layout()
class FastEMCalibrationPanel(wx.Panel):
"""
Panel for the calibration buttons.
"""
def __init__(self, parent, layout,
wid=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.CP_DEFAULT_STYLE, name="CalibrationPanel"):
"""
layout (list of lists of int): layout of scintillator grid, given as 2D list of scintillator positions,
e.g. [[6, 5, 4], [3, 2, 1]]
"""
assert(isinstance(parent, FastEMCalibrationBar))
wx.Panel.__init__(self, parent, wid, pos, size, style, name)
self.buttons = {} # int --> wx.Button
self._panel_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._panel_sizer)
# Calibration Grid
nrows = len(layout)
ncols = max(len(row) for row in layout)
calgrid_sz = wx.GridBagSizer(nrows, ncols)
for row_idx, row in enumerate(layout):
for col_idx, elem in enumerate(row):
subsz = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(self, wx.ID_ANY, label="?", size=(30, 30))
btn.SetBackgroundColour(FG_COLOUR_BUTTON)
subsz.Add(btn)
subsz.AddSpacer(8)
calgrid_sz.Add(subsz, pos=(row_idx, col_idx))
txt = wx.StaticText(self, wx.ID_ANY, str(elem), size=(10, -1))
subsz.Add(txt, 0, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 5)
subsz.AddSpacer(20)
self.buttons[elem] = btn
self._panel_sizer.Add(calgrid_sz, 0, wx.ALL | wx.ALIGN_CENTER, 10)
self._panel_sizer.AddSpacer(10)
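# Usage sketch (assumption): the bar hosts a single calibration panel, built
# from the scintillator grid layout:
#
#   bar = FastEMCalibrationBar(parent)
#   panel = FastEMCalibrationPanel(bar, layout=[[6, 5, 4], [3, 2, 1]])
#   bar.add_calibration_panel(panel)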
class FastEMOverviewSelectionPanel(wx.Panel):
"""
Panel with toggle buttons to select the scintillators for which an overview image should be acquired.
"""
def __init__(self, parent,
wid=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.CP_DEFAULT_STYLE, name="CalibrationPanel"):
"""
layout (list of lists of int): layout of scintillator grid, given as 2D list of scintillator positions,
e.g. [[6, 5, 4], [3, 2, 1]]
"""
wx.Panel.__init__(self, parent, wid, pos, size, style, name)
self.buttons = {} # int --> wx.Button
self._panel_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._panel_sizer)
def create_controls(self, layout):
"""
layout (list of lists of int): layout of the scintillator grid, given as a 2D list of
scintillator positions, e.g. [[6, 5, 4], [3, 2, 1]]
"""
nrows = len(layout)
ncols = max(len(row) for row in layout)
calgrid_sz = wx.GridBagSizer(nrows, ncols)
for row_idx, row in enumerate(layout):
for col_idx, elem in enumerate(row):
subsz = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.ToggleButton(self, wx.ID_ANY, size=(30, 30))
btn.SetBackgroundColour("#999999")
subsz.Add(btn)
subsz.AddSpacer(8)
calgrid_sz.Add(subsz, pos=(row_idx, col_idx))
txt = wx.StaticText(self, wx.ID_ANY, str(elem), size=(10, -1))
subsz.Add(txt, 0, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 5)
subsz.AddSpacer(20)
self.buttons[elem] = btn
self._panel_sizer.Add(calgrid_sz, 0, wx.ALL | wx.ALIGN_CENTER, 10)
# (End of src/odemis/gui/comp/stream.py from the delmic/odemis repo, GPL-2.0.
# The following is a separate sample-sheet validation script.)
import sys
import os
class validateApp:
sampleFiles = {}
def __init__(self):
self.exitValidate = False
self.debug = True
self.verbose = True
def validateSampleSheetHTSeq(self, dirSample, finalDir, sampleSheet, force):
linenum = 0
if not os.path.exists(dirSample):
self.exitTime("Directory " + dirSample + " is not found")
if os.path.exists(sampleSheet):
f = open(sampleSheet, "r");
else:
self.exitTime("No sample sheet named " + sampleSheet + " was found")
for e in f.readlines():
if linenum != 0:
self.sampleSequenceIDafterPreprocess(dirSample, finalDir, e.split("\t"), force, True)
linenum += 1
for key in self.sampleFiles:
self.sampleFiles[key].sort(key=lambda x: len(x))
return self.sampleFiles
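# Usage sketch (assumption): validating a tab-separated sample sheet; the
# returned dict maps (sample, seq) tuples to sorted lists of bam files:
#
#   app = validateApp()
#   files = app.validateSampleSheetHTSeq("raw_dir", "final_dir", "samples.tsv", force=False)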
def validateSampleSheet(self, dirSample, finalDir, sampleSheet, force, afterPreprocess):
linenum = 0
if not os.path.exists(dirSample):
self.exitTime("Directory " + dirSample + " is not found")
if os.path.exists(sampleSheet):
f = open(sampleSheet, "r");
else:
self.exitTime("No sample sheet named " + sampleSheet + " was found")
for e in f.readlines():
if linenum != 0:
if afterPreprocess:
self.sampleSequenceIDafterPreprocess(dirSample, finalDir, e.split("\t"), force, False)
else:
self.sampleSequenceID(dirSample, finalDir, e.split("\t"), force)
linenum += 1
for key in self.sampleFiles:
self.sampleFiles[key].sort(key=lambda x: len(x))
return self.sampleFiles
def sampleSequenceIDafterPreprocess(self, dirSample, finalDir, seqID, force, htseq):
if seqID[0][0] == "#":
pass
elif len(seqID) >= 2:
seqID[0] = os.path.join(dirSample, seqID[1].rstrip())
seqID[1] = os.path.join(finalDir, seqID[1].rstrip())
self.finalDirTest(seqID[1], force)
if htseq == False:
self.directoryFiles(seqID)
else:
self.directoryFilesHTSeq(seqID, seqID[1].split('/')[-1])
else:
self.exitTime("There weren't two columns in the sample sheet file")
def sampleSequenceID(self, dirSample, finalDir, seqID, force):
if seqID[0][0] == "#":
pass
elif len(seqID) >= 2:
seqID[0] = os.path.join(dirSample, seqID[0])
seqID[1] = os.path.join(finalDir, seqID[1].rstrip())
self.finalDirTest(seqID[1], force)
self.directoryFiles(seqID)
else:
self.exitTime("There weren't two columns in the sample sheet file")
def finalDirTest(self, sampleID, force):
if os.path.exists(sampleID) and not force:
self.exitTime(sampleID + " was already created. Use the -w or --overwrite option to overwrite it")
elif os.path.exists(sampleID):
for root, dirs, files in os.walk(sampleID, topdown=False):
for f in files:
os.remove(os.path.join(root, f))
for d in dirs:
os.rmdir(os.path.join(root, d))
print("Warning")
print("Overwrite was turned on - overwriting " + sampleID + "\n")
#sets up the directory dictionary
#set up key with tuple (sample and seq)
def directoryFilesHTSeq(self, sampleSeq, fileName):
sampleSeq = tuple(sampleSeq)
#print sampleSeq
directoryTest = sampleSeq[0].rstrip()
# bamCount ensures at least one bam file is under the directory
bamCount = 0
if self.testDirectory(directoryTest):
for subdir, dir, files in os.walk(directoryTest):
for file in files:
file = os.path.abspath(os.path.join(directoryTest, file))
if ".bam" in file and fileName in file and not ".bai" in file:
bamCount += 1
if sampleSeq not in self.sampleFiles:
self.sampleFiles[sampleSeq] = []
self.sampleFiles[sampleSeq].append(file)
if (bamCount == 0):
self.exitTime("No bam files were found under this directory - " + directoryTest)
else:
self.exitTime("Directory " + directoryTest + " does not exist")
def directoryFiles(self, sampleSeq):
print(sampleSeq)
directoryTest = sampleSeq[0].rstrip()
sampleSeq = sampleSeq[1]
# fastqCount ensures at least one fastq file is under the directory
fastqCount = 0
if self.testDirectory(directoryTest):
for subdir, dir, files in os.walk(directoryTest):
for file in files:
file = os.path.abspath(os.path.join(directoryTest, file))
if "_R1" in file and ".fastq" in file:
fastqCount += 1
if sampleSeq not in self.sampleFiles:
self.sampleFiles[sampleSeq] = []
self.sampleFiles[sampleSeq].append(self.isPairedEnd(file))
elif "_SE" in file and "fastq" in file:
fastqCount += 1
if sampleSeq not in self.sampleFiles:
self.sampleFiles[sampleSeq] = []
self.sampleFiles[sampleSeq].append(self.isPairedEnd(file))
if (fastqCount == 0):
self.exitTime("No fastq files were found under this directory - " + directoryTest)
else:
self.exitTime("Directory " + directoryTest + " does not exist")
def exitTime(self, exitString):
print(exitString)
if self.exitValidate:
    print("Failure in Validation - exiting process")
    exit(-1)
else:
    print("Failure in Validation - continuing to validate the sample sheet")
# returns [R1, R2] (plus a stray SE file if present) for paired-end reads, or [R1] for single-end
def isPairedEnd(self, fileRead1):
if "_SE" in fileRead1:
return [fileRead1]
file2 = fileRead1.replace("_R1", "_R2")
SE = fileRead1.replace("_R1", "_SE")
if os.path.exists(SE) and os.path.exists(file2):
return [fileRead1, file2, SE]
if os.path.exists(file2):
return [fileRead1, file2]
else:
return [fileRead1]
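# Example (illustrative names, not from the sample sheet): for "lib1_R1.fastq"
# with "lib1_R2.fastq" on disk this returns ["lib1_R1.fastq", "lib1_R2.fastq"];
# if a leftover "lib1_SE.fastq" also exists, all three are returned; for an
# "_SE" input a one-element list is returned.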
# test whether the directory exists; thin wrapper that could probably be removed
def testDirectory(self, directoryTest):
return os.path.isdir(directoryTest)
# outputs the single-end / paired-end reads
def infoOutput(self):
#print self.sampleFiles
for key in self.sampleFiles:
#print "SEQUENCE " + key[0] + " SAMPLE " + key[1]
for files in self.sampleFiles[key]:
if len(files) == 1:
pass
#print "SE Files: " + str(files).strip("[]")
else:
pass
#print "PE Files: " + str(files).strip("[]")
def setValidation(self, exitOut):
self.exitValidate = exitOut
#tuple key (seq ID - sample ID)
#value - 1 value is SE reads and 2 values is PE
def dictionaryFilesReturn(self):
return self.sampleFiles
def main():
    test = validateApp()
    test.setValidation(False)
    # NOTE: illustrative paths; validateSampleSheet expects
    # (dirSample, finalDir, sampleSheet, force, afterPreprocess)
    test.validateSampleSheet("raw_data", "final_data", "samples.txt", True, False)
    test.infoOutput()
|
msettles/expHTS
|
expHTS/validate_app.py
|
Python
|
apache-2.0
| 7,843
|
[
"HTSeq"
] |
b129c21bde4c71778352cde48a8094b9b666ca75f12f76e89b1b2fde29c3713a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
colors = vtk.vtkNamedColors()
# Create five points.
origin = [0.0, 0.0, 0.0]
p0 = [1.0, 0.0, 0.0]
p1 = [0.0, 1.0, 0.0]
p2 = [0.0, 1.0, 2.0]
p3 = [1.0, 2.0, 3.0]
# Create a vtkPoints object and store the points in it
points = vtk.vtkPoints()
points.InsertNextPoint(origin)
points.InsertNextPoint(p0)
points.InsertNextPoint(p1)
points.InsertNextPoint(p2)
points.InsertNextPoint(p3)
polyLine = vtk.vtkPolyLine()
polyLine.GetPointIds().SetNumberOfIds(5)
for i in range(0, 5):
polyLine.GetPointIds().SetId(i, i)
# Create a cell array to store the lines in and add the lines to it
cells = vtk.vtkCellArray()
cells.InsertNextCell(polyLine)
# Create a polydata to store everything in
polyData = vtk.vtkPolyData()
# Add the points to the dataset
polyData.SetPoints(points)
# Add the lines to the dataset
polyData.SetLines(cells)
# Setup actor and mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polyData)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colors.GetColor3d("Tomato"))
# Setup render window, renderer, and interactor
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("PolyLine")
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("DarkOliveGreen"))
renderWindow.Render()
renderWindowInteractor.Start()
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/GeometricObjects/PolyLine.py
|
Python
|
apache-2.0
| 1,752
|
[
"VTK"
] |
53ded223e3a47f366823077a5b9e3c21df9e9e44ac9f3698c39369124db97c95
|
'''
This module provides functionality for normalizing protein data.
Levels can be extracted from supernatant or phosphotyrosine runs using median
or mean peptide levels across multiple channels.
'''
from __future__ import absolute_import, division
# Built-ins
from collections import OrderedDict
import logging
import warnings
# Core data analysis libraries
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
from scipy import stats
LOGGER = logging.getLogger('pyproteome.levels')
WARN_PEP_CUTOFF = 50
REL_CUTOFF = 5
LEVELS_DPI = 90
def kde_max(points):
'''
Estimate the center of a quantification channel by fitting a gaussian
KDE function and finding its maximum.
Parameters
----------
points : list of float
Returns
-------
float
'''
points = points[~np.isnan(points)]
gaus = stats.gaussian_kde(points, bw_method='silverman')
# evaluate the KDE on 1000 points between the 15th and 85th percentiles
x = np.linspace(np.quantile(points, .15), np.quantile(points, .85), 1000)
y = np.array(gaus.pdf(x))
return x[y == y.max()][0]
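# Minimal usage sketch (assumes a NumPy recent enough for default_rng):
# >>> rng = np.random.default_rng(0)
# >>> ratios = rng.normal(loc=1.0, scale=0.1, size=500)
# >>> kde_max(ratios)  # expected to be close to 1.0, the mode of the KDE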
def get_channel_levels(
data,
norm_channels=None,
method='median',
cols=2,
):
'''
Calculate channel normalization levels. This value is calculated by
selecting the peak of Gaussian KDE distribution fitted to channel ratio
values.
Parameters
----------
data : :class:`pyproteome.data_sets.DataSet`
norm_channels : list of str, optional
Sample names of channels to use for normalization.
method : str, optional
Normalize to the 'mean' or 'median' of each row.
cols : int, optional
Number of columns used when displaying KDE distributions.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
channel_levels : dict of str, float
'''
if norm_channels is None:
norm_channels = list(data.channels.keys())
channels = [data.channels[i] for i in norm_channels]
channel_levels = OrderedDict()
rows = int(np.ceil(len(data.channels) / cols))
scale = 3
f, axes = plt.subplots(
rows, cols,
sharex=True,
sharey=True,
figsize=(scale * cols, scale * rows),
dpi=LEVELS_DPI,
)
axes = list(np.asarray(axes).reshape(-1))  # flatten even when subplots returns a 1-D grid
ax_iter = iter(axes)
if method in ['mean']:
norm = data.psms[channels].mean(axis=1)
elif method in ['median']:
norm = data.psms[channels].median(axis=1)
else:
raise ValueError('Unknown normalization method: {}'.format(method))
for col_name, col in zip(norm_channels, channels):
points = (data.psms[col] / norm).dropna()
# Remove peptides whose ratio deviates more than REL_CUTOFF-fold (5x)
points = points[
(points >= 1 / REL_CUTOFF) &
(points <= REL_CUTOFF)
]
if points.shape[0] < WARN_PEP_CUTOFF:
LOGGER.warning(
(
'{}: Too few peptides for normalization, '
'quantification may be inaccurate '
' ({} peptides for {}: {})'
).format(data.name, points.shape[0], col_name, col)
)
if points.shape[0] < 1:
channel_levels[col] = 1
continue
else:
# Fit a gaussian and find its maximum
max_x = kde_max(points)
channel_levels[col] = max_x
ax = next(ax_iter)
# seaborn==0.9.0 throws a scipy.stats warning
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
'',
FutureWarning,
)
sns.distplot(
points.apply(np.log2),
bins=25,
ax=ax,
)
ax.set_xlim(left=-2, right=2)
ax.set_title(
'{} ({})'.format(col_name, col)
if isinstance(data.channels, dict) else
col,
)
txt = 'center = {:.2f}\n$\\sigma$ = {:.2f}'.format(
max_x,
points.std(ddof=1),
)
lines = [
ax.axvline(np.log2(points.median()), color='g', zorder=2, linestyle=':'),
ax.axvline(np.log2(points.mean()), color='r', zorder=3, linestyle='-.'),
ax.axvline(np.log2(max_x), color='c', zorder=4, linestyle='--'),
ax.axvline(np.log2(1), color='k', linestyle='-', zorder=0, lw=2),
]
if col_name == norm_channels[0]:
    ax.legend(lines[:-1], ['median', 'mean', 'kde max'])
ax.text(
s=txt,
x=ax.get_xlim()[1] * .9,
y=1,
fontsize=10,
color='k',
horizontalalignment='right',
verticalalignment='center',
).set_bbox(
dict(
alpha=.75,
linewidth=0.25,
facecolor='white',
zorder=1,
edgecolor='black',
boxstyle='round',
)
)
for ax in ax_iter:
ax.axis('off')
f.suptitle(
'{}'.format(data.name),
fontsize=16,
y=.92,
)
return f, channel_levels
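# Typical usage sketch (ds is a hypothetical pyproteome DataSet instance):
# fig, levels = get_channel_levels(ds, method='median')
# The returned dict maps each channel to its KDE peak; downstream
# normalization can divide each channel's intensities by that level.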
|
white-lab/pyproteome
|
pyproteome/levels.py
|
Python
|
bsd-2-clause
| 5,189
|
[
"Gaussian"
] |
7742af57f1b5fefa340a7f038e0b1382c63d96a1da3fab35d1697ff8e969b458
|
"""
View a 3D rendering of the magnetic field lines and the streamlines of the rational surfaces.
The quality of the latter can be used as an indicator of the quality of the grid. The magnetic field
is computed from efit_analyzed.py. The script can be used as a template to show additional properties of the field.
Based on Enthought's example by Gael Varoquaux <gael.varoquaux@normalesup.org>
http://docs.enthought.com/mayavi/mayavi/auto/example_magnetic_field.html#example-magnetic-field
"""
from __future__ import absolute_import
from __future__ import division
from builtins import range
from past.utils import old_div
from boutdata.collect import collect
import numpy as np
import sys
if sys.version_info[0]>=3:
message = "View3D uses the VTK library through mayavi, which"+\
" is currently only available in python 2"
raise ImportError(message)
else:
from mayavi import mlab
from .read_geqdsk import read_geqdsk
from boututils.View2D import View2D
from scipy import interpolate
from .boutgrid import *
from boututils.file_import import file_import
def View3D(g,path=None, gb=None):
##############################################################################
# Resolution
n=51
#compute Bxy
[Br,Bz,x,y,q]=View2D(g,option=1)
rd=g.r.max()+.5
zd=g.z.max()+.5
##############################################################################
# The grid of points on which we want to evaluate the field
X, Y, Z = np.mgrid[-rd:rd:n*1j, -rd:rd:n*1j, -zd:zd:n*1j]
## Avoid rounding issues :
#f = 1e4 # this gives the precision we are interested by :
#X = np.round(X * f) / f
#Y = np.round(Y * f) / f
#Z = np.round(Z * f) / f
r = np.c_[X.ravel(), Y.ravel(), Z.ravel()]
##############################################################################
# Calculate field
# First initialize a container matrix for the field vector :
B = np.empty_like(r)
#Compute Toroidal field
# fpol is given between simagx (psi on the axis) and sibdry (
# psi on limiter or separatrix). So the toroidal field (fpol/R) and the q profile are within these boundaries
# For each r,z we have psi thus we get fpol if (r,z) is within the boundary (limiter or separatrix) and fpol=fpol(outer_boundary) for outside
#The range of psi is g.psi.max(), g.psi.min() but we have f(psi) up to the limit. Thus we use a new extended variable padded up to max psi
# set points between psi_limit and psi_max
add_psi=np.linspace(g.sibdry,g.psi.max(),10)
# define the x (psi) array
xf=np.arange(float(g.qpsi.size))*(g.sibdry-g.simagx)/float(g.qpsi.size-1) + g.simagx  # np.float was removed in NumPy 1.24
# pad the extra values excluding the 1st value
xf=np.concatenate((xf, add_psi[1::]), axis=0)
# pad fpol with corresponding points
fp=np.lib.pad(g.fpol, (0,9), 'edge')
# create interpolating function
f = interpolate.interp1d(xf, fp)
#calculate Toroidal field
Btrz = old_div(f(g.psi), g.r)
rmin=g.r[:,0].min()
rmax=g.r[:,0].max()
zmin=g.z[0,:].min()
zmax=g.z[0,:].max()
B1p,B2p,B3p,B1t,B2t,B3t = magnetic_field(g,X,Y,Z,rmin,rmax,zmin,zmax, Br,Bz,Btrz)
bpnorm = np.sqrt(B1p**2 + B2p**2 + B3p**2)
btnorm = np.sqrt(B1t**2 + B2t**2 + B3t**2)
BBx=B1p+B1t
BBy=B2p+B2t
BBz=B3p+B3t
btotal = np.sqrt(BBx**2 + BBy**2 + BBz**2)
Psi = psi_field(g,X,Y,Z,rmin,rmax,zmin,zmax)
##############################################################################
# Visualization
# We threshold the data ourselves, as the threshold filter produce a
# data structure inefficient with IsoSurface
#bmax = bnorm.max()
#
#B1[B > bmax] = 0
#B2[B > bmax] = 0
#B3[B > bmax] = 0
#bnorm[bnorm > bmax] = bmax
mlab.figure(1, size=(1080,1080))#, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5))
mlab.clf()
fieldp = mlab.pipeline.vector_field(X, Y, Z, B1p, B2p, B3p,
scalars=bpnorm, name='Bp field')
fieldt = mlab.pipeline.vector_field(X, Y, Z, B1t, B2t, B3t,
scalars=btnorm, name='Bt field')
field = mlab.pipeline.vector_field(X, Y, Z, BBx, BBy, BBz,
scalars=btotal, name='B field')
field2 = mlab.pipeline.scalar_field(X, Y, Z, Psi, name='Psi field')
#vectors = mlab.pipeline.vectors(field,
# scale_factor=1,#(X[1, 0, 0] - X[0, 0, 0]),
# )
#vcp1 = mlab.pipeline.vector_cut_plane(fieldp,
# scale_factor=1,
# colormap='jet',
# plane_orientation='y_axes')
##
#vcp2 = mlab.pipeline.vector_cut_plane(fieldt,
# scale_factor=1,
# colormap='jet',
# plane_orientation='x_axes')
# Mask random points, to have a lighter visualization.
#vectors.glyph.mask_input_points = True
#vectors.glyph.mask_points.on_ratio = 6
#vcp = mlab.pipeline.vector_cut_plane(field1)
#vcp.glyph.glyph.scale_factor=5*(X[1, 0, 0] - X[0, 0, 0])
# For prettier picture:
#vcp1.implicit_plane.widget.enabled = False
#vcp2.implicit_plane.widget.enabled = False
iso = mlab.pipeline.iso_surface(field2,
contours=[Psi.min()+.01],
opacity=0.4,
colormap='bone')
for i in range(q.size):
iso.contour.contours[i+1:i+2]=[q[i]]
iso.compute_normals = True
#
#mlab.pipeline.image_plane_widget(field2,
# plane_orientation='x_axes',
# #slice_index=10,
# extent=[-rd, rd, -rd, rd, -zd,zd]
# )
#mlab.pipeline.image_plane_widget(field2,
# plane_orientation='y_axes',
# # slice_index=10,
# extent=[-rd, rd, -rd,rd, -zd,zd]
# )
#scp = mlab.pipeline.scalar_cut_plane(field2,
# colormap='jet',
# plane_orientation='x_axes')
# For prettier picture and with 2D streamlines:
#scp.implicit_plane.widget.enabled = False
#scp.enable_contours = True
#scp.contour.number_of_contours = 20
#
# Magnetic Axis
s=mlab.pipeline.streamline(field)
s.streamline_type = 'line'
s.seed.widget = s.seed.widget_list[3]
s.seed.widget.position=[g.rmagx,0.,g.zmagx]
s.seed.widget.enabled = False
# q=i surfaces
for i in range(np.shape(x)[0]):
s=mlab.pipeline.streamline(field)
s.streamline_type = 'line'
##s.seed.widget = s.seed.widget_list[0]
##s.seed.widget.center = 0.0, 0.0, 0.0
##s.seed.widget.radius = 1.725
##s.seed.widget.phi_resolution = 16
##s.seed.widget.handle_direction =[ 1., 0., 0.]
##s.seed.widget.enabled = False
##s.seed.widget.enabled = True
##s.seed.widget.enabled = False
#
if x[i].size>1 :
s.seed.widget = s.seed.widget_list[3]
s.seed.widget.position=[x[i][0],0.,y[i][0]]
s.seed.widget.enabled = False
# A trick to make transparency look better: cull the front face
iso.actor.property.frontface_culling = True
#mlab.view(39, 74, 0.59, [.008, .0007, -.005])
out=mlab.outline(extent=[-rd, rd, -rd, rd, -zd, zd], line_width=.5 )
out.outline_mode = 'cornered'
out.outline_filter.corner_factor = 0.0897222
w = mlab.gcf()
w.scene.camera.position = [13.296429046581462, 13.296429046581462, 12.979811259697154]
w.scene.camera.focal_point = [0.0, 0.0, -0.31661778688430786]
w.scene.camera.view_angle = 30.0
w.scene.camera.view_up = [0.0, 0.0, 1.0]
w.scene.camera.clipping_range = [13.220595435695394, 35.020427055647517]
w.scene.camera.compute_view_plane_normal()
w.scene.render()
w.scene.show_axes = True
mlab.show()
if path is not None:
#BOUT data
#path='../Aiba/'
#
#gb = file_import(path+'aiba.bout.grd.nc')
#gb = file_import("../cbm18_8_y064_x516_090309.nc")
#gb = file_import("cbm18_dens8.grid_nx68ny64.nc")
#gb = file_import("/home/ben/run4/reduced_y064_x256.nc")
data = collect('P', path=path)
data = data[50,:,:,:]
#data0=collect("P0", path=path)
#data=data+data0[:,:,None]
s = np.shape(data)
nz = s[2]
sgrid = create_grid(gb, data, 1)
# OVERPLOT the GRID
#mlab.pipeline.add_dataset(sgrid)
#gr=mlab.pipeline.grid_plane(sgrid)
#gr.grid_plane.axis='x'
## pressure scalar cut plane from bout
scpb = mlab.pipeline.scalar_cut_plane(sgrid,
colormap='jet',
plane_orientation='x_axes')
scpb.implicit_plane.widget.enabled = False
scpb.enable_contours = True
scpb.contour.filled_contours=True
#
scpb.contour.number_of_contours = 20
#
#
#loc=sgrid.points
#p=sgrid.point_data.scalars
# compute pressure from scatter points interpolation
#pint=interpolate.griddata(loc, p, (X, Y, Z), method='linear')
#dpint=np.ma.masked_array(pint,np.isnan(pint)).filled(0.)
#
#p2 = mlab.pipeline.scalar_field(X, Y, Z, dpint, name='P field')
#
#scp2 = mlab.pipeline.scalar_cut_plane(p2,
# colormap='jet',
# plane_orientation='y_axes')
#
#scp2.implicit_plane.widget.enabled = False
#scp2.enable_contours = True
#scp2.contour.filled_contours=True
#scp2.contour.number_of_contours = 20
#scp2.contour.minimum_contour=.001
# CHECK grid orientation
#fieldr = mlab.pipeline.vector_field(X, Y, Z, -BBx, BBy, BBz,
# scalars=btotal, name='B field')
#
#sg=mlab.pipeline.streamline(fieldr)
#sg.streamline_type = 'tube'
#sg.seed.widget = sg.seed.widget_list[3]
#sg.seed.widget.position=loc[0]
#sg.seed.widget.enabled = False
#OUTPUT grid
#ww = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts')
#ww.write()
return
def magnetic_field(g,X,Y,Z,rmin,rmax,zmin,zmax,Br,Bz,Btrz):
rho = np.sqrt(X**2 + Y**2)
phi=np.arctan2(Y,X)
br=np.zeros(np.shape(X))
bz=np.zeros(np.shape(X))
bt=np.zeros(np.shape(X))
nx,ny,nz=np.shape(X)
mask = (rho >= rmin) & (rho <= rmax) & (Z >= zmin) & (Z <= zmax)
k=np.argwhere(mask)
fr=interpolate.interp2d(g.r[:,0], g.z[0,:], Br.T)
fz=interpolate.interp2d(g.r[:,0], g.z[0,:], Bz.T)
ft=interpolate.interp2d(g.r[:,0], g.z[0,:], Btrz.T)
for i in range(len(k)):
br[k[i,0],k[i,1],k[i,2]]=fr(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]])
bz[k[i,0],k[i,1],k[i,2]]=fz(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]])
bt[k[i,0],k[i,1],k[i,2]]=ft(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]])
# Toroidal component
B1t=-bt*np.sin(phi)
B2t=bt*np.cos(phi)
B3t=0*bz
# Poloidal component
B1p=br*np.cos(phi)
B2p=br*np.sin(phi)
B3p=bz
# Rotate the field back in the lab's frame
return B1p,B2p,B3p,B1t,B2t,B3t
def psi_field(g,X,Y,Z,rmin,rmax,zmin,zmax):
rho = np.sqrt(X**2 + Y**2)
psi=np.zeros(np.shape(X))
nx,ny,nz=np.shape(X)
mask = (rho >= rmin) & (rho <= rmax) & (Z >= zmin) & (Z <= zmax)
k=np.argwhere(mask)
f=interpolate.interp2d(g.r[:,0], g.z[0,:], g.psi.T)
for i in range(len(k)):
psi[k[i,0],k[i,1],k[i,2]]=f(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]])
# Rotate the field back in the lab's frame
return psi
if __name__ == '__main__':
path='../../tokamak_grids/pyGridGen/'
g=read_geqdsk(path+"g118898.03400")
View3D(g)
mlab.show()
|
erikgrinaker/BOUT-dev
|
tools/pylib/boututils/View3D.py
|
Python
|
gpl-3.0
| 11,702
|
[
"Mayavi",
"VTK"
] |
aad4ce651704d7a509289850275273ae255dbe451f430f2fa1a47a8a10e3985a
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import math
import os
import json
import collections
import itertools
from abc import ABCMeta, abstractmethod, abstractproperty
import random
import warnings
from fnmatch import fnmatch
import re
try:
# math.gcd is available from Python 3.5 onwards
from math import gcd
except ImportError:
# fractions.gcd is deprecated from Python 3.5 onwards
from fractions import gcd
import six
import numpy as np
from pymatgen.core.operations import SymmOp
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element, Specie, get_el_sp
from monty.json import MSONable
from pymatgen.core.sites import Site, PeriodicSite
from pymatgen.core.bonds import CovalentBond, get_bond_length
from pymatgen.core.composition import Composition
from pymatgen.util.coord_utils import get_angle, all_distances, \
lattice_points_in_supercell
from pymatgen.core.units import Mass, Length
from monty.io import zopen
"""
This module provides classes used to define a non-periodic molecule and a
periodic structure.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
class SiteCollection(six.with_metaclass(ABCMeta, collections.Sequence)):
"""
Basic SiteCollection. Essentially a sequence of Sites or PeriodicSites.
This serves as a base class for Molecule (a collection of Site, i.e., no
periodicity) and Structure (a collection of PeriodicSites, i.e.,
periodicity). Not meant to be instantiated directly.
"""
# Tolerance in Angstrom for determining if sites are too close.
DISTANCE_TOLERANCE = 0.5
@abstractproperty
def sites(self):
"""
Returns a tuple of sites.
"""
return
@abstractmethod
def get_distance(self, i, j):
"""
Returns distance between sites at index i and j.
Args:
i (int): Index of first site
j (int): Index of second site
Returns:
(float) Distance between sites at index i and index j.
"""
return
@property
def distance_matrix(self):
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this is overwritten to return the nearest image
distance.
"""
return all_distances(self.cart_coords, self.cart_coords)
@property
def species(self):
"""
Only works for ordered structures.
Disordered structures will raise an AttributeError.
Returns:
([Specie]) List of species at each site of the structure.
"""
return [site.specie for site in self]
@property
def species_and_occu(self):
"""
List of species and occupancies at each site of the structure.
"""
return [site.species_and_occu for site in self]
@property
def ntypesp(self):
"""Number of types of atoms."""
return len(self.types_of_specie)
@property
def types_of_specie(self):
"""
List of types of specie. Only works for ordered structures.
Disordered structures will raise an AttributeError.
"""
# Cannot use set since we want a deterministic algorithm.
types = []
for site in self:
if site.specie not in types:
types.append(site.specie)
return types
def group_by_types(self):
"""Iterate over species grouped by type"""
for t in self.types_of_specie:
for site in self:
if site.specie == t:
yield site
def indices_from_symbol(self, symbol):
"""
Returns a tuple with the sequential indices of the sites
that contain an element with the given chemical symbol.
"""
return tuple((i for i, specie in enumerate(self.species)
if specie.symbol == symbol))
@property
def symbol_set(self):
"""
Tuple with the set of chemical symbols.
Note that len(symbol_set) == len(types_of_specie)
"""
return tuple((specie.symbol for specie in self.types_of_specie))
@property
def atomic_numbers(self):
"""List of atomic numbers."""
return [site.specie.number for site in self]
@property
def site_properties(self):
"""
Returns the site properties as a dict of sequences. E.g.,
{"magmom": (5,-5), "charge": (-4,4)}.
"""
props = {}
prop_keys = set()
for site in self:
prop_keys.update(site.properties.keys())
for k in prop_keys:
props[k] = [site.properties.get(k, None) for site in self]
return props
def __contains__(self, site):
return site in self.sites
def __iter__(self):
return self.sites.__iter__()
def __getitem__(self, ind):
return self.sites[ind]
def __len__(self):
return len(self.sites)
def __hash__(self):
# for now, just use the composition hash code.
return self.composition.__hash__()
@property
def num_sites(self):
"""
Number of sites.
"""
return len(self)
@property
def cart_coords(self):
"""
Returns a np.array of the cartesian coordinates of sites in the
structure.
"""
return np.array([site.coords for site in self])
@property
def formula(self):
"""
(str) Returns the formula.
"""
return self.composition.formula
@property
def composition(self):
"""
(Composition) Returns the composition
"""
elmap = collections.defaultdict(float)
for site in self:
for species, occu in site.species_and_occu.items():
elmap[species] += occu
return Composition(elmap)
@property
def charge(self):
"""
Returns the net charge of the structure based on oxidation states. If
Elements are found, a charge of 0 is assumed.
"""
charge = 0
for site in self:
for specie, amt in site.species_and_occu.items():
charge += getattr(specie, "oxi_state", 0) * amt
return charge
@property
def is_ordered(self):
"""
Checks if structure is ordered, meaning no partial occupancies in any
of the sites.
"""
return all((site.is_ordered for site in self))
def get_angle(self, i, j, k):
"""
Returns angle specified by three sites.
Args:
i (int): Index of first site.
j (int): Index of second site.
k (int): Index of third site.
Returns:
(float) Angle in degrees.
"""
v1 = self[i].coords - self[j].coords
v2 = self[k].coords - self[j].coords
return get_angle(v1, v2, units="degrees")
def get_dihedral(self, i, j, k, l):
"""
Returns dihedral angle specified by four sites.
Args:
i (int): Index of first site
j (int): Index of second site
k (int): Index of third site
l (int): Index of fourth site
Returns:
(float) Dihedral angle in degrees.
"""
v1 = self[k].coords - self[l].coords
v2 = self[j].coords - self[k].coords
v3 = self[i].coords - self[j].coords
v23 = np.cross(v2, v3)
v12 = np.cross(v1, v2)
return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23),
np.dot(v12, v23)))
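# Sanity check (illustrative): for four sites in a planar trans (anti)
# arrangement this returns 180 degrees; a cis (eclipsed) arrangement gives 0.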
def is_valid(self, tol=DISTANCE_TOLERANCE):
"""
True if SiteCollection does not contain atoms that are too close
together. Note that the distance definition is based on type of
SiteCollection. Cartesian distances are used for non-periodic
Molecules, while PBC is taken into account for periodic structures.
Args:
tol (float): Distance tolerance. Defaults to DISTANCE_TOLERANCE (0.5 A).
Returns:
(bool) True if SiteCollection does not contain atoms that are too
close together.
"""
if len(self.sites) == 1:
return True
all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]
return bool(np.min(all_dists) > tol)
@abstractmethod
def to(self, fmt=None, filename=None):
"""
Generates well-known string representations of SiteCollections (e.g.,
molecules / structures). Should return a string type or write to a file.
"""
pass
@classmethod
@abstractmethod
def from_str(cls, input_string, fmt):
"""
Reads in SiteCollection from a string.
"""
pass
@classmethod
@abstractmethod
def from_file(cls, filename):
"""
Reads in SiteCollection from a filename.
"""
pass
class IStructure(SiteCollection, MSONable):
"""
Basic immutable Structure object with periodicity. Essentially a sequence
of PeriodicSites having a common lattice. IStructure is made to be
(somewhat) immutable so that they can function as keys in a dict. To make
modifications, use the standard Structure object instead. Structure
extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a
structure is equivalent to going through the sites in sequence.
"""
def __init__(self, lattice, species, coords, validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=False,
site_properties=None):
"""
Create a periodic structure.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
validate_proximity (bool): Whether to check if there are sites
that are closer than DISTANCE_TOLERANCE (0.5 Ang). Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
if len(species) != len(coords):
raise StructureError("The list of atomic species must be of the"
" same length as the list of fractional"
" coordinates.")
if isinstance(lattice, Lattice):
self._lattice = lattice
else:
self._lattice = Lattice(lattice)
sites = []
for i in range(len(species)):
prop = None
if site_properties:
prop = {k: v[i]
for k, v in site_properties.items()}
sites.append(
PeriodicSite(species[i], coords[i], self._lattice,
to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
properties=prop))
self._sites = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError("Structure contains sites that are "
                     "closer than the distance tolerance "
                     "(0.5 Angstrom)!")
@classmethod
def from_sites(cls, sites, validate_proximity=False,
to_unit_cell=False):
"""
Convenience constructor to make a Structure from a list of sites.
Args:
sites: Sequence of PeriodicSites. Sites must have the same
lattice.
validate_proximity (bool): Whether to check if there are sites
that are closer than DISTANCE_TOLERANCE (0.5 Ang). Defaults to False.
to_unit_cell (bool): Whether to translate sites into the unit
cell.
Returns:
(Structure) Note that missing properties are set as None.
"""
if len(sites) < 1:
raise ValueError("You need at least one site to construct a %s" %
cls)
if (not validate_proximity) and (not to_unit_cell):
# This is not really a good solution, but if we are not changing
# the sites, initializing an empty structure and setting _sites
# to be sites is much faster than doing the full initialization.
lattice = sites[0].lattice
for s in sites[1:]:
if s.lattice != lattice:
raise ValueError("Sites must belong to the same lattice")
s_copy = cls(lattice=lattice, species=[], coords=[])
s_copy._sites = list(sites)
return s_copy
prop_keys = []
props = {}
lattice = None
for i, site in enumerate(sites):
if not lattice:
lattice = site.lattice
elif site.lattice != lattice:
raise ValueError("Sites must belong to the same lattice")
for k, v in site.properties.items():
if k not in prop_keys:
prop_keys.append(k)
props[k] = [None] * len(sites)
props[k][i] = v
for k, v in props.items():
if any((vv is None for vv in v)):
warnings.warn("Not all sites have property %s. Missing values "
"are set to None." % k)
return cls(lattice, [site.species_and_occu for site in sites],
[site.frac_coords for site in sites],
site_properties=props,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell)
@classmethod
def from_spacegroup(cls, sg, lattice, species, coords, site_properties=None,
coords_are_cartesian=False, tol=1e-5):
"""
Generate a structure using a spacegroup. Note that only symmetrically
distinct species and coords should be provided. All equivalent sites
are generated from the spacegroup operations.
Args:
sg (str/int): The spacegroup. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.symmetry.groups import SpaceGroup
try:
i = int(sg)
sgp = SpaceGroup.from_int_number(i)
except ValueError:
sgp = SpaceGroup(sg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not sgp.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.lengths_and_angles,
sgp.symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are "
"different!" % (len(species), len(coords))
)
frac_coords = coords if not coords_are_cartesian else \
    latt.get_fractional_coords(coords)  # use latt: lattice may be a raw 3x3 array
props = {} if site_properties is None else site_properties
all_sp = []
all_coords = []
all_site_properties = collections.defaultdict(list)
for i, (sp, c) in enumerate(zip(species, frac_coords)):
cc = sgp.get_orbit(c, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
for k, v in props.items():
all_site_properties[k].extend([v[i]] * len(cc))
return cls(latt, all_sp, all_coords,
site_properties=all_site_properties)
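# Illustrative call (assumed lattice parameter): FCC copper can be built
# from its spacegroup and the single symmetrically distinct site, e.g.
# Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.6), ["Cu"], [[0, 0, 0]])
# which expands the one site to the four equivalent FCC positions.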
@property
def distance_matrix(self):
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this should return the nearest image distance.
"""
return self.lattice.get_all_distances(self.frac_coords,
self.frac_coords)
@property
def sites(self):
"""
Returns an iterator for the sites in the Structure.
"""
return self._sites
@property
def lattice(self):
"""
Lattice of the structure.
"""
return self._lattice
@property
def density(self):
"""
Returns the density in units of g/cc
"""
m = Mass(self.composition.weight, "amu")
return m.to("g") / (self.volume * Length(1, "ang").to("cm") ** 3)
def get_space_group_info(self, symprec=1e-2, angle_tolerance=5.0):
"""
Convenience method to quickly get the spacegroup of a structure.
Args:
symprec (float): Same definition as in SpacegroupAnalyzer.
Defaults to 1e-2.
angle_tolerance (float): Same definition as in SpacegroupAnalyzer.
Defaults to 5 degrees.
Returns:
spacegroup_symbol, international_number
"""
# Import within method needed to avoid cyclic dependency.
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
a = SpacegroupAnalyzer(self, symprec=symprec,
angle_tolerance=angle_tolerance)
return a.get_space_group_symbol(), a.get_space_group_number()
def matches(self, other, **kwargs):
"""
Check whether this structure is similar to another structure.
Basically a convenience method to call structure matching fitting.
Args:
other (IStructure/Structure): Another structure.
**kwargs: Same **kwargs as in
:class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
Returns:
(bool) True if the structures are similar under some affine
transformation.
"""
from pymatgen.analysis.structure_matcher import StructureMatcher
m = StructureMatcher(**kwargs)
return m.fit(Structure.from_sites(self), Structure.from_sites(other))
def __eq__(self, other):
if other is None:
return False
if len(self) != len(other):
return False
if self.lattice != other.lattice:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# For now, just use the composition hash code.
return self.composition.__hash__()
def __mul__(self, scaling_matrix):
"""
Makes a supercell. Allowing to have sites outside the unit cell
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
a. A full 3x3 scaling matrix defining the linear combination
the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
b. A sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
Returns:
Supercell structure. Note that a Structure is always returned,
even if the input structure is a subclass of Structure. This is
to avoid different arguments signatures from causing problems. If
you prefer a subclass to return its own type, you need to override
this method in the subclass.
"""
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
new_lattice = Lattice(np.dot(scale_matrix, self._lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
for site in self:
for v in c_lat:
s = PeriodicSite(site.species_and_occu, site.coords + v,
new_lattice, properties=site.properties,
coords_are_cartesian=True, to_unit_cell=False)
new_sites.append(s)
return Structure.from_sites(new_sites)
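# Illustrative supercell calls covering the three scaling_matrix forms:
# structure * 3                                   # 3x3x3 supercell
# structure * [2, 1, 1]                           # double the a lattice vector only
# structure * [[2, 1, 0], [0, 3, 0], [0, 0, 1]]   # full 3x3 scaling matrix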
def __rmul__(self, scaling_matrix):
"""
Similar to __mul__ to preserve commutativeness.
"""
return self.__mul__(scaling_matrix)
@property
def frac_coords(self):
"""
Fractional coordinates as a Nx3 numpy array.
"""
return np.array([site.frac_coords for site in self._sites])
@property
def volume(self):
"""
Returns the volume of the structure.
"""
return self._lattice.volume
def get_distance(self, i, j, jimage=None):
"""
Get distance between site i and j assuming periodic boundary
conditions. If jimage is not specified, the nearest periodic image of
site j is selected and the distance to it is returned. If jimage is
specified (as a number of lattice translations in each direction), the
distance to that particular image of site j is returned.
Args:
i (int): Index of first site
j (int): Index of second site
jimage: Number of lattice translations in each lattice direction.
Default is None for nearest image.
Returns:
distance
"""
return self[i].distance(self[j], jimage)
def get_sites_in_sphere(self, pt, r, include_index=False):
"""
Find all sites within a sphere from the point. This includes sites
in other periodic images.
Algorithm:
1. place sphere of radius r in crystal and determine minimum supercell
(parallelpiped) which would contain a sphere of radius r. for this
we need the projection of a_1 on a unit vector perpendicular
to a_2 & a_3 (i.e. the unit vector in the direction b_1) to
determine how many a_1's it will take to contain the sphere.
Nxmax = r * length_of_b_1 / (2 Pi)
2. keep points falling within r.
Args:
pt (3x1 array): cartesian coordinates of center of sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
site_fcoords = np.mod(self.frac_coords, 1)
neighbors = []
for fcoord, dist, i in self._lattice.get_points_in_sphere(
site_fcoords, pt, r):
nnsite = PeriodicSite(self[i].species_and_occu,
fcoord, self._lattice,
properties=self[i].properties)
neighbors.append((nnsite, dist) if not include_index
else (nnsite, dist, i))
return neighbors
def get_neighbors(self, site, r, include_index=False):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site:
site, which is the center of the sphere.
r:
radius of sphere.
include_index:
boolean that determines whether the non-supercell site index
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
nn = self.get_sites_in_sphere(site.coords, r,
include_index=include_index)
return [d for d in nn if site != d[0]]
def get_all_neighbors(self, r, include_index=False):
"""
Get neighbors for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
method get_neighbors as it may not have to build such a large supercell
However if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
The return type is a [(site, dist) ...] since most of the time,
subsequent processing requires the distance.
Args:
r (float): Radius of sphere.
include_index (bool): Whether to include the non-supercell site
in the returned data
Returns:
A list of a list of nearest neighbors for each site, i.e.,
[[(site, dist, index) ...], ..]
Index only supplied if include_index = True.
The index is the index of the site in the original (non-supercell)
structure. This is needed for ewaldmatrix by keeping track of which
sites contribute to the ewald sum.
"""
# Use same algorithm as get_sites_in_sphere to determine supercell but
# loop over all atoms in crystal
recp_len = np.array(self.lattice.reciprocal_lattice.abc)
maxr = np.ceil((r + 0.15) * recp_len / (2 * math.pi))
nmin = np.floor(np.min(self.frac_coords, axis=0)) - maxr
nmax = np.ceil(np.max(self.frac_coords, axis=0)) + maxr
all_ranges = [np.arange(x, y) for x, y in zip(nmin, nmax)]
latt = self._lattice
neighbors = [list() for i in range(len(self._sites))]
all_fcoords = np.mod(self.frac_coords, 1)
coords_in_cell = latt.get_cartesian_coords(all_fcoords)
site_coords = self.cart_coords
indices = np.arange(len(self))
for image in itertools.product(*all_ranges):
coords = latt.get_cartesian_coords(image) + coords_in_cell
all_dists = all_distances(coords, site_coords)
all_within_r = np.bitwise_and(all_dists <= r, all_dists > 1e-8)
for (j, d, within_r) in zip(indices, all_dists, all_within_r):
nnsite = PeriodicSite(self[j].species_and_occu, coords[j],
latt, properties=self[j].properties,
coords_are_cartesian=True)
for i in indices[within_r]:
item = (nnsite, d[i], j) if include_index else (
nnsite, d[i])
neighbors[i].append(item)
return neighbors
def get_neighbors_in_shell(self, origin, r, dr, include_index=False):
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
include_index (bool): Whether to include the non-supercell site
in the returned data
Returns:
[(site, dist, index) ...] since most of the time, subsequent
processing
requires the distance. Index only supplied if include_index = True.
The index is the index of the site in the original (non-supercell)
structure. This is needed for ewaldmatrix by keeping track of which
sites contribute to the ewald sum.
"""
outer = self.get_sites_in_sphere(origin, r + dr,
include_index=include_index)
inner = r - dr
return [t for t in outer if t[1] > inner]
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
return self.__class__.from_sites(sites)
def get_reduced_structure(self, reduction_algo="niggli"):
"""
Get a reduced structure.
Args:
reduction_algo (str): The lattice reduction algorithm to use.
Currently supported options are "niggli" or "LLL".
"""
if reduction_algo == "niggli":
reduced_latt = self._lattice.get_niggli_reduced_lattice()
elif reduction_algo == "LLL":
reduced_latt = self._lattice.get_lll_reduced_lattice()
else:
raise ValueError("Invalid reduction algo : {}"
.format(reduction_algo))
if reduced_latt != self.lattice:
return self.__class__(reduced_latt, self.species_and_occu,
self.cart_coords,
coords_are_cartesian=True, to_unit_cell=True,
site_properties=self.site_properties)
else:
return self.copy()
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
sorted by electronegativity, (ii) a LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
if (not site_properties) and (not sanitize):
# This is not really a good solution, but if we are not changing
# the site_properties or sanitizing, initializing an empty
# structure and setting _sites to be sites is much faster (~100x)
# than doing the full initialization.
s_copy = self.__class__(lattice=self._lattice, species=[],
coords=[])
s_copy._sites = list(self._sites)
return s_copy
props = self.site_properties
if site_properties:
props.update(site_properties)
if not sanitize:
return self.__class__(self._lattice,
self.species_and_occu,
self.frac_coords,
site_properties=props)
else:
reduced_latt = self._lattice.get_lll_reduced_lattice()
new_sites = []
for i, site in enumerate(self):
frac_coords = reduced_latt.get_fractional_coords(site.coords)
site_props = {}
for p in props:
site_props[p] = props[p][i]
new_sites.append(PeriodicSite(site.species_and_occu,
frac_coords, reduced_latt,
to_unit_cell=True,
properties=site_props))
new_sites = sorted(new_sites)
return self.__class__.from_sites(new_sites)
def interpolate(self, end_structure, nimages=10,
interpolate_lattices=False, pbc=True, autosort_tol=0):
"""
Interpolate between this structure and end_structure. Useful for
construction of NEB inputs.
Args:
end_structure (Structure): structure to interpolate between this
structure and end.
nimages (int): No. of interpolation images. Defaults to 10 images.
interpolate_lattices (bool): Whether to interpolate the lattices.
Interpolates the lengths and angles (rather than the matrix)
so orientation may be affected.
pbc (bool): Whether to use periodic boundary conditions to find
the shortest path between endpoints.
autosort_tol (float): A distance tolerance in angstrom in
which to automatically sort end_structure to match to the
closest points in this particular structure. This is usually
what you want in a NEB calculation. 0 implies no sorting.
Otherwise, a 0.5 value usually works pretty well.
Returns:
List of interpolated structures. The starting and ending
structures included as the first and last structures respectively.
A total of (nimages + 1) structures are returned.
"""
# Check length of structures
if len(self) != len(end_structure):
raise ValueError("Structures have different lengths!")
if not (interpolate_lattices or self.lattice == end_structure.lattice):
raise ValueError("Structures with different lattices!")
# Check that both structures have the same species
for i in range(len(self)):
if self[i].species_and_occu != end_structure[i].species_and_occu:
raise ValueError("Different species!\nStructure 1:\n" +
str(self) + "\nStructure 2\n" +
str(end_structure))
start_coords = np.array(self.frac_coords)
end_coords = np.array(end_structure.frac_coords)
if autosort_tol:
dist_matrix = self.lattice.get_all_distances(start_coords,
end_coords)
site_mappings = collections.defaultdict(list)
unmapped_start_ind = []
for i, row in enumerate(dist_matrix):
ind = np.where(row < autosort_tol)[0]
if len(ind) == 1:
site_mappings[i].append(ind[0])
else:
unmapped_start_ind.append(i)
if len(unmapped_start_ind) > 1:
raise ValueError("Unable to reliably match structures "
"with auto_sort_tol = %f. unmapped indices "
"= %s" % (autosort_tol, unmapped_start_ind))
sorted_end_coords = np.zeros_like(end_coords)
matched = []
for i, j in site_mappings.items():
if len(j) > 1:
raise ValueError("Unable to reliably match structures "
"with auto_sort_tol = %f. More than one "
"site match!" % autosort_tol)
sorted_end_coords[i] = end_coords[j[0]]
matched.append(j[0])
if len(unmapped_start_ind) == 1:
i = unmapped_start_ind[0]
j = list(set(range(len(start_coords))).difference(matched))[0]
sorted_end_coords[i] = end_coords[j]
end_coords = sorted_end_coords
vec = end_coords - start_coords
if pbc:
vec -= np.round(vec)
sp = self.species_and_occu
structs = []
if interpolate_lattices:
# interpolate lattice matrices using polar decomposition
from scipy.linalg import polar
# u is unitary (rotation), p is stretch
u, p = polar(np.dot(end_structure.lattice.matrix.T,
np.linalg.inv(self.lattice.matrix.T)))
lvec = p - np.identity(3)
lstart = self.lattice.matrix.T
for x in range(nimages + 1):
if interpolate_lattices:
l_a = np.dot(np.identity(3) + x / nimages * lvec, lstart).T
l = Lattice(l_a)
else:
l = self.lattice
fcoords = start_coords + x / nimages * vec
structs.append(self.__class__(l, sp, fcoords,
site_properties=self.site_properties))
return structs
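# Sketch of typical NEB-style usage (s1, s2 are endpoint Structures with
# matching species): s1.interpolate(s2, nimages=4) returns 5 structures,
# the two endpoints plus 3 evenly spaced intermediate images.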
def get_primitive_structure(self, tolerance=0.25):
"""
This finds a smaller unit cell than the input. Sometimes it doesn't
find the smallest possible one, so this method is recursively called
until it is unable to find a smaller cell.
NOTE: if the tolerance is greater than 1/2 the minimum inter-site
distance in the primitive cell, the algorithm will reject this lattice.
Args:
tolerance (float), Angstroms: Tolerance for each coordinate of a
particular site. For example, [0.1, 0, 0.1] in cartesian
coordinates will be considered to be on the same coordinates
as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
Returns:
The most primitive structure found.
"""
# group sites by species string
sites = sorted(self._sites, key=lambda s: s.species_string)
grouped_sites = [
list(a[1])
for a in itertools.groupby(sites, key=lambda s: s.species_string)]
grouped_fcoords = [np.array([s.frac_coords for s in g])
for g in grouped_sites]
# min_vecs are approximate periodicities of the cell. The exact
# periodicities from the supercell matrices are checked against these
# first
min_fcoords = min(grouped_fcoords, key=lambda x: len(x))
min_vecs = min_fcoords - min_fcoords[0]
# fractional tolerance in the supercell
super_ftol = np.divide(tolerance, self.lattice.abc)
super_ftol_2 = super_ftol * 2
def pbc_coord_intersection(fc1, fc2, tol):
"""
Returns the fractional coords in fc1 that have coordinates
within tolerance to some coordinate in fc2
"""
d = fc1[:, None, :] - fc2[None, :, :]
d -= np.round(d)
np.abs(d, d)
return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]
# here we reduce the number of min_vecs by enforcing that every
# vector in min_vecs approximately maps each site onto a similar site.
# The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no
# reduction.
# This reduction is O(n^3) so usually is an improvement. Using double
# the tolerance because both vectors are approximate
for g in sorted(grouped_fcoords, key=lambda x: len(x)):
for f in g:
min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2)
def get_hnf(fu):
"""
Returns all possible distinct supercell matrices given a
number of formula units in the supercell. Batches the matrices
by the values in the diagonal (for less numpy overhead).
Computational complexity is O(n^3), and difficult to improve.
Might be able to do something smart with checking combinations of a
and b first, though unlikely to reduce to O(n^2).
"""
def factors(n):
for i in range(1, n + 1):
if n % i == 0:
yield i
for det in factors(fu):
if det == 1:
continue
for a in factors(det):
for e in factors(det // a):
g = det // a // e
yield det, np.array(
[[[a, b, c], [0, e, f], [0, 0, g]]
for b, c, f in
itertools.product(range(a), range(a),
range(e))])
# we can't let sites match to their neighbors in the supercell
grouped_non_nbrs = []
for gfcoords in grouped_fcoords:
fdist = gfcoords[None, :, :] - gfcoords[:, None, :]
fdist -= np.round(fdist)
np.abs(fdist, fdist)
non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1)
# since we want sites to match to themselves
np.fill_diagonal(non_nbrs, True)
grouped_non_nbrs.append(non_nbrs)
num_fu = six.moves.reduce(gcd, map(len, grouped_sites))
for size, ms in get_hnf(num_fu):
inv_ms = np.linalg.inv(ms)
# find sets of lattice vectors that are present in min_vecs
dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :]
dist -= np.round(dist)
np.abs(dist, dist)
is_close = np.all(dist < super_ftol, axis=-1)
any_close = np.any(is_close, axis=-1)
inds = np.all(any_close, axis=-1)
for inv_m, m in zip(inv_ms[inds], ms[inds]):
new_m = np.dot(inv_m, self.lattice.matrix)
ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1)))
valid = True
new_coords = []
new_sp = []
new_props = collections.defaultdict(list)
for gsites, gfcoords, non_nbrs in zip(grouped_sites,
grouped_fcoords,
grouped_non_nbrs):
all_frac = np.dot(gfcoords, m)
# calculate grouping of equivalent sites, represented by
# adjacency matrix
fdist = all_frac[None, :, :] - all_frac[:, None, :]
fdist = np.abs(fdist - np.round(fdist))
close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1)
groups = np.logical_and(close_in_prim, non_nbrs)
# check that groups are correct
if not np.all(np.sum(groups, axis=0) == size):
valid = False
break
# check that groups are all cliques
for g in groups:
if not np.all(groups[g][:, g]):
valid = False
break
if not valid:
break
# add the new sites, averaging positions
added = np.zeros(len(gsites))
new_fcoords = all_frac % 1
for i, group in enumerate(groups):
if not added[i]:
added[group] = True
inds = np.where(group)[0]
coords = new_fcoords[inds[0]]
for n, j in enumerate(inds[1:]):
offset = new_fcoords[j] - coords
coords += (offset - np.round(offset)) / (n + 2)
new_sp.append(gsites[inds[0]].species_and_occu)
for k in gsites[inds[0]].properties:
new_props[k].append(gsites[inds[0]].properties[k])
new_coords.append(coords)
if valid:
inv_m = np.linalg.inv(m)
new_l = Lattice(np.dot(inv_m, self.lattice.matrix))
s = Structure(new_l, new_sp, new_coords,
site_properties=new_props,
coords_are_cartesian=False)
return s.get_primitive_structure(
tolerance).get_reduced_structure()
return self.copy()
def __repr__(self):
outs = ["Structure Summary", repr(self.lattice)]
for s in self:
outs.append(repr(s))
return "\n".join(outs)
def __str__(self):
outs = ["Full Formula ({s})".format(s=self.composition.formula),
"Reduced Formula: {}"
.format(self.composition.reduced_formula)]
to_s = lambda x: "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10)
for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10)
for i in self.lattice.angles]))
outs.append("Sites ({i})".format(i=len(self)))
data = []
props = self.site_properties
keys = sorted(props.keys())
for i, site in enumerate(self):
row = [str(i), site.species_string]
row.extend([to_s(j) for j in site.frac_coords])
for k in keys:
row.append(props[k][i])
data.append(row)
from tabulate import tabulate
outs.append(tabulate(data, headers=["#", "SP", "a", "b", "c"] + keys,
))
return "\n".join(outs)
def as_dict(self, verbosity=1, fmt=None, **kwargs):
"""
Dict representation of Structure.
Args:
verbosity (int): Verbosity level. Default of 1 includes both
direct and cartesian coordinates for all sites, lattice
parameters, etc. Useful for reading and for insertion into a
database. Set to 0 for an extremely lightweight version
that only includes sufficient information to reconstruct the
object.
fmt (str): Specifies a format for the dict. Defaults to None,
which is the default format used in pymatgen. Other options
include "abivars".
**kwargs: Allow passing of other kwargs needed for certain
formats, e.g., "abivars".
Returns:
JSON serializable dict representation.
"""
if fmt == "abivars":
"""Returns a dictionary with the ABINIT variables."""
from pymatgen.io.abinit.abiobjects import structure_to_abivars
return structure_to_abivars(self, **kwargs)
latt_dict = self._lattice.as_dict(verbosity=verbosity)
del latt_dict["@module"]
del latt_dict["@class"]
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice": latt_dict, "sites": []}
for site in self:
site_dict = site.as_dict(verbosity=verbosity)
del site_dict["lattice"]
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
@classmethod
def from_dict(cls, d, fmt=None):
"""
Reconstitute a Structure object from a dict representation of Structure
created using as_dict().
Args:
d (dict): Dict representation of structure.
Returns:
Structure object
"""
if fmt == "abivars":
from pymatgen.io.abinit.abiobjects import structure_from_abivars
return structure_from_abivars(cls=cls, **d)
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
return cls.from_sites(sites)
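# Minimal round-trip sketch (a simple cubic Po cell, illustrative only):
# as_dict() and from_dict() are lossless inverses for ordinary structures.
# >>> s = Structure(Lattice.cubic(3.36), ["Po"], [[0, 0, 0]])
# >>> Structure.from_dict(s.as_dict()) == s
# True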
def to(self, fmt=None, filename=None, **kwargs):
"""
Outputs the structure to a file or string.
Args:
fmt (str): Format to output to. Defaults to JSON unless filename
is provided. If fmt is specified, it overrides whatever the
filename is. Options include "cif", "poscar", "cssr", "json".
Non-case sensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
filename. Default is None, i.e. string output.
Returns:
(str) if filename is None. None otherwise.
"""
from pymatgen.io.cif import CifWriter
from pymatgen.io.vasp import Poscar
from pymatgen.io.cssr import Cssr
from pymatgen.io.xcrysden import XSF
filename = filename or ""
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename)
if fmt == "cif" or fnmatch(fname, "*.cif*"):
writer = CifWriter(self)
elif fmt == "poscar" or fnmatch(fname, "*POSCAR*"):
writer = Poscar(self)
elif fmt == "cssr" or fnmatch(fname.lower(), "*.cssr*"):
writer = Cssr(self)
elif fmt == "json" or fnmatch(fname.lower(), "*.json"):
s = json.dumps(self.as_dict())
if filename:
with zopen(filename, "wt") as f:
f.write("%s" % s)
return
else:
return s
elif fmt == "xsf" or fnmatch(fname.lower(), "*.xsf*"):
if filename:
with zopen(filename, "wt", encoding='utf8') as f:
s = XSF(self).to_string()
f.write(s)
return s
else:
return XSF(self).to_string()
else:
import yaml
try:
from yaml import CSafeDumper as Dumper
except ImportError:
from yaml import SafeDumper as Dumper
if filename:
with zopen(filename, "wt") as f:
yaml.dump(self.as_dict(), f, Dumper=Dumper)
return
else:
return yaml.dump(self.as_dict(), Dumper=Dumper)
if filename:
writer.write_file(filename)
else:
return writer.__str__()
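# Hedged usage sketch (file names are hypothetical): fmt and the filename
# extension are interchangeable ways of selecting a writer.
# >>> poscar_str = s.to(fmt="poscar")       # returns a string
# >>> s.to(filename="mystructure.cif")      # format inferred; returns None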
@classmethod
def from_str(cls, input_string, fmt, primitive=False, sort=False,
merge_tol=0.0):
"""
Reads a structure from a string.
Args:
input_string (str): String to parse.
fmt (str): A format specification.
primitive (bool): Whether to find a primitive cell. Defaults to
False.
sort (bool): Whether to sort the sites in accordance to the default
ordering criteria, i.e., electronegativity.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
IStructure / Structure
"""
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp import Poscar
from pymatgen.io.cssr import Cssr
from pymatgen.io.xcrysden import XSF
fmt = fmt.lower()
if fmt == "cif":
parser = CifParser.from_string(input_string)
s = parser.get_structures(primitive=primitive)[0]
elif fmt == "poscar":
s = Poscar.from_string(input_string, False).structure
elif fmt == "cssr":
cssr = Cssr.from_string(input_string)
s = cssr.structure
elif fmt == "json":
d = json.loads(input_string)
s = Structure.from_dict(d)
elif fmt == "yaml":
import yaml
d = yaml.safe_load(input_string)
s = Structure.from_dict(d)
elif fmt == "xsf":
s = XSF.from_string(input_string).structure
else:
raise ValueError("Unrecognized format `%s`!" % fmt)
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
return cls.from_sites(s)
@classmethod
def from_file(cls, filename, primitive=False, sort=False, merge_tol=0.0):
"""
Reads a structure from a file. For example, anything ending in
a "cif" is assumed to be a Crystallographic Information Format file.
Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,
vasprun.xml, CSSR, Netcdf and pymatgen's JSON serialized structures.
Args:
filename (str): The filename to read from.
primitive (bool): Whether to convert to a primitive cell
Only available for cifs. Defaults to False.
sort (bool): Whether to sort sites. Default to False.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
Structure.
"""
if filename.endswith(".nc"):
# Read Structure from a netcdf file.
from pymatgen.io.abinit.netcdf import structure_from_ncdata
s = structure_from_ncdata(filename, cls=cls)
if sort:
s = s.get_sorted_structure()
return s
from pymatgen.io.vasp import Vasprun, Chgcar
from monty.io import zopen
fname = os.path.basename(filename)
with zopen(filename, "rt") as f:
contents = f.read()
if fnmatch(fname.lower(), "*.cif*"):
return cls.from_str(contents, fmt="cif",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*"):
s = cls.from_str(contents, fmt="poscar",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "CHGCAR*") or fnmatch(fname, "LOCPOT*"):
s = Chgcar.from_file(filename).structure
elif fnmatch(fname, "vasprun*.xml*"):
s = Vasprun(filename).final_structure
elif fnmatch(fname.lower(), "*.cssr*"):
return cls.from_str(contents, fmt="cssr",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(contents, fmt="json",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*.yaml*"):
return cls.from_str(contents, fmt="yaml",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
elif fnmatch(fname, "*.xsf"):
return cls.from_str(contents, fmt="xsf",
primitive=primitive, sort=sort,
merge_tol=merge_tol)
else:
raise ValueError("Unrecognized file extension!")
if sort:
s = s.get_sorted_structure()
if merge_tol:
s.merge_sites(merge_tol)
s.__class__ = cls
return s
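# Usage sketch (paths are hypothetical and assumed to exist):
# >>> s1 = Structure.from_file("POSCAR")
# >>> s2 = Structure.from_file("mystructure.cif.gz")  # zopen handles .gz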
class IMolecule(SiteCollection, MSONable):
"""
Basic immutable Molecule object without periodicity. Essentially a
sequence of sites. IMolecule is made to be immutable so that it can
function as a key in a dict. For a mutable molecule,
use the :class:Molecule.
Molecule extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a molecule is
equivalent to going through the sites in sequence.
"""
def __init__(self, species, coords, charge=0,
spin_multiplicity=None, validate_proximity=False,
site_properties=None):
"""
Creates a Molecule.
Args:
species: list of atomic species. Possible kinds of input include a
list of dict of elements/species and occupancies, a List of
elements/specie specified as actual Element/Specie, Strings
("Fe", "Fe2+") or atomic numbers (1,56).
coords (3x1 array): list of cartesian coordinates of each species.
charge (float): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
"""
if len(species) != len(coords):
raise StructureError(("The list of atomic species must be of the",
" same length as the list of fractional ",
"coordinates."))
sites = []
for i in range(len(species)):
prop = None
if site_properties:
prop = {k: v[i] for k, v in site_properties.items()}
sites.append(Site(species[i], coords[i], properties=prop))
self._sites = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError(("Molecule contains sites that are ",
"less than 0.01 Angstrom apart!"))
self._charge = charge
nelectrons = 0
for site in sites:
for sp, amt in site.species_and_occu.items():
nelectrons += sp.Z * amt
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of %d and spin multiplicity of %d is"
" not possible for this molecule" %
(self._charge, spin_multiplicity))
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
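# Worked example (illustrative geometry): neutral H2O has 2*1 + 8 = 10
# electrons, so the default spin multiplicity is 1; a 9-electron OH
# radical would default to 2.
# >>> mol = IMolecule(["O", "H", "H"],
# ...                 [[0, 0, 0], [0.76, 0.59, 0], [-0.76, 0.59, 0]])
# >>> mol.spin_multiplicity
# 1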
@property
def charge(self):
"""
Charge of molecule
"""
return self._charge
@property
def spin_multiplicity(self):
"""
Spin multiplicity of molecule.
"""
return self._spin_multiplicity
@property
def nelectrons(self):
"""
Number of electrons in the molecule.
"""
return self._nelectrons
@property
def center_of_mass(self):
"""
Center of mass of molecule.
"""
center = np.zeros(3)
total_weight = 0
for site in self:
wt = site.species_and_occu.weight
center += site.coords * wt
total_weight += wt
return center / total_weight
@property
def sites(self):
"""
Returns a tuple of sites in the Molecule.
"""
return self._sites
@classmethod
def from_sites(cls, sites, charge=0, spin_multiplicity=None,
validate_proximity=False):
"""
Convenience constructor to make a Molecule from a list of sites.
Args:
sites ([Site]): Sequence of Sites.
charge (int): Charge of molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity. Defaults to None,
in which case it is determined automatically.
validate_proximity (bool): Whether to check that no atoms are
too close to one another.
"""
props = collections.defaultdict(list)
for site in sites:
for k, v in site.properties.items():
props[k].append(v)
return cls([site.species_and_occu for site in sites],
[site.coords for site in sites],
charge=charge, spin_multiplicity=spin_multiplicity,
validate_proximity=validate_proximity,
site_properties=props)
def break_bond(self, ind1, ind2, tol=0.2):
"""
Returns two molecules based on breaking the bond between atoms at index
ind1 and ind2.
Args:
ind1 (int): Index of first site.
ind2 (int): Index of second site.
tol (float): Relative tolerance to test. Basically, the code
checks if the distance between the sites is less than (1 +
tol) * typical bond distances. Defaults to 0.2, i.e.,
20% longer.
Returns:
Two Molecule objects representing the two clusters formed from
breaking the bond.
"""
sites = self._sites
clusters = [[sites[ind1]], [sites[ind2]]]
sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]
def belongs_to_cluster(site, cluster):
for test_site in cluster:
if CovalentBond.is_bonded(site, test_site, tol=tol):
return True
return False
while len(sites) > 0:
unmatched = []
for site in sites:
for cluster in clusters:
if belongs_to_cluster(site, cluster):
cluster.append(site)
break
else:
unmatched.append(site)
if len(unmatched) == len(sites):
raise ValueError("Not all sites are matched!")
sites = unmatched
return (self.__class__.from_sites(cluster)
for cluster in clusters)
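# Sketch (indices are hypothetical): for an ethane-like molecule whose C
# atoms sit at indices 0 and 1, breaking the C-C bond yields two CH3
# clusters.
# >>> frag1, frag2 = mol.break_bond(0, 1)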
def get_covalent_bonds(self, tol=0.2):
"""
Determines the covalent bonds in a molecule.
Args:
tol (float): The tol to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds
"""
bonds = []
for site1, site2 in itertools.combinations(self._sites, 2):
if CovalentBond.is_bonded(site1, site2, tol):
bonds.append(CovalentBond(site1, site2))
return bonds
def __eq__(self, other):
if other is None:
return False
if len(self) != len(other):
return False
if self.charge != other.charge:
return False
if self.spin_multiplicity != other.spin_multiplicity:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# For now, just use the composition hash code.
return self.composition.__hash__()
def __repr__(self):
outs = ["Molecule Summary"]
for s in self:
outs.append(s.__repr__())
return "\n".join(outs)
def __str__(self):
outs = ["Full Formula (%s)" % self.composition.formula,
"Reduced Formula: " + self.composition.reduced_formula,
"Charge = %s, Spin Mult = %s" % (
self._charge, self._spin_multiplicity),
"Sites (%d)" % len(self)]
for i, site in enumerate(self):
outs.append(" ".join([str(i), site.species_string,
" ".join([("%0.6f" % j).rjust(12)
for j in site.coords])]))
return "\n".join(outs)
def as_dict(self):
"""
Json-serializable dict representation of Molecule
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self._charge,
"spin_multiplicity": self._spin_multiplicity,
"sites": []}
for site in self:
site_dict = site.as_dict()
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a Molecule object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of Molecule.
Returns:
Molecule object
"""
species = []
coords = []
props = collections.defaultdict(list)
for site_dict in d["sites"]:
species.append({Specie(sp["element"], sp["oxidation_state"])
if "oxidation_state" in sp else
Element(sp["element"]): sp["occu"]
for sp in site_dict["species"]})
coords.append(site_dict["xyz"])
siteprops = site_dict.get("properties", {})
for k, v in siteprops.items():
props[k].append(v)
return cls(species, coords, charge=d.get("charge", 0),
spin_multiplicity=d.get("spin_multiplicity"),
site_properties=props)
def get_distance(self, i, j):
"""
Get distance between site i and j.
Args:
i (int): Index of first site
j (int): Index of second site
Returns:
Distance between the two sites.
"""
return self[i].distance(self[j])
def get_sites_in_sphere(self, pt, r):
"""
Find all sites within a sphere from a point.
Args:
pt (3x1 array): Cartesian coordinates of center of sphere.
r (float): Radius of sphere.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
neighbors = []
for site in self._sites:
dist = site.distance_from_point(pt)
if dist <= r:
neighbors.append((site, dist))
return neighbors
def get_neighbors(self, site, r):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Site at the center of the sphere.
r (float): Radius of sphere.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
nn = self.get_sites_in_sphere(site.coords, r)
return [(s, dist) for (s, dist) in nn if site != s]
def get_neighbors_in_shell(self, origin, r, dr):
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
outer = self.get_sites_in_sphere(origin, r + dr)
inner = r - dr
return [(site, dist) for (site, dist) in outer if dist > inner]
def get_boxed_structure(self, a, b, c, images=(1, 1, 1),
random_rotation=False, min_dist=1, cls=None, offset=None, no_cross=False):
"""
Creates a Structure from a Molecule by putting the Molecule in the
center of an orthorhombic box. Useful for creating Structures for
calculating molecules using periodic codes.
Args:
a (float): a-lattice parameter.
b (float): b-lattice parameter.
c (float): c-lattice parameter.
images: No. of boxed images in each direction. Defaults to
(1, 1, 1), meaning single molecule with 1 lattice parameter
in each direction.
random_rotation (bool): Whether to apply a random rotation to
each molecule. This jumbles all the molecules so that they
are not exact images of each other.
min_dist (float): The minimum distance that atoms should be from
each other. This is only used if random_rotation is True.
The randomized rotations are searched such that no two atoms
are less than min_dist from each other.
cls: The Structure class to instantiate (defaults to pymatgen
structure)
offset: Translation to offset molecule from center of mass coords
no_cross: Whether to forbid molecule coords from extending beyond
boundary of box.
Returns:
Structure containing molecule in a box.
"""
if offset is None:
offset = np.array([0,0,0])
coords = np.array(self.cart_coords)
x_range = max(coords[:, 0]) - min(coords[:, 0])
y_range = max(coords[:, 1]) - min(coords[:, 1])
z_range = max(coords[:, 2]) - min(coords[:, 2])
if a <= x_range or b <= y_range or c <= z_range:
raise ValueError("Box is not big enough to contain Molecule.")
lattice = Lattice.from_parameters(a * images[0], b * images[1],
c * images[2],
90, 90, 90)
nimages = images[0] * images[1] * images[2]
coords = []
centered_coords = self.cart_coords - self.center_of_mass + offset
for i, j, k in itertools.product(list(range(images[0])),
list(range(images[1])),
list(range(images[2]))):
box_center = [(i + 0.5) * a, (j + 0.5) * b, (k + 0.5) * c]
if random_rotation:
while True:
op = SymmOp.from_origin_axis_angle(
(0, 0, 0), axis=np.random.rand(3),
angle=random.uniform(-180, 180))
m = op.rotation_matrix
new_coords = np.dot(m, centered_coords.T).T + box_center
if no_cross:
x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
raise ValueError("Molecule crosses boundary of box.")
if len(coords) == 0:
break
distances = lattice.get_all_distances(
lattice.get_fractional_coords(new_coords),
lattice.get_fractional_coords(coords))
if np.amin(distances) > min_dist:
break
else:
new_coords = centered_coords + box_center
if no_cross:
x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
raise ValueError("Molecule crosses boundary of box.")
coords.extend(new_coords)
sprops = {k: v * nimages for k, v in self.site_properties.items()}
if cls is None:
cls = Structure
return cls(lattice, self.species * nimages, coords,
coords_are_cartesian=True,
site_properties=sprops).get_sorted_structure()
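# Usage sketch (box dimensions are arbitrary): center a molecule in a
# 10 x 10 x 10 Angstrom orthorhombic cell for use with periodic codes.
# >>> boxed = mol.get_boxed_structure(10, 10, 10)
# >>> boxed.lattice.abc
# (10.0, 10.0, 10.0)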
def get_centered_molecule(self):
"""
Returns a Molecule centered at the center of mass.
Returns:
Molecule centered with center of mass at origin.
"""
center = self.center_of_mass
new_coords = np.array(self.cart_coords) - center
return self.__class__(self.species_and_occu, new_coords,
charge=self._charge,
spin_multiplicity=self._spin_multiplicity,
site_properties=self.site_properties)
def to(self, fmt=None, filename=None):
"""
Outputs the molecule to a file or string.
Args:
fmt (str): Format to output to. Defaults to JSON unless filename
is provided. If fmt is specified, it overrides whatever the
filename is. Options include "xyz", "gjf", "g03", "json". If
you have OpenBabel installed, any of the formats supported by
OpenBabel. Non-case sensitive.
filename (str): If provided, output will be written to a file. If
fmt is not specified, the format is determined from the
filename. Default is None, i.e. string output.
Returns:
(str) if filename is None. None otherwise.
"""
from pymatgen.io.xyz import XYZ
from pymatgen.io.gaussian import GaussianInput
from pymatgen.io.babel import BabelMolAdaptor
fmt = "" if fmt is None else fmt.lower()
fname = os.path.basename(filename or "")
if fmt == "xyz" or fnmatch(fname.lower(), "*.xyz*"):
writer = XYZ(self)
elif any([fmt == r or fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["gjf", "g03", "g09", "com", "inp"]]):
writer = GaussianInput(self)
elif fmt == "json" or fnmatch(fname, "*.json*") or fnmatch(fname,
"*.mson*"):
if filename:
with zopen(filename, "wt", encoding='utf8') as f:
return json.dump(self.as_dict(), f)
else:
return json.dumps(self.as_dict())
elif fmt == "yaml" or fnmatch(fname, "*.yaml*"):
import yaml
try:
from yaml import CSafeDumper as Dumper
except ImportError:
from yaml import SafeDumper as Dumper
if filename:
with zopen(filename, "wt", encoding='utf8') as f:
return yaml.dump(self.as_dict(), f, Dumper=Dumper)
else:
return yaml.dump(self.as_dict(), Dumper=Dumper)
else:
m = re.search("\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
fname.lower())
if (not fmt) and m:
fmt = m.group(1)
writer = BabelMolAdaptor(self)
return writer.write_file(filename, file_format=fmt)
if filename:
writer.write_file(filename)
else:
return str(writer)
@classmethod
def from_str(cls, input_string, fmt):
"""
Reads the molecule from a string.
Args:
input_string (str): String to parse.
fmt (str): Format of the input string. Options include "xyz",
"gjf", "g03", "json", "yaml". If you have OpenBabel installed,
any of the formats supported by OpenBabel. Non-case sensitive.
Returns:
IMolecule or Molecule.
"""
from pymatgen.io.xyz import XYZ
from pymatgen.io.gaussian import GaussianInput
if fmt.lower() == "xyz":
m = XYZ.from_string(input_string).molecule
elif fmt in ["gjf", "g03", "g09", "com", "inp"]:
m = GaussianInput.from_string(input_string).molecule
elif fmt == "json":
d = json.loads(input_string)
return cls.from_dict(d)
elif fmt == "yaml":
import yaml
try:
from yaml import CSafeDumper as Dumper, CLoader as Loader
except ImportError:
from yaml import SafeDumper as Dumper, Loader
d = yaml.load(input_string, Loader=Loader)
return cls.from_dict(d)
else:
from pymatgen.io.babel import BabelMolAdaptor
m = BabelMolAdaptor.from_string(input_string,
file_format=fmt).pymatgen_mol
return cls.from_sites(m)
@classmethod
def from_file(cls, filename):
"""
Reads a molecule from a file. Supported formats include xyz,
gaussian input (gjf|g03|g09|com|inp), Gaussian output
(out|lis|log) and pymatgen's JSON serialized molecules. Using
openbabel, many more extensions are supported but require
openbabel to be installed.
Args:
filename (str): The filename to read from.
Returns:
Molecule
"""
from pymatgen.io.gaussian import GaussianOutput
with zopen(filename) as f:
contents = f.read()
fname = filename.lower()
if fnmatch(fname, "*.xyz*"):
return cls.from_str(contents, fmt="xyz")
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["gjf", "g03", "g09", "com", "inp"]]):
return cls.from_str(contents, fmt="g09")
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["out", "lis", "log"]]):
return GaussianOutput(filename).final_structure
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(contents, fmt="json")
elif fnmatch(fname, "*.yaml*"):
return cls.from_str(contents, fmt="yaml")
else:
from pymatgen.io.babel import BabelMolAdaptor
m = re.search("\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
filename.lower())
if m:
new = BabelMolAdaptor.from_file(filename,
m.group(1)).pymatgen_mol
new.__class__ = cls
return new
raise ValueError("Unrecognized file extension!")
class Structure(IStructure, collections.MutableSequence):
"""
Mutable version of structure.
"""
__hash__ = None
def __init__(self, lattice, species, coords, validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=False,
site_properties=None):
"""
Create a periodic structure.
Args:
lattice: The lattice, either as a pymatgen.core.lattice.Lattice or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species: List of species on each site. Can take in flexible input,
including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords: List of fractional coordinates of each species.
to_unit_cell (bool): Whether to map all sites into the unit
cell. Defaults to False.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
super(Structure, self).__init__(lattice, species, coords,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
self._sites = list(self._sites)
def __setitem__(self, i, site):
"""
Modify a site in the structure.
Args:
i (int, [int], slice, Specie-like): Indices to change. You can
specify these as an int, a list of int, or a species-like
string.
site (PeriodicSite/Specie/Sequence): Three options exist. You
can provide a PeriodicSite directly (lattice will be
checked). Or more conveniently, you can provide a
specie-like object or a tuple of up to length 3.
Examples:
s[0] = "Fe"
s[0] = Element("Fe")
both replaces the species only.
s[0] = "Fe", [0.5, 0.5, 0.5]
Replaces site and *fractional* coordinates. Any properties
are inherited from current site.
s[0] = "Fe", [0.5, 0.5, 0.5], {"spin": 2}
Replaces site and *fractional* coordinates and properties.
s[(0, 2, 3)] = "Fe"
Replaces sites 0, 2 and 3 with Fe.
s[0::2] = "Fe"
Replaces all even index sites with Fe.
s["Mn"] = "Fe"
Replaces all Mn in the structure with Fe. This is
a short form for the more complex replace_species.
s["Mn"] = "Fe0.5Co0.5"
Replaces all Mn in the structure with Fe: 0.5, Co: 0.5, i.e.,
creates a disordered structure!
"""
if isinstance(i, int):
indices = [i]
elif isinstance(i, six.string_types + (Element, Specie)):
self.replace_species({i: site})
return
elif isinstance(i, slice):
to_mod = self[i]
indices = [ii for ii, s in enumerate(self._sites)
if s in to_mod]
else:
indices = list(i)
for ii in indices:
if isinstance(site, PeriodicSite):
if site.lattice != self._lattice:
raise ValueError("PeriodicSite added must have same lattice "
"as Structure!")
elif len(indices) != 1:
raise ValueError("Site assignments makes sense only for "
"single int indices!")
self._sites[ii] = site
else:
if isinstance(site, six.string_types) or (
not isinstance(site, collections.Sequence)):
sp = site
frac_coords = self._sites[ii].frac_coords
properties = self._sites[ii].properties
else:
sp = site[0]
frac_coords = site[1] if len(site) > 1 else \
self._sites[ii].frac_coords
properties = site[2] if len(site) > 2 else \
self._sites[ii].properties
self._sites[ii] = PeriodicSite(sp, frac_coords, self._lattice,
properties=properties)
def __delitem__(self, i):
"""
Deletes a site from the Structure.
"""
self._sites.__delitem__(i)
def append(self, species, coords, coords_are_cartesian=False,
validate_proximity=False, properties=None):
"""
Append a site to the structure.
Args:
species: Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties of the site.
Returns:
New structure with inserted site.
"""
return self.insert(len(self), species, coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=properties)
def insert(self, i, species, coords, coords_are_cartesian=False,
validate_proximity=False, properties=None):
"""
Insert a site to the structure.
Args:
i (int): Index to insert site
species (species-like): Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Properties associated with the site.
Returns:
New structure with inserted site.
"""
if not coords_are_cartesian:
new_site = PeriodicSite(species, coords, self._lattice,
properties=properties)
else:
frac_coords = self._lattice.get_fractional_coords(coords)
new_site = PeriodicSite(species, frac_coords, self._lattice,
properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing "
"site!")
self._sites.insert(i, new_site)
def add_site_property(self, property_name, values):
"""
Adds a property to all sites.
Args:
property_name (str): The name of the property to add.
values: A sequence of values. Must be same length as number of
sites.
"""
if len(values) != len(self._sites):
raise ValueError("Values must be same length as sites.")
for i in range(len(self._sites)):
site = self._sites[i]
props = site.properties
if not props:
props = {}
props[property_name] = values[i]
self._sites[i] = PeriodicSite(site.species_and_occu,
site.frac_coords, self._lattice,
properties=props)
def replace_species(self, species_mapping):
"""
Swap species in a structure.
Args:
species_mapping (dict): Dict of species to swap. Species can be
elements too. e.g., {Element("Li"): Element("Na")} performs
a Li for Na substitution. The second species can be a
sp_and_occu dict. For example, a site with 0.5 Si that is
passed the mapping {Element('Si'): {Element('Ge'): 0.75,
Element('C'): 0.25}} will have .375 Ge and .125 C. You can
also supply strings that represent elements or species and
the code will try to figure out the meaning. E.g.,
{"C": "C0.5Si0.5"} will replace all C with 0.5 C and 0.5 Si,
i.e., a disordered site.
"""
latt = self._lattice
species_mapping = {get_el_sp(k): v
for k, v in species_mapping.items()}
def mod_site(site):
c = Composition()
for sp, amt in site.species_and_occu.items():
new_sp = species_mapping.get(sp, sp)
try:
c += Composition(new_sp) * amt
except Exception:
c += {new_sp: amt}
return PeriodicSite(c, site.frac_coords, latt,
properties=site.properties)
self._sites = [mod_site(site) for site in self._sites]
def replace(self, i, species, coords=None, coords_are_cartesian=False,
properties=None):
"""
Replace a single site. Takes either a species or a dict of species and
occupations.
Args:
i (int): Index of the site in the _sites list.
species (species-like): Species of replacement site
coords (3x1 array): Coordinates of replacement site. If None,
the current coordinates are assumed.
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
properties (dict): Properties associated with the site.
"""
if coords is None:
frac_coords = self[i].frac_coords
elif coords_are_cartesian:
frac_coords = self._lattice.get_fractional_coords(coords)
else:
frac_coords = coords
new_site = PeriodicSite(species, frac_coords, self._lattice,
properties=properties)
self._sites[i] = new_site
def remove_species(self, species):
"""
Remove all occurrences of several species from a structure.
Args:
species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
species = [get_el_sp(s) for s in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species_and_occu.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(PeriodicSite(
new_sp_occu, site.frac_coords, self._lattice,
properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices):
"""
Delete sites at the given indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [s for i, s in enumerate(self._sites)
if i not in indices]
def apply_operation(self, symmop, fractional=False):
"""
Apply a symmetry operation to the structure and return the new
structure. The lattice is operated by the rotation matrix only.
Coords are operated in full and then transformed to the new lattice.
Args:
symmop (SymmOp): Symmetry operation to apply.
fractional (bool): Whether the symmetry operation is applied in
fractional space. Defaults to False, i.e., symmetry operation
is applied in cartesian coordinates.
"""
if not fractional:
self._lattice = Lattice([symmop.apply_rotation_only(row)
for row in self._lattice.matrix])
def operate_site(site):
new_cart = symmop.operate(site.coords)
new_frac = self._lattice.get_fractional_coords(new_cart)
return PeriodicSite(site.species_and_occu, new_frac,
self._lattice,
properties=site.properties)
else:
new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)
self._lattice = Lattice(new_latt)
def operate_site(site):
return PeriodicSite(site.species_and_occu,
symmop.operate(site.frac_coords),
self._lattice,
properties=site.properties)
self._sites = [operate_site(s) for s in self._sites]
def modify_lattice(self, new_lattice):
"""
Modify the lattice of the structure. Mainly used for changing the
basis.
Args:
new_lattice (Lattice): New lattice
"""
self._lattice = new_lattice
new_sites = []
for site in self._sites:
new_sites.append(PeriodicSite(site.species_and_occu,
site.frac_coords,
self._lattice,
properties=site.properties))
self._sites = new_sites
def apply_strain(self, strain):
"""
Apply a strain to the lattice.
Args:
strain (float or list): Amount of strain to apply. Can be a float,
or a sequence of 3 numbers. E.g., 0.01 means all lattice
vectors are increased by 1%. This is equivalent to calling
modify_lattice with a lattice with lattice parameters that
are 1% larger.
"""
s = (1 + np.array(strain)) * np.eye(3)
self.modify_lattice(Lattice(np.dot(self._lattice.matrix.T, s).T))
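# Worked example (sketch): a uniform strain of 0.01 scales every lattice
# vector by 1.01, so the cell volume grows by a factor of 1.01 ** 3.
# >>> s.apply_strain(0.01)
# A sequence applies anisotropic strain, e.g. stretching only a:
# >>> s.apply_strain([0.01, 0, 0])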
def sort(self, key=None, reverse=False):
"""
Sort a structure in place. The parameters have the same meaning as in
list.sort. By default, sites are sorted by the electronegativity of
the species. The difference between this method and
get_sorted_structure (which also works in IStructure) is that the
latter returns a new Structure, while this just sorts the Structure
in place.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
self._sites = sorted(self._sites, key=key, reverse=reverse)
def translate_sites(self, indices, vector, frac_coords=True,
to_unit_cell=True):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
indices: Integer or List of site indices on which to perform the
translation.
vector: Translation vector for sites.
frac_coords (bool): Whether the vector corresponds to fractional or
cartesian coordinates.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
if not isinstance(indices, collections.Iterable):
indices = [indices]
for i in indices:
site = self._sites[i]
if frac_coords:
fcoords = site.frac_coords + vector
else:
fcoords = self._lattice.get_fractional_coords(
site.coords + vector)
new_site = PeriodicSite(site.species_and_occu, fcoords,
self._lattice, to_unit_cell=to_unit_cell,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
def perturb(self, distance):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec(), frac_coords=False)
def add_oxidation_state_by_element(self, oxidation_states):
"""
Add oxidation states to a structure.
Args:
oxidation_states (dict): Dict of oxidation states.
E.g., {"Li":1, "Fe":2, "P":5, "O":-2}
"""
try:
for i, site in enumerate(self._sites):
new_sp = {}
for el, occu in site.species_and_occu.items():
sym = el.symbol
new_sp[Specie(sym, oxidation_states[sym])] = occu
new_site = PeriodicSite(new_sp, site.frac_coords,
self._lattice,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
except KeyError:
raise ValueError("Oxidation state of all elements must be "
"specified in the dictionary.")
def add_oxidation_state_by_site(self, oxidation_states):
"""
Add oxidation states to a structure by site.
Args:
oxidation_states (list): List of oxidation states.
E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]
"""
try:
for i, site in enumerate(self._sites):
new_sp = {}
for el, occu in site.species_and_occu.items():
sym = el.symbol
new_sp[Specie(sym, oxidation_states[i])] = occu
new_site = PeriodicSite(new_sp, site.frac_coords,
self._lattice,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
except IndexError:
raise ValueError("Oxidation state of all sites must be "
"specified in the dictionary.")
def remove_oxidation_states(self):
"""
Removes oxidation states from a structure.
"""
for i, site in enumerate(self._sites):
new_sp = collections.defaultdict(float)
for el, occu in site.species_and_occu.items():
sym = el.symbol
new_sp[Element(sym)] += occu
new_site = PeriodicSite(new_sp, site.frac_coords,
self._lattice,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
def make_supercell(self, scaling_matrix, to_unit_cell=True):
"""
Create a supercell.
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
a. A full 3x3 scaling matrix defining the linear combination
the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
b. An sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
to_unit_cell: Whether or not to map sites back into the unit cell
"""
s = self*scaling_matrix
if to_unit_cell:
for isite, site in enumerate(s):
s[isite] = site.to_unit_cell
self._sites = s.sites
self._lattice = s.lattice
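# Usage sketch: a [2, 1, 1] scaling doubles the cell along a and hence
# doubles the site count; a full 3x3 matrix mixes lattice vectors.
# >>> n = len(s)
# >>> s.make_supercell([2, 1, 1])
# >>> len(s) == 2 * n
# True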
def scale_lattice(self, volume):
"""
Performs a scaling of the lattice vectors so that length proportions
and angles are preserved.
Args:
volume (float): New volume of the unit cell in A^3.
"""
self.modify_lattice(self._lattice.scale(volume))
def merge_sites(self, tol=0.01, mode="sum"):
"""
Merges sites (adding occupancies) within tol of each other.
Removes site properties.
Args:
tol (float): Tolerance for distance to merge sites.
mode (str): Two modes supported. "delete" means duplicate sites are
deleted. "sum" means the occupancies are summed for the sites.
Only first letter is considered.
"""
mode = mode.lower()[0]
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import fcluster, linkage
d = self.distance_matrix
np.fill_diagonal(d, 0)
clusters = fcluster(linkage(squareform((d + d.T) / 2)),
tol, 'distance')
sites = []
for c in np.unique(clusters):
inds = np.where(clusters == c)[0]
species = self[inds[0]].species_and_occu
coords = self[inds[0]].frac_coords
for n, i in enumerate(inds[1:]):
sp = self[i].species_and_occu
if mode == "s":
species += sp
offset = self[i].frac_coords - coords
coords += ((offset - np.round(offset)) / (n + 2)).astype(
coords.dtype)
sites.append(PeriodicSite(species, coords, self.lattice))
self._sites = sites
class Molecule(IMolecule, collections.MutableSequence):
"""
Mutable Molecule. It has all the methods in IMolecule, but in addition,
it allows a user to perform edits on the molecule.
"""
__hash__ = None
def __init__(self, species, coords, charge=0,
spin_multiplicity=None, validate_proximity=False,
site_properties=None):
"""
Creates a mutable Molecule.
Args:
species: list of atomic species. Possible kinds of input include a
list of dict of elements/species and occupancies, a List of
elements/specie specified as actual Element/Specie, Strings
("Fe", "Fe2+") or atomic numbers (1,56).
coords (3x1 array): list of cartesian coordinates of each species.
charge (float): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
"""
super(Molecule, self).__init__(species, coords, charge=charge,
spin_multiplicity=spin_multiplicity,
validate_proximity=validate_proximity,
site_properties=site_properties)
self._sites = list(self._sites)
def __setitem__(self, i, site):
"""
Modify a site in the molecule.
Args:
i (int, [int], slice, Specie-like): Indices to change. You can
specify these as an int, a list of int, or a species-like
string.
site (PeriodicSite/Specie/Sequence): Three options exist. You can
provide a Site directly, or for convenience, you can provide
simply a Specie-like string/object, or finally a (Specie,
coords) sequence, e.g., ("Fe", [0.5, 0.5, 0.5]).
"""
if isinstance(i, int):
indices = [i]
elif isinstance(i, six.string_types + (Element, Specie)):
self.replace_species({i: site})
return
elif isinstance(i, slice):
to_mod = self[i]
indices = [ii for ii, s in enumerate(self._sites)
if s in to_mod]
else:
indices = list(i)
for ii in indices:
if isinstance(site, Site):
self._sites[ii] = site
else:
if isinstance(site, six.string_types) or (
not isinstance(site, collections.Sequence)):
sp = site
coords = self._sites[ii].coords
properties = self._sites[ii].properties
else:
sp = site[0]
coords = site[1] if len(site) > 1 else self._sites[
ii].coords
properties = site[2] if len(site) > 2 else self._sites[ii] \
.properties
self._sites[ii] = Site(sp, coords, properties=properties)
def __delitem__(self, i):
"""
Deletes a site from the Structure.
"""
self._sites.__delitem__(i)
def append(self, species, coords, validate_proximity=True,
properties=None):
"""
Appends a site to the molecule.
Args:
species: Species of inserted site
coords: Coordinates of inserted site
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to True.
properties (dict): A dict of properties for the Site.
Returns:
New molecule with inserted site.
"""
return self.insert(len(self), species, coords,
validate_proximity=validate_proximity,
properties=properties)
def set_charge_and_spin(self, charge, spin_multiplicity=None):
"""
Set the charge and spin multiplicity.
Args:
charge (int): Charge for the molecule. Defaults to 0.
spin_multiplicity (int): Spin multiplicity for molecule.
Defaults to None, which means that the spin multiplicity is
set to 1 if the molecule has no unpaired electrons and to 2
if there are unpaired electrons.
"""
self._charge = charge
nelectrons = 0
for site in self._sites:
for sp, amt in site.species_and_occu.items():
nelectrons += sp.Z * amt
nelectrons -= charge
self._nelectrons = nelectrons
if spin_multiplicity:
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
self._charge, spin_multiplicity))
self._spin_multiplicity = spin_multiplicity
else:
self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
def insert(self, i, species, coords, validate_proximity=False,
properties=None):
"""
Insert a site to the molecule.
Args:
i (int): Index to insert site
species: species of inserted site
coords (3x1 array): coordinates of inserted site
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
properties (dict): Dict of properties for the Site.
Returns:
New molecule with inserted site.
"""
new_site = Site(species, coords, properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing "
"site!")
self._sites.insert(i, new_site)
def add_site_property(self, property_name, values):
"""
Adds a property to a site.
Args:
property_name (str): The name of the property to add.
values (list): A sequence of values. Must be same length as
number of sites.
"""
if len(values) != len(self._sites):
raise ValueError("Values must be same length as sites.")
for i in range(len(self._sites)):
site = self._sites[i]
props = site.properties
if not props:
props = {}
props[property_name] = values[i]
self._sites[i] = Site(site.species_and_occu, site.coords,
properties=props)
def replace_species(self, species_mapping):
"""
Swap species in a molecule.
Args:
species_mapping (dict): dict of species to swap. Species can be
elements too. E.g., {Element("Li"): Element("Na")} performs
a Li for Na substitution. The second species can be a
sp_and_occu dict. For example, a site with 0.5 Si that is
passed the mapping {Element('Si'): {Element('Ge'): 0.75,
Element('C'): 0.25}} will have .375 Ge and .125 C.
"""
species_mapping = {get_el_sp(k): v
for k, v in species_mapping.items()}
def mod_site(site):
c = Composition()
for sp, amt in site.species_and_occu.items():
new_sp = species_mapping.get(sp, sp)
try:
c += Composition(new_sp) * amt
except TypeError:
c += {new_sp: amt}
return Site(c, site.coords, properties=site.properties)
self._sites = [mod_site(site) for site in self._sites]
def remove_species(self, species):
"""
Remove all occurrences of a species from a molecule.
Args:
species: Species to remove.
"""
new_sites = []
species = [get_el_sp(sp) for sp in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species_and_occu.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(Site(new_sp_occu, site.coords,
properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices):
"""
Delete sites at the given indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [self._sites[i] for i in range(len(self._sites))
if i not in indices]
def translate_sites(self, indices=None, vector=None):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
indices (list): List of site indices on which to perform the
translation.
vector (3x1 array): Translation vector for sites.
"""
if indices is None:
indices = range(len(self))
if vector is None:
vector = [0, 0, 0]
for i in indices:
site = self._sites[i]
new_site = Site(site.species_and_occu, site.coords + vector,
properties=site.properties)
self._sites[i] = new_site
def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None):
"""
Rotate specific sites by some angle around vector at anchor.
Args:
indices (list): List of site indices on which to perform the
translation.
theta (float): Angle in radians.
axis (3x1 array): Rotation axis vector.
anchor (3x1 array): Point of rotation.
"""
from numpy.linalg import norm
from numpy import cross, eye
from scipy.linalg import expm
if indices is None:
indices = range(len(self))
if axis is None:
axis = [0, 0, 1]
if anchor is None:
anchor = [0, 0, 0]
anchor = np.array(anchor)
axis = np.array(axis)
theta %= 2 * np.pi
rm = expm(cross(eye(3), axis / norm(axis)) * theta)
for i in indices:
site = self._sites[i]
s = ((rm * np.matrix(site.coords - anchor).T).T + anchor).A1
new_site = Site(site.species_and_occu, s,
properties=site.properties)
self._sites[i] = new_site
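# Note on the rotation above (sketch): cross(eye(3), u) is the
# skew-symmetric cross-product matrix of the unit axis u, so
# expm(cross(eye(3), u) * theta) is the Rodrigues rotation matrix for
# angle theta about u.
# >>> mol.rotate_sites(theta=np.pi / 2, axis=[0, 0, 1])  # 90 deg about z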
def perturb(self, distance):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec())
def apply_operation(self, symmop):
"""
Apply a symmetry operation to the molecule.
Args:
symmop (SymmOp): Symmetry operation to apply.
"""
def operate_site(site):
new_cart = symmop.operate(site.coords)
return Site(site.species_and_occu, new_cart,
properties=site.properties)
self._sites = [operate_site(s) for s in self._sites]
def copy(self):
"""
Convenience method to get a copy of the molecule.
Returns:
A copy of the Molecule.
"""
return self.__class__.from_sites(self)
def substitute(self, index, func_grp, bond_order=1):
"""
Substitute atom at index with a functional group.
Args:
index (int): Index of atom to substitute.
func_grp: Substituent molecule. There are two options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecie X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in functional_groups.json.
bond_order: A specified bond order to calculate the bond length
between the attached functional group and the nearest
neighbor site. Defaults to 1.
"""
# Find the nearest neighbor that is not a terminal atom.
all_non_terminal_nn = []
for nn, dist in self.get_neighbors(self[index], 3):
# Check that the nn has neighbors within a sensible distance but
# is not the site being substituted.
for inn, dist2 in self.get_neighbors(nn, 3):
if inn != self[index] and \
dist2 < 1.2 * get_bond_length(nn.specie,
inn.specie):
all_non_terminal_nn.append((nn, dist))
break
if len(all_non_terminal_nn) == 0:
raise RuntimeError("Can't find a non-terminal neighbor to attach"
" functional group to.")
non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]
# Set the origin point to be the coordinates of the nearest
# non-terminal neighbor.
origin = non_terminal_nn.coords
# Pass value of functional group--either from user-defined or from
# functional.json
if isinstance(func_grp, Molecule):
func_grp = func_grp
else:
# Check to see whether the functional group is in database.
if func_grp not in FunctionalGroups:
raise RuntimeError("Can't find functional group in list. "
"Provide explicit coordinate instead")
else:
func_grp = FunctionalGroups[func_grp]
# If a bond length can be found, modify func_grp so that the X-group
# bond length is equal to the bond length.
bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie,
bond_order=bond_order)
if bl is not None:
func_grp = func_grp.copy()
vec = func_grp[0].coords - func_grp[1].coords
func_grp[0] = "X", func_grp[1].coords + bl / np.linalg.norm(vec) \
* vec
# Align X to the origin.
x = func_grp[0]
func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)
# Find angle between the attaching bond and the bond to be replaced.
v1 = func_grp[1].coords - origin
v2 = self[index].coords - origin
angle = get_angle(v1, v2)
if 1 < abs(angle % 180) < 179:
# For angles which are not 0 or 180, we perform a rotation about
# the origin along an axis perpendicular to both bonds to align
# bonds.
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
func_grp.apply_operation(op)
elif abs(abs(angle) - 180) < 1:
# We have a 180 degree angle. Simply do an inversion about the
# origin
for i in range(len(func_grp)):
func_grp[i] = (func_grp[i].species_and_occu,
origin - (func_grp[i].coords - origin))
# Remove the atom to be replaced, and add the rest of the functional
# group.
del self[index]
for site in func_grp[1:]:
self._sites.append(site)
class StructureError(Exception):
"""
Exception class for Structure.
Raised when the structure has problems, e.g., atoms that are too close.
"""
pass
with open(os.path.join(os.path.dirname(__file__),
"func_groups.json"), "rt") as f:
FunctionalGroups = {k: Molecule(v["species"], v["coords"])
for k, v in json.load(f).items()}
|
aykol/pymatgen
|
pymatgen/core/structure.py
|
Python
|
mit
| 123,332
|
[
"ABINIT",
"CRYSTAL",
"Gaussian",
"NetCDF",
"VASP",
"pymatgen"
] |
569c5e3491033d3319ac0a41f43b7981bfd4ce398415ca24d99adfb918f8d68d
|
import random
import string
import time
from mock import patch
from django.contrib.auth.models import User
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.backends.db import SessionStore
from django.core.urlresolvers import NoReverseMatch
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test.client import RequestFactory
from . import utils
from . import config
from .connection import parse_redis_url, get_redis_connection
from .models import AccessAttempt
from .test import DefenderTestCase, DefenderTransactionTestCase
# Django >= 1.7 compatibility
try:
LOGIN_FORM_KEY = ('<form action="/admin/login/" method="post"'
' id="login-form">')
ADMIN_LOGIN_URL = reverse('admin:login')
except NoReverseMatch:
ADMIN_LOGIN_URL = reverse('admin:index')
LOGIN_FORM_KEY = 'this_is_the_login_form'
VALID_USERNAME = VALID_PASSWORD = 'valid'
class AccessAttemptTest(DefenderTestCase):
""" Test case using custom settings for testing
"""
LOCKED_MESSAGE = 'Account locked: too many login attempts.'
PERMANENT_LOCKED_MESSAGE = (
LOCKED_MESSAGE + ' Contact an admin to unlock your account.'
)
def _get_random_str(self):
""" Returns a random str """
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for x in range(20))
def _login(self, username=None, password=None, user_agent='test-browser',
remote_addr='127.0.0.1'):
""" Login a user. If the username or password is not provided
it will use a random string instead. Use the VALID_USERNAME and
VALID_PASSWORD to make a valid login.
"""
if username is None:
username = self._get_random_str()
if password is None:
password = self._get_random_str()
response = self.client.post(ADMIN_LOGIN_URL, {
'username': username,
'password': password,
LOGIN_FORM_KEY: 1,
}, HTTP_USER_AGENT=user_agent, REMOTE_ADDR=remote_addr)
return response
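# Hedged usage sketch (inside a test method of this class):
# response = self._login()  # random invalid credentials -> login page again
# response = self._login(username=VALID_USERNAME, password=VALID_PASSWORD)
# # a successful login redirects (HTTP 302) away from the login form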
def setUp(self):
""" Create a valid user for login
"""
self.user = User.objects.create_superuser(
username=VALID_USERNAME,
email='test@example.com',
password=VALID_PASSWORD,
)
def test_data_integrity_of_get_blocked_ips(self):
""" Test whether data retrieved from redis via
get_blocked_ips() is the same as the data saved
"""
data_in = ['127.0.0.1', '4.2.2.1']
for ip in data_in:
utils.block_ip(ip)
data_out = utils.get_blocked_ips()
self.assertEqual(sorted(data_in), sorted(data_out))
# send in None, should have same values.
utils.block_ip(None)
data_out = utils.get_blocked_ips()
self.assertEqual(sorted(data_in), sorted(data_out))
def test_data_integrity_of_get_blocked_usernames(self):
""" Test whether data retrieved from redis via
get_blocked_usernames() is the same as the data saved
"""
data_in = ['foo', 'bar']
for username in data_in:
utils.block_username(username)
data_out = utils.get_blocked_usernames()
self.assertEqual(sorted(data_in), sorted(data_out))
# send in None, should have same values.
utils.block_username(None)
data_out = utils.get_blocked_usernames()
self.assertEqual(sorted(data_in), sorted(data_out))
def test_login_get(self):
""" visit the login page """
response = self.client.get(ADMIN_LOGIN_URL)
self.assertEquals(response.status_code, 200)
def test_failure_limit_by_ip_once(self):
""" Tests the login lock by ip when trying to login
one more time than failure limit
"""
for i in range(0, config.FAILURE_LIMIT):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
response = self._login()
self.assertContains(response, self.LOCKED_MESSAGE)
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertContains(response, self.LOCKED_MESSAGE)
def test_failure_limit_by_ip_many(self):
""" Tests the login lock by ip when trying to
login a lot of times more than failure limit
"""
for i in range(0, config.FAILURE_LIMIT):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
for i in range(0, random.randrange(1, 10)):
# try to log in a bunch of times
response = self._login()
self.assertContains(response, self.LOCKED_MESSAGE)
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertContains(response, self.LOCKED_MESSAGE)
def test_failure_limit_by_username_once(self):
""" Tests the login lock by username when trying to login
one more time than failure limit
"""
for i in range(0, config.FAILURE_LIMIT):
            ip = '74.125.239.{0}'.format(i)
response = self._login(username=VALID_USERNAME, remote_addr=ip)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
response = self._login()
self.assertContains(response, self.LOCKED_MESSAGE)
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertContains(response, self.LOCKED_MESSAGE)
def test_valid_login(self):
""" Tests a valid login for a real username
"""
response = self._login(username=VALID_USERNAME,
password=VALID_PASSWORD)
self.assertNotContains(response, LOGIN_FORM_KEY, status_code=302)
def test_reset_after_valid_login(self):
""" Tests the counter gets reset after a valid login
"""
for i in range(0, config.FAILURE_LIMIT):
self._login(username=VALID_USERNAME)
# now login with a valid username and password
self._login(username=VALID_USERNAME, password=VALID_PASSWORD)
# and we should be able to try again without hitting the failure limit
response = self._login(username=VALID_USERNAME)
self.assertNotContains(response, self.LOCKED_MESSAGE)
def test_blocked_ip_cannot_login(self):
""" Test an user with blocked ip cannot login with another username
"""
for i in range(0, config.FAILURE_LIMIT + 1):
response = self._login(username=VALID_USERNAME)
# try to login with a different user
response = self._login(username='myuser')
self.assertContains(response, self.LOCKED_MESSAGE)
def test_blocked_username_cannot_login(self):
""" Test an user with blocked username cannot login using
another ip
"""
for i in range(0, config.FAILURE_LIMIT + 1):
            ip = '74.125.239.{0}'.format(i)
response = self._login(username=VALID_USERNAME, remote_addr=ip)
# try to login with a different ip
response = self._login(username=VALID_USERNAME, remote_addr='8.8.8.8')
self.assertContains(response, self.LOCKED_MESSAGE)
def test_cooling_off(self):
""" Tests if the cooling time allows a user to login
"""
self.test_failure_limit_by_ip_once()
# Wait for the cooling off period
time.sleep(config.COOLOFF_TIME)
if config.MOCK_REDIS:
# mock redis require that we expire on our own
get_redis_connection().do_expire() # pragma: no cover
# It should be possible to login again, make sure it is.
self.test_valid_login()
def test_cooling_off_for_trusted_user(self):
""" Test the cooling time for a trusted user
"""
# Try the cooling off time
self.test_cooling_off()
def test_long_user_agent_valid(self):
""" Tests if can handle a long user agent
"""
long_user_agent = 'ie6' * 1024
response = self._login(username=VALID_USERNAME, password=VALID_PASSWORD,
user_agent=long_user_agent)
self.assertNotContains(response, LOGIN_FORM_KEY, status_code=302)
@patch('defender.config.BEHIND_REVERSE_PROXY', True)
@patch('defender.config.REVERSE_PROXY_HEADER', 'HTTP_X_FORWARDED_FOR')
def test_get_ip_reverse_proxy(self):
""" Tests if can handle a long user agent
"""
request_factory = RequestFactory()
request = request_factory.get(ADMIN_LOGIN_URL)
request.user = AnonymousUser()
request.session = SessionStore()
request.META['HTTP_X_FORWARDED_FOR'] = '192.168.24.24'
self.assertEquals(utils.get_ip(request), '192.168.24.24')
request_factory = RequestFactory()
request = request_factory.get(ADMIN_LOGIN_URL)
request.user = AnonymousUser()
request.session = SessionStore()
request.META['REMOTE_ADDR'] = '24.24.24.24'
self.assertEquals(utils.get_ip(request), '24.24.24.24')
def test_get_ip(self):
""" Tests if can handle a long user agent
"""
request_factory = RequestFactory()
request = request_factory.get(ADMIN_LOGIN_URL)
request.user = AnonymousUser()
request.session = SessionStore()
self.assertEquals(utils.get_ip(request), '127.0.0.1')
def test_long_user_agent_not_valid(self):
""" Tests if can handle a long user agent with failure
"""
long_user_agent = 'ie6' * 1024
for i in range(0, config.FAILURE_LIMIT + 1):
response = self._login(user_agent=long_user_agent)
self.assertContains(response, self.LOCKED_MESSAGE)
def test_reset_ip(self):
""" Tests if can reset an ip address
"""
# Make a lockout
self.test_failure_limit_by_ip_once()
# Reset the ip so we can try again
utils.reset_failed_attempts(ip_address='127.0.0.1')
# Make a login attempt again
self.test_valid_login()
@patch('defender.config.LOCKOUT_URL', 'http://localhost/othe/login/')
def test_failed_login_redirect_to_URL(self):
""" Test to make sure that after lockout we send to the correct
redirect URL """
for i in range(0, config.FAILURE_LIMIT):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now, check redirect make sure it is valid.
response = self._login()
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], 'http://localhost/othe/login/')
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], 'http://localhost/othe/login/')
@patch('defender.config.LOCKOUT_URL', '/o/login/')
def test_failed_login_redirect_to_URL_local(self):
""" Test to make sure that after lockout we send to the correct
redirect URL """
for i in range(0, config.FAILURE_LIMIT):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now, check redirect make sure it is valid.
response = self._login()
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], 'http://testserver/o/login/')
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], 'http://testserver/o/login/')
@patch('defender.config.LOCKOUT_TEMPLATE', 'defender/lockout.html')
def test_failed_login_redirect_to_template(self):
""" Test to make sure that after lockout we send to the correct
template """
for i in range(0, config.FAILURE_LIMIT):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now, check template make sure it is valid.
response = self._login()
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'defender/lockout.html')
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'defender/lockout.html')
@patch('defender.config.COOLOFF_TIME', 0)
def test_failed_login_no_cooloff(self):
for i in range(0, config.FAILURE_LIMIT):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now, check redirect make sure it is valid.
response = self._login()
self.assertContains(response, self.PERMANENT_LOCKED_MESSAGE)
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertContains(response, self.PERMANENT_LOCKED_MESSAGE)
def test_login_attempt_model(self):
""" test the login model"""
response = self._login()
self.assertContains(response, LOGIN_FORM_KEY)
self.assertEquals(AccessAttempt.objects.count(), 1)
self.assertIsNotNone(str(AccessAttempt.objects.all()[0]))
def test_is_valid_ip(self):
""" Test the is_valid_ip() method """
self.assertEquals(utils.is_valid_ip('192.168.0.1'), True)
self.assertEquals(utils.is_valid_ip('130.80.100.24'), True)
self.assertEquals(utils.is_valid_ip('8.8.8.8'), True)
self.assertEquals(utils.is_valid_ip('127.0.0.1'), True)
self.assertEquals(utils.is_valid_ip('fish'), False)
self.assertEquals(utils.is_valid_ip(None), False)
self.assertEquals(utils.is_valid_ip(''), False)
self.assertEquals(utils.is_valid_ip('0x41.0x41.0x41.0x41'), False)
self.assertEquals(utils.is_valid_ip('192.168.100.34.y'), False)
self.assertEquals(
utils.is_valid_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334'), True)
self.assertEquals(
utils.is_valid_ip('2001:db8:85a3:0:0:8a2e:370:7334'), True)
self.assertEquals(
utils.is_valid_ip('2001:db8:85a3::8a2e:370:7334'), True)
self.assertEquals(
utils.is_valid_ip('::ffff:192.0.2.128'), True)
self.assertEquals(
utils.is_valid_ip('::ffff:8.8.8.8'), True)
def test_parse_redis_url(self):
""" test the parse_redis_url method """
# full regular
conf = parse_redis_url("redis://user:password@localhost2:1234/2")
self.assertEquals(conf.get('HOST'), 'localhost2')
self.assertEquals(conf.get('DB'), 2)
self.assertEquals(conf.get('PASSWORD'), 'password')
self.assertEquals(conf.get('PORT'), 1234)
# full non local
conf = parse_redis_url("redis://user:pass@www.localhost.com:1234/2")
self.assertEquals(conf.get('HOST'), 'www.localhost.com')
self.assertEquals(conf.get('DB'), 2)
self.assertEquals(conf.get('PASSWORD'), 'pass')
self.assertEquals(conf.get('PORT'), 1234)
# no user name
conf = parse_redis_url("redis://password@localhost2:1234/2")
self.assertEquals(conf.get('HOST'), 'localhost2')
self.assertEquals(conf.get('DB'), 2)
self.assertEquals(conf.get('PASSWORD'), None)
self.assertEquals(conf.get('PORT'), 1234)
# no user name 2 with colon
conf = parse_redis_url("redis://:password@localhost2:1234/2")
self.assertEquals(conf.get('HOST'), 'localhost2')
self.assertEquals(conf.get('DB'), 2)
self.assertEquals(conf.get('PASSWORD'), 'password')
self.assertEquals(conf.get('PORT'), 1234)
# Empty
conf = parse_redis_url(None)
self.assertEquals(conf.get('HOST'), 'localhost')
self.assertEquals(conf.get('DB'), 0)
self.assertEquals(conf.get('PASSWORD'), None)
self.assertEquals(conf.get('PORT'), 6379)
# no db
conf = parse_redis_url("redis://:password@localhost2:1234")
self.assertEquals(conf.get('HOST'), 'localhost2')
self.assertEquals(conf.get('DB'), 0)
self.assertEquals(conf.get('PASSWORD'), 'password')
self.assertEquals(conf.get('PORT'), 1234)
# no password
conf = parse_redis_url("redis://localhost2:1234/0")
self.assertEquals(conf.get('HOST'), 'localhost2')
self.assertEquals(conf.get('DB'), 0)
self.assertEquals(conf.get('PASSWORD'), None)
self.assertEquals(conf.get('PORT'), 1234)
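        # The cases above exercise the URL shape
        # redis://[user]:[password]@host:port/db; parse_redis_url falls
        # back to localhost:6379, db 0, no password when given None.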
def test_get_ip_address_from_request(self):
req = HttpRequest()
req.META['REMOTE_ADDR'] = '1.2.3.4'
ip = utils.get_ip_address_from_request(req)
self.assertEqual(ip, '1.2.3.4')
req = HttpRequest()
req.META['REMOTE_ADDR'] = '1.2.3.4 '
ip = utils.get_ip_address_from_request(req)
self.assertEqual(ip, '1.2.3.4')
req = HttpRequest()
req.META['REMOTE_ADDR'] = '192.168.100.34.y'
ip = utils.get_ip_address_from_request(req)
self.assertEqual(ip, '127.0.0.1')
req = HttpRequest()
req.META['REMOTE_ADDR'] = 'cat'
ip = utils.get_ip_address_from_request(req)
self.assertEqual(ip, '127.0.0.1')
req = HttpRequest()
ip = utils.get_ip_address_from_request(req)
self.assertEqual(ip, '127.0.0.1')
@patch('defender.config.BEHIND_REVERSE_PROXY', True)
@patch('defender.config.REVERSE_PROXY_HEADER', 'HTTP_X_PROXIED')
def test_get_ip_reverse_proxy_custom_header(self):
req = HttpRequest()
req.META['HTTP_X_PROXIED'] = '1.2.3.4'
self.assertEqual(utils.get_ip(req), '1.2.3.4')
req = HttpRequest()
req.META['HTTP_X_PROXIED'] = '1.2.3.4, 5.6.7.8, 127.0.0.1'
self.assertEqual(utils.get_ip(req), '1.2.3.4')
req = HttpRequest()
req.META['REMOTE_ADDR'] = '1.2.3.4'
self.assertEqual(utils.get_ip(req), '1.2.3.4')
@patch('defender.config.BEHIND_REVERSE_PROXY', True)
@patch('defender.config.REVERSE_PROXY_HEADER', 'HTTP_X_REAL_IP')
def test_get_user_attempts(self):
ip_attempts = random.randint(3, 12)
username_attempts = random.randint(3, 12)
for i in range(0, ip_attempts):
utils.increment_key(utils.get_ip_attempt_cache_key('1.2.3.4'))
for i in range(0, username_attempts):
utils.increment_key(utils.get_username_attempt_cache_key('foobar'))
req = HttpRequest()
req.POST['username'] = 'foobar'
req.META['HTTP_X_REAL_IP'] = '1.2.3.4'
self.assertEqual(
utils.get_user_attempts(req), max(ip_attempts, username_attempts)
)
req = HttpRequest()
req.POST['username'] = 'foobar'
req.META['HTTP_X_REAL_IP'] = '5.6.7.8'
self.assertEqual(
utils.get_user_attempts(req), username_attempts
)
req = HttpRequest()
req.POST['username'] = 'barfoo'
req.META['HTTP_X_REAL_IP'] = '1.2.3.4'
self.assertEqual(
utils.get_user_attempts(req), ip_attempts
)
def test_admin(self):
""" test the admin pages for this app """
from .admin import AccessAttemptAdmin
        self.assertIsNotNone(AccessAttemptAdmin)
def test_decorator_middleware(self):
        # watch_login is called twice in this test (once by the
        # middleware and once by the decorator), so we have half as many
        # attempts before getting locked out. There is no easy way to
        # dynamically remove one of the middlewares during a test, so we
        # divide the failure limit by 2.
for i in range(0, int(config.FAILURE_LIMIT)):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
response = self._login()
self.assertContains(response, self.LOCKED_MESSAGE)
# doing a get should also get locked out message
response = self.client.get(ADMIN_LOGIN_URL)
self.assertContains(response, self.LOCKED_MESSAGE)
def test_get_view(self):
""" Check that the decorator doesn't tamper with GET requests"""
for i in range(0, config.FAILURE_LIMIT):
response = self.client.get(ADMIN_LOGIN_URL)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
response = self.client.get(ADMIN_LOGIN_URL)
self.assertNotContains(response, self.LOCKED_MESSAGE)
@patch('defender.config.USE_CELERY', True)
def test_use_celery(self):
""" Check that use celery works"""
self.assertEquals(AccessAttempt.objects.count(), 0)
for i in range(0, int(config.FAILURE_LIMIT)):
response = self._login()
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
response = self._login()
self.assertContains(response, self.LOCKED_MESSAGE)
self.assertEquals(AccessAttempt.objects.count(),
config.FAILURE_LIMIT+1)
self.assertIsNotNone(str(AccessAttempt.objects.all()[0]))
@patch('defender.config.LOCKOUT_BY_IP_USERNAME', True)
def test_lockout_by_ip_and_username(self):
"""Check that lockout still works when locking out by IP and Username combined"""
username = 'testy'
for i in range(0, config.FAILURE_LIMIT):
response = self._login(username=username)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# But we should get one now
response = self._login(username=username)
self.assertContains(response, self.LOCKED_MESSAGE)
# We shouldn't get a lockout message when attempting to use no username
response = self.client.get(ADMIN_LOGIN_URL)
self.assertContains(response, LOGIN_FORM_KEY)
# We shouldn't get a lockout message when attempting to use a different username
response = self._login()
self.assertContains(response, LOGIN_FORM_KEY)
# We shouldn't get a lockout message when attempting to use a different ip address
ip = '74.125.239.60'
response = self._login(username=VALID_USERNAME, remote_addr=ip)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
@patch('defender.config.DISABLE_IP_LOCKOUT', True)
def test_disable_ip_lockout(self):
"""Check that lockout still works when we disable IP Lock out"""
username = 'testy'
# try logging in with the same IP, but different username
# we shouldn't be blocked.
        # same IP, different usernames
ip = '74.125.239.60'
for i in range(0, config.FAILURE_LIMIT+10):
login_username = u"{0}{1}".format(username, i)
response = self._login(username=login_username, remote_addr=ip)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# So, we shouldn't have gotten a lock-out yet.
# same username with same IP
for i in range(0, config.FAILURE_LIMIT):
response = self._login(username=username)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# But we should get one now
        # same username and IP, over the limit for username.
response = self._login(username=username)
self.assertContains(response, self.LOCKED_MESSAGE)
# We shouldn't get a lockout message when attempting to use no username
response = self.client.get(ADMIN_LOGIN_URL)
self.assertContains(response, LOGIN_FORM_KEY)
# We shouldn't get a lockout message when attempting to use a different username
response = self._login()
self.assertContains(response, LOGIN_FORM_KEY)
# We shouldn't get a lockout message when attempting to use a different ip address
second_ip = '74.125.239.99'
response = self._login(username=VALID_USERNAME, remote_addr=second_ip)
# Check if we are in the same login page
self.assertContains(response, LOGIN_FORM_KEY)
# we should have no ip's blocked
data_out = utils.get_blocked_ips()
self.assertEqual(data_out, [])
# even if we try to manually block one it still won't be in there.
utils.block_ip(second_ip)
# we should still have no ip's blocked
data_out = utils.get_blocked_ips()
self.assertEqual(data_out, [])
class DefenderTestCaseTest(DefenderTestCase):
"""Make sure that we're cleaning the cache between tests"""
key = 'test_key'
def test_first_incr(self):
utils.REDIS_SERVER.incr(self.key)
result = int(utils.REDIS_SERVER.get(self.key))
self.assertEqual(result, 1)
def test_second_incr(self):
utils.REDIS_SERVER.incr(self.key)
result = int(utils.REDIS_SERVER.get(self.key))
self.assertEqual(result, 1)
class DefenderTransactionTestCaseTest(DefenderTransactionTestCase):
"""Make sure that we're cleaning the cache between tests"""
key = 'test_key'
def test_first_incr(self):
utils.REDIS_SERVER.incr(self.key)
result = int(utils.REDIS_SERVER.get(self.key))
self.assertEqual(result, 1)
def test_second_incr(self):
utils.REDIS_SERVER.incr(self.key)
result = int(utils.REDIS_SERVER.get(self.key))
self.assertEqual(result, 1)
class TestUtils(DefenderTestCase):
def test_username_blocking(self):
username = 'foo'
self.assertFalse(utils.is_user_already_locked(username))
utils.block_username(username)
self.assertTrue(utils.is_user_already_locked(username))
utils.unblock_username(username)
self.assertFalse(utils.is_user_already_locked(username))
def test_ip_address_blocking(self):
ip = '1.2.3.4'
self.assertFalse(utils.is_source_ip_already_locked(ip))
utils.block_ip(ip)
self.assertTrue(utils.is_source_ip_already_locked(ip))
utils.unblock_ip(ip)
self.assertFalse(utils.is_source_ip_already_locked(ip))
|
docker-hub/django-defender
|
defender/tests.py
|
Python
|
apache-2.0
| 27,805
|
[
"VisIt"
] |
9c30a8668a037320062905a697abc1b452b9eff2b54020f8e773406793969753
|
#!C:\Python33\python.exe -u
# -*- coding: UTF-8 -*-
# enable debugging
import cgi
import cgitb
cgitb.enable()
import struct
import array
form = cgi.FieldStorage()
print("Content-Type: text/html")
print("""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<noscript>
<p>Sorry, we can't analyze your stats without scripting enabled.</p>
</noscript>
<title>Dark Souls death counter - Submit your stats</title>
<link rel="stylesheet" type="text/css" href="../styles.css">
<style type="text/css">
html
{
background-image:url(../images/BG_capra2_fade.jpg);
}
</style>
</head>
<body>
<div class="header"></div>
<div class="wrapper">
<div class="container">
<div class="navbar">
<ul>
<li><a href="../index.html">Home</a></li>
<li><a href="stats.py">Stats</a></li>
<li><a href="../about.html">About</a></li>
</ul>
</div>
<div class="content">
<table id="table-char">
<tr>""")
print("""
<td>Character: <span id="span-char">{name}</span></td>
<td>Total deaths: <span id="span-deaths">{deaths}</span></td>
</tr>
</table>
<form name="playerInfo" method="POST" action="stats.py" enctype="multipart/form-data" >
<p>Help us make pretty graphs! Fill in a little extra information about your progress so we can make the stats page more useful and show you how you stack up against everyone else. Don't worry; everything is anonymous, so nobody will have to know how much you suck at this game.</p>
<div id="extraCharInfos"><div class="extra-info">
<label for="playthrough">What's the highest <a href="http://darksouls.wikidot.com/new-game-plus" class="spoiler-link">playthrough</a> you've COMPLETED with this character?</label>
<select id="playthrough" name="playthrough" >
<option value="0">None</option>
<option value="1">New game</option>
<option value="2">New game +</option>
<option value="3">New game +2</option>
<option value="4">New game +3</option>
<option value="5">New game +4</option>
<option value="6">New game +5</option>
<option value="7">New game +6</option>
<option value="8">I have beaten NG+7 or higher. Please send help.</option>
</select>
<p></p>
<label for="progress">What's the furthest <a href="http://darksouls.wikidot.com/areas" class="spoiler-link">area</a> you've COMPLETED with this character in your latest playthrough?</label>
<select id="progress" name="progress" >
<option value="0">None</option>
<option value="0.05">Undead Asylum</option>
<option value="0.20">1st Bell (Undead Parish)</option>
<option value="0.30">2nd Bell (Quelaag's Domain)</option>
<option value="0.40">Sen's Fortress</option>
<option value="0.50">Anor Londo</option>
<option value="0.60">1/4 Lord Souls (Seath/Nito/etc.)</option>
<option value="0.70">2/4 Lord Souls</option>
<option value="0.80">3/4 Lord Souls</option>
<option value="0.95">4/4 Lord Souls</option>
</select>
<p>Check all of the optional areas that you've COMPLETED:</p>
<input type="checkbox" id="optional-shitholes" name="optional-shitholes" />
<label for="optional-shitholes">Lower Undead Burg/Depths</label>
<input type="checkbox" id="optional-dragonbros" name="optional-dragonbros" />
<label for="optional-dragonbros">Great Hollow/Ash Lake</label>
<input type="checkbox" id="optional-asylum" name="optional-asylum" />
<label for="optional-asylum">Undead Asylum (2nd visit)</label>
<input type="checkbox" id="optional-paintedworld" name="optional-paintedworld" />
<label for="optional-paintedworld">Painted World of Ariamis</label>
<input type="checkbox" id="optional-manus" name="optional-manus" />
<label for="optional-manus">Additional Content (beat Manus)</label>
<br />
<p>What is your favorite nickname for Ornstein & Smough?</p>
<select id="smornstein" name="smornstein" >
<option value="0">None</option>
<option value="1">Anor Londo Attorneys</option>
<option value="2">Asterix & Obelix</option>
<option value="3">Atlas & P-body</option>
<option value="4">Biggie Smalls</option>
<option value="5">Fatboy Slim</option>
<option value="6">Fatman & Robin</option>
<option value="7">Fuck This I Quit</option>
<option value="8">James and the Giant Peach</option>
<option value="9">Jay & Silent Bob</option>
<option value="10">Jonah and the Whale</option>
<option value="11">Knight and the Round Table</option>
<option value="12">Laurel & Hardy</option>
<option value="13">Penn & Teller</option>
<option value="14">Pikachu & Snorlax</option>
<option value="15">Ren & Stimpy</option>
<option value="16">Spade & Farley</option>
<option value="17">Super Londo Bros</option>
<option value="18">Thunder & Thighs</option>
<option value="19">Timon & Pumba</option>
</select>
</div>
<input type="hidden" name="name" value="{name}" />
<input type="hidden" name="deaths" value="{deaths}" />
<input type="submit" id="submit" value="Enough dicking around. How'd I do?" />
</form>""".format(name=form['name'].value, deaths=form['deaths'].value))
print("""
<p></p>
</div>
</div>
</div>
</body>
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-47846181-1']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</html>
""")
|
RKYates/Dark-Souls-Death-Count-cgi-page
|
cgi-bin/submit.py
|
Python
|
gpl-3.0
| 5,722
|
[
"VisIt"
] |
8a98bd551ac4ba222947328b93350b3dc927928bceaa5c3a58a85bbaee62ad01
|
import unittest
from test import support
import sys
import random
# Used for lazy formatting of failure messages
class Frm(object):
def __init__(self, format, *args):
self.format = format
self.args = args
def __str__(self):
return self.format % self.args
# SHIFT should match the value in longintrepr.h for best testing.
SHIFT = 15
BASE = 2 ** SHIFT
MASK = BASE - 1
KARATSUBA_CUTOFF = 70 # from longobject.c
# Max number of base BASE digits to use in test cases. Doubling
# this will more than double the runtime.
MAXDIGITS = 15
# build some special values
special = [0, 1, 2, BASE, BASE >> 1, 0x5555555555555555, 0xaaaaaaaaaaaaaaaa]
# some solid strings of one bits
p2 = 4 # 0 and 1 already added
for i in range(2*SHIFT):
special.append(p2 - 1)
p2 = p2 << 1
del p2
# add complements & negations
special += [~x for x in special] + [-x for x in special]
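# For example, with SHIFT = 15 as set above: BASE == 32768,
# MASK == 32767, and the loop contributes 3, 7, 15, ... (runs of one
# bits), so special[:8] == [0, 1, 2, 32768, 16384, 0x5555555555555555,
# 0xaaaaaaaaaaaaaaaa, 3] before the complements and negations are added.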
L = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError)
]
class LongTest(unittest.TestCase):
# Get quasi-random long consisting of ndigits digits (in base BASE).
# quasi == the most-significant digit will not be 0, and the number
# is constructed to contain long strings of 0 and 1 bits. These are
# more likely than random bits to provoke digit-boundary errors.
# The sign of the number is also random.
def getran(self, ndigits):
self.assert_(ndigits > 0)
nbits_hi = ndigits * SHIFT
nbits_lo = nbits_hi - SHIFT + 1
answer = 0
nbits = 0
r = int(random.random() * (SHIFT * 2)) | 1 # force 1 bits to start
while nbits < nbits_lo:
bits = (r >> 1) + 1
bits = min(bits, nbits_hi - nbits)
self.assert_(1 <= bits <= SHIFT)
nbits = nbits + bits
answer = answer << bits
if r & 1:
answer = answer | ((1 << bits) - 1)
r = int(random.random() * (SHIFT * 2))
self.assert_(nbits_lo <= nbits <= nbits_hi)
if random.random() < 0.5:
answer = -answer
return answer
# Get random long consisting of ndigits random digits (relative to base
# BASE). The sign bit is also random.
    def getran2(self, ndigits):
answer = 0
for i in range(ndigits):
answer = (answer << SHIFT) | random.randint(0, MASK)
if random.random() < 0.5:
answer = -answer
return answer
def check_division(self, x, y):
eq = self.assertEqual
q, r = divmod(x, y)
q2, r2 = x//y, x%y
pab, pba = x*y, y*x
eq(pab, pba, Frm("multiplication does not commute for %r and %r", x, y))
eq(q, q2, Frm("divmod returns different quotient than / for %r and %r", x, y))
eq(r, r2, Frm("divmod returns different mod than %% for %r and %r", x, y))
eq(x, q*y + r, Frm("x != q*y + r after divmod on x=%r, y=%r", x, y))
if y > 0:
self.assert_(0 <= r < y, Frm("bad mod from divmod on %r and %r", x, y))
else:
self.assert_(y < r <= 0, Frm("bad mod from divmod on %r and %r", x, y))
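    # A worked instance of the invariants above (illustration only):
    # divmod(7, -3) == (-3, -2), and 7 == (-3)*(-3) + (-2) with
    # -3 < -2 <= 0, matching the negative-y branch.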
def test_division(self):
digits = list(range(1, MAXDIGITS+1)) + list(range(KARATSUBA_CUTOFF,
KARATSUBA_CUTOFF + 14))
digits.append(KARATSUBA_CUTOFF * 3)
for lenx in digits:
x = self.getran(lenx)
for leny in digits:
y = self.getran(leny) or 1
self.check_division(x, y)
def test_karatsuba(self):
digits = list(range(1, 5)) + list(range(KARATSUBA_CUTOFF,
KARATSUBA_CUTOFF + 10))
digits.extend([KARATSUBA_CUTOFF * 10, KARATSUBA_CUTOFF * 100])
bits = [digit * SHIFT for digit in digits]
# Test products of long strings of 1 bits -- (2**x-1)*(2**y-1) ==
# 2**(x+y) - 2**x - 2**y + 1, so the proper result is easy to check.
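        # e.g. (2**2 - 1) * (2**3 - 1) == 3 * 7 == 21, and
        # 2**5 - 2**2 - 2**3 + 1 == 32 - 4 - 8 + 1 == 21.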
for abits in bits:
a = (1 << abits) - 1
for bbits in bits:
if bbits < abits:
continue
b = (1 << bbits) - 1
x = a * b
y = ((1 << (abits + bbits)) -
(1 << abits) -
(1 << bbits) +
1)
self.assertEqual(x, y,
Frm("bad result for a*b: a=%r, b=%r, x=%r, y=%r", a, b, x, y))
def check_bitop_identities_1(self, x):
eq = self.assertEqual
eq(x & 0, 0, Frm("x & 0 != 0 for x=%r", x))
eq(x | 0, x, Frm("x | 0 != x for x=%r", x))
eq(x ^ 0, x, Frm("x ^ 0 != x for x=%r", x))
eq(x & -1, x, Frm("x & -1 != x for x=%r", x))
eq(x | -1, -1, Frm("x | -1 != -1 for x=%r", x))
eq(x ^ -1, ~x, Frm("x ^ -1 != ~x for x=%r", x))
eq(x, ~~x, Frm("x != ~~x for x=%r", x))
eq(x & x, x, Frm("x & x != x for x=%r", x))
eq(x | x, x, Frm("x | x != x for x=%r", x))
eq(x ^ x, 0, Frm("x ^ x != 0 for x=%r", x))
eq(x & ~x, 0, Frm("x & ~x != 0 for x=%r", x))
eq(x | ~x, -1, Frm("x | ~x != -1 for x=%r", x))
eq(x ^ ~x, -1, Frm("x ^ ~x != -1 for x=%r", x))
eq(-x, 1 + ~x, Frm("not -x == 1 + ~x for x=%r", x))
        eq(-x, ~(x-1), Frm("not -x == ~(x-1) for x=%r", x))
for n in range(2*SHIFT):
p2 = 2 ** n
            eq(x << n >> n, x,
               Frm("x << n >> n != x for x=%r, n=%r", x, n))
            eq(x // p2, x >> n,
               Frm("x // p2 != x >> n for x=%r n=%r p2=%r", x, n, p2))
            eq(x * p2, x << n,
               Frm("x * p2 != x << n for x=%r n=%r p2=%r", x, n, p2))
            eq(x & -p2, x >> n << n,
               Frm("not x & -p2 == x >> n << n for x=%r n=%r p2=%r", x, n, p2))
            eq(x & -p2, x & ~(p2 - 1),
               Frm("not x & -p2 == x & ~(p2 - 1) for x=%r n=%r p2=%r", x, n, p2))
def check_bitop_identities_2(self, x, y):
eq = self.assertEqual
        eq(x & y, y & x, Frm("x & y != y & x for x=%r, y=%r", x, y))
        eq(x | y, y | x, Frm("x | y != y | x for x=%r, y=%r", x, y))
        eq(x ^ y, y ^ x, Frm("x ^ y != y ^ x for x=%r, y=%r", x, y))
        eq(x ^ y ^ x, y, Frm("x ^ y ^ x != y for x=%r, y=%r", x, y))
        eq(x & y, ~(~x | ~y), Frm("x & y != ~(~x | ~y) for x=%r, y=%r", x, y))
        eq(x | y, ~(~x & ~y), Frm("x | y != ~(~x & ~y) for x=%r, y=%r", x, y))
        eq(x ^ y, (x | y) & ~(x & y),
           Frm("x ^ y != (x | y) & ~(x & y) for x=%r, y=%r", x, y))
        eq(x ^ y, (x & ~y) | (~x & y),
           Frm("x ^ y != (x & ~y) | (~x & y) for x=%r, y=%r", x, y))
        eq(x ^ y, (x | y) & (~x | ~y),
           Frm("x ^ y != (x | y) & (~x | ~y) for x=%r, y=%r", x, y))
def check_bitop_identities_3(self, x, y, z):
eq = self.assertEqual
        eq((x & y) & z, x & (y & z),
           Frm("(x & y) & z != x & (y & z) for x=%r, y=%r, z=%r", x, y, z))
        eq((x | y) | z, x | (y | z),
           Frm("(x | y) | z != x | (y | z) for x=%r, y=%r, z=%r", x, y, z))
        eq((x ^ y) ^ z, x ^ (y ^ z),
           Frm("(x ^ y) ^ z != x ^ (y ^ z) for x=%r, y=%r, z=%r", x, y, z))
        eq(x & (y | z), (x & y) | (x & z),
           Frm("x & (y | z) != (x & y) | (x & z) for x=%r, y=%r, z=%r", x, y, z))
        eq(x | (y & z), (x | y) & (x | z),
           Frm("x | (y & z) != (x | y) & (x | z) for x=%r, y=%r, z=%r", x, y, z))
def test_bitop_identities(self):
for x in special:
self.check_bitop_identities_1(x)
digits = range(1, MAXDIGITS+1)
for lenx in digits:
x = self.getran(lenx)
self.check_bitop_identities_1(x)
for leny in digits:
y = self.getran(leny)
self.check_bitop_identities_2(x, y)
self.check_bitop_identities_3(x, y, self.getran((lenx + leny)//2))
def slow_format(self, x, base):
digits = []
sign = 0
if x < 0:
sign, x = 1, -x
while x:
x, r = divmod(x, base)
digits.append(int(r))
digits.reverse()
digits = digits or [0]
return '-'[:sign] + \
{2: '0b', 8: '0o', 10: '', 16: '0x'}[base] + \
"".join(map(lambda i: "0123456789abcdef"[i], digits))
def check_format_1(self, x):
for base, mapper in (8, oct), (10, repr), (16, hex):
got = mapper(x)
expected = self.slow_format(x, base)
msg = Frm("%s returned %r but expected %r for %r",
mapper.__name__, got, expected, x)
self.assertEqual(got, expected, msg)
self.assertEqual(int(got, 0), x, Frm('long("%s", 0) != %r', got, x))
# str() has to be checked a little differently since there's no
# trailing "L"
got = str(x)
expected = self.slow_format(x, 10)
msg = Frm("%s returned %r but expected %r for %r",
mapper.__name__, got, expected, x)
self.assertEqual(got, expected, msg)
def test_format(self):
for x in special:
self.check_format_1(x)
for i in range(10):
for lenx in range(1, MAXDIGITS+1):
x = self.getran(lenx)
self.check_format_1(x)
def test_long(self):
self.assertEqual(int(314), 314)
self.assertEqual(int(3.14), 3)
self.assertEqual(int(314), 314)
# Check that conversion from float truncates towards zero
self.assertEqual(int(-3.14), -3)
self.assertEqual(int(3.9), 3)
self.assertEqual(int(-3.9), -3)
self.assertEqual(int(3.5), 3)
self.assertEqual(int(-3.5), -3)
self.assertEqual(int("-3"), -3)
# Different base:
self.assertEqual(int("10",16), 16)
# Check conversions from string (same test set as for int(), and then some)
LL = [
('1' + '0'*20, 10**20),
('1' + '0'*100, 10**100)
]
L2 = L[:]
for s, v in L2 + LL:
for sign in "", "+", "-":
for prefix in "", " ", "\t", " \t\t ":
ss = prefix + sign + s
vv = v
if sign == "-" and v is not ValueError:
vv = -v
try:
self.assertEqual(int(ss), int(vv))
except ValueError:
pass
self.assertRaises(ValueError, int, '123\0')
self.assertRaises(ValueError, int, '53', 40)
# trailing L should no longer be accepted...
self.assertRaises(ValueError, int, '123L')
self.assertRaises(ValueError, int, '123l')
self.assertRaises(ValueError, int, '0L')
self.assertRaises(ValueError, int, '-37L')
self.assertRaises(ValueError, int, '0x32L', 16)
self.assertRaises(ValueError, int, '1L', 21)
# ... but it's just a normal digit if base >= 22
self.assertEqual(int('1L', 22), 43)
self.assertRaises(TypeError, int, 1, 12)
# SF patch #1638879: embedded NULs were not detected with
# explicit base
self.assertRaises(ValueError, int, '123\0', 10)
self.assertRaises(ValueError, int, '123\x00 245', 20)
self.assertEqual(int('100000000000000000000000000000000', 2),
4294967296)
self.assertEqual(int('102002022201221111211', 3), 4294967296)
self.assertEqual(int('10000000000000000', 4), 4294967296)
self.assertEqual(int('32244002423141', 5), 4294967296)
self.assertEqual(int('1550104015504', 6), 4294967296)
self.assertEqual(int('211301422354', 7), 4294967296)
self.assertEqual(int('40000000000', 8), 4294967296)
self.assertEqual(int('12068657454', 9), 4294967296)
self.assertEqual(int('4294967296', 10), 4294967296)
self.assertEqual(int('1904440554', 11), 4294967296)
self.assertEqual(int('9ba461594', 12), 4294967296)
self.assertEqual(int('535a79889', 13), 4294967296)
self.assertEqual(int('2ca5b7464', 14), 4294967296)
self.assertEqual(int('1a20dcd81', 15), 4294967296)
self.assertEqual(int('100000000', 16), 4294967296)
self.assertEqual(int('a7ffda91', 17), 4294967296)
self.assertEqual(int('704he7g4', 18), 4294967296)
self.assertEqual(int('4f5aff66', 19), 4294967296)
self.assertEqual(int('3723ai4g', 20), 4294967296)
self.assertEqual(int('281d55i4', 21), 4294967296)
self.assertEqual(int('1fj8b184', 22), 4294967296)
self.assertEqual(int('1606k7ic', 23), 4294967296)
self.assertEqual(int('mb994ag', 24), 4294967296)
self.assertEqual(int('hek2mgl', 25), 4294967296)
self.assertEqual(int('dnchbnm', 26), 4294967296)
self.assertEqual(int('b28jpdm', 27), 4294967296)
self.assertEqual(int('8pfgih4', 28), 4294967296)
self.assertEqual(int('76beigg', 29), 4294967296)
self.assertEqual(int('5qmcpqg', 30), 4294967296)
self.assertEqual(int('4q0jto4', 31), 4294967296)
self.assertEqual(int('4000000', 32), 4294967296)
self.assertEqual(int('3aokq94', 33), 4294967296)
self.assertEqual(int('2qhxjli', 34), 4294967296)
self.assertEqual(int('2br45qb', 35), 4294967296)
self.assertEqual(int('1z141z4', 36), 4294967296)
self.assertEqual(int('100000000000000000000000000000001', 2),
4294967297)
self.assertEqual(int('102002022201221111212', 3), 4294967297)
self.assertEqual(int('10000000000000001', 4), 4294967297)
self.assertEqual(int('32244002423142', 5), 4294967297)
self.assertEqual(int('1550104015505', 6), 4294967297)
self.assertEqual(int('211301422355', 7), 4294967297)
self.assertEqual(int('40000000001', 8), 4294967297)
self.assertEqual(int('12068657455', 9), 4294967297)
self.assertEqual(int('4294967297', 10), 4294967297)
self.assertEqual(int('1904440555', 11), 4294967297)
self.assertEqual(int('9ba461595', 12), 4294967297)
self.assertEqual(int('535a7988a', 13), 4294967297)
self.assertEqual(int('2ca5b7465', 14), 4294967297)
self.assertEqual(int('1a20dcd82', 15), 4294967297)
self.assertEqual(int('100000001', 16), 4294967297)
self.assertEqual(int('a7ffda92', 17), 4294967297)
self.assertEqual(int('704he7g5', 18), 4294967297)
self.assertEqual(int('4f5aff67', 19), 4294967297)
self.assertEqual(int('3723ai4h', 20), 4294967297)
self.assertEqual(int('281d55i5', 21), 4294967297)
self.assertEqual(int('1fj8b185', 22), 4294967297)
self.assertEqual(int('1606k7id', 23), 4294967297)
self.assertEqual(int('mb994ah', 24), 4294967297)
self.assertEqual(int('hek2mgm', 25), 4294967297)
self.assertEqual(int('dnchbnn', 26), 4294967297)
self.assertEqual(int('b28jpdn', 27), 4294967297)
self.assertEqual(int('8pfgih5', 28), 4294967297)
self.assertEqual(int('76beigh', 29), 4294967297)
self.assertEqual(int('5qmcpqh', 30), 4294967297)
self.assertEqual(int('4q0jto5', 31), 4294967297)
self.assertEqual(int('4000001', 32), 4294967297)
self.assertEqual(int('3aokq95', 33), 4294967297)
self.assertEqual(int('2qhxjlj', 34), 4294967297)
self.assertEqual(int('2br45qc', 35), 4294967297)
self.assertEqual(int('1z141z5', 36), 4294967297)
def test_conversion(self):
# Test __int__()
class ClassicMissingMethods:
pass
self.assertRaises(TypeError, int, ClassicMissingMethods())
class MissingMethods(object):
pass
self.assertRaises(TypeError, int, MissingMethods())
class Foo0:
def __int__(self):
return 42
class Foo1(object):
def __int__(self):
return 42
class Foo2(int):
def __int__(self):
return 42
class Foo3(int):
def __int__(self):
return self
class Foo4(int):
def __int__(self):
return 42
class Foo5(int):
def __int__(self):
return 42.
self.assertEqual(int(Foo0()), 42)
self.assertEqual(int(Foo1()), 42)
self.assertEqual(int(Foo2()), 42)
self.assertEqual(int(Foo3()), 0)
self.assertEqual(int(Foo4()), 42)
self.assertRaises(TypeError, int, Foo5())
class Classic:
pass
for base in (object, Classic):
class IntOverridesTrunc(base):
def __int__(self):
return 42
def __trunc__(self):
return -12
self.assertEqual(int(IntOverridesTrunc()), 42)
class JustTrunc(base):
def __trunc__(self):
return 42
self.assertEqual(int(JustTrunc()), 42)
class JustLong(base):
# test that __long__ no longer used in 3.x
def __long__(self):
return 42
self.assertRaises(TypeError, int, JustLong())
class LongTrunc(base):
# __long__ should be ignored in 3.x
def __long__(self):
return 42
def __trunc__(self):
return 1729
self.assertEqual(int(LongTrunc()), 1729)
for trunc_result_base in (object, Classic):
class Integral(trunc_result_base):
def __int__(self):
return 42
class TruncReturnsNonLong(base):
def __trunc__(self):
return Integral()
self.assertEqual(int(TruncReturnsNonLong()), 42)
class NonIntegral(trunc_result_base):
def __trunc__(self):
# Check that we avoid infinite recursion.
return NonIntegral()
class TruncReturnsNonIntegral(base):
def __trunc__(self):
return NonIntegral()
try:
int(TruncReturnsNonIntegral())
except TypeError as e:
self.assertEquals(str(e),
"__trunc__ returned non-Integral"
" (type NonIntegral)")
else:
self.fail("Failed to raise TypeError with %s" %
((base, trunc_result_base),))
def test_misc(self):
# check the extremes in int<->long conversion
hugepos = sys.maxsize
hugeneg = -hugepos - 1
hugepos_aslong = int(hugepos)
hugeneg_aslong = int(hugeneg)
self.assertEqual(hugepos, hugepos_aslong, "long(sys.maxsize) != sys.maxsize")
self.assertEqual(hugeneg, hugeneg_aslong,
"long(-sys.maxsize-1) != -sys.maxsize-1")
# long -> int should not fail for hugepos_aslong or hugeneg_aslong
x = int(hugepos_aslong)
try:
self.assertEqual(x, hugepos,
"converting sys.maxsize to long and back to int fails")
except OverflowError:
self.fail("int(long(sys.maxsize)) overflowed!")
        if not isinstance(x, int):
            self.fail("int(long(sys.maxsize)) should have returned int")
x = int(hugeneg_aslong)
try:
self.assertEqual(x, hugeneg,
"converting -sys.maxsize-1 to long and back to int fails")
except OverflowError:
self.fail("int(long(-sys.maxsize-1)) overflowed!")
        if not isinstance(x, int):
            self.fail("int(long(-sys.maxsize-1)) should have "
                      "returned int")
# but long -> int should overflow for hugepos+1 and hugeneg-1
x = hugepos_aslong + 1
try:
y = int(x)
except OverflowError:
self.fail("int(long(sys.maxsize) + 1) mustn't overflow")
self.assert_(isinstance(y, int),
"int(long(sys.maxsize) + 1) should have returned long")
x = hugeneg_aslong - 1
try:
y = int(x)
except OverflowError:
self.fail("int(long(-sys.maxsize-1) - 1) mustn't overflow")
self.assert_(isinstance(y, int),
"int(long(-sys.maxsize-1) - 1) should have returned long")
class long2(int):
pass
x = long2(1<<100)
y = int(x)
self.assert_(type(y) is int,
"overflowing int conversion must return long not long subtype")
# ----------------------------------- tests of auto int->long conversion
def test_auto_overflow(self):
import math, sys
special = [0, 1, 2, 3, sys.maxsize-1, sys.maxsize, sys.maxsize+1]
sqrt = int(math.sqrt(sys.maxsize))
special.extend([sqrt-1, sqrt, sqrt+1])
special.extend([-i for i in special])
def checkit(*args):
# Heavy use of nested scopes here!
self.assertEqual(got, expected,
Frm("for %r expected %r got %r", args, expected, got))
for x in special:
longx = int(x)
expected = -longx
got = -x
checkit('-', x)
for y in special:
longy = int(y)
expected = longx + longy
got = x + y
checkit(x, '+', y)
expected = longx - longy
got = x - y
checkit(x, '-', y)
expected = longx * longy
got = x * y
checkit(x, '*', y)
if y:
expected = longx / longy
got = x / y
checkit(x, '/', y)
expected = longx // longy
got = x // y
checkit(x, '//', y)
expected = divmod(longx, longy)
                    got = divmod(x, y)
checkit(x, 'divmod', y)
if abs(y) < 5 and not (x == 0 and y < 0):
expected = longx ** longy
got = x ** y
checkit(x, '**', y)
for z in special:
                    if z != 0:
if y >= 0:
expected = pow(longx, longy, int(z))
got = pow(x, y, z)
checkit('pow', x, y, '%', z)
else:
                            self.assertRaises(TypeError, pow, longx, longy, int(z))
def test_float_overflow(self):
import math
for x in -2.0, -1.0, 0.0, 1.0, 2.0:
self.assertEqual(float(int(x)), x)
shuge = '12345' * 120
huge = 1 << 30000
mhuge = -huge
namespace = {'huge': huge, 'mhuge': mhuge, 'shuge': shuge, 'math': math}
for test in ["float(huge)", "float(mhuge)",
"complex(huge)", "complex(mhuge)",
"complex(huge, 1)", "complex(mhuge, 1)",
"complex(1, huge)", "complex(1, mhuge)",
"1. + huge", "huge + 1.", "1. + mhuge", "mhuge + 1.",
"1. - huge", "huge - 1.", "1. - mhuge", "mhuge - 1.",
"1. * huge", "huge * 1.", "1. * mhuge", "mhuge * 1.",
"1. // huge", "huge // 1.", "1. // mhuge", "mhuge // 1.",
"1. / huge", "huge / 1.", "1. / mhuge", "mhuge / 1.",
"1. ** huge", "huge ** 1.", "1. ** mhuge", "mhuge ** 1.",
"math.sin(huge)", "math.sin(mhuge)",
"math.sqrt(huge)", "math.sqrt(mhuge)", # should do better
# math.floor() of an int returns an int now
##"math.floor(huge)", "math.floor(mhuge)",
]:
self.assertRaises(OverflowError, eval, test, namespace)
# XXX Perhaps float(shuge) can raise OverflowError on some box?
# The comparison should not.
self.assertNotEqual(float(shuge), int(shuge),
"float(shuge) should not equal int(shuge)")
def test_logs(self):
import math
LOG10E = math.log10(math.e)
for exp in list(range(10)) + [100, 1000, 10000]:
value = 10 ** exp
log10 = math.log10(value)
self.assertAlmostEqual(log10, exp)
# log10(value) == exp, so log(value) == log10(value)/log10(e) ==
# exp/LOG10E
expected = exp / LOG10E
log = math.log(value)
self.assertAlmostEqual(log, expected)
for bad in -(1 << 10000), -2, 0:
self.assertRaises(ValueError, math.log, bad)
self.assertRaises(ValueError, math.log10, bad)
def test_mixed_compares(self):
eq = self.assertEqual
import math
        # We're mostly concerned with checking that mixing floats and
        # longs does the right thing, even when longs are too large to
        # fit in a float.
# The safest way to check the results is to use an entirely different
# method, which we do here via a skeletal rational class (which
# represents all Python ints, longs and floats exactly).
class Rat:
def __init__(self, value):
if isinstance(value, int):
self.n = value
self.d = 1
elif isinstance(value, float):
# Convert to exact rational equivalent.
f, e = math.frexp(abs(value))
assert f == 0 or 0.5 <= f < 1.0
# |value| = f * 2**e exactly
# Suck up CHUNK bits at a time; 28 is enough so that we suck
# up all bits in 2 iterations for all known binary double-
# precision formats, and small enough to fit in an int.
CHUNK = 28
top = 0
# invariant: |value| = (top + f) * 2**e exactly
while f:
f = math.ldexp(f, CHUNK)
digit = int(f)
assert digit >> CHUNK == 0
top = (top << CHUNK) | digit
f -= digit
assert 0.0 <= f < 1.0
e -= CHUNK
# Now |value| = top * 2**e exactly.
if e >= 0:
n = top << e
d = 1
else:
n = top
d = 1 << -e
if value < 0:
n = -n
self.n = n
self.d = d
assert float(n) / float(d) == value
else:
raise TypeError("can't deal with %r" % val)
def _cmp__(self, other):
if not isinstance(other, Rat):
other = Rat(other)
x, y = self.n * other.d, self.d * other.n
return (x > y) - (x < y)
def __eq__(self, other):
return self._cmp__(other) == 0
def __ne__(self, other):
return self._cmp__(other) != 0
def __ge__(self, other):
return self._cmp__(other) >= 0
def __gt__(self, other):
return self._cmp__(other) > 0
def __le__(self, other):
return self._cmp__(other) <= 0
def __lt__(self, other):
return self._cmp__(other) < 0
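        # e.g. Rat(0.5) stores n=2**27, d=2**28 (exact, though not in
        # lowest terms); cross-multiplication in _cmp__ makes reduction
        # unnecessary, so Rat(0.5) < Rat(1) holds.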
cases = [0, 0.001, 0.99, 1.0, 1.5, 1e20, 1e200]
# 2**48 is an important boundary in the internals. 2**53 is an
# important boundary for IEEE double precision.
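        # e.g. 2.0**53 + 1.0 == 2.0**53 (the tie rounds to even), while
        # 2.0**52 + 1.0 != 2.0**52, since unit spacing is first lost at
        # 2**53.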
for t in 2.0**48, 2.0**50, 2.0**53:
cases.extend([t - 1.0, t - 0.3, t, t + 0.3, t + 1.0,
int(t-1), int(t), int(t+1)])
cases.extend([0, 1, 2, sys.maxsize, float(sys.maxsize)])
# 1L<<20000 should exceed all double formats. long(1e200) is to
# check that we get equality with 1e200 above.
t = int(1e200)
cases.extend([0, 1, 2, 1 << 20000, t-1, t, t+1])
cases.extend([-x for x in cases])
for x in cases:
Rx = Rat(x)
for y in cases:
Ry = Rat(y)
Rcmp = (Rx > Ry) - (Rx < Ry)
xycmp = (x > y) - (x < y)
eq(Rcmp, xycmp, Frm("%r %r %d %d", x, y, Rcmp, xycmp))
eq(x == y, Rcmp == 0, Frm("%r == %r %d", x, y, Rcmp))
eq(x != y, Rcmp != 0, Frm("%r != %r %d", x, y, Rcmp))
eq(x < y, Rcmp < 0, Frm("%r < %r %d", x, y, Rcmp))
eq(x <= y, Rcmp <= 0, Frm("%r <= %r %d", x, y, Rcmp))
eq(x > y, Rcmp > 0, Frm("%r > %r %d", x, y, Rcmp))
eq(x >= y, Rcmp >= 0, Frm("%r >= %r %d", x, y, Rcmp))
def test__format__(self):
self.assertEqual(format(123456789, 'd'), '123456789')
self.assertEqual(format(123456789, 'd'), '123456789')
# sign and aligning are interdependent
self.assertEqual(format(1, "-"), '1')
self.assertEqual(format(-1, "-"), '-1')
self.assertEqual(format(1, "-3"), ' 1')
self.assertEqual(format(-1, "-3"), ' -1')
self.assertEqual(format(1, "+3"), ' +1')
self.assertEqual(format(-1, "+3"), ' -1')
self.assertEqual(format(1, " 3"), ' 1')
self.assertEqual(format(-1, " 3"), ' -1')
self.assertEqual(format(1, " "), ' 1')
self.assertEqual(format(-1, " "), '-1')
# hex
self.assertEqual(format(3, "x"), "3")
self.assertEqual(format(3, "X"), "3")
self.assertEqual(format(1234, "x"), "4d2")
self.assertEqual(format(-1234, "x"), "-4d2")
self.assertEqual(format(1234, "8x"), " 4d2")
self.assertEqual(format(-1234, "8x"), " -4d2")
self.assertEqual(format(1234, "x"), "4d2")
self.assertEqual(format(-1234, "x"), "-4d2")
self.assertEqual(format(-3, "x"), "-3")
self.assertEqual(format(-3, "X"), "-3")
self.assertEqual(format(int('be', 16), "x"), "be")
self.assertEqual(format(int('be', 16), "X"), "BE")
self.assertEqual(format(-int('be', 16), "x"), "-be")
self.assertEqual(format(-int('be', 16), "X"), "-BE")
        # binary
self.assertEqual(format(3, "b"), "11")
self.assertEqual(format(-3, "b"), "-11")
self.assertEqual(format(1234, "b"), "10011010010")
self.assertEqual(format(-1234, "b"), "-10011010010")
self.assertEqual(format(1234, "-b"), "10011010010")
self.assertEqual(format(-1234, "-b"), "-10011010010")
self.assertEqual(format(1234, " b"), " 10011010010")
self.assertEqual(format(-1234, " b"), "-10011010010")
self.assertEqual(format(1234, "+b"), "+10011010010")
self.assertEqual(format(-1234, "+b"), "-10011010010")
# make sure these are errors
self.assertRaises(ValueError, format, 3, "1.3") # precision disallowed
self.assertRaises(ValueError, format, 3, "+c") # sign not allowed
# with 'c'
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, format, 0, format_spec)
self.assertRaises(ValueError, format, 1, format_spec)
self.assertRaises(ValueError, format, -1, format_spec)
self.assertRaises(ValueError, format, 2**100, format_spec)
self.assertRaises(ValueError, format, -(2**100), format_spec)
# ensure that float type specifiers work; format converts
# the int to a float
for format_spec in 'eEfFgG%':
for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]:
self.assertEqual(format(value, format_spec),
format(float(value), format_spec))
def test_nan_inf(self):
self.assertRaises(OverflowError, int, float('inf'))
self.assertRaises(OverflowError, int, float('-inf'))
self.assertRaises(ValueError, int, float('nan'))
def test_true_division(self):
huge = 1 << 40000
mhuge = -huge
self.assertEqual(huge / huge, 1.0)
self.assertEqual(mhuge / mhuge, 1.0)
self.assertEqual(huge / mhuge, -1.0)
self.assertEqual(mhuge / huge, -1.0)
self.assertEqual(1 / huge, 0.0)
self.assertEqual(1 / huge, 0.0)
self.assertEqual(1 / mhuge, 0.0)
self.assertEqual(1 / mhuge, 0.0)
self.assertEqual((666 * huge + (huge >> 1)) / huge, 666.5)
self.assertEqual((666 * mhuge + (mhuge >> 1)) / mhuge, 666.5)
self.assertEqual((666 * huge + (huge >> 1)) / mhuge, -666.5)
self.assertEqual((666 * mhuge + (mhuge >> 1)) / huge, -666.5)
self.assertEqual(huge / (huge << 1), 0.5)
self.assertEqual((1000000 * huge) / huge, 1000000)
namespace = {'huge': huge, 'mhuge': mhuge}
for overflow in ["float(huge)", "float(mhuge)",
"huge / 1", "huge / 2", "huge / -1", "huge / -2",
"mhuge / 100", "mhuge / 200"]:
self.assertRaises(OverflowError, eval, overflow, namespace)
for underflow in ["1 / huge", "2 / huge", "-1 / huge", "-2 / huge",
"100 / mhuge", "200 / mhuge"]:
result = eval(underflow, namespace)
self.assertEqual(result, 0.0,
"expected underflow to 0 from %r" % underflow)
for zero in ["huge / 0", "mhuge / 0"]:
self.assertRaises(ZeroDivisionError, eval, zero, namespace)
def test_small_ints(self):
for i in range(-5, 257):
self.assertTrue(i is i + 0)
self.assertTrue(i is i * 1)
self.assertTrue(i is i - 0)
self.assertTrue(i is i // 1)
self.assertTrue(i is i & -1)
self.assertTrue(i is i | 0)
self.assertTrue(i is i ^ 0)
self.assertTrue(i is ~~i)
self.assertTrue(i is i**1)
self.assertTrue(i is int(str(i)))
self.assertTrue(i is i<<2>>2, str(i))
# corner cases
i = 1 << 70
self.assertTrue(i - i is 0)
self.assertTrue(0 * i is 0)
def test_round(self):
# check round-half-even algorithm. For round to nearest ten;
# rounding map is invariant under adding multiples of 20
test_dict = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0,
6:10, 7:10, 8:10, 9:10, 10:10, 11:10, 12:10, 13:10, 14:10,
15:20, 16:20, 17:20, 18:20, 19:20}
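        # e.g. round(15, -1) == 20 and round(25, -1) == 20: halfway
        # cases go to the even multiple of ten, as the map encodes.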
for offset in range(-520, 520, 20):
for k, v in test_dict.items():
got = round(k+offset, -1)
expected = v+offset
self.assertEqual(got, expected)
self.assert_(type(got) is int)
# larger second argument
self.assertEqual(round(-150, -2), -200)
self.assertEqual(round(-149, -2), -100)
self.assertEqual(round(-51, -2), -100)
self.assertEqual(round(-50, -2), 0)
self.assertEqual(round(-49, -2), 0)
self.assertEqual(round(-1, -2), 0)
self.assertEqual(round(0, -2), 0)
self.assertEqual(round(1, -2), 0)
self.assertEqual(round(49, -2), 0)
self.assertEqual(round(50, -2), 0)
self.assertEqual(round(51, -2), 100)
self.assertEqual(round(149, -2), 100)
self.assertEqual(round(150, -2), 200)
self.assertEqual(round(250, -2), 200)
self.assertEqual(round(251, -2), 300)
self.assertEqual(round(172500, -3), 172000)
self.assertEqual(round(173500, -3), 174000)
self.assertEqual(round(31415926535, -1), 31415926540)
self.assertEqual(round(31415926535, -2), 31415926500)
self.assertEqual(round(31415926535, -3), 31415927000)
self.assertEqual(round(31415926535, -4), 31415930000)
self.assertEqual(round(31415926535, -5), 31415900000)
self.assertEqual(round(31415926535, -6), 31416000000)
self.assertEqual(round(31415926535, -7), 31420000000)
self.assertEqual(round(31415926535, -8), 31400000000)
self.assertEqual(round(31415926535, -9), 31000000000)
self.assertEqual(round(31415926535, -10), 30000000000)
self.assertEqual(round(31415926535, -11), 0)
self.assertEqual(round(31415926535, -12), 0)
self.assertEqual(round(31415926535, -999), 0)
# should get correct results even for huge inputs
for k in range(10, 100):
got = round(10**k + 324678, -3)
expect = 10**k + 325000
self.assertEqual(got, expect)
self.assert_(type(got) is int)
# nonnegative second argument: round(x, n) should just return x
for n in range(5):
for i in range(100):
x = random.randrange(-10000, 10000)
got = round(x, n)
self.assertEqual(got, x)
self.assert_(type(got) is int)
for huge_n in 2**31-1, 2**31, 2**63-1, 2**63, 2**100, 10**100:
self.assertEqual(round(8979323, huge_n), 8979323)
# omitted second argument
for i in range(100):
x = random.randrange(-10000, 10000)
got = round(x)
self.assertEqual(got, x)
self.assert_(type(got) is int)
# bad second argument
bad_exponents = ('brian', 2.0, 0j, None)
for e in bad_exponents:
self.assertRaises(TypeError, round, 3, e)
def test_main():
support.run_unittest(LongTest)
if __name__ == "__main__":
test_main()
|
mancoast/CPythonPyc_test
|
fail/301_test_long.py
|
Python
|
gpl-3.0
| 38,676
|
[
"Brian"
] |
7a80de02a496a0712357a0227a5f0a978d97edd1c34450b7308ce1c9707cd406
|
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright (C) 2015 National Institutes of Health
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Written by: Christopher Coletta (github.com/colettace)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
import numpy as np
from .utils import output_railroad_switch
from .FeatureSpacePredictionExperiment import FeatureSpaceClassificationExperiment
#============================================================================
class _BaseGraph( object ):
"""An abstract base class that is supposed to hold onto objects on which to call
matplotlib.pyplot API methods."""
def __init__( self ):
# general stuff:
self.chart_title = None
self.file_name = None
self.split_result = None
# pyplot-specific stuff
self.figure = None
self.main_axes = None
def SaveToFile( self, filepath ):
if self.figure is None:
raise ValueError( 'No figure to save!' )
self.figure.savefig( filepath )
print 'Wrote chart "{0}" to file "{1}"'.format( self.chart_title, filepath )
#============================================================================
class PredictedValuesGraph( _BaseGraph ):
"""This is a concrete class that can produce two types of graphs that are produced
from SingleSamplePrediction data stored in a FeatureSpacePrediction."""
#=================================================================
def __init__( self, result, name=None, use_averaged_results=True ):
"""Constructor sorts ground truth values contained in FeatureSpacePrediction
and loads them into self.grouped_coords
use_averaged_results - bool - If this object has averaged results (due to tiling or
"per sample" aggregation across splits, use those results instead of
individual results."""
#FIXME: implement user-definable bin edges
self.split_result = result
if name is None:
name = result.name
self.chart_title = name
gt_vals, pred_vals = result.RankOrderSort( use_averaged_results=use_averaged_results )
whole_list = zip( gt_vals, pred_vals )
self.grouped_coords = {}
if result.test_set.discrete:
self.class_names = result.test_set.class_names
self.class_values = result.test_set.interpolation_coefficients
self.num_classes = result.test_set.num_classes
for class_val, class_name in zip( self.class_values, self.class_names ):
self.grouped_coords[ class_name ] = \
[ xy for xy in whole_list if xy[0] == class_val ]
else:
class_name = result.test_set.name
self.class_names = [ class_name ]
self.class_values = [ 1 ]
self.num_classes = 1
self.grouped_coords[ class_name ] = whole_list
_min = min( self.class_values )
ampl = max( self.class_values ) - _min
if ampl == 0:
# Single class: avoid dividing by zero when normalizing colors.
ampl = 1
import matplotlib.pyplot as plt
self.class_colors = plt.cm.jet( [ float(val - _min)/ampl for val in self.class_values ] )
#=====================================================================
@classmethod
@output_railroad_switch
def NewFromHTMLReport( cls, filepath, use_averaged_results=True ):
"""Helper function to facilitate the fast generation of graphs from C++-generated
HTML Report files."""
exp = FeatureSpaceClassificationExperiment.NewFromHTMLReport( filepath )
exp.GenerateStats()
exp.PerSampleStatistics( quiet=True )
newgraphobj = cls( exp, use_averaged_results=use_averaged_results )
return newgraphobj
#=====================================================================
def RankOrderedPredictedValuesGraph( self, chart_title=None ):
"""This graph visualizes the distribution of predicted values generated by classification.
For each individual ImageClassification with ground truth value (i.e., class id) and
predicted value, all results are grouped within their class, sorted by predicted value
in ascending order, then ploted side-by-side.
Required the package matplotlib to be installed."""
print "Rendering rank-ordered predicted values graph"
import matplotlib
# Need following line to generate images on servers, see
# http://matplotlib.org/faq/howto_faq.html#generate-images-without-having-a-window-appear
matplotlib.use('Agg')
import matplotlib.pyplot as plt
self.figure = plt.figure( figsize=(20,20) )
self.main_axes = self.figure.add_subplot(111)
if chart_title:
self.chart_title = chart_title
self.main_axes.set_title( self.chart_title )
self.main_axes.set_xlabel( 'count' )
self.main_axes.set_ylabel( 'Predicted Value Scores' )
abscissa_index = 1
for class_name, class_color in zip( self.class_names, self.class_colors ):
ground_truth_vals, predicted_vals = zip( *self.grouped_coords[ class_name ] )
x_vals = [ i + abscissa_index for i in range( len( ground_truth_vals ) ) ]
self.main_axes.scatter( x_vals, predicted_vals, c=class_color, marker='o',
s=150, edgecolor='none', label=class_name )
abscissa_index += len( ground_truth_vals )
self.main_axes.legend( loc = 'lower right')
return self.figure
#=====================================================================
def KernelSmoothedDensityGraph( self, chart_title=None ):
"""This graph visualizes the distribution of predicted values generated by classification.
A kernel-smoothed probability density function is plotted for each image class on
the same chart allowing comparison of distribution of predicted values amoung image class.
Requires the packages matplotlib and scipy. Uses scipy.stats.gaussian_kde to
generate kernel-smoothed probability density functions."""
print "Rendering kernel-smoothed probability density estimate graph"
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
self.figure = plt.figure( figsize=(20,20) )
self.main_axes = self.figure.add_subplot(111)
if chart_title:
self.chart_title = chart_title
self.main_axes.set_title( self.chart_title )
self.main_axes.set_xlabel( 'Age score' )
self.main_axes.set_ylabel( 'Probability density' )
from scipy.stats import gaussian_kde
for class_name, class_color in zip( self.class_names, self.class_colors ):
ground_truth_vals, predicted_vals = zip( *self.grouped_coords[ class_name ] )
pred_vals = np.array( predicted_vals )
lobound = pred_vals.min()
hibound = pred_vals.max()
kernel_smoother = gaussian_kde( pred_vals )
intervals = np.mgrid[ lobound:hibound:100j ]
density_estimates = kernel_smoother.evaluate( intervals )
self.main_axes.plot( intervals, density_estimates, c=class_color,
linewidth=6, label=class_name )
self.main_axes.legend()
return self.figure
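# Usage sketch (editor's addition; the report file name is hypothetical):
# graph = PredictedValuesGraph.NewFromHTMLReport( 'classification_report.html' )
# graph.KernelSmoothedDensityGraph( 'Predicted value densities' )
# graph.SaveToFile( 'densities.png' )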
#============================================================================
class FeatureTimingVersusAccuracyGraph( _BaseGraph ):
"""A cost/benefit analysis of the number of features used and the time it takes to calculate
that number of features for a single image"""
#FIXME: Add ability to do the first 50 or 100 features, make the graph, then
# ability to resume from where it left off to do the next 50.
def __init__( self, training_set, feature_weights, test_image_path,
chart_title=None, max_num_features=300 ):
self.timing_axes = None
import time
timings = []
from wndcharm.FeatureSpacePredictionExperiment import FeatureSpaceClassificationExperiment
from wndcharm.SingleSamplePrediction import SingleSampleClassification
from wndcharm.FeatureSpacePrediction import FeatureSpaceClassification
# FeatureVector is referenced below but was never imported (assumed module path):
from wndcharm.FeatureVector import FeatureVector
experiment = FeatureSpaceClassificationExperiment( training_set, training_set, feature_weights )
for number_of_features_to_use in range( 1, max_num_features + 1 ):
reduced_ts = None
reduced_fw = None
three_timings = []
# Take the best of 3
for timing in range( 3 ):
# Time the creation and classification of a single signature
t1 = time.time()
reduced_fw = feature_weights.Threshold( number_of_features_to_use )
sig = FeatureVector( source_filepath=test_image_path, feature_names=reduced_fw.feature_names ).GenerateFeatures()
reduced_ts = training_set.FeatureReduce( reduced_fw )
sig.Normalize( reduced_ts )
result = SingleSampleClassification.NewWND5( reduced_ts, reduced_fw, sig )
result.Print()
# FIXME: save intermediates just in case of interruption or parallelization
# result.PickleMe()
t2 = time.time()
three_timings.append( t2 - t1 )
timings.append( min( three_timings ) )
# now, do a fit-on-fit test to measure classification accuracy
split_result = FeatureSpaceClassification.NewWND5( reduced_ts, reduced_ts, reduced_fw )
split_result.Print()
experiment.individual_results.append( split_result )
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
x_vals = list( range( 1, max_num_features + 1 ) )
self.figure = plt.figure()
self.main_axes = self.figure.add_subplot(111)
if chart_title is None:
self.chart_title = "Feature timing v. classification accuracy"
else:
self.chart_title = chart_title
self.main_axes.set_title( self.chart_title )
self.main_axes.set_xlabel( 'Number of features' )
self.main_axes.set_ylabel( 'Classification accuracy (%)', color='b' )
classification_accuracies = \
[ split_result.classification_accuracy * 100 for split_result in experiment.individual_results ]
self.main_axes.plot( x_vals, classification_accuracies, color='b', linewidth=2 )
for tl in self.main_axes.get_yticklabels():
tl.set_color('b')
self.timing_axes = self.main_axes.twinx()
self.timing_axes.set_ylabel( 'Time to calculate features (s)', color='r' )
self.timing_axes.plot( x_vals, timings, color='r' )
for tl in self.timing_axes.get_yticklabels():
tl.set_color('r')
#============================================================================
class AccuracyVersusNumFeaturesGraph( _BaseGraph ):
"""Graphing the figure of merit a a function of number of features"""
# FIXME: roll this class into FeatureTimingVersusAccuracyGraph, allowing
# both Discrete and continuous data
def __init__( self, training_set, feature_weights, chart_title=None, min_num_features=1, max_num_features=None, step=5, y_min=None, y_max=None, quiet=False):
from wndcharm.FeatureSpacePredictionExperiment import FeatureSpaceRegressionExperiment
from wndcharm.SingleSamplePrediction import SingleSampleRegression
from wndcharm.FeatureSpacePrediction import FeatureSpaceRegression
ls_experiment = FeatureSpaceRegressionExperiment( training_set, training_set, feature_weights, name="Least Squares Regression Method")
voting_experiment = FeatureSpaceRegressionExperiment( training_set, training_set, feature_weights, name="Voting Method")
if max_num_features is None:
max_num_features = len( feature_weights )
x_vals = range( min_num_features, max_num_features + 1, step )
for number_of_features_to_use in x_vals:
reduced_fw = feature_weights.Threshold( number_of_features_to_use )
reduced_ts = training_set.FeatureReduce( reduced_fw )
if not quiet:
reduced_fw.Print()
ls_split_result = FeatureSpaceRegression.NewLeastSquares( reduced_ts, None, reduced_fw, split_number=number_of_features_to_use, quiet=quiet )
if not quiet:
ls_split_result.Print()
ls_experiment.individual_results.append( ls_split_result )
voting_split_result = FeatureSpaceRegression.NewMultivariateLinear( reduced_ts, reduced_fw, split_number=number_of_features_to_use )
if not quiet:
voting_split_result.Print()
voting_experiment.individual_results.append( voting_split_result )
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
self.figure = plt.figure( figsize=(12, 8) )
self.main_axes = self.figure.add_subplot(111)
if chart_title is None:
self.chart_title = "R vs. num features, two methods"
else:
self.chart_title = chart_title
# need to make axes have same range
ls_yvals = [ split_result.std_err for split_result in ls_experiment.individual_results ]
voting_yvals = [ split_result.std_err for split_result in voting_experiment.individual_results ]
min_ls_yval = min(ls_yvals)
optimal_num_feats_ls = x_vals[ ls_yvals.index( min_ls_yval ) ] # map index back through x_vals, honouring min_num_features and step
min_voting_yval = min(voting_yvals)
optimal_num_feats_voting = x_vals[ voting_yvals.index( min_voting_yval ) ]
all_vals = ls_yvals + voting_yvals
if y_min is not None:
try:
y_min = float(y_min)
except (TypeError, ValueError):
raise ValueError( "Can't convert {0} to float".format(y_min))
_min = y_min
else:
_min = min( all_vals )
if y_max is not None:
try:
y_max = float(y_max)
except (TypeError, ValueError):
raise ValueError( "Can't convert {0} to float".format(y_max))
_max = y_max
else:
_max = max( all_vals )
# Plot least Squares Data
self.main_axes.set_title( self.chart_title )
self.main_axes.set_xlabel( 'Number of features' )
self.main_axes.set_ylabel( 'RMS Least Squares Method', color='b' )
self.main_axes.set_ylim( [_min, _max ] )
self.main_axes.plot( x_vals, ls_yvals, color='b', marker='o', linestyle='--' )
for tl in self.main_axes.get_yticklabels():
tl.set_color('b')
self.main_axes.annotate( 'min RMS={0:.3f} @ {1}'.format(min_ls_yval, optimal_num_feats_ls),
color='b',
xy=( optimal_num_feats_ls, min_ls_yval ),
xytext=( optimal_num_feats_ls, 0.8 * _max ),
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right' )
# Plot Voting method data
self.timing_axes = self.main_axes.twinx()
self.timing_axes.set_ylabel( 'RMS Voting Method', color='r' )
self.timing_axes.set_ylim( [_min, _max ] )
self.timing_axes.plot( x_vals, voting_yvals, color='r', marker='o', linestyle='--' )
for tl in self.timing_axes.get_yticklabels():
tl.set_color('r')
self.timing_axes.annotate( 'min RMS={0:.3f} @ {1}'.format(min_voting_yval, optimal_num_feats_voting),
color='r',
xy=( optimal_num_feats_voting, min_voting_yval ),
xytext=( optimal_num_feats_voting, 0.6 * _max ),
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right' )
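# Usage sketch (editor's addition; arguments are placeholders for real
# feature space / feature weights objects):
# graph = AccuracyVersusNumFeaturesGraph( training_set, feature_weights, step=5 )
# graph.SaveToFile( 'rms_vs_num_features.png' )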
#============================================================================
#class Dendrogram( object ):
# """Not implemented. In the future might use scipy.cluster (no unrooted dendrograms though!)
# or Biopython.Phylo to visualize. Perhaps could continue C++ implementation's use of PHYLIP's
# Fitch-Margoliash program "fitch" to generate Newick phylogeny, and visualize using
# native python tools."""
# pass
|
cnerger/wnd-charm
|
wndcharm/visualization.py
|
Python
|
lgpl-2.1
| 17,763
|
[
"Biopython"
] |
d49d0b7a078852369f89380239e32c8e1d40a0ea9e466475804f0f0d6a92dd99
|
from numpy import *
def loadDataSet(fileName):
# exclude the last column, which holds the label.
numFeat = len(open(fileName).readline().split('\t')) - 1
dataMat = []
labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = []
curLine = line.strip().split('\t')
for i in range(numFeat):
lineArr.append(float(curLine[i]))
dataMat.append(lineArr)
labelMat.append(float(curLine[-1]))
return dataMat, labelMat
# w = (X.T * X) ^ -1 * X.T * y
def standRegres(xArr, yArr):
xMat = mat(xArr)
yMat = mat(yArr).T
xTx = xMat.T*xMat
if linalg.det(xTx) == 0.0:
print("This matrix is singular, connot do inverse")
return
ws = xTx.I * (xMat.T*yMat)
return ws
# w = (X.T * W * X) ^ -1 * X.T * W * y
# weighted data points.
# Gaussian kernel:
# w(i, i) = exp(-|x(i) - x|^2 / (2 * k^2))
# the farther a data point is from the test point, the lower its weight.
def lwlr(testPoint, xArr, yArr, k = 1.0):
xMat = mat(xArr)
yMat = mat(yArr).T
m = shape(xMat)[0]
weights = mat(eye((m)))
for j in range(m):
diffMat = testPoint - xMat[j,:] # difference between the test point and the j-th training point
weights[j, j] = exp(diffMat*diffMat.T/(-2.0*k**2))
xTx = xMat.T * (weights * xMat)
if linalg.det(xTx) == 0.0:
print("This matrix is singular, cannot do inverse")
return
ws = xTx.I * (xMat.T * (weights * yMat))
return testPoint * ws
def lwlrTest(testArr, xArr, yArr, k=1.0):
m = shape(testArr)[0]
yHat = zeros(m)
for i in range(m):
yHat[i] = lwlr(testArr[i], xArr, yArr, k)
return yHat
def rssError(yArr, yHatArr):
return ((yArr - yHatArr) ** 2).sum()
# w = (X.T * X + lambda * I) ^ -1 * X.T * y
# Ridge regression adds an additional matrix (lambda * I) to the matrix (X.T * X) so that it’s non-singular,
# and we can take the inverse of the whole thing: (X.T * X + lambda * I)
# Ridge regression was originally developed to deal with the problem of having more
# features than data points. But it can also be used to add bias into our estimations, giving us a better estimate.
def ridgeRegres(xMat, yMat, lam = 0.2):
xTx = xMat.T * xMat
denom = xTx + eye(shape(xMat)[1]) * lam
if linalg.det(denom) == 0.0:
print("This matrix is singular, cannot do inverse")
return
ws = denom.I * (xMat.T * yMat)
return ws
def ridgeTest(xArr, yArr):
xMat = mat(xArr)
yMat = mat(yArr).T
yMean = mean(yMat, 0)
yMat = yMat - yMean
xMeans = mean(xMat, 0)
xVar = var(xMat, 0)
xMat = (xMat - xMeans) / xVar
numTestPts = 30
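# sweep the regularization strength lambda on a log scale, exp(-10) through exp(19)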
wMat = zeros((numTestPts, shape(xMat)[1]))
for i in range(numTestPts):
ws = ridgeRegres(xMat, yMat, exp(i-10))
wMat[i,:] = ws.T
return wMat
xArr, yArr = loadDataSet('ex0.txt')
print(xArr)
print(yArr)
ws = standRegres(xArr, yArr)
print(ws)
xMat = mat(xArr)
yMat = mat(yArr)
yHat = xMat * ws
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xMat[:, 1].flatten().A[0], yMat.T[:, 0].flatten().A[0])
xCopy = xMat.copy()
xCopy.sort(0)
yHat = xCopy*ws
ax.plot(xCopy[:, 1], yHat)
plt.show()
xArr,yArr=loadDataSet('ex0.txt')
print(yArr[0])
print(lwlr(xArr[0],xArr,yArr,1.0))
print(lwlr(xArr[0],xArr,yArr,0.001))
yHat = lwlrTest(xArr, xArr, yArr,0.003)
xMat = mat(xArr)
srtInd = xMat[:, 1].argsort(0)
xSort = xMat[srtInd][:,0,:]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xSort[:, 1], yHat[srtInd])
ax.scatter(xMat[:, 1].flatten().A[0], mat(yArr).T.flatten().A[0], s = 2, c = 'red')
plt.show()
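# Editor's addition: rssError (defined above) reduces the fit to one number;
# comparing kernel widths on the training set shows how a small k chases
# noise (lower training error here, but likely overfitting on unseen data).
print(rssError(array(yArr), lwlrTest(xArr, xArr, yArr, 1.0)))
print(rssError(array(yArr), lwlrTest(xArr, xArr, yArr, 0.01)))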
abX, abY = loadDataSet('abalone.txt')
ridgeWeights = ridgeTest(abX, abY)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(ridgeWeights)
plt.show()
|
zhuango/python
|
machine-learning-algorithms/mlalg/linearRegression/regression.py
|
Python
|
gpl-2.0
| 3,767
|
[
"Gaussian"
] |
3edc8bdf7c6c596bc970cb05b24f8b45dd42b09d46db8b00619d8d338dc66ec1
|
# -*- coding: utf-8 -*-
#
# testiaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
IAF neuron example
------------------
A DC current is injected into the neuron using a current generator
device. The membrane potential as well as the spiking activity are
recorded by corresponding devices.
It can be observed how the current charges the membrane, a spike
is emitted, the neuron becomes absolute refractory, and finally
starts to recover.
"""
###############################################################################
# First, we import all necessary modules for simulation and plotting
import nest
import matplotlib.pyplot as plt
###############################################################################
# Second, the function ``build_network`` is defined to build the network and
# return the handles of the ``spike_recorder`` and the ``voltmeter``. The
# function takes the simulation resolution as argument
#
# The function first resets the simulation kernel and sets the number of
# threads and the simulation resolution. The ``iaf_psc_alpha`` neuron is
# created and the handle is stored in the variable `neuron`. The status of
# the neuron is changed so it receives an external current. Next a
# ``voltmeter`` and a ``spike_recorder`` are created and their handles stored
# in the variables `vm` and `sr` respectively.
#
# The voltmeter and spike recorder are then connected to the neuron. ``Connect``
# takes the device and neuron handles as input. The voltmeter is connected to the
# neuron and the neuron to the spike recorder because the neuron sends spikes
# to the recorder and the voltmeter 'observes' the neuron.
def build_network(dt):
nest.ResetKernel()
nest.SetKernelStatus({"local_num_threads": 1, "resolution": dt})
neuron = nest.Create('iaf_psc_alpha')
neuron.I_e = 376.0
vm = nest.Create('voltmeter')
sr = nest.Create('spike_recorder')
nest.Connect(vm, neuron)
nest.Connect(neuron, sr)
return vm, sr
###############################################################################
# The neuron is simulated for three different resolutions and then the
# voltage trace is plotted
for dt in [0.1, 0.5, 1.0]:
print(f"Running simulation with dt={dt:.2f}")
vm, sr = build_network(dt)
nest.Simulate(1000.0)
###########################################################################
# The network is simulated using ``Simulate``, which takes the desired
# simulation time in milliseconds and advances the network state by this
# amount of time. During simulation, the ``spike_recorder`` counts the
# spikes of the target neuron and the total number is read out at the
# end of the simulation period.
#
# The values of the voltage recorded by the voltmeter are read out and
# the values for the membrane potential are stored in potential and the
# corresponding times in the times array
potentials = vm.get('events', 'V_m')
times = vm.get('events', 'times')
###########################################################################
# Using the matplotlib library the voltage trace is plotted over time
plt.plot(times, potentials, label=f"dt={dt:.2f}")
print(f" Number of spikes: {sr.n_events}")
###########################################################################
# Finally the axes are labelled and a legend is generated
plt.legend(loc=3)
plt.xlabel("time (ms)")
plt.ylabel("V_m (mV)")
plt.show()
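# Editor's note: running this script overlays the three V_m traces
# (dt = 0.1, 0.5, 1.0 ms) in a single figure and prints each run's spike count.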
|
lekshmideepu/nest-simulator
|
pynest/examples/testiaf.py
|
Python
|
gpl-2.0
| 4,078
|
[
"NEURON"
] |
4f2e5b00fa7e48fecff38cf4534b8ef395a694cd1e2c8170bee3bb7cae74df6a
|
'''
PathwayGenie (c) University of Manchester 2017
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-many-arguments
import sys
from assembly_genie.assembly import AssemblyThread, _AMPLIGASE, \
_LCR_MASTERMIX, _WATER
class LcrThread(AssemblyThread):
'''Class implementing AssemblyGenie algorithms.'''
def run(self):
'''Exports recipes.'''
pools = self._get_pools()
# Write plates:
self._comp_well.update(self._write_plate('MastermixTrough',
[[_WATER],
[_LCR_MASTERMIX]]))
self._comp_well.update(self._write_plate('components',
self.get_order()
+ [[_AMPLIGASE]]))
# Write domino pools worklist:
self._comp_well.update(
self._write_dom_pool_worklist(pools, 'domino_pools', 3))
# Write LCR worklist:
self.__write_lcr_worklist('lcr', pools)
def __write_lcr_worklist(self, dest_plate_id, pools):
'''Writes LCR worklist.'''
self._write_worklist_header(dest_plate_id)
def_reagents = {_LCR_MASTERMIX: 7.0, _AMPLIGASE: 1.5}
# Write water (special case: appears in many wells to optimise
# dispensing efficiency):
part_vol = 1
self.__write_water_worklist(dest_plate_id, pools, 15.5, part_vol)
self.__write_parts_worklist(dest_plate_id, pools, part_vol)
self.__write_dom_pools_worklist(dest_plate_id, 1)
self.__write_default_reag_worklist(dest_plate_id, def_reagents)
def __write_water_worklist(self, dest_plate_id, pools, total, part_vol):
'''Write water worklist.'''
worklist = []
for dest_idx, ice_id in enumerate(self._ice_ids):
well = self._comp_well[_WATER][dest_idx]
h2o_vol = total - \
(len(pools[ice_id]['backbone']) +
len(pools[ice_id]['parts'])) * part_vol
# Write water:
worklist.append([dest_plate_id, dest_idx, well[1],
well[0], str(h2o_vol),
_WATER, _WATER, '',
ice_id])
self._write_worklist(dest_plate_id, worklist)
def __write_parts_worklist(self, dest_plate_id, pools, part_vol):
'''Write parts worklist.'''
worklist = []
for dest_idx, ice_id in enumerate(self._ice_ids):
# Write backbone:
for comp in pools[ice_id]['backbone']:
well = self._comp_well[comp[1]]
worklist.append([dest_plate_id, dest_idx, well[1],
well[0], str(part_vol),
comp[2], comp[5], comp[1],
ice_id])
# Write parts:
for comp in pools[ice_id]['parts']:
well = self._comp_well[comp[1]]
worklist.append([dest_plate_id, dest_idx, well[1],
well[0], str(1),
comp[2], comp[5], comp[1],
ice_id])
self._write_worklist(dest_plate_id, worklist)
def __write_dom_pools_worklist(self, dest_plate_id, vol):
'''Write domino pools worklist.'''
worklist = []
for dest_idx, ice_id in enumerate(self._ice_ids):
well = self._comp_well[ice_id + '_domino_pool']
worklist.append([dest_plate_id, dest_idx, well[1],
well[0], str(vol),
'domino pool', 'domino pool', '',
ice_id])
self._write_worklist(dest_plate_id, worklist)
def __write_default_reag_worklist(self, dest_plate_id, def_reagents):
'''Write default reagents worklist.'''
worklist = []
for dest_idx, ice_id in enumerate(self._ice_ids):
for reagent, vol in def_reagents.iteritems():
well = self._comp_well[reagent]
worklist.append([dest_plate_id, dest_idx, well[1],
well[0], str(vol),
reagent, reagent, '',
ice_id])
self._write_worklist(dest_plate_id, worklist)
def main(args):
'''main method.'''
thread = LcrThread({'ice': {'url': args[0],
'username': args[1],
'password': args[2]},
'ice_ids': args[3:]})
thread.run()
if __name__ == '__main__':
main(sys.argv[1:])
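# Usage sketch (editor's addition; URL, credentials and ICE ids are placeholders):
# python lcr.py https://ice.example.org username password SBC000001 SBC000002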
|
synbiochem/PathwayGenie
|
assembly_genie/lcr.py
|
Python
|
mit
| 4,796
|
[
"VisIt"
] |
aae3c732cb0c2979d28f72487a788864da9a3f025fa33295e0915c6374b70604
|
from django.conf.urls import patterns, url
from django.views.generic.detail import DetailView
from tasm import views, plotting
from tasm.models import RefSeq, Locus, Transcript
from tasm.forms import TranscriptFilterForm, RefSeqFilterForm
#
# URL for transcripts:
# /asm/<asm_pk>/loci - loci for the assembly
# /asm/<asm_pk>/transcripts - transcripts for the assembly
# /asm/<asm_pk>/hits - refseqs of BLAST hits for the assembly
#
# /loci/<locus_pk> - individual locus
# - also shows all transcripts for that locus
# /transcripts/<transcript_pk> - individual transcript
# - also shows all BLAST hits for that transcript
# /refseqs/<accession> - individual refseq
# - also shows all transcripts that have BLAST hits on that refseq
urlpatterns = patterns('',
url(r'^$', views.HomeView.as_view(), name='tasm_home_view'),
url(r'^refseqs/$', views.FilteredListView.as_view(
model=RefSeq,
template_name='tasm/refseq_list.html',
form_class=RefSeqFilterForm
), name='tasm_refseq_list_view'),
url(r'^asm/(?P<asm_pk>\d+)/loci/$', views.FilteredListView.as_view(
model=Locus,
template_name='tasm/loci_list.html'
), name='tasm_loci_for_asm_view'),
url(r'^asm/(?P<asm_pk>\d+)/transcripts/$', views.FilteredListView.as_view(
model=Transcript,
template_name='tasm/trasncript_list.html',
form_class=TranscriptFilterForm,
view_name='tasm_transcripts_for_asm_view'
), name='tasm_transcripts_for_asm_view'),
url(r'^asm/(?P<asm_pk>\d+)/best/$', views.BestTranscriptsView.as_view(
template_name='tasm/trasncript_list.html',
form_class=TranscriptFilterForm,
view_name='tasm_best_transcripts_for_asm_view'
), name='tasm_best_transcripts_for_asm_view'),
url(r'^asm/(?P<asm_pk>\d+)/orphans/$', views.BestOrphansView.as_view(
template_name='tasm/trasncript_list.html',
form_class=TranscriptFilterForm,
view_name='tasm_orphan_transcripts_for_asm_view'
), name='tasm_orphan_transcripts_for_asm_view'),
url(r'^asm/(?P<asm_pk>\d+)/hits/$', views.FilteredListView.as_view(
model=RefSeq,
template_name='tasm/refseq_list.html',
form_class=RefSeqFilterForm
), name='tasm_refseqs_for_asm_view'),
url(r'^asm/(?P<asm_pk>\d+)/plots/$', views.TranscriptPlotView.as_view(
template_name='tasm/plots.html',
view_name='tasm_transcript_plots_view'
), name='tasm_transcript_plots_view'),
url(r'^loci/(?P<pk>\d+)/$', DetailView.as_view(
model=Locus,
template_name='tasm/locus.html',
context_object_name='locus'), name='tasm_locus_view'),
url(r'^refseqs/(?P<accession>\w+)/$', DetailView.as_view(
model=RefSeq,
slug_field='accession',
slug_url_kwarg='accession',
template_name='tasm/refseq.html',
context_object_name='refseq'), name='tasm_refseq_view'),
url(r'^transcripts/(?P<pk>\d+)/$', DetailView.as_view(
model=Transcript,
template_name='tasm/transcript_list.html',
context_object_name='transcript'), name='tasm_transcript_view'),
url(r'^asm/(?P<asm_pk>\d+)/plot/$', plotting.TranscriptPlotView.as_view(),
name='tasm_transcripts_plot_for_asm_view'),
)
|
eco32i/tweed
|
tasm/urls.py
|
Python
|
bsd-3-clause
| 3,330
|
[
"BLAST"
] |
6e38ca3837b50439eb4c51f71fe8382dee90a0b0e1011f42329dd24984f61952
|
from pyparsing import CaselessKeyword, Literal, Word, StringEnd, Optional, Combine, Group, ParseException
from pyparsing import alphas, nums, alphanums, delimitedList
import yaml
from dalton.config.models import SecurityGroup, Rule
class YamlFileSecurityGroupsConfigLoader(object):
"""
Loads SecurityGroups from a YAML configuration file.
Format example:
default:
options:
description: default group applied to all instances
prune: true
rules:
- tcp port 0-65535 default # Security Group with name 'default' to All TCP
- udp port 0-65535 sg-123456 # Security Group with id 'sg-123456' to All UDP
- icmp port 8 0.0.0.0/0 # Anywhere to ICMP Type 8; Echo Request
- tcp port 22 203.0.113.1 # Chief's Home to SSH
load-balancer:
options:
description: custom application server instances
prune: false
rules:
- tcp port 80, 443 0.0.0.0/0 # Anywhere to HTTP/HTTPS
"""
def __init__(self, yaml_path):
self.yaml_path = yaml_path
def load(self):
with open(self.yaml_path, 'r') as f:
configs = yaml.safe_load(f.read())  # safe_load: config needs no arbitrary object construction
return self.parse_configs(configs)
@classmethod
def parse_configs(cls, security_group_configs):
security_groups = {}
for security_group_name, security_group_config in security_group_configs.iteritems():
security_groups[security_group_name] = cls.parse_config(security_group_config or {})
return security_groups
@classmethod
def parse_config(cls, security_group_config):
options = security_group_config.get('options', {})
rules = set(rule for rule_string in security_group_config.get('rules', []) for rule in RuleParser.parse(rule_string))
return SecurityGroup(options.get('description', "Dalton Paradise"), rules, prune=options.get('prune', True))
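# Usage sketch (editor's addition; the YAML path is hypothetical):
# groups = YamlFileSecurityGroupsConfigLoader('security_groups.yml').load()
# groups['default'] is a SecurityGroup whose .rules are parsed Rule objects.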
class RuleParser(object):
"""
Responsible for parsing security group rules.
Rule syntax is specified in Augmented Backus-Naur Form (ABNF), including the following core ABNF syntax rules
defined by that specification: ALPHA (letters), DIGIT (decimal digits), and SP (space).
Rule = [ protocol SP ] [ "port" SP ] ports SP source
protocol = "tcp" / "udp" / "icmp"
ports = port-range *[ [ SP ] "," [ SP ] port-range ]
port-range = port [ "-" port ]
port = 1*DIGIT
source = security-group-id / security-group-name / ipv4-cidr-block
security-group-id = "sg-" 1*alphanum
security-group-name = 1*ALPHA *(alphanum / "-" / "." / "-")
alphanum = ALPHA / DIGIT
ipv4-cidr-block = ipv4-address [ "/" ipv4-cidr-length ]
ipv4-address = dec-octet "." dec-octet "." dec-octet "." dec-octet
dec-octet = DIGIT ; 0-9
/ %x31-39 DIGIT ; 10-99
/ "1" 2DIGIT ; 100-199
/ "2" %x30-34 DIGIT ; 200-249
/ "25" %x30-35 ; 250-255
ipv4-cidr-length = DIGIT ; 0-9
/ %x31-32 DIGIT ; 10-29
/ "3" %x30-32 ; 30-32
If unspecified, protocol will default to "tcp".
See the test cases for examples of supported rules.
"""
to_int = lambda tokens: int(tokens[0])
to_port_range = lambda tokens: [(tokens[0].port, tokens[0].port)] if tokens[0].port or tokens[0].port==0 else [(tokens[0][0].port, tokens[0][1].port)]
to_ipv4_prefix = lambda instr, loc, tokens: int(tokens[0]) if int(tokens[0]) <= 32 else RuleParser.raises(ParseException(instr, loc, "%s is an invalid prefix length" % tokens[0]))
to_octet = lambda instr, loc, tokens: int(tokens[0]) if int(tokens[0]) <= 255 else RuleParser.raises(ParseException(instr, loc, "%s is an invalid octet" % tokens[0]))
invalid_address = lambda instr, loc, expr, err: RuleParser.raises(ParseException(instr, loc, "%s is an invalid IPv4 address: %s" % (instr, err)))
protocol = CaselessKeyword("tcp") ^ CaselessKeyword("udp") ^ CaselessKeyword("icmp")
port = Group(Word(nums).setParseAction(to_int)('port'))
port_range = Group((port + Literal("-").suppress() + port))
normalized_port_range = (port ^ port_range).setParseAction(to_port_range)
ports = delimitedList(normalized_port_range)('ports')
security_group_id = Combine("sg-" + Word(alphanums, min=1))
security_group_name = Word(alphas, alphanums + "-._", min=1)
octet = Word(nums, min=1, max=3).setParseAction(to_octet)
ip_address = Combine(octet + ('.' + octet)*3)
ipv4_prefix = Word(nums, min=1, max=2).setParseAction(to_ipv4_prefix)
cidr_address = ip_address + Literal('/').suppress() + ipv4_prefix
ip_or_cidr_address = ip_address.copy().setParseAction(lambda tokens: [tokens[0], 32]) ^ cidr_address
normalized_address = ip_or_cidr_address.setParseAction(lambda tokens: tokens[0] + '/' + str(tokens[1])).setFailAction(invalid_address)
source = normalized_address('address') ^ security_group_id('security_group_id') ^ security_group_name('security_group_name')
parser = rule = Optional(protocol, default="tcp")('protocol') + Optional(CaselessKeyword("port")) + ports + source + StringEnd()
@classmethod
def parse(cls, rule_string):
"""
Parses a rule string into a list of rules.
This may return multiple rules because multiple ports and ranges
are supported in a single rule string.
"""
result = cls.parser.parseString(rule_string)
kwargs = {
'address': result.address or None,
'security_group_id': result.security_group_id or None,
'security_group_name': result.security_group_name or None
}
return [Rule(result.protocol, from_port, to_port, **kwargs) for from_port, to_port in result.ports]
@staticmethod
def raises(exception):
raise exception
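# Usage sketch (editor's addition; values are illustrative):
# RuleParser.parse('tcp port 80, 443 0.0.0.0/0') yields two Rule objects,
# one per port, both with source address '0.0.0.0/0'.
# RuleParser.parse('22 sg-1a2b3c') defaults to tcp and targets a security group id.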
|
signal/dalton
|
dalton/config/loader.py
|
Python
|
apache-2.0
| 6,384
|
[
"Dalton"
] |
046a46bb6d3075eb34a19dd8d860ef8906112e40f8d50db0628be81cbfce17e0
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility functions and for loading neurons."""
import logging
import os
import shutil
import tempfile
import uuid
from functools import partial, lru_cache
from io import IOBase, open
from pathlib import Path
from neurom.core.population import Population
from neurom.exceptions import NeuroMError, RawDataError
from neurom.fst._core import FstNeuron
from neurom.io import neurolucida, swc, hdf5
from neurom.io.datawrapper import DataWrapper
L = logging.getLogger(__name__)
def _is_morphology_file(filepath):
"""Check if `filepath` is a file with one of morphology file extensions."""
return filepath.is_file() and filepath.suffix.lower() in {'.swc', '.h5', '.asc'}
class NeuronLoader(object):
"""Caching morphology loader.
Arguments:
directory: path to directory with morphology files
file_ext: file extension to look for (if not set, will pick any of .swc|.h5|.asc)
cache_size: size of LRU cache (if not set, no caching done)
"""
def __init__(self, directory, file_ext=None, cache_size=None):
"""Initialize a NeuronLoader object."""
self.directory = Path(directory)
self.file_ext = file_ext
if cache_size is not None:
self.get = lru_cache(maxsize=cache_size)(self.get)
def _filepath(self, name):
"""File path to `name` morphology file."""
if self.file_ext is None:
candidates = self.directory.glob(name + ".*")
try:
return next(filter(_is_morphology_file, candidates))
except StopIteration as e:
raise NeuroMError("Can not find morphology file for '%s' " % name) from e
else:
return Path(self.directory, name + self.file_ext)
# pylint:disable=method-hidden
def get(self, name):
"""Get `name` morphology data."""
return load_neuron(self._filepath(name))
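# Usage sketch (editor's addition; directory and name are hypothetical):
# loader = NeuronLoader('/data/morphologies', file_ext='.swc', cache_size=128)
# nrn = loader.get('my_cell')  # loads /data/morphologies/my_cell.swc and caches it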
def get_morph_files(directory):
"""Get a list of all morphology files in a directory.
Returns:
list with all files with extensions '.swc' , 'h5' or '.asc' (case insensitive)
"""
directory = Path(directory)
return list(filter(_is_morphology_file, directory.iterdir()))
def get_files_by_path(path):
"""Get a file or set of files from a file path.
Return list of files with path
"""
path = Path(path)
if path.is_file():
return [path]
if path.is_dir():
return get_morph_files(path)
raise IOError('Invalid data path %s' % path)
def load_neuron(handle, reader=None):
"""Build section trees from an h5 or swc file."""
if isinstance(handle, str):
handle = Path(handle)
rdw = load_data(handle, reader)
name = handle.stem if isinstance(handle, Path) else None
return FstNeuron(rdw, name)
def load_neurons(neurons,
neuron_loader=load_neuron,
name=None,
population_class=Population,
ignored_exceptions=()):
"""Create a population object.
From all morphologies in a directory of from morphologies in a list of file names.
Arguments:
neurons: directory path or list of neuron file paths
neuron_loader: function taking a filename and returning a neuron
population_class: class representing populations
name (str): optional name of population. By default 'Population' or\
filepath basename depending on whether neurons is list or\
directory path respectively.
Returns:
neuron population object
"""
if isinstance(neurons, str):
neurons = Path(neurons)
if isinstance(neurons, Path):
files = get_files_by_path(neurons)
name = name or neurons.name
else:
files = neurons
name = name or 'Population'
ignored_exceptions = tuple(ignored_exceptions)
pop = []
for f in files:
try:
pop.append(neuron_loader(f))
except NeuroMError as e:
if isinstance(e, ignored_exceptions):
L.info('Ignoring exception "%s" for file %s',
e, f.name)
continue
raise
return population_class(pop, name=name)
def _get_file(handle):
"""Returns the filename of the file to read.
If handle is a stream, a temp file is written on disk first
and its filename is returned
"""
if not isinstance(handle, IOBase):
return handle
fd, temp_file = tempfile.mkstemp(str(uuid.uuid4()), prefix='neurom-')
os.close(fd)
with open(temp_file, 'w') as fd:
handle.seek(0)
shutil.copyfileobj(handle, fd)
return temp_file
def load_data(handle, reader=None):
"""Unpack data into a raw data wrapper."""
if not reader:
reader = handle.suffix[1:].lower()
if reader not in _READERS:
raise NeuroMError('Do not have a loader for "%s" extension' % reader)
filename = _get_file(handle)
try:
return _READERS[reader](filename)
except Exception as e:
L.exception('Error reading file %s, using "%s" loader', filename, reader)
raise RawDataError('Error reading file %s:\n%s' % (filename, str(e))) from e
def _load_h5(filename):
"""Delay loading of h5py until it is needed."""
return hdf5.read(filename,
remove_duplicates=False,
data_wrapper=DataWrapper)
_READERS = {
'swc': partial(swc.read,
data_wrapper=DataWrapper),
'h5': _load_h5,
'asc': partial(neurolucida.read,
data_wrapper=DataWrapper)
}
|
wizmer/NeuroM
|
neurom/io/utils.py
|
Python
|
bsd-3-clause
| 7,273
|
[
"NEURON"
] |
4a7a08246739345a23b55e715f20b1a98f42f5808fccde17e4e7ca2d53316c40
|